-rw-r--r--  0000_README                  |    4
-rw-r--r--  1126_linux-5.10.127.patch    | 4804
2 files changed, 4808 insertions, 0 deletions
diff --git a/0000_README b/0000_README
index 5378e1d4..0104ffd6 100644
--- a/0000_README
+++ b/0000_README
@@ -547,6 +547,10 @@ Patch: 1125_linux-5.10.126.patch
From: http://www.kernel.org
Desc: Linux 5.10.126
+Patch: 1126_linux-5.10.127.patch
+From: http://www.kernel.org
+Desc: Linux 5.10.127
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1126_linux-5.10.127.patch b/1126_linux-5.10.127.patch
new file mode 100644
index 00000000..db80f4e8
--- /dev/null
+++ b/1126_linux-5.10.127.patch
@@ -0,0 +1,4804 @@
+diff --git a/Documentation/ABI/testing/sysfs-bus-iio-vf610 b/Documentation/ABI/testing/sysfs-bus-iio-vf610
+index 308a6756d3bf3..491ead8044888 100644
+--- a/Documentation/ABI/testing/sysfs-bus-iio-vf610
++++ b/Documentation/ABI/testing/sysfs-bus-iio-vf610
+@@ -1,4 +1,4 @@
+-What: /sys/bus/iio/devices/iio:deviceX/conversion_mode
++What: /sys/bus/iio/devices/iio:deviceX/in_conversion_mode
+ KernelVersion: 4.2
+ Contact: linux-iio@vger.kernel.org
+ Description:
+diff --git a/Makefile b/Makefile
+index 57434487c2b4d..e3eb9ba19f86e 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 10
+-SUBLEVEL = 126
++SUBLEVEL = 127
+ EXTRAVERSION =
+ NAME = Dare mighty things
+
+@@ -1156,7 +1156,7 @@ KBUILD_MODULES := 1
+
+ autoksyms_recursive: descend modules.order
+ $(Q)$(CONFIG_SHELL) $(srctree)/scripts/adjust_autoksyms.sh \
+- "$(MAKE) -f $(srctree)/Makefile vmlinux"
++ "$(MAKE) -f $(srctree)/Makefile autoksyms_recursive"
+ endif
+
+ autoksyms_h := $(if $(CONFIG_TRIM_UNUSED_KSYMS), include/generated/autoksyms.h)
+diff --git a/arch/arm/boot/dts/imx6qdl.dtsi b/arch/arm/boot/dts/imx6qdl.dtsi
+index 7a8837cbe21bf..7858ae5d39df7 100644
+--- a/arch/arm/boot/dts/imx6qdl.dtsi
++++ b/arch/arm/boot/dts/imx6qdl.dtsi
+@@ -756,7 +756,7 @@
+ regulator-name = "vddpu";
+ regulator-min-microvolt = <725000>;
+ regulator-max-microvolt = <1450000>;
+- regulator-enable-ramp-delay = <150>;
++ regulator-enable-ramp-delay = <380>;
+ anatop-reg-offset = <0x140>;
+ anatop-vol-bit-shift = <9>;
+ anatop-vol-bit-width = <5>;
+diff --git a/arch/arm/boot/dts/imx7s.dtsi b/arch/arm/boot/dts/imx7s.dtsi
+index 84d9cc13afb95..9e1b0af0aa43f 100644
+--- a/arch/arm/boot/dts/imx7s.dtsi
++++ b/arch/arm/boot/dts/imx7s.dtsi
+@@ -102,6 +102,7 @@
+ compatible = "usb-nop-xceiv";
+ clocks = <&clks IMX7D_USB_HSIC_ROOT_CLK>;
+ clock-names = "main_clk";
++ power-domains = <&pgc_hsic_phy>;
+ #phy-cells = <0>;
+ };
+
+@@ -1104,7 +1105,6 @@
+ compatible = "fsl,imx7d-usb", "fsl,imx27-usb";
+ reg = <0x30b30000 0x200>;
+ interrupts = <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>;
+- power-domains = <&pgc_hsic_phy>;
+ clocks = <&clks IMX7D_USB_CTRL_CLK>;
+ fsl,usbphy = <&usbphynop3>;
+ fsl,usbmisc = <&usbmisc3 0>;
+diff --git a/arch/arm/mach-axxia/platsmp.c b/arch/arm/mach-axxia/platsmp.c
+index 512943eae30a5..2e203626eda52 100644
+--- a/arch/arm/mach-axxia/platsmp.c
++++ b/arch/arm/mach-axxia/platsmp.c
+@@ -39,6 +39,7 @@ static int axxia_boot_secondary(unsigned int cpu, struct task_struct *idle)
+ return -ENOENT;
+
+ syscon = of_iomap(syscon_np, 0);
++ of_node_put(syscon_np);
+ if (!syscon)
+ return -ENOMEM;
+
+diff --git a/arch/arm/mach-cns3xxx/core.c b/arch/arm/mach-cns3xxx/core.c
+index e4f4b20b83a2d..3fc4ec830e3a3 100644
+--- a/arch/arm/mach-cns3xxx/core.c
++++ b/arch/arm/mach-cns3xxx/core.c
+@@ -372,6 +372,7 @@ static void __init cns3xxx_init(void)
+ /* De-Asscer SATA Reset */
+ cns3xxx_pwr_soft_rst(CNS3XXX_PWR_SOFTWARE_RST(SATA));
+ }
++ of_node_put(dn);
+
+ dn = of_find_compatible_node(NULL, NULL, "cavium,cns3420-sdhci");
+ if (of_device_is_available(dn)) {
+@@ -385,6 +386,7 @@ static void __init cns3xxx_init(void)
+ cns3xxx_pwr_clk_en(CNS3XXX_PWR_CLK_EN(SDIO));
+ cns3xxx_pwr_soft_rst(CNS3XXX_PWR_SOFTWARE_RST(SDIO));
+ }
++ of_node_put(dn);
+
+ pm_power_off = cns3xxx_power_off;
+
+diff --git a/arch/arm/mach-exynos/exynos.c b/arch/arm/mach-exynos/exynos.c
+index 83d1d1327f96e..1276585f72c53 100644
+--- a/arch/arm/mach-exynos/exynos.c
++++ b/arch/arm/mach-exynos/exynos.c
+@@ -149,6 +149,7 @@ static void exynos_map_pmu(void)
+ np = of_find_matching_node(NULL, exynos_dt_pmu_match);
+ if (np)
+ pmu_base_addr = of_iomap(np, 0);
++ of_node_put(np);
+ }
+
+ static void __init exynos_init_irq(void)
+diff --git a/arch/mips/vr41xx/common/icu.c b/arch/mips/vr41xx/common/icu.c
+index 7b7f25b4b057e..9240bcdbe74e4 100644
+--- a/arch/mips/vr41xx/common/icu.c
++++ b/arch/mips/vr41xx/common/icu.c
+@@ -640,8 +640,6 @@ static int icu_get_irq(unsigned int irq)
+
+ printk(KERN_ERR "spurious ICU interrupt: %04x,%04x\n", pend1, pend2);
+
+- atomic_inc(&irq_err_count);
+-
+ return -1;
+ }
+
+diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
+index 14f3252f2da03..2d89f79f460cb 100644
+--- a/arch/parisc/Kconfig
++++ b/arch/parisc/Kconfig
+@@ -11,6 +11,7 @@ config PARISC
+ select ARCH_WANT_FRAME_POINTERS
+ select ARCH_HAS_ELF_RANDOMIZE
+ select ARCH_HAS_STRICT_KERNEL_RWX
++ select ARCH_HAS_STRICT_MODULE_RWX
+ select ARCH_HAS_UBSAN_SANITIZE_ALL
+ select ARCH_NO_SG_CHAIN
+ select ARCH_SUPPORTS_MEMORY_FAILURE
+diff --git a/arch/parisc/include/asm/fb.h b/arch/parisc/include/asm/fb.h
+index d63a2acb91f2b..55d29c4f716e6 100644
+--- a/arch/parisc/include/asm/fb.h
++++ b/arch/parisc/include/asm/fb.h
+@@ -12,7 +12,7 @@ static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
+ pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
+ }
+
+-#if defined(CONFIG_STI_CONSOLE) || defined(CONFIG_FB_STI)
++#if defined(CONFIG_FB_STI)
+ int fb_is_primary_device(struct fb_info *info);
+ #else
+ static inline int fb_is_primary_device(struct fb_info *info)
+diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
+index cfb8fd76afb43..c43cc26bde5db 100644
+--- a/arch/powerpc/kernel/process.c
++++ b/arch/powerpc/kernel/process.c
+@@ -1800,7 +1800,7 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
+ tm_reclaim_current(0);
+ #endif
+
+- memset(regs->gpr, 0, sizeof(regs->gpr));
++ memset(&regs->gpr[1], 0, sizeof(regs->gpr) - sizeof(regs->gpr[0]));
+ regs->ctr = 0;
+ regs->link = 0;
+ regs->xer = 0;
+diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
+index cf421eb7f90d4..bf962051af0a0 100644
+--- a/arch/powerpc/kernel/rtas.c
++++ b/arch/powerpc/kernel/rtas.c
+@@ -1040,7 +1040,7 @@ static struct rtas_filter rtas_filters[] __ro_after_init = {
+ { "get-time-of-day", -1, -1, -1, -1, -1 },
+ { "ibm,get-vpd", -1, 0, -1, 1, 2 },
+ { "ibm,lpar-perftools", -1, 2, 3, -1, -1 },
+- { "ibm,platform-dump", -1, 4, 5, -1, -1 },
++ { "ibm,platform-dump", -1, 4, 5, -1, -1 }, /* Special cased */
+ { "ibm,read-slot-reset-state", -1, -1, -1, -1, -1 },
+ { "ibm,scan-log-dump", -1, 0, 1, -1, -1 },
+ { "ibm,set-dynamic-indicator", -1, 2, -1, -1, -1 },
+@@ -1087,6 +1087,15 @@ static bool block_rtas_call(int token, int nargs,
+ size = 1;
+
+ end = base + size - 1;
++
++ /*
++ * Special case for ibm,platform-dump - NULL buffer
++ * address is used to indicate end of dump processing
++ */
++ if (!strcmp(f->name, "ibm,platform-dump") &&
++ base == 0)
++ return false;
++
+ if (!in_rmo_buf(base, end))
+ goto err;
+ }
+diff --git a/arch/powerpc/platforms/powernv/powernv.h b/arch/powerpc/platforms/powernv/powernv.h
+index 11df4e16a1cc3..528946ee7a777 100644
+--- a/arch/powerpc/platforms/powernv/powernv.h
++++ b/arch/powerpc/platforms/powernv/powernv.h
+@@ -42,4 +42,6 @@ ssize_t memcons_copy(struct memcons *mc, char *to, loff_t pos, size_t count);
+ u32 memcons_get_size(struct memcons *mc);
+ struct memcons *memcons_init(struct device_node *node, const char *mc_prop_name);
+
++void pnv_rng_init(void);
++
+ #endif /* _POWERNV_H */
+diff --git a/arch/powerpc/platforms/powernv/rng.c b/arch/powerpc/platforms/powernv/rng.c
+index 69c344c8884f3..2b5a1a41234cc 100644
+--- a/arch/powerpc/platforms/powernv/rng.c
++++ b/arch/powerpc/platforms/powernv/rng.c
+@@ -17,6 +17,7 @@
+ #include <asm/prom.h>
+ #include <asm/machdep.h>
+ #include <asm/smp.h>
++#include "powernv.h"
+
+ #define DARN_ERR 0xFFFFFFFFFFFFFFFFul
+
+@@ -28,7 +29,6 @@ struct powernv_rng {
+
+ static DEFINE_PER_CPU(struct powernv_rng *, powernv_rng);
+
+-
+ int powernv_hwrng_present(void)
+ {
+ struct powernv_rng *rng;
+@@ -98,9 +98,6 @@ static int initialise_darn(void)
+ return 0;
+ }
+ }
+-
+- pr_warn("Unable to use DARN for get_random_seed()\n");
+-
+ return -EIO;
+ }
+
+@@ -163,32 +160,55 @@ static __init int rng_create(struct device_node *dn)
+
+ rng_init_per_cpu(rng, dn);
+
+- pr_info_once("Registering arch random hook.\n");
+-
+ ppc_md.get_random_seed = powernv_get_random_long;
+
+ return 0;
+ }
+
+-static __init int rng_init(void)
++static int __init pnv_get_random_long_early(unsigned long *v)
+ {
+ struct device_node *dn;
+- int rc;
++
++ if (!slab_is_available())
++ return 0;
++
++ if (cmpxchg(&ppc_md.get_random_seed, pnv_get_random_long_early,
++ NULL) != pnv_get_random_long_early)
++ return 0;
+
+ for_each_compatible_node(dn, NULL, "ibm,power-rng") {
+- rc = rng_create(dn);
+- if (rc) {
+- pr_err("Failed creating rng for %pOF (%d).\n",
+- dn, rc);
++ if (rng_create(dn))
+ continue;
+- }
+-
+ /* Create devices for hwrng driver */
+ of_platform_device_create(dn, NULL, NULL);
+ }
+
+- initialise_darn();
++ if (!ppc_md.get_random_seed)
++ return 0;
++ return ppc_md.get_random_seed(v);
++}
++
++void __init pnv_rng_init(void)
++{
++ struct device_node *dn;
+
++ /* Prefer darn over the rest. */
++ if (!initialise_darn())
++ return;
++
++ dn = of_find_compatible_node(NULL, NULL, "ibm,power-rng");
++ if (dn)
++ ppc_md.get_random_seed = pnv_get_random_long_early;
++
++ of_node_put(dn);
++}
++
++static int __init pnv_rng_late_init(void)
++{
++ unsigned long v;
++ /* In case it wasn't called during init for some other reason. */
++ if (ppc_md.get_random_seed == pnv_get_random_long_early)
++ pnv_get_random_long_early(&v);
+ return 0;
+ }
+-machine_subsys_initcall(powernv, rng_init);
++machine_subsys_initcall(powernv, pnv_rng_late_init);
+diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c
+index 4426a109ec2f4..1a2f12dc05525 100644
+--- a/arch/powerpc/platforms/powernv/setup.c
++++ b/arch/powerpc/platforms/powernv/setup.c
+@@ -193,6 +193,8 @@ static void __init pnv_setup_arch(void)
+ pnv_check_guarded_cores();
+
+ /* XXX PMCS */
++
++ pnv_rng_init();
+ }
+
+ static void __init pnv_init(void)
+diff --git a/arch/powerpc/platforms/pseries/pseries.h b/arch/powerpc/platforms/pseries/pseries.h
+index 593840847cd3d..ada9601aaff1a 100644
+--- a/arch/powerpc/platforms/pseries/pseries.h
++++ b/arch/powerpc/platforms/pseries/pseries.h
+@@ -114,4 +114,6 @@ int dlpar_workqueue_init(void);
+ void pseries_setup_security_mitigations(void);
+ void pseries_lpar_read_hblkrm_characteristics(void);
+
++void pseries_rng_init(void);
++
+ #endif /* _PSERIES_PSERIES_H */
+diff --git a/arch/powerpc/platforms/pseries/rng.c b/arch/powerpc/platforms/pseries/rng.c
+index 6268545947b83..6ddfdeaace9ef 100644
+--- a/arch/powerpc/platforms/pseries/rng.c
++++ b/arch/powerpc/platforms/pseries/rng.c
+@@ -10,6 +10,7 @@
+ #include <asm/archrandom.h>
+ #include <asm/machdep.h>
+ #include <asm/plpar_wrappers.h>
++#include "pseries.h"
+
+
+ static int pseries_get_random_long(unsigned long *v)
+@@ -24,19 +25,13 @@ static int pseries_get_random_long(unsigned long *v)
+ return 0;
+ }
+
+-static __init int rng_init(void)
++void __init pseries_rng_init(void)
+ {
+ struct device_node *dn;
+
+ dn = of_find_compatible_node(NULL, NULL, "ibm,random");
+ if (!dn)
+- return -ENODEV;
+-
+- pr_info("Registering arch random hook.\n");
+-
++ return;
+ ppc_md.get_random_seed = pseries_get_random_long;
+-
+ of_node_put(dn);
+- return 0;
+ }
+-machine_subsys_initcall(pseries, rng_init);
+diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
+index 47dfada140e19..0eac9ca782c21 100644
+--- a/arch/powerpc/platforms/pseries/setup.c
++++ b/arch/powerpc/platforms/pseries/setup.c
+@@ -824,6 +824,8 @@ static void __init pSeries_setup_arch(void)
+
+ if (swiotlb_force == SWIOTLB_FORCE)
+ ppc_swiotlb_enable = 1;
++
++ pseries_rng_init();
+ }
+
+ static void pseries_panic(char *str)
+diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c
+index 0eb1d1cc53a88..dddb32e53db8b 100644
+--- a/arch/s390/kernel/perf_cpum_cf.c
++++ b/arch/s390/kernel/perf_cpum_cf.c
+@@ -292,6 +292,26 @@ static int __hw_perf_event_init(struct perf_event *event, unsigned int type)
+ return err;
+ }
+
++/* Events CPU_CYLCES and INSTRUCTIONS can be submitted with two different
++ * attribute::type values:
++ * - PERF_TYPE_HARDWARE:
++ * - pmu->type:
++ * Handle both type of invocations identical. They address the same hardware.
++ * The result is different when event modifiers exclude_kernel and/or
++ * exclude_user are also set.
++ */
++static int cpumf_pmu_event_type(struct perf_event *event)
++{
++ u64 ev = event->attr.config;
++
++ if (cpumf_generic_events_basic[PERF_COUNT_HW_CPU_CYCLES] == ev ||
++ cpumf_generic_events_basic[PERF_COUNT_HW_INSTRUCTIONS] == ev ||
++ cpumf_generic_events_user[PERF_COUNT_HW_CPU_CYCLES] == ev ||
++ cpumf_generic_events_user[PERF_COUNT_HW_INSTRUCTIONS] == ev)
++ return PERF_TYPE_HARDWARE;
++ return PERF_TYPE_RAW;
++}
++
+ static int cpumf_pmu_event_init(struct perf_event *event)
+ {
+ unsigned int type = event->attr.type;
+@@ -301,7 +321,7 @@ static int cpumf_pmu_event_init(struct perf_event *event)
+ err = __hw_perf_event_init(event, type);
+ else if (event->pmu->type == type)
+ /* Registered as unknown PMU */
+- err = __hw_perf_event_init(event, PERF_TYPE_RAW);
++ err = __hw_perf_event_init(event, cpumf_pmu_event_type(event));
+ else
+ return -ENOENT;
+
+diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
+index a0a7ead52698c..1714e85eb26d2 100644
+--- a/arch/x86/net/bpf_jit_comp.c
++++ b/arch/x86/net/bpf_jit_comp.c
+@@ -1261,8 +1261,9 @@ xadd: if (is_imm8(insn->off))
+ case BPF_JMP | BPF_CALL:
+ func = (u8 *) __bpf_call_base + imm32;
+ if (tail_call_reachable) {
++ /* mov rax, qword ptr [rbp - rounded_stack_depth - 8] */
+ EMIT3_off32(0x48, 0x8B, 0x85,
+- -(bpf_prog->aux->stack_depth + 8));
++ -round_up(bpf_prog->aux->stack_depth, 8) - 8);
+ if (!imm32 || emit_call(&prog, func, image + addrs[i - 1] + 7))
+ return -EINVAL;
+ } else {
+diff --git a/arch/xtensa/kernel/time.c b/arch/xtensa/kernel/time.c
+index 77971fe4cc95b..8e81ba63ed389 100644
+--- a/arch/xtensa/kernel/time.c
++++ b/arch/xtensa/kernel/time.c
+@@ -154,6 +154,7 @@ static void __init calibrate_ccount(void)
+ cpu = of_find_compatible_node(NULL, NULL, "cdns,xtensa-cpu");
+ if (cpu) {
+ clk = of_clk_get(cpu, 0);
++ of_node_put(cpu);
+ if (!IS_ERR(clk)) {
+ ccount_freq = clk_get_rate(clk);
+ return;
+diff --git a/arch/xtensa/platforms/xtfpga/setup.c b/arch/xtensa/platforms/xtfpga/setup.c
+index 538e6748e85a7..c79c1d09ea863 100644
+--- a/arch/xtensa/platforms/xtfpga/setup.c
++++ b/arch/xtensa/platforms/xtfpga/setup.c
+@@ -133,6 +133,7 @@ static int __init machine_setup(void)
+
+ if ((eth = of_find_compatible_node(eth, NULL, "opencores,ethoc")))
+ update_local_mac(eth);
++ of_node_put(eth);
+ return 0;
+ }
+ arch_initcall(machine_setup);
+diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
+index 87c5c421e0f46..4466f8bdab2e1 100644
+--- a/drivers/base/regmap/regmap-irq.c
++++ b/drivers/base/regmap/regmap-irq.c
+@@ -220,6 +220,7 @@ static void regmap_irq_enable(struct irq_data *data)
+ struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
+ struct regmap *map = d->map;
+ const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
++ unsigned int reg = irq_data->reg_offset / map->reg_stride;
+ unsigned int mask, type;
+
+ type = irq_data->type.type_falling_val | irq_data->type.type_rising_val;
+@@ -236,14 +237,14 @@ static void regmap_irq_enable(struct irq_data *data)
+ * at the corresponding offset in regmap_irq_set_type().
+ */
+ if (d->chip->type_in_mask && type)
+- mask = d->type_buf[irq_data->reg_offset / map->reg_stride];
++ mask = d->type_buf[reg] & irq_data->mask;
+ else
+ mask = irq_data->mask;
+
+ if (d->chip->clear_on_unmask)
+ d->clear_status = true;
+
+- d->mask_buf[irq_data->reg_offset / map->reg_stride] &= ~mask;
++ d->mask_buf[reg] &= ~mask;
+ }
+
+ static void regmap_irq_disable(struct irq_data *data)
+diff --git a/drivers/char/random.c b/drivers/char/random.c
+index 5776dfd4a6fca..f769d858eda73 100644
+--- a/drivers/char/random.c
++++ b/drivers/char/random.c
+@@ -88,7 +88,7 @@ static RAW_NOTIFIER_HEAD(random_ready_chain);
+
+ /* Control how we warn userspace. */
+ static struct ratelimit_state urandom_warning =
+- RATELIMIT_STATE_INIT("warn_urandom_randomness", HZ, 3);
++ RATELIMIT_STATE_INIT_FLAGS("urandom_warning", HZ, 3, RATELIMIT_MSG_ON_RELEASE);
+ static int ratelimit_disable __read_mostly =
+ IS_ENABLED(CONFIG_WARN_ALL_UNSEEDED_RANDOM);
+ module_param_named(ratelimit_disable, ratelimit_disable, int, 0644);
+@@ -452,7 +452,7 @@ static ssize_t get_random_bytes_user(struct iov_iter *iter)
+
+ /*
+ * Immediately overwrite the ChaCha key at index 4 with random
+- * bytes, in case userspace causes copy_to_user() below to sleep
++ * bytes, in case userspace causes copy_to_iter() below to sleep
+ * forever, so that we still retain forward secrecy in that case.
+ */
+ crng_make_state(chacha_state, (u8 *)&chacha_state[4], CHACHA_KEY_SIZE);
+@@ -1001,7 +1001,7 @@ void add_interrupt_randomness(int irq)
+ if (new_count & MIX_INFLIGHT)
+ return;
+
+- if (new_count < 64 && !time_is_before_jiffies(fast_pool->last + HZ))
++ if (new_count < 1024 && !time_is_before_jiffies(fast_pool->last + HZ))
+ return;
+
+ if (unlikely(!fast_pool->mix.func))
+diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c
+index cfbf10128aaed..2e3b76519b49d 100644
+--- a/drivers/dma-buf/udmabuf.c
++++ b/drivers/dma-buf/udmabuf.c
+@@ -26,8 +26,11 @@ static vm_fault_t udmabuf_vm_fault(struct vm_fault *vmf)
+ {
+ struct vm_area_struct *vma = vmf->vma;
+ struct udmabuf *ubuf = vma->vm_private_data;
++ pgoff_t pgoff = vmf->pgoff;
+
+- vmf->page = ubuf->pages[vmf->pgoff];
++ if (pgoff >= ubuf->pagecount)
++ return VM_FAULT_SIGBUS;
++ vmf->page = ubuf->pages[pgoff];
+ get_page(vmf->page);
+ return 0;
+ }
+diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
+index 98cd715ccc33c..8d09b619c1669 100644
+--- a/drivers/gpio/gpio-vr41xx.c
++++ b/drivers/gpio/gpio-vr41xx.c
+@@ -217,8 +217,6 @@ static int giu_get_irq(unsigned int irq)
+ printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
+ maskl, pendl, maskh, pendh);
+
+- atomic_inc(&irq_err_count);
+-
+ return -EINVAL;
+ }
+
+diff --git a/drivers/gpio/gpio-winbond.c b/drivers/gpio/gpio-winbond.c
+index 7f8f5b02e31d5..4b61d975cc0ec 100644
+--- a/drivers/gpio/gpio-winbond.c
++++ b/drivers/gpio/gpio-winbond.c
+@@ -385,12 +385,13 @@ static int winbond_gpio_get(struct gpio_chip *gc, unsigned int offset)
+ unsigned long *base = gpiochip_get_data(gc);
+ const struct winbond_gpio_info *info;
+ bool val;
++ int ret;
+
+ winbond_gpio_get_info(&offset, &info);
+
+- val = winbond_sio_enter(*base);
+- if (val)
+- return val;
++ ret = winbond_sio_enter(*base);
++ if (ret)
++ return ret;
+
+ winbond_sio_select_logical(*base, info->dev);
+
+diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+index 458b5b26d3c26..de8cc25506d61 100644
+--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
++++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+@@ -960,7 +960,8 @@ void adreno_gpu_cleanup(struct adreno_gpu *adreno_gpu)
+ for (i = 0; i < ARRAY_SIZE(adreno_gpu->info->fw); i++)
+ release_firmware(adreno_gpu->fw[i]);
+
+- pm_runtime_disable(&priv->gpu_pdev->dev);
++ if (pm_runtime_enabled(&priv->gpu_pdev->dev))
++ pm_runtime_disable(&priv->gpu_pdev->dev);
+
+ msm_gpu_cleanup(&adreno_gpu->base);
+
+diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
+index 913de5938782a..b4d0bfc83d70e 100644
+--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
++++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
+@@ -221,6 +221,7 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,
+ encoder = mdp4_lcdc_encoder_init(dev, panel_node);
+ if (IS_ERR(encoder)) {
+ DRM_DEV_ERROR(dev->dev, "failed to construct LCDC encoder\n");
++ of_node_put(panel_node);
+ return PTR_ERR(encoder);
+ }
+
+@@ -230,6 +231,7 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,
+ connector = mdp4_lvds_connector_init(dev, panel_node, encoder);
+ if (IS_ERR(connector)) {
+ DRM_DEV_ERROR(dev->dev, "failed to initialize LVDS connector\n");
++ of_node_put(panel_node);
+ return PTR_ERR(connector);
+ }
+
+diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.c b/drivers/gpu/drm/msm/dp/dp_catalog.c
+index aeca8b2ac5c6b..2da6982efdbfc 100644
+--- a/drivers/gpu/drm/msm/dp/dp_catalog.c
++++ b/drivers/gpu/drm/msm/dp/dp_catalog.c
+@@ -572,7 +572,7 @@ void dp_catalog_ctrl_hpd_config(struct dp_catalog *dp_catalog)
+ dp_write_aux(catalog, REG_DP_DP_HPD_CTRL, DP_DP_HPD_CTRL_HPD_EN);
+ }
+
+-u32 dp_catalog_hpd_get_state_status(struct dp_catalog *dp_catalog)
++u32 dp_catalog_link_is_connected(struct dp_catalog *dp_catalog)
+ {
+ struct dp_catalog_private *catalog = container_of(dp_catalog,
+ struct dp_catalog_private, dp_catalog);
+diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.h b/drivers/gpu/drm/msm/dp/dp_catalog.h
+index 6d257dbebf294..176a9020a520c 100644
+--- a/drivers/gpu/drm/msm/dp/dp_catalog.h
++++ b/drivers/gpu/drm/msm/dp/dp_catalog.h
+@@ -97,7 +97,7 @@ void dp_catalog_ctrl_enable_irq(struct dp_catalog *dp_catalog, bool enable);
+ void dp_catalog_hpd_config_intr(struct dp_catalog *dp_catalog,
+ u32 intr_mask, bool en);
+ void dp_catalog_ctrl_hpd_config(struct dp_catalog *dp_catalog);
+-u32 dp_catalog_hpd_get_state_status(struct dp_catalog *dp_catalog);
++u32 dp_catalog_link_is_connected(struct dp_catalog *dp_catalog);
+ u32 dp_catalog_hpd_get_intr_status(struct dp_catalog *dp_catalog);
+ void dp_catalog_ctrl_phy_reset(struct dp_catalog *dp_catalog);
+ int dp_catalog_ctrl_update_vx_px(struct dp_catalog *dp_catalog, u8 v_level,
+diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c
+index c83a1650437da..b9ca844ce2ad0 100644
+--- a/drivers/gpu/drm/msm/dp/dp_ctrl.c
++++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c
+@@ -1460,6 +1460,30 @@ static int dp_ctrl_reinitialize_mainlink(struct dp_ctrl_private *ctrl)
+ return ret;
+ }
+
++static int dp_ctrl_deinitialize_mainlink(struct dp_ctrl_private *ctrl)
++{
++ struct dp_io *dp_io;
++ struct phy *phy;
++ int ret;
++
++ dp_io = &ctrl->parser->io;
++ phy = dp_io->phy;
++
++ dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, false);
++
++ dp_catalog_ctrl_reset(ctrl->catalog);
++
++ ret = dp_power_clk_enable(ctrl->power, DP_CTRL_PM, false);
++ if (ret) {
++ DRM_ERROR("Failed to disable link clocks. ret=%d\n", ret);
++ }
++
++ phy_power_off(phy);
++ phy_exit(phy);
++
++ return 0;
++}
++
+ static int dp_ctrl_link_maintenance(struct dp_ctrl_private *ctrl)
+ {
+ int ret = 0;
+@@ -1640,8 +1664,7 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl)
+ if (rc)
+ return rc;
+
+- while (--link_train_max_retries &&
+- !atomic_read(&ctrl->dp_ctrl.aborted)) {
++ while (--link_train_max_retries) {
+ rc = dp_ctrl_reinitialize_mainlink(ctrl);
+ if (rc) {
+ DRM_ERROR("Failed to reinitialize mainlink. rc=%d\n",
+@@ -1656,6 +1679,10 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl)
+ break;
+ } else if (training_step == DP_TRAINING_1) {
+ /* link train_1 failed */
++ if (!dp_catalog_link_is_connected(ctrl->catalog)) {
++ break;
++ }
++
+ rc = dp_ctrl_link_rate_down_shift(ctrl);
+ if (rc < 0) { /* already in RBR = 1.6G */
+ if (cr.lane_0_1 & DP_LANE0_1_CR_DONE) {
+@@ -1675,6 +1702,10 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl)
+ }
+ } else if (training_step == DP_TRAINING_2) {
+ /* link train_2 failed, lower lane rate */
++ if (!dp_catalog_link_is_connected(ctrl->catalog)) {
++ break;
++ }
++
+ rc = dp_ctrl_link_lane_down_shift(ctrl);
+ if (rc < 0) {
+ /* end with failure */
+@@ -1695,6 +1726,11 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl)
+ */
+ if (rc == 0) /* link train successfully */
+ dp_ctrl_push_idle(dp_ctrl);
++ else {
++ /* link training failed */
++ dp_ctrl_deinitialize_mainlink(ctrl);
++ rc = -ECONNRESET;
++ }
+
+ return rc;
+ }
+diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
+index ebd05678a27ba..a3de1d0523ea0 100644
+--- a/drivers/gpu/drm/msm/dp/dp_display.c
++++ b/drivers/gpu/drm/msm/dp/dp_display.c
+@@ -45,7 +45,7 @@ enum {
+ ST_CONNECT_PENDING,
+ ST_CONNECTED,
+ ST_DISCONNECT_PENDING,
+- ST_SUSPEND_PENDING,
++ ST_DISPLAY_OFF,
+ ST_SUSPENDED,
+ };
+
+@@ -102,6 +102,8 @@ struct dp_display_private {
+ struct dp_display_mode dp_mode;
+ struct msm_dp dp_display;
+
++ bool encoder_mode_set;
++
+ /* wait for audio signaling */
+ struct completion audio_comp;
+
+@@ -268,7 +270,8 @@ static void dp_display_unbind(struct device *dev, struct device *master,
+ }
+
+ /* disable all HPD interrupts */
+- dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_INT_MASK, false);
++ if (dp->core_initialized)
++ dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_INT_MASK, false);
+
+ kthread_stop(dp->ev_tsk);
+
+@@ -305,13 +308,24 @@ static void dp_display_send_hpd_event(struct msm_dp *dp_display)
+ drm_helper_hpd_irq_event(connector->dev);
+ }
+
+-static int dp_display_send_hpd_notification(struct dp_display_private *dp,
+- bool hpd)
++
++static void dp_display_set_encoder_mode(struct dp_display_private *dp)
+ {
+- static bool encoder_mode_set;
+ struct msm_drm_private *priv = dp->dp_display.drm_dev->dev_private;
+ struct msm_kms *kms = priv->kms;
+
++ if (!dp->encoder_mode_set && dp->dp_display.encoder &&
++ kms->funcs->set_encoder_mode) {
++ kms->funcs->set_encoder_mode(kms,
++ dp->dp_display.encoder, false);
++
++ dp->encoder_mode_set = true;
++ }
++}
++
++static int dp_display_send_hpd_notification(struct dp_display_private *dp,
++ bool hpd)
++{
+ if ((hpd && dp->dp_display.is_connected) ||
+ (!hpd && !dp->dp_display.is_connected)) {
+ DRM_DEBUG_DP("HPD already %s\n", (hpd ? "on" : "off"));
+@@ -324,15 +338,6 @@ static int dp_display_send_hpd_notification(struct dp_display_private *dp,
+
+ dp->dp_display.is_connected = hpd;
+
+- if (dp->dp_display.is_connected && dp->dp_display.encoder
+- && !encoder_mode_set
+- && kms->funcs->set_encoder_mode) {
+- kms->funcs->set_encoder_mode(kms,
+- dp->dp_display.encoder, false);
+- DRM_DEBUG_DP("set_encoder_mode() Completed\n");
+- encoder_mode_set = true;
+- }
+-
+ dp_display_send_hpd_event(&dp->dp_display);
+
+ return 0;
+@@ -368,7 +373,6 @@ static int dp_display_process_hpd_high(struct dp_display_private *dp)
+
+ dp_add_event(dp, EV_USER_NOTIFICATION, true, 0);
+
+-
+ end:
+ return rc;
+ }
+@@ -385,6 +389,8 @@ static void dp_display_host_init(struct dp_display_private *dp)
+ if (dp->usbpd->orientation == ORIENTATION_CC2)
+ flip = true;
+
++ dp_display_set_encoder_mode(dp);
++
+ dp_power_init(dp->power, flip);
+ dp_ctrl_host_init(dp->ctrl, flip);
+ dp_aux_init(dp->aux);
+@@ -468,25 +474,42 @@ static void dp_display_handle_video_request(struct dp_display_private *dp)
+ }
+ }
+
+-static int dp_display_handle_irq_hpd(struct dp_display_private *dp)
++static int dp_display_handle_port_ststus_changed(struct dp_display_private *dp)
+ {
+- u32 sink_request;
+-
+- sink_request = dp->link->sink_request;
++ int rc = 0;
+
+- if (sink_request & DS_PORT_STATUS_CHANGED) {
+- dp_add_event(dp, EV_USER_NOTIFICATION, false, 0);
+- if (dp_display_is_sink_count_zero(dp)) {
+- DRM_DEBUG_DP("sink count is zero, nothing to do\n");
+- return 0;
++ if (dp_display_is_sink_count_zero(dp)) {
++ DRM_DEBUG_DP("sink count is zero, nothing to do\n");
++ if (dp->hpd_state != ST_DISCONNECTED) {
++ dp->hpd_state = ST_DISCONNECT_PENDING;
++ dp_add_event(dp, EV_USER_NOTIFICATION, false, 0);
+ }
++ } else {
++ if (dp->hpd_state == ST_DISCONNECTED) {
++ dp->hpd_state = ST_CONNECT_PENDING;
++ rc = dp_display_process_hpd_high(dp);
++ if (rc)
++ dp->hpd_state = ST_DISCONNECTED;
++ }
++ }
+
+- return dp_display_process_hpd_high(dp);
++ return rc;
++}
++
++static int dp_display_handle_irq_hpd(struct dp_display_private *dp)
++{
++ u32 sink_request = dp->link->sink_request;
++
++ if (dp->hpd_state == ST_DISCONNECTED) {
++ if (sink_request & DP_LINK_STATUS_UPDATED) {
++ DRM_ERROR("Disconnected, no DP_LINK_STATUS_UPDATED\n");
++ return -EINVAL;
++ }
+ }
+
+ dp_ctrl_handle_sink_request(dp->ctrl);
+
+- if (dp->link->sink_request & DP_TEST_LINK_VIDEO_PATTERN)
++ if (sink_request & DP_TEST_LINK_VIDEO_PATTERN)
+ dp_display_handle_video_request(dp);
+
+ return 0;
+@@ -495,7 +518,9 @@ static int dp_display_handle_irq_hpd(struct dp_display_private *dp)
+ static int dp_display_usbpd_attention_cb(struct device *dev)
+ {
+ int rc = 0;
++ u32 sink_request;
+ struct dp_display_private *dp;
++ struct dp_usbpd *hpd;
+
+ if (!dev) {
+ DRM_ERROR("invalid dev\n");
+@@ -509,10 +534,17 @@ static int dp_display_usbpd_attention_cb(struct device *dev)
+ return -ENODEV;
+ }
+
++ hpd = dp->usbpd;
++
+ /* check for any test request issued by sink */
+ rc = dp_link_process_request(dp->link);
+- if (!rc)
+- dp_display_handle_irq_hpd(dp);
++ if (!rc) {
++ sink_request = dp->link->sink_request;
++ if (sink_request & DS_PORT_STATUS_CHANGED)
++ rc = dp_display_handle_port_ststus_changed(dp);
++ else
++ rc = dp_display_handle_irq_hpd(dp);
++ }
+
+ return rc;
+ }
+@@ -530,7 +562,7 @@ static int dp_hpd_plug_handle(struct dp_display_private *dp, u32 data)
+ mutex_lock(&dp->event_mutex);
+
+ state = dp->hpd_state;
+- if (state == ST_SUSPEND_PENDING) {
++ if (state == ST_DISPLAY_OFF || state == ST_SUSPENDED) {
+ mutex_unlock(&dp->event_mutex);
+ return 0;
+ }
+@@ -552,13 +584,18 @@ static int dp_hpd_plug_handle(struct dp_display_private *dp, u32 data)
+ hpd->hpd_high = 1;
+
+ ret = dp_display_usbpd_configure_cb(&dp->pdev->dev);
+- if (ret) { /* failed */
++ if (ret) { /* link train failed */
+ hpd->hpd_high = 0;
+ dp->hpd_state = ST_DISCONNECTED;
+- }
+
+- /* start sanity checking */
+- dp_add_event(dp, EV_CONNECT_PENDING_TIMEOUT, 0, tout);
++ if (ret == -ECONNRESET) { /* cable unplugged */
++ dp->core_initialized = false;
++ }
++
++ } else {
++ /* start sentinel checking in case of missing uevent */
++ dp_add_event(dp, EV_CONNECT_PENDING_TIMEOUT, 0, tout);
++ }
+
+ mutex_unlock(&dp->event_mutex);
+
+@@ -611,11 +648,6 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data)
+ mutex_lock(&dp->event_mutex);
+
+ state = dp->hpd_state;
+- if (state == ST_SUSPEND_PENDING) {
+- mutex_unlock(&dp->event_mutex);
+- return 0;
+- }
+-
+ if (state == ST_DISCONNECT_PENDING || state == ST_DISCONNECTED) {
+ mutex_unlock(&dp->event_mutex);
+ return 0;
+@@ -642,7 +674,7 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data)
+ */
+ dp_display_usbpd_disconnect_cb(&dp->pdev->dev);
+
+- /* start sanity checking */
++ /* start sentinel checking in case of missing uevent */
+ dp_add_event(dp, EV_DISCONNECT_PENDING_TIMEOUT, 0, DP_TIMEOUT_5_SECOND);
+
+ /* signal the disconnect event early to ensure proper teardown */
+@@ -676,17 +708,21 @@ static int dp_disconnect_pending_timeout(struct dp_display_private *dp, u32 data
+ static int dp_irq_hpd_handle(struct dp_display_private *dp, u32 data)
+ {
+ u32 state;
++ int ret;
+
+ mutex_lock(&dp->event_mutex);
+
+ /* irq_hpd can happen at either connected or disconnected state */
+ state = dp->hpd_state;
+- if (state == ST_SUSPEND_PENDING) {
++ if (state == ST_DISPLAY_OFF) {
+ mutex_unlock(&dp->event_mutex);
+ return 0;
+ }
+
+- dp_display_usbpd_attention_cb(&dp->pdev->dev);
++ ret = dp_display_usbpd_attention_cb(&dp->pdev->dev);
++ if (ret == -ECONNRESET) { /* cable unplugged */
++ dp->core_initialized = false;
++ }
+
+ mutex_unlock(&dp->event_mutex);
+
+@@ -831,6 +867,11 @@ static int dp_display_enable(struct dp_display_private *dp, u32 data)
+
+ dp_display = g_dp_display;
+
++ if (dp_display->power_on) {
++ DRM_DEBUG_DP("Link already setup, return\n");
++ return 0;
++ }
++
+ rc = dp_ctrl_on_stream(dp->ctrl);
+ if (!rc)
+ dp_display->power_on = true;
+@@ -863,6 +904,9 @@ static int dp_display_disable(struct dp_display_private *dp, u32 data)
+
+ dp_display = g_dp_display;
+
++ if (!dp_display->power_on)
++ return 0;
++
+ /* wait only if audio was enabled */
+ if (dp_display->audio_enabled) {
+ /* signal the disconnect event */
+@@ -1118,7 +1162,7 @@ static irqreturn_t dp_display_irq_handler(int irq, void *dev_id)
+ }
+
+ if (hpd_isr_status & DP_DP_IRQ_HPD_INT_MASK) {
+- /* delete connect pending event first */
++ /* stop sentinel connect pending checking */
+ dp_del_event(dp, EV_CONNECT_PENDING_TIMEOUT);
+ dp_add_event(dp, EV_IRQ_HPD_INT, 0, 0);
+ }
+@@ -1249,15 +1293,12 @@ static int dp_pm_resume(struct device *dev)
+
+ dp_catalog_ctrl_hpd_config(dp->catalog);
+
+- status = dp_catalog_hpd_get_state_status(dp->catalog);
++ status = dp_catalog_link_is_connected(dp->catalog);
+
+- if (status) {
++ if (status)
+ dp->dp_display.is_connected = true;
+- } else {
++ else
+ dp->dp_display.is_connected = false;
+- /* make sure next resume host_init be called */
+- dp->core_initialized = false;
+- }
+
+ mutex_unlock(&dp->event_mutex);
+
+@@ -1279,6 +1320,9 @@ static int dp_pm_suspend(struct device *dev)
+
+ dp->hpd_state = ST_SUSPENDED;
+
++ /* host_init will be called at pm_resume */
++ dp->core_initialized = false;
++
+ mutex_unlock(&dp->event_mutex);
+
+ return 0;
+@@ -1411,6 +1455,7 @@ int msm_dp_display_enable(struct msm_dp *dp, struct drm_encoder *encoder)
+
+ mutex_lock(&dp_display->event_mutex);
+
++ /* stop sentinel checking */
+ dp_del_event(dp_display, EV_CONNECT_PENDING_TIMEOUT);
+
+ rc = dp_display_set_mode(dp, &dp_display->dp_mode);
+@@ -1429,7 +1474,7 @@ int msm_dp_display_enable(struct msm_dp *dp, struct drm_encoder *encoder)
+
+ state = dp_display->hpd_state;
+
+- if (state == ST_SUSPEND_PENDING)
++ if (state == ST_DISPLAY_OFF)
+ dp_display_host_init(dp_display);
+
+ dp_display_enable(dp_display, 0);
+@@ -1441,7 +1486,8 @@ int msm_dp_display_enable(struct msm_dp *dp, struct drm_encoder *encoder)
+ dp_display_unprepare(dp);
+ }
+
+- if (state == ST_SUSPEND_PENDING)
++ /* manual kick off plug event to train link */
++ if (state == ST_DISPLAY_OFF)
+ dp_add_event(dp_display, EV_IRQ_HPD_INT, 0, 0);
+
+ /* completed connection */
+@@ -1473,6 +1519,7 @@ int msm_dp_display_disable(struct msm_dp *dp, struct drm_encoder *encoder)
+
+ mutex_lock(&dp_display->event_mutex);
+
++ /* stop sentinel checking */
+ dp_del_event(dp_display, EV_DISCONNECT_PENDING_TIMEOUT);
+
+ dp_display_disable(dp_display, 0);
+@@ -1486,7 +1533,7 @@ int msm_dp_display_disable(struct msm_dp *dp, struct drm_encoder *encoder)
+ /* completed disconnection */
+ dp_display->hpd_state = ST_DISCONNECTED;
+ } else {
+- dp_display->hpd_state = ST_SUSPEND_PENDING;
++ dp_display->hpd_state = ST_DISPLAY_OFF;
+ }
+
+ mutex_unlock(&dp_display->event_mutex);
+diff --git a/drivers/gpu/drm/msm/dp/dp_panel.c b/drivers/gpu/drm/msm/dp/dp_panel.c
+index 2768d1d306f00..4e8a19114e87d 100644
+--- a/drivers/gpu/drm/msm/dp/dp_panel.c
++++ b/drivers/gpu/drm/msm/dp/dp_panel.c
+@@ -196,6 +196,11 @@ int dp_panel_read_sink_caps(struct dp_panel *dp_panel,
+ &panel->aux->ddc);
+ if (!dp_panel->edid) {
+ DRM_ERROR("panel edid read failed\n");
++ /* check edid read fail is due to unplug */
++ if (!dp_catalog_link_is_connected(panel->catalog)) {
++ rc = -ETIMEDOUT;
++ goto end;
++ }
+
+ /* fail safe edid */
+ mutex_lock(&connector->dev->mode_config.mutex);
+diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
+index 22ac7c692a81d..ecab6287c1c39 100644
+--- a/drivers/gpu/drm/msm/msm_iommu.c
++++ b/drivers/gpu/drm/msm/msm_iommu.c
+@@ -58,7 +58,7 @@ static int msm_iommu_pagetable_map(struct msm_mmu *mmu, u64 iova,
+ u64 addr = iova;
+ unsigned int i;
+
+- for_each_sg(sgt->sgl, sg, sgt->nents, i) {
++ for_each_sgtable_sg(sgt, sg, i) {
+ size_t size = sg->length;
+ phys_addr_t phys = sg_phys(sg);
+
+diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c
+index 29861fc81b35f..c5912fd537729 100644
+--- a/drivers/gpu/drm/sun4i/sun4i_drv.c
++++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
+@@ -71,7 +71,6 @@ static int sun4i_drv_bind(struct device *dev)
+ goto free_drm;
+ }
+
+- dev_set_drvdata(dev, drm);
+ drm->dev_private = drv;
+ INIT_LIST_HEAD(&drv->frontend_list);
+ INIT_LIST_HEAD(&drv->engine_list);
+@@ -112,6 +111,8 @@ static int sun4i_drv_bind(struct device *dev)
+
+ drm_fbdev_generic_setup(drm, 32);
+
++ dev_set_drvdata(dev, drm);
++
+ return 0;
+
+ finish_poll:
+@@ -128,6 +129,7 @@ static void sun4i_drv_unbind(struct device *dev)
+ {
+ struct drm_device *drm = dev_get_drvdata(dev);
+
++ dev_set_drvdata(dev, NULL);
+ drm_dev_unregister(drm);
+ drm_kms_helper_poll_fini(drm);
+ drm_atomic_helper_shutdown(drm);
+diff --git a/drivers/iio/accel/bma180.c b/drivers/iio/accel/bma180.c
+index da56488182d07..6aa5a72c89b2b 100644
+--- a/drivers/iio/accel/bma180.c
++++ b/drivers/iio/accel/bma180.c
+@@ -1068,11 +1068,12 @@ static int bma180_probe(struct i2c_client *client,
+ data->trig->dev.parent = dev;
+ data->trig->ops = &bma180_trigger_ops;
+ iio_trigger_set_drvdata(data->trig, indio_dev);
+- indio_dev->trig = iio_trigger_get(data->trig);
+
+ ret = iio_trigger_register(data->trig);
+ if (ret)
+ goto err_trigger_free;
++
++ indio_dev->trig = iio_trigger_get(data->trig);
+ }
+
+ ret = iio_triggered_buffer_setup(indio_dev, NULL,
+diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c
+index e7e2802827740..b12e804647063 100644
+--- a/drivers/iio/accel/mma8452.c
++++ b/drivers/iio/accel/mma8452.c
+@@ -1496,10 +1496,14 @@ static int mma8452_reset(struct i2c_client *client)
+ int i;
+ int ret;
+
+- ret = i2c_smbus_write_byte_data(client, MMA8452_CTRL_REG2,
++ /*
++ * Find on fxls8471, after config reset bit, it reset immediately,
++ * and will not give ACK, so here do not check the return value.
++ * The following code will read the reset register, and check whether
++ * this reset works.
++ */
++ i2c_smbus_write_byte_data(client, MMA8452_CTRL_REG2,
+ MMA8452_CTRL_REG2_RST);
+- if (ret < 0)
+- return ret;
+
+ for (i = 0; i < 10; i++) {
+ usleep_range(100, 200);
+@@ -1542,11 +1546,13 @@ static int mma8452_probe(struct i2c_client *client,
+ mutex_init(&data->lock);
+
+ data->chip_info = device_get_match_data(&client->dev);
+- if (!data->chip_info && id) {
+- data->chip_info = &mma_chip_info_table[id->driver_data];
+- } else {
+- dev_err(&client->dev, "unknown device model\n");
+- return -ENODEV;
++ if (!data->chip_info) {
++ if (id) {
++ data->chip_info = &mma_chip_info_table[id->driver_data];
++ } else {
++ dev_err(&client->dev, "unknown device model\n");
++ return -ENODEV;
++ }
+ }
+
+ data->vdd_reg = devm_regulator_get(&client->dev, "vdd");
+diff --git a/drivers/iio/accel/mxc4005.c b/drivers/iio/accel/mxc4005.c
+index 5a2b0ffbb145d..ecd9d8ad59288 100644
+--- a/drivers/iio/accel/mxc4005.c
++++ b/drivers/iio/accel/mxc4005.c
+@@ -461,8 +461,6 @@ static int mxc4005_probe(struct i2c_client *client,
+ data->dready_trig->dev.parent = &client->dev;
+ data->dready_trig->ops = &mxc4005_trigger_ops;
+ iio_trigger_set_drvdata(data->dready_trig, indio_dev);
+- indio_dev->trig = data->dready_trig;
+- iio_trigger_get(indio_dev->trig);
+ ret = devm_iio_trigger_register(&client->dev,
+ data->dready_trig);
+ if (ret) {
+@@ -470,6 +468,8 @@ static int mxc4005_probe(struct i2c_client *client,
+ "failed to register trigger\n");
+ return ret;
+ }
++
++ indio_dev->trig = iio_trigger_get(data->dready_trig);
+ }
+
+ return devm_iio_device_register(&client->dev, indio_dev);
+diff --git a/drivers/iio/adc/adi-axi-adc.c b/drivers/iio/adc/adi-axi-adc.c
+index 9109da2d2e15f..cbe1011a2408a 100644
+--- a/drivers/iio/adc/adi-axi-adc.c
++++ b/drivers/iio/adc/adi-axi-adc.c
+@@ -334,16 +334,19 @@ static struct adi_axi_adc_client *adi_axi_adc_attach_client(struct device *dev)
+
+ if (!try_module_get(cl->dev->driver->owner)) {
+ mutex_unlock(&registered_clients_lock);
++ of_node_put(cln);
+ return ERR_PTR(-ENODEV);
+ }
+
+ get_device(cl->dev);
+ cl->info = info;
+ mutex_unlock(&registered_clients_lock);
++ of_node_put(cln);
+ return cl;
+ }
+
+ mutex_unlock(&registered_clients_lock);
++ of_node_put(cln);
+
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+diff --git a/drivers/iio/adc/axp288_adc.c b/drivers/iio/adc/axp288_adc.c
+index 5f5e8b39e4d22..84dbe9e2f0eff 100644
+--- a/drivers/iio/adc/axp288_adc.c
++++ b/drivers/iio/adc/axp288_adc.c
+@@ -196,6 +196,14 @@ static const struct dmi_system_id axp288_adc_ts_bias_override[] = {
+ },
+ .driver_data = (void *)(uintptr_t)AXP288_ADC_TS_BIAS_80UA,
+ },
++ {
++ /* Nuvision Solo 10 Draw */
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "TMAX"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "TM101W610L"),
++ },
++ .driver_data = (void *)(uintptr_t)AXP288_ADC_TS_BIAS_80UA,
++ },
+ {}
+ };
+
+diff --git a/drivers/iio/adc/stm32-adc-core.c b/drivers/iio/adc/stm32-adc-core.c
+index a83199b212a43..20fc867e39986 100644
+--- a/drivers/iio/adc/stm32-adc-core.c
++++ b/drivers/iio/adc/stm32-adc-core.c
+@@ -64,6 +64,7 @@ struct stm32_adc_priv;
+ * @max_clk_rate_hz: maximum analog clock rate (Hz, from datasheet)
+ * @has_syscfg: SYSCFG capability flags
+ * @num_irqs: number of interrupt lines
++ * @num_adcs: maximum number of ADC instances in the common registers
+ */
+ struct stm32_adc_priv_cfg {
+ const struct stm32_adc_common_regs *regs;
+@@ -71,6 +72,7 @@ struct stm32_adc_priv_cfg {
+ u32 max_clk_rate_hz;
+ unsigned int has_syscfg;
+ unsigned int num_irqs;
++ unsigned int num_adcs;
+ };
+
+ /**
+@@ -333,7 +335,7 @@ static void stm32_adc_irq_handler(struct irq_desc *desc)
+ * before invoking the interrupt handler (e.g. call ISR only for
+ * IRQ-enabled ADCs).
+ */
+- for (i = 0; i < priv->cfg->num_irqs; i++) {
++ for (i = 0; i < priv->cfg->num_adcs; i++) {
+ if ((status & priv->cfg->regs->eoc_msk[i] &&
+ stm32_adc_eoc_enabled(priv, i)) ||
+ (status & priv->cfg->regs->ovr_msk[i]))
+@@ -784,6 +786,7 @@ static const struct stm32_adc_priv_cfg stm32f4_adc_priv_cfg = {
+ .clk_sel = stm32f4_adc_clk_sel,
+ .max_clk_rate_hz = 36000000,
+ .num_irqs = 1,
++ .num_adcs = 3,
+ };
+
+ static const struct stm32_adc_priv_cfg stm32h7_adc_priv_cfg = {
+@@ -792,14 +795,16 @@ static const struct stm32_adc_priv_cfg stm32h7_adc_priv_cfg = {
+ .max_clk_rate_hz = 36000000,
+ .has_syscfg = HAS_VBOOSTER,
+ .num_irqs = 1,
++ .num_adcs = 2,
+ };
+
+ static const struct stm32_adc_priv_cfg stm32mp1_adc_priv_cfg = {
+ .regs = &stm32h7_adc_common_regs,
+ .clk_sel = stm32h7_adc_clk_sel,
+- .max_clk_rate_hz = 40000000,
++ .max_clk_rate_hz = 36000000,
+ .has_syscfg = HAS_VBOOSTER | HAS_ANASWVDD,
+ .num_irqs = 2,
++ .num_adcs = 2,
+ };
+
+ static const struct of_device_id stm32_adc_of_match[] = {
+diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c
+index 9939dee017433..e60ad48196ff8 100644
+--- a/drivers/iio/adc/stm32-adc.c
++++ b/drivers/iio/adc/stm32-adc.c
+@@ -1265,7 +1265,6 @@ static irqreturn_t stm32_adc_threaded_isr(int irq, void *data)
+ struct stm32_adc *adc = iio_priv(indio_dev);
+ const struct stm32_adc_regspec *regs = adc->cfg->regs;
+ u32 status = stm32_adc_readl(adc, regs->isr_eoc.reg);
+- u32 mask = stm32_adc_readl(adc, regs->ier_eoc.reg);
+
+ /* Check ovr status right now, as ovr mask should be already disabled */
+ if (status & regs->isr_ovr.mask) {
+@@ -1280,11 +1279,6 @@ static irqreturn_t stm32_adc_threaded_isr(int irq, void *data)
+ return IRQ_HANDLED;
+ }
+
+- if (!(status & mask))
+- dev_err_ratelimited(&indio_dev->dev,
+- "Unexpected IRQ: IER=0x%08x, ISR=0x%08x\n",
+- mask, status);
+-
+ return IRQ_NONE;
+ }
+
+@@ -1294,10 +1288,6 @@ static irqreturn_t stm32_adc_isr(int irq, void *data)
+ struct stm32_adc *adc = iio_priv(indio_dev);
+ const struct stm32_adc_regspec *regs = adc->cfg->regs;
+ u32 status = stm32_adc_readl(adc, regs->isr_eoc.reg);
+- u32 mask = stm32_adc_readl(adc, regs->ier_eoc.reg);
+-
+- if (!(status & mask))
+- return IRQ_WAKE_THREAD;
+
+ if (status & regs->isr_ovr.mask) {
+ /*
+diff --git a/drivers/iio/chemical/ccs811.c b/drivers/iio/chemical/ccs811.c
+index 60dd87e96f5f8..384d167b4fd65 100644
+--- a/drivers/iio/chemical/ccs811.c
++++ b/drivers/iio/chemical/ccs811.c
+@@ -500,11 +500,11 @@ static int ccs811_probe(struct i2c_client *client,
+ data->drdy_trig->dev.parent = &client->dev;
+ data->drdy_trig->ops = &ccs811_trigger_ops;
+ iio_trigger_set_drvdata(data->drdy_trig, indio_dev);
+- indio_dev->trig = data->drdy_trig;
+- iio_trigger_get(indio_dev->trig);
+ ret = iio_trigger_register(data->drdy_trig);
+ if (ret)
+ goto err_poweroff;
++
++ indio_dev->trig = iio_trigger_get(data->drdy_trig);
+ }
+
+ ret = iio_triggered_buffer_setup(indio_dev, NULL,
+diff --git a/drivers/iio/gyro/mpu3050-core.c b/drivers/iio/gyro/mpu3050-core.c
+index 39e1c4306c474..84c6ad4bcccba 100644
+--- a/drivers/iio/gyro/mpu3050-core.c
++++ b/drivers/iio/gyro/mpu3050-core.c
+@@ -872,6 +872,7 @@ static int mpu3050_power_up(struct mpu3050 *mpu3050)
+ ret = regmap_update_bits(mpu3050->map, MPU3050_PWR_MGM,
+ MPU3050_PWR_MGM_SLEEP, 0);
+ if (ret) {
++ regulator_bulk_disable(ARRAY_SIZE(mpu3050->regs), mpu3050->regs);
+ dev_err(mpu3050->dev, "error setting power mode\n");
+ return ret;
+ }
+diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600.h b/drivers/iio/imu/inv_icm42600/inv_icm42600.h
+index c0f5059b13b31..995a9dc06521d 100644
+--- a/drivers/iio/imu/inv_icm42600/inv_icm42600.h
++++ b/drivers/iio/imu/inv_icm42600/inv_icm42600.h
+@@ -17,6 +17,7 @@
+ #include "inv_icm42600_buffer.h"
+
+ enum inv_icm42600_chip {
++ INV_CHIP_INVALID,
+ INV_CHIP_ICM42600,
+ INV_CHIP_ICM42602,
+ INV_CHIP_ICM42605,
+diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
+index 8bd77185ccb71..dcbd4e9288519 100644
+--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
++++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
+@@ -565,7 +565,7 @@ int inv_icm42600_core_probe(struct regmap *regmap, int chip, int irq,
+ bool open_drain;
+ int ret;
+
+- if (chip < 0 || chip >= INV_CHIP_NB) {
++ if (chip <= INV_CHIP_INVALID || chip >= INV_CHIP_NB) {
+ dev_err(dev, "invalid chip = %d\n", chip);
+ return -ENODEV;
+ }
+diff --git a/drivers/iio/trigger/iio-trig-sysfs.c b/drivers/iio/trigger/iio-trig-sysfs.c
+index e09e58072872c..2277d6336ac06 100644
+--- a/drivers/iio/trigger/iio-trig-sysfs.c
++++ b/drivers/iio/trigger/iio-trig-sysfs.c
+@@ -196,6 +196,7 @@ static int iio_sysfs_trigger_remove(int id)
+ }
+
+ iio_trigger_unregister(t->trig);
++ irq_work_sync(&t->work);
+ iio_trigger_free(t->trig);
+
+ list_del(&t->l);
+diff --git a/drivers/md/dm-era-target.c b/drivers/md/dm-era-target.c
+index d9ac7372108c9..96bad057bea2f 100644
+--- a/drivers/md/dm-era-target.c
++++ b/drivers/md/dm-era-target.c
+@@ -1396,7 +1396,7 @@ static void start_worker(struct era *era)
+ static void stop_worker(struct era *era)
+ {
+ atomic_set(&era->suspended, 1);
+- flush_workqueue(era->wq);
++ drain_workqueue(era->wq);
+ }
+
+ /*----------------------------------------------------------------
+@@ -1566,6 +1566,12 @@ static void era_postsuspend(struct dm_target *ti)
+ }
+
+ stop_worker(era);
++
++ r = metadata_commit(era->md);
++ if (r) {
++ DMERR("%s: metadata_commit failed", __func__);
++ /* FIXME: fail mode */
++ }
+ }
+
+ static int era_preresume(struct dm_target *ti)
+diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c
+index 8b15f53cbdd95..fe3a9473f3387 100644
+--- a/drivers/md/dm-log.c
++++ b/drivers/md/dm-log.c
+@@ -615,7 +615,7 @@ static int disk_resume(struct dm_dirty_log *log)
+ log_clear_bit(lc, lc->clean_bits, i);
+
+ /* clear any old bits -- device has shrunk */
+- for (i = lc->region_count; i % (sizeof(*lc->clean_bits) << BYTE_SHIFT); i++)
++ for (i = lc->region_count; i % BITS_PER_LONG; i++)
+ log_clear_bit(lc, lc->clean_bits, i);
+
+ /* copy clean across to sync */
+diff --git a/drivers/memory/samsung/exynos5422-dmc.c b/drivers/memory/samsung/exynos5422-dmc.c
+index 049a1356f7dd4..5cec30d2dc7db 100644
+--- a/drivers/memory/samsung/exynos5422-dmc.c
++++ b/drivers/memory/samsung/exynos5422-dmc.c
+@@ -1192,33 +1192,39 @@ static int of_get_dram_timings(struct exynos5_dmc *dmc)
+
+ dmc->timing_row = devm_kmalloc_array(dmc->dev, TIMING_COUNT,
+ sizeof(u32), GFP_KERNEL);
+- if (!dmc->timing_row)
+- return -ENOMEM;
++ if (!dmc->timing_row) {
++ ret = -ENOMEM;
++ goto put_node;
++ }
+
+ dmc->timing_data = devm_kmalloc_array(dmc->dev, TIMING_COUNT,
+ sizeof(u32), GFP_KERNEL);
+- if (!dmc->timing_data)
+- return -ENOMEM;
++ if (!dmc->timing_data) {
++ ret = -ENOMEM;
++ goto put_node;
++ }
+
+ dmc->timing_power = devm_kmalloc_array(dmc->dev, TIMING_COUNT,
+ sizeof(u32), GFP_KERNEL);
+- if (!dmc->timing_power)
+- return -ENOMEM;
++ if (!dmc->timing_power) {
++ ret = -ENOMEM;
++ goto put_node;
++ }
+
+ dmc->timings = of_lpddr3_get_ddr_timings(np_ddr, dmc->dev,
+ DDR_TYPE_LPDDR3,
+ &dmc->timings_arr_size);
+ if (!dmc->timings) {
+- of_node_put(np_ddr);
+ dev_warn(dmc->dev, "could not get timings from DT\n");
+- return -EINVAL;
++ ret = -EINVAL;
++ goto put_node;
+ }
+
+ dmc->min_tck = of_lpddr3_get_min_tck(np_ddr, dmc->dev);
+ if (!dmc->min_tck) {
+- of_node_put(np_ddr);
+ dev_warn(dmc->dev, "could not get tck from DT\n");
+- return -EINVAL;
++ ret = -EINVAL;
++ goto put_node;
+ }
+
+ /* Sorted array of OPPs with frequency ascending */
+@@ -1232,13 +1238,14 @@ static int of_get_dram_timings(struct exynos5_dmc *dmc)
+ clk_period_ps);
+ }
+
+- of_node_put(np_ddr);
+
+ /* Take the highest frequency's timings as 'bypass' */
+ dmc->bypass_timing_row = dmc->timing_row[idx - 1];
+ dmc->bypass_timing_data = dmc->timing_data[idx - 1];
+ dmc->bypass_timing_power = dmc->timing_power[idx - 1];
+
++put_node:
++ of_node_put(np_ddr);
+ return ret;
+ }
+
+diff --git a/drivers/mmc/host/sdhci-pci-o2micro.c b/drivers/mmc/host/sdhci-pci-o2micro.c
+index 94e3f72f6405d..8c357e3b78d7c 100644
+--- a/drivers/mmc/host/sdhci-pci-o2micro.c
++++ b/drivers/mmc/host/sdhci-pci-o2micro.c
+@@ -147,6 +147,8 @@ static int sdhci_o2_get_cd(struct mmc_host *mmc)
+
+ if (!(sdhci_readw(host, O2_PLL_DLL_WDT_CONTROL1) & O2_PLL_LOCK_STATUS))
+ sdhci_o2_enable_internal_clock(host);
++ else
++ sdhci_o2_wait_card_detect_stable(host);
+
+ return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
+ }
+diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
+index 92e8ca56f5665..8d096ca770b04 100644
+--- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
++++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
+@@ -683,7 +683,7 @@ static void gpmi_nfc_compute_timings(struct gpmi_nand_data *this,
+ hw->timing0 = BF_GPMI_TIMING0_ADDRESS_SETUP(addr_setup_cycles) |
+ BF_GPMI_TIMING0_DATA_HOLD(data_hold_cycles) |
+ BF_GPMI_TIMING0_DATA_SETUP(data_setup_cycles);
+- hw->timing1 = BF_GPMI_TIMING1_BUSY_TIMEOUT(busy_timeout_cycles * 4096);
++ hw->timing1 = BF_GPMI_TIMING1_BUSY_TIMEOUT(DIV_ROUND_UP(busy_timeout_cycles, 4096));
+
+ /*
+ * Derive NFC ideal delay from {3}:
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index cbeb69bca0bba..9c4b45341fd28 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -3368,9 +3368,11 @@ re_arm:
+ if (!rtnl_trylock())
+ return;
+
+- if (should_notify_peers)
++ if (should_notify_peers) {
++ bond->send_peer_notif--;
+ call_netdevice_notifiers(NETDEV_NOTIFY_PEERS,
+ bond->dev);
++ }
+ if (should_notify_rtnl) {
+ bond_slave_state_notify(bond);
+ bond_slave_link_notify(bond);
+diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
+index 421fc707f80af..060897eb9cabe 100644
+--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
++++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
+@@ -2174,6 +2174,42 @@ ice_setup_autoneg(struct ice_port_info *p, struct ethtool_link_ksettings *ks,
+ return err;
+ }
+
++/**
++ * ice_set_phy_type_from_speed - set phy_types based on speeds
++ * and advertised modes
++ * @ks: ethtool link ksettings struct
++ * @phy_type_low: pointer to the lower part of phy_type
++ * @phy_type_high: pointer to the higher part of phy_type
++ * @adv_link_speed: targeted link speeds bitmap
++ */
++static void
++ice_set_phy_type_from_speed(const struct ethtool_link_ksettings *ks,
++ u64 *phy_type_low, u64 *phy_type_high,
++ u16 adv_link_speed)
++{
++ /* Handle 1000M speed in a special way because ice_update_phy_type
++ * enables all link modes, but having mixed copper and optical
++ * standards is not supported.
++ */
++ adv_link_speed &= ~ICE_AQ_LINK_SPEED_1000MB;
++
++ if (ethtool_link_ksettings_test_link_mode(ks, advertising,
++ 1000baseT_Full))
++ *phy_type_low |= ICE_PHY_TYPE_LOW_1000BASE_T |
++ ICE_PHY_TYPE_LOW_1G_SGMII;
++
++ if (ethtool_link_ksettings_test_link_mode(ks, advertising,
++ 1000baseKX_Full))
++ *phy_type_low |= ICE_PHY_TYPE_LOW_1000BASE_KX;
++
++ if (ethtool_link_ksettings_test_link_mode(ks, advertising,
++ 1000baseX_Full))
++ *phy_type_low |= ICE_PHY_TYPE_LOW_1000BASE_SX |
++ ICE_PHY_TYPE_LOW_1000BASE_LX;
++
++ ice_update_phy_type(phy_type_low, phy_type_high, adv_link_speed);
++}
++
+ /**
+ * ice_set_link_ksettings - Set Speed and Duplex
+ * @netdev: network interface device structure
+@@ -2310,7 +2346,8 @@ ice_set_link_ksettings(struct net_device *netdev,
+ adv_link_speed = curr_link_speed;
+
+ /* Convert the advertise link speeds to their corresponded PHY_TYPE */
+- ice_update_phy_type(&phy_type_low, &phy_type_high, adv_link_speed);
++ ice_set_phy_type_from_speed(ks, &phy_type_low, &phy_type_high,
++ adv_link_speed);
+
+ if (!autoneg_changed && adv_link_speed == curr_link_speed) {
+ netdev_info(netdev, "Nothing changed, exiting without setting anything.\n");
+diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
+index 5e67c9c119d2f..4e51f4bb58ffc 100644
+--- a/drivers/net/ethernet/intel/igb/igb_main.c
++++ b/drivers/net/ethernet/intel/igb/igb_main.c
+@@ -4813,8 +4813,11 @@ static void igb_clean_tx_ring(struct igb_ring *tx_ring)
+ while (i != tx_ring->next_to_use) {
+ union e1000_adv_tx_desc *eop_desc, *tx_desc;
+
+- /* Free all the Tx ring sk_buffs */
+- dev_kfree_skb_any(tx_buffer->skb);
++ /* Free all the Tx ring sk_buffs or xdp frames */
++ if (tx_buffer->type == IGB_TYPE_SKB)
++ dev_kfree_skb_any(tx_buffer->skb);
++ else
++ xdp_return_frame(tx_buffer->xdpf);
+
+ /* unmap skb header data */
+ dma_unmap_single(tx_ring->dev,
+@@ -9826,11 +9829,10 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
+ struct e1000_hw *hw = &adapter->hw;
+ u32 dmac_thr;
+ u16 hwm;
++ u32 reg;
+
+ if (hw->mac.type > e1000_82580) {
+ if (adapter->flags & IGB_FLAG_DMAC) {
+- u32 reg;
+-
+ /* force threshold to 0. */
+ wr32(E1000_DMCTXTH, 0);
+
+@@ -9863,7 +9865,6 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
+ /* Disable BMC-to-OS Watchdog Enable */
+ if (hw->mac.type != e1000_i354)
+ reg &= ~E1000_DMACR_DC_BMC2OSW_EN;
+-
+ wr32(E1000_DMACR, reg);
+
+ /* no lower threshold to disable
+@@ -9880,12 +9881,12 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
+ */
+ wr32(E1000_DMCTXTH, (IGB_MIN_TXPBSIZE -
+ (IGB_TX_BUF_4096 + adapter->max_frame_size)) >> 6);
++ }
+
+- /* make low power state decision controlled
+- * by DMA coal
+- */
++ if (hw->mac.type >= e1000_i210 ||
++ (adapter->flags & IGB_FLAG_DMAC)) {
+ reg = rd32(E1000_PCIEMISC);
+- reg &= ~E1000_PCIEMISC_LX_DECISION;
++ reg |= E1000_PCIEMISC_LX_DECISION;
+ wr32(E1000_PCIEMISC, reg);
+ } /* endif adapter->dmac is not disabled */
+ } else if (hw->mac.type == e1000_82580) {
+diff --git a/drivers/net/phy/aquantia_main.c b/drivers/net/phy/aquantia_main.c
+index 41e7c1432497a..75a62d1cc7375 100644
+--- a/drivers/net/phy/aquantia_main.c
++++ b/drivers/net/phy/aquantia_main.c
+@@ -34,6 +34,8 @@
+ #define MDIO_AN_VEND_PROV 0xc400
+ #define MDIO_AN_VEND_PROV_1000BASET_FULL BIT(15)
+ #define MDIO_AN_VEND_PROV_1000BASET_HALF BIT(14)
++#define MDIO_AN_VEND_PROV_5000BASET_FULL BIT(11)
++#define MDIO_AN_VEND_PROV_2500BASET_FULL BIT(10)
+ #define MDIO_AN_VEND_PROV_DOWNSHIFT_EN BIT(4)
+ #define MDIO_AN_VEND_PROV_DOWNSHIFT_MASK GENMASK(3, 0)
+ #define MDIO_AN_VEND_PROV_DOWNSHIFT_DFLT 4
+@@ -230,9 +232,20 @@ static int aqr_config_aneg(struct phy_device *phydev)
+ phydev->advertising))
+ reg |= MDIO_AN_VEND_PROV_1000BASET_HALF;
+
++ /* Handle the case when the 2.5G and 5G speeds are not advertised */
++ if (linkmode_test_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
++ phydev->advertising))
++ reg |= MDIO_AN_VEND_PROV_2500BASET_FULL;
++
++ if (linkmode_test_bit(ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
++ phydev->advertising))
++ reg |= MDIO_AN_VEND_PROV_5000BASET_FULL;
++
+ ret = phy_modify_mmd_changed(phydev, MDIO_MMD_AN, MDIO_AN_VEND_PROV,
+ MDIO_AN_VEND_PROV_1000BASET_HALF |
+- MDIO_AN_VEND_PROV_1000BASET_FULL, reg);
++ MDIO_AN_VEND_PROV_1000BASET_FULL |
++ MDIO_AN_VEND_PROV_2500BASET_FULL |
++ MDIO_AN_VEND_PROV_5000BASET_FULL, reg);
+ if (ret < 0)
+ return ret;
+ if (ret > 0)
+diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
+index cbe47eed7cc3c..ad9064df3debb 100644
+--- a/drivers/net/virtio_net.c
++++ b/drivers/net/virtio_net.c
+@@ -2366,7 +2366,6 @@ static const struct ethtool_ops virtnet_ethtool_ops = {
+ static void virtnet_freeze_down(struct virtio_device *vdev)
+ {
+ struct virtnet_info *vi = vdev->priv;
+- int i;
+
+ /* Make sure no work handler is accessing the device */
+ flush_work(&vi->config_work);
+@@ -2374,14 +2373,8 @@ static void virtnet_freeze_down(struct virtio_device *vdev)
+ netif_tx_lock_bh(vi->dev);
+ netif_device_detach(vi->dev);
+ netif_tx_unlock_bh(vi->dev);
+- cancel_delayed_work_sync(&vi->refill);
+-
+- if (netif_running(vi->dev)) {
+- for (i = 0; i < vi->max_queue_pairs; i++) {
+- napi_disable(&vi->rq[i].napi);
+- virtnet_napi_tx_disable(&vi->sq[i].napi);
+- }
+- }
++ if (netif_running(vi->dev))
++ virtnet_close(vi->dev);
+ }
+
+ static int init_vqs(struct virtnet_info *vi);
+@@ -2389,7 +2382,7 @@ static int init_vqs(struct virtnet_info *vi);
+ static int virtnet_restore_up(struct virtio_device *vdev)
+ {
+ struct virtnet_info *vi = vdev->priv;
+- int err, i;
++ int err;
+
+ err = init_vqs(vi);
+ if (err)
+@@ -2398,15 +2391,9 @@ static int virtnet_restore_up(struct virtio_device *vdev)
+ virtio_device_ready(vdev);
+
+ if (netif_running(vi->dev)) {
+- for (i = 0; i < vi->curr_queue_pairs; i++)
+- if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
+- schedule_delayed_work(&vi->refill, 0);
+-
+- for (i = 0; i < vi->max_queue_pairs; i++) {
+- virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
+- virtnet_napi_tx_enable(vi, vi->sq[i].vq,
+- &vi->sq[i].napi);
+- }
++ err = virtnet_open(vi->dev);
++ if (err)
++ return err;
+ }
+
+ netif_tx_lock_bh(vi->dev);
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index 0aa68da51ed70..af2902d70b196 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -531,36 +531,54 @@ EXPORT_SYMBOL_NS_GPL(nvme_put_ns, NVME_TARGET_PASSTHRU);
+
+ static inline void nvme_clear_nvme_request(struct request *req)
+ {
+- if (!(req->rq_flags & RQF_DONTPREP)) {
+- nvme_req(req)->retries = 0;
+- nvme_req(req)->flags = 0;
+- req->rq_flags |= RQF_DONTPREP;
+- }
++ nvme_req(req)->retries = 0;
++ nvme_req(req)->flags = 0;
++ req->rq_flags |= RQF_DONTPREP;
+ }
+
+-struct request *nvme_alloc_request(struct request_queue *q,
+- struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid)
++static inline unsigned int nvme_req_op(struct nvme_command *cmd)
+ {
+- unsigned op = nvme_is_write(cmd) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN;
+- struct request *req;
++ return nvme_is_write(cmd) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN;
++}
+
+- if (qid == NVME_QID_ANY) {
+- req = blk_mq_alloc_request(q, op, flags);
+- } else {
+- req = blk_mq_alloc_request_hctx(q, op, flags,
+- qid ? qid - 1 : 0);
+- }
+- if (IS_ERR(req))
+- return req;
++static inline void nvme_init_request(struct request *req,
++ struct nvme_command *cmd)
++{
++ if (req->q->queuedata)
++ req->timeout = NVME_IO_TIMEOUT;
++ else /* no queuedata implies admin queue */
++ req->timeout = ADMIN_TIMEOUT;
+
+ req->cmd_flags |= REQ_FAILFAST_DRIVER;
+ nvme_clear_nvme_request(req);
+ nvme_req(req)->cmd = cmd;
++}
++
++struct request *nvme_alloc_request(struct request_queue *q,
++ struct nvme_command *cmd, blk_mq_req_flags_t flags)
++{
++ struct request *req;
+
++ req = blk_mq_alloc_request(q, nvme_req_op(cmd), flags);
++ if (!IS_ERR(req))
++ nvme_init_request(req, cmd);
+ return req;
+ }
+ EXPORT_SYMBOL_GPL(nvme_alloc_request);
+
++struct request *nvme_alloc_request_qid(struct request_queue *q,
++ struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid)
++{
++ struct request *req;
++
++ req = blk_mq_alloc_request_hctx(q, nvme_req_op(cmd), flags,
++ qid ? qid - 1 : 0);
++ if (!IS_ERR(req))
++ nvme_init_request(req, cmd);
++ return req;
++}
++EXPORT_SYMBOL_GPL(nvme_alloc_request_qid);
++
+ static int nvme_toggle_streams(struct nvme_ctrl *ctrl, bool enable)
+ {
+ struct nvme_command c;
+@@ -663,7 +681,7 @@ static void nvme_assign_write_stream(struct nvme_ctrl *ctrl,
+ req->q->write_hints[streamid] += blk_rq_bytes(req) >> 9;
+ }
+
+-static void nvme_setup_passthrough(struct request *req,
++static inline void nvme_setup_passthrough(struct request *req,
+ struct nvme_command *cmd)
+ {
+ memcpy(cmd, nvme_req(req)->cmd, sizeof(*cmd));
+@@ -834,7 +852,8 @@ blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
+ struct nvme_ctrl *ctrl = nvme_req(req)->ctrl;
+ blk_status_t ret = BLK_STS_OK;
+
+- nvme_clear_nvme_request(req);
++ if (!(req->rq_flags & RQF_DONTPREP))
++ nvme_clear_nvme_request(req);
+
+ memset(cmd, 0, sizeof(*cmd));
+ switch (req_op(req)) {
+@@ -923,11 +942,15 @@ int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
+ struct request *req;
+ int ret;
+
+- req = nvme_alloc_request(q, cmd, flags, qid);
++ if (qid == NVME_QID_ANY)
++ req = nvme_alloc_request(q, cmd, flags);
++ else
++ req = nvme_alloc_request_qid(q, cmd, flags, qid);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+
+- req->timeout = timeout ? timeout : ADMIN_TIMEOUT;
++ if (timeout)
++ req->timeout = timeout;
+
+ if (buffer && bufflen) {
+ ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL);
+@@ -1093,11 +1116,12 @@ static int nvme_submit_user_cmd(struct request_queue *q,
+ void *meta = NULL;
+ int ret;
+
+- req = nvme_alloc_request(q, cmd, 0, NVME_QID_ANY);
++ req = nvme_alloc_request(q, cmd, 0);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+
+- req->timeout = timeout ? timeout : ADMIN_TIMEOUT;
++ if (timeout)
++ req->timeout = timeout;
+ nvme_req(req)->flags |= NVME_REQ_USERCMD;
+
+ if (ubuffer && bufflen) {
+@@ -1167,8 +1191,8 @@ static int nvme_keep_alive(struct nvme_ctrl *ctrl)
+ {
+ struct request *rq;
+
+- rq = nvme_alloc_request(ctrl->admin_q, &ctrl->ka_cmd, BLK_MQ_REQ_RESERVED,
+- NVME_QID_ANY);
++ rq = nvme_alloc_request(ctrl->admin_q, &ctrl->ka_cmd,
++ BLK_MQ_REQ_RESERVED);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+@@ -2675,6 +2699,34 @@ static const struct nvme_core_quirk_entry core_quirks[] = {
+ .vid = 0x14a4,
+ .fr = "22301111",
+ .quirks = NVME_QUIRK_SIMPLE_SUSPEND,
++ },
++ {
++ /*
++ * This Kioxia CD6-V Series / HPE PE8030 device times out and
++ * aborts I/O during any load, but more easily reproducible
++ * with discards (fstrim).
++ *
++ * The device is left in a state where it is also not possible
++ * to use "nvme set-feature" to disable APST, but booting with
++ * nvme_core.default_ps_max_latency=0 works.
++ */
++ .vid = 0x1e0f,
++ .mn = "KCD6XVUL6T40",
++ .quirks = NVME_QUIRK_NO_APST,
++ },
++ {
++ /*
++ * The external Samsung X5 SSD fails initialization without a
++ * delay before checking if it is ready and has a whole set of
++ * other problems. To make this even more interesting, it
++ * shares the PCI ID with internal Samsung 970 Evo Plus that
++ * does not need or want these quirks.
++ */
++ .vid = 0x144d,
++ .mn = "Samsung Portable SSD X5",
++ .quirks = NVME_QUIRK_DELAY_BEFORE_CHK_RDY |
++ NVME_QUIRK_NO_DEEPEST_PS |
++ NVME_QUIRK_IGNORE_DEV_SUBNQN,
+ }
+ };
+
+diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c
+index 8e562d0f2c301..470cef3abec3d 100644
+--- a/drivers/nvme/host/lightnvm.c
++++ b/drivers/nvme/host/lightnvm.c
+@@ -653,7 +653,7 @@ static struct request *nvme_nvm_alloc_request(struct request_queue *q,
+
+ nvme_nvm_rqtocmd(rqd, ns, cmd);
+
+- rq = nvme_alloc_request(q, (struct nvme_command *)cmd, 0, NVME_QID_ANY);
++ rq = nvme_alloc_request(q, (struct nvme_command *)cmd, 0);
+ if (IS_ERR(rq))
+ return rq;
+
+@@ -767,14 +767,14 @@ static int nvme_nvm_submit_user_cmd(struct request_queue *q,
+ DECLARE_COMPLETION_ONSTACK(wait);
+ int ret = 0;
+
+- rq = nvme_alloc_request(q, (struct nvme_command *)vcmd, 0,
+- NVME_QID_ANY);
++ rq = nvme_alloc_request(q, (struct nvme_command *)vcmd, 0);
+ if (IS_ERR(rq)) {
+ ret = -ENOMEM;
+ goto err_cmd;
+ }
+
+- rq->timeout = timeout ? timeout : ADMIN_TIMEOUT;
++ if (timeout)
++ rq->timeout = timeout;
+
+ if (ppa_buf && ppa_len) {
+ ppa_list = dma_pool_alloc(dev->dma_pool, GFP_KERNEL, &ppa_dma);
+diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
+index 95b9657cabaf1..8e40a6306e53d 100644
+--- a/drivers/nvme/host/nvme.h
++++ b/drivers/nvme/host/nvme.h
+@@ -662,6 +662,8 @@ void nvme_start_freeze(struct nvme_ctrl *ctrl);
+
+ #define NVME_QID_ANY -1
+ struct request *nvme_alloc_request(struct request_queue *q,
++ struct nvme_command *cmd, blk_mq_req_flags_t flags);
++struct request *nvme_alloc_request_qid(struct request_queue *q,
+ struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid);
+ void nvme_cleanup_cmd(struct request *req);
+ blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
+index 7de24a10dd921..9e633f4dcec71 100644
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -224,6 +224,7 @@ struct nvme_queue {
+ */
+ struct nvme_iod {
+ struct nvme_request req;
++ struct nvme_command cmd;
+ struct nvme_queue *nvmeq;
+ bool use_sgl;
+ int aborted;
+@@ -917,7 +918,7 @@ static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
+ struct nvme_dev *dev = nvmeq->dev;
+ struct request *req = bd->rq;
+ struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+- struct nvme_command cmnd;
++ struct nvme_command *cmnd = &iod->cmd;
+ blk_status_t ret;
+
+ iod->aborted = 0;
+@@ -931,24 +932,24 @@ static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
+ if (unlikely(!test_bit(NVMEQ_ENABLED, &nvmeq->flags)))
+ return BLK_STS_IOERR;
+
+- ret = nvme_setup_cmd(ns, req, &cmnd);
++ ret = nvme_setup_cmd(ns, req, cmnd);
+ if (ret)
+ return ret;
+
+ if (blk_rq_nr_phys_segments(req)) {
+- ret = nvme_map_data(dev, req, &cmnd);
++ ret = nvme_map_data(dev, req, cmnd);
+ if (ret)
+ goto out_free_cmd;
+ }
+
+ if (blk_integrity_rq(req)) {
+- ret = nvme_map_metadata(dev, req, &cmnd);
++ ret = nvme_map_metadata(dev, req, cmnd);
+ if (ret)
+ goto out_unmap_data;
+ }
+
+ blk_mq_start_request(req);
+- nvme_submit_cmd(nvmeq, &cmnd, bd->last);
++ nvme_submit_cmd(nvmeq, cmnd, bd->last);
+ return BLK_STS_OK;
+ out_unmap_data:
+ nvme_unmap_data(dev, req);
+@@ -1350,13 +1351,12 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
+ req->tag, nvmeq->qid);
+
+ abort_req = nvme_alloc_request(dev->ctrl.admin_q, &cmd,
+- BLK_MQ_REQ_NOWAIT, NVME_QID_ANY);
++ BLK_MQ_REQ_NOWAIT);
+ if (IS_ERR(abort_req)) {
+ atomic_inc(&dev->ctrl.abort_limit);
+ return BLK_EH_RESET_TIMER;
+ }
+
+- abort_req->timeout = ADMIN_TIMEOUT;
+ abort_req->end_io_data = NULL;
+ blk_execute_rq_nowait(abort_req->q, NULL, abort_req, 0, abort_endio);
+
+@@ -2279,11 +2279,10 @@ static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode)
+ cmd.delete_queue.opcode = opcode;
+ cmd.delete_queue.qid = cpu_to_le16(nvmeq->qid);
+
+- req = nvme_alloc_request(q, &cmd, BLK_MQ_REQ_NOWAIT, NVME_QID_ANY);
++ req = nvme_alloc_request(q, &cmd, BLK_MQ_REQ_NOWAIT);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+
+- req->timeout = ADMIN_TIMEOUT;
+ req->end_io_data = nvmeq;
+
+ init_completion(&nvmeq->delete_done);
+@@ -3266,10 +3265,6 @@ static const struct pci_device_id nvme_id_table[] = {
+ NVME_QUIRK_128_BYTES_SQES |
+ NVME_QUIRK_SHARED_TAGS |
+ NVME_QUIRK_SKIP_CID_GEN },
+- { PCI_DEVICE(0x144d, 0xa808), /* Samsung X5 */
+- .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY|
+- NVME_QUIRK_NO_DEEPEST_PS |
+- NVME_QUIRK_IGNORE_DEV_SUBNQN, },
+ { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
+ { 0, }
+ };
+diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
+index 8ee94f0568983..d24251ece5023 100644
+--- a/drivers/nvme/target/passthru.c
++++ b/drivers/nvme/target/passthru.c
+@@ -244,7 +244,7 @@ static void nvmet_passthru_execute_cmd(struct nvmet_req *req)
+ q = ns->queue;
+ }
+
+- rq = nvme_alloc_request(q, req->cmd, 0, NVME_QID_ANY);
++ rq = nvme_alloc_request(q, req->cmd, 0);
+ if (IS_ERR(rq)) {
+ status = NVME_SC_INTERNAL;
+ goto out_put_ns;
+diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
+index 6b00de6b6f0ef..5eb959b5f7010 100644
+--- a/drivers/scsi/scsi_debug.c
++++ b/drivers/scsi/scsi_debug.c
+@@ -2746,6 +2746,24 @@ static void zbc_open_zone(struct sdebug_dev_info *devip,
+ }
+ }
+
++static inline void zbc_set_zone_full(struct sdebug_dev_info *devip,
++ struct sdeb_zone_state *zsp)
++{
++ switch (zsp->z_cond) {
++ case ZC2_IMPLICIT_OPEN:
++ devip->nr_imp_open--;
++ break;
++ case ZC3_EXPLICIT_OPEN:
++ devip->nr_exp_open--;
++ break;
++ default:
++ WARN_ONCE(true, "Invalid zone %llu condition %x\n",
++ zsp->z_start, zsp->z_cond);
++ break;
++ }
++ zsp->z_cond = ZC5_FULL;
++}
++
+ static void zbc_inc_wp(struct sdebug_dev_info *devip,
+ unsigned long long lba, unsigned int num)
+ {
+@@ -2758,7 +2776,7 @@ static void zbc_inc_wp(struct sdebug_dev_info *devip,
+ if (zsp->z_type == ZBC_ZONE_TYPE_SWR) {
+ zsp->z_wp += num;
+ if (zsp->z_wp >= zend)
+- zsp->z_cond = ZC5_FULL;
++ zbc_set_zone_full(devip, zsp);
+ return;
+ }
+
+@@ -2777,7 +2795,7 @@ static void zbc_inc_wp(struct sdebug_dev_info *devip,
+ n = num;
+ }
+ if (zsp->z_wp >= zend)
+- zsp->z_cond = ZC5_FULL;
++ zbc_set_zone_full(devip, zsp);
+
+ num -= n;
+ lba += n;
+diff --git a/drivers/soc/bcm/brcmstb/pm/pm-arm.c b/drivers/soc/bcm/brcmstb/pm/pm-arm.c
+index b1062334e6089..c6ec7d95bcfcc 100644
+--- a/drivers/soc/bcm/brcmstb/pm/pm-arm.c
++++ b/drivers/soc/bcm/brcmstb/pm/pm-arm.c
+@@ -780,6 +780,7 @@ static int brcmstb_pm_probe(struct platform_device *pdev)
+ }
+
+ ret = brcmstb_init_sram(dn);
++ of_node_put(dn);
+ if (ret) {
+ pr_err("error setting up SRAM for PM\n");
+ return ret;
+diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
+index a7ee1171eeb3e..0a6336d54a650 100644
+--- a/drivers/tty/vt/vt.c
++++ b/drivers/tty/vt/vt.c
+@@ -4625,16 +4625,8 @@ static int con_font_get(struct vc_data *vc, struct console_font_op *op)
+
+ if (op->data && font.charcount > op->charcount)
+ rc = -ENOSPC;
+- if (!(op->flags & KD_FONT_FLAG_OLD)) {
+- if (font.width > op->width || font.height > op->height)
+- rc = -ENOSPC;
+- } else {
+- if (font.width != 8)
+- rc = -EIO;
+- else if ((op->height && font.height > op->height) ||
+- font.height > 32)
+- rc = -ENOSPC;
+- }
++ if (font.width > op->width || font.height > op->height)
++ rc = -ENOSPC;
+ if (rc)
+ goto out;
+
+@@ -4662,7 +4654,7 @@ static int con_font_set(struct vc_data *vc, struct console_font_op *op)
+ return -EINVAL;
+ if (op->charcount > 512)
+ return -EINVAL;
+- if (op->width <= 0 || op->width > 32 || op->height > 32)
++ if (op->width <= 0 || op->width > 32 || !op->height || op->height > 32)
+ return -EINVAL;
+ size = (op->width+7)/8 * 32 * op->charcount;
+ if (size > max_font_size)
+@@ -4672,31 +4664,6 @@ static int con_font_set(struct vc_data *vc, struct console_font_op *op)
+ if (IS_ERR(font.data))
+ return PTR_ERR(font.data);
+
+- if (!op->height) { /* Need to guess font height [compat] */
+- int h, i;
+- u8 *charmap = font.data;
+-
+- /*
+- * If from KDFONTOP ioctl, don't allow things which can be done
+- * in userland,so that we can get rid of this soon
+- */
+- if (!(op->flags & KD_FONT_FLAG_OLD)) {
+- kfree(font.data);
+- return -EINVAL;
+- }
+-
+- for (h = 32; h > 0; h--)
+- for (i = 0; i < op->charcount; i++)
+- if (charmap[32*i+h-1])
+- goto nonzero;
+-
+- kfree(font.data);
+- return -EINVAL;
+-
+- nonzero:
+- op->height = h;
+- }
+-
+ font.charcount = op->charcount;
+ font.width = op->width;
+ font.height = op->height;
+diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c
+index a9c6ea8986af0..b10b86e2c17e9 100644
+--- a/drivers/tty/vt/vt_ioctl.c
++++ b/drivers/tty/vt/vt_ioctl.c
+@@ -486,70 +486,6 @@ static int vt_k_ioctl(struct tty_struct *tty, unsigned int cmd,
+ return 0;
+ }
+
+-static inline int do_fontx_ioctl(struct vc_data *vc, int cmd,
+- struct consolefontdesc __user *user_cfd,
+- struct console_font_op *op)
+-{
+- struct consolefontdesc cfdarg;
+- int i;
+-
+- if (copy_from_user(&cfdarg, user_cfd, sizeof(struct consolefontdesc)))
+- return -EFAULT;
+-
+- switch (cmd) {
+- case PIO_FONTX:
+- op->op = KD_FONT_OP_SET;
+- op->flags = KD_FONT_FLAG_OLD;
+- op->width = 8;
+- op->height = cfdarg.charheight;
+- op->charcount = cfdarg.charcount;
+- op->data = cfdarg.chardata;
+- return con_font_op(vc, op);
+-
+- case GIO_FONTX:
+- op->op = KD_FONT_OP_GET;
+- op->flags = KD_FONT_FLAG_OLD;
+- op->width = 8;
+- op->height = cfdarg.charheight;
+- op->charcount = cfdarg.charcount;
+- op->data = cfdarg.chardata;
+- i = con_font_op(vc, op);
+- if (i)
+- return i;
+- cfdarg.charheight = op->height;
+- cfdarg.charcount = op->charcount;
+- if (copy_to_user(user_cfd, &cfdarg, sizeof(struct consolefontdesc)))
+- return -EFAULT;
+- return 0;
+- }
+- return -EINVAL;
+-}
+-
+-static int vt_io_fontreset(struct vc_data *vc, struct console_font_op *op)
+-{
+- int ret;
+-
+- if (__is_defined(BROKEN_GRAPHICS_PROGRAMS)) {
+- /*
+- * With BROKEN_GRAPHICS_PROGRAMS defined, the default font is
+- * not saved.
+- */
+- return -ENOSYS;
+- }
+-
+- op->op = KD_FONT_OP_SET_DEFAULT;
+- op->data = NULL;
+- ret = con_font_op(vc, op);
+- if (ret)
+- return ret;
+-
+- console_lock();
+- con_set_default_unimap(vc);
+- console_unlock();
+-
+- return 0;
+-}
+-
+ static inline int do_unimap_ioctl(int cmd, struct unimapdesc __user *user_ud,
+ bool perm, struct vc_data *vc)
+ {
+@@ -574,29 +510,7 @@ static inline int do_unimap_ioctl(int cmd, struct unimapdesc __user *user_ud,
+ static int vt_io_ioctl(struct vc_data *vc, unsigned int cmd, void __user *up,
+ bool perm)
+ {
+- struct console_font_op op; /* used in multiple places here */
+-
+ switch (cmd) {
+- case PIO_FONT:
+- if (!perm)
+- return -EPERM;
+- op.op = KD_FONT_OP_SET;
+- op.flags = KD_FONT_FLAG_OLD | KD_FONT_FLAG_DONT_RECALC; /* Compatibility */
+- op.width = 8;
+- op.height = 0;
+- op.charcount = 256;
+- op.data = up;
+- return con_font_op(vc, &op);
+-
+- case GIO_FONT:
+- op.op = KD_FONT_OP_GET;
+- op.flags = KD_FONT_FLAG_OLD;
+- op.width = 8;
+- op.height = 32;
+- op.charcount = 256;
+- op.data = up;
+- return con_font_op(vc, &op);
+-
+ case PIO_CMAP:
+ if (!perm)
+ return -EPERM;
+@@ -605,20 +519,6 @@ static int vt_io_ioctl(struct vc_data *vc, unsigned int cmd, void __user *up,
+ case GIO_CMAP:
+ return con_get_cmap(up);
+
+- case PIO_FONTX:
+- if (!perm)
+- return -EPERM;
+-
+- fallthrough;
+- case GIO_FONTX:
+- return do_fontx_ioctl(vc, cmd, up, &op);
+-
+- case PIO_FONTRESET:
+- if (!perm)
+- return -EPERM;
+-
+- return vt_io_fontreset(vc, &op);
+-
+ case PIO_SCRNMAP:
+ if (!perm)
+ return -EPERM;
+@@ -1099,54 +999,6 @@ void vc_SAK(struct work_struct *work)
+
+ #ifdef CONFIG_COMPAT
+
+-struct compat_consolefontdesc {
+- unsigned short charcount; /* characters in font (256 or 512) */
+- unsigned short charheight; /* scan lines per character (1-32) */
+- compat_caddr_t chardata; /* font data in expanded form */
+-};
+-
+-static inline int
+-compat_fontx_ioctl(struct vc_data *vc, int cmd,
+- struct compat_consolefontdesc __user *user_cfd,
+- int perm, struct console_font_op *op)
+-{
+- struct compat_consolefontdesc cfdarg;
+- int i;
+-
+- if (copy_from_user(&cfdarg, user_cfd, sizeof(struct compat_consolefontdesc)))
+- return -EFAULT;
+-
+- switch (cmd) {
+- case PIO_FONTX:
+- if (!perm)
+- return -EPERM;
+- op->op = KD_FONT_OP_SET;
+- op->flags = KD_FONT_FLAG_OLD;
+- op->width = 8;
+- op->height = cfdarg.charheight;
+- op->charcount = cfdarg.charcount;
+- op->data = compat_ptr(cfdarg.chardata);
+- return con_font_op(vc, op);
+-
+- case GIO_FONTX:
+- op->op = KD_FONT_OP_GET;
+- op->flags = KD_FONT_FLAG_OLD;
+- op->width = 8;
+- op->height = cfdarg.charheight;
+- op->charcount = cfdarg.charcount;
+- op->data = compat_ptr(cfdarg.chardata);
+- i = con_font_op(vc, op);
+- if (i)
+- return i;
+- cfdarg.charheight = op->height;
+- cfdarg.charcount = op->charcount;
+- if (copy_to_user(user_cfd, &cfdarg, sizeof(struct compat_consolefontdesc)))
+- return -EFAULT;
+- return 0;
+- }
+- return -EINVAL;
+-}
+-
+ struct compat_console_font_op {
+ compat_uint_t op; /* operation code KD_FONT_OP_* */
+ compat_uint_t flags; /* KD_FONT_FLAG_* */
+@@ -1223,9 +1075,6 @@ long vt_compat_ioctl(struct tty_struct *tty,
+ /*
+ * these need special handlers for incompatible data structures
+ */
+- case PIO_FONTX:
+- case GIO_FONTX:
+- return compat_fontx_ioctl(vc, cmd, up, perm, &op);
+
+ case KDFONTOP:
+ return compat_kdfontop_ioctl(up, perm, &op, vc);
+diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
+index 5f35cdd2cf1dd..67d8da04848ec 100644
+--- a/drivers/usb/chipidea/udc.c
++++ b/drivers/usb/chipidea/udc.c
+@@ -1034,6 +1034,9 @@ isr_setup_status_complete(struct usb_ep *ep, struct usb_request *req)
+ struct ci_hdrc *ci = req->context;
+ unsigned long flags;
+
++ if (req->status < 0)
++ return;
++
+ if (ci->setaddr) {
+ hw_usb_set_address(ci, ci->address);
+ ci->setaddr = false;
+diff --git a/drivers/usb/gadget/legacy/raw_gadget.c b/drivers/usb/gadget/legacy/raw_gadget.c
+index 34cecd3660bfc..b496ca937deed 100644
+--- a/drivers/usb/gadget/legacy/raw_gadget.c
++++ b/drivers/usb/gadget/legacy/raw_gadget.c
+@@ -10,6 +10,7 @@
+ #include <linux/ctype.h>
+ #include <linux/debugfs.h>
+ #include <linux/delay.h>
++#include <linux/idr.h>
+ #include <linux/kref.h>
+ #include <linux/miscdevice.h>
+ #include <linux/module.h>
+@@ -35,6 +36,9 @@ MODULE_LICENSE("GPL");
+
+ /*----------------------------------------------------------------------*/
+
++static DEFINE_IDA(driver_id_numbers);
++#define DRIVER_DRIVER_NAME_LENGTH_MAX 32
++
+ #define RAW_EVENT_QUEUE_SIZE 16
+
+ struct raw_event_queue {
+@@ -160,6 +164,9 @@ struct raw_dev {
+ /* Reference to misc device: */
+ struct device *dev;
+
++ /* Make driver names unique */
++ int driver_id_number;
++
+ /* Protected by lock: */
+ enum dev_state state;
+ bool gadget_registered;
+@@ -188,6 +195,7 @@ static struct raw_dev *dev_new(void)
+ spin_lock_init(&dev->lock);
+ init_completion(&dev->ep0_done);
+ raw_event_queue_init(&dev->queue);
++ dev->driver_id_number = -1;
+ return dev;
+ }
+
+@@ -198,6 +206,9 @@ static void dev_free(struct kref *kref)
+
+ kfree(dev->udc_name);
+ kfree(dev->driver.udc_name);
++ kfree(dev->driver.driver.name);
++ if (dev->driver_id_number >= 0)
++ ida_free(&driver_id_numbers, dev->driver_id_number);
+ if (dev->req) {
+ if (dev->ep0_urb_queued)
+ usb_ep_dequeue(dev->gadget->ep0, dev->req);
+@@ -418,9 +429,11 @@ out_put:
+ static int raw_ioctl_init(struct raw_dev *dev, unsigned long value)
+ {
+ int ret = 0;
++ int driver_id_number;
+ struct usb_raw_init arg;
+ char *udc_driver_name;
+ char *udc_device_name;
++ char *driver_driver_name;
+ unsigned long flags;
+
+ if (copy_from_user(&arg, (void __user *)value, sizeof(arg)))
+@@ -439,36 +452,43 @@ static int raw_ioctl_init(struct raw_dev *dev, unsigned long value)
+ return -EINVAL;
+ }
+
++ driver_id_number = ida_alloc(&driver_id_numbers, GFP_KERNEL);
++ if (driver_id_number < 0)
++ return driver_id_number;
++
++ driver_driver_name = kmalloc(DRIVER_DRIVER_NAME_LENGTH_MAX, GFP_KERNEL);
++ if (!driver_driver_name) {
++ ret = -ENOMEM;
++ goto out_free_driver_id_number;
++ }
++ snprintf(driver_driver_name, DRIVER_DRIVER_NAME_LENGTH_MAX,
++ DRIVER_NAME ".%d", driver_id_number);
++
+ udc_driver_name = kmalloc(UDC_NAME_LENGTH_MAX, GFP_KERNEL);
+- if (!udc_driver_name)
+- return -ENOMEM;
++ if (!udc_driver_name) {
++ ret = -ENOMEM;
++ goto out_free_driver_driver_name;
++ }
+ ret = strscpy(udc_driver_name, &arg.driver_name[0],
+ UDC_NAME_LENGTH_MAX);
+- if (ret < 0) {
+- kfree(udc_driver_name);
+- return ret;
+- }
++ if (ret < 0)
++ goto out_free_udc_driver_name;
+ ret = 0;
+
+ udc_device_name = kmalloc(UDC_NAME_LENGTH_MAX, GFP_KERNEL);
+ if (!udc_device_name) {
+- kfree(udc_driver_name);
+- return -ENOMEM;
++ ret = -ENOMEM;
++ goto out_free_udc_driver_name;
+ }
+ ret = strscpy(udc_device_name, &arg.device_name[0],
+ UDC_NAME_LENGTH_MAX);
+- if (ret < 0) {
+- kfree(udc_driver_name);
+- kfree(udc_device_name);
+- return ret;
+- }
++ if (ret < 0)
++ goto out_free_udc_device_name;
+ ret = 0;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ if (dev->state != STATE_DEV_OPENED) {
+ dev_dbg(dev->dev, "fail, device is not opened\n");
+- kfree(udc_driver_name);
+- kfree(udc_device_name);
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+@@ -483,14 +503,25 @@ static int raw_ioctl_init(struct raw_dev *dev, unsigned long value)
+ dev->driver.suspend = gadget_suspend;
+ dev->driver.resume = gadget_resume;
+ dev->driver.reset = gadget_reset;
+- dev->driver.driver.name = DRIVER_NAME;
++ dev->driver.driver.name = driver_driver_name;
+ dev->driver.udc_name = udc_device_name;
+ dev->driver.match_existing_only = 1;
++ dev->driver_id_number = driver_id_number;
+
+ dev->state = STATE_DEV_INITIALIZED;
++ spin_unlock_irqrestore(&dev->lock, flags);
++ return ret;
+
+ out_unlock:
+ spin_unlock_irqrestore(&dev->lock, flags);
++out_free_udc_device_name:
++ kfree(udc_device_name);
++out_free_udc_driver_name:
++ kfree(udc_driver_name);
++out_free_driver_driver_name:
++ kfree(driver_driver_name);
++out_free_driver_id_number:
++ ida_free(&driver_id_numbers, driver_id_number);
+ return ret;
+ }
+
+diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
+index 1eb3b5deb940e..94adae8b19f00 100644
+--- a/drivers/usb/host/xhci-hub.c
++++ b/drivers/usb/host/xhci-hub.c
+@@ -566,7 +566,7 @@ struct xhci_hub *xhci_get_rhub(struct usb_hcd *hcd)
+ * It will release and re-aquire the lock while calling ACPI
+ * method.
+ */
+-static void xhci_set_port_power(struct xhci_hcd *xhci, struct usb_hcd *hcd,
++void xhci_set_port_power(struct xhci_hcd *xhci, struct usb_hcd *hcd,
+ u16 index, bool on, unsigned long *flags)
+ __must_hold(&xhci->lock)
+ {
+diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
+index 886279755804e..8952492d43be6 100644
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -61,6 +61,8 @@
+ #define PCI_DEVICE_ID_INTEL_ALDER_LAKE_XHCI 0x461e
+ #define PCI_DEVICE_ID_INTEL_ALDER_LAKE_N_XHCI 0x464e
+ #define PCI_DEVICE_ID_INTEL_ALDER_LAKE_PCH_XHCI 0x51ed
++#define PCI_DEVICE_ID_INTEL_RAPTOR_LAKE_XHCI 0xa71e
++#define PCI_DEVICE_ID_INTEL_METEOR_LAKE_XHCI 0x7ec0
+
+ #define PCI_DEVICE_ID_AMD_PROMONTORYA_4 0x43b9
+ #define PCI_DEVICE_ID_AMD_PROMONTORYA_3 0x43ba
+@@ -265,7 +267,9 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ pdev->device == PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_XHCI ||
+ pdev->device == PCI_DEVICE_ID_INTEL_ALDER_LAKE_XHCI ||
+ pdev->device == PCI_DEVICE_ID_INTEL_ALDER_LAKE_N_XHCI ||
+- pdev->device == PCI_DEVICE_ID_INTEL_ALDER_LAKE_PCH_XHCI))
++ pdev->device == PCI_DEVICE_ID_INTEL_ALDER_LAKE_PCH_XHCI ||
++ pdev->device == PCI_DEVICE_ID_INTEL_RAPTOR_LAKE_XHCI ||
++ pdev->device == PCI_DEVICE_ID_INTEL_METEOR_LAKE_XHCI))
+ xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW;
+
+ if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index a1ed5e0d06128..997de5f294f15 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -775,6 +775,8 @@ static void xhci_stop(struct usb_hcd *hcd)
+ void xhci_shutdown(struct usb_hcd *hcd)
+ {
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
++ unsigned long flags;
++ int i;
+
+ if (xhci->quirks & XHCI_SPURIOUS_REBOOT)
+ usb_disable_xhci_ports(to_pci_dev(hcd->self.sysdev));
+@@ -790,12 +792,21 @@ void xhci_shutdown(struct usb_hcd *hcd)
+ del_timer_sync(&xhci->shared_hcd->rh_timer);
+ }
+
+- spin_lock_irq(&xhci->lock);
++ spin_lock_irqsave(&xhci->lock, flags);
+ xhci_halt(xhci);
++
++ /* Power off USB2 ports*/
++ for (i = 0; i < xhci->usb2_rhub.num_ports; i++)
++ xhci_set_port_power(xhci, xhci->main_hcd, i, false, &flags);
++
++ /* Power off USB3 ports*/
++ for (i = 0; i < xhci->usb3_rhub.num_ports; i++)
++ xhci_set_port_power(xhci, xhci->shared_hcd, i, false, &flags);
++
+ /* Workaround for spurious wakeups at shutdown with HSW */
+ if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
+ xhci_reset(xhci, XHCI_RESET_SHORT_USEC);
+- spin_unlock_irq(&xhci->lock);
++ spin_unlock_irqrestore(&xhci->lock, flags);
+
+ xhci_cleanup_msix(xhci);
+
+diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
+index a46bbf5beffa9..0c66424b34ba9 100644
+--- a/drivers/usb/host/xhci.h
++++ b/drivers/usb/host/xhci.h
+@@ -2162,6 +2162,8 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex,
+ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf);
+ int xhci_find_raw_port_number(struct usb_hcd *hcd, int port1);
+ struct xhci_hub *xhci_get_rhub(struct usb_hcd *hcd);
++void xhci_set_port_power(struct xhci_hcd *xhci, struct usb_hcd *hcd, u16 index,
++ bool on, unsigned long *flags);
+
+ void xhci_hc_died(struct xhci_hcd *xhci);
+
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 3744cde5146f4..44e06b95584e5 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -252,10 +252,12 @@ static void option_instat_callback(struct urb *urb);
+ #define QUECTEL_PRODUCT_EG95 0x0195
+ #define QUECTEL_PRODUCT_BG96 0x0296
+ #define QUECTEL_PRODUCT_EP06 0x0306
++#define QUECTEL_PRODUCT_EM05G 0x030a
+ #define QUECTEL_PRODUCT_EM12 0x0512
+ #define QUECTEL_PRODUCT_RM500Q 0x0800
+ #define QUECTEL_PRODUCT_EC200S_CN 0x6002
+ #define QUECTEL_PRODUCT_EC200T 0x6026
++#define QUECTEL_PRODUCT_RM500K 0x7001
+
+ #define CMOTECH_VENDOR_ID 0x16d8
+ #define CMOTECH_PRODUCT_6001 0x6001
+@@ -1134,6 +1136,8 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0xff, 0xff),
+ .driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0, 0) },
++ { USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM05G, 0xff),
++ .driver_info = RSVD(6) | ZLP },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0xff, 0xff),
+ .driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0, 0) },
+@@ -1147,6 +1151,7 @@ static const struct usb_device_id option_ids[] = {
+ .driver_info = ZLP },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200S_CN, 0xff, 0, 0) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200T, 0xff, 0, 0) },
++ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500K, 0xff, 0x00, 0x00) },
+
+ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
+ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
+@@ -1279,6 +1284,7 @@ static const struct usb_device_id option_ids[] = {
+ .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1231, 0xff), /* Telit LE910Cx (RNDIS) */
+ .driver_info = NCTRL(2) | RSVD(3) },
++ { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x1250, 0xff, 0x00, 0x00) }, /* Telit LE910Cx (rmnet) */
+ { USB_DEVICE(TELIT_VENDOR_ID, 0x1260),
+ .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
+ { USB_DEVICE(TELIT_VENDOR_ID, 0x1261),
+diff --git a/drivers/usb/typec/tcpm/Kconfig b/drivers/usb/typec/tcpm/Kconfig
+index 557f392fe24da..073fd2ea5e0bb 100644
+--- a/drivers/usb/typec/tcpm/Kconfig
++++ b/drivers/usb/typec/tcpm/Kconfig
+@@ -56,7 +56,6 @@ config TYPEC_WCOVE
+ tristate "Intel WhiskeyCove PMIC USB Type-C PHY driver"
+ depends on ACPI
+ depends on MFD_INTEL_PMC_BXT
+- depends on INTEL_SOC_PMIC
+ depends on BXT_WC_PMIC_OPREGION
+ help
+ This driver adds support for USB Type-C on Intel Broxton platforms
+diff --git a/drivers/video/console/sticore.c b/drivers/video/console/sticore.c
+index 77622ef401d8f..68fb531f245a5 100644
+--- a/drivers/video/console/sticore.c
++++ b/drivers/video/console/sticore.c
+@@ -1127,6 +1127,7 @@ int sti_call(const struct sti_struct *sti, unsigned long func,
+ return ret;
+ }
+
++#if defined(CONFIG_FB_STI)
+ /* check if given fb_info is the primary device */
+ int fb_is_primary_device(struct fb_info *info)
+ {
+@@ -1142,6 +1143,7 @@ int fb_is_primary_device(struct fb_info *info)
+ return (sti->info == info);
+ }
+ EXPORT_SYMBOL(fb_is_primary_device);
++#endif
+
+ MODULE_AUTHOR("Philipp Rumpf, Helge Deller, Thomas Bogendoerfer");
+ MODULE_DESCRIPTION("Core STI driver for HP's NGLE series graphics cards in HP PARISC machines");
+diff --git a/drivers/xen/features.c b/drivers/xen/features.c
+index 25c053b096051..2c306de228db3 100644
+--- a/drivers/xen/features.c
++++ b/drivers/xen/features.c
+@@ -29,6 +29,6 @@ void xen_setup_features(void)
+ if (HYPERVISOR_xen_version(XENVER_get_features, &fi) < 0)
+ break;
+ for (j = 0; j < 32; j++)
+- xen_features[i * 32 + j] = !!(fi.submap & 1<<j);
++ xen_features[i * 32 + j] = !!(fi.submap & 1U << j);
+ }
+ }
+diff --git a/fs/afs/inode.c b/fs/afs/inode.c
+index 7e7a9454bcb9d..826fae22a8cc9 100644
+--- a/fs/afs/inode.c
++++ b/fs/afs/inode.c
+@@ -734,7 +734,8 @@ int afs_getattr(const struct path *path, struct kstat *stat,
+
+ _enter("{ ino=%lu v=%u }", inode->i_ino, inode->i_generation);
+
+- if (!(query_flags & AT_STATX_DONT_SYNC) &&
++ if (vnode->volume &&
++ !(query_flags & AT_STATX_DONT_SYNC) &&
+ !test_bit(AFS_VNODE_CB_PROMISED, &vnode->flags)) {
+ key = afs_request_key(vnode->volume->cell);
+ if (IS_ERR(key))
+diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
+index 2663485c17cb8..8bf8cdb62a3af 100644
+--- a/fs/btrfs/super.c
++++ b/fs/btrfs/super.c
+@@ -652,6 +652,8 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
+ compress_force = false;
+ no_compress++;
+ } else {
++ btrfs_err(info, "unrecognized compression value %s",
++ args[0].from);
+ ret = -EINVAL;
+ goto out;
+ }
+@@ -710,8 +712,11 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
+ case Opt_thread_pool:
+ ret = match_int(&args[0], &intarg);
+ if (ret) {
++ btrfs_err(info, "unrecognized thread_pool value %s",
++ args[0].from);
+ goto out;
+ } else if (intarg == 0) {
++ btrfs_err(info, "invalid value 0 for thread_pool");
+ ret = -EINVAL;
+ goto out;
+ }
+@@ -772,8 +777,11 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
+ break;
+ case Opt_ratio:
+ ret = match_int(&args[0], &intarg);
+- if (ret)
++ if (ret) {
++ btrfs_err(info, "unrecognized metadata_ratio value %s",
++ args[0].from);
+ goto out;
++ }
+ info->metadata_ratio = intarg;
+ btrfs_info(info, "metadata ratio %u",
+ info->metadata_ratio);
+@@ -790,6 +798,8 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
+ btrfs_set_and_info(info, DISCARD_ASYNC,
+ "turning on async discard");
+ } else {
++ btrfs_err(info, "unrecognized discard mode value %s",
++ args[0].from);
+ ret = -EINVAL;
+ goto out;
+ }
+@@ -814,6 +824,8 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
+ btrfs_set_and_info(info, FREE_SPACE_TREE,
+ "enabling free space tree");
+ } else {
++ btrfs_err(info, "unrecognized space_cache value %s",
++ args[0].from);
+ ret = -EINVAL;
+ goto out;
+ }
+@@ -889,8 +901,12 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
+ break;
+ case Opt_check_integrity_print_mask:
+ ret = match_int(&args[0], &intarg);
+- if (ret)
++ if (ret) {
++ btrfs_err(info,
++ "unrecognized check_integrity_print_mask value %s",
++ args[0].from);
+ goto out;
++ }
+ info->check_integrity_print_mask = intarg;
+ btrfs_info(info, "check_integrity_print_mask 0x%x",
+ info->check_integrity_print_mask);
+@@ -905,13 +921,15 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
+ goto out;
+ #endif
+ case Opt_fatal_errors:
+- if (strcmp(args[0].from, "panic") == 0)
++ if (strcmp(args[0].from, "panic") == 0) {
+ btrfs_set_opt(info->mount_opt,
+ PANIC_ON_FATAL_ERROR);
+- else if (strcmp(args[0].from, "bug") == 0)
++ } else if (strcmp(args[0].from, "bug") == 0) {
+ btrfs_clear_opt(info->mount_opt,
+ PANIC_ON_FATAL_ERROR);
+- else {
++ } else {
++ btrfs_err(info, "unrecognized fatal_errors value %s",
++ args[0].from);
+ ret = -EINVAL;
+ goto out;
+ }
+@@ -919,8 +937,12 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
+ case Opt_commit_interval:
+ intarg = 0;
+ ret = match_int(&args[0], &intarg);
+- if (ret)
++ if (ret) {
++ btrfs_err(info, "unrecognized commit_interval value %s",
++ args[0].from);
++ ret = -EINVAL;
+ goto out;
++ }
+ if (intarg == 0) {
+ btrfs_info(info,
+ "using default commit interval %us",
+@@ -934,8 +956,11 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
+ break;
+ case Opt_rescue:
+ ret = parse_rescue_options(info, args[0].from);
+- if (ret < 0)
++ if (ret < 0) {
++ btrfs_err(info, "unrecognized rescue value %s",
++ args[0].from);
+ goto out;
++ }
+ break;
+ #ifdef CONFIG_BTRFS_DEBUG
+ case Opt_fragment_all:
+diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
+index 6ae2beabe578d..72b109685db47 100644
+--- a/fs/f2fs/namei.c
++++ b/fs/f2fs/namei.c
+@@ -91,8 +91,6 @@ static struct inode *f2fs_new_inode(struct inode *dir, umode_t mode)
+ if (test_opt(sbi, INLINE_XATTR))
+ set_inode_flag(inode, FI_INLINE_XATTR);
+
+- if (test_opt(sbi, INLINE_DATA) && f2fs_may_inline_data(inode))
+- set_inode_flag(inode, FI_INLINE_DATA);
+ if (f2fs_may_inline_dentry(inode))
+ set_inode_flag(inode, FI_INLINE_DENTRY);
+
+@@ -109,10 +107,6 @@ static struct inode *f2fs_new_inode(struct inode *dir, umode_t mode)
+
+ f2fs_init_extent_tree(inode, NULL);
+
+- stat_inc_inline_xattr(inode);
+- stat_inc_inline_inode(inode);
+- stat_inc_inline_dir(inode);
+-
+ F2FS_I(inode)->i_flags =
+ f2fs_mask_flags(mode, F2FS_I(dir)->i_flags & F2FS_FL_INHERITED);
+
+@@ -129,6 +123,14 @@ static struct inode *f2fs_new_inode(struct inode *dir, umode_t mode)
+ set_compress_context(inode);
+ }
+
++ /* Should enable inline_data after compression set */
++ if (test_opt(sbi, INLINE_DATA) && f2fs_may_inline_data(inode))
++ set_inode_flag(inode, FI_INLINE_DATA);
++
++ stat_inc_inline_xattr(inode);
++ stat_inc_inline_inode(inode);
++ stat_inc_inline_dir(inode);
++
+ f2fs_set_inode_flags(inode);
+
+ trace_f2fs_new_inode(inode, 0);
+@@ -317,6 +319,9 @@ static void set_compress_inode(struct f2fs_sb_info *sbi, struct inode *inode,
+ if (!is_extension_exist(name, ext[i], false))
+ continue;
+
++ /* Do not use inline_data with compression */
++ stat_dec_inline_inode(inode);
++ clear_inode_flag(inode, FI_INLINE_DATA);
+ set_compress_context(inode);
+ return;
+ }
+diff --git a/include/linux/kd.h b/include/linux/kd.h
+deleted file mode 100644
+index b130a18f860f0..0000000000000
+--- a/include/linux/kd.h
++++ /dev/null
+@@ -1,8 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0 */
+-#ifndef _LINUX_KD_H
+-#define _LINUX_KD_H
+-
+-#include <uapi/linux/kd.h>
+-
+-#define KD_FONT_FLAG_OLD 0x80000000 /* Invoked via old interface [compat] */
+-#endif /* _LINUX_KD_H */
+diff --git a/include/linux/ratelimit_types.h b/include/linux/ratelimit_types.h
+index b676aa419eef8..f0e535f199bef 100644
+--- a/include/linux/ratelimit_types.h
++++ b/include/linux/ratelimit_types.h
+@@ -23,12 +23,16 @@ struct ratelimit_state {
+ unsigned long flags;
+ };
+
+-#define RATELIMIT_STATE_INIT(name, interval_init, burst_init) { \
+- .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \
+- .interval = interval_init, \
+- .burst = burst_init, \
++#define RATELIMIT_STATE_INIT_FLAGS(name, interval_init, burst_init, flags_init) { \
++ .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \
++ .interval = interval_init, \
++ .burst = burst_init, \
++ .flags = flags_init, \
+ }
+
++#define RATELIMIT_STATE_INIT(name, interval_init, burst_init) \
++ RATELIMIT_STATE_INIT_FLAGS(name, interval_init, burst_init, 0)
++
+ #define RATELIMIT_STATE_INIT_DISABLED \
+ RATELIMIT_STATE_INIT(ratelimit_state, 0, DEFAULT_RATELIMIT_BURST)
+
+diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
+index b7907385a02ff..b9948e7861f22 100644
+--- a/include/net/netfilter/nf_tables.h
++++ b/include/net/netfilter/nf_tables.h
+@@ -203,11 +203,11 @@ int nft_parse_u32_check(const struct nlattr *attr, int max, u32 *dest);
+ unsigned int nft_parse_register(const struct nlattr *attr);
+ int nft_dump_register(struct sk_buff *skb, unsigned int attr, unsigned int reg);
+
+-int nft_validate_register_load(enum nft_registers reg, unsigned int len);
+-int nft_validate_register_store(const struct nft_ctx *ctx,
+- enum nft_registers reg,
+- const struct nft_data *data,
+- enum nft_data_types type, unsigned int len);
++int nft_parse_register_load(const struct nlattr *attr, u8 *sreg, u32 len);
++int nft_parse_register_store(const struct nft_ctx *ctx,
++ const struct nlattr *attr, u8 *dreg,
++ const struct nft_data *data,
++ enum nft_data_types type, unsigned int len);
+
+ /**
+ * struct nft_userdata - user defined data associated with an object
+diff --git a/include/net/netfilter/nf_tables_core.h b/include/net/netfilter/nf_tables_core.h
+index 8657e6815b07c..fd10a7862fdc6 100644
+--- a/include/net/netfilter/nf_tables_core.h
++++ b/include/net/netfilter/nf_tables_core.h
+@@ -26,21 +26,21 @@ void nf_tables_core_module_exit(void);
+ struct nft_bitwise_fast_expr {
+ u32 mask;
+ u32 xor;
+- enum nft_registers sreg:8;
+- enum nft_registers dreg:8;
++ u8 sreg;
++ u8 dreg;
+ };
+
+ struct nft_cmp_fast_expr {
+ u32 data;
+ u32 mask;
+- enum nft_registers sreg:8;
++ u8 sreg;
+ u8 len;
+ bool inv;
+ };
+
+ struct nft_immediate_expr {
+ struct nft_data data;
+- enum nft_registers dreg:8;
++ u8 dreg;
+ u8 dlen;
+ };
+
+@@ -60,14 +60,14 @@ struct nft_payload {
+ enum nft_payload_bases base:8;
+ u8 offset;
+ u8 len;
+- enum nft_registers dreg:8;
++ u8 dreg;
+ };
+
+ struct nft_payload_set {
+ enum nft_payload_bases base:8;
+ u8 offset;
+ u8 len;
+- enum nft_registers sreg:8;
++ u8 sreg;
+ u8 csum_type;
+ u8 csum_offset;
+ u8 csum_flags;
+diff --git a/include/net/netfilter/nft_fib.h b/include/net/netfilter/nft_fib.h
+index 628b6fa579cd8..237f3757637e1 100644
+--- a/include/net/netfilter/nft_fib.h
++++ b/include/net/netfilter/nft_fib.h
+@@ -5,7 +5,7 @@
+ #include <net/netfilter/nf_tables.h>
+
+ struct nft_fib {
+- enum nft_registers dreg:8;
++ u8 dreg;
+ u8 result;
+ u32 flags;
+ };
+diff --git a/include/net/netfilter/nft_meta.h b/include/net/netfilter/nft_meta.h
+index 07e2fd507963a..2dce55c736f40 100644
+--- a/include/net/netfilter/nft_meta.h
++++ b/include/net/netfilter/nft_meta.h
+@@ -7,8 +7,8 @@
+ struct nft_meta {
+ enum nft_meta_keys key:8;
+ union {
+- enum nft_registers dreg:8;
+- enum nft_registers sreg:8;
++ u8 dreg;
++ u8 sreg;
+ };
+ };
+
+diff --git a/include/trace/events/libata.h b/include/trace/events/libata.h
+index ab69434e2329e..72e785a903b65 100644
+--- a/include/trace/events/libata.h
++++ b/include/trace/events/libata.h
+@@ -249,6 +249,7 @@ DECLARE_EVENT_CLASS(ata_qc_complete_template,
+ __entry->hob_feature = qc->result_tf.hob_feature;
+ __entry->nsect = qc->result_tf.nsect;
+ __entry->hob_nsect = qc->result_tf.hob_nsect;
++ __entry->flags = qc->flags;
+ ),
+
+ TP_printk("ata_port=%u ata_dev=%u tag=%d flags=%s status=%s " \
+diff --git a/net/bridge/netfilter/nft_meta_bridge.c b/net/bridge/netfilter/nft_meta_bridge.c
+index 8e8ffac037cd4..97805ec424c19 100644
+--- a/net/bridge/netfilter/nft_meta_bridge.c
++++ b/net/bridge/netfilter/nft_meta_bridge.c
+@@ -87,9 +87,8 @@ static int nft_meta_bridge_get_init(const struct nft_ctx *ctx,
+ return nft_meta_get_init(ctx, expr, tb);
+ }
+
+- priv->dreg = nft_parse_register(tb[NFTA_META_DREG]);
+- return nft_validate_register_store(ctx, priv->dreg, NULL,
+- NFT_DATA_VALUE, len);
++ return nft_parse_register_store(ctx, tb[NFTA_META_DREG], &priv->dreg,
++ NULL, NFT_DATA_VALUE, len);
+ }
+
+ static struct nft_expr_type nft_meta_bridge_type;
+diff --git a/net/core/filter.c b/net/core/filter.c
+index d348f1d3fb8fc..246947fbc9581 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -5982,10 +5982,21 @@ __bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
+ ifindex, proto, netns_id, flags);
+
+ if (sk) {
+- sk = sk_to_full_sk(sk);
+- if (!sk_fullsock(sk)) {
++ struct sock *sk2 = sk_to_full_sk(sk);
++
++ /* sk_to_full_sk() may return (sk)->rsk_listener, so make sure the original sk
++ * sock refcnt is decremented to prevent a request_sock leak.
++ */
++ if (!sk_fullsock(sk2))
++ sk2 = NULL;
++ if (sk2 != sk) {
+ sock_gen_put(sk);
+- return NULL;
++ /* Ensure there is no need to bump sk2 refcnt */
++ if (unlikely(sk2 && !sock_flag(sk2, SOCK_RCU_FREE))) {
++ WARN_ONCE(1, "Found non-RCU, unreferenced socket!");
++ return NULL;
++ }
++ sk = sk2;
+ }
+ }
+
+@@ -6019,10 +6030,21 @@ bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
+ flags);
+
+ if (sk) {
+- sk = sk_to_full_sk(sk);
+- if (!sk_fullsock(sk)) {
++ struct sock *sk2 = sk_to_full_sk(sk);
++
++ /* sk_to_full_sk() may return (sk)->rsk_listener, so make sure the original sk
++ * sock refcnt is decremented to prevent a request_sock leak.
++ */
++ if (!sk_fullsock(sk2))
++ sk2 = NULL;
++ if (sk2 != sk) {
+ sock_gen_put(sk);
+- return NULL;
++ /* Ensure there is no need to bump sk2 refcnt */
++ if (unlikely(sk2 && !sock_flag(sk2, SOCK_RCU_FREE))) {
++ WARN_ONCE(1, "Found non-RCU, unreferenced socket!");
++ return NULL;
++ }
++ sk = sk2;
+ }
+ }
+
+diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
+index a7e32be8714f5..6ab5c50aa7a87 100644
+--- a/net/ipv4/ip_gre.c
++++ b/net/ipv4/ip_gre.c
+@@ -519,7 +519,6 @@ static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev)
+ int tunnel_hlen;
+ int version;
+ int nhoff;
+- int thoff;
+
+ tun_info = skb_tunnel_info(skb);
+ if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
+@@ -553,10 +552,16 @@ static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev)
+ (ntohs(ip_hdr(skb)->tot_len) > skb->len - nhoff))
+ truncate = true;
+
+- thoff = skb_transport_header(skb) - skb_mac_header(skb);
+- if (skb->protocol == htons(ETH_P_IPV6) &&
+- (ntohs(ipv6_hdr(skb)->payload_len) > skb->len - thoff))
+- truncate = true;
++ if (skb->protocol == htons(ETH_P_IPV6)) {
++ int thoff;
++
++ if (skb_transport_header_was_set(skb))
++ thoff = skb_transport_header(skb) - skb_mac_header(skb);
++ else
++ thoff = nhoff + sizeof(struct ipv6hdr);
++ if (ntohs(ipv6_hdr(skb)->payload_len) > skb->len - thoff)
++ truncate = true;
++ }
+
+ if (version == 1) {
+ erspan_build_header(skb, ntohl(tunnel_id_to_key32(key->tun_id)),
+diff --git a/net/ipv4/netfilter/nft_dup_ipv4.c b/net/ipv4/netfilter/nft_dup_ipv4.c
+index bcdb37f86a949..aeb631760eb9e 100644
+--- a/net/ipv4/netfilter/nft_dup_ipv4.c
++++ b/net/ipv4/netfilter/nft_dup_ipv4.c
+@@ -13,8 +13,8 @@
+ #include <net/netfilter/ipv4/nf_dup_ipv4.h>
+
+ struct nft_dup_ipv4 {
+- enum nft_registers sreg_addr:8;
+- enum nft_registers sreg_dev:8;
++ u8 sreg_addr;
++ u8 sreg_dev;
+ };
+
+ static void nft_dup_ipv4_eval(const struct nft_expr *expr,
+@@ -40,16 +40,16 @@ static int nft_dup_ipv4_init(const struct nft_ctx *ctx,
+ if (tb[NFTA_DUP_SREG_ADDR] == NULL)
+ return -EINVAL;
+
+- priv->sreg_addr = nft_parse_register(tb[NFTA_DUP_SREG_ADDR]);
+- err = nft_validate_register_load(priv->sreg_addr, sizeof(struct in_addr));
++ err = nft_parse_register_load(tb[NFTA_DUP_SREG_ADDR], &priv->sreg_addr,
++ sizeof(struct in_addr));
+ if (err < 0)
+ return err;
+
+- if (tb[NFTA_DUP_SREG_DEV] != NULL) {
+- priv->sreg_dev = nft_parse_register(tb[NFTA_DUP_SREG_DEV]);
+- return nft_validate_register_load(priv->sreg_dev, sizeof(int));
+- }
+- return 0;
++ if (tb[NFTA_DUP_SREG_DEV])
++ err = nft_parse_register_load(tb[NFTA_DUP_SREG_DEV],
++ &priv->sreg_dev, sizeof(int));
++
++ return err;
+ }
+
+ static int nft_dup_ipv4_dump(struct sk_buff *skb, const struct nft_expr *expr)
+diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
+index 3f88ba6555ab8..9e0890738d93f 100644
+--- a/net/ipv6/ip6_gre.c
++++ b/net/ipv6/ip6_gre.c
+@@ -944,7 +944,6 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
+ __be16 proto;
+ __u32 mtu;
+ int nhoff;
+- int thoff;
+
+ if (!pskb_inet_may_pull(skb))
+ goto tx_err;
+@@ -965,10 +964,16 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
+ (ntohs(ip_hdr(skb)->tot_len) > skb->len - nhoff))
+ truncate = true;
+
+- thoff = skb_transport_header(skb) - skb_mac_header(skb);
+- if (skb->protocol == htons(ETH_P_IPV6) &&
+- (ntohs(ipv6_hdr(skb)->payload_len) > skb->len - thoff))
+- truncate = true;
++ if (skb->protocol == htons(ETH_P_IPV6)) {
++ int thoff;
++
++ if (skb_transport_header_was_set(skb))
++ thoff = skb_transport_header(skb) - skb_mac_header(skb);
++ else
++ thoff = nhoff + sizeof(struct ipv6hdr);
++ if (ntohs(ipv6_hdr(skb)->payload_len) > skb->len - thoff)
++ truncate = true;
++ }
+
+ if (skb_cow_head(skb, dev->needed_headroom ?: t->hlen))
+ goto tx_err;
+diff --git a/net/ipv6/netfilter/nft_dup_ipv6.c b/net/ipv6/netfilter/nft_dup_ipv6.c
+index 8b5193efb1f1b..3a00d95e964e9 100644
+--- a/net/ipv6/netfilter/nft_dup_ipv6.c
++++ b/net/ipv6/netfilter/nft_dup_ipv6.c
+@@ -13,8 +13,8 @@
+ #include <net/netfilter/ipv6/nf_dup_ipv6.h>
+
+ struct nft_dup_ipv6 {
+- enum nft_registers sreg_addr:8;
+- enum nft_registers sreg_dev:8;
++ u8 sreg_addr;
++ u8 sreg_dev;
+ };
+
+ static void nft_dup_ipv6_eval(const struct nft_expr *expr,
+@@ -38,16 +38,16 @@ static int nft_dup_ipv6_init(const struct nft_ctx *ctx,
+ if (tb[NFTA_DUP_SREG_ADDR] == NULL)
+ return -EINVAL;
+
+- priv->sreg_addr = nft_parse_register(tb[NFTA_DUP_SREG_ADDR]);
+- err = nft_validate_register_load(priv->sreg_addr, sizeof(struct in6_addr));
++ err = nft_parse_register_load(tb[NFTA_DUP_SREG_ADDR], &priv->sreg_addr,
++ sizeof(struct in6_addr));
+ if (err < 0)
+ return err;
+
+- if (tb[NFTA_DUP_SREG_DEV] != NULL) {
+- priv->sreg_dev = nft_parse_register(tb[NFTA_DUP_SREG_DEV]);
+- return nft_validate_register_load(priv->sreg_dev, sizeof(int));
+- }
+- return 0;
++ if (tb[NFTA_DUP_SREG_DEV])
++ err = nft_parse_register_load(tb[NFTA_DUP_SREG_DEV],
++ &priv->sreg_dev, sizeof(int));
++
++ return err;
+ }
+
+ static int nft_dup_ipv6_dump(struct sk_buff *skb, const struct nft_expr *expr)
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index 0c56a90c3f086..3c17fadaab5fa 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -4414,6 +4414,12 @@ static int nf_tables_delset(struct net *net, struct sock *nlsk,
+ return nft_delset(&ctx, set);
+ }
+
++static int nft_validate_register_store(const struct nft_ctx *ctx,
++ enum nft_registers reg,
++ const struct nft_data *data,
++ enum nft_data_types type,
++ unsigned int len);
++
+ static int nf_tables_bind_check_setelem(const struct nft_ctx *ctx,
+ struct nft_set *set,
+ const struct nft_set_iter *iter,
+@@ -8514,7 +8520,7 @@ EXPORT_SYMBOL_GPL(nft_dump_register);
+ * Validate that the input register is one of the general purpose
+ * registers and that the length of the load is within the bounds.
+ */
+-int nft_validate_register_load(enum nft_registers reg, unsigned int len)
++static int nft_validate_register_load(enum nft_registers reg, unsigned int len)
+ {
+ if (reg < NFT_REG_1 * NFT_REG_SIZE / NFT_REG32_SIZE)
+ return -EINVAL;
+@@ -8525,7 +8531,21 @@ int nft_validate_register_load(enum nft_registers reg, unsigned int len)
+
+ return 0;
+ }
+-EXPORT_SYMBOL_GPL(nft_validate_register_load);
++
++int nft_parse_register_load(const struct nlattr *attr, u8 *sreg, u32 len)
++{
++ u32 reg;
++ int err;
++
++ reg = nft_parse_register(attr);
++ err = nft_validate_register_load(reg, len);
++ if (err < 0)
++ return err;
++
++ *sreg = reg;
++ return 0;
++}
++EXPORT_SYMBOL_GPL(nft_parse_register_load);
+
+ /**
+ * nft_validate_register_store - validate an expressions' register store
+@@ -8541,10 +8561,11 @@ EXPORT_SYMBOL_GPL(nft_validate_register_load);
+ * A value of NULL for the data means that its runtime gathered
+ * data.
+ */
+-int nft_validate_register_store(const struct nft_ctx *ctx,
+- enum nft_registers reg,
+- const struct nft_data *data,
+- enum nft_data_types type, unsigned int len)
++static int nft_validate_register_store(const struct nft_ctx *ctx,
++ enum nft_registers reg,
++ const struct nft_data *data,
++ enum nft_data_types type,
++ unsigned int len)
+ {
+ int err;
+
+@@ -8576,7 +8597,24 @@ int nft_validate_register_store(const struct nft_ctx *ctx,
+ return 0;
+ }
+ }
+-EXPORT_SYMBOL_GPL(nft_validate_register_store);
++
++int nft_parse_register_store(const struct nft_ctx *ctx,
++ const struct nlattr *attr, u8 *dreg,
++ const struct nft_data *data,
++ enum nft_data_types type, unsigned int len)
++{
++ int err;
++ u32 reg;
++
++ reg = nft_parse_register(attr);
++ err = nft_validate_register_store(ctx, reg, data, type, len);
++ if (err < 0)
++ return err;
++
++ *dreg = reg;
++ return 0;
++}
++EXPORT_SYMBOL_GPL(nft_parse_register_store);
+
+ static const struct nla_policy nft_verdict_policy[NFTA_VERDICT_MAX + 1] = {
+ [NFTA_VERDICT_CODE] = { .type = NLA_U32 },
+diff --git a/net/netfilter/nft_bitwise.c b/net/netfilter/nft_bitwise.c
+index bbd773d743773..47b0dba95054f 100644
+--- a/net/netfilter/nft_bitwise.c
++++ b/net/netfilter/nft_bitwise.c
+@@ -16,8 +16,8 @@
+ #include <net/netfilter/nf_tables_offload.h>
+
+ struct nft_bitwise {
+- enum nft_registers sreg:8;
+- enum nft_registers dreg:8;
++ u8 sreg;
++ u8 dreg;
+ enum nft_bitwise_ops op:8;
+ u8 len;
+ struct nft_data mask;
+@@ -169,14 +169,14 @@ static int nft_bitwise_init(const struct nft_ctx *ctx,
+
+ priv->len = len;
+
+- priv->sreg = nft_parse_register(tb[NFTA_BITWISE_SREG]);
+- err = nft_validate_register_load(priv->sreg, priv->len);
++ err = nft_parse_register_load(tb[NFTA_BITWISE_SREG], &priv->sreg,
++ priv->len);
+ if (err < 0)
+ return err;
+
+- priv->dreg = nft_parse_register(tb[NFTA_BITWISE_DREG]);
+- err = nft_validate_register_store(ctx, priv->dreg, NULL,
+- NFT_DATA_VALUE, priv->len);
++ err = nft_parse_register_store(ctx, tb[NFTA_BITWISE_DREG],
++ &priv->dreg, NULL, NFT_DATA_VALUE,
++ priv->len);
+ if (err < 0)
+ return err;
+
+@@ -315,14 +315,13 @@ static int nft_bitwise_fast_init(const struct nft_ctx *ctx,
+ struct nft_bitwise_fast_expr *priv = nft_expr_priv(expr);
+ int err;
+
+- priv->sreg = nft_parse_register(tb[NFTA_BITWISE_SREG]);
+- err = nft_validate_register_load(priv->sreg, sizeof(u32));
++ err = nft_parse_register_load(tb[NFTA_BITWISE_SREG], &priv->sreg,
++ sizeof(u32));
+ if (err < 0)
+ return err;
+
+- priv->dreg = nft_parse_register(tb[NFTA_BITWISE_DREG]);
+- err = nft_validate_register_store(ctx, priv->dreg, NULL,
+- NFT_DATA_VALUE, sizeof(u32));
++ err = nft_parse_register_store(ctx, tb[NFTA_BITWISE_DREG], &priv->dreg,
++ NULL, NFT_DATA_VALUE, sizeof(u32));
+ if (err < 0)
+ return err;
+
+diff --git a/net/netfilter/nft_byteorder.c b/net/netfilter/nft_byteorder.c
+index 12bed3f7bbc6d..9d5947ab8d4ef 100644
+--- a/net/netfilter/nft_byteorder.c
++++ b/net/netfilter/nft_byteorder.c
+@@ -16,8 +16,8 @@
+ #include <net/netfilter/nf_tables.h>
+
+ struct nft_byteorder {
+- enum nft_registers sreg:8;
+- enum nft_registers dreg:8;
++ u8 sreg;
++ u8 dreg;
+ enum nft_byteorder_ops op:8;
+ u8 len;
+ u8 size;
+@@ -131,20 +131,20 @@ static int nft_byteorder_init(const struct nft_ctx *ctx,
+ return -EINVAL;
+ }
+
+- priv->sreg = nft_parse_register(tb[NFTA_BYTEORDER_SREG]);
+ err = nft_parse_u32_check(tb[NFTA_BYTEORDER_LEN], U8_MAX, &len);
+ if (err < 0)
+ return err;
+
+ priv->len = len;
+
+- err = nft_validate_register_load(priv->sreg, priv->len);
++ err = nft_parse_register_load(tb[NFTA_BYTEORDER_SREG], &priv->sreg,
++ priv->len);
+ if (err < 0)
+ return err;
+
+- priv->dreg = nft_parse_register(tb[NFTA_BYTEORDER_DREG]);
+- return nft_validate_register_store(ctx, priv->dreg, NULL,
+- NFT_DATA_VALUE, priv->len);
++ return nft_parse_register_store(ctx, tb[NFTA_BYTEORDER_DREG],
++ &priv->dreg, NULL, NFT_DATA_VALUE,
++ priv->len);
+ }
+
+ static int nft_byteorder_dump(struct sk_buff *skb, const struct nft_expr *expr)
+diff --git a/net/netfilter/nft_cmp.c b/net/netfilter/nft_cmp.c
+index 1d42d06f5b64b..b529c0e865466 100644
+--- a/net/netfilter/nft_cmp.c
++++ b/net/netfilter/nft_cmp.c
+@@ -18,7 +18,7 @@
+
+ struct nft_cmp_expr {
+ struct nft_data data;
+- enum nft_registers sreg:8;
++ u8 sreg;
+ u8 len;
+ enum nft_cmp_ops op:8;
+ };
+@@ -87,8 +87,7 @@ static int nft_cmp_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
+ return err;
+ }
+
+- priv->sreg = nft_parse_register(tb[NFTA_CMP_SREG]);
+- err = nft_validate_register_load(priv->sreg, desc.len);
++ err = nft_parse_register_load(tb[NFTA_CMP_SREG], &priv->sreg, desc.len);
+ if (err < 0)
+ return err;
+
+@@ -211,8 +210,7 @@ static int nft_cmp_fast_init(const struct nft_ctx *ctx,
+ if (err < 0)
+ return err;
+
+- priv->sreg = nft_parse_register(tb[NFTA_CMP_SREG]);
+- err = nft_validate_register_load(priv->sreg, desc.len);
++ err = nft_parse_register_load(tb[NFTA_CMP_SREG], &priv->sreg, desc.len);
+ if (err < 0)
+ return err;
+
+diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c
+index 7fcb73ac2e6ed..781118465d466 100644
+--- a/net/netfilter/nft_ct.c
++++ b/net/netfilter/nft_ct.c
+@@ -27,8 +27,8 @@ struct nft_ct {
+ enum nft_ct_keys key:8;
+ enum ip_conntrack_dir dir:8;
+ union {
+- enum nft_registers dreg:8;
+- enum nft_registers sreg:8;
++ u8 dreg;
++ u8 sreg;
+ };
+ };
+
+@@ -499,9 +499,8 @@ static int nft_ct_get_init(const struct nft_ctx *ctx,
+ }
+ }
+
+- priv->dreg = nft_parse_register(tb[NFTA_CT_DREG]);
+- err = nft_validate_register_store(ctx, priv->dreg, NULL,
+- NFT_DATA_VALUE, len);
++ err = nft_parse_register_store(ctx, tb[NFTA_CT_DREG], &priv->dreg, NULL,
++ NFT_DATA_VALUE, len);
+ if (err < 0)
+ return err;
+
+@@ -608,8 +607,7 @@ static int nft_ct_set_init(const struct nft_ctx *ctx,
+ }
+ }
+
+- priv->sreg = nft_parse_register(tb[NFTA_CT_SREG]);
+- err = nft_validate_register_load(priv->sreg, len);
++ err = nft_parse_register_load(tb[NFTA_CT_SREG], &priv->sreg, len);
+ if (err < 0)
+ goto err1;
+
+diff --git a/net/netfilter/nft_dup_netdev.c b/net/netfilter/nft_dup_netdev.c
+index 70c457476b874..5b5c607fbf83f 100644
+--- a/net/netfilter/nft_dup_netdev.c
++++ b/net/netfilter/nft_dup_netdev.c
+@@ -14,7 +14,7 @@
+ #include <net/netfilter/nf_dup_netdev.h>
+
+ struct nft_dup_netdev {
+- enum nft_registers sreg_dev:8;
++ u8 sreg_dev;
+ };
+
+ static void nft_dup_netdev_eval(const struct nft_expr *expr,
+@@ -40,8 +40,8 @@ static int nft_dup_netdev_init(const struct nft_ctx *ctx,
+ if (tb[NFTA_DUP_SREG_DEV] == NULL)
+ return -EINVAL;
+
+- priv->sreg_dev = nft_parse_register(tb[NFTA_DUP_SREG_DEV]);
+- return nft_validate_register_load(priv->sreg_dev, sizeof(int));
++ return nft_parse_register_load(tb[NFTA_DUP_SREG_DEV], &priv->sreg_dev,
++ sizeof(int));
+ }
+
+ static int nft_dup_netdev_dump(struct sk_buff *skb, const struct nft_expr *expr)
+diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c
+index 58904bee1a0df..8c45e01fecdd8 100644
+--- a/net/netfilter/nft_dynset.c
++++ b/net/netfilter/nft_dynset.c
+@@ -16,8 +16,8 @@ struct nft_dynset {
+ struct nft_set *set;
+ struct nft_set_ext_tmpl tmpl;
+ enum nft_dynset_ops op:8;
+- enum nft_registers sreg_key:8;
+- enum nft_registers sreg_data:8;
++ u8 sreg_key;
++ u8 sreg_data;
+ bool invert;
+ u64 timeout;
+ struct nft_expr *expr;
+@@ -154,8 +154,8 @@ static int nft_dynset_init(const struct nft_ctx *ctx,
+ return err;
+ }
+
+- priv->sreg_key = nft_parse_register(tb[NFTA_DYNSET_SREG_KEY]);
+- err = nft_validate_register_load(priv->sreg_key, set->klen);
++ err = nft_parse_register_load(tb[NFTA_DYNSET_SREG_KEY], &priv->sreg_key,
++ set->klen);
+ if (err < 0)
+ return err;
+
+@@ -165,8 +165,8 @@ static int nft_dynset_init(const struct nft_ctx *ctx,
+ if (set->dtype == NFT_DATA_VERDICT)
+ return -EOPNOTSUPP;
+
+- priv->sreg_data = nft_parse_register(tb[NFTA_DYNSET_SREG_DATA]);
+- err = nft_validate_register_load(priv->sreg_data, set->dlen);
++ err = nft_parse_register_load(tb[NFTA_DYNSET_SREG_DATA],
++ &priv->sreg_data, set->dlen);
+ if (err < 0)
+ return err;
+ } else if (set->flags & NFT_SET_MAP)
+diff --git a/net/netfilter/nft_exthdr.c b/net/netfilter/nft_exthdr.c
+index faa0844c01fb8..670dd146fb2b1 100644
+--- a/net/netfilter/nft_exthdr.c
++++ b/net/netfilter/nft_exthdr.c
+@@ -19,8 +19,8 @@ struct nft_exthdr {
+ u8 offset;
+ u8 len;
+ u8 op;
+- enum nft_registers dreg:8;
+- enum nft_registers sreg:8;
++ u8 dreg;
++ u8 sreg;
+ u8 flags;
+ };
+
+@@ -353,12 +353,12 @@ static int nft_exthdr_init(const struct nft_ctx *ctx,
+ priv->type = nla_get_u8(tb[NFTA_EXTHDR_TYPE]);
+ priv->offset = offset;
+ priv->len = len;
+- priv->dreg = nft_parse_register(tb[NFTA_EXTHDR_DREG]);
+ priv->flags = flags;
+ priv->op = op;
+
+- return nft_validate_register_store(ctx, priv->dreg, NULL,
+- NFT_DATA_VALUE, priv->len);
++ return nft_parse_register_store(ctx, tb[NFTA_EXTHDR_DREG],
++ &priv->dreg, NULL, NFT_DATA_VALUE,
++ priv->len);
+ }
+
+ static int nft_exthdr_tcp_set_init(const struct nft_ctx *ctx,
+@@ -403,11 +403,11 @@ static int nft_exthdr_tcp_set_init(const struct nft_ctx *ctx,
+ priv->type = nla_get_u8(tb[NFTA_EXTHDR_TYPE]);
+ priv->offset = offset;
+ priv->len = len;
+- priv->sreg = nft_parse_register(tb[NFTA_EXTHDR_SREG]);
+ priv->flags = flags;
+ priv->op = op;
+
+- return nft_validate_register_load(priv->sreg, priv->len);
++ return nft_parse_register_load(tb[NFTA_EXTHDR_SREG], &priv->sreg,
++ priv->len);
+ }
+
+ static int nft_exthdr_ipv4_init(const struct nft_ctx *ctx,
+diff --git a/net/netfilter/nft_fib.c b/net/netfilter/nft_fib.c
+index 4dfdaeaf09a5b..b10ce732b337c 100644
+--- a/net/netfilter/nft_fib.c
++++ b/net/netfilter/nft_fib.c
+@@ -86,7 +86,6 @@ int nft_fib_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
+ return -EINVAL;
+
+ priv->result = ntohl(nla_get_be32(tb[NFTA_FIB_RESULT]));
+- priv->dreg = nft_parse_register(tb[NFTA_FIB_DREG]);
+
+ switch (priv->result) {
+ case NFT_FIB_RESULT_OIF:
+@@ -106,8 +105,8 @@ int nft_fib_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
+ return -EINVAL;
+ }
+
+- err = nft_validate_register_store(ctx, priv->dreg, NULL,
+- NFT_DATA_VALUE, len);
++ err = nft_parse_register_store(ctx, tb[NFTA_FIB_DREG], &priv->dreg,
++ NULL, NFT_DATA_VALUE, len);
+ if (err < 0)
+ return err;
+
+diff --git a/net/netfilter/nft_fwd_netdev.c b/net/netfilter/nft_fwd_netdev.c
+index 3b0dcd170551b..7730409f6f091 100644
+--- a/net/netfilter/nft_fwd_netdev.c
++++ b/net/netfilter/nft_fwd_netdev.c
+@@ -18,7 +18,7 @@
+ #include <net/ip.h>
+
+ struct nft_fwd_netdev {
+- enum nft_registers sreg_dev:8;
++ u8 sreg_dev;
+ };
+
+ static void nft_fwd_netdev_eval(const struct nft_expr *expr,
+@@ -50,8 +50,8 @@ static int nft_fwd_netdev_init(const struct nft_ctx *ctx,
+ if (tb[NFTA_FWD_SREG_DEV] == NULL)
+ return -EINVAL;
+
+- priv->sreg_dev = nft_parse_register(tb[NFTA_FWD_SREG_DEV]);
+- return nft_validate_register_load(priv->sreg_dev, sizeof(int));
++ return nft_parse_register_load(tb[NFTA_FWD_SREG_DEV], &priv->sreg_dev,
++ sizeof(int));
+ }
+
+ static int nft_fwd_netdev_dump(struct sk_buff *skb, const struct nft_expr *expr)
+@@ -83,8 +83,8 @@ static bool nft_fwd_netdev_offload_action(const struct nft_expr *expr)
+ }
+
+ struct nft_fwd_neigh {
+- enum nft_registers sreg_dev:8;
+- enum nft_registers sreg_addr:8;
++ u8 sreg_dev;
++ u8 sreg_addr;
+ u8 nfproto;
+ };
+
+@@ -162,8 +162,6 @@ static int nft_fwd_neigh_init(const struct nft_ctx *ctx,
+ !tb[NFTA_FWD_NFPROTO])
+ return -EINVAL;
+
+- priv->sreg_dev = nft_parse_register(tb[NFTA_FWD_SREG_DEV]);
+- priv->sreg_addr = nft_parse_register(tb[NFTA_FWD_SREG_ADDR]);
+ priv->nfproto = ntohl(nla_get_be32(tb[NFTA_FWD_NFPROTO]));
+
+ switch (priv->nfproto) {
+@@ -177,11 +175,13 @@ static int nft_fwd_neigh_init(const struct nft_ctx *ctx,
+ return -EOPNOTSUPP;
+ }
+
+- err = nft_validate_register_load(priv->sreg_dev, sizeof(int));
++ err = nft_parse_register_load(tb[NFTA_FWD_SREG_DEV], &priv->sreg_dev,
++ sizeof(int));
+ if (err < 0)
+ return err;
+
+- return nft_validate_register_load(priv->sreg_addr, addr_len);
++ return nft_parse_register_load(tb[NFTA_FWD_SREG_ADDR], &priv->sreg_addr,
++ addr_len);
+ }
+
+ static int nft_fwd_neigh_dump(struct sk_buff *skb, const struct nft_expr *expr)
+diff --git a/net/netfilter/nft_hash.c b/net/netfilter/nft_hash.c
+index 96371d878e7e5..f829f5289e162 100644
+--- a/net/netfilter/nft_hash.c
++++ b/net/netfilter/nft_hash.c
+@@ -14,8 +14,8 @@
+ #include <linux/jhash.h>
+
+ struct nft_jhash {
+- enum nft_registers sreg:8;
+- enum nft_registers dreg:8;
++ u8 sreg;
++ u8 dreg;
+ u8 len;
+ bool autogen_seed:1;
+ u32 modulus;
+@@ -38,7 +38,7 @@ static void nft_jhash_eval(const struct nft_expr *expr,
+ }
+
+ struct nft_symhash {
+- enum nft_registers dreg:8;
++ u8 dreg;
+ u32 modulus;
+ u32 offset;
+ };
+@@ -83,9 +83,6 @@ static int nft_jhash_init(const struct nft_ctx *ctx,
+ if (tb[NFTA_HASH_OFFSET])
+ priv->offset = ntohl(nla_get_be32(tb[NFTA_HASH_OFFSET]));
+
+- priv->sreg = nft_parse_register(tb[NFTA_HASH_SREG]);
+- priv->dreg = nft_parse_register(tb[NFTA_HASH_DREG]);
+-
+ err = nft_parse_u32_check(tb[NFTA_HASH_LEN], U8_MAX, &len);
+ if (err < 0)
+ return err;
+@@ -94,6 +91,10 @@ static int nft_jhash_init(const struct nft_ctx *ctx,
+
+ priv->len = len;
+
++ err = nft_parse_register_load(tb[NFTA_HASH_SREG], &priv->sreg, len);
++ if (err < 0)
++ return err;
++
+ priv->modulus = ntohl(nla_get_be32(tb[NFTA_HASH_MODULUS]));
+ if (priv->modulus < 1)
+ return -ERANGE;
+@@ -108,9 +109,8 @@ static int nft_jhash_init(const struct nft_ctx *ctx,
+ get_random_bytes(&priv->seed, sizeof(priv->seed));
+ }
+
+- return nft_validate_register_load(priv->sreg, len) &&
+- nft_validate_register_store(ctx, priv->dreg, NULL,
+- NFT_DATA_VALUE, sizeof(u32));
++ return nft_parse_register_store(ctx, tb[NFTA_HASH_DREG], &priv->dreg,
++ NULL, NFT_DATA_VALUE, sizeof(u32));
+ }
+
+ static int nft_symhash_init(const struct nft_ctx *ctx,
+@@ -126,8 +126,6 @@ static int nft_symhash_init(const struct nft_ctx *ctx,
+ if (tb[NFTA_HASH_OFFSET])
+ priv->offset = ntohl(nla_get_be32(tb[NFTA_HASH_OFFSET]));
+
+- priv->dreg = nft_parse_register(tb[NFTA_HASH_DREG]);
+-
+ priv->modulus = ntohl(nla_get_be32(tb[NFTA_HASH_MODULUS]));
+ if (priv->modulus < 1)
+ return -ERANGE;
+@@ -135,8 +133,9 @@ static int nft_symhash_init(const struct nft_ctx *ctx,
+ if (priv->offset + priv->modulus - 1 < priv->offset)
+ return -EOVERFLOW;
+
+- return nft_validate_register_store(ctx, priv->dreg, NULL,
+- NFT_DATA_VALUE, sizeof(u32));
++ return nft_parse_register_store(ctx, tb[NFTA_HASH_DREG],
++ &priv->dreg, NULL, NFT_DATA_VALUE,
++ sizeof(u32));
+ }
+
+ static int nft_jhash_dump(struct sk_buff *skb,
+diff --git a/net/netfilter/nft_immediate.c b/net/netfilter/nft_immediate.c
+index 5c9d88560a474..d0f67d325bdfd 100644
+--- a/net/netfilter/nft_immediate.c
++++ b/net/netfilter/nft_immediate.c
+@@ -48,9 +48,9 @@ static int nft_immediate_init(const struct nft_ctx *ctx,
+
+ priv->dlen = desc.len;
+
+- priv->dreg = nft_parse_register(tb[NFTA_IMMEDIATE_DREG]);
+- err = nft_validate_register_store(ctx, priv->dreg, &priv->data,
+- desc.type, desc.len);
++ err = nft_parse_register_store(ctx, tb[NFTA_IMMEDIATE_DREG],
++ &priv->dreg, &priv->data, desc.type,
++ desc.len);
+ if (err < 0)
+ goto err1;
+
+diff --git a/net/netfilter/nft_lookup.c b/net/netfilter/nft_lookup.c
+index f1363b8aabba8..b0f558b4fea54 100644
+--- a/net/netfilter/nft_lookup.c
++++ b/net/netfilter/nft_lookup.c
+@@ -17,8 +17,8 @@
+
+ struct nft_lookup {
+ struct nft_set *set;
+- enum nft_registers sreg:8;
+- enum nft_registers dreg:8;
++ u8 sreg;
++ u8 dreg;
+ bool invert;
+ struct nft_set_binding binding;
+ };
+@@ -76,8 +76,8 @@ static int nft_lookup_init(const struct nft_ctx *ctx,
+ if (IS_ERR(set))
+ return PTR_ERR(set);
+
+- priv->sreg = nft_parse_register(tb[NFTA_LOOKUP_SREG]);
+- err = nft_validate_register_load(priv->sreg, set->klen);
++ err = nft_parse_register_load(tb[NFTA_LOOKUP_SREG], &priv->sreg,
++ set->klen);
+ if (err < 0)
+ return err;
+
+@@ -100,9 +100,9 @@ static int nft_lookup_init(const struct nft_ctx *ctx,
+ if (!(set->flags & NFT_SET_MAP))
+ return -EINVAL;
+
+- priv->dreg = nft_parse_register(tb[NFTA_LOOKUP_DREG]);
+- err = nft_validate_register_store(ctx, priv->dreg, NULL,
+- set->dtype, set->dlen);
++ err = nft_parse_register_store(ctx, tb[NFTA_LOOKUP_DREG],
++ &priv->dreg, NULL, set->dtype,
++ set->dlen);
+ if (err < 0)
+ return err;
+ } else if (set->flags & NFT_SET_MAP)
+diff --git a/net/netfilter/nft_masq.c b/net/netfilter/nft_masq.c
+index 71390b7270405..9953e80537536 100644
+--- a/net/netfilter/nft_masq.c
++++ b/net/netfilter/nft_masq.c
+@@ -15,8 +15,8 @@
+
+ struct nft_masq {
+ u32 flags;
+- enum nft_registers sreg_proto_min:8;
+- enum nft_registers sreg_proto_max:8;
++ u8 sreg_proto_min;
++ u8 sreg_proto_max;
+ };
+
+ static const struct nla_policy nft_masq_policy[NFTA_MASQ_MAX + 1] = {
+@@ -54,19 +54,15 @@ static int nft_masq_init(const struct nft_ctx *ctx,
+ }
+
+ if (tb[NFTA_MASQ_REG_PROTO_MIN]) {
+- priv->sreg_proto_min =
+- nft_parse_register(tb[NFTA_MASQ_REG_PROTO_MIN]);
+-
+- err = nft_validate_register_load(priv->sreg_proto_min, plen);
++ err = nft_parse_register_load(tb[NFTA_MASQ_REG_PROTO_MIN],
++ &priv->sreg_proto_min, plen);
+ if (err < 0)
+ return err;
+
+ if (tb[NFTA_MASQ_REG_PROTO_MAX]) {
+- priv->sreg_proto_max =
+- nft_parse_register(tb[NFTA_MASQ_REG_PROTO_MAX]);
+-
+- err = nft_validate_register_load(priv->sreg_proto_max,
+- plen);
++ err = nft_parse_register_load(tb[NFTA_MASQ_REG_PROTO_MAX],
++ &priv->sreg_proto_max,
++ plen);
+ if (err < 0)
+ return err;
+ } else {
+diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c
+index bf4b3ad5314c3..44d9b38e5f90c 100644
+--- a/net/netfilter/nft_meta.c
++++ b/net/netfilter/nft_meta.c
+@@ -14,6 +14,7 @@
+ #include <linux/in.h>
+ #include <linux/ip.h>
+ #include <linux/ipv6.h>
++#include <linux/random.h>
+ #include <linux/smp.h>
+ #include <linux/static_key.h>
+ #include <net/dst.h>
+@@ -32,8 +33,6 @@
+ #define NFT_META_SECS_PER_DAY 86400
+ #define NFT_META_DAYS_PER_WEEK 7
+
+-static DEFINE_PER_CPU(struct rnd_state, nft_prandom_state);
+-
+ static u8 nft_meta_weekday(void)
+ {
+ time64_t secs = ktime_get_real_seconds();
+@@ -267,13 +266,6 @@ static bool nft_meta_get_eval_ifname(enum nft_meta_keys key, u32 *dest,
+ return true;
+ }
+
+-static noinline u32 nft_prandom_u32(void)
+-{
+- struct rnd_state *state = this_cpu_ptr(&nft_prandom_state);
+-
+- return prandom_u32_state(state);
+-}
+-
+ #ifdef CONFIG_IP_ROUTE_CLASSID
+ static noinline bool
+ nft_meta_get_eval_rtclassid(const struct sk_buff *skb, u32 *dest)
+@@ -385,7 +377,7 @@ void nft_meta_get_eval(const struct nft_expr *expr,
+ break;
+ #endif
+ case NFT_META_PRANDOM:
+- *dest = nft_prandom_u32();
++ *dest = get_random_u32();
+ break;
+ #ifdef CONFIG_XFRM
+ case NFT_META_SECPATH:
+@@ -514,7 +506,6 @@ int nft_meta_get_init(const struct nft_ctx *ctx,
+ len = IFNAMSIZ;
+ break;
+ case NFT_META_PRANDOM:
+- prandom_init_once(&nft_prandom_state);
+ len = sizeof(u32);
+ break;
+ #ifdef CONFIG_XFRM
+@@ -535,9 +526,8 @@ int nft_meta_get_init(const struct nft_ctx *ctx,
+ return -EOPNOTSUPP;
+ }
+
+- priv->dreg = nft_parse_register(tb[NFTA_META_DREG]);
+- return nft_validate_register_store(ctx, priv->dreg, NULL,
+- NFT_DATA_VALUE, len);
++ return nft_parse_register_store(ctx, tb[NFTA_META_DREG], &priv->dreg,
++ NULL, NFT_DATA_VALUE, len);
+ }
+ EXPORT_SYMBOL_GPL(nft_meta_get_init);
+
+@@ -661,8 +651,7 @@ int nft_meta_set_init(const struct nft_ctx *ctx,
+ return -EOPNOTSUPP;
+ }
+
+- priv->sreg = nft_parse_register(tb[NFTA_META_SREG]);
+- err = nft_validate_register_load(priv->sreg, len);
++ err = nft_parse_register_load(tb[NFTA_META_SREG], &priv->sreg, len);
+ if (err < 0)
+ return err;
+
+diff --git a/net/netfilter/nft_nat.c b/net/netfilter/nft_nat.c
+index 6a4a5ac88db70..db8f9116eeb43 100644
+--- a/net/netfilter/nft_nat.c
++++ b/net/netfilter/nft_nat.c
+@@ -21,10 +21,10 @@
+ #include <net/ip.h>
+
+ struct nft_nat {
+- enum nft_registers sreg_addr_min:8;
+- enum nft_registers sreg_addr_max:8;
+- enum nft_registers sreg_proto_min:8;
+- enum nft_registers sreg_proto_max:8;
++ u8 sreg_addr_min;
++ u8 sreg_addr_max;
++ u8 sreg_proto_min;
++ u8 sreg_proto_max;
+ enum nf_nat_manip_type type:8;
+ u8 family;
+ u16 flags;
+@@ -208,18 +208,15 @@ static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
+ priv->family = family;
+
+ if (tb[NFTA_NAT_REG_ADDR_MIN]) {
+- priv->sreg_addr_min =
+- nft_parse_register(tb[NFTA_NAT_REG_ADDR_MIN]);
+- err = nft_validate_register_load(priv->sreg_addr_min, alen);
++ err = nft_parse_register_load(tb[NFTA_NAT_REG_ADDR_MIN],
++ &priv->sreg_addr_min, alen);
+ if (err < 0)
+ return err;
+
+ if (tb[NFTA_NAT_REG_ADDR_MAX]) {
+- priv->sreg_addr_max =
+- nft_parse_register(tb[NFTA_NAT_REG_ADDR_MAX]);
+-
+- err = nft_validate_register_load(priv->sreg_addr_max,
+- alen);
++ err = nft_parse_register_load(tb[NFTA_NAT_REG_ADDR_MAX],
++ &priv->sreg_addr_max,
++ alen);
+ if (err < 0)
+ return err;
+ } else {
+@@ -231,19 +228,15 @@ static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
+
+ plen = sizeof_field(struct nf_nat_range, min_addr.all);
+ if (tb[NFTA_NAT_REG_PROTO_MIN]) {
+- priv->sreg_proto_min =
+- nft_parse_register(tb[NFTA_NAT_REG_PROTO_MIN]);
+-
+- err = nft_validate_register_load(priv->sreg_proto_min, plen);
++ err = nft_parse_register_load(tb[NFTA_NAT_REG_PROTO_MIN],
++ &priv->sreg_proto_min, plen);
+ if (err < 0)
+ return err;
+
+ if (tb[NFTA_NAT_REG_PROTO_MAX]) {
+- priv->sreg_proto_max =
+- nft_parse_register(tb[NFTA_NAT_REG_PROTO_MAX]);
+-
+- err = nft_validate_register_load(priv->sreg_proto_max,
+- plen);
++ err = nft_parse_register_load(tb[NFTA_NAT_REG_PROTO_MAX],
++ &priv->sreg_proto_max,
++ plen);
+ if (err < 0)
+ return err;
+ } else {
+diff --git a/net/netfilter/nft_numgen.c b/net/netfilter/nft_numgen.c
+index f1fc824f97370..4e43214e88def 100644
+--- a/net/netfilter/nft_numgen.c
++++ b/net/netfilter/nft_numgen.c
+@@ -9,14 +9,13 @@
+ #include <linux/netlink.h>
+ #include <linux/netfilter.h>
+ #include <linux/netfilter/nf_tables.h>
++#include <linux/random.h>
+ #include <linux/static_key.h>
+ #include <net/netfilter/nf_tables.h>
+ #include <net/netfilter/nf_tables_core.h>
+
+-static DEFINE_PER_CPU(struct rnd_state, nft_numgen_prandom_state);
+-
+ struct nft_ng_inc {
+- enum nft_registers dreg:8;
++ u8 dreg;
+ u32 modulus;
+ atomic_t counter;
+ u32 offset;
+@@ -66,11 +65,10 @@ static int nft_ng_inc_init(const struct nft_ctx *ctx,
+ if (priv->offset + priv->modulus - 1 < priv->offset)
+ return -EOVERFLOW;
+
+- priv->dreg = nft_parse_register(tb[NFTA_NG_DREG]);
+ atomic_set(&priv->counter, priv->modulus - 1);
+
+- return nft_validate_register_store(ctx, priv->dreg, NULL,
+- NFT_DATA_VALUE, sizeof(u32));
++ return nft_parse_register_store(ctx, tb[NFTA_NG_DREG], &priv->dreg,
++ NULL, NFT_DATA_VALUE, sizeof(u32));
+ }
+
+ static int nft_ng_dump(struct sk_buff *skb, enum nft_registers dreg,
+@@ -100,17 +98,14 @@ static int nft_ng_inc_dump(struct sk_buff *skb, const struct nft_expr *expr)
+ }
+
+ struct nft_ng_random {
+- enum nft_registers dreg:8;
++ u8 dreg;
+ u32 modulus;
+ u32 offset;
+ };
+
+-static u32 nft_ng_random_gen(struct nft_ng_random *priv)
++static u32 nft_ng_random_gen(const struct nft_ng_random *priv)
+ {
+- struct rnd_state *state = this_cpu_ptr(&nft_numgen_prandom_state);
+-
+- return reciprocal_scale(prandom_u32_state(state), priv->modulus) +
+- priv->offset;
++ return reciprocal_scale(get_random_u32(), priv->modulus) + priv->offset;
+ }
+
+ static void nft_ng_random_eval(const struct nft_expr *expr,
+@@ -138,12 +133,8 @@ static int nft_ng_random_init(const struct nft_ctx *ctx,
+ if (priv->offset + priv->modulus - 1 < priv->offset)
+ return -EOVERFLOW;
+
+- prandom_init_once(&nft_numgen_prandom_state);
+-
+- priv->dreg = nft_parse_register(tb[NFTA_NG_DREG]);
+-
+- return nft_validate_register_store(ctx, priv->dreg, NULL,
+- NFT_DATA_VALUE, sizeof(u32));
++ return nft_parse_register_store(ctx, tb[NFTA_NG_DREG], &priv->dreg,
++ NULL, NFT_DATA_VALUE, sizeof(u32));
+ }
+
+ static int nft_ng_random_dump(struct sk_buff *skb, const struct nft_expr *expr)
+diff --git a/net/netfilter/nft_objref.c b/net/netfilter/nft_objref.c
+index 5f9207a9f4851..bc104d36d3bb2 100644
+--- a/net/netfilter/nft_objref.c
++++ b/net/netfilter/nft_objref.c
+@@ -95,7 +95,7 @@ static const struct nft_expr_ops nft_objref_ops = {
+
+ struct nft_objref_map {
+ struct nft_set *set;
+- enum nft_registers sreg:8;
++ u8 sreg;
+ struct nft_set_binding binding;
+ };
+
+@@ -137,8 +137,8 @@ static int nft_objref_map_init(const struct nft_ctx *ctx,
+ if (!(set->flags & NFT_SET_OBJECT))
+ return -EINVAL;
+
+- priv->sreg = nft_parse_register(tb[NFTA_OBJREF_SET_SREG]);
+- err = nft_validate_register_load(priv->sreg, set->klen);
++ err = nft_parse_register_load(tb[NFTA_OBJREF_SET_SREG], &priv->sreg,
++ set->klen);
+ if (err < 0)
+ return err;
+
+diff --git a/net/netfilter/nft_osf.c b/net/netfilter/nft_osf.c
+index 2c957629ea660..d82677e83400b 100644
+--- a/net/netfilter/nft_osf.c
++++ b/net/netfilter/nft_osf.c
+@@ -6,7 +6,7 @@
+ #include <linux/netfilter/nfnetlink_osf.h>
+
+ struct nft_osf {
+- enum nft_registers dreg:8;
++ u8 dreg;
+ u8 ttl;
+ u32 flags;
+ };
+@@ -83,9 +83,9 @@ static int nft_osf_init(const struct nft_ctx *ctx,
+ priv->flags = flags;
+ }
+
+- priv->dreg = nft_parse_register(tb[NFTA_OSF_DREG]);
+- err = nft_validate_register_store(ctx, priv->dreg, NULL,
+- NFT_DATA_VALUE, NFT_OSF_MAXGENRELEN);
++ err = nft_parse_register_store(ctx, tb[NFTA_OSF_DREG], &priv->dreg,
++ NULL, NFT_DATA_VALUE,
++ NFT_OSF_MAXGENRELEN);
+ if (err < 0)
+ return err;
+
+diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c
+index 6a8495bd08bb2..01878c16418c2 100644
+--- a/net/netfilter/nft_payload.c
++++ b/net/netfilter/nft_payload.c
+@@ -144,10 +144,10 @@ static int nft_payload_init(const struct nft_ctx *ctx,
+ priv->base = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
+ priv->offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
+ priv->len = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));
+- priv->dreg = nft_parse_register(tb[NFTA_PAYLOAD_DREG]);
+
+- return nft_validate_register_store(ctx, priv->dreg, NULL,
+- NFT_DATA_VALUE, priv->len);
++ return nft_parse_register_store(ctx, tb[NFTA_PAYLOAD_DREG],
++ &priv->dreg, NULL, NFT_DATA_VALUE,
++ priv->len);
+ }
+
+ static int nft_payload_dump(struct sk_buff *skb, const struct nft_expr *expr)
+@@ -664,7 +664,6 @@ static int nft_payload_set_init(const struct nft_ctx *ctx,
+ priv->base = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
+ priv->offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
+ priv->len = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));
+- priv->sreg = nft_parse_register(tb[NFTA_PAYLOAD_SREG]);
+
+ if (tb[NFTA_PAYLOAD_CSUM_TYPE])
+ priv->csum_type =
+@@ -697,7 +696,8 @@ static int nft_payload_set_init(const struct nft_ctx *ctx,
+ return -EOPNOTSUPP;
+ }
+
+- return nft_validate_register_load(priv->sreg, priv->len);
++ return nft_parse_register_load(tb[NFTA_PAYLOAD_SREG], &priv->sreg,
++ priv->len);
+ }
+
+ static int nft_payload_set_dump(struct sk_buff *skb, const struct nft_expr *expr)
+diff --git a/net/netfilter/nft_queue.c b/net/netfilter/nft_queue.c
+index 23265d757acbc..9ba1de51ac070 100644
+--- a/net/netfilter/nft_queue.c
++++ b/net/netfilter/nft_queue.c
+@@ -19,10 +19,10 @@
+ static u32 jhash_initval __read_mostly;
+
+ struct nft_queue {
+- enum nft_registers sreg_qnum:8;
+- u16 queuenum;
+- u16 queues_total;
+- u16 flags;
++ u8 sreg_qnum;
++ u16 queuenum;
++ u16 queues_total;
++ u16 flags;
+ };
+
+ static void nft_queue_eval(const struct nft_expr *expr,
+@@ -111,8 +111,8 @@ static int nft_queue_sreg_init(const struct nft_ctx *ctx,
+ struct nft_queue *priv = nft_expr_priv(expr);
+ int err;
+
+- priv->sreg_qnum = nft_parse_register(tb[NFTA_QUEUE_SREG_QNUM]);
+- err = nft_validate_register_load(priv->sreg_qnum, sizeof(u32));
++ err = nft_parse_register_load(tb[NFTA_QUEUE_SREG_QNUM],
++ &priv->sreg_qnum, sizeof(u32));
+ if (err < 0)
+ return err;
+
+diff --git a/net/netfilter/nft_range.c b/net/netfilter/nft_range.c
+index 89efcc5a533d2..e4a1c44d7f513 100644
+--- a/net/netfilter/nft_range.c
++++ b/net/netfilter/nft_range.c
+@@ -15,7 +15,7 @@
+ struct nft_range_expr {
+ struct nft_data data_from;
+ struct nft_data data_to;
+- enum nft_registers sreg:8;
++ u8 sreg;
+ u8 len;
+ enum nft_range_ops op:8;
+ };
+@@ -86,8 +86,8 @@ static int nft_range_init(const struct nft_ctx *ctx, const struct nft_expr *expr
+ goto err2;
+ }
+
+- priv->sreg = nft_parse_register(tb[NFTA_RANGE_SREG]);
+- err = nft_validate_register_load(priv->sreg, desc_from.len);
++ err = nft_parse_register_load(tb[NFTA_RANGE_SREG], &priv->sreg,
++ desc_from.len);
+ if (err < 0)
+ goto err2;
+
+diff --git a/net/netfilter/nft_redir.c b/net/netfilter/nft_redir.c
+index 2056051c0af0d..ba09890dddb50 100644
+--- a/net/netfilter/nft_redir.c
++++ b/net/netfilter/nft_redir.c
+@@ -14,8 +14,8 @@
+ #include <net/netfilter/nf_tables.h>
+
+ struct nft_redir {
+- enum nft_registers sreg_proto_min:8;
+- enum nft_registers sreg_proto_max:8;
++ u8 sreg_proto_min;
++ u8 sreg_proto_max;
+ u16 flags;
+ };
+
+@@ -50,19 +50,15 @@ static int nft_redir_init(const struct nft_ctx *ctx,
+
+ plen = sizeof_field(struct nf_nat_range, min_addr.all);
+ if (tb[NFTA_REDIR_REG_PROTO_MIN]) {
+- priv->sreg_proto_min =
+- nft_parse_register(tb[NFTA_REDIR_REG_PROTO_MIN]);
+-
+- err = nft_validate_register_load(priv->sreg_proto_min, plen);
++ err = nft_parse_register_load(tb[NFTA_REDIR_REG_PROTO_MIN],
++ &priv->sreg_proto_min, plen);
+ if (err < 0)
+ return err;
+
+ if (tb[NFTA_REDIR_REG_PROTO_MAX]) {
+- priv->sreg_proto_max =
+- nft_parse_register(tb[NFTA_REDIR_REG_PROTO_MAX]);
+-
+- err = nft_validate_register_load(priv->sreg_proto_max,
+- plen);
++ err = nft_parse_register_load(tb[NFTA_REDIR_REG_PROTO_MAX],
++ &priv->sreg_proto_max,
++ plen);
+ if (err < 0)
+ return err;
+ } else {
+diff --git a/net/netfilter/nft_rt.c b/net/netfilter/nft_rt.c
+index 7cfcb0e2f7ee1..bcd01a63e38f1 100644
+--- a/net/netfilter/nft_rt.c
++++ b/net/netfilter/nft_rt.c
+@@ -15,7 +15,7 @@
+
+ struct nft_rt {
+ enum nft_rt_keys key:8;
+- enum nft_registers dreg:8;
++ u8 dreg;
+ };
+
+ static u16 get_tcpmss(const struct nft_pktinfo *pkt, const struct dst_entry *skbdst)
+@@ -141,9 +141,8 @@ static int nft_rt_get_init(const struct nft_ctx *ctx,
+ return -EOPNOTSUPP;
+ }
+
+- priv->dreg = nft_parse_register(tb[NFTA_RT_DREG]);
+- return nft_validate_register_store(ctx, priv->dreg, NULL,
+- NFT_DATA_VALUE, len);
++ return nft_parse_register_store(ctx, tb[NFTA_RT_DREG], &priv->dreg,
++ NULL, NFT_DATA_VALUE, len);
+ }
+
+ static int nft_rt_get_dump(struct sk_buff *skb,
+diff --git a/net/netfilter/nft_socket.c b/net/netfilter/nft_socket.c
+index 8a0125e966c83..f6d517185d9c0 100644
+--- a/net/netfilter/nft_socket.c
++++ b/net/netfilter/nft_socket.c
+@@ -10,7 +10,7 @@
+ struct nft_socket {
+ enum nft_socket_keys key:8;
+ union {
+- enum nft_registers dreg:8;
++ u8 dreg;
+ };
+ };
+
+@@ -146,9 +146,8 @@ static int nft_socket_init(const struct nft_ctx *ctx,
+ return -EOPNOTSUPP;
+ }
+
+- priv->dreg = nft_parse_register(tb[NFTA_SOCKET_DREG]);
+- return nft_validate_register_store(ctx, priv->dreg, NULL,
+- NFT_DATA_VALUE, len);
++ return nft_parse_register_store(ctx, tb[NFTA_SOCKET_DREG], &priv->dreg,
++ NULL, NFT_DATA_VALUE, len);
+ }
+
+ static int nft_socket_dump(struct sk_buff *skb,
+diff --git a/net/netfilter/nft_tproxy.c b/net/netfilter/nft_tproxy.c
+index 242222dc52c3c..37c728bdad41c 100644
+--- a/net/netfilter/nft_tproxy.c
++++ b/net/netfilter/nft_tproxy.c
+@@ -13,9 +13,9 @@
+ #endif
+
+ struct nft_tproxy {
+- enum nft_registers sreg_addr:8;
+- enum nft_registers sreg_port:8;
+- u8 family;
++ u8 sreg_addr;
++ u8 sreg_port;
++ u8 family;
+ };
+
+ static void nft_tproxy_eval_v4(const struct nft_expr *expr,
+@@ -254,15 +254,15 @@ static int nft_tproxy_init(const struct nft_ctx *ctx,
+ }
+
+ if (tb[NFTA_TPROXY_REG_ADDR]) {
+- priv->sreg_addr = nft_parse_register(tb[NFTA_TPROXY_REG_ADDR]);
+- err = nft_validate_register_load(priv->sreg_addr, alen);
++ err = nft_parse_register_load(tb[NFTA_TPROXY_REG_ADDR],
++ &priv->sreg_addr, alen);
+ if (err < 0)
+ return err;
+ }
+
+ if (tb[NFTA_TPROXY_REG_PORT]) {
+- priv->sreg_port = nft_parse_register(tb[NFTA_TPROXY_REG_PORT]);
+- err = nft_validate_register_load(priv->sreg_port, sizeof(u16));
++ err = nft_parse_register_load(tb[NFTA_TPROXY_REG_PORT],
++ &priv->sreg_port, sizeof(u16));
+ if (err < 0)
+ return err;
+ }
+diff --git a/net/netfilter/nft_tunnel.c b/net/netfilter/nft_tunnel.c
+index d3eb953d0333b..3b27926d5382c 100644
+--- a/net/netfilter/nft_tunnel.c
++++ b/net/netfilter/nft_tunnel.c
+@@ -15,7 +15,7 @@
+
+ struct nft_tunnel {
+ enum nft_tunnel_keys key:8;
+- enum nft_registers dreg:8;
++ u8 dreg;
+ enum nft_tunnel_mode mode:8;
+ };
+
+@@ -93,8 +93,6 @@ static int nft_tunnel_get_init(const struct nft_ctx *ctx,
+ return -EOPNOTSUPP;
+ }
+
+- priv->dreg = nft_parse_register(tb[NFTA_TUNNEL_DREG]);
+-
+ if (tb[NFTA_TUNNEL_MODE]) {
+ priv->mode = ntohl(nla_get_be32(tb[NFTA_TUNNEL_MODE]));
+ if (priv->mode > NFT_TUNNEL_MODE_MAX)
+@@ -103,8 +101,8 @@ static int nft_tunnel_get_init(const struct nft_ctx *ctx,
+ priv->mode = NFT_TUNNEL_MODE_NONE;
+ }
+
+- return nft_validate_register_store(ctx, priv->dreg, NULL,
+- NFT_DATA_VALUE, len);
++ return nft_parse_register_store(ctx, tb[NFTA_TUNNEL_DREG], &priv->dreg,
++ NULL, NFT_DATA_VALUE, len);
+ }
+
+ static int nft_tunnel_get_dump(struct sk_buff *skb,
+diff --git a/net/netfilter/nft_xfrm.c b/net/netfilter/nft_xfrm.c
+index 06d5cabf1d7c4..cbbbc4ecad3ae 100644
+--- a/net/netfilter/nft_xfrm.c
++++ b/net/netfilter/nft_xfrm.c
+@@ -24,7 +24,7 @@ static const struct nla_policy nft_xfrm_policy[NFTA_XFRM_MAX + 1] = {
+
+ struct nft_xfrm {
+ enum nft_xfrm_keys key:8;
+- enum nft_registers dreg:8;
++ u8 dreg;
+ u8 dir;
+ u8 spnum;
+ };
+@@ -86,9 +86,8 @@ static int nft_xfrm_get_init(const struct nft_ctx *ctx,
+
+ priv->spnum = spnum;
+
+- priv->dreg = nft_parse_register(tb[NFTA_XFRM_DREG]);
+- return nft_validate_register_store(ctx, priv->dreg, NULL,
+- NFT_DATA_VALUE, len);
++ return nft_parse_register_store(ctx, tb[NFTA_XFRM_DREG], &priv->dreg,
++ NULL, NFT_DATA_VALUE, len);
+ }
+
+ /* Return true if key asks for daddr/saddr and current
+diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
+index b03d142ec82ef..c9ba61413c98b 100644
+--- a/net/openvswitch/flow.c
++++ b/net/openvswitch/flow.c
+@@ -265,7 +265,7 @@ static int parse_ipv6hdr(struct sk_buff *skb, struct sw_flow_key *key)
+ if (flags & IP6_FH_F_FRAG) {
+ if (frag_off) {
+ key->ip.frag = OVS_FRAG_TYPE_LATER;
+- key->ip.proto = nexthdr;
++ key->ip.proto = NEXTHDR_FRAGMENT;
+ return 0;
+ }
+ key->ip.frag = OVS_FRAG_TYPE_FIRST;
+diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
+index 0c345e43a09a3..adc5407fd5d58 100644
+--- a/net/sched/sch_netem.c
++++ b/net/sched/sch_netem.c
+@@ -1146,9 +1146,9 @@ static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
+ struct tc_netem_rate rate;
+ struct tc_netem_slot slot;
+
+- qopt.latency = min_t(psched_tdiff_t, PSCHED_NS2TICKS(q->latency),
++ qopt.latency = min_t(psched_time_t, PSCHED_NS2TICKS(q->latency),
+ UINT_MAX);
+- qopt.jitter = min_t(psched_tdiff_t, PSCHED_NS2TICKS(q->jitter),
++ qopt.jitter = min_t(psched_time_t, PSCHED_NS2TICKS(q->jitter),
+ UINT_MAX);
+ qopt.limit = q->limit;
+ qopt.loss = q->loss;
+diff --git a/net/tipc/core.c b/net/tipc/core.c
+index 40c03085c0eaf..7724499f516e9 100644
+--- a/net/tipc/core.c
++++ b/net/tipc/core.c
+@@ -60,7 +60,7 @@ static int __net_init tipc_init_net(struct net *net)
+ tn->trial_addr = 0;
+ tn->addr_trial_end = 0;
+ tn->capabilities = TIPC_NODE_CAPABILITIES;
+- INIT_WORK(&tn->final_work.work, tipc_net_finalize_work);
++ INIT_WORK(&tn->work, tipc_net_finalize_work);
+ memset(tn->node_id, 0, sizeof(tn->node_id));
+ memset(tn->node_id_string, 0, sizeof(tn->node_id_string));
+ tn->mon_threshold = TIPC_DEF_MON_THRESHOLD;
+@@ -111,10 +111,9 @@ static void __net_exit tipc_exit_net(struct net *net)
+ struct tipc_net *tn = tipc_net(net);
+
+ tipc_detach_loopback(net);
+- /* Make sure the tipc_net_finalize_work() finished */
+- cancel_work_sync(&tn->final_work.work);
+ tipc_net_stop(net);
+-
++ /* Make sure the tipc_net_finalize_work() finished */
++ cancel_work_sync(&tn->work);
+ tipc_bcast_stop(net);
+ tipc_nametbl_stop(net);
+ tipc_sk_rht_destroy(net);
+diff --git a/net/tipc/core.h b/net/tipc/core.h
+index 992924a849be6..73a26b0b9ca19 100644
+--- a/net/tipc/core.h
++++ b/net/tipc/core.h
+@@ -90,12 +90,6 @@ extern unsigned int tipc_net_id __read_mostly;
+ extern int sysctl_tipc_rmem[3] __read_mostly;
+ extern int sysctl_tipc_named_timeout __read_mostly;
+
+-struct tipc_net_work {
+- struct work_struct work;
+- struct net *net;
+- u32 addr;
+-};
+-
+ struct tipc_net {
+ u8 node_id[NODE_ID_LEN];
+ u32 node_addr;
+@@ -150,7 +144,7 @@ struct tipc_net {
+ struct tipc_crypto *crypto_tx;
+ #endif
+ /* Work item for net finalize */
+- struct tipc_net_work final_work;
++ struct work_struct work;
+ /* The numbers of work queues in schedule */
+ atomic_t wq_count;
+ };
+diff --git a/net/tipc/discover.c b/net/tipc/discover.c
+index d4ecacddb40ce..14bc20604051d 100644
+--- a/net/tipc/discover.c
++++ b/net/tipc/discover.c
+@@ -167,7 +167,7 @@ static bool tipc_disc_addr_trial_msg(struct tipc_discoverer *d,
+
+ /* Apply trial address if we just left trial period */
+ if (!trial && !self) {
+- tipc_sched_net_finalize(net, tn->trial_addr);
++ schedule_work(&tn->work);
+ msg_set_prevnode(buf_msg(d->skb), tn->trial_addr);
+ msg_set_type(buf_msg(d->skb), DSC_REQ_MSG);
+ }
+@@ -307,7 +307,7 @@ static void tipc_disc_timeout(struct timer_list *t)
+ if (!time_before(jiffies, tn->addr_trial_end) && !tipc_own_addr(net)) {
+ mod_timer(&d->timer, jiffies + TIPC_DISC_INIT);
+ spin_unlock_bh(&d->lock);
+- tipc_sched_net_finalize(net, tn->trial_addr);
++ schedule_work(&tn->work);
+ return;
+ }
+
+diff --git a/net/tipc/link.c b/net/tipc/link.c
+index 7a353ff628448..064fdb8e50e19 100644
+--- a/net/tipc/link.c
++++ b/net/tipc/link.c
+@@ -344,6 +344,11 @@ char tipc_link_plane(struct tipc_link *l)
+ return l->net_plane;
+ }
+
++struct net *tipc_link_net(struct tipc_link *l)
++{
++ return l->net;
++}
++
+ void tipc_link_update_caps(struct tipc_link *l, u16 capabilities)
+ {
+ l->peer_caps = capabilities;
+diff --git a/net/tipc/link.h b/net/tipc/link.h
+index fc07232c9a127..a16f401fdabda 100644
+--- a/net/tipc/link.h
++++ b/net/tipc/link.h
+@@ -156,4 +156,5 @@ int tipc_link_bc_sync_rcv(struct tipc_link *l, struct tipc_msg *hdr,
+ int tipc_link_bc_nack_rcv(struct tipc_link *l, struct sk_buff *skb,
+ struct sk_buff_head *xmitq);
+ bool tipc_link_too_silent(struct tipc_link *l);
++struct net *tipc_link_net(struct tipc_link *l);
+ #endif
+diff --git a/net/tipc/net.c b/net/tipc/net.c
+index 0bb2323201daa..671cb4f9d5633 100644
+--- a/net/tipc/net.c
++++ b/net/tipc/net.c
+@@ -41,6 +41,7 @@
+ #include "socket.h"
+ #include "node.h"
+ #include "bcast.h"
++#include "link.h"
+ #include "netlink.h"
+ #include "monitor.h"
+
+@@ -138,19 +139,9 @@ static void tipc_net_finalize(struct net *net, u32 addr)
+
+ void tipc_net_finalize_work(struct work_struct *work)
+ {
+- struct tipc_net_work *fwork;
++ struct tipc_net *tn = container_of(work, struct tipc_net, work);
+
+- fwork = container_of(work, struct tipc_net_work, work);
+- tipc_net_finalize(fwork->net, fwork->addr);
+-}
+-
+-void tipc_sched_net_finalize(struct net *net, u32 addr)
+-{
+- struct tipc_net *tn = tipc_net(net);
+-
+- tn->final_work.net = net;
+- tn->final_work.addr = addr;
+- schedule_work(&tn->final_work.work);
++ tipc_net_finalize(tipc_link_net(tn->bcl), tn->trial_addr);
+ }
+
+ void tipc_net_stop(struct net *net)
+diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
+index 79aef50ede170..e48742760fec8 100644
+--- a/scripts/mod/modpost.c
++++ b/scripts/mod/modpost.c
+@@ -1119,7 +1119,7 @@ static const struct sectioncheck sectioncheck[] = {
+ },
+ /* Do not export init/exit functions or data */
+ {
+- .fromsec = { "__ksymtab*", NULL },
++ .fromsec = { "___ksymtab*", NULL },
+ .bad_tosec = { INIT_SECTIONS, EXIT_SECTIONS, NULL },
+ .mismatch = EXPORT_TO_INIT_EXIT,
+ .symbol_white_list = { DEFAULT_SYMBOL_WHITE_LIST, NULL },
+diff --git a/sound/pci/hda/hda_auto_parser.c b/sound/pci/hda/hda_auto_parser.c
+index 4dc01647753c8..ec821a263036d 100644
+--- a/sound/pci/hda/hda_auto_parser.c
++++ b/sound/pci/hda/hda_auto_parser.c
+@@ -823,7 +823,7 @@ static void set_pin_targets(struct hda_codec *codec,
+ snd_hda_set_pin_ctl_cache(codec, cfg->nid, cfg->val);
+ }
+
+-static void apply_fixup(struct hda_codec *codec, int id, int action, int depth)
++void __snd_hda_apply_fixup(struct hda_codec *codec, int id, int action, int depth)
+ {
+ const char *modelname = codec->fixup_name;
+
+@@ -833,7 +833,7 @@ static void apply_fixup(struct hda_codec *codec, int id, int action, int depth)
+ if (++depth > 10)
+ break;
+ if (fix->chained_before)
+- apply_fixup(codec, fix->chain_id, action, depth + 1);
++ __snd_hda_apply_fixup(codec, fix->chain_id, action, depth + 1);
+
+ switch (fix->type) {
+ case HDA_FIXUP_PINS:
+@@ -874,6 +874,7 @@ static void apply_fixup(struct hda_codec *codec, int id, int action, int depth)
+ id = fix->chain_id;
+ }
+ }
++EXPORT_SYMBOL_GPL(__snd_hda_apply_fixup);
+
+ /**
+ * snd_hda_apply_fixup - Apply the fixup chain with the given action
+@@ -883,7 +884,7 @@ static void apply_fixup(struct hda_codec *codec, int id, int action, int depth)
+ void snd_hda_apply_fixup(struct hda_codec *codec, int action)
+ {
+ if (codec->fixup_list)
+- apply_fixup(codec, codec->fixup_id, action, 0);
++ __snd_hda_apply_fixup(codec, codec->fixup_id, action, 0);
+ }
+ EXPORT_SYMBOL_GPL(snd_hda_apply_fixup);
+
+diff --git a/sound/pci/hda/hda_local.h b/sound/pci/hda/hda_local.h
+index 5beb8aa44ecd8..efc0c68a54427 100644
+--- a/sound/pci/hda/hda_local.h
++++ b/sound/pci/hda/hda_local.h
+@@ -357,6 +357,7 @@ void snd_hda_apply_verbs(struct hda_codec *codec);
+ void snd_hda_apply_pincfgs(struct hda_codec *codec,
+ const struct hda_pintbl *cfg);
+ void snd_hda_apply_fixup(struct hda_codec *codec, int action);
++void __snd_hda_apply_fixup(struct hda_codec *codec, int id, int action, int depth);
+ void snd_hda_pick_fixup(struct hda_codec *codec,
+ const struct hda_model_fixup *models,
+ const struct snd_pci_quirk *quirk,
+diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
+index 0dd6d37db9666..53b7ea86f3f84 100644
+--- a/sound/pci/hda/patch_conexant.c
++++ b/sound/pci/hda/patch_conexant.c
+@@ -1072,11 +1072,11 @@ static int patch_conexant_auto(struct hda_codec *codec)
+ if (err < 0)
+ goto error;
+
+- err = snd_hda_gen_parse_auto_config(codec, &spec->gen.autocfg);
++ err = cx_auto_parse_beep(codec);
+ if (err < 0)
+ goto error;
+
+- err = cx_auto_parse_beep(codec);
++ err = snd_hda_gen_parse_auto_config(codec, &spec->gen.autocfg);
+ if (err < 0)
+ goto error;
+
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 7c720f03c1349..604f55ec7944b 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -2643,6 +2643,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x1558, 0x67e1, "Clevo PB71[DE][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+ SND_PCI_QUIRK(0x1558, 0x67e5, "Clevo PC70D[PRS](?:-D|-G)?", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+ SND_PCI_QUIRK(0x1558, 0x67f1, "Clevo PC70H[PRS]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
++ SND_PCI_QUIRK(0x1558, 0x67f5, "Clevo PD70PN[NRT]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+ SND_PCI_QUIRK(0x1558, 0x70d1, "Clevo PC70[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+ SND_PCI_QUIRK(0x1558, 0x7714, "Clevo X170SM", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+ SND_PCI_QUIRK(0x1558, 0x7715, "Clevo X170KM-G", ALC1220_FIXUP_CLEVO_PB51ED),
+@@ -6827,6 +6828,7 @@ enum {
+ ALC287_FIXUP_LEGION_15IMHG05_SPEAKERS,
+ ALC287_FIXUP_LEGION_15IMHG05_AUTOMUTE,
+ ALC287_FIXUP_YOGA7_14ITL_SPEAKERS,
++ ALC298_FIXUP_LENOVO_C940_DUET7,
+ ALC287_FIXUP_13S_GEN2_SPEAKERS,
+ ALC256_FIXUP_SET_COEF_DEFAULTS,
+ ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE,
+@@ -6836,6 +6838,23 @@ enum {
+ ALC285_FIXUP_LEGION_Y9000X_AUTOMUTE,
+ };
+
++/* A special fixup for Lenovo C940 and Yoga Duet 7;
++ * both have the very same PCI SSID, and we need to apply different fixups
++ * depending on the codec ID
++ */
++static void alc298_fixup_lenovo_c940_duet7(struct hda_codec *codec,
++ const struct hda_fixup *fix,
++ int action)
++{
++ int id;
++
++ if (codec->core.vendor_id == 0x10ec0298)
++ id = ALC298_FIXUP_LENOVO_SPK_VOLUME; /* C940 */
++ else
++ id = ALC287_FIXUP_YOGA7_14ITL_SPEAKERS; /* Duet 7 */
++ __snd_hda_apply_fixup(codec, id, action, 0);
++}
++
+ static const struct hda_fixup alc269_fixups[] = {
+ [ALC269_FIXUP_GPIO2] = {
+ .type = HDA_FIXUP_FUNC,
+@@ -8529,6 +8548,10 @@ static const struct hda_fixup alc269_fixups[] = {
+ .chained = true,
+ .chain_id = ALC269_FIXUP_HEADSET_MODE,
+ },
++ [ALC298_FIXUP_LENOVO_C940_DUET7] = {
++ .type = HDA_FIXUP_FUNC,
++ .v.func = alc298_fixup_lenovo_c940_duet7,
++ },
+ [ALC287_FIXUP_13S_GEN2_SPEAKERS] = {
+ .type = HDA_FIXUP_VERBS,
+ .v.verbs = (const struct hda_verb[]) {
+@@ -8768,6 +8791,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ ALC285_FIXUP_HP_GPIO_AMP_INIT),
+ SND_PCI_QUIRK(0x103c, 0x8783, "HP ZBook Fury 15 G7 Mobile Workstation",
+ ALC285_FIXUP_HP_GPIO_AMP_INIT),
++ SND_PCI_QUIRK(0x103c, 0x8787, "HP OMEN 15", ALC285_FIXUP_HP_MUTE_LED),
+ SND_PCI_QUIRK(0x103c, 0x8788, "HP OMEN 15", ALC285_FIXUP_HP_MUTE_LED),
+ SND_PCI_QUIRK(0x103c, 0x87c8, "HP", ALC287_FIXUP_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x87e5, "HP ProBook 440 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED),
+@@ -8909,6 +8933,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x1558, 0x70f3, "Clevo NH77DPQ", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0x70f4, "Clevo NH77EPY", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0x70f6, "Clevo NH77DPQ-Y", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
++ SND_PCI_QUIRK(0x1558, 0x7716, "Clevo NS50PU", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0x8228, "Clevo NR40BU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0x8520, "Clevo NH50D[CD]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0x8521, "Clevo NH77D[CD]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+@@ -8992,7 +9017,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x17aa, 0x31af, "ThinkCentre Station", ALC623_FIXUP_LENOVO_THINKSTATION_P340),
+ SND_PCI_QUIRK(0x17aa, 0x3802, "Lenovo Yoga DuetITL 2021", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS),
+ SND_PCI_QUIRK(0x17aa, 0x3813, "Legion 7i 15IMHG05", ALC287_FIXUP_LEGION_15IMHG05_SPEAKERS),
+- SND_PCI_QUIRK(0x17aa, 0x3818, "Lenovo C940", ALC298_FIXUP_LENOVO_SPK_VOLUME),
++ SND_PCI_QUIRK(0x17aa, 0x3818, "Lenovo C940 / Yoga Duet 7", ALC298_FIXUP_LENOVO_C940_DUET7),
+ SND_PCI_QUIRK(0x17aa, 0x3819, "Lenovo 13s Gen2 ITL", ALC287_FIXUP_13S_GEN2_SPEAKERS),
+ SND_PCI_QUIRK(0x17aa, 0x3820, "Yoga Duet 7 13ITL6", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS),
+ SND_PCI_QUIRK(0x17aa, 0x3824, "Legion Y9000X 2020", ALC285_FIXUP_LEGION_Y9000X_SPEAKERS),
+@@ -10446,6 +10471,7 @@ enum {
+ ALC668_FIXUP_MIC_DET_COEF,
+ ALC897_FIXUP_LENOVO_HEADSET_MIC,
+ ALC897_FIXUP_HEADSET_MIC_PIN,
++ ALC897_FIXUP_HP_HSMIC_VERB,
+ };
+
+ static const struct hda_fixup alc662_fixups[] = {
+@@ -10865,6 +10891,13 @@ static const struct hda_fixup alc662_fixups[] = {
+ .chained = true,
+ .chain_id = ALC897_FIXUP_LENOVO_HEADSET_MIC
+ },
++ [ALC897_FIXUP_HP_HSMIC_VERB] = {
++ .type = HDA_FIXUP_PINS,
++ .v.pins = (const struct hda_pintbl[]) {
++ { 0x19, 0x01a1913c }, /* use as headset mic, without its own jack detect */
++ { }
++ },
++ },
+ };
+
+ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
+@@ -10890,6 +10923,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x1028, 0x0698, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1028, 0x069f, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x103c, 0x1632, "HP RP5800", ALC662_FIXUP_HP_RP5800),
++ SND_PCI_QUIRK(0x103c, 0x8719, "HP", ALC897_FIXUP_HP_HSMIC_VERB),
+ SND_PCI_QUIRK(0x103c, 0x873e, "HP", ALC671_FIXUP_HP_HEADSET_MIC2),
+ SND_PCI_QUIRK(0x103c, 0x885f, "HP 288 Pro G8", ALC671_FIXUP_HP_HEADSET_MIC2),
+ SND_PCI_QUIRK(0x1043, 0x1080, "Asus UX501VW", ALC668_FIXUP_HEADSET_MODE),
+diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c
+index 773a136161f11..a188901a83bbe 100644
+--- a/sound/pci/hda/patch_via.c
++++ b/sound/pci/hda/patch_via.c
+@@ -520,11 +520,11 @@ static int via_parse_auto_config(struct hda_codec *codec)
+ if (err < 0)
+ return err;
+
+- err = snd_hda_gen_parse_auto_config(codec, &spec->gen.autocfg);
++ err = auto_parse_beep(codec);
+ if (err < 0)
+ return err;
+
+- err = auto_parse_beep(codec);
++ err = snd_hda_gen_parse_auto_config(codec, &spec->gen.autocfg);
+ if (err < 0)
+ return err;
+
+diff --git a/tools/testing/selftests/netfilter/nft_concat_range.sh b/tools/testing/selftests/netfilter/nft_concat_range.sh
+index b5eef5ffb58e5..af3461cb5c409 100755
+--- a/tools/testing/selftests/netfilter/nft_concat_range.sh
++++ b/tools/testing/selftests/netfilter/nft_concat_range.sh
+@@ -31,7 +31,7 @@ BUGS="flush_remove_add reload"
+
+ # List of possible paths to pktgen script from kernel tree for performance tests
+ PKTGEN_SCRIPT_PATHS="
+- ../../../samples/pktgen/pktgen_bench_xmit_mode_netif_receive.sh
++ ../../../../samples/pktgen/pktgen_bench_xmit_mode_netif_receive.sh
+ pktgen/pktgen_bench_xmit_mode_netif_receive.sh"
+
+ # Definition of set types: