author     Mike Pagano <mpagano@gentoo.org>    2019-08-29 10:15:04 -0400
committer  Mike Pagano <mpagano@gentoo.org>    2019-08-29 10:15:04 -0400
commit     e82b3f7e057ed2b8500c8faacd875f3dc7d43572 (patch)
tree       ef986b0e08ad072d457d5dd640ea0c90250934d0
parent     Linux patch 4.19.68 (diff)
Linux patch 4.19.69 (tag: 4.19-69)
Signed-off-by: Mike Pagano <mpagano@gentoo.org>
-rw-r--r--  0000_README                 4
-rw-r--r--  1068_linux-4.19.69.patch    4438
2 files changed, 4442 insertions, 0 deletions
diff --git a/0000_README b/0000_README index dd97210b..c2032037 100644 --- a/0000_README +++ b/0000_README @@ -315,6 +315,10 @@ Patch: 1067_linux-4.19.68.patch From: https://www.kernel.org Desc: Linux 4.19.68 +Patch: 1068_linux-4.19.69.patch +From: https://www.kernel.org +Desc: Linux 4.19.69 + Patch: 1500_XATTR_USER_PREFIX.patch From: https://bugs.gentoo.org/show_bug.cgi?id=470644 Desc: Support for namespace user.pax.* on tmpfs. diff --git a/1068_linux-4.19.69.patch b/1068_linux-4.19.69.patch new file mode 100644 index 00000000..c959f94f --- /dev/null +++ b/1068_linux-4.19.69.patch @@ -0,0 +1,4438 @@ +diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt +index c96a8e9ad5c2..e8ddf0ef232e 100644 +--- a/Documentation/admin-guide/kernel-parameters.txt ++++ b/Documentation/admin-guide/kernel-parameters.txt +@@ -3948,6 +3948,13 @@ + Run specified binary instead of /init from the ramdisk, + used for early userspace startup. See initrd. + ++ rdrand= [X86] ++ force - Override the decision by the kernel to hide the ++ advertisement of RDRAND support (this affects ++ certain AMD processors because of buggy BIOS ++ support, specifically around the suspend/resume ++ path). ++ + rdt= [HW,X86,RDT] + Turn on/off individual RDT features. List is: + cmt, mbmtotal, mbmlocal, l3cat, l3cdp, l2cat, l2cdp, +diff --git a/Makefile b/Makefile +index 6f164b04d953..677341239449 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,7 +1,7 @@ + # SPDX-License-Identifier: GPL-2.0 + VERSION = 4 + PATCHLEVEL = 19 +-SUBLEVEL = 68 ++SUBLEVEL = 69 + EXTRAVERSION = + NAME = "People's Front" + +diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c +index fd6cde23bb5d..871fa50a09f1 100644 +--- a/arch/arm/kvm/coproc.c ++++ b/arch/arm/kvm/coproc.c +@@ -658,13 +658,22 @@ int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run) + } + + static void reset_coproc_regs(struct kvm_vcpu *vcpu, +- const struct coproc_reg *table, size_t num) ++ const struct coproc_reg *table, size_t num, ++ unsigned long *bmap) + { + unsigned long i; + + for (i = 0; i < num; i++) +- if (table[i].reset) ++ if (table[i].reset) { ++ int reg = table[i].reg; ++ + table[i].reset(vcpu, &table[i]); ++ if (reg > 0 && reg < NR_CP15_REGS) { ++ set_bit(reg, bmap); ++ if (table[i].is_64bit) ++ set_bit(reg + 1, bmap); ++ } ++ } + } + + static struct coproc_params decode_32bit_hsr(struct kvm_vcpu *vcpu) +@@ -1439,17 +1448,15 @@ void kvm_reset_coprocs(struct kvm_vcpu *vcpu) + { + size_t num; + const struct coproc_reg *table; +- +- /* Catch someone adding a register without putting in reset entry. */ +- memset(vcpu->arch.ctxt.cp15, 0x42, sizeof(vcpu->arch.ctxt.cp15)); ++ DECLARE_BITMAP(bmap, NR_CP15_REGS) = { 0, }; + + /* Generic chip reset first (so target could override). 
*/ +- reset_coproc_regs(vcpu, cp15_regs, ARRAY_SIZE(cp15_regs)); ++ reset_coproc_regs(vcpu, cp15_regs, ARRAY_SIZE(cp15_regs), bmap); + + table = get_target_table(vcpu->arch.target, &num); +- reset_coproc_regs(vcpu, table, num); ++ reset_coproc_regs(vcpu, table, num, bmap); + + for (num = 1; num < NR_CP15_REGS; num++) +- WARN(vcpu_cp15(vcpu, num) == 0x42424242, ++ WARN(!test_bit(num, bmap), + "Didn't reset vcpu_cp15(vcpu, %zi)", num); + } +diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c +index d112af75680b..6da2bbdb9648 100644 +--- a/arch/arm64/kvm/sys_regs.c ++++ b/arch/arm64/kvm/sys_regs.c +@@ -626,7 +626,7 @@ static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) + */ + val = ((pmcr & ~ARMV8_PMU_PMCR_MASK) + | (ARMV8_PMU_PMCR_MASK & 0xdecafbad)) & (~ARMV8_PMU_PMCR_E); +- __vcpu_sys_reg(vcpu, PMCR_EL0) = val; ++ __vcpu_sys_reg(vcpu, r->reg) = val; + } + + static bool check_pmu_access_disabled(struct kvm_vcpu *vcpu, u64 flags) +@@ -968,13 +968,13 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, + /* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */ + #define DBG_BCR_BVR_WCR_WVR_EL1(n) \ + { SYS_DESC(SYS_DBGBVRn_EL1(n)), \ +- trap_bvr, reset_bvr, n, 0, get_bvr, set_bvr }, \ ++ trap_bvr, reset_bvr, 0, 0, get_bvr, set_bvr }, \ + { SYS_DESC(SYS_DBGBCRn_EL1(n)), \ +- trap_bcr, reset_bcr, n, 0, get_bcr, set_bcr }, \ ++ trap_bcr, reset_bcr, 0, 0, get_bcr, set_bcr }, \ + { SYS_DESC(SYS_DBGWVRn_EL1(n)), \ +- trap_wvr, reset_wvr, n, 0, get_wvr, set_wvr }, \ ++ trap_wvr, reset_wvr, 0, 0, get_wvr, set_wvr }, \ + { SYS_DESC(SYS_DBGWCRn_EL1(n)), \ +- trap_wcr, reset_wcr, n, 0, get_wcr, set_wcr } ++ trap_wcr, reset_wcr, 0, 0, get_wcr, set_wcr } + + /* Macro to expand the PMEVCNTRn_EL0 register */ + #define PMU_PMEVCNTR_EL0(n) \ +@@ -1359,7 +1359,7 @@ static const struct sys_reg_desc sys_reg_descs[] = { + + { SYS_DESC(SYS_CSSELR_EL1), NULL, reset_unknown, CSSELR_EL1 }, + +- { SYS_DESC(SYS_PMCR_EL0), access_pmcr, reset_pmcr, }, ++ { SYS_DESC(SYS_PMCR_EL0), access_pmcr, reset_pmcr, PMCR_EL0 }, + { SYS_DESC(SYS_PMCNTENSET_EL0), access_pmcnten, reset_unknown, PMCNTENSET_EL0 }, + { SYS_DESC(SYS_PMCNTENCLR_EL0), access_pmcnten, NULL, PMCNTENSET_EL0 }, + { SYS_DESC(SYS_PMOVSCLR_EL0), access_pmovs, NULL, PMOVSSET_EL0 }, +@@ -2072,13 +2072,19 @@ static int emulate_sys_reg(struct kvm_vcpu *vcpu, + } + + static void reset_sys_reg_descs(struct kvm_vcpu *vcpu, +- const struct sys_reg_desc *table, size_t num) ++ const struct sys_reg_desc *table, size_t num, ++ unsigned long *bmap) + { + unsigned long i; + + for (i = 0; i < num; i++) +- if (table[i].reset) ++ if (table[i].reset) { ++ int reg = table[i].reg; ++ + table[i].reset(vcpu, &table[i]); ++ if (reg > 0 && reg < NR_SYS_REGS) ++ set_bit(reg, bmap); ++ } + } + + /** +@@ -2576,18 +2582,16 @@ void kvm_reset_sys_regs(struct kvm_vcpu *vcpu) + { + size_t num; + const struct sys_reg_desc *table; +- +- /* Catch someone adding a register without putting in reset entry. */ +- memset(&vcpu->arch.ctxt.sys_regs, 0x42, sizeof(vcpu->arch.ctxt.sys_regs)); ++ DECLARE_BITMAP(bmap, NR_SYS_REGS) = { 0, }; + + /* Generic chip reset first (so target could override). 
*/ +- reset_sys_reg_descs(vcpu, sys_reg_descs, ARRAY_SIZE(sys_reg_descs)); ++ reset_sys_reg_descs(vcpu, sys_reg_descs, ARRAY_SIZE(sys_reg_descs), bmap); + + table = get_target_table(vcpu->arch.target, true, &num); +- reset_sys_reg_descs(vcpu, table, num); ++ reset_sys_reg_descs(vcpu, table, num, bmap); + + for (num = 1; num < NR_SYS_REGS; num++) { +- if (WARN(__vcpu_sys_reg(vcpu, num) == 0x4242424242424242, ++ if (WARN(!test_bit(num, bmap), + "Didn't reset __vcpu_sys_reg(%zi)\n", num)) + break; + } +diff --git a/arch/mips/kernel/cacheinfo.c b/arch/mips/kernel/cacheinfo.c +index 97d5239ca47b..428ef2189203 100644 +--- a/arch/mips/kernel/cacheinfo.c ++++ b/arch/mips/kernel/cacheinfo.c +@@ -80,6 +80,8 @@ static int __populate_cache_leaves(unsigned int cpu) + if (c->tcache.waysize) + populate_cache(tcache, this_leaf, 3, CACHE_TYPE_UNIFIED); + ++ this_cpu_ci->cpu_map_populated = true; ++ + return 0; + } + +diff --git a/arch/mips/kernel/i8253.c b/arch/mips/kernel/i8253.c +index 5f209f111e59..df7ddd246eaa 100644 +--- a/arch/mips/kernel/i8253.c ++++ b/arch/mips/kernel/i8253.c +@@ -32,7 +32,8 @@ void __init setup_pit_timer(void) + + static int __init init_pit_clocksource(void) + { +- if (num_possible_cpus() > 1) /* PIT does not scale! */ ++ if (num_possible_cpus() > 1 || /* PIT does not scale! */ ++ !clockevent_state_periodic(&i8253_clockevent)) + return 0; + + return clocksource_i8253_init(); +diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S +index 262ba9481781..1bf6aaefd26a 100644 +--- a/arch/powerpc/kernel/misc_64.S ++++ b/arch/powerpc/kernel/misc_64.S +@@ -135,7 +135,7 @@ _GLOBAL_TOC(flush_dcache_range) + subf r8,r6,r4 /* compute length */ + add r8,r8,r5 /* ensure we get enough */ + lwz r9,DCACHEL1LOGBLOCKSIZE(r10) /* Get log-2 of dcache block size */ +- srw. r8,r8,r9 /* compute line count */ ++ srd. r8,r8,r9 /* compute line count */ + beqlr /* nothing to do? */ + mtctr r8 + 0: dcbst 0,r6 +@@ -153,7 +153,7 @@ _GLOBAL(flush_inval_dcache_range) + subf r8,r6,r4 /* compute length */ + add r8,r8,r5 /* ensure we get enough */ + lwz r9,DCACHEL1LOGBLOCKSIZE(r10)/* Get log-2 of dcache block size */ +- srw. r8,r8,r9 /* compute line count */ ++ srd. r8,r8,r9 /* compute line count */ + beqlr /* nothing to do? */ + sync + isync +diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S +index b43f8d33a369..18ede6e806b9 100644 +--- a/arch/s390/kernel/vmlinux.lds.S ++++ b/arch/s390/kernel/vmlinux.lds.S +@@ -31,10 +31,9 @@ PHDRS { + SECTIONS + { + . = 0x100000; +- _stext = .; /* Start of text section */ + .text : { +- /* Text and read-only data */ +- _text = .; ++ _stext = .; /* Start of text section */ ++ _text = .; /* Text and read-only data */ + HEAD_TEXT + TEXT_TEXT + SCHED_TEXT +@@ -46,11 +45,10 @@ SECTIONS + *(.text.*_indirect_*) + *(.fixup) + *(.gnu.warning) ++ . = ALIGN(PAGE_SIZE); ++ _etext = .; /* End of text section */ + } :text = 0x0700 + +- . = ALIGN(PAGE_SIZE); +- _etext = .; /* End of text section */ +- + NOTES :text :note + + .dummy : { *(.dummy) } :data +diff --git a/arch/x86/include/asm/bootparam_utils.h b/arch/x86/include/asm/bootparam_utils.h +index a07ffd23e4dd..d3983fdf1012 100644 +--- a/arch/x86/include/asm/bootparam_utils.h ++++ b/arch/x86/include/asm/bootparam_utils.h +@@ -18,6 +18,20 @@ + * Note: efi_info is commonly left uninitialized, but that field has a + * private magic, so it is better to leave it unchanged. 
+ */ ++ ++#define sizeof_mbr(type, member) ({ sizeof(((type *)0)->member); }) ++ ++#define BOOT_PARAM_PRESERVE(struct_member) \ ++ { \ ++ .start = offsetof(struct boot_params, struct_member), \ ++ .len = sizeof_mbr(struct boot_params, struct_member), \ ++ } ++ ++struct boot_params_to_save { ++ unsigned int start; ++ unsigned int len; ++}; ++ + static void sanitize_boot_params(struct boot_params *boot_params) + { + /* +@@ -36,19 +50,40 @@ static void sanitize_boot_params(struct boot_params *boot_params) + */ + if (boot_params->sentinel) { + /* fields in boot_params are left uninitialized, clear them */ +- memset(&boot_params->ext_ramdisk_image, 0, +- (char *)&boot_params->efi_info - +- (char *)&boot_params->ext_ramdisk_image); +- memset(&boot_params->kbd_status, 0, +- (char *)&boot_params->hdr - +- (char *)&boot_params->kbd_status); +- memset(&boot_params->_pad7[0], 0, +- (char *)&boot_params->edd_mbr_sig_buffer[0] - +- (char *)&boot_params->_pad7[0]); +- memset(&boot_params->_pad8[0], 0, +- (char *)&boot_params->eddbuf[0] - +- (char *)&boot_params->_pad8[0]); +- memset(&boot_params->_pad9[0], 0, sizeof(boot_params->_pad9)); ++ static struct boot_params scratch; ++ char *bp_base = (char *)boot_params; ++ char *save_base = (char *)&scratch; ++ int i; ++ ++ const struct boot_params_to_save to_save[] = { ++ BOOT_PARAM_PRESERVE(screen_info), ++ BOOT_PARAM_PRESERVE(apm_bios_info), ++ BOOT_PARAM_PRESERVE(tboot_addr), ++ BOOT_PARAM_PRESERVE(ist_info), ++ BOOT_PARAM_PRESERVE(hd0_info), ++ BOOT_PARAM_PRESERVE(hd1_info), ++ BOOT_PARAM_PRESERVE(sys_desc_table), ++ BOOT_PARAM_PRESERVE(olpc_ofw_header), ++ BOOT_PARAM_PRESERVE(efi_info), ++ BOOT_PARAM_PRESERVE(alt_mem_k), ++ BOOT_PARAM_PRESERVE(scratch), ++ BOOT_PARAM_PRESERVE(e820_entries), ++ BOOT_PARAM_PRESERVE(eddbuf_entries), ++ BOOT_PARAM_PRESERVE(edd_mbr_sig_buf_entries), ++ BOOT_PARAM_PRESERVE(edd_mbr_sig_buffer), ++ BOOT_PARAM_PRESERVE(hdr), ++ BOOT_PARAM_PRESERVE(e820_table), ++ BOOT_PARAM_PRESERVE(eddbuf), ++ }; ++ ++ memset(&scratch, 0, sizeof(scratch)); ++ ++ for (i = 0; i < ARRAY_SIZE(to_save); i++) { ++ memcpy(save_base + to_save[i].start, ++ bp_base + to_save[i].start, to_save[i].len); ++ } ++ ++ memcpy(boot_params, save_base, sizeof(*boot_params)); + } + } + +diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h +index f85f43db9225..a1d22e4428f6 100644 +--- a/arch/x86/include/asm/msr-index.h ++++ b/arch/x86/include/asm/msr-index.h +@@ -334,6 +334,7 @@ + #define MSR_AMD64_PATCH_LEVEL 0x0000008b + #define MSR_AMD64_TSC_RATIO 0xc0000104 + #define MSR_AMD64_NB_CFG 0xc001001f ++#define MSR_AMD64_CPUID_FN_1 0xc0011004 + #define MSR_AMD64_PATCH_LOADER 0xc0010020 + #define MSR_AMD64_OSVW_ID_LENGTH 0xc0010140 + #define MSR_AMD64_OSVW_STATUS 0xc0010141 +diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h +index 599c273f5d00..28cb2b31527a 100644 +--- a/arch/x86/include/asm/nospec-branch.h ++++ b/arch/x86/include/asm/nospec-branch.h +@@ -202,7 +202,7 @@ + " lfence;\n" \ + " jmp 902b;\n" \ + " .align 16\n" \ +- "903: addl $4, %%esp;\n" \ ++ "903: lea 4(%%esp), %%esp;\n" \ + " pushl %[thunk_target];\n" \ + " ret;\n" \ + " .align 16\n" \ +diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c +index 272a12865b2a..b316bd61a6ac 100644 +--- a/arch/x86/kernel/apic/apic.c ++++ b/arch/x86/kernel/apic/apic.c +@@ -715,7 +715,7 @@ static __initdata unsigned long lapic_cal_pm1, lapic_cal_pm2; + static __initdata unsigned long lapic_cal_j1, lapic_cal_j2; + + /* +- * Temporary 
interrupt handler. ++ * Temporary interrupt handler and polled calibration function. + */ + static void __init lapic_cal_handler(struct clock_event_device *dev) + { +@@ -799,7 +799,8 @@ calibrate_by_pmtimer(long deltapm, long *delta, long *deltatsc) + static int __init calibrate_APIC_clock(void) + { + struct clock_event_device *levt = this_cpu_ptr(&lapic_events); +- void (*real_handler)(struct clock_event_device *dev); ++ u64 tsc_perj = 0, tsc_start = 0; ++ unsigned long jif_start; + unsigned long deltaj; + long delta, deltatsc; + int pm_referenced = 0; +@@ -830,28 +831,64 @@ static int __init calibrate_APIC_clock(void) + apic_printk(APIC_VERBOSE, "Using local APIC timer interrupts.\n" + "calibrating APIC timer ...\n"); + ++ /* ++ * There are platforms w/o global clockevent devices. Instead of ++ * making the calibration conditional on that, use a polling based ++ * approach everywhere. ++ */ + local_irq_disable(); + +- /* Replace the global interrupt handler */ +- real_handler = global_clock_event->event_handler; +- global_clock_event->event_handler = lapic_cal_handler; +- + /* + * Setup the APIC counter to maximum. There is no way the lapic + * can underflow in the 100ms detection time frame + */ + __setup_APIC_LVTT(0xffffffff, 0, 0); + +- /* Let the interrupts run */ ++ /* ++ * Methods to terminate the calibration loop: ++ * 1) Global clockevent if available (jiffies) ++ * 2) TSC if available and frequency is known ++ */ ++ jif_start = READ_ONCE(jiffies); ++ ++ if (tsc_khz) { ++ tsc_start = rdtsc(); ++ tsc_perj = div_u64((u64)tsc_khz * 1000, HZ); ++ } ++ ++ /* ++ * Enable interrupts so the tick can fire, if a global ++ * clockevent device is available ++ */ + local_irq_enable(); + +- while (lapic_cal_loops <= LAPIC_CAL_LOOPS) +- cpu_relax(); ++ while (lapic_cal_loops <= LAPIC_CAL_LOOPS) { ++ /* Wait for a tick to elapse */ ++ while (1) { ++ if (tsc_khz) { ++ u64 tsc_now = rdtsc(); ++ if ((tsc_now - tsc_start) >= tsc_perj) { ++ tsc_start += tsc_perj; ++ break; ++ } ++ } else { ++ unsigned long jif_now = READ_ONCE(jiffies); + +- local_irq_disable(); ++ if (time_after(jif_now, jif_start)) { ++ jif_start = jif_now; ++ break; ++ } ++ } ++ cpu_relax(); ++ } + +- /* Restore the real event handler */ +- global_clock_event->event_handler = real_handler; ++ /* Invoke the calibration routine */ ++ local_irq_disable(); ++ lapic_cal_handler(NULL); ++ local_irq_enable(); ++ } ++ ++ local_irq_disable(); + + /* Build delta t1-t2 as apic timer counts down */ + delta = lapic_cal_t1 - lapic_cal_t2; +@@ -904,10 +941,11 @@ static int __init calibrate_APIC_clock(void) + levt->features &= ~CLOCK_EVT_FEAT_DUMMY; + + /* +- * PM timer calibration failed or not turned on +- * so lets try APIC timer based calibration ++ * PM timer calibration failed or not turned on so lets try APIC ++ * timer based calibration, if a global clockevent device is ++ * available. + */ +- if (!pm_referenced) { ++ if (!pm_referenced && global_clock_event) { + apic_printk(APIC_VERBOSE, "... 
verify APIC timer\n"); + + /* +diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c +index da1f5e78363e..f86f912ce215 100644 +--- a/arch/x86/kernel/cpu/amd.c ++++ b/arch/x86/kernel/cpu/amd.c +@@ -799,6 +799,64 @@ static void init_amd_ln(struct cpuinfo_x86 *c) + msr_set_bit(MSR_AMD64_DE_CFG, 31); + } + ++static bool rdrand_force; ++ ++static int __init rdrand_cmdline(char *str) ++{ ++ if (!str) ++ return -EINVAL; ++ ++ if (!strcmp(str, "force")) ++ rdrand_force = true; ++ else ++ return -EINVAL; ++ ++ return 0; ++} ++early_param("rdrand", rdrand_cmdline); ++ ++static void clear_rdrand_cpuid_bit(struct cpuinfo_x86 *c) ++{ ++ /* ++ * Saving of the MSR used to hide the RDRAND support during ++ * suspend/resume is done by arch/x86/power/cpu.c, which is ++ * dependent on CONFIG_PM_SLEEP. ++ */ ++ if (!IS_ENABLED(CONFIG_PM_SLEEP)) ++ return; ++ ++ /* ++ * The nordrand option can clear X86_FEATURE_RDRAND, so check for ++ * RDRAND support using the CPUID function directly. ++ */ ++ if (!(cpuid_ecx(1) & BIT(30)) || rdrand_force) ++ return; ++ ++ msr_clear_bit(MSR_AMD64_CPUID_FN_1, 62); ++ ++ /* ++ * Verify that the CPUID change has occurred in case the kernel is ++ * running virtualized and the hypervisor doesn't support the MSR. ++ */ ++ if (cpuid_ecx(1) & BIT(30)) { ++ pr_info_once("BIOS may not properly restore RDRAND after suspend, but hypervisor does not support hiding RDRAND via CPUID.\n"); ++ return; ++ } ++ ++ clear_cpu_cap(c, X86_FEATURE_RDRAND); ++ pr_info_once("BIOS may not properly restore RDRAND after suspend, hiding RDRAND via CPUID. Use rdrand=force to reenable.\n"); ++} ++ ++static void init_amd_jg(struct cpuinfo_x86 *c) ++{ ++ /* ++ * Some BIOS implementations do not restore proper RDRAND support ++ * across suspend and resume. Check on whether to hide the RDRAND ++ * instruction support via CPUID. ++ */ ++ clear_rdrand_cpuid_bit(c); ++} ++ + static void init_amd_bd(struct cpuinfo_x86 *c) + { + u64 value; +@@ -813,6 +871,13 @@ static void init_amd_bd(struct cpuinfo_x86 *c) + wrmsrl_safe(MSR_F15H_IC_CFG, value); + } + } ++ ++ /* ++ * Some BIOS implementations do not restore proper RDRAND support ++ * across suspend and resume. Check on whether to hide the RDRAND ++ * instruction support via CPUID. 
++ */ ++ clear_rdrand_cpuid_bit(c); + } + + static void init_amd_zn(struct cpuinfo_x86 *c) +@@ -855,6 +920,7 @@ static void init_amd(struct cpuinfo_x86 *c) + case 0x10: init_amd_gh(c); break; + case 0x12: init_amd_ln(c); break; + case 0x15: init_amd_bd(c); break; ++ case 0x16: init_amd_jg(c); break; + case 0x17: init_amd_zn(c); break; + } + +diff --git a/arch/x86/lib/cpu.c b/arch/x86/lib/cpu.c +index 2dd1fe13a37b..19f707992db2 100644 +--- a/arch/x86/lib/cpu.c ++++ b/arch/x86/lib/cpu.c +@@ -1,5 +1,6 @@ + #include <linux/types.h> + #include <linux/export.h> ++#include <asm/cpu.h> + + unsigned int x86_family(unsigned int sig) + { +diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c +index 513ce09e9950..3aa3149df07f 100644 +--- a/arch/x86/power/cpu.c ++++ b/arch/x86/power/cpu.c +@@ -13,6 +13,7 @@ + #include <linux/smp.h> + #include <linux/perf_event.h> + #include <linux/tboot.h> ++#include <linux/dmi.h> + + #include <asm/pgtable.h> + #include <asm/proto.h> +@@ -24,7 +25,7 @@ + #include <asm/debugreg.h> + #include <asm/cpu.h> + #include <asm/mmu_context.h> +-#include <linux/dmi.h> ++#include <asm/cpu_device_id.h> + + #ifdef CONFIG_X86_32 + __visible unsigned long saved_context_ebx; +@@ -398,15 +399,14 @@ static int __init bsp_pm_check_init(void) + + core_initcall(bsp_pm_check_init); + +-static int msr_init_context(const u32 *msr_id, const int total_num) ++static int msr_build_context(const u32 *msr_id, const int num) + { +- int i = 0; ++ struct saved_msrs *saved_msrs = &saved_context.saved_msrs; + struct saved_msr *msr_array; ++ int total_num; ++ int i, j; + +- if (saved_context.saved_msrs.array || saved_context.saved_msrs.num > 0) { +- pr_err("x86/pm: MSR quirk already applied, please check your DMI match table.\n"); +- return -EINVAL; +- } ++ total_num = saved_msrs->num + num; + + msr_array = kmalloc_array(total_num, sizeof(struct saved_msr), GFP_KERNEL); + if (!msr_array) { +@@ -414,19 +414,30 @@ static int msr_init_context(const u32 *msr_id, const int total_num) + return -ENOMEM; + } + +- for (i = 0; i < total_num; i++) { +- msr_array[i].info.msr_no = msr_id[i]; ++ if (saved_msrs->array) { ++ /* ++ * Multiple callbacks can invoke this function, so copy any ++ * MSR save requests from previous invocations. ++ */ ++ memcpy(msr_array, saved_msrs->array, ++ sizeof(struct saved_msr) * saved_msrs->num); ++ ++ kfree(saved_msrs->array); ++ } ++ ++ for (i = saved_msrs->num, j = 0; i < total_num; i++, j++) { ++ msr_array[i].info.msr_no = msr_id[j]; + msr_array[i].valid = false; + msr_array[i].info.reg.q = 0; + } +- saved_context.saved_msrs.num = total_num; +- saved_context.saved_msrs.array = msr_array; ++ saved_msrs->num = total_num; ++ saved_msrs->array = msr_array; + + return 0; + } + + /* +- * The following section is a quirk framework for problematic BIOSen: ++ * The following sections are a quirk framework for problematic BIOSen: + * Sometimes MSRs are modified by the BIOSen after suspended to + * RAM, this might cause unexpected behavior after wakeup. 
+ * Thus we save/restore these specified MSRs across suspend/resume +@@ -441,7 +452,7 @@ static int msr_initialize_bdw(const struct dmi_system_id *d) + u32 bdw_msr_id[] = { MSR_IA32_THERM_CONTROL }; + + pr_info("x86/pm: %s detected, MSR saving is needed during suspending.\n", d->ident); +- return msr_init_context(bdw_msr_id, ARRAY_SIZE(bdw_msr_id)); ++ return msr_build_context(bdw_msr_id, ARRAY_SIZE(bdw_msr_id)); + } + + static const struct dmi_system_id msr_save_dmi_table[] = { +@@ -456,9 +467,58 @@ static const struct dmi_system_id msr_save_dmi_table[] = { + {} + }; + ++static int msr_save_cpuid_features(const struct x86_cpu_id *c) ++{ ++ u32 cpuid_msr_id[] = { ++ MSR_AMD64_CPUID_FN_1, ++ }; ++ ++ pr_info("x86/pm: family %#hx cpu detected, MSR saving is needed during suspending.\n", ++ c->family); ++ ++ return msr_build_context(cpuid_msr_id, ARRAY_SIZE(cpuid_msr_id)); ++} ++ ++static const struct x86_cpu_id msr_save_cpu_table[] = { ++ { ++ .vendor = X86_VENDOR_AMD, ++ .family = 0x15, ++ .model = X86_MODEL_ANY, ++ .feature = X86_FEATURE_ANY, ++ .driver_data = (kernel_ulong_t)msr_save_cpuid_features, ++ }, ++ { ++ .vendor = X86_VENDOR_AMD, ++ .family = 0x16, ++ .model = X86_MODEL_ANY, ++ .feature = X86_FEATURE_ANY, ++ .driver_data = (kernel_ulong_t)msr_save_cpuid_features, ++ }, ++ {} ++}; ++ ++typedef int (*pm_cpu_match_t)(const struct x86_cpu_id *); ++static int pm_cpu_check(const struct x86_cpu_id *c) ++{ ++ const struct x86_cpu_id *m; ++ int ret = 0; ++ ++ m = x86_match_cpu(msr_save_cpu_table); ++ if (m) { ++ pm_cpu_match_t fn; ++ ++ fn = (pm_cpu_match_t)m->driver_data; ++ ret = fn(m); ++ } ++ ++ return ret; ++} ++ + static int pm_check_save_msr(void) + { + dmi_check_system(msr_save_dmi_table); ++ pm_cpu_check(msr_save_cpu_table); ++ + return 0; + } + +diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c +index becd793a258c..d8d2ac294b0c 100644 +--- a/block/bfq-iosched.c ++++ b/block/bfq-iosched.c +@@ -1886,9 +1886,14 @@ static void bfq_request_merged(struct request_queue *q, struct request *req, + blk_rq_pos(container_of(rb_prev(&req->rb_node), + struct request, rb_node))) { + struct bfq_queue *bfqq = bfq_init_rq(req); +- struct bfq_data *bfqd = bfqq->bfqd; ++ struct bfq_data *bfqd; + struct request *prev, *next_rq; + ++ if (!bfqq) ++ return; ++ ++ bfqd = bfqq->bfqd; ++ + /* Reposition request in its sort_list */ + elv_rb_del(&bfqq->sort_list, req); + elv_rb_add(&bfqq->sort_list, req); +@@ -1930,6 +1935,9 @@ static void bfq_requests_merged(struct request_queue *q, struct request *rq, + struct bfq_queue *bfqq = bfq_init_rq(rq), + *next_bfqq = bfq_init_rq(next); + ++ if (!bfqq) ++ return; ++ + /* + * If next and rq belong to the same bfq_queue and next is older + * than rq, then reposition rq in the fifo (by substituting next +@@ -4590,12 +4598,12 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, + + spin_lock_irq(&bfqd->lock); + bfqq = bfq_init_rq(rq); +- if (at_head || blk_rq_is_passthrough(rq)) { ++ if (!bfqq || at_head || blk_rq_is_passthrough(rq)) { + if (at_head) + list_add(&rq->queuelist, &bfqd->dispatch); + else + list_add_tail(&rq->queuelist, &bfqd->dispatch); +- } else { /* bfqq is assumed to be non null here */ ++ } else { + idle_timer_disabled = __bfq_insert_request(bfqd, rq); + /* + * Update bfqq, because, if a queue merge has occurred +diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c +index 1984fc78c750..3a64fa4aaf7e 100644 +--- a/drivers/ata/libata-scsi.c ++++ b/drivers/ata/libata-scsi.c +@@ -1803,6 +1803,21 @@ 
nothing_to_do: + return 1; + } + ++static bool ata_check_nblocks(struct scsi_cmnd *scmd, u32 n_blocks) ++{ ++ struct request *rq = scmd->request; ++ u32 req_blocks; ++ ++ if (!blk_rq_is_passthrough(rq)) ++ return true; ++ ++ req_blocks = blk_rq_bytes(rq) / scmd->device->sector_size; ++ if (n_blocks > req_blocks) ++ return false; ++ ++ return true; ++} ++ + /** + * ata_scsi_rw_xlat - Translate SCSI r/w command into an ATA one + * @qc: Storage for translated ATA taskfile +@@ -1847,6 +1862,8 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc) + scsi_10_lba_len(cdb, &block, &n_block); + if (cdb[1] & (1 << 3)) + tf_flags |= ATA_TFLAG_FUA; ++ if (!ata_check_nblocks(scmd, n_block)) ++ goto invalid_fld; + break; + case READ_6: + case WRITE_6: +@@ -1861,6 +1878,8 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc) + */ + if (!n_block) + n_block = 256; ++ if (!ata_check_nblocks(scmd, n_block)) ++ goto invalid_fld; + break; + case READ_16: + case WRITE_16: +@@ -1871,6 +1890,8 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc) + scsi_16_lba_len(cdb, &block, &n_block); + if (cdb[1] & (1 << 3)) + tf_flags |= ATA_TFLAG_FUA; ++ if (!ata_check_nblocks(scmd, n_block)) ++ goto invalid_fld; + break; + default: + DPRINTK("no-byte command\n"); +diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c +index c5ea0fc635e5..873cc0906055 100644 +--- a/drivers/ata/libata-sff.c ++++ b/drivers/ata/libata-sff.c +@@ -674,6 +674,10 @@ static void ata_pio_sector(struct ata_queued_cmd *qc) + unsigned int offset; + unsigned char *buf; + ++ if (!qc->cursg) { ++ qc->curbytes = qc->nbytes; ++ return; ++ } + if (qc->curbytes == qc->nbytes - qc->sect_size) + ap->hsm_task_state = HSM_ST_LAST; + +@@ -699,6 +703,8 @@ static void ata_pio_sector(struct ata_queued_cmd *qc) + + if (qc->cursg_ofs == qc->cursg->length) { + qc->cursg = sg_next(qc->cursg); ++ if (!qc->cursg) ++ ap->hsm_task_state = HSM_ST_LAST; + qc->cursg_ofs = 0; + } + } +diff --git a/drivers/clk/socfpga/clk-periph-s10.c b/drivers/clk/socfpga/clk-periph-s10.c +index 568f59b58ddf..e7c877d354c7 100644 +--- a/drivers/clk/socfpga/clk-periph-s10.c ++++ b/drivers/clk/socfpga/clk-periph-s10.c +@@ -37,7 +37,7 @@ static unsigned long clk_peri_cnt_clk_recalc_rate(struct clk_hw *hwclk, + if (socfpgaclk->fixed_div) { + div = socfpgaclk->fixed_div; + } else { +- if (!socfpgaclk->bypass_reg) ++ if (socfpgaclk->hw.reg) + div = ((readl(socfpgaclk->hw.reg) & 0x7ff) + 1); + } + +diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c +index b308ce92685d..53395852f012 100644 +--- a/drivers/gpio/gpiolib.c ++++ b/drivers/gpio/gpiolib.c +@@ -1082,9 +1082,11 @@ static long gpio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) + if (test_bit(FLAG_ACTIVE_LOW, &desc->flags)) + lineinfo.flags |= GPIOLINE_FLAG_ACTIVE_LOW; + if (test_bit(FLAG_OPEN_DRAIN, &desc->flags)) +- lineinfo.flags |= GPIOLINE_FLAG_OPEN_DRAIN; ++ lineinfo.flags |= (GPIOLINE_FLAG_OPEN_DRAIN | ++ GPIOLINE_FLAG_IS_OUT); + if (test_bit(FLAG_OPEN_SOURCE, &desc->flags)) +- lineinfo.flags |= GPIOLINE_FLAG_OPEN_SOURCE; ++ lineinfo.flags |= (GPIOLINE_FLAG_OPEN_SOURCE | ++ GPIOLINE_FLAG_IS_OUT); + + if (copy_to_user(ip, &lineinfo, sizeof(lineinfo))) + return -EFAULT; +diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c +index b4e7404fe660..a11637b0f6cc 100644 +--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c ++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c +@@ -40,8 +40,7 @@ nvkm_i2c_aux_i2c_xfer(struct 
i2c_adapter *adap, struct i2c_msg *msgs, int num) + u8 *ptr = msg->buf; + + while (remaining) { +- u8 cnt = (remaining > 16) ? 16 : remaining; +- u8 cmd; ++ u8 cnt, retries, cmd; + + if (msg->flags & I2C_M_RD) + cmd = 1; +@@ -51,10 +50,19 @@ nvkm_i2c_aux_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) + if (mcnt || remaining > 16) + cmd |= 4; /* MOT */ + +- ret = aux->func->xfer(aux, true, cmd, msg->addr, ptr, &cnt); +- if (ret < 0) { +- nvkm_i2c_aux_release(aux); +- return ret; ++ for (retries = 0, cnt = 0; ++ retries < 32 && !cnt; ++ retries++) { ++ cnt = min_t(u8, remaining, 16); ++ ret = aux->func->xfer(aux, true, cmd, ++ msg->addr, ptr, &cnt); ++ if (ret < 0) ++ goto out; ++ } ++ if (!cnt) { ++ AUX_TRACE(aux, "no data after 32 retries"); ++ ret = -EIO; ++ goto out; + } + + ptr += cnt; +@@ -64,8 +72,10 @@ nvkm_i2c_aux_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) + msg++; + } + ++ ret = num; ++out: + nvkm_i2c_aux_release(aux); +- return num; ++ return ret; + } + + static u32 +diff --git a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c +index 080f05352195..6a4da3a0ff1c 100644 +--- a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c ++++ b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c +@@ -436,7 +436,7 @@ static int rockchip_dp_resume(struct device *dev) + + static const struct dev_pm_ops rockchip_dp_pm_ops = { + #ifdef CONFIG_PM_SLEEP +- .suspend = rockchip_dp_suspend, ++ .suspend_late = rockchip_dp_suspend, + .resume_early = rockchip_dp_resume, + #endif + }; +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c +index e4e09d47c5c0..59e9d05ab928 100644 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c +@@ -389,8 +389,10 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg, + break; + } + +- if (retries == RETRIES) ++ if (retries == RETRIES) { ++ kfree(reply); + return -EINVAL; ++ } + + *msg_len = reply_len; + *msg = reply; +diff --git a/drivers/hid/hid-a4tech.c b/drivers/hid/hid-a4tech.c +index 9428ea7cdf8a..c52bd163abb3 100644 +--- a/drivers/hid/hid-a4tech.c ++++ b/drivers/hid/hid-a4tech.c +@@ -26,12 +26,36 @@ + #define A4_2WHEEL_MOUSE_HACK_7 0x01 + #define A4_2WHEEL_MOUSE_HACK_B8 0x02 + ++#define A4_WHEEL_ORIENTATION (HID_UP_GENDESK | 0x000000b8) ++ + struct a4tech_sc { + unsigned long quirks; + unsigned int hw_wheel; + __s32 delayed_value; + }; + ++static int a4_input_mapping(struct hid_device *hdev, struct hid_input *hi, ++ struct hid_field *field, struct hid_usage *usage, ++ unsigned long **bit, int *max) ++{ ++ struct a4tech_sc *a4 = hid_get_drvdata(hdev); ++ ++ if (a4->quirks & A4_2WHEEL_MOUSE_HACK_B8 && ++ usage->hid == A4_WHEEL_ORIENTATION) { ++ /* ++ * We do not want to have this usage mapped to anything as it's ++ * nonstandard and doesn't really behave like an HID report. ++ * It's only selecting the orientation (vertical/horizontal) of ++ * the previous mouse wheel report. The input_events will be ++ * generated once both reports are recorded in a4_event(). 
++ */ ++ return -1; ++ } ++ ++ return 0; ++ ++} ++ + static int a4_input_mapped(struct hid_device *hdev, struct hid_input *hi, + struct hid_field *field, struct hid_usage *usage, + unsigned long **bit, int *max) +@@ -53,8 +77,7 @@ static int a4_event(struct hid_device *hdev, struct hid_field *field, + struct a4tech_sc *a4 = hid_get_drvdata(hdev); + struct input_dev *input; + +- if (!(hdev->claimed & HID_CLAIMED_INPUT) || !field->hidinput || +- !usage->type) ++ if (!(hdev->claimed & HID_CLAIMED_INPUT) || !field->hidinput) + return 0; + + input = field->hidinput->input; +@@ -65,7 +88,7 @@ static int a4_event(struct hid_device *hdev, struct hid_field *field, + return 1; + } + +- if (usage->hid == 0x000100b8) { ++ if (usage->hid == A4_WHEEL_ORIENTATION) { + input_event(input, EV_REL, value ? REL_HWHEEL : + REL_WHEEL, a4->delayed_value); + return 1; +@@ -129,6 +152,7 @@ MODULE_DEVICE_TABLE(hid, a4_devices); + static struct hid_driver a4_driver = { + .name = "a4tech", + .id_table = a4_devices, ++ .input_mapping = a4_input_mapping, + .input_mapped = a4_input_mapped, + .event = a4_event, + .probe = a4_probe, +diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h +index 2898bb061945..4a2fa57ddcb8 100644 +--- a/drivers/hid/hid-ids.h ++++ b/drivers/hid/hid-ids.h +@@ -971,6 +971,7 @@ + #define USB_DEVICE_ID_SAITEK_RAT7 0x0cd7 + #define USB_DEVICE_ID_SAITEK_RAT9 0x0cfa + #define USB_DEVICE_ID_SAITEK_MMO7 0x0cd0 ++#define USB_DEVICE_ID_SAITEK_X52 0x075c + + #define USB_VENDOR_ID_SAMSUNG 0x0419 + #define USB_DEVICE_ID_SAMSUNG_IR_REMOTE 0x0001 +diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c +index d29c7c9cd185..e553f6fae7a4 100644 +--- a/drivers/hid/hid-quirks.c ++++ b/drivers/hid/hid-quirks.c +@@ -143,6 +143,7 @@ static const struct hid_device_id hid_quirks[] = { + { HID_USB_DEVICE(USB_VENDOR_ID_RETROUSB, USB_DEVICE_ID_RETROUSB_SNES_RETROPAD), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE }, + { HID_USB_DEVICE(USB_VENDOR_ID_RETROUSB, USB_DEVICE_ID_RETROUSB_SNES_RETROPORT), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE }, + { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RUMBLEPAD), HID_QUIRK_BADPAD }, ++ { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_X52), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE }, + { HID_USB_DEVICE(USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD2), HID_QUIRK_NO_INIT_REPORTS }, + { HID_USB_DEVICE(USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD), HID_QUIRK_NO_INIT_REPORTS }, + { HID_USB_DEVICE(USB_VENDOR_ID_SENNHEISER, USB_DEVICE_ID_SENNHEISER_BTD500USB), HID_QUIRK_NOGET }, +diff --git a/drivers/hid/hid-tmff.c b/drivers/hid/hid-tmff.c +index bea8def64f43..30b8c3256c99 100644 +--- a/drivers/hid/hid-tmff.c ++++ b/drivers/hid/hid-tmff.c +@@ -34,6 +34,8 @@ + + #include "hid-ids.h" + ++#define THRUSTMASTER_DEVICE_ID_2_IN_1_DT 0xb320 ++ + static const signed short ff_rumble[] = { + FF_RUMBLE, + -1 +@@ -88,6 +90,7 @@ static int tmff_play(struct input_dev *dev, void *data, + struct hid_field *ff_field = tmff->ff_field; + int x, y; + int left, right; /* Rumbling */ ++ int motor_swap; + + switch (effect->type) { + case FF_CONSTANT: +@@ -112,6 +115,13 @@ static int tmff_play(struct input_dev *dev, void *data, + ff_field->logical_minimum, + ff_field->logical_maximum); + ++ /* 2-in-1 strong motor is left */ ++ if (hid->product == THRUSTMASTER_DEVICE_ID_2_IN_1_DT) { ++ motor_swap = left; ++ left = right; ++ right = motor_swap; ++ } ++ + dbg_hid("(left,right)=(%08x, %08x)\n", left, right); + ff_field->value[0] = left; + ff_field->value[1] = right; +@@ 
-238,6 +248,8 @@ static const struct hid_device_id tm_devices[] = { + .driver_data = (unsigned long)ff_rumble }, + { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb304), /* FireStorm Dual Power 2 (and 3) */ + .driver_data = (unsigned long)ff_rumble }, ++ { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, THRUSTMASTER_DEVICE_ID_2_IN_1_DT), /* Dual Trigger 2-in-1 */ ++ .driver_data = (unsigned long)ff_rumble }, + { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb323), /* Dual Trigger 3-in-1 (PC Mode) */ + .driver_data = (unsigned long)ff_rumble }, + { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb324), /* Dual Trigger 3-in-1 (PS3 Mode) */ +diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c +index e56dc97fe4b6..50ef7b6cd195 100644 +--- a/drivers/hid/wacom_wac.c ++++ b/drivers/hid/wacom_wac.c +@@ -848,6 +848,8 @@ static int wacom_intuos_general(struct wacom_wac *wacom) + y >>= 1; + distance >>= 1; + } ++ if (features->type == INTUOSHT2) ++ distance = features->distance_max - distance; + input_report_abs(input, ABS_X, x); + input_report_abs(input, ABS_Y, y); + input_report_abs(input, ABS_DISTANCE, distance); +@@ -1061,7 +1063,7 @@ static int wacom_remote_irq(struct wacom_wac *wacom_wac, size_t len) + input_report_key(input, BTN_BASE2, (data[11] & 0x02)); + + if (data[12] & 0x80) +- input_report_abs(input, ABS_WHEEL, (data[12] & 0x7f)); ++ input_report_abs(input, ABS_WHEEL, (data[12] & 0x7f) - 1); + else + input_report_abs(input, ABS_WHEEL, 0); + +diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c +index 2f164bd74687..fdb0f832fade 100644 +--- a/drivers/hv/channel.c ++++ b/drivers/hv/channel.c +@@ -38,7 +38,7 @@ + + static unsigned long virt_to_hvpfn(void *addr) + { +- unsigned long paddr; ++ phys_addr_t paddr; + + if (is_vmalloc_addr(addr)) + paddr = page_to_phys(vmalloc_to_page(addr)) + +diff --git a/drivers/isdn/hardware/mISDN/hfcsusb.c b/drivers/isdn/hardware/mISDN/hfcsusb.c +index 060dc7fd66c1..c952002c6301 100644 +--- a/drivers/isdn/hardware/mISDN/hfcsusb.c ++++ b/drivers/isdn/hardware/mISDN/hfcsusb.c +@@ -1406,6 +1406,7 @@ start_isoc_chain(struct usb_fifo *fifo, int num_packets_per_urb, + printk(KERN_DEBUG + "%s: %s: alloc urb for fifo %i failed", + hw->name, __func__, fifo->fifonum); ++ continue; + } + fifo->iso[i].owner_fifo = (struct usb_fifo *) fifo; + fifo->iso[i].indx = i; +@@ -1704,13 +1705,23 @@ hfcsusb_stop_endpoint(struct hfcsusb *hw, int channel) + static int + setup_hfcsusb(struct hfcsusb *hw) + { ++ void *dmabuf = kmalloc(sizeof(u_char), GFP_KERNEL); + u_char b; ++ int ret; + + if (debug & DBG_HFC_CALL_TRACE) + printk(KERN_DEBUG "%s: %s\n", hw->name, __func__); + ++ if (!dmabuf) ++ return -ENOMEM; ++ ++ ret = read_reg_atomic(hw, HFCUSB_CHIP_ID, dmabuf); ++ ++ memcpy(&b, dmabuf, sizeof(u_char)); ++ kfree(dmabuf); ++ + /* check the chip id */ +- if (read_reg_atomic(hw, HFCUSB_CHIP_ID, &b) != 1) { ++ if (ret != 1) { + printk(KERN_DEBUG "%s: %s: cannot read chip id\n", + hw->name, __func__); + return 1; +diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c +index b1d0ae2dbd3d..dc385b70e4c3 100644 +--- a/drivers/md/dm-bufio.c ++++ b/drivers/md/dm-bufio.c +@@ -1602,7 +1602,9 @@ dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) + unsigned long freed; + + c = container_of(shrink, struct dm_bufio_client, shrinker); +- if (!dm_bufio_trylock(c)) ++ if (sc->gfp_mask & __GFP_FS) ++ dm_bufio_lock(c); ++ else if (!dm_bufio_trylock(c)) + return SHRINK_STOP; + + freed = __scan(c, sc->nr_to_scan, sc->gfp_mask); +diff --git a/drivers/md/dm-integrity.c 
b/drivers/md/dm-integrity.c +index dbdcc543832d..2e22d588f056 100644 +--- a/drivers/md/dm-integrity.c ++++ b/drivers/md/dm-integrity.c +@@ -1749,7 +1749,22 @@ offload_to_thread: + queue_work(ic->wait_wq, &dio->work); + return; + } ++ if (journal_read_pos != NOT_FOUND) ++ dio->range.n_sectors = ic->sectors_per_block; + wait_and_add_new_range(ic, &dio->range); ++ /* ++ * wait_and_add_new_range drops the spinlock, so the journal ++ * may have been changed arbitrarily. We need to recheck. ++ * To simplify the code, we restrict I/O size to just one block. ++ */ ++ if (journal_read_pos != NOT_FOUND) { ++ sector_t next_sector; ++ unsigned new_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector); ++ if (unlikely(new_pos != journal_read_pos)) { ++ remove_range_unlocked(ic, &dio->range); ++ goto retry; ++ } ++ } + } + spin_unlock_irq(&ic->endio_wait.lock); + +diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c +index 671c24332802..3f694d9061ec 100644 +--- a/drivers/md/dm-kcopyd.c ++++ b/drivers/md/dm-kcopyd.c +@@ -548,8 +548,10 @@ static int run_io_job(struct kcopyd_job *job) + * no point in continuing. + */ + if (test_bit(DM_KCOPYD_WRITE_SEQ, &job->flags) && +- job->master_job->write_err) ++ job->master_job->write_err) { ++ job->write_err = job->master_job->write_err; + return -EIO; ++ } + + io_job_start(job->kc->throttle); + +@@ -601,6 +603,7 @@ static int process_jobs(struct list_head *jobs, struct dm_kcopyd_client *kc, + else + job->read_err = 1; + push(&kc->complete_jobs, job); ++ wake(kc); + break; + } + +diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c +index c44925e4e481..b78a8a4d061c 100644 +--- a/drivers/md/dm-raid.c ++++ b/drivers/md/dm-raid.c +@@ -3199,7 +3199,7 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv) + */ + r = rs_prepare_reshape(rs); + if (r) +- return r; ++ goto bad; + + /* Reshaping ain't recovery, so disable recovery */ + rs_setup_recovery(rs, MaxSector); +diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c +index 34ab30dd5de9..36275c59e4e7 100644 +--- a/drivers/md/dm-table.c ++++ b/drivers/md/dm-table.c +@@ -1349,7 +1349,7 @@ void dm_table_event(struct dm_table *t) + } + EXPORT_SYMBOL(dm_table_event); + +-sector_t dm_table_get_size(struct dm_table *t) ++inline sector_t dm_table_get_size(struct dm_table *t) + { + return t->num_targets ? 
(t->highs[t->num_targets - 1] + 1) : 0; + } +@@ -1374,6 +1374,9 @@ struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector) + unsigned int l, n = 0, k = 0; + sector_t *node; + ++ if (unlikely(sector >= dm_table_get_size(t))) ++ return &t->targets[t->num_targets]; ++ + for (l = 0; l < t->depth; l++) { + n = get_child(n, k); + node = get_node(t, l, n); +diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c +index 4cdde7a02e94..7e8d7fc99410 100644 +--- a/drivers/md/dm-zoned-metadata.c ++++ b/drivers/md/dm-zoned-metadata.c +@@ -401,15 +401,18 @@ static struct dmz_mblock *dmz_get_mblock_slow(struct dmz_metadata *zmd, + sector_t block = zmd->sb[zmd->mblk_primary].block + mblk_no; + struct bio *bio; + ++ if (dmz_bdev_is_dying(zmd->dev)) ++ return ERR_PTR(-EIO); ++ + /* Get a new block and a BIO to read it */ + mblk = dmz_alloc_mblock(zmd, mblk_no); + if (!mblk) +- return NULL; ++ return ERR_PTR(-ENOMEM); + + bio = bio_alloc(GFP_NOIO, 1); + if (!bio) { + dmz_free_mblock(zmd, mblk); +- return NULL; ++ return ERR_PTR(-ENOMEM); + } + + spin_lock(&zmd->mblk_lock); +@@ -540,8 +543,8 @@ static struct dmz_mblock *dmz_get_mblock(struct dmz_metadata *zmd, + if (!mblk) { + /* Cache miss: read the block from disk */ + mblk = dmz_get_mblock_slow(zmd, mblk_no); +- if (!mblk) +- return ERR_PTR(-ENOMEM); ++ if (IS_ERR(mblk)) ++ return mblk; + } + + /* Wait for on-going read I/O and check for error */ +@@ -569,16 +572,19 @@ static void dmz_dirty_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk) + /* + * Issue a metadata block write BIO. + */ +-static void dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk, +- unsigned int set) ++static int dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk, ++ unsigned int set) + { + sector_t block = zmd->sb[set].block + mblk->no; + struct bio *bio; + ++ if (dmz_bdev_is_dying(zmd->dev)) ++ return -EIO; ++ + bio = bio_alloc(GFP_NOIO, 1); + if (!bio) { + set_bit(DMZ_META_ERROR, &mblk->state); +- return; ++ return -ENOMEM; + } + + set_bit(DMZ_META_WRITING, &mblk->state); +@@ -590,6 +596,8 @@ static void dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk, + bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_META | REQ_PRIO); + bio_add_page(bio, mblk->page, DMZ_BLOCK_SIZE, 0); + submit_bio(bio); ++ ++ return 0; + } + + /* +@@ -601,6 +609,9 @@ static int dmz_rdwr_block(struct dmz_metadata *zmd, int op, sector_t block, + struct bio *bio; + int ret; + ++ if (dmz_bdev_is_dying(zmd->dev)) ++ return -EIO; ++ + bio = bio_alloc(GFP_NOIO, 1); + if (!bio) + return -ENOMEM; +@@ -658,22 +669,29 @@ static int dmz_write_dirty_mblocks(struct dmz_metadata *zmd, + { + struct dmz_mblock *mblk; + struct blk_plug plug; +- int ret = 0; ++ int ret = 0, nr_mblks_submitted = 0; + + /* Issue writes */ + blk_start_plug(&plug); +- list_for_each_entry(mblk, write_list, link) +- dmz_write_mblock(zmd, mblk, set); ++ list_for_each_entry(mblk, write_list, link) { ++ ret = dmz_write_mblock(zmd, mblk, set); ++ if (ret) ++ break; ++ nr_mblks_submitted++; ++ } + blk_finish_plug(&plug); + + /* Wait for completion */ + list_for_each_entry(mblk, write_list, link) { ++ if (!nr_mblks_submitted) ++ break; + wait_on_bit_io(&mblk->state, DMZ_META_WRITING, + TASK_UNINTERRUPTIBLE); + if (test_bit(DMZ_META_ERROR, &mblk->state)) { + clear_bit(DMZ_META_ERROR, &mblk->state); + ret = -EIO; + } ++ nr_mblks_submitted--; + } + + /* Flush drive cache (this will also sync data) */ +@@ -735,6 +753,11 @@ int dmz_flush_metadata(struct dmz_metadata *zmd) + 
*/ + dmz_lock_flush(zmd); + ++ if (dmz_bdev_is_dying(zmd->dev)) { ++ ret = -EIO; ++ goto out; ++ } ++ + /* Get dirty blocks */ + spin_lock(&zmd->mblk_lock); + list_splice_init(&zmd->mblk_dirty_list, &write_list); +@@ -1534,7 +1557,7 @@ static struct dm_zone *dmz_get_rnd_zone_for_reclaim(struct dmz_metadata *zmd) + struct dm_zone *zone; + + if (list_empty(&zmd->map_rnd_list)) +- return NULL; ++ return ERR_PTR(-EBUSY); + + list_for_each_entry(zone, &zmd->map_rnd_list, link) { + if (dmz_is_buf(zone)) +@@ -1545,7 +1568,7 @@ static struct dm_zone *dmz_get_rnd_zone_for_reclaim(struct dmz_metadata *zmd) + return dzone; + } + +- return NULL; ++ return ERR_PTR(-EBUSY); + } + + /* +@@ -1556,7 +1579,7 @@ static struct dm_zone *dmz_get_seq_zone_for_reclaim(struct dmz_metadata *zmd) + struct dm_zone *zone; + + if (list_empty(&zmd->map_seq_list)) +- return NULL; ++ return ERR_PTR(-EBUSY); + + list_for_each_entry(zone, &zmd->map_seq_list, link) { + if (!zone->bzone) +@@ -1565,7 +1588,7 @@ static struct dm_zone *dmz_get_seq_zone_for_reclaim(struct dmz_metadata *zmd) + return zone; + } + +- return NULL; ++ return ERR_PTR(-EBUSY); + } + + /* +@@ -1623,6 +1646,10 @@ again: + /* Alloate a random zone */ + dzone = dmz_alloc_zone(zmd, DMZ_ALLOC_RND); + if (!dzone) { ++ if (dmz_bdev_is_dying(zmd->dev)) { ++ dzone = ERR_PTR(-EIO); ++ goto out; ++ } + dmz_wait_for_free_zones(zmd); + goto again; + } +@@ -1720,6 +1747,10 @@ again: + /* Alloate a random zone */ + bzone = dmz_alloc_zone(zmd, DMZ_ALLOC_RND); + if (!bzone) { ++ if (dmz_bdev_is_dying(zmd->dev)) { ++ bzone = ERR_PTR(-EIO); ++ goto out; ++ } + dmz_wait_for_free_zones(zmd); + goto again; + } +diff --git a/drivers/md/dm-zoned-reclaim.c b/drivers/md/dm-zoned-reclaim.c +index edf4b95eb075..9470b8f77a33 100644 +--- a/drivers/md/dm-zoned-reclaim.c ++++ b/drivers/md/dm-zoned-reclaim.c +@@ -37,7 +37,7 @@ enum { + /* + * Number of seconds of target BIO inactivity to consider the target idle. + */ +-#define DMZ_IDLE_PERIOD (10UL * HZ) ++#define DMZ_IDLE_PERIOD (10UL * HZ) + + /* + * Percentage of unmapped (free) random zones below which reclaim starts +@@ -134,6 +134,9 @@ static int dmz_reclaim_copy(struct dmz_reclaim *zrc, + set_bit(DM_KCOPYD_WRITE_SEQ, &flags); + + while (block < end_block) { ++ if (dev->flags & DMZ_BDEV_DYING) ++ return -EIO; ++ + /* Get a valid region from the source zone */ + ret = dmz_first_valid_block(zmd, src_zone, &block); + if (ret <= 0) +@@ -215,7 +218,7 @@ static int dmz_reclaim_buf(struct dmz_reclaim *zrc, struct dm_zone *dzone) + + dmz_unlock_flush(zmd); + +- return 0; ++ return ret; + } + + /* +@@ -259,7 +262,7 @@ static int dmz_reclaim_seq_data(struct dmz_reclaim *zrc, struct dm_zone *dzone) + + dmz_unlock_flush(zmd); + +- return 0; ++ return ret; + } + + /* +@@ -312,7 +315,7 @@ static int dmz_reclaim_rnd_data(struct dmz_reclaim *zrc, struct dm_zone *dzone) + + dmz_unlock_flush(zmd); + +- return 0; ++ return ret; + } + + /* +@@ -334,7 +337,7 @@ static void dmz_reclaim_empty(struct dmz_reclaim *zrc, struct dm_zone *dzone) + /* + * Find a candidate zone for reclaim and process it. 
+ */ +-static void dmz_reclaim(struct dmz_reclaim *zrc) ++static int dmz_do_reclaim(struct dmz_reclaim *zrc) + { + struct dmz_metadata *zmd = zrc->metadata; + struct dm_zone *dzone; +@@ -344,8 +347,8 @@ static void dmz_reclaim(struct dmz_reclaim *zrc) + + /* Get a data zone */ + dzone = dmz_get_zone_for_reclaim(zmd); +- if (!dzone) +- return; ++ if (IS_ERR(dzone)) ++ return PTR_ERR(dzone); + + start = jiffies; + +@@ -391,13 +394,20 @@ static void dmz_reclaim(struct dmz_reclaim *zrc) + out: + if (ret) { + dmz_unlock_zone_reclaim(dzone); +- return; ++ return ret; + } + +- (void) dmz_flush_metadata(zrc->metadata); ++ ret = dmz_flush_metadata(zrc->metadata); ++ if (ret) { ++ dmz_dev_debug(zrc->dev, ++ "Metadata flush for zone %u failed, err %d\n", ++ dmz_id(zmd, rzone), ret); ++ return ret; ++ } + + dmz_dev_debug(zrc->dev, "Reclaimed zone %u in %u ms", + dmz_id(zmd, rzone), jiffies_to_msecs(jiffies - start)); ++ return 0; + } + + /* +@@ -442,6 +452,10 @@ static void dmz_reclaim_work(struct work_struct *work) + struct dmz_metadata *zmd = zrc->metadata; + unsigned int nr_rnd, nr_unmap_rnd; + unsigned int p_unmap_rnd; ++ int ret; ++ ++ if (dmz_bdev_is_dying(zrc->dev)) ++ return; + + if (!dmz_should_reclaim(zrc)) { + mod_delayed_work(zrc->wq, &zrc->work, DMZ_IDLE_PERIOD); +@@ -471,7 +485,17 @@ static void dmz_reclaim_work(struct work_struct *work) + (dmz_target_idle(zrc) ? "Idle" : "Busy"), + p_unmap_rnd, nr_unmap_rnd, nr_rnd); + +- dmz_reclaim(zrc); ++ ret = dmz_do_reclaim(zrc); ++ if (ret) { ++ dmz_dev_debug(zrc->dev, "Reclaim error %d\n", ret); ++ if (ret == -EIO) ++ /* ++ * LLD might be performing some error handling sequence ++ * at the underlying device. To not interfere, do not ++ * attempt to schedule the next reclaim run immediately. ++ */ ++ return; ++ } + + dmz_schedule_reclaim(zrc); + } +diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c +index 85fb2baa8a7f..1030c42add05 100644 +--- a/drivers/md/dm-zoned-target.c ++++ b/drivers/md/dm-zoned-target.c +@@ -133,6 +133,8 @@ static int dmz_submit_bio(struct dmz_target *dmz, struct dm_zone *zone, + + atomic_inc(&bioctx->ref); + generic_make_request(clone); ++ if (clone->bi_status == BLK_STS_IOERR) ++ return -EIO; + + if (bio_op(bio) == REQ_OP_WRITE && dmz_is_seq(zone)) + zone->wp_block += nr_blocks; +@@ -277,8 +279,8 @@ static int dmz_handle_buffered_write(struct dmz_target *dmz, + + /* Get the buffer zone. One will be allocated if needed */ + bzone = dmz_get_chunk_buffer(zmd, zone); +- if (!bzone) +- return -ENOSPC; ++ if (IS_ERR(bzone)) ++ return PTR_ERR(bzone); + + if (dmz_is_readonly(bzone)) + return -EROFS; +@@ -389,6 +391,11 @@ static void dmz_handle_bio(struct dmz_target *dmz, struct dm_chunk_work *cw, + + dmz_lock_metadata(zmd); + ++ if (dmz->dev->flags & DMZ_BDEV_DYING) { ++ ret = -EIO; ++ goto out; ++ } ++ + /* + * Get the data zone mapping the chunk. There may be no + * mapping for read and discard. If a mapping is obtained, +@@ -493,6 +500,8 @@ static void dmz_flush_work(struct work_struct *work) + + /* Flush dirty metadata blocks */ + ret = dmz_flush_metadata(dmz->metadata); ++ if (ret) ++ dmz_dev_debug(dmz->dev, "Metadata flush failed, rc=%d\n", ret); + + /* Process queued flush requests */ + while (1) { +@@ -513,22 +522,24 @@ static void dmz_flush_work(struct work_struct *work) + * Get a chunk work and start it to process a new BIO. + * If the BIO chunk has no work yet, create one. 
+ */ +-static void dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio) ++static int dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio) + { + unsigned int chunk = dmz_bio_chunk(dmz->dev, bio); + struct dm_chunk_work *cw; ++ int ret = 0; + + mutex_lock(&dmz->chunk_lock); + + /* Get the BIO chunk work. If one is not active yet, create one */ + cw = radix_tree_lookup(&dmz->chunk_rxtree, chunk); + if (!cw) { +- int ret; + + /* Create a new chunk work */ + cw = kmalloc(sizeof(struct dm_chunk_work), GFP_NOIO); +- if (!cw) ++ if (unlikely(!cw)) { ++ ret = -ENOMEM; + goto out; ++ } + + INIT_WORK(&cw->work, dmz_chunk_work); + atomic_set(&cw->refcount, 0); +@@ -539,7 +550,6 @@ static void dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio) + ret = radix_tree_insert(&dmz->chunk_rxtree, chunk, cw); + if (unlikely(ret)) { + kfree(cw); +- cw = NULL; + goto out; + } + } +@@ -547,10 +557,38 @@ static void dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio) + bio_list_add(&cw->bio_list, bio); + dmz_get_chunk_work(cw); + ++ dmz_reclaim_bio_acc(dmz->reclaim); + if (queue_work(dmz->chunk_wq, &cw->work)) + dmz_get_chunk_work(cw); + out: + mutex_unlock(&dmz->chunk_lock); ++ return ret; ++} ++ ++/* ++ * Check the backing device availability. If it's on the way out, ++ * start failing I/O. Reclaim and metadata components also call this ++ * function to cleanly abort operation in the event of such failure. ++ */ ++bool dmz_bdev_is_dying(struct dmz_dev *dmz_dev) ++{ ++ struct gendisk *disk; ++ ++ if (!(dmz_dev->flags & DMZ_BDEV_DYING)) { ++ disk = dmz_dev->bdev->bd_disk; ++ if (blk_queue_dying(bdev_get_queue(dmz_dev->bdev))) { ++ dmz_dev_warn(dmz_dev, "Backing device queue dying"); ++ dmz_dev->flags |= DMZ_BDEV_DYING; ++ } else if (disk->fops->check_events) { ++ if (disk->fops->check_events(disk, 0) & ++ DISK_EVENT_MEDIA_CHANGE) { ++ dmz_dev_warn(dmz_dev, "Backing device offline"); ++ dmz_dev->flags |= DMZ_BDEV_DYING; ++ } ++ } ++ } ++ ++ return dmz_dev->flags & DMZ_BDEV_DYING; + } + + /* +@@ -564,6 +602,10 @@ static int dmz_map(struct dm_target *ti, struct bio *bio) + sector_t sector = bio->bi_iter.bi_sector; + unsigned int nr_sectors = bio_sectors(bio); + sector_t chunk_sector; ++ int ret; ++ ++ if (dmz_bdev_is_dying(dmz->dev)) ++ return DM_MAPIO_KILL; + + dmz_dev_debug(dev, "BIO op %d sector %llu + %u => chunk %llu, block %llu, %u blocks", + bio_op(bio), (unsigned long long)sector, nr_sectors, +@@ -601,8 +643,14 @@ static int dmz_map(struct dm_target *ti, struct bio *bio) + dm_accept_partial_bio(bio, dev->zone_nr_sectors - chunk_sector); + + /* Now ready to handle this BIO */ +- dmz_reclaim_bio_acc(dmz->reclaim); +- dmz_queue_chunk_work(dmz, bio); ++ ret = dmz_queue_chunk_work(dmz, bio); ++ if (ret) { ++ dmz_dev_debug(dmz->dev, ++ "BIO op %d, can't process chunk %llu, err %i\n", ++ bio_op(bio), (u64)dmz_bio_chunk(dmz->dev, bio), ++ ret); ++ return DM_MAPIO_REQUEUE; ++ } + + return DM_MAPIO_SUBMITTED; + } +@@ -856,6 +904,9 @@ static int dmz_prepare_ioctl(struct dm_target *ti, struct block_device **bdev) + { + struct dmz_target *dmz = ti->private; + ++ if (dmz_bdev_is_dying(dmz->dev)) ++ return -ENODEV; ++ + *bdev = dmz->dev->bdev; + + return 0; +diff --git a/drivers/md/dm-zoned.h b/drivers/md/dm-zoned.h +index ed8de49c9a08..93a64529f219 100644 +--- a/drivers/md/dm-zoned.h ++++ b/drivers/md/dm-zoned.h +@@ -56,6 +56,8 @@ struct dmz_dev { + + unsigned int nr_zones; + ++ unsigned int flags; ++ + sector_t zone_nr_sectors; + unsigned int zone_nr_sectors_shift; + +@@ -67,6 +69,9 
@@ struct dmz_dev { + (dev)->zone_nr_sectors_shift) + #define dmz_chunk_block(dev, b) ((b) & ((dev)->zone_nr_blocks - 1)) + ++/* Device flags. */ ++#define DMZ_BDEV_DYING (1 << 0) ++ + /* + * Zone descriptor. + */ +@@ -245,4 +250,9 @@ void dmz_resume_reclaim(struct dmz_reclaim *zrc); + void dmz_reclaim_bio_acc(struct dmz_reclaim *zrc); + void dmz_schedule_reclaim(struct dmz_reclaim *zrc); + ++/* ++ * Functions defined in dm-zoned-target.c ++ */ ++bool dmz_bdev_is_dying(struct dmz_dev *dmz_dev); ++ + #endif /* DM_ZONED_H */ +diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c +index 58b319757b1e..8aae0624a297 100644 +--- a/drivers/md/persistent-data/dm-btree.c ++++ b/drivers/md/persistent-data/dm-btree.c +@@ -628,39 +628,40 @@ static int btree_split_beneath(struct shadow_spine *s, uint64_t key) + + new_parent = shadow_current(s); + ++ pn = dm_block_data(new_parent); ++ size = le32_to_cpu(pn->header.flags) & INTERNAL_NODE ? ++ sizeof(__le64) : s->info->value_type.size; ++ ++ /* create & init the left block */ + r = new_block(s->info, &left); + if (r < 0) + return r; + ++ ln = dm_block_data(left); ++ nr_left = le32_to_cpu(pn->header.nr_entries) / 2; ++ ++ ln->header.flags = pn->header.flags; ++ ln->header.nr_entries = cpu_to_le32(nr_left); ++ ln->header.max_entries = pn->header.max_entries; ++ ln->header.value_size = pn->header.value_size; ++ memcpy(ln->keys, pn->keys, nr_left * sizeof(pn->keys[0])); ++ memcpy(value_ptr(ln, 0), value_ptr(pn, 0), nr_left * size); ++ ++ /* create & init the right block */ + r = new_block(s->info, &right); + if (r < 0) { + unlock_block(s->info, left); + return r; + } + +- pn = dm_block_data(new_parent); +- ln = dm_block_data(left); + rn = dm_block_data(right); +- +- nr_left = le32_to_cpu(pn->header.nr_entries) / 2; + nr_right = le32_to_cpu(pn->header.nr_entries) - nr_left; + +- ln->header.flags = pn->header.flags; +- ln->header.nr_entries = cpu_to_le32(nr_left); +- ln->header.max_entries = pn->header.max_entries; +- ln->header.value_size = pn->header.value_size; +- + rn->header.flags = pn->header.flags; + rn->header.nr_entries = cpu_to_le32(nr_right); + rn->header.max_entries = pn->header.max_entries; + rn->header.value_size = pn->header.value_size; +- +- memcpy(ln->keys, pn->keys, nr_left * sizeof(pn->keys[0])); + memcpy(rn->keys, pn->keys + nr_left, nr_right * sizeof(pn->keys[0])); +- +- size = le32_to_cpu(pn->header.flags) & INTERNAL_NODE ? 
+- sizeof(__le64) : s->info->value_type.size; +- memcpy(value_ptr(ln, 0), value_ptr(pn, 0), nr_left * size); + memcpy(value_ptr(rn, 0), value_ptr(pn, nr_left), + nr_right * size); + +diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c +index aec449243966..25328582cc48 100644 +--- a/drivers/md/persistent-data/dm-space-map-metadata.c ++++ b/drivers/md/persistent-data/dm-space-map-metadata.c +@@ -249,7 +249,7 @@ static int out(struct sm_metadata *smm) + } + + if (smm->recursion_count == 1) +- apply_bops(smm); ++ r = apply_bops(smm); + + smm->recursion_count--; + +diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c +index 8f14f85b8e95..0d2392c4b625 100644 +--- a/drivers/net/bonding/bond_main.c ++++ b/drivers/net/bonding/bond_main.c +@@ -2190,6 +2190,15 @@ static void bond_miimon_commit(struct bonding *bond) + bond_for_each_slave(bond, slave, iter) { + switch (slave->new_link) { + case BOND_LINK_NOCHANGE: ++ /* For 802.3ad mode, check current slave speed and ++ * duplex again in case its port was disabled after ++ * invalid speed/duplex reporting but recovered before ++ * link monitoring could make a decision on the actual ++ * link status ++ */ ++ if (BOND_MODE(bond) == BOND_MODE_8023AD && ++ slave->link == BOND_LINK_UP) ++ bond_3ad_adapter_speed_duplex_changed(slave); + continue; + + case BOND_LINK_UP: +diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c +index c05e4d50d43d..bd127ce3aba2 100644 +--- a/drivers/net/can/dev.c ++++ b/drivers/net/can/dev.c +@@ -1260,6 +1260,8 @@ int register_candev(struct net_device *dev) + return -EINVAL; + + dev->rtnl_link_ops = &can_link_ops; ++ netif_carrier_off(dev); ++ + return register_netdev(dev); + } + EXPORT_SYMBOL_GPL(register_candev); +diff --git a/drivers/net/can/sja1000/peak_pcmcia.c b/drivers/net/can/sja1000/peak_pcmcia.c +index b8c39ede7cd5..179bfcd541f2 100644 +--- a/drivers/net/can/sja1000/peak_pcmcia.c ++++ b/drivers/net/can/sja1000/peak_pcmcia.c +@@ -487,7 +487,7 @@ static void pcan_free_channels(struct pcan_pccard *card) + if (!netdev) + continue; + +- strncpy(name, netdev->name, IFNAMSIZ); ++ strlcpy(name, netdev->name, IFNAMSIZ); + + unregister_sja1000dev(netdev); + +diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c +index da64e71a62ee..fccb6bf21fad 100644 +--- a/drivers/net/can/spi/mcp251x.c ++++ b/drivers/net/can/spi/mcp251x.c +@@ -678,17 +678,6 @@ static int mcp251x_power_enable(struct regulator *reg, int enable) + return regulator_disable(reg); + } + +-static void mcp251x_open_clean(struct net_device *net) +-{ +- struct mcp251x_priv *priv = netdev_priv(net); +- struct spi_device *spi = priv->spi; +- +- free_irq(spi->irq, priv); +- mcp251x_hw_sleep(spi); +- mcp251x_power_enable(priv->transceiver, 0); +- close_candev(net); +-} +- + static int mcp251x_stop(struct net_device *net) + { + struct mcp251x_priv *priv = netdev_priv(net); +@@ -954,37 +943,43 @@ static int mcp251x_open(struct net_device *net) + flags | IRQF_ONESHOT, DEVICE_NAME, priv); + if (ret) { + dev_err(&spi->dev, "failed to acquire irq %d\n", spi->irq); +- mcp251x_power_enable(priv->transceiver, 0); +- close_candev(net); +- goto open_unlock; ++ goto out_close; + } + + priv->wq = alloc_workqueue("mcp251x_wq", WQ_FREEZABLE | WQ_MEM_RECLAIM, + 0); ++ if (!priv->wq) { ++ ret = -ENOMEM; ++ goto out_clean; ++ } + INIT_WORK(&priv->tx_work, mcp251x_tx_work_handler); + INIT_WORK(&priv->restart_work, mcp251x_restart_work_handler); + + ret = mcp251x_hw_reset(spi); 
+- if (ret) { +- mcp251x_open_clean(net); +- goto open_unlock; +- } ++ if (ret) ++ goto out_free_wq; + ret = mcp251x_setup(net, spi); +- if (ret) { +- mcp251x_open_clean(net); +- goto open_unlock; +- } ++ if (ret) ++ goto out_free_wq; + ret = mcp251x_set_normal_mode(spi); +- if (ret) { +- mcp251x_open_clean(net); +- goto open_unlock; +- } ++ if (ret) ++ goto out_free_wq; + + can_led_event(net, CAN_LED_EVENT_OPEN); + + netif_wake_queue(net); ++ mutex_unlock(&priv->mcp_lock); + +-open_unlock: ++ return 0; ++ ++out_free_wq: ++ destroy_workqueue(priv->wq); ++out_clean: ++ free_irq(spi->irq, priv); ++ mcp251x_hw_sleep(spi); ++out_close: ++ mcp251x_power_enable(priv->transceiver, 0); ++ close_candev(net); + mutex_unlock(&priv->mcp_lock); + return ret; + } +diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c +index 740ef47eab01..43b0fa2b9932 100644 +--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c ++++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c +@@ -863,7 +863,7 @@ static void peak_usb_disconnect(struct usb_interface *intf) + + dev_prev_siblings = dev->prev_siblings; + dev->state &= ~PCAN_USB_STATE_CONNECTED; +- strncpy(name, netdev->name, IFNAMSIZ); ++ strlcpy(name, netdev->name, IFNAMSIZ); + + unregister_netdev(netdev); + +diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c +index c34ea385fe4a..6be6de0774b6 100644 +--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c ++++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c +@@ -3270,7 +3270,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) + if (!adapter->regs) { + dev_err(&pdev->dev, "cannot map device registers\n"); + err = -ENOMEM; +- goto out_free_adapter; ++ goto out_free_adapter_nofail; + } + + adapter->pdev = pdev; +@@ -3398,6 +3398,9 @@ out_free_dev: + if (adapter->port[i]) + free_netdev(adapter->port[i]); + ++out_free_adapter_nofail: ++ kfree_skb(adapter->nofail_skb); ++ + out_free_adapter: + kfree(adapter); + +diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c +index 6127697ede12..a91d49dd92ea 100644 +--- a/drivers/net/ethernet/hisilicon/hip04_eth.c ++++ b/drivers/net/ethernet/hisilicon/hip04_eth.c +@@ -157,6 +157,7 @@ struct hip04_priv { + unsigned int reg_inten; + + struct napi_struct napi; ++ struct device *dev; + struct net_device *ndev; + + struct tx_desc *tx_desc; +@@ -185,7 +186,7 @@ struct hip04_priv { + + static inline unsigned int tx_count(unsigned int head, unsigned int tail) + { +- return (head - tail) % (TX_DESC_NUM - 1); ++ return (head - tail) % TX_DESC_NUM; + } + + static void hip04_config_port(struct net_device *ndev, u32 speed, u32 duplex) +@@ -387,7 +388,7 @@ static int hip04_tx_reclaim(struct net_device *ndev, bool force) + } + + if (priv->tx_phys[tx_tail]) { +- dma_unmap_single(&ndev->dev, priv->tx_phys[tx_tail], ++ dma_unmap_single(priv->dev, priv->tx_phys[tx_tail], + priv->tx_skb[tx_tail]->len, + DMA_TO_DEVICE); + priv->tx_phys[tx_tail] = 0; +@@ -437,8 +438,8 @@ static int hip04_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev) + return NETDEV_TX_BUSY; + } + +- phys = dma_map_single(&ndev->dev, skb->data, skb->len, DMA_TO_DEVICE); +- if (dma_mapping_error(&ndev->dev, phys)) { ++ phys = dma_map_single(priv->dev, skb->data, skb->len, DMA_TO_DEVICE); ++ if (dma_mapping_error(priv->dev, phys)) { + dev_kfree_skb(skb); + return NETDEV_TX_OK; + } +@@ -497,6 +498,9 @@ static int hip04_rx_poll(struct napi_struct 
*napi, int budget)
+ u16 len;
+ u32 err;
+
++ /* clean up tx descriptors */
++ tx_remaining = hip04_tx_reclaim(ndev, false);
++
+ while (cnt && !last) {
+ buf = priv->rx_buf[priv->rx_head];
+ skb = build_skb(buf, priv->rx_buf_size);
+@@ -505,7 +509,7 @@ static int hip04_rx_poll(struct napi_struct *napi, int budget)
+ net_dbg_ratelimited("build_skb failed\n");
+ goto refill;
+ }
+
+- dma_unmap_single(&ndev->dev, priv->rx_phys[priv->rx_head],
++ dma_unmap_single(priv->dev, priv->rx_phys[priv->rx_head],
+ RX_BUF_SIZE, DMA_FROM_DEVICE);
+ priv->rx_phys[priv->rx_head] = 0;
+
+@@ -534,9 +538,9 @@ refill:
+ buf = netdev_alloc_frag(priv->rx_buf_size);
+ if (!buf)
+ goto done;
+- phys = dma_map_single(&ndev->dev, buf,
++ phys = dma_map_single(priv->dev, buf,
+ RX_BUF_SIZE, DMA_FROM_DEVICE);
+- if (dma_mapping_error(&ndev->dev, phys))
++ if (dma_mapping_error(priv->dev, phys))
+ goto done;
+ priv->rx_buf[priv->rx_head] = buf;
+ priv->rx_phys[priv->rx_head] = phys;
+@@ -557,8 +561,7 @@ refill:
+ }
+ napi_complete_done(napi, rx);
+ done:
+- /* clean up tx descriptors and start a new timer if necessary */
+- tx_remaining = hip04_tx_reclaim(ndev, false);
++ /* start a new timer if necessary */
+ if (rx < budget && tx_remaining)
+ hip04_start_tx_timer(priv);
+
+@@ -640,9 +643,9 @@ static int hip04_mac_open(struct net_device *ndev)
+ for (i = 0; i < RX_DESC_NUM; i++) {
+ dma_addr_t phys;
+
+- phys = dma_map_single(&ndev->dev, priv->rx_buf[i],
++ phys = dma_map_single(priv->dev, priv->rx_buf[i],
+ RX_BUF_SIZE, DMA_FROM_DEVICE);
+- if (dma_mapping_error(&ndev->dev, phys))
++ if (dma_mapping_error(priv->dev, phys))
+ return -EIO;
+
+ priv->rx_phys[i] = phys;
+@@ -676,7 +679,7 @@ static int hip04_mac_stop(struct net_device *ndev)
+
+ for (i = 0; i < RX_DESC_NUM; i++) {
+ if (priv->rx_phys[i]) {
+- dma_unmap_single(&ndev->dev, priv->rx_phys[i],
++ dma_unmap_single(priv->dev, priv->rx_phys[i],
+ RX_BUF_SIZE, DMA_FROM_DEVICE);
+ priv->rx_phys[i] = 0;
+ }
+@@ -820,6 +823,7 @@ static int hip04_mac_probe(struct platform_device *pdev)
+ return -ENOMEM;
+
+ priv = netdev_priv(ndev);
++ priv->dev = d;
+ priv->ndev = ndev;
+ platform_set_drvdata(pdev, ndev);
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+index 6455511457ca..9b608d23ff7e 100644
+--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
++++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+@@ -4412,9 +4412,9 @@ static void mvpp2_xlg_config(struct mvpp2_port *port, unsigned int mode,
+ if (state->pause & MLO_PAUSE_RX)
+ ctrl0 |= MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN;
+
+- ctrl4 &= ~MVPP22_XLG_CTRL4_MACMODSELECT_GMAC;
+- ctrl4 |= MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC |
+- MVPP22_XLG_CTRL4_EN_IDLE_CHECK;
++ ctrl4 &= ~(MVPP22_XLG_CTRL4_MACMODSELECT_GMAC |
++ MVPP22_XLG_CTRL4_EN_IDLE_CHECK);
++ ctrl4 |= MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC;
+
+ writel(ctrl0, port->base + MVPP22_XLG_CTRL0_REG);
+ writel(ctrl4, port->base + MVPP22_XLG_CTRL4_REG);
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
+index b22f464ea3fa..f9e475075d3e 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
+@@ -939,7 +939,7 @@ static int qed_int_deassertion(struct qed_hwfn *p_hwfn,
+ snprintf(bit_name, 30,
+ p_aeu->bit_name, num);
+ else
+- strncpy(bit_name,
++ strlcpy(bit_name,
+ p_aeu->bit_name, 30);
+
+ /* We now need to pass bitmask in its
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
+index 13802b825d65..909422d93903 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
+@@ -442,7 +442,7 @@ static void qed_rdma_init_devinfo(struct qed_hwfn *p_hwfn,
+ /* Vendor specific information */
+ dev->vendor_id = cdev->vendor_id;
+ dev->vendor_part_id = cdev->device_id;
+- dev->hw_ver = 0;
++ dev->hw_ver = cdev->chip_rev;
+ dev->fw_ver = (FW_MAJOR_VERSION << 24) | (FW_MINOR_VERSION << 16) |
+ (FW_REVISION_VERSION << 8) | (FW_ENGINEERING_VERSION);
+
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+index d0e6e1503581..48cf5e2b2441 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+@@ -88,6 +88,8 @@ static void dwmac4_rx_queue_priority(struct mac_device_info *hw,
+ u32 value;
+
+ base_register = (queue < 4) ? GMAC_RXQ_CTRL2 : GMAC_RXQ_CTRL3;
++ if (queue >= 4)
++ queue -= 4;
+
+ value = readl(ioaddr + base_register);
+
+@@ -105,6 +107,8 @@ static void dwmac4_tx_queue_priority(struct mac_device_info *hw,
+ u32 value;
+
+ base_register = (queue < 4) ? GMAC_TXQ_PRTY_MAP0 : GMAC_TXQ_PRTY_MAP1;
++ if (queue >= 4)
++ queue -= 4;
+
+ value = readl(ioaddr + base_register);
+
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
+index d182f82f7b58..870302a7177e 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
+@@ -106,6 +106,8 @@ static void dwxgmac2_rx_queue_prio(struct mac_device_info *hw, u32 prio,
+ u32 value, reg;
+
+ reg = (queue < 4) ? XGMAC_RXQ_CTRL2 : XGMAC_RXQ_CTRL3;
++ if (queue >= 4)
++ queue -= 4;
+
+ value = readl(ioaddr + reg);
+ value &= ~XGMAC_PSRQ(queue);
+@@ -169,6 +171,8 @@ static void dwxgmac2_map_mtl_to_dma(struct mac_device_info *hw, u32 queue,
+ u32 value, reg;
+
+ reg = (queue < 4) ? XGMAC_MTL_RXQ_DMA_MAP0 : XGMAC_MTL_RXQ_DMA_MAP1;
++ if (queue >= 4)
++ queue -= 4;
+
+ value = readl(ioaddr + reg);
+ value &= ~XGMAC_QxMDMACH(queue);
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+index 58ea18af9813..37c0bc699cd9 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+@@ -37,7 +37,7 @@ static struct stmmac_tc_entry *tc_find_entry(struct stmmac_priv *priv,
+ entry = &priv->tc_entries[i];
+ if (!entry->in_use && !first && free)
+ first = entry;
+- if (entry->handle == loc && !free)
++ if ((entry->handle == loc) && !free && !entry->is_frag)
+ dup = entry;
+ }
+
+diff --git a/drivers/net/phy/phy_led_triggers.c b/drivers/net/phy/phy_led_triggers.c
+index 491efc1bf5c4..7278eca70f9f 100644
+--- a/drivers/net/phy/phy_led_triggers.c
++++ b/drivers/net/phy/phy_led_triggers.c
+@@ -58,8 +58,9 @@ void phy_led_trigger_change_speed(struct phy_device *phy)
+ if (!phy->last_triggered)
+ led_trigger_event(&phy->led_link_trigger->trigger,
+ LED_FULL);
++ else
++ led_trigger_event(&phy->last_triggered->trigger, LED_OFF);
+
+- led_trigger_event(&phy->last_triggered->trigger, LED_OFF);
+ led_trigger_event(&plt->trigger, LED_FULL);
+ phy->last_triggered = plt;
+ }
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index 128c8a327d8e..51017c6bb3bc 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -1231,6 +1231,7 @@ static const struct usb_device_id products[] = {
+ {QMI_FIXED_INTF(0x2001, 0x7e35, 4)}, /* D-Link DWM-222 */
+ {QMI_FIXED_INTF(0x2020, 0x2031, 4)}, /* Olicard 600 */
+ {QMI_FIXED_INTF(0x2020, 0x2033, 4)}, /* BroadMobi BM806U */
++ {QMI_FIXED_INTF(0x2020, 0x2060, 4)}, /* BroadMobi BM818 */
+ {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */
+ {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */
+ {QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */
+diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
+index 7cd428c0af43..ce2dd06af62e 100644
+--- a/drivers/net/wireless/mac80211_hwsim.c
++++ b/drivers/net/wireless/mac80211_hwsim.c
+@@ -3502,10 +3502,12 @@ static int hwsim_dump_radio_nl(struct sk_buff *skb,
+ hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq, &hwsim_genl_family,
+ NLM_F_MULTI, HWSIM_CMD_GET_RADIO);
+- if (!hdr)
++ if (hdr) {
++ genl_dump_check_consistent(cb, hdr);
++ genlmsg_end(skb, hdr);
++ } else {
+ res = -EMSGSIZE;
+- genl_dump_check_consistent(cb, hdr);
+- genlmsg_end(skb, hdr);
++ }
+ }
+
+ done:
+diff --git a/drivers/nfc/st-nci/se.c b/drivers/nfc/st-nci/se.c
+index f55d082ace71..5d6e7e931bc6 100644
+--- a/drivers/nfc/st-nci/se.c
++++ b/drivers/nfc/st-nci/se.c
+@@ -344,6 +344,8 @@ static int st_nci_hci_connectivity_event_received(struct nci_dev *ndev,
+
+ transaction = (struct nfc_evt_transaction *)devm_kzalloc(dev,
+ skb->len - 2, GFP_KERNEL);
++ if (!transaction)
++ return -ENOMEM;
+
+ transaction->aid_len = skb->data[1];
+ memcpy(transaction->aid, &skb->data[2], transaction->aid_len);
+diff --git a/drivers/nfc/st21nfca/se.c b/drivers/nfc/st21nfca/se.c
+index 4bed9e842db3..fd967a38a94a 100644
+--- a/drivers/nfc/st21nfca/se.c
++++ b/drivers/nfc/st21nfca/se.c
+@@ -328,6 +328,8 @@ int st21nfca_connectivity_event_received(struct nfc_hci_dev *hdev, u8 host,
+
+ transaction = (struct nfc_evt_transaction *)devm_kzalloc(dev,
+ skb->len - 2, GFP_KERNEL);
++ if (!transaction)
++ return -ENOMEM;
+
+ transaction->aid_len = skb->data[1];
+ memcpy(transaction->aid, &skb->data[2],
+diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
+index 9c332a6f6667..476728bdae8c 100644
+--- a/fs/ceph/addr.c
++++ b/fs/ceph/addr.c
+@@ -913,8 +913,9 @@ get_more_pages:
+ if (page_offset(page) >= ceph_wbc.i_size) {
+ dout("%p page eof %llu\n",
+ page, ceph_wbc.i_size);
+- if (ceph_wbc.size_stable ||
+- page_offset(page) >= i_size_read(inode))
++ if ((ceph_wbc.size_stable ||
++ page_offset(page) >= i_size_read(inode)) &&
++ clear_page_dirty_for_io(page))
+ mapping->a_ops->invalidatepage(page,
+ 0, PAGE_SIZE);
+ unlock_page(page);
+diff --git a/fs/ceph/locks.c b/fs/ceph/locks.c
+index 9dae2ec7e1fa..6a8f4a99582e 100644
+--- a/fs/ceph/locks.c
++++ b/fs/ceph/locks.c
+@@ -111,8 +111,7 @@ static int ceph_lock_message(u8 lock_type, u16 operation, struct inode *inode,
+ req->r_wait_for_completion = ceph_lock_wait_for_completion;
+
+ err = ceph_mdsc_do_request(mdsc, inode, req);
+-
+- if (operation == CEPH_MDS_OP_GETFILELOCK) {
++ if (!err && operation == CEPH_MDS_OP_GETFILELOCK) {
+ fl->fl_pid = -le64_to_cpu(req->r_reply_info.filelock_reply->pid);
+ if (CEPH_LOCK_SHARED == req->r_reply_info.filelock_reply->type)
+ fl->fl_type = F_RDLCK;
+diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
+index 0ccf8f9b63a2..cc9e846a3865 100644
+--- a/fs/cifs/smb2ops.c
++++ b/fs/cifs/smb2ops.c
+@@ -2545,7 +2545,15 @@ fill_transform_hdr(struct smb2_transform_hdr *tr_hdr, unsigned int orig_len,
+ static inline void smb2_sg_set_buf(struct scatterlist *sg, const void *buf,
+ unsigned int buflen)
+ {
+- sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf));
++ void *addr;
++ /*
++ * VMAP_STACK (at least) puts stack into the vmalloc address space
++ */
++ if (is_vmalloc_addr(buf))
++ addr = vmalloc_to_page(buf);
++ else
++ addr = virt_to_page(buf);
++ sg_set_page(sg, addr, buflen, offset_in_page(buf));
+ }
+
+ /* Assumes the first rqst has a transform header as the first iov.
+@@ -3121,7 +3129,6 @@ receive_encrypted_standard(struct TCP_Server_Info *server,
+ {
+ int ret, length;
+ char *buf = server->smallbuf;
+- char *tmpbuf;
+ struct smb2_sync_hdr *shdr;
+ unsigned int pdu_length = server->pdu_size;
+ unsigned int buf_size;
+@@ -3151,18 +3158,15 @@ receive_encrypted_standard(struct TCP_Server_Info *server,
+ return length;
+
+ next_is_large = server->large_buf;
+- one_more:
++one_more:
+ shdr = (struct smb2_sync_hdr *)buf;
+ if (shdr->NextCommand) {
+- if (next_is_large) {
+- tmpbuf = server->bigbuf;
++ if (next_is_large)
+ next_buffer = (char *)cifs_buf_get();
+- } else {
+- tmpbuf = server->smallbuf;
++ else
+ next_buffer = (char *)cifs_small_buf_get();
+- }
+ memcpy(next_buffer,
+- tmpbuf + le32_to_cpu(shdr->NextCommand),
++ buf + le32_to_cpu(shdr->NextCommand),
+ pdu_length - le32_to_cpu(shdr->NextCommand));
+ }
+
+@@ -3191,12 +3195,21 @@ receive_encrypted_standard(struct TCP_Server_Info *server,
+ pdu_length -= le32_to_cpu(shdr->NextCommand);
+ server->large_buf = next_is_large;
+ if (next_is_large)
+- server->bigbuf = next_buffer;
++ server->bigbuf = buf = next_buffer;
+ else
+- server->smallbuf = next_buffer;
+-
+- buf += le32_to_cpu(shdr->NextCommand);
++ server->smallbuf = buf = next_buffer;
+ goto one_more;
++ } else if (ret != 0) {
++ /*
++ * ret != 0 here means that we didn't get to handle_mid() thus
++ * server->smallbuf and server->bigbuf are still valid. We need
++ * to free next_buffer because it is not going to be used
++ * anywhere.
++ */ ++ if (next_is_large) ++ free_rsp_buf(CIFS_LARGE_BUFFER, next_buffer); ++ else ++ free_rsp_buf(CIFS_SMALL_BUFFER, next_buffer); + } + + return ret; +diff --git a/fs/nfs/fscache.c b/fs/nfs/fscache.c +index 4dc887813c71..a7bc4e0494f9 100644 +--- a/fs/nfs/fscache.c ++++ b/fs/nfs/fscache.c +@@ -118,6 +118,10 @@ void nfs_fscache_get_super_cookie(struct super_block *sb, const char *uniq, int + struct rb_node **p, *parent; + int diff; + ++ nfss->fscache_key = NULL; ++ nfss->fscache = NULL; ++ if (!(nfss->options & NFS_OPTION_FSCACHE)) ++ return; + if (!uniq) { + uniq = ""; + ulen = 1; +@@ -230,10 +234,11 @@ void nfs_fscache_release_super_cookie(struct super_block *sb) + void nfs_fscache_init_inode(struct inode *inode) + { + struct nfs_fscache_inode_auxdata auxdata; ++ struct nfs_server *nfss = NFS_SERVER(inode); + struct nfs_inode *nfsi = NFS_I(inode); + + nfsi->fscache = NULL; +- if (!S_ISREG(inode->i_mode)) ++ if (!(nfss->fscache && S_ISREG(inode->i_mode))) + return; + + memset(&auxdata, 0, sizeof(auxdata)); +diff --git a/fs/nfs/fscache.h b/fs/nfs/fscache.h +index 161ba2edb9d0..6363ea956858 100644 +--- a/fs/nfs/fscache.h ++++ b/fs/nfs/fscache.h +@@ -186,7 +186,7 @@ static inline void nfs_fscache_wait_on_invalidate(struct inode *inode) + */ + static inline const char *nfs_server_fscache_state(struct nfs_server *server) + { +- if (server->fscache && (server->options & NFS_OPTION_FSCACHE)) ++ if (server->fscache) + return "yes"; + return "no "; + } +diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h +index 63287d911c08..5b61520dce88 100644 +--- a/fs/nfs/nfs4_fs.h ++++ b/fs/nfs/nfs4_fs.h +@@ -469,7 +469,8 @@ static inline void nfs4_schedule_session_recovery(struct nfs4_session *session, + + extern struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *, struct rpc_cred *, gfp_t); + extern void nfs4_put_state_owner(struct nfs4_state_owner *); +-extern void nfs4_purge_state_owners(struct nfs_server *); ++extern void nfs4_purge_state_owners(struct nfs_server *, struct list_head *); ++extern void nfs4_free_state_owners(struct list_head *head); + extern struct nfs4_state * nfs4_get_open_state(struct inode *, struct nfs4_state_owner *); + extern void nfs4_put_open_state(struct nfs4_state *); + extern void nfs4_close_state(struct nfs4_state *, fmode_t); +diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c +index 8f53455c4765..86991bcfbeb1 100644 +--- a/fs/nfs/nfs4client.c ++++ b/fs/nfs/nfs4client.c +@@ -754,9 +754,12 @@ out: + + static void nfs4_destroy_server(struct nfs_server *server) + { ++ LIST_HEAD(freeme); ++ + nfs_server_return_all_delegations(server); + unset_pnfs_layoutdriver(server); +- nfs4_purge_state_owners(server); ++ nfs4_purge_state_owners(server, &freeme); ++ nfs4_free_state_owners(&freeme); + } + + /* +diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c +index 3ba2087469ac..c36ef75f2054 100644 +--- a/fs/nfs/nfs4state.c ++++ b/fs/nfs/nfs4state.c +@@ -628,24 +628,39 @@ void nfs4_put_state_owner(struct nfs4_state_owner *sp) + /** + * nfs4_purge_state_owners - Release all cached state owners + * @server: nfs_server with cached state owners to release ++ * @head: resulting list of state owners + * + * Called at umount time. Remaining state owners will be on + * the LRU with ref count of zero. ++ * Note that the state owners are not freed, but are added ++ * to the list @head, which can later be used as an argument ++ * to nfs4_free_state_owners. 
+ */ +-void nfs4_purge_state_owners(struct nfs_server *server) ++void nfs4_purge_state_owners(struct nfs_server *server, struct list_head *head) + { + struct nfs_client *clp = server->nfs_client; + struct nfs4_state_owner *sp, *tmp; +- LIST_HEAD(doomed); + + spin_lock(&clp->cl_lock); + list_for_each_entry_safe(sp, tmp, &server->state_owners_lru, so_lru) { +- list_move(&sp->so_lru, &doomed); ++ list_move(&sp->so_lru, head); + nfs4_remove_state_owner_locked(sp); + } + spin_unlock(&clp->cl_lock); ++} + +- list_for_each_entry_safe(sp, tmp, &doomed, so_lru) { ++/** ++ * nfs4_purge_state_owners - Release all cached state owners ++ * @head: resulting list of state owners ++ * ++ * Frees a list of state owners that was generated by ++ * nfs4_purge_state_owners ++ */ ++void nfs4_free_state_owners(struct list_head *head) ++{ ++ struct nfs4_state_owner *sp, *tmp; ++ ++ list_for_each_entry_safe(sp, tmp, head, so_lru) { + list_del(&sp->so_lru); + nfs4_free_state_owner(sp); + } +@@ -1843,12 +1858,13 @@ static int nfs4_do_reclaim(struct nfs_client *clp, const struct nfs4_state_recov + struct nfs4_state_owner *sp; + struct nfs_server *server; + struct rb_node *pos; ++ LIST_HEAD(freeme); + int status = 0; + + restart: + rcu_read_lock(); + list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) { +- nfs4_purge_state_owners(server); ++ nfs4_purge_state_owners(server, &freeme); + spin_lock(&clp->cl_lock); + for (pos = rb_first(&server->state_owners); + pos != NULL; +@@ -1877,6 +1893,7 @@ restart: + spin_unlock(&clp->cl_lock); + } + rcu_read_unlock(); ++ nfs4_free_state_owners(&freeme); + return 0; + } + +diff --git a/fs/nfs/super.c b/fs/nfs/super.c +index 6df9b85caf20..d90efdea9fbd 100644 +--- a/fs/nfs/super.c ++++ b/fs/nfs/super.c +@@ -2239,6 +2239,7 @@ nfs_compare_remount_data(struct nfs_server *nfss, + data->acdirmin != nfss->acdirmin / HZ || + data->acdirmax != nfss->acdirmax / HZ || + data->timeo != (10U * nfss->client->cl_timeout->to_initval / HZ) || ++ (data->options & NFS_OPTION_FSCACHE) != (nfss->options & NFS_OPTION_FSCACHE) || + data->nfs_server.port != nfss->port || + data->nfs_server.addrlen != nfss->nfs_client->cl_addrlen || + !rpc_cmp_addr((struct sockaddr *)&data->nfs_server.address, +diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c +index e1ebdbe40032..9c2955f67f70 100644 +--- a/fs/userfaultfd.c ++++ b/fs/userfaultfd.c +@@ -881,6 +881,7 @@ static int userfaultfd_release(struct inode *inode, struct file *file) + /* len == 0 means wake all */ + struct userfaultfd_wake_range range = { .len = 0, }; + unsigned long new_flags; ++ bool still_valid; + + WRITE_ONCE(ctx->released, true); + +@@ -896,8 +897,7 @@ static int userfaultfd_release(struct inode *inode, struct file *file) + * taking the mmap_sem for writing. 
+ */ + down_write(&mm->mmap_sem); +- if (!mmget_still_valid(mm)) +- goto skip_mm; ++ still_valid = mmget_still_valid(mm); + prev = NULL; + for (vma = mm->mmap; vma; vma = vma->vm_next) { + cond_resched(); +@@ -908,19 +908,20 @@ static int userfaultfd_release(struct inode *inode, struct file *file) + continue; + } + new_flags = vma->vm_flags & ~(VM_UFFD_MISSING | VM_UFFD_WP); +- prev = vma_merge(mm, prev, vma->vm_start, vma->vm_end, +- new_flags, vma->anon_vma, +- vma->vm_file, vma->vm_pgoff, +- vma_policy(vma), +- NULL_VM_UFFD_CTX); +- if (prev) +- vma = prev; +- else +- prev = vma; ++ if (still_valid) { ++ prev = vma_merge(mm, prev, vma->vm_start, vma->vm_end, ++ new_flags, vma->anon_vma, ++ vma->vm_file, vma->vm_pgoff, ++ vma_policy(vma), ++ NULL_VM_UFFD_CTX); ++ if (prev) ++ vma = prev; ++ else ++ prev = vma; ++ } + vma->vm_flags = new_flags; + vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX; + } +-skip_mm: + up_write(&mm->mmap_sem); + mmput(mm); + wakeup: +diff --git a/fs/xfs/libxfs/xfs_attr.c b/fs/xfs/libxfs/xfs_attr.c +index c6299f82a6e4..6410d3e00ce0 100644 +--- a/fs/xfs/libxfs/xfs_attr.c ++++ b/fs/xfs/libxfs/xfs_attr.c +@@ -191,6 +191,121 @@ xfs_attr_calc_size( + return nblks; + } + ++STATIC int ++xfs_attr_try_sf_addname( ++ struct xfs_inode *dp, ++ struct xfs_da_args *args) ++{ ++ ++ struct xfs_mount *mp = dp->i_mount; ++ int error, error2; ++ ++ error = xfs_attr_shortform_addname(args); ++ if (error == -ENOSPC) ++ return error; ++ ++ /* ++ * Commit the shortform mods, and we're done. ++ * NOTE: this is also the error path (EEXIST, etc). ++ */ ++ if (!error && (args->flags & ATTR_KERNOTIME) == 0) ++ xfs_trans_ichgtime(args->trans, dp, XFS_ICHGTIME_CHG); ++ ++ if (mp->m_flags & XFS_MOUNT_WSYNC) ++ xfs_trans_set_sync(args->trans); ++ ++ error2 = xfs_trans_commit(args->trans); ++ args->trans = NULL; ++ return error ? error : error2; ++} ++ ++/* ++ * Set the attribute specified in @args. ++ */ ++int ++xfs_attr_set_args( ++ struct xfs_da_args *args) ++{ ++ struct xfs_inode *dp = args->dp; ++ struct xfs_buf *leaf_bp = NULL; ++ int error; ++ ++ /* ++ * If the attribute list is non-existent or a shortform list, ++ * upgrade it to a single-leaf-block attribute list. ++ */ ++ if (dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL || ++ (dp->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS && ++ dp->i_d.di_anextents == 0)) { ++ ++ /* ++ * Build initial attribute list (if required). ++ */ ++ if (dp->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS) ++ xfs_attr_shortform_create(args); ++ ++ /* ++ * Try to add the attr to the attribute list in the inode. ++ */ ++ error = xfs_attr_try_sf_addname(dp, args); ++ if (error != -ENOSPC) ++ return error; ++ ++ /* ++ * It won't fit in the shortform, transform to a leaf block. ++ * GROT: another possible req'mt for a double-split btree op. ++ */ ++ error = xfs_attr_shortform_to_leaf(args, &leaf_bp); ++ if (error) ++ return error; ++ ++ /* ++ * Prevent the leaf buffer from being unlocked so that a ++ * concurrent AIL push cannot grab the half-baked leaf ++ * buffer and run into problems with the write verifier. ++ * Once we're done rolling the transaction we can release ++ * the hold and add the attr to the leaf. 
++ */ ++ xfs_trans_bhold(args->trans, leaf_bp); ++ error = xfs_defer_finish(&args->trans); ++ xfs_trans_bhold_release(args->trans, leaf_bp); ++ if (error) { ++ xfs_trans_brelse(args->trans, leaf_bp); ++ return error; ++ } ++ } ++ ++ if (xfs_bmap_one_block(dp, XFS_ATTR_FORK)) ++ error = xfs_attr_leaf_addname(args); ++ else ++ error = xfs_attr_node_addname(args); ++ return error; ++} ++ ++/* ++ * Remove the attribute specified in @args. ++ */ ++int ++xfs_attr_remove_args( ++ struct xfs_da_args *args) ++{ ++ struct xfs_inode *dp = args->dp; ++ int error; ++ ++ if (!xfs_inode_hasattr(dp)) { ++ error = -ENOATTR; ++ } else if (dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) { ++ ASSERT(dp->i_afp->if_flags & XFS_IFINLINE); ++ error = xfs_attr_shortform_remove(args); ++ } else if (xfs_bmap_one_block(dp, XFS_ATTR_FORK)) { ++ error = xfs_attr_leaf_removename(args); ++ } else { ++ error = xfs_attr_node_removename(args); ++ } ++ ++ return error; ++} ++ + int + xfs_attr_set( + struct xfs_inode *dp, +@@ -200,11 +315,10 @@ xfs_attr_set( + int flags) + { + struct xfs_mount *mp = dp->i_mount; +- struct xfs_buf *leaf_bp = NULL; + struct xfs_da_args args; + struct xfs_trans_res tres; + int rsvd = (flags & ATTR_ROOT) != 0; +- int error, err2, local; ++ int error, local; + + XFS_STATS_INC(mp, xs_attr_set); + +@@ -255,93 +369,17 @@ xfs_attr_set( + error = xfs_trans_reserve_quota_nblks(args.trans, dp, args.total, 0, + rsvd ? XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES : + XFS_QMOPT_RES_REGBLKS); +- if (error) { +- xfs_iunlock(dp, XFS_ILOCK_EXCL); +- xfs_trans_cancel(args.trans); +- return error; +- } ++ if (error) ++ goto out_trans_cancel; + + xfs_trans_ijoin(args.trans, dp, 0); +- +- /* +- * If the attribute list is non-existent or a shortform list, +- * upgrade it to a single-leaf-block attribute list. +- */ +- if (dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL || +- (dp->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS && +- dp->i_d.di_anextents == 0)) { +- +- /* +- * Build initial attribute list (if required). +- */ +- if (dp->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS) +- xfs_attr_shortform_create(&args); +- +- /* +- * Try to add the attr to the attribute list in +- * the inode. +- */ +- error = xfs_attr_shortform_addname(&args); +- if (error != -ENOSPC) { +- /* +- * Commit the shortform mods, and we're done. +- * NOTE: this is also the error path (EEXIST, etc). +- */ +- ASSERT(args.trans != NULL); +- +- /* +- * If this is a synchronous mount, make sure that +- * the transaction goes to disk before returning +- * to the user. +- */ +- if (mp->m_flags & XFS_MOUNT_WSYNC) +- xfs_trans_set_sync(args.trans); +- +- if (!error && (flags & ATTR_KERNOTIME) == 0) { +- xfs_trans_ichgtime(args.trans, dp, +- XFS_ICHGTIME_CHG); +- } +- err2 = xfs_trans_commit(args.trans); +- xfs_iunlock(dp, XFS_ILOCK_EXCL); +- +- return error ? error : err2; +- } +- +- /* +- * It won't fit in the shortform, transform to a leaf block. +- * GROT: another possible req'mt for a double-split btree op. +- */ +- error = xfs_attr_shortform_to_leaf(&args, &leaf_bp); +- if (error) +- goto out; +- /* +- * Prevent the leaf buffer from being unlocked so that a +- * concurrent AIL push cannot grab the half-baked leaf +- * buffer and run into problems with the write verifier. +- */ +- xfs_trans_bhold(args.trans, leaf_bp); +- error = xfs_defer_finish(&args.trans); +- if (error) +- goto out; +- +- /* +- * Commit the leaf transformation. 
We'll need another (linked) +- * transaction to add the new attribute to the leaf, which +- * means that we have to hold & join the leaf buffer here too. +- */ +- error = xfs_trans_roll_inode(&args.trans, dp); +- if (error) +- goto out; +- xfs_trans_bjoin(args.trans, leaf_bp); +- leaf_bp = NULL; +- } +- +- if (xfs_bmap_one_block(dp, XFS_ATTR_FORK)) +- error = xfs_attr_leaf_addname(&args); +- else +- error = xfs_attr_node_addname(&args); ++ error = xfs_attr_set_args(&args); + if (error) +- goto out; ++ goto out_trans_cancel; ++ if (!args.trans) { ++ /* shortform attribute has already been committed */ ++ goto out_unlock; ++ } + + /* + * If this is a synchronous mount, make sure that the +@@ -358,17 +396,14 @@ xfs_attr_set( + */ + xfs_trans_log_inode(args.trans, dp, XFS_ILOG_CORE); + error = xfs_trans_commit(args.trans); ++out_unlock: + xfs_iunlock(dp, XFS_ILOCK_EXCL); +- + return error; + +-out: +- if (leaf_bp) +- xfs_trans_brelse(args.trans, leaf_bp); ++out_trans_cancel: + if (args.trans) + xfs_trans_cancel(args.trans); +- xfs_iunlock(dp, XFS_ILOCK_EXCL); +- return error; ++ goto out_unlock; + } + + /* +@@ -423,17 +458,7 @@ xfs_attr_remove( + */ + xfs_trans_ijoin(args.trans, dp, 0); + +- if (!xfs_inode_hasattr(dp)) { +- error = -ENOATTR; +- } else if (dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) { +- ASSERT(dp->i_afp->if_flags & XFS_IFINLINE); +- error = xfs_attr_shortform_remove(&args); +- } else if (xfs_bmap_one_block(dp, XFS_ATTR_FORK)) { +- error = xfs_attr_leaf_removename(&args); +- } else { +- error = xfs_attr_node_removename(&args); +- } +- ++ error = xfs_attr_remove_args(&args); + if (error) + goto out; + +diff --git a/fs/xfs/libxfs/xfs_attr.h b/fs/xfs/libxfs/xfs_attr.h +new file mode 100644 +index 000000000000..cc04ee0aacfb +--- /dev/null ++++ b/fs/xfs/libxfs/xfs_attr.h +@@ -0,0 +1,150 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * Copyright (c) 2000,2002-2003,2005 Silicon Graphics, Inc. ++ * All Rights Reserved. ++ */ ++#ifndef __XFS_ATTR_H__ ++#define __XFS_ATTR_H__ ++ ++struct xfs_inode; ++struct xfs_da_args; ++struct xfs_attr_list_context; ++ ++/* ++ * Large attribute lists are structured around Btrees where all the data ++ * elements are in the leaf nodes. Attribute names are hashed into an int, ++ * then that int is used as the index into the Btree. Since the hashval ++ * of an attribute name may not be unique, we may have duplicate keys. ++ * The internal links in the Btree are logical block offsets into the file. ++ * ++ * Small attribute lists use a different format and are packed as tightly ++ * as possible so as to fit into the literal area of the inode. 
++ */ ++ ++/*======================================================================== ++ * External interfaces ++ *========================================================================*/ ++ ++ ++#define ATTR_DONTFOLLOW 0x0001 /* -- unused, from IRIX -- */ ++#define ATTR_ROOT 0x0002 /* use attrs in root (trusted) namespace */ ++#define ATTR_TRUST 0x0004 /* -- unused, from IRIX -- */ ++#define ATTR_SECURE 0x0008 /* use attrs in security namespace */ ++#define ATTR_CREATE 0x0010 /* pure create: fail if attr already exists */ ++#define ATTR_REPLACE 0x0020 /* pure set: fail if attr does not exist */ ++ ++#define ATTR_KERNOTIME 0x1000 /* [kernel] don't update inode timestamps */ ++#define ATTR_KERNOVAL 0x2000 /* [kernel] get attr size only, not value */ ++ ++#define ATTR_INCOMPLETE 0x4000 /* [kernel] return INCOMPLETE attr keys */ ++ ++#define XFS_ATTR_FLAGS \ ++ { ATTR_DONTFOLLOW, "DONTFOLLOW" }, \ ++ { ATTR_ROOT, "ROOT" }, \ ++ { ATTR_TRUST, "TRUST" }, \ ++ { ATTR_SECURE, "SECURE" }, \ ++ { ATTR_CREATE, "CREATE" }, \ ++ { ATTR_REPLACE, "REPLACE" }, \ ++ { ATTR_KERNOTIME, "KERNOTIME" }, \ ++ { ATTR_KERNOVAL, "KERNOVAL" }, \ ++ { ATTR_INCOMPLETE, "INCOMPLETE" } ++ ++/* ++ * The maximum size (into the kernel or returned from the kernel) of an ++ * attribute value or the buffer used for an attr_list() call. Larger ++ * sizes will result in an ERANGE return code. ++ */ ++#define ATTR_MAX_VALUELEN (64*1024) /* max length of a value */ ++ ++/* ++ * Define how lists of attribute names are returned to the user from ++ * the attr_list() call. A large, 32bit aligned, buffer is passed in ++ * along with its size. We put an array of offsets at the top that each ++ * reference an attrlist_ent_t and pack the attrlist_ent_t's at the bottom. ++ */ ++typedef struct attrlist { ++ __s32 al_count; /* number of entries in attrlist */ ++ __s32 al_more; /* T/F: more attrs (do call again) */ ++ __s32 al_offset[1]; /* byte offsets of attrs [var-sized] */ ++} attrlist_t; ++ ++/* ++ * Show the interesting info about one attribute. This is what the ++ * al_offset[i] entry points to. ++ */ ++typedef struct attrlist_ent { /* data from attr_list() */ ++ __u32 a_valuelen; /* number bytes in value of attr */ ++ char a_name[1]; /* attr name (NULL terminated) */ ++} attrlist_ent_t; ++ ++/* ++ * Given a pointer to the (char*) buffer containing the attr_list() result, ++ * and an index, return a pointer to the indicated attribute in the buffer. ++ */ ++#define ATTR_ENTRY(buffer, index) \ ++ ((attrlist_ent_t *) \ ++ &((char *)buffer)[ ((attrlist_t *)(buffer))->al_offset[index] ]) ++ ++/* ++ * Kernel-internal version of the attrlist cursor. ++ */ ++typedef struct attrlist_cursor_kern { ++ __u32 hashval; /* hash value of next entry to add */ ++ __u32 blkno; /* block containing entry (suggestion) */ ++ __u32 offset; /* offset in list of equal-hashvals */ ++ __u16 pad1; /* padding to match user-level */ ++ __u8 pad2; /* padding to match user-level */ ++ __u8 initted; /* T/F: cursor has been initialized */ ++} attrlist_cursor_kern_t; ++ ++ ++/*======================================================================== ++ * Structure used to pass context around among the routines. 
++ *========================================================================*/ ++ ++ ++/* void; state communicated via *context */ ++typedef void (*put_listent_func_t)(struct xfs_attr_list_context *, int, ++ unsigned char *, int, int); ++ ++typedef struct xfs_attr_list_context { ++ struct xfs_trans *tp; ++ struct xfs_inode *dp; /* inode */ ++ struct attrlist_cursor_kern *cursor; /* position in list */ ++ char *alist; /* output buffer */ ++ int seen_enough; /* T/F: seen enough of list? */ ++ ssize_t count; /* num used entries */ ++ int dupcnt; /* count dup hashvals seen */ ++ int bufsize; /* total buffer size */ ++ int firstu; /* first used byte in buffer */ ++ int flags; /* from VOP call */ ++ int resynch; /* T/F: resynch with cursor */ ++ put_listent_func_t put_listent; /* list output fmt function */ ++ int index; /* index into output buffer */ ++} xfs_attr_list_context_t; ++ ++ ++/*======================================================================== ++ * Function prototypes for the kernel. ++ *========================================================================*/ ++ ++/* ++ * Overall external interface routines. ++ */ ++int xfs_attr_inactive(struct xfs_inode *dp); ++int xfs_attr_list_int_ilocked(struct xfs_attr_list_context *); ++int xfs_attr_list_int(struct xfs_attr_list_context *); ++int xfs_inode_hasattr(struct xfs_inode *ip); ++int xfs_attr_get_ilocked(struct xfs_inode *ip, struct xfs_da_args *args); ++int xfs_attr_get(struct xfs_inode *ip, const unsigned char *name, ++ unsigned char *value, int *valuelenp, int flags); ++int xfs_attr_set(struct xfs_inode *dp, const unsigned char *name, ++ unsigned char *value, int valuelen, int flags); ++int xfs_attr_set_args(struct xfs_da_args *args); ++int xfs_attr_remove(struct xfs_inode *dp, const unsigned char *name, int flags); ++int xfs_attr_remove_args(struct xfs_da_args *args); ++int xfs_attr_list(struct xfs_inode *dp, char *buffer, int bufsize, ++ int flags, struct attrlist_cursor_kern *cursor); ++ ++ ++#endif /* __XFS_ATTR_H__ */ +diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c +index 3a496ffe6551..06a7da8dbda5 100644 +--- a/fs/xfs/libxfs/xfs_bmap.c ++++ b/fs/xfs/libxfs/xfs_bmap.c +@@ -1019,6 +1019,34 @@ xfs_bmap_add_attrfork_local( + return -EFSCORRUPTED; + } + ++/* Set an inode attr fork off based on the format */ ++int ++xfs_bmap_set_attrforkoff( ++ struct xfs_inode *ip, ++ int size, ++ int *version) ++{ ++ switch (ip->i_d.di_format) { ++ case XFS_DINODE_FMT_DEV: ++ ip->i_d.di_forkoff = roundup(sizeof(xfs_dev_t), 8) >> 3; ++ break; ++ case XFS_DINODE_FMT_LOCAL: ++ case XFS_DINODE_FMT_EXTENTS: ++ case XFS_DINODE_FMT_BTREE: ++ ip->i_d.di_forkoff = xfs_attr_shortform_bytesfit(ip, size); ++ if (!ip->i_d.di_forkoff) ++ ip->i_d.di_forkoff = xfs_default_attroffset(ip) >> 3; ++ else if ((ip->i_mount->m_flags & XFS_MOUNT_ATTR2) && version) ++ *version = 2; ++ break; ++ default: ++ ASSERT(0); ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++ + /* + * Convert inode from non-attributed to attributed. + * Must not be in a transaction, ip must not be locked. 
+@@ -1070,26 +1098,9 @@ xfs_bmap_add_attrfork( + + xfs_trans_ijoin(tp, ip, 0); + xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); +- +- switch (ip->i_d.di_format) { +- case XFS_DINODE_FMT_DEV: +- ip->i_d.di_forkoff = roundup(sizeof(xfs_dev_t), 8) >> 3; +- break; +- case XFS_DINODE_FMT_LOCAL: +- case XFS_DINODE_FMT_EXTENTS: +- case XFS_DINODE_FMT_BTREE: +- ip->i_d.di_forkoff = xfs_attr_shortform_bytesfit(ip, size); +- if (!ip->i_d.di_forkoff) +- ip->i_d.di_forkoff = xfs_default_attroffset(ip) >> 3; +- else if (mp->m_flags & XFS_MOUNT_ATTR2) +- version = 2; +- break; +- default: +- ASSERT(0); +- error = -EINVAL; ++ error = xfs_bmap_set_attrforkoff(ip, size, &version); ++ if (error) + goto trans_cancel; +- } +- + ASSERT(ip->i_afp == NULL); + ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP); + ip->i_afp->if_flags = XFS_IFEXTENTS; +@@ -1178,7 +1189,10 @@ xfs_iread_extents( + * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out. + */ + level = be16_to_cpu(block->bb_level); +- ASSERT(level > 0); ++ if (unlikely(level == 0)) { ++ XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp); ++ return -EFSCORRUPTED; ++ } + pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes); + bno = be64_to_cpu(*pp); + +diff --git a/fs/xfs/libxfs/xfs_bmap.h b/fs/xfs/libxfs/xfs_bmap.h +index b6e9b639e731..488dc8860fd7 100644 +--- a/fs/xfs/libxfs/xfs_bmap.h ++++ b/fs/xfs/libxfs/xfs_bmap.h +@@ -183,6 +183,7 @@ void xfs_trim_extent(struct xfs_bmbt_irec *irec, xfs_fileoff_t bno, + xfs_filblks_t len); + void xfs_trim_extent_eof(struct xfs_bmbt_irec *, struct xfs_inode *); + int xfs_bmap_add_attrfork(struct xfs_inode *ip, int size, int rsvd); ++int xfs_bmap_set_attrforkoff(struct xfs_inode *ip, int size, int *version); + void xfs_bmap_local_to_extents_empty(struct xfs_inode *ip, int whichfork); + void __xfs_bmap_add_free(struct xfs_trans *tp, xfs_fsblock_t bno, + xfs_filblks_t len, struct xfs_owner_info *oinfo, +diff --git a/fs/xfs/libxfs/xfs_defer.c b/fs/xfs/libxfs/xfs_defer.c +index e792b167150a..c52beee31836 100644 +--- a/fs/xfs/libxfs/xfs_defer.c ++++ b/fs/xfs/libxfs/xfs_defer.c +@@ -266,13 +266,15 @@ xfs_defer_trans_roll( + + trace_xfs_defer_trans_roll(tp, _RET_IP_); + +- /* Roll the transaction. */ ++ /* ++ * Roll the transaction. Rolling always given a new transaction (even ++ * if committing the old one fails!) to hand back to the caller, so we ++ * join the held resources to the new transaction so that we always ++ * return with the held resources joined to @tpp, no matter what ++ * happened. ++ */ + error = xfs_trans_roll(tpp); + tp = *tpp; +- if (error) { +- trace_xfs_defer_trans_roll_error(tp, error); +- return error; +- } + + /* Rejoin the joined inodes. */ + for (i = 0; i < ipcount; i++) +@@ -284,6 +286,8 @@ xfs_defer_trans_roll( + xfs_trans_bhold(tp, bplist[i]); + } + ++ if (error) ++ trace_xfs_defer_trans_roll_error(tp, error); + return error; + } + +diff --git a/fs/xfs/xfs_attr.h b/fs/xfs/xfs_attr.h +deleted file mode 100644 +index 033ff8c478e2..000000000000 +--- a/fs/xfs/xfs_attr.h ++++ /dev/null +@@ -1,148 +0,0 @@ +-// SPDX-License-Identifier: GPL-2.0 +-/* +- * Copyright (c) 2000,2002-2003,2005 Silicon Graphics, Inc. +- * All Rights Reserved. +- */ +-#ifndef __XFS_ATTR_H__ +-#define __XFS_ATTR_H__ +- +-struct xfs_inode; +-struct xfs_da_args; +-struct xfs_attr_list_context; +- +-/* +- * Large attribute lists are structured around Btrees where all the data +- * elements are in the leaf nodes. Attribute names are hashed into an int, +- * then that int is used as the index into the Btree. 
Since the hashval +- * of an attribute name may not be unique, we may have duplicate keys. +- * The internal links in the Btree are logical block offsets into the file. +- * +- * Small attribute lists use a different format and are packed as tightly +- * as possible so as to fit into the literal area of the inode. +- */ +- +-/*======================================================================== +- * External interfaces +- *========================================================================*/ +- +- +-#define ATTR_DONTFOLLOW 0x0001 /* -- unused, from IRIX -- */ +-#define ATTR_ROOT 0x0002 /* use attrs in root (trusted) namespace */ +-#define ATTR_TRUST 0x0004 /* -- unused, from IRIX -- */ +-#define ATTR_SECURE 0x0008 /* use attrs in security namespace */ +-#define ATTR_CREATE 0x0010 /* pure create: fail if attr already exists */ +-#define ATTR_REPLACE 0x0020 /* pure set: fail if attr does not exist */ +- +-#define ATTR_KERNOTIME 0x1000 /* [kernel] don't update inode timestamps */ +-#define ATTR_KERNOVAL 0x2000 /* [kernel] get attr size only, not value */ +- +-#define ATTR_INCOMPLETE 0x4000 /* [kernel] return INCOMPLETE attr keys */ +- +-#define XFS_ATTR_FLAGS \ +- { ATTR_DONTFOLLOW, "DONTFOLLOW" }, \ +- { ATTR_ROOT, "ROOT" }, \ +- { ATTR_TRUST, "TRUST" }, \ +- { ATTR_SECURE, "SECURE" }, \ +- { ATTR_CREATE, "CREATE" }, \ +- { ATTR_REPLACE, "REPLACE" }, \ +- { ATTR_KERNOTIME, "KERNOTIME" }, \ +- { ATTR_KERNOVAL, "KERNOVAL" }, \ +- { ATTR_INCOMPLETE, "INCOMPLETE" } +- +-/* +- * The maximum size (into the kernel or returned from the kernel) of an +- * attribute value or the buffer used for an attr_list() call. Larger +- * sizes will result in an ERANGE return code. +- */ +-#define ATTR_MAX_VALUELEN (64*1024) /* max length of a value */ +- +-/* +- * Define how lists of attribute names are returned to the user from +- * the attr_list() call. A large, 32bit aligned, buffer is passed in +- * along with its size. We put an array of offsets at the top that each +- * reference an attrlist_ent_t and pack the attrlist_ent_t's at the bottom. +- */ +-typedef struct attrlist { +- __s32 al_count; /* number of entries in attrlist */ +- __s32 al_more; /* T/F: more attrs (do call again) */ +- __s32 al_offset[1]; /* byte offsets of attrs [var-sized] */ +-} attrlist_t; +- +-/* +- * Show the interesting info about one attribute. This is what the +- * al_offset[i] entry points to. +- */ +-typedef struct attrlist_ent { /* data from attr_list() */ +- __u32 a_valuelen; /* number bytes in value of attr */ +- char a_name[1]; /* attr name (NULL terminated) */ +-} attrlist_ent_t; +- +-/* +- * Given a pointer to the (char*) buffer containing the attr_list() result, +- * and an index, return a pointer to the indicated attribute in the buffer. +- */ +-#define ATTR_ENTRY(buffer, index) \ +- ((attrlist_ent_t *) \ +- &((char *)buffer)[ ((attrlist_t *)(buffer))->al_offset[index] ]) +- +-/* +- * Kernel-internal version of the attrlist cursor. +- */ +-typedef struct attrlist_cursor_kern { +- __u32 hashval; /* hash value of next entry to add */ +- __u32 blkno; /* block containing entry (suggestion) */ +- __u32 offset; /* offset in list of equal-hashvals */ +- __u16 pad1; /* padding to match user-level */ +- __u8 pad2; /* padding to match user-level */ +- __u8 initted; /* T/F: cursor has been initialized */ +-} attrlist_cursor_kern_t; +- +- +-/*======================================================================== +- * Structure used to pass context around among the routines. 
+- *========================================================================*/ +- +- +-/* void; state communicated via *context */ +-typedef void (*put_listent_func_t)(struct xfs_attr_list_context *, int, +- unsigned char *, int, int); +- +-typedef struct xfs_attr_list_context { +- struct xfs_trans *tp; +- struct xfs_inode *dp; /* inode */ +- struct attrlist_cursor_kern *cursor; /* position in list */ +- char *alist; /* output buffer */ +- int seen_enough; /* T/F: seen enough of list? */ +- ssize_t count; /* num used entries */ +- int dupcnt; /* count dup hashvals seen */ +- int bufsize; /* total buffer size */ +- int firstu; /* first used byte in buffer */ +- int flags; /* from VOP call */ +- int resynch; /* T/F: resynch with cursor */ +- put_listent_func_t put_listent; /* list output fmt function */ +- int index; /* index into output buffer */ +-} xfs_attr_list_context_t; +- +- +-/*======================================================================== +- * Function prototypes for the kernel. +- *========================================================================*/ +- +-/* +- * Overall external interface routines. +- */ +-int xfs_attr_inactive(struct xfs_inode *dp); +-int xfs_attr_list_int_ilocked(struct xfs_attr_list_context *); +-int xfs_attr_list_int(struct xfs_attr_list_context *); +-int xfs_inode_hasattr(struct xfs_inode *ip); +-int xfs_attr_get_ilocked(struct xfs_inode *ip, struct xfs_da_args *args); +-int xfs_attr_get(struct xfs_inode *ip, const unsigned char *name, +- unsigned char *value, int *valuelenp, int flags); +-int xfs_attr_set(struct xfs_inode *dp, const unsigned char *name, +- unsigned char *value, int valuelen, int flags); +-int xfs_attr_remove(struct xfs_inode *dp, const unsigned char *name, int flags); +-int xfs_attr_list(struct xfs_inode *dp, char *buffer, int bufsize, +- int flags, struct attrlist_cursor_kern *cursor); +- +- +-#endif /* __XFS_ATTR_H__ */ +diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c +index 87e6dd5326d5..a1af984e4913 100644 +--- a/fs/xfs/xfs_dquot.c ++++ b/fs/xfs/xfs_dquot.c +@@ -277,7 +277,8 @@ xfs_dquot_set_prealloc_limits(struct xfs_dquot *dqp) + + /* + * Ensure that the given in-core dquot has a buffer on disk backing it, and +- * return the buffer. This is called when the bmapi finds a hole. ++ * return the buffer locked and held. This is called when the bmapi finds a ++ * hole. + */ + STATIC int + xfs_dquot_disk_alloc( +@@ -355,13 +356,14 @@ xfs_dquot_disk_alloc( + * If everything succeeds, the caller of this function is returned a + * buffer that is locked and held to the transaction. The caller + * is responsible for unlocking any buffer passed back, either +- * manually or by committing the transaction. ++ * manually or by committing the transaction. On error, the buffer is ++ * released and not passed back. 
+ */ + xfs_trans_bhold(tp, bp); + error = xfs_defer_finish(tpp); +- tp = *tpp; + if (error) { +- xfs_buf_relse(bp); ++ xfs_trans_bhold_release(*tpp, bp); ++ xfs_trans_brelse(*tpp, bp); + return error; + } + *bpp = bp; +@@ -521,7 +523,6 @@ xfs_qm_dqread_alloc( + struct xfs_buf **bpp) + { + struct xfs_trans *tp; +- struct xfs_buf *bp; + int error; + + error = xfs_trans_alloc(mp, &M_RES(mp)->tr_qm_dqalloc, +@@ -529,7 +530,7 @@ xfs_qm_dqread_alloc( + if (error) + goto err; + +- error = xfs_dquot_disk_alloc(&tp, dqp, &bp); ++ error = xfs_dquot_disk_alloc(&tp, dqp, bpp); + if (error) + goto err_cancel; + +@@ -539,10 +540,10 @@ xfs_qm_dqread_alloc( + * Buffer was held to the transaction, so we have to unlock it + * manually here because we're not passing it back. + */ +- xfs_buf_relse(bp); ++ xfs_buf_relse(*bpp); ++ *bpp = NULL; + goto err; + } +- *bpp = bp; + return 0; + + err_cancel: +diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c +index 74047bd0c1ae..e427ad097e2e 100644 +--- a/fs/xfs/xfs_iops.c ++++ b/fs/xfs/xfs_iops.c +@@ -803,6 +803,7 @@ xfs_setattr_nonsize( + + out_cancel: + xfs_trans_cancel(tp); ++ xfs_iunlock(ip, XFS_ILOCK_EXCL); + out_dqrele: + xfs_qm_dqrele(udqp); + xfs_qm_dqrele(gdqp); +diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h +index 147546e0c11b..815dcfa64743 100644 +--- a/include/trace/events/rxrpc.h ++++ b/include/trace/events/rxrpc.h +@@ -500,10 +500,10 @@ rxrpc_tx_points; + #define E_(a, b) { a, b } + + TRACE_EVENT(rxrpc_local, +- TP_PROTO(struct rxrpc_local *local, enum rxrpc_local_trace op, ++ TP_PROTO(unsigned int local_debug_id, enum rxrpc_local_trace op, + int usage, const void *where), + +- TP_ARGS(local, op, usage, where), ++ TP_ARGS(local_debug_id, op, usage, where), + + TP_STRUCT__entry( + __field(unsigned int, local ) +@@ -513,7 +513,7 @@ TRACE_EVENT(rxrpc_local, + ), + + TP_fast_assign( +- __entry->local = local->debug_id; ++ __entry->local = local_debug_id; + __entry->op = op; + __entry->usage = usage; + __entry->where = where; +diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c +index 8e009cee6517..26814a14013c 100644 +--- a/kernel/irq/irqdesc.c ++++ b/kernel/irq/irqdesc.c +@@ -294,6 +294,18 @@ static void irq_sysfs_add(int irq, struct irq_desc *desc) + } + } + ++static void irq_sysfs_del(struct irq_desc *desc) ++{ ++ /* ++ * If irq_sysfs_init() has not yet been invoked (early boot), then ++ * irq_kobj_base is NULL and the descriptor was never added. ++ * kobject_del() complains about a object with no parent, so make ++ * it conditional. ++ */ ++ if (irq_kobj_base) ++ kobject_del(&desc->kobj); ++} ++ + static int __init irq_sysfs_init(void) + { + struct irq_desc *desc; +@@ -324,6 +336,7 @@ static struct kobj_type irq_kobj_type = { + }; + + static void irq_sysfs_add(int irq, struct irq_desc *desc) {} ++static void irq_sysfs_del(struct irq_desc *desc) {} + + #endif /* CONFIG_SYSFS */ + +@@ -437,7 +450,7 @@ static void free_desc(unsigned int irq) + * The sysfs entry must be serialized against a concurrent + * irq_sysfs_init() as well. 
+ */ +- kobject_del(&desc->kobj); ++ irq_sysfs_del(desc); + delete_irq_desc(irq); + + /* +diff --git a/mm/huge_memory.c b/mm/huge_memory.c +index 6fad1864ba03..09ce8528bbdd 100644 +--- a/mm/huge_memory.c ++++ b/mm/huge_memory.c +@@ -33,6 +33,7 @@ + #include <linux/page_idle.h> + #include <linux/shmem_fs.h> + #include <linux/oom.h> ++#include <linux/page_owner.h> + + #include <asm/tlb.h> + #include <asm/pgalloc.h> +@@ -2477,6 +2478,9 @@ static void __split_huge_page(struct page *page, struct list_head *list, + } + + ClearPageCompound(head); ++ ++ split_page_owner(head, HPAGE_PMD_ORDER); ++ + /* See comment in __split_huge_page_tail() */ + if (PageAnon(head)) { + /* Additional pin to radix tree of swap cache */ +diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c +index 9da65552e7ca..c2c4f739da8f 100644 +--- a/mm/zsmalloc.c ++++ b/mm/zsmalloc.c +@@ -53,6 +53,7 @@ + #include <linux/zpool.h> + #include <linux/mount.h> + #include <linux/migrate.h> ++#include <linux/wait.h> + #include <linux/pagemap.h> + #include <linux/fs.h> + +@@ -267,6 +268,10 @@ struct zs_pool { + #ifdef CONFIG_COMPACTION + struct inode *inode; + struct work_struct free_work; ++ /* A wait queue for when migration races with async_free_zspage() */ ++ struct wait_queue_head migration_wait; ++ atomic_long_t isolated_pages; ++ bool destroying; + #endif + }; + +@@ -1882,6 +1887,31 @@ static void dec_zspage_isolation(struct zspage *zspage) + zspage->isolated--; + } + ++static void putback_zspage_deferred(struct zs_pool *pool, ++ struct size_class *class, ++ struct zspage *zspage) ++{ ++ enum fullness_group fg; ++ ++ fg = putback_zspage(class, zspage); ++ if (fg == ZS_EMPTY) ++ schedule_work(&pool->free_work); ++ ++} ++ ++static inline void zs_pool_dec_isolated(struct zs_pool *pool) ++{ ++ VM_BUG_ON(atomic_long_read(&pool->isolated_pages) <= 0); ++ atomic_long_dec(&pool->isolated_pages); ++ /* ++ * There's no possibility of racing, since wait_for_isolated_drain() ++ * checks the isolated count under &class->lock after enqueuing ++ * on migration_wait. ++ */ ++ if (atomic_long_read(&pool->isolated_pages) == 0 && pool->destroying) ++ wake_up_all(&pool->migration_wait); ++} ++ + static void replace_sub_page(struct size_class *class, struct zspage *zspage, + struct page *newpage, struct page *oldpage) + { +@@ -1951,6 +1981,7 @@ static bool zs_page_isolate(struct page *page, isolate_mode_t mode) + */ + if (!list_empty(&zspage->list) && !is_zspage_isolated(zspage)) { + get_zspage_mapping(zspage, &class_idx, &fullness); ++ atomic_long_inc(&pool->isolated_pages); + remove_zspage(class, zspage, fullness); + } + +@@ -2050,8 +2081,16 @@ static int zs_page_migrate(struct address_space *mapping, struct page *newpage, + * Page migration is done so let's putback isolated zspage to + * the list if @page is final isolated subpage in the zspage. + */ +- if (!is_zspage_isolated(zspage)) +- putback_zspage(class, zspage); ++ if (!is_zspage_isolated(zspage)) { ++ /* ++ * We cannot race with zs_destroy_pool() here because we wait ++ * for isolation to hit zero before we start destroying. ++ * Also, we ensure that everyone can see pool->destroying before ++ * we start waiting. 
++ */ ++ putback_zspage_deferred(pool, class, zspage); ++ zs_pool_dec_isolated(pool); ++ } + + reset_page(page); + put_page(page); +@@ -2097,13 +2136,12 @@ static void zs_page_putback(struct page *page) + spin_lock(&class->lock); + dec_zspage_isolation(zspage); + if (!is_zspage_isolated(zspage)) { +- fg = putback_zspage(class, zspage); + /* + * Due to page_lock, we cannot free zspage immediately + * so let's defer. + */ +- if (fg == ZS_EMPTY) +- schedule_work(&pool->free_work); ++ putback_zspage_deferred(pool, class, zspage); ++ zs_pool_dec_isolated(pool); + } + spin_unlock(&class->lock); + } +@@ -2127,8 +2165,36 @@ static int zs_register_migration(struct zs_pool *pool) + return 0; + } + ++static bool pool_isolated_are_drained(struct zs_pool *pool) ++{ ++ return atomic_long_read(&pool->isolated_pages) == 0; ++} ++ ++/* Function for resolving migration */ ++static void wait_for_isolated_drain(struct zs_pool *pool) ++{ ++ ++ /* ++ * We're in the process of destroying the pool, so there are no ++ * active allocations. zs_page_isolate() fails for completely free ++ * zspages, so we need only wait for the zs_pool's isolated ++ * count to hit zero. ++ */ ++ wait_event(pool->migration_wait, ++ pool_isolated_are_drained(pool)); ++} ++ + static void zs_unregister_migration(struct zs_pool *pool) + { ++ pool->destroying = true; ++ /* ++ * We need a memory barrier here to ensure global visibility of ++ * pool->destroying. Thus pool->isolated pages will either be 0 in which ++ * case we don't care, or it will be > 0 and pool->destroying will ++ * ensure that we wake up once isolation hits 0. ++ */ ++ smp_mb(); ++ wait_for_isolated_drain(pool); /* This can block */ + flush_work(&pool->free_work); + iput(pool->inode); + } +@@ -2366,6 +2432,8 @@ struct zs_pool *zs_create_pool(const char *name) + if (!pool->name) + goto err; + ++ init_waitqueue_head(&pool->migration_wait); ++ + if (create_cache(pool)) + goto err; + +diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c +index 995b3842ba7c..62ffc989a44a 100644 +--- a/net/bridge/netfilter/ebtables.c ++++ b/net/bridge/netfilter/ebtables.c +@@ -2274,8 +2274,10 @@ static int compat_do_replace(struct net *net, void __user *user, + state.buf_kern_len = size64; + + ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state); +- if (WARN_ON(ret < 0)) ++ if (WARN_ON(ret < 0)) { ++ vfree(entries_tmp); + goto out_unlock; ++ } + + vfree(entries_tmp); + tmp.entries_size = size64; +diff --git a/net/can/gw.c b/net/can/gw.c +index 53859346dc9a..bd2161470e45 100644 +--- a/net/can/gw.c ++++ b/net/can/gw.c +@@ -1046,32 +1046,50 @@ static __init int cgw_module_init(void) + pr_info("can: netlink gateway (rev " CAN_GW_VERSION ") max_hops=%d\n", + max_hops); + +- register_pernet_subsys(&cangw_pernet_ops); ++ ret = register_pernet_subsys(&cangw_pernet_ops); ++ if (ret) ++ return ret; ++ ++ ret = -ENOMEM; + cgw_cache = kmem_cache_create("can_gw", sizeof(struct cgw_job), + 0, 0, NULL); +- + if (!cgw_cache) +- return -ENOMEM; ++ goto out_cache_create; + + /* set notifier */ + notifier.notifier_call = cgw_notifier; +- register_netdevice_notifier(¬ifier); ++ ret = register_netdevice_notifier(¬ifier); ++ if (ret) ++ goto out_register_notifier; + + ret = rtnl_register_module(THIS_MODULE, PF_CAN, RTM_GETROUTE, + NULL, cgw_dump_jobs, 0); +- if (ret) { +- unregister_netdevice_notifier(¬ifier); +- kmem_cache_destroy(cgw_cache); +- return -ENOBUFS; +- } +- +- /* Only the first call to rtnl_register_module can fail */ +- rtnl_register_module(THIS_MODULE, PF_CAN, 
RTM_NEWROUTE, +- cgw_create_job, NULL, 0); +- rtnl_register_module(THIS_MODULE, PF_CAN, RTM_DELROUTE, +- cgw_remove_job, NULL, 0); ++ if (ret) ++ goto out_rtnl_register1; ++ ++ ret = rtnl_register_module(THIS_MODULE, PF_CAN, RTM_NEWROUTE, ++ cgw_create_job, NULL, 0); ++ if (ret) ++ goto out_rtnl_register2; ++ ret = rtnl_register_module(THIS_MODULE, PF_CAN, RTM_DELROUTE, ++ cgw_remove_job, NULL, 0); ++ if (ret) ++ goto out_rtnl_register3; + + return 0; ++ ++out_rtnl_register3: ++ rtnl_unregister(PF_CAN, RTM_NEWROUTE); ++out_rtnl_register2: ++ rtnl_unregister(PF_CAN, RTM_GETROUTE); ++out_rtnl_register1: ++ unregister_netdevice_notifier(¬ifier); ++out_register_notifier: ++ kmem_cache_destroy(cgw_cache); ++out_cache_create: ++ unregister_pernet_subsys(&cangw_pernet_ops); ++ ++ return ret; + } + + static __exit void cgw_module_exit(void) +diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c +index 60934bd8796c..76c41a84550e 100644 +--- a/net/ceph/osd_client.c ++++ b/net/ceph/osd_client.c +@@ -1423,7 +1423,7 @@ static enum calc_target_result calc_target(struct ceph_osd_client *osdc, + struct ceph_osds up, acting; + bool force_resend = false; + bool unpaused = false; +- bool legacy_change; ++ bool legacy_change = false; + bool split = false; + bool sort_bitwise = ceph_osdmap_flag(osdc, CEPH_OSDMAP_SORTBITWISE); + bool recovery_deletes = ceph_osdmap_flag(osdc, +@@ -1511,15 +1511,14 @@ static enum calc_target_result calc_target(struct ceph_osd_client *osdc, + t->osd = acting.primary; + } + +- if (unpaused || legacy_change || force_resend || +- (split && con && CEPH_HAVE_FEATURE(con->peer_features, +- RESEND_ON_SPLIT))) ++ if (unpaused || legacy_change || force_resend || split) + ct_res = CALC_TARGET_NEED_RESEND; + else + ct_res = CALC_TARGET_NO_ACTION; + + out: +- dout("%s t %p -> ct_res %d osd %d\n", __func__, t, ct_res, t->osd); ++ dout("%s t %p -> %d%d%d%d ct_res %d osd%d\n", __func__, t, unpaused, ++ legacy_change, force_resend, split, ct_res, t->osd); + return ct_res; + } + +diff --git a/net/netfilter/ipset/ip_set_bitmap_ipmac.c b/net/netfilter/ipset/ip_set_bitmap_ipmac.c +index 13ade5782847..4f01321e793c 100644 +--- a/net/netfilter/ipset/ip_set_bitmap_ipmac.c ++++ b/net/netfilter/ipset/ip_set_bitmap_ipmac.c +@@ -230,7 +230,7 @@ bitmap_ipmac_kadt(struct ip_set *set, const struct sk_buff *skb, + + e.id = ip_to_id(map, ip); + +- if (opt->flags & IPSET_DIM_ONE_SRC) ++ if (opt->flags & IPSET_DIM_TWO_SRC) + ether_addr_copy(e.ether, eth_hdr(skb)->h_source); + else + ether_addr_copy(e.ether, eth_hdr(skb)->h_dest); +diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c +index 1577f2f76060..e2538c578671 100644 +--- a/net/netfilter/ipset/ip_set_core.c ++++ b/net/netfilter/ipset/ip_set_core.c +@@ -1157,7 +1157,7 @@ static int ip_set_rename(struct net *net, struct sock *ctnl, + return -ENOENT; + + write_lock_bh(&ip_set_ref_lock); +- if (set->ref != 0) { ++ if (set->ref != 0 || set->ref_netlink != 0) { + ret = -IPSET_ERR_REFERENCED; + goto out; + } +diff --git a/net/netfilter/ipset/ip_set_hash_ipmac.c b/net/netfilter/ipset/ip_set_hash_ipmac.c +index fd87de3ed55b..16ec822e4044 100644 +--- a/net/netfilter/ipset/ip_set_hash_ipmac.c ++++ b/net/netfilter/ipset/ip_set_hash_ipmac.c +@@ -95,15 +95,11 @@ hash_ipmac4_kadt(struct ip_set *set, const struct sk_buff *skb, + struct hash_ipmac4_elem e = { .ip = 0, { .foo[0] = 0, .foo[1] = 0 } }; + struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set); + +- /* MAC can be src only */ +- if (!(opt->flags & IPSET_DIM_TWO_SRC)) +- return 0; 
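The cgw_module_init() rewrite a few hunks up converts the init path to the standard goto-unwind ladder: each successful registration gains a label, and a failure tears down everything registered so far in reverse order. The shape of that idiom, reduced to a compilable sketch with invented names:

#include <errno.h>

static int fake_register_a(void) { return 0; }
static void fake_unregister_a(void) { }
static int fake_register_b(void) { return 0; }
static void fake_unregister_b(void) { }
static int fake_register_c(void) { return -ENOMEM; }	/* simulate failure */

static int fake_module_init(void)
{
	int ret;

	ret = fake_register_a();
	if (ret)
		return ret;

	ret = fake_register_b();
	if (ret)
		goto out_a;

	ret = fake_register_c();
	if (ret)
		goto out_b;

	return 0;

out_b:
	fake_unregister_b();
out_a:
	fake_unregister_a();
	return ret;
}

int main(void)
{
	return fake_module_init() ? 1 : 0;
}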
+- + if (skb_mac_header(skb) < skb->head || + (skb_mac_header(skb) + ETH_HLEN) > skb->data) + return -EINVAL; + +- if (opt->flags & IPSET_DIM_ONE_SRC) ++ if (opt->flags & IPSET_DIM_TWO_SRC) + ether_addr_copy(e.ether, eth_hdr(skb)->h_source); + else + ether_addr_copy(e.ether, eth_hdr(skb)->h_dest); +diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c +index d76e5e58905d..7319d3ca30e9 100644 +--- a/net/rxrpc/af_rxrpc.c ++++ b/net/rxrpc/af_rxrpc.c +@@ -195,7 +195,7 @@ static int rxrpc_bind(struct socket *sock, struct sockaddr *saddr, int len) + + service_in_use: + write_unlock(&local->services_lock); +- rxrpc_put_local(local); ++ rxrpc_unuse_local(local); + ret = -EADDRINUSE; + error_unlock: + release_sock(&rx->sk); +@@ -908,7 +908,7 @@ static int rxrpc_release_sock(struct sock *sk) + rxrpc_queue_work(&rxnet->service_conn_reaper); + rxrpc_queue_work(&rxnet->client_conn_reaper); + +- rxrpc_put_local(rx->local); ++ rxrpc_unuse_local(rx->local); + rx->local = NULL; + key_put(rx->key); + rx->key = NULL; +diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h +index 03e0fc8c183f..dfd9eab77cc8 100644 +--- a/net/rxrpc/ar-internal.h ++++ b/net/rxrpc/ar-internal.h +@@ -258,7 +258,8 @@ struct rxrpc_security { + */ + struct rxrpc_local { + struct rcu_head rcu; +- atomic_t usage; ++ atomic_t active_users; /* Number of users of the local endpoint */ ++ atomic_t usage; /* Number of references to the structure */ + struct rxrpc_net *rxnet; /* The network ns in which this resides */ + struct list_head link; + struct socket *socket; /* my UDP socket */ +@@ -998,6 +999,8 @@ struct rxrpc_local *rxrpc_lookup_local(struct net *, const struct sockaddr_rxrpc + struct rxrpc_local *rxrpc_get_local(struct rxrpc_local *); + struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *); + void rxrpc_put_local(struct rxrpc_local *); ++struct rxrpc_local *rxrpc_use_local(struct rxrpc_local *); ++void rxrpc_unuse_local(struct rxrpc_local *); + void rxrpc_queue_local(struct rxrpc_local *); + void rxrpc_destroy_all_locals(struct rxrpc_net *); + +@@ -1057,6 +1060,7 @@ void rxrpc_destroy_all_peers(struct rxrpc_net *); + struct rxrpc_peer *rxrpc_get_peer(struct rxrpc_peer *); + struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *); + void rxrpc_put_peer(struct rxrpc_peer *); ++void rxrpc_put_peer_locked(struct rxrpc_peer *); + + /* + * proc.c +diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c +index d591f54cb91f..7965600ee5de 100644 +--- a/net/rxrpc/input.c ++++ b/net/rxrpc/input.c +@@ -1106,8 +1106,12 @@ static void rxrpc_post_packet_to_local(struct rxrpc_local *local, + { + _enter("%p,%p", local, skb); + +- skb_queue_tail(&local->event_queue, skb); +- rxrpc_queue_local(local); ++ if (rxrpc_get_local_maybe(local)) { ++ skb_queue_tail(&local->event_queue, skb); ++ rxrpc_queue_local(local); ++ } else { ++ rxrpc_free_skb(skb, rxrpc_skb_rx_freed); ++ } + } + + /* +@@ -1117,8 +1121,12 @@ static void rxrpc_reject_packet(struct rxrpc_local *local, struct sk_buff *skb) + { + CHECK_SLAB_OKAY(&local->usage); + +- skb_queue_tail(&local->reject_queue, skb); +- rxrpc_queue_local(local); ++ if (rxrpc_get_local_maybe(local)) { ++ skb_queue_tail(&local->reject_queue, skb); ++ rxrpc_queue_local(local); ++ } else { ++ rxrpc_free_skb(skb, rxrpc_skb_rx_freed); ++ } + } + + /* +diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c +index 10317dbdab5f..c752ad487067 100644 +--- a/net/rxrpc/local_object.c ++++ b/net/rxrpc/local_object.c +@@ -83,6 +83,7 @@ static struct rxrpc_local *rxrpc_alloc_local(struct rxrpc_net 
*rxnet, + local = kzalloc(sizeof(struct rxrpc_local), GFP_KERNEL); + if (local) { + atomic_set(&local->usage, 1); ++ atomic_set(&local->active_users, 1); + local->rxnet = rxnet; + INIT_LIST_HEAD(&local->link); + INIT_WORK(&local->processor, rxrpc_local_processor); +@@ -96,7 +97,7 @@ static struct rxrpc_local *rxrpc_alloc_local(struct rxrpc_net *rxnet, + local->debug_id = atomic_inc_return(&rxrpc_debug_id); + memcpy(&local->srx, srx, sizeof(*srx)); + local->srx.srx_service = 0; +- trace_rxrpc_local(local, rxrpc_local_new, 1, NULL); ++ trace_rxrpc_local(local->debug_id, rxrpc_local_new, 1, NULL); + } + + _leave(" = %p", local); +@@ -270,11 +271,8 @@ struct rxrpc_local *rxrpc_lookup_local(struct net *net, + * bind the transport socket may still fail if we're attempting + * to use a local address that the dying object is still using. + */ +- if (!rxrpc_get_local_maybe(local)) { +- cursor = cursor->next; +- list_del_init(&local->link); ++ if (!rxrpc_use_local(local)) + break; +- } + + age = "old"; + goto found; +@@ -288,7 +286,10 @@ struct rxrpc_local *rxrpc_lookup_local(struct net *net, + if (ret < 0) + goto sock_error; + +- list_add_tail(&local->link, cursor); ++ if (cursor != &rxnet->local_endpoints) ++ list_replace_init(cursor, &local->link); ++ else ++ list_add_tail(&local->link, cursor); + age = "new"; + + found: +@@ -324,7 +325,7 @@ struct rxrpc_local *rxrpc_get_local(struct rxrpc_local *local) + int n; + + n = atomic_inc_return(&local->usage); +- trace_rxrpc_local(local, rxrpc_local_got, n, here); ++ trace_rxrpc_local(local->debug_id, rxrpc_local_got, n, here); + return local; + } + +@@ -338,7 +339,8 @@ struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *local) + if (local) { + int n = atomic_fetch_add_unless(&local->usage, 1, 0); + if (n > 0) +- trace_rxrpc_local(local, rxrpc_local_got, n + 1, here); ++ trace_rxrpc_local(local->debug_id, rxrpc_local_got, ++ n + 1, here); + else + local = NULL; + } +@@ -346,24 +348,18 @@ struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *local) + } + + /* +- * Queue a local endpoint. ++ * Queue a local endpoint and pass the caller's reference to the work item. + */ + void rxrpc_queue_local(struct rxrpc_local *local) + { + const void *here = __builtin_return_address(0); ++ unsigned int debug_id = local->debug_id; ++ int n = atomic_read(&local->usage); + + if (rxrpc_queue_work(&local->processor)) +- trace_rxrpc_local(local, rxrpc_local_queued, +- atomic_read(&local->usage), here); +-} +- +-/* +- * A local endpoint reached its end of life. +- */ +-static void __rxrpc_put_local(struct rxrpc_local *local) +-{ +- _enter("%d", local->debug_id); +- rxrpc_queue_work(&local->processor); ++ trace_rxrpc_local(debug_id, rxrpc_local_queued, n, here); ++ else ++ rxrpc_put_local(local); + } + + /* +@@ -376,10 +372,47 @@ void rxrpc_put_local(struct rxrpc_local *local) + + if (local) { + n = atomic_dec_return(&local->usage); +- trace_rxrpc_local(local, rxrpc_local_put, n, here); ++ trace_rxrpc_local(local->debug_id, rxrpc_local_put, n, here); + + if (n == 0) +- __rxrpc_put_local(local); ++ call_rcu(&local->rcu, rxrpc_local_rcu); ++ } ++} ++ ++/* ++ * Start using a local endpoint. ++ */ ++struct rxrpc_local *rxrpc_use_local(struct rxrpc_local *local) ++{ ++ unsigned int au; ++ ++ local = rxrpc_get_local_maybe(local); ++ if (!local) ++ return NULL; ++ ++ au = atomic_fetch_add_unless(&local->active_users, 1, 0); ++ if (au == 0) { ++ rxrpc_put_local(local); ++ return NULL; ++ } ++ ++ return local; ++} ++ ++/* ++ * Cease using a local endpoint. 
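The local_object.c changes above split the endpoint's lifetime across two counters: usage pins the memory while active_users keeps the endpoint in service. A compilable userspace model of that scheme using C11 atomics; all names are invented, and free() stands in for the kernel's call_rcu()/worker teardown:

#include <stdatomic.h>
#include <stdlib.h>

struct fake_local {
	atomic_int	usage;		/* references to the structure */
	atomic_int	active_users;	/* users of the live endpoint  */
};

static void fake_put(struct fake_local *l)
{
	if (atomic_fetch_sub(&l->usage, 1) == 1)
		free(l);		/* kernel: call_rcu() */
}

/* Like rxrpc_get_local_maybe(): pin the memory unless already dead. */
static struct fake_local *fake_get_maybe(struct fake_local *l)
{
	int n = atomic_load(&l->usage);

	do {
		if (n == 0)
			return NULL;
	} while (!atomic_compare_exchange_weak(&l->usage, &n, n + 1));
	return l;
}

/* Join the active users, refusing to resurrect a closing endpoint. */
static struct fake_local *fake_use(struct fake_local *l)
{
	int n;

	if (!fake_get_maybe(l))
		return NULL;

	n = atomic_load(&l->active_users);
	do {
		if (n == 0) {
			fake_put(l);
			return NULL;
		}
	} while (!atomic_compare_exchange_weak(&l->active_users, &n, n + 1));
	return l;
}

/* The last user triggers teardown; the kernel queues a worker here. */
static void fake_unuse(struct fake_local *l)
{
	if (atomic_fetch_sub(&l->active_users, 1) == 1) {
		/* close the socket, purge the queues ... */
	}
	fake_put(l);
}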
Once the number of active users reaches 0, we ++ * start the closure of the transport in the work processor. ++ */ ++void rxrpc_unuse_local(struct rxrpc_local *local) ++{ ++ unsigned int au; ++ ++ if (local) { ++ au = atomic_dec_return(&local->active_users); ++ if (au == 0) ++ rxrpc_queue_local(local); ++ else ++ rxrpc_put_local(local); + } + } + +@@ -397,16 +430,6 @@ static void rxrpc_local_destroyer(struct rxrpc_local *local) + + _enter("%d", local->debug_id); + +- /* We can get a race between an incoming call packet queueing the +- * processor again and the work processor starting the destruction +- * process which will shut down the UDP socket. +- */ +- if (local->dead) { +- _leave(" [already dead]"); +- return; +- } +- local->dead = true; +- + mutex_lock(&rxnet->local_mutex); + list_del_init(&local->link); + mutex_unlock(&rxnet->local_mutex); +@@ -426,13 +449,11 @@ static void rxrpc_local_destroyer(struct rxrpc_local *local) + */ + rxrpc_purge_queue(&local->reject_queue); + rxrpc_purge_queue(&local->event_queue); +- +- _debug("rcu local %d", local->debug_id); +- call_rcu(&local->rcu, rxrpc_local_rcu); + } + + /* +- * Process events on an endpoint ++ * Process events on an endpoint. The work item carries a ref which ++ * we must release. + */ + static void rxrpc_local_processor(struct work_struct *work) + { +@@ -440,13 +461,15 @@ static void rxrpc_local_processor(struct work_struct *work) + container_of(work, struct rxrpc_local, processor); + bool again; + +- trace_rxrpc_local(local, rxrpc_local_processing, ++ trace_rxrpc_local(local->debug_id, rxrpc_local_processing, + atomic_read(&local->usage), NULL); + + do { + again = false; +- if (atomic_read(&local->usage) == 0) +- return rxrpc_local_destroyer(local); ++ if (atomic_read(&local->active_users) == 0) { ++ rxrpc_local_destroyer(local); ++ break; ++ } + + if (!skb_queue_empty(&local->reject_queue)) { + rxrpc_reject_packets(local); +@@ -458,6 +481,8 @@ static void rxrpc_local_processor(struct work_struct *work) + again = true; + } + } while (again); ++ ++ rxrpc_put_local(local); + } + + /* +diff --git a/net/rxrpc/peer_event.c b/net/rxrpc/peer_event.c +index bd2fa3b7caa7..dc7fdaf20445 100644 +--- a/net/rxrpc/peer_event.c ++++ b/net/rxrpc/peer_event.c +@@ -375,7 +375,7 @@ static void rxrpc_peer_keepalive_dispatch(struct rxrpc_net *rxnet, + spin_lock_bh(&rxnet->peer_hash_lock); + list_add_tail(&peer->keepalive_link, + &rxnet->peer_keepalive[slot & mask]); +- rxrpc_put_peer(peer); ++ rxrpc_put_peer_locked(peer); + } + + spin_unlock_bh(&rxnet->peer_hash_lock); +diff --git a/net/rxrpc/peer_object.c b/net/rxrpc/peer_object.c +index 5691b7d266ca..71547e8673b9 100644 +--- a/net/rxrpc/peer_object.c ++++ b/net/rxrpc/peer_object.c +@@ -440,6 +440,24 @@ void rxrpc_put_peer(struct rxrpc_peer *peer) + } + } + ++/* ++ * Drop a ref on a peer record where the caller already holds the ++ * peer_hash_lock. ++ */ ++void rxrpc_put_peer_locked(struct rxrpc_peer *peer) ++{ ++ const void *here = __builtin_return_address(0); ++ int n; ++ ++ n = atomic_dec_return(&peer->usage); ++ trace_rxrpc_peer(peer, rxrpc_peer_put, n, here); ++ if (n == 0) { ++ hash_del_rcu(&peer->hash_link); ++ list_del_init(&peer->keepalive_link); ++ kfree_rcu(peer, rcu); ++ } ++} ++ + /* + * Make sure all peer records have been discarded. 
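rxrpc_put_peer_locked() above follows the usual kernel pairing of a final-put with a caller-already-holds-the-lock variant, so the hot path never re-takes peer_hash_lock. A condensed kernel-style sketch of the pair; the names are invented, this is not the actual rxrpc code, and it only builds in-kernel:

#include <linux/atomic.h>
#include <linux/hashtable.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(fake_hash_lock);

struct fake_peer {
	atomic_t		usage;
	struct hlist_node	hash_link;
	struct rcu_head		rcu;
};

/* Caller holds no locks: take the lock just for the unlink. */
static void fake_peer_put(struct fake_peer *peer)
{
	if (atomic_dec_and_test(&peer->usage)) {
		spin_lock_bh(&fake_hash_lock);
		hash_del_rcu(&peer->hash_link);
		spin_unlock_bh(&fake_hash_lock);
		kfree_rcu(peer, rcu);
	}
}

/* Caller already holds fake_hash_lock: unlink directly. */
static void fake_peer_put_locked(struct fake_peer *peer)
{
	if (atomic_dec_and_test(&peer->usage)) {
		hash_del_rcu(&peer->hash_link);
		kfree_rcu(peer, rcu);
	}
}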
+ */ +diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c +index be01f9c5d963..5d6ab4f6fd7a 100644 +--- a/net/rxrpc/sendmsg.c ++++ b/net/rxrpc/sendmsg.c +@@ -230,6 +230,7 @@ static void rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call, + rxrpc_set_call_completion(call, + RXRPC_CALL_LOCAL_ERROR, + 0, ret); ++ rxrpc_notify_socket(call); + goto out; + } + _debug("need instant resend %d", ret); +diff --git a/sound/soc/davinci/davinci-mcasp.c b/sound/soc/davinci/davinci-mcasp.c +index 160b2764b2ad..6a8c279a4b20 100644 +--- a/sound/soc/davinci/davinci-mcasp.c ++++ b/sound/soc/davinci/davinci-mcasp.c +@@ -1150,6 +1150,28 @@ static int davinci_mcasp_trigger(struct snd_pcm_substream *substream, + return ret; + } + ++static int davinci_mcasp_hw_rule_slot_width(struct snd_pcm_hw_params *params, ++ struct snd_pcm_hw_rule *rule) ++{ ++ struct davinci_mcasp_ruledata *rd = rule->private; ++ struct snd_mask *fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT); ++ struct snd_mask nfmt; ++ int i, slot_width; ++ ++ snd_mask_none(&nfmt); ++ slot_width = rd->mcasp->slot_width; ++ ++ for (i = 0; i <= SNDRV_PCM_FORMAT_LAST; i++) { ++ if (snd_mask_test(fmt, i)) { ++ if (snd_pcm_format_width(i) <= slot_width) { ++ snd_mask_set(&nfmt, i); ++ } ++ } ++ } ++ ++ return snd_mask_refine(fmt, &nfmt); ++} ++ + static const unsigned int davinci_mcasp_dai_rates[] = { + 8000, 11025, 16000, 22050, 32000, 44100, 48000, 64000, + 88200, 96000, 176400, 192000, +@@ -1257,7 +1279,7 @@ static int davinci_mcasp_startup(struct snd_pcm_substream *substream, + struct davinci_mcasp_ruledata *ruledata = + &mcasp->ruledata[substream->stream]; + u32 max_channels = 0; +- int i, dir; ++ int i, dir, ret; + int tdm_slots = mcasp->tdm_slots; + + /* Do not allow more then one stream per direction */ +@@ -1286,6 +1308,7 @@ static int davinci_mcasp_startup(struct snd_pcm_substream *substream, + max_channels++; + } + ruledata->serializers = max_channels; ++ ruledata->mcasp = mcasp; + max_channels *= tdm_slots; + /* + * If the already active stream has less channels than the calculated +@@ -1311,20 +1334,22 @@ static int davinci_mcasp_startup(struct snd_pcm_substream *substream, + 0, SNDRV_PCM_HW_PARAM_CHANNELS, + &mcasp->chconstr[substream->stream]); + +- if (mcasp->slot_width) +- snd_pcm_hw_constraint_minmax(substream->runtime, +- SNDRV_PCM_HW_PARAM_SAMPLE_BITS, +- 8, mcasp->slot_width); ++ if (mcasp->slot_width) { ++ /* Only allow formats require <= slot_width bits on the bus */ ++ ret = snd_pcm_hw_rule_add(substream->runtime, 0, ++ SNDRV_PCM_HW_PARAM_FORMAT, ++ davinci_mcasp_hw_rule_slot_width, ++ ruledata, ++ SNDRV_PCM_HW_PARAM_FORMAT, -1); ++ if (ret) ++ return ret; ++ } + + /* + * If we rely on implicit BCLK divider setting we should + * set constraints based on what we can provide. 
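The new davinci_mcasp_hw_rule_slot_width() constraint above keeps only the sample formats that fit the configured slot width. A worked standalone illustration of that refinement for a 16-bit slot, using plain C stand-ins rather than the ALSA mask API:

#include <stdio.h>

struct fmt { const char *name; int width; };

int main(void)
{
	static const struct fmt fmts[] = {
		{ "S8", 8 }, { "S16_LE", 16 }, { "S24_LE", 24 }, { "S32_LE", 32 },
	};
	int slot_width = 16;	/* e.g. from a 16-bit TDM slot config */

	/* Keep a format only if its sample width fits the bus slot. */
	for (unsigned i = 0; i < sizeof(fmts) / sizeof(fmts[0]); i++)
		if (fmts[i].width <= slot_width)
			printf("%s allowed\n", fmts[i].name);
	return 0;
}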
+ */ + if (mcasp->bclk_master && mcasp->bclk_div == 0 && mcasp->sysclk_freq) { +- int ret; +- +- ruledata->mcasp = mcasp; +- + ret = snd_pcm_hw_rule_add(substream->runtime, 0, + SNDRV_PCM_HW_PARAM_RATE, + davinci_mcasp_hw_rule_rate, +diff --git a/sound/soc/rockchip/rockchip_i2s.c b/sound/soc/rockchip/rockchip_i2s.c +index 60d43d53a8f5..11399f81c92f 100644 +--- a/sound/soc/rockchip/rockchip_i2s.c ++++ b/sound/soc/rockchip/rockchip_i2s.c +@@ -329,7 +329,6 @@ static int rockchip_i2s_hw_params(struct snd_pcm_substream *substream, + val |= I2S_CHN_4; + break; + case 2: +- case 1: + val |= I2S_CHN_2; + break; + default: +@@ -462,7 +461,7 @@ static struct snd_soc_dai_driver rockchip_i2s_dai = { + }, + .capture = { + .stream_name = "Capture", +- .channels_min = 1, ++ .channels_min = 2, + .channels_max = 2, + .rates = SNDRV_PCM_RATE_8000_192000, + .formats = (SNDRV_PCM_FMTBIT_S8 | +@@ -662,7 +661,7 @@ static int rockchip_i2s_probe(struct platform_device *pdev) + } + + if (!of_property_read_u32(node, "rockchip,capture-channels", &val)) { +- if (val >= 1 && val <= 8) ++ if (val >= 2 && val <= 8) + soc_dai->capture.channels_max = val; + } + +diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c +index 62aa320c2070..dafc3b7f8d72 100644 +--- a/sound/soc/soc-core.c ++++ b/sound/soc/soc-core.c +@@ -1513,8 +1513,11 @@ static int soc_probe_link_dais(struct snd_soc_card *card, + } + } + +- if (dai_link->dai_fmt) +- snd_soc_runtime_set_dai_fmt(rtd, dai_link->dai_fmt); ++ if (dai_link->dai_fmt) { ++ ret = snd_soc_runtime_set_dai_fmt(rtd, dai_link->dai_fmt); ++ if (ret) ++ return ret; ++ } + + ret = soc_post_component_init(rtd, dai_link->name); + if (ret) +diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c +index 3bfc788372f3..4ce57510b623 100644 +--- a/sound/soc/soc-dapm.c ++++ b/sound/soc/soc-dapm.c +@@ -1145,8 +1145,8 @@ static __always_inline int is_connected_ep(struct snd_soc_dapm_widget *widget, + list_add_tail(&widget->work_list, list); + + if (custom_stop_condition && custom_stop_condition(widget, dir)) { +- widget->endpoints[dir] = 1; +- return widget->endpoints[dir]; ++ list = NULL; ++ custom_stop_condition = NULL; + } + + if ((widget->is_ep & SND_SOC_DAPM_DIR_TO_EP(dir)) && widget->connected) { +@@ -1183,8 +1183,8 @@ static __always_inline int is_connected_ep(struct snd_soc_dapm_widget *widget, + * + * Optionally, can be supplied with a function acting as a stopping condition. + * This function takes the dapm widget currently being examined and the walk +- * direction as an arguments, it should return true if the walk should be +- * stopped and false otherwise. ++ * direction as an arguments, it should return true if widgets from that point ++ * in the graph onwards should not be added to the widget list. 
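Per the updated comment above, a custom_stop_condition now prunes the DAPM graph walk from the matched widget onwards instead of forcing an endpoint result. A hypothetical callback with the expected signature, purely illustrative and not taken from this patch:

#include <sound/soc-dapm.h>

/* Stop the walk at DAI widgets: everything behind them is pruned. */
static bool fake_stop_at_dai(struct snd_soc_dapm_widget *w,
			     enum snd_soc_dapm_direction dir)
{
	return w->id == snd_soc_dapm_dai_in || w->id == snd_soc_dapm_dai_out;
}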
+ */
+ static int is_connected_output_ep(struct snd_soc_dapm_widget *widget,
+ struct list_head *list,
+diff --git a/tools/perf/bench/numa.c b/tools/perf/bench/numa.c
+index fa56fde6e8d8..91c0a4434da2 100644
+--- a/tools/perf/bench/numa.c
++++ b/tools/perf/bench/numa.c
+@@ -378,8 +378,10 @@ static u8 *alloc_data(ssize_t bytes0, int map_flags,
+
+ /* Allocate and initialize all memory on CPU#0: */
+ if (init_cpu0) {
+- orig_mask = bind_to_node(0);
+- bind_to_memnode(0);
++ int node = numa_node_of_cpu(0);
++
++ orig_mask = bind_to_node(node);
++ bind_to_memnode(node);
+ }
+
+ bytes = bytes0 + HPSIZE;
+diff --git a/tools/perf/builtin-ftrace.c b/tools/perf/builtin-ftrace.c
+index f42f228e8899..137955197ba8 100644
+--- a/tools/perf/builtin-ftrace.c
++++ b/tools/perf/builtin-ftrace.c
+@@ -174,7 +174,7 @@ static int set_tracing_cpumask(struct cpu_map *cpumap)
+ int last_cpu;
+
+ last_cpu = cpu_map__cpu(cpumap, cpumap->nr - 1);
+- mask_size = (last_cpu + 3) / 4 + 1;
++ mask_size = last_cpu / 4 + 2; /* one more byte for EOS */
+ mask_size += last_cpu / 32; /* ',' is needed for every 32th cpus */
+
+ cpumask = malloc(mask_size);
+diff --git a/tools/perf/pmu-events/jevents.c b/tools/perf/pmu-events/jevents.c
+index 68c92bb599ee..6b36b7110669 100644
+--- a/tools/perf/pmu-events/jevents.c
++++ b/tools/perf/pmu-events/jevents.c
+@@ -450,6 +450,7 @@ static struct fixed {
+ { "inst_retired.any_p", "event=0xc0" },
+ { "cpu_clk_unhalted.ref", "event=0x0,umask=0x03" },
+ { "cpu_clk_unhalted.thread", "event=0x3c" },
++ { "cpu_clk_unhalted.core", "event=0x3c" },
+ { "cpu_clk_unhalted.thread_any", "event=0x3c,any=1" },
+ { NULL, NULL},
+ };
+diff --git a/tools/perf/util/cpumap.c b/tools/perf/util/cpumap.c
+index 383674f448fc..f93846edc1e0 100644
+--- a/tools/perf/util/cpumap.c
++++ b/tools/perf/util/cpumap.c
+@@ -701,7 +701,10 @@ size_t cpu_map__snprint_mask(struct cpu_map *map, char *buf, size_t size)
+ unsigned char *bitmap;
+ int last_cpu = cpu_map__cpu(map, map->nr - 1);
+
+- bitmap = zalloc((last_cpu + 7) / 8);
++ if (buf == NULL)
++ return 0;
++
++ bitmap = zalloc(last_cpu / 8 + 1);
+ if (bitmap == NULL) {
+ buf[0] = '\0';
+ return 0;
+ }
+diff --git a/tools/testing/selftests/bpf/sendmsg6_prog.c b/tools/testing/selftests/bpf/sendmsg6_prog.c
+index 5aeaa284fc47..a68062820410 100644
+--- a/tools/testing/selftests/bpf/sendmsg6_prog.c
++++ b/tools/testing/selftests/bpf/sendmsg6_prog.c
+@@ -41,8 +41,7 @@ int sendmsg_v6_prog(struct bpf_sock_addr *ctx)
+ }
+
+ /* Rewrite destination.
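The sendmsg6 hunk just below replaces a two-halves comparison of user_ip6[0] with a single bpf_htonl() comparison; only the whole-word network-order compare behaves identically on big- and little-endian hosts. A standalone demo of the difference, assuming a 32-bit unsigned int and libc byte-order helpers:

#include <arpa/inet.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* First four bytes of the address as they sit on the wire. */
	const unsigned char wire[4] = { 0xfa, 0xce, 0xb0, 0x0c };
	unsigned int word;

	memcpy(&word, wire, sizeof(word));	/* as the kernel stores it */

	/* Portable: compare the whole word in network byte order. */
	printf("htonl compare: %s\n",
	       word == htonl(0xFACEB00CU) ? "match" : "no match");

	/* The removed split compare only matches on little-endian hosts. */
	printf("split compare: %s\n",
	       ((word & 0xFFFF) == htons(0xFACE) &&
		(word >> 16) == htons(0xB00C)) ? "match" : "no match");
	return 0;
}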
*/ +- if ((ctx->user_ip6[0] & 0xFFFF) == bpf_htons(0xFACE) && +- ctx->user_ip6[0] >> 16 == bpf_htons(0xB00C)) { ++ if (ctx->user_ip6[0] == bpf_htonl(0xFACEB00C)) { + ctx->user_ip6[0] = bpf_htonl(DST_REWRITE_IP6_0); + ctx->user_ip6[1] = bpf_htonl(DST_REWRITE_IP6_1); + ctx->user_ip6[2] = bpf_htonl(DST_REWRITE_IP6_2); +diff --git a/tools/testing/selftests/kvm/config b/tools/testing/selftests/kvm/config +new file mode 100644 +index 000000000000..63ed533f73d6 +--- /dev/null ++++ b/tools/testing/selftests/kvm/config +@@ -0,0 +1,3 @@ ++CONFIG_KVM=y ++CONFIG_KVM_INTEL=y ++CONFIG_KVM_AMD=y +diff --git a/tools/testing/selftests/net/forwarding/gre_multipath.sh b/tools/testing/selftests/net/forwarding/gre_multipath.sh +index cca2baa03fb8..a8d8e8b3dc81 100755 +--- a/tools/testing/selftests/net/forwarding/gre_multipath.sh ++++ b/tools/testing/selftests/net/forwarding/gre_multipath.sh +@@ -93,18 +93,10 @@ sw1_create() + ip route add vrf v$ol1 192.0.2.16/28 \ + nexthop dev g1a \ + nexthop dev g1b +- +- tc qdisc add dev $ul1 clsact +- tc filter add dev $ul1 egress pref 111 prot ipv4 \ +- flower dst_ip 192.0.2.66 action pass +- tc filter add dev $ul1 egress pref 222 prot ipv4 \ +- flower dst_ip 192.0.2.82 action pass + } + + sw1_destroy() + { +- tc qdisc del dev $ul1 clsact +- + ip route del vrf v$ol1 192.0.2.16/28 + + ip route del vrf v$ol1 192.0.2.82/32 via 192.0.2.146 +@@ -139,10 +131,18 @@ sw2_create() + ip route add vrf v$ol2 192.0.2.0/28 \ + nexthop dev g2a \ + nexthop dev g2b ++ ++ tc qdisc add dev $ul2 clsact ++ tc filter add dev $ul2 ingress pref 111 prot 802.1Q \ ++ flower vlan_id 111 action pass ++ tc filter add dev $ul2 ingress pref 222 prot 802.1Q \ ++ flower vlan_id 222 action pass + } + + sw2_destroy() + { ++ tc qdisc del dev $ul2 clsact ++ + ip route del vrf v$ol2 192.0.2.0/28 + + ip route del vrf v$ol2 192.0.2.81/32 via 192.0.2.145 +@@ -187,12 +187,16 @@ setup_prepare() + sw1_create + sw2_create + h2_create ++ ++ forwarding_enable + } + + cleanup() + { + pre_cleanup + ++ forwarding_restore ++ + h2_destroy + sw2_destroy + sw1_destroy +@@ -211,15 +215,15 @@ multipath4_test() + nexthop dev g1a weight $weight1 \ + nexthop dev g1b weight $weight2 + +- local t0_111=$(tc_rule_stats_get $ul1 111 egress) +- local t0_222=$(tc_rule_stats_get $ul1 222 egress) ++ local t0_111=$(tc_rule_stats_get $ul2 111 ingress) ++ local t0_222=$(tc_rule_stats_get $ul2 222 ingress) + + ip vrf exec v$h1 \ + $MZ $h1 -q -p 64 -A 192.0.2.1 -B 192.0.2.18 \ + -d 1msec -t udp "sp=1024,dp=0-32768" + +- local t1_111=$(tc_rule_stats_get $ul1 111 egress) +- local t1_222=$(tc_rule_stats_get $ul1 222 egress) ++ local t1_111=$(tc_rule_stats_get $ul2 111 ingress) ++ local t1_222=$(tc_rule_stats_get $ul2 222 ingress) + + local d111=$((t1_111 - t0_111)) + local d222=$((t1_222 - t0_222)) |