This is the 4.19.272 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmPgoxsACgkQONu9yGCS
 aT7P4hAAiY1TIECbi0kriTeuRmi/cGkU1eo8ODV+5h0x31cEObCZnsvU6v2LeqOJ
 X0H8U3MwjxyVYoK1L1SUBClgPXqBq0jI4+CrjVSQcl8kWGKhUm/JXzjcOC6wAlB7
 M2BRD2z7M+K4Mem1Q6eV7UEuEBPteLquP4xPlaXKkRAoeWKXcbiRM3ns5h20Acmi
 igDqhzLUAiAIhL429vPAOp34aNDmaZzLXiMt9p0mYkDFP7I904FrlBAsceJV8NZ9
 6oxxQspkoCf8MjBGrV6JcFwNgHjCHM+zXHWhROCE9WL7rknCJidttQKsOtzNE6Gx
 pFF60qFRx6JVsvm0D2lJpEW+ExdlcQ/aacByy3fXajkdyrsl9sTajiw0+1f6P4Oq
 hX8U+HianG4G1HgpOAUeZZBcTn7k7+pdTcogLaQW5Csch2RHMmmnPMZlZqudGUxD
 yJiyd9dRIEPOa8CAdCzI4/xgjEgogS1snS9jRhJ0rwRtSqL3lLrSRdeFWbVjgA54
 HAF9dR76Y4sbmWv4MbUPK+lvROvWv/NtLeiHXCJCGsxWdcOYQTw8ASenaK2IQRUF
 1ows0gRXX3Xzm6R8CvpJqT80ta1rUfD3INycV4kUHeRUlVsrBtu48KX325+ZUXD9
 Lvcbips57QvMMigROx6HpQHbmA1fe/42mZfhf36sgzycAmIzThA=
 =yuwn
 -----END PGP SIGNATURE-----

Merge 4.19.272 into android-4.19-stable

Changes in 4.19.272
	ARM: dts: imx6qdl-gw560x: Remove incorrect 'uart-has-rtscts'
	HID: intel_ish-hid: Add check for ishtp_dma_tx_map
	EDAC/highbank: Fix memory leak in highbank_mc_probe()
	tomoyo: fix broken dependency on *.conf.default
	IB/hfi1: Reject a zero-length user expected buffer
	IB/hfi1: Reserve user expected TIDs
	IB/hfi1: Fix expected receive setup error exit issues
	affs: initialize fsdata in affs_truncate()
	amd-xgbe: TX Flow Ctrl Registers are h/w ver dependent
	amd-xgbe: Delay AN timeout during KR training
	bpf: Fix pointer-leak due to insufficient speculative store bypass mitigation
	phy: rockchip-inno-usb2: Fix missing clk_disable_unprepare() in rockchip_usb2phy_power_on()
	net: nfc: Fix use-after-free in local_cleanup()
	wifi: rndis_wlan: Prevent buffer overflow in rndis_query_oid
	net: usb: sr9700: Handle negative len
	net: mdio: validate parameter addr in mdiobus_get_phy()
	HID: check empty report_list in hid_validate_values()
	usb: gadget: f_fs: Prevent race during ffs_ep0_queue_wait
	usb: gadget: f_fs: Ensure ep0req is dequeued before free_request
	net: mlx5: eliminate anonymous module_init & module_exit
	dmaengine: Fix double increment of client_count in dma_chan_get()
	net: macb: fix PTP TX timestamp failure due to packet padding
	HID: betop: check shape of output reports
	dmaengine: xilinx_dma: commonize DMA copy size calculation
	dmaengine: xilinx_dma: program hardware supported buffer length
	dmaengine: xilinx_dma: Fix devm_platform_ioremap_resource error handling
	dmaengine: xilinx_dma: call of_node_put() when breaking out of for_each_child_of_node()
	tcp: avoid the lookup process failing to get sk in ehash table
	w1: fix deadloop in __w1_remove_master_device()
	w1: fix WARNING after calling w1_process()
	netfilter: conntrack: do not renew entry stuck in tcp SYN_SENT state
	block: fix and cleanup bio_check_ro
	perf env: Do not return pointers to local variables
	fs: reiserfs: remove useless new_opts in reiserfs_remount
	Bluetooth: hci_sync: cancel cmd_timer if hci_open failed
	scsi: hpsa: Fix allocation size for scsi_host_alloc()
	module: Don't wait for GOING modules
	tracing: Make sure trace_printk() can output as soon as it can be used
	trace_events_hist: add check for return value of 'create_hist_field'
	smbd: Make upper layer decide when to destroy the transport
	cifs: Fix oops due to uncleared server->smbd_conn in reconnect
	ARM: 9280/1: mm: fix warning on phys_addr_t to void pointer assignment
	EDAC/device: Respect any driver-supplied workqueue polling value
	net: fix UaF in netns ops registration error path
	netfilter: nft_set_rbtree: skip elements in transaction from garbage collection
	netlink: remove hash::nelems check in netlink_insert
	netlink: annotate data races around nlk->portid
	netlink: annotate data races around dst_portid and dst_group
	netlink: annotate data races around sk_state
	ipv4: prevent potential spectre v1 gadget in ip_metrics_convert()
	netfilter: conntrack: fix vtag checks for ABORT/SHUTDOWN_COMPLETE
	netrom: Fix use-after-free of a listening socket.
	sctp: fail if no bound addresses can be used for a given scope
	net: ravb: Fix possible hang if RIS2_QFF1 happen
	net/tg3: resolve deadlock in tg3_reset_task() during EEH
	Revert "Input: synaptics - switch touchpad on HP Laptop 15-da3001TU to RMI mode"
	x86/i8259: Mark legacy PIC interrupts with IRQ_LEVEL
	drm/i915/display: fix compiler warning about array overrun
	x86/asm: Fix an assembler warning with current binutils
	x86/entry/64: Add instruction suffix to SYSRET
	ARM: dts: imx: Fix pca9547 i2c-mux node name
	dmaengine: imx-sdma: Fix a possible memory leak in sdma_transfer_init
	sysctl: add a new register_sysctl_init() interface
	panic: unset panic_on_warn inside panic()
	exit: Add and use make_task_dead.
	objtool: Add a missing comma to avoid string concatenation
	hexagon: Fix function name in die()
	h8300: Fix build errors from do_exit() to make_task_dead() transition
	ia64: make IA64_MCA_RECOVERY bool instead of tristate
	exit: Put an upper limit on how often we can oops
	exit: Expose "oops_count" to sysfs
	exit: Allow oops_limit to be disabled
	panic: Consolidate open-coded panic_on_warn checks
	panic: Introduce warn_limit
	panic: Expose "warn_count" to sysfs
	docs: Fix path paste-o for /sys/kernel/warn_count
	exit: Use READ_ONCE() for all oops/warn limit reads
	ipv6: ensure sane device mtu in tunnels
	usb: host: xhci-plat: add wakeup entry at sysfs
	Linux 4.19.272

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I4f9ddce1e108e81409d47e00fdeef2bc0d34f793
commit c97f22d970
Author: Greg Kroah-Hartman <gregkh@google.com>
Date:   2023-02-06 08:16:47 +01:00

 108 files changed, 775 insertions(+), 304 deletions(-)

@ -0,0 +1,6 @@
What: /sys/kernel/oops_count
Date: November 2022
KernelVersion: 6.2.0
Contact: Linux Kernel Hardening List <linux-hardening@vger.kernel.org>
Description:
Shows how many times the system has Oopsed since last boot.

@ -0,0 +1,6 @@
What: /sys/kernel/warn_count
Date: November 2022
KernelVersion: 6.2.0
Contact: Linux Kernel Hardening List <linux-hardening@vger.kernel.org>
Description:
Shows how many times the system has Warned since last boot.
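
Both counters are plain decimal values, so a quick shell read is enough to check them (illustrative session; the counts depend on the running system):

	# cat /sys/kernel/oops_count /sys/kernel/warn_count
	0
	0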

@ -51,6 +51,7 @@ show up in /proc/sys/kernel:
- msgmnb
- msgmni
- nmi_watchdog
- oops_limit
- osrelease
- ostype
- overflowgid
@ -97,6 +98,7 @@ show up in /proc/sys/kernel:
- threads-max
- unprivileged_bpf_disabled
- unknown_nmi_panic
- warn_limit
- watchdog
- watchdog_thresh
- version
@ -556,6 +558,15 @@ scanned for a given scan.
==============================================================
oops_limit:
Number of kernel oopses after which the kernel should panic when
``panic_on_oops`` is not set. Setting this to 0 disables checking
the count. Setting this to 1 has the same effect as setting
``panic_on_oops=1``. The default value is 10000.
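
For example, to read the current limit and then disable the check entirely (illustrative session):

	# cat /proc/sys/kernel/oops_limit
	10000
	# echo 0 > /proc/sys/kernel/oops_limit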
==============================================================
osrelease, ostype & version:
# cat osrelease
@ -1116,6 +1127,15 @@ example. If a system hangs up, try pressing the NMI switch.
==============================================================
warn_limit:
Number of kernel warnings after which the kernel should panic when
``panic_on_warn`` is not set. Setting this to 0 disables checking
the warning count. Setting this to 1 has the same effect as setting
``panic_on_warn=1``. The default value is 0.
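
For example, to panic on the first warning, equivalent to booting with ``panic_on_warn=1`` (illustrative):

	# echo 1 > /proc/sys/kernel/warn_limit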
==============================================================
watchdog:
This parameter can be used to disable or enable the soft lockup detector

@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 4
PATCHLEVEL = 19
SUBLEVEL = 271
SUBLEVEL = 272
EXTRAVERSION =
NAME = "People's Front"

@ -192,7 +192,7 @@ die_if_kernel(char * str, struct pt_regs *regs, long err, unsigned long *r9_15)
local_irq_enable();
while (1);
}
do_exit(SIGSEGV);
make_task_dead(SIGSEGV);
}
#ifndef CONFIG_MATHEMU
@ -577,7 +577,7 @@ do_entUna(void * va, unsigned long opcode, unsigned long reg,
printk("Bad unaligned kernel access at %016lx: %p %lx %lu\n",
pc, va, opcode, reg);
do_exit(SIGSEGV);
make_task_dead(SIGSEGV);
got_exception:
/* Ok, we caught the exception, but we don't want it. Is there
@ -632,7 +632,7 @@ got_exception:
local_irq_enable();
while (1);
}
do_exit(SIGSEGV);
make_task_dead(SIGSEGV);
}
/*

@ -206,7 +206,7 @@ retry:
printk(KERN_ALERT "Unable to handle kernel paging request at "
"virtual address %016lx\n", address);
die_if_kernel("Oops", regs, cause, (unsigned long*)regs - 16);
do_exit(SIGKILL);
make_task_dead(SIGKILL);
/* We ran out of memory, or some other thing happened to us that
made us unable to handle the page fault gracefully. */

@ -462,7 +462,7 @@
scl-gpios = <&gpio3 21 GPIO_ACTIVE_HIGH>;
status = "okay";
i2c-switch@70 {
i2c-mux@70 {
compatible = "nxp,pca9547";
#address-cells = <1>;
#size-cells = <0>;

@ -463,7 +463,6 @@
&uart1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart1>;
uart-has-rtscts;
rts-gpios = <&gpio7 1 GPIO_ACTIVE_HIGH>;
status = "okay";
};

@ -344,7 +344,7 @@ static void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
if (panic_on_oops)
panic("Fatal exception");
if (signr)
do_exit(signr);
make_task_dead(signr);
}
/*

@ -149,7 +149,7 @@ __do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
show_pte(mm, addr);
die("Oops", regs, fsr);
bust_spinlocks(0);
do_exit(SIGKILL);
make_task_dead(SIGKILL);
}
/*

@ -160,7 +160,7 @@ void __init paging_init(const struct machine_desc *mdesc)
mpu_setup();
/* allocate the zero page. */
zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
zero_page = (void *)memblock_alloc(PAGE_SIZE, PAGE_SIZE);
if (!zero_page)
panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
__func__, PAGE_SIZE, PAGE_SIZE);

@ -225,7 +225,7 @@ void die(const char *str, struct pt_regs *regs, int err)
raw_spin_unlock_irqrestore(&die_lock, flags);
if (ret != NOTIFY_STOP)
do_exit(SIGSEGV);
make_task_dead(SIGSEGV);
}
static bool show_unhandled_signals_ratelimited(void)

@ -281,7 +281,7 @@ static void die_kernel_fault(const char *msg, unsigned long addr,
show_pte(addr);
die("Oops", regs, esr);
bust_spinlocks(0);
do_exit(SIGKILL);
make_task_dead(SIGKILL);
}
static void __do_kernel_fault(unsigned long addr, unsigned int esr,

@ -17,6 +17,7 @@
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/mm_types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
@ -110,7 +111,7 @@ void die(const char *str, struct pt_regs *fp, unsigned long err)
dump(fp);
spin_unlock_irq(&die_lock);
do_exit(SIGSEGV);
make_task_dead(SIGSEGV);
}
static int kstack_depth_to_print = 24;

@ -52,7 +52,7 @@ asmlinkage int do_page_fault(struct pt_regs *regs, unsigned long address,
printk(" at virtual address %08lx\n", address);
if (!user_mode(regs))
die("Oops", regs, error_code);
do_exit(SIGKILL);
make_task_dead(SIGKILL);
return 1;
}

@ -234,7 +234,7 @@ int die(const char *str, struct pt_regs *regs, long err)
panic("Fatal exception");
oops_exit();
do_exit(err);
make_task_dead(err);
return 0;
}

@ -445,7 +445,7 @@ config ARCH_PROC_KCORE_TEXT
depends on PROC_KCORE
config IA64_MCA_RECOVERY
tristate "MCA recovery from errors other than TLB."
bool "MCA recovery from errors other than TLB."
config PERFMON
bool "Performance monitor support"

@ -11,6 +11,7 @@
#include <linux/types.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kallsyms.h>
@ -176,7 +177,7 @@ mca_handler_bh(unsigned long paddr, void *iip, unsigned long ipsr)
spin_unlock(&mca_bh_lock);
/* This process is about to be killed itself */
do_exit(SIGKILL);
make_task_dead(SIGKILL);
}
/**

@ -85,7 +85,7 @@ die (const char *str, struct pt_regs *regs, long err)
if (panic_on_oops)
panic("Fatal exception");
do_exit(SIGSEGV);
make_task_dead(SIGSEGV);
return 0;
}

@ -302,7 +302,7 @@ retry:
regs = NULL;
bust_spinlocks(0);
if (regs)
do_exit(SIGKILL);
make_task_dead(SIGKILL);
return;
out_of_memory:

@ -1139,7 +1139,7 @@ void die_if_kernel (char *str, struct pt_regs *fp, int nr)
pr_crit("%s: %08x\n", str, nr);
show_registers(fp);
add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
do_exit(SIGSEGV);
make_task_dead(SIGSEGV);
}
asmlinkage void set_esp0(unsigned long ssp)

@ -48,7 +48,7 @@ int send_fault_sig(struct pt_regs *regs)
pr_alert("Unable to handle kernel access");
pr_cont(" at virtual address %p\n", addr);
die_if_kernel("Oops", regs, 0 /*error_code*/);
do_exit(SIGKILL);
make_task_dead(SIGKILL);
}
return 1;

@ -44,10 +44,10 @@ void die(const char *str, struct pt_regs *fp, long err)
pr_warn("Oops: %s, sig: %ld\n", str, err);
show_regs(fp);
spin_unlock_irq(&die_lock);
/* do_exit() should take care of panic'ing from an interrupt
/* make_task_dead() should take care of panic'ing from an interrupt
* context so we don't handle it here
*/
do_exit(err);
make_task_dead(err);
}
/* for user application debugging */

@ -412,7 +412,7 @@ void __noreturn die(const char *str, struct pt_regs *regs)
if (regs && kexec_should_crash(current))
crash_kexec(regs);
do_exit(sig);
make_task_dead(sig);
}
extern struct exception_table_entry __start___dbe_table[];

@ -183,7 +183,7 @@ void die(const char *str, struct pt_regs *regs, int err)
bust_spinlocks(0);
spin_unlock_irq(&die_lock);
do_exit(SIGSEGV);
make_task_dead(SIGSEGV);
}
EXPORT_SYMBOL(die);
@ -286,7 +286,7 @@ void unhandled_interruption(struct pt_regs *regs)
pr_emerg("unhandled_interruption\n");
show_regs(regs);
if (!user_mode(regs))
do_exit(SIGKILL);
make_task_dead(SIGKILL);
force_sig(SIGKILL, current);
}
@ -297,7 +297,7 @@ void unhandled_exceptions(unsigned long entry, unsigned long addr,
addr, type);
show_regs(regs);
if (!user_mode(regs))
do_exit(SIGKILL);
make_task_dead(SIGKILL);
force_sig(SIGKILL, current);
}
@ -324,7 +324,7 @@ void do_revinsn(struct pt_regs *regs)
pr_emerg("Reserved Instruction\n");
show_regs(regs);
if (!user_mode(regs))
do_exit(SIGILL);
make_task_dead(SIGILL);
force_sig(SIGILL, current);
}

@ -37,10 +37,10 @@ void die(const char *str, struct pt_regs *regs, long err)
show_regs(regs);
spin_unlock_irq(&die_lock);
/*
* do_exit() should take care of panic'ing from an interrupt
* make_task_dead() should take care of panic'ing from an interrupt
* context so we don't handle it here
*/
do_exit(err);
make_task_dead(err);
}
void _exception(int signo, struct pt_regs *regs, int code, unsigned long addr)

@ -224,7 +224,7 @@ void die(const char *str, struct pt_regs *regs, long err)
__asm__ __volatile__("l.nop 1");
do {} while (1);
#endif
do_exit(SIGSEGV);
make_task_dead(SIGSEGV);
}
/* This is normally the 'Oops' routine */

@ -265,7 +265,7 @@ void die_if_kernel(char *str, struct pt_regs *regs, long err)
panic("Fatal exception");
oops_exit();
do_exit(SIGSEGV);
make_task_dead(SIGSEGV);
}
/* gdb uses break 4,8 */

@ -251,7 +251,7 @@ static void oops_end(unsigned long flags, struct pt_regs *regs,
panic("Fatal exception in interrupt");
if (panic_on_oops)
panic("Fatal exception");
do_exit(signr);
make_task_dead(signr);
}
NOKPROBE_SYMBOL(oops_end);

@ -64,7 +64,7 @@ void die(struct pt_regs *regs, const char *str)
if (panic_on_oops)
panic("Fatal exception");
if (ret != NOTIFY_STOP)
do_exit(SIGSEGV);
make_task_dead(SIGSEGV);
}
void do_trap(struct pt_regs *regs, int signo, int code,

@ -200,7 +200,7 @@ no_context:
(addr < PAGE_SIZE) ? "NULL pointer dereference" :
"paging request", addr);
die(regs, "Oops");
do_exit(SIGKILL);
make_task_dead(SIGKILL);
/*
* We ran out of memory, call the OOM killer, and return the userspace

@ -187,5 +187,5 @@ void die(struct pt_regs *regs, const char *str)
if (panic_on_oops)
panic("Fatal exception: panic_on_oops");
oops_exit();
do_exit(SIGSEGV);
make_task_dead(SIGSEGV);
}

@ -179,7 +179,7 @@ void s390_handle_mcck(void)
"malfunction (code 0x%016lx).\n", mcck.mcck_code);
printk(KERN_EMERG "mcck: task: %s, pid: %d.\n",
current->comm, current->pid);
do_exit(SIGSEGV);
make_task_dead(SIGSEGV);
}
}
EXPORT_SYMBOL_GPL(s390_handle_mcck);

@ -57,7 +57,7 @@ void die(const char *str, struct pt_regs *regs, long err)
if (panic_on_oops)
panic("Fatal exception");
do_exit(SIGSEGV);
make_task_dead(SIGSEGV);
}
void die_if_kernel(const char *str, struct pt_regs *regs, long err)

@ -86,9 +86,7 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
}
printk("Instruction DUMP:");
instruction_dump ((unsigned long *) regs->pc);
if(regs->psr & PSR_PS)
do_exit(SIGKILL);
do_exit(SIGSEGV);
make_task_dead((regs->psr & PSR_PS) ? SIGKILL : SIGSEGV);
}
void do_hw_interrupt(struct pt_regs *regs, unsigned long type)

@ -2565,9 +2565,7 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
}
if (panic_on_oops)
panic("Fatal exception");
if (regs->tstate & TSTATE_PRIV)
do_exit(SIGKILL);
do_exit(SIGSEGV);
make_task_dead((regs->tstate & TSTATE_PRIV)? SIGKILL : SIGSEGV);
}
EXPORT_SYMBOL(die_if_kernel);

@ -1500,13 +1500,13 @@ ENTRY(async_page_fault)
END(async_page_fault)
#endif
ENTRY(rewind_stack_do_exit)
ENTRY(rewind_stack_and_make_dead)
/* Prevent any naive code from trying to unwind to our caller. */
xorl %ebp, %ebp
movl PER_CPU_VAR(cpu_current_top_of_stack), %esi
leal -TOP_OF_KERNEL_STACK_PADDING-PTREGS_SIZE(%esi), %esp
call do_exit
call make_task_dead
1: jmp 1b
END(rewind_stack_do_exit)
END(rewind_stack_and_make_dead)

@ -1759,10 +1759,10 @@ END(nmi)
ENTRY(ignore_sysret)
UNWIND_HINT_EMPTY
mov $-ENOSYS, %eax
sysret
sysretl
END(ignore_sysret)
ENTRY(rewind_stack_do_exit)
ENTRY(rewind_stack_and_make_dead)
UNWIND_HINT_FUNC
/* Prevent any naive code from trying to unwind to our caller. */
xorl %ebp, %ebp
@ -1771,5 +1771,5 @@ ENTRY(rewind_stack_do_exit)
leaq -PTREGS_SIZE(%rax), %rsp
UNWIND_HINT_REGS
call do_exit
END(rewind_stack_do_exit)
call make_task_dead
END(rewind_stack_and_make_dead)

@ -326,7 +326,7 @@ unsigned long oops_begin(void)
}
NOKPROBE_SYMBOL(oops_begin);
void __noreturn rewind_stack_do_exit(int signr);
void __noreturn rewind_stack_and_make_dead(int signr);
void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
{
@ -361,7 +361,7 @@ void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
* reuse the task stack and that existing poisons are invalid.
*/
kasan_unpoison_task_stack(current);
rewind_stack_do_exit(signr);
rewind_stack_and_make_dead(signr);
}
NOKPROBE_SYMBOL(oops_end);

@ -114,6 +114,7 @@ static void make_8259A_irq(unsigned int irq)
disable_irq_nosync(irq);
io_apic_irqs &= ~(1<<irq);
irq_set_chip_and_handler(irq, &i8259A_chip, handle_level_irq);
irq_set_status_flags(irq, IRQ_LEVEL);
enable_irq(irq);
lapic_assign_legacy_vector(irq, true);
}

@ -72,8 +72,10 @@ void __init init_ISA_irqs(void)
legacy_pic->init(0);
for (i = 0; i < nr_legacy_irqs(); i++)
for (i = 0; i < nr_legacy_irqs(); i++) {
irq_set_chip_and_handler(i, chip, handle_level_irq);
irq_set_status_flags(i, IRQ_LEVEL);
}
}
void __init init_IRQ(void)

@ -22,6 +22,6 @@
*/
ENTRY(__iowrite32_copy)
movl %edx,%ecx
rep movsd
rep movsl
ret
ENDPROC(__iowrite32_copy)

@ -542,5 +542,5 @@ void die(const char * str, struct pt_regs * regs, long err)
if (panic_on_oops)
panic("Fatal exception");
do_exit(err);
make_task_dead(err);
}

@ -2180,10 +2180,7 @@ static inline bool bio_check_ro(struct bio *bio, struct hd_struct *part)
if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
return false;
WARN_ONCE(1,
"generic_make_request: Trying to write "
"to read-only block-device %s (partno %d)\n",
pr_warn("Trying to write to read-only block-device %s (partno %d)\n",
bio_devname(bio, b), part->partno);
/* Older lvm-tools actually trigger this */
return false;

@ -223,7 +223,8 @@ static int dma_chan_get(struct dma_chan *chan)
/* The channel is already in use, update client count */
if (chan->client_count) {
__module_get(owner);
goto out;
chan->client_count++;
return 0;
}
if (!try_module_get(owner))
@ -236,11 +237,11 @@ static int dma_chan_get(struct dma_chan *chan)
goto err_out;
}
chan->client_count++;
if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
balance_ref_count(chan);
out:
chan->client_count++;
return 0;
err_out:

@ -1347,10 +1347,12 @@ static struct sdma_desc *sdma_transfer_init(struct sdma_channel *sdmac,
sdma_config_ownership(sdmac, false, true, false);
if (sdma_load_context(sdmac))
goto err_desc_out;
goto err_bd_out;
return desc;
err_bd_out:
sdma_free_bd(desc);
err_desc_out:
kfree(desc);
err_out:

@ -164,7 +164,9 @@
#define XILINX_DMA_REG_BTT 0x28
/* AXI DMA Specific Masks/Bit fields */
#define XILINX_DMA_MAX_TRANS_LEN GENMASK(22, 0)
#define XILINX_DMA_MAX_TRANS_LEN_MIN 8
#define XILINX_DMA_MAX_TRANS_LEN_MAX 23
#define XILINX_DMA_V2_MAX_TRANS_LEN_MAX 26
#define XILINX_DMA_CR_COALESCE_MAX GENMASK(23, 16)
#define XILINX_DMA_CR_CYCLIC_BD_EN_MASK BIT(4)
#define XILINX_DMA_CR_COALESCE_SHIFT 16
@ -428,6 +430,7 @@ struct xilinx_dma_config {
* @rxs_clk: DMA s2mm stream clock
* @nr_channels: Number of channels DMA device supports
* @chan_id: DMA channel identifier
* @max_buffer_len: Max buffer length
*/
struct xilinx_dma_device {
void __iomem *regs;
@ -447,6 +450,7 @@ struct xilinx_dma_device {
struct clk *rxs_clk;
u32 nr_channels;
u32 chan_id;
u32 max_buffer_len;
};
/* Macros */
@ -969,6 +973,25 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
return 0;
}
/**
* xilinx_dma_calc_copysize - Calculate the amount of data to copy
* @chan: Driver specific DMA channel
* @size: Total data that needs to be copied
* @done: Amount of data that has been already copied
*
* Return: Amount of data that has to be copied
*/
static int xilinx_dma_calc_copysize(struct xilinx_dma_chan *chan,
int size, int done)
{
size_t copy;
copy = min_t(size_t, size - done,
chan->xdev->max_buffer_len);
return copy;
}
/**
* xilinx_dma_tx_status - Get DMA transaction status
* @dchan: DMA channel
@ -1002,7 +1025,7 @@ static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan,
list_for_each_entry(segment, &desc->segments, node) {
hw = &segment->hw;
residue += (hw->control - hw->status) &
XILINX_DMA_MAX_TRANS_LEN;
chan->xdev->max_buffer_len;
}
}
spin_unlock_irqrestore(&chan->lock, flags);
@ -1262,7 +1285,7 @@ static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan)
/* Start the transfer */
dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
hw->control & XILINX_DMA_MAX_TRANS_LEN);
hw->control & chan->xdev->max_buffer_len);
}
list_splice_tail_init(&chan->pending_list, &chan->active_list);
@ -1365,7 +1388,7 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
/* Start the transfer */
dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
hw->control & XILINX_DMA_MAX_TRANS_LEN);
hw->control & chan->xdev->max_buffer_len);
}
list_splice_tail_init(&chan->pending_list, &chan->active_list);
@ -1729,7 +1752,7 @@ xilinx_cdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst,
struct xilinx_cdma_tx_segment *segment;
struct xilinx_cdma_desc_hw *hw;
if (!len || len > XILINX_DMA_MAX_TRANS_LEN)
if (!len || len > chan->xdev->max_buffer_len)
return NULL;
desc = xilinx_dma_alloc_tx_descriptor(chan);
@ -1819,8 +1842,8 @@ static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg(
* Calculate the maximum number of bytes to transfer,
* making sure it is less than the hw limit
*/
copy = min_t(size_t, sg_dma_len(sg) - sg_used,
XILINX_DMA_MAX_TRANS_LEN);
copy = xilinx_dma_calc_copysize(chan, sg_dma_len(sg),
sg_used);
hw = &segment->hw;
/* Fill in the descriptor */
@ -1924,8 +1947,8 @@ static struct dma_async_tx_descriptor *xilinx_dma_prep_dma_cyclic(
* Calculate the maximum number of bytes to transfer,
* making sure it is less than the hw limit
*/
copy = min_t(size_t, period_len - sg_used,
XILINX_DMA_MAX_TRANS_LEN);
copy = xilinx_dma_calc_copysize(chan, period_len,
sg_used);
hw = &segment->hw;
xilinx_axidma_buf(chan, hw, buf_addr, sg_used,
period_len * i);
@ -2613,7 +2636,7 @@ static int xilinx_dma_probe(struct platform_device *pdev)
struct xilinx_dma_device *xdev;
struct device_node *child, *np = pdev->dev.of_node;
struct resource *io;
u32 num_frames, addr_width;
u32 num_frames, addr_width, len_width;
int i, err;
/* Allocate and initialize the DMA engine structure */
@ -2640,13 +2663,30 @@ static int xilinx_dma_probe(struct platform_device *pdev)
/* Request and map I/O memory */
io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
xdev->regs = devm_ioremap_resource(&pdev->dev, io);
if (IS_ERR(xdev->regs))
return PTR_ERR(xdev->regs);
if (IS_ERR(xdev->regs)) {
err = PTR_ERR(xdev->regs);
goto disable_clks;
}
/* Retrieve the DMA engine properties from the device tree */
xdev->has_sg = of_property_read_bool(node, "xlnx,include-sg");
if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
xdev->max_buffer_len = GENMASK(XILINX_DMA_MAX_TRANS_LEN_MAX - 1, 0);
if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
xdev->mcdma = of_property_read_bool(node, "xlnx,mcdma");
if (!of_property_read_u32(node, "xlnx,sg-length-width",
&len_width)) {
if (len_width < XILINX_DMA_MAX_TRANS_LEN_MIN ||
len_width > XILINX_DMA_V2_MAX_TRANS_LEN_MAX) {
dev_warn(xdev->dev,
"invalid xlnx,sg-length-width property value. Using default width\n");
} else {
if (len_width > XILINX_DMA_MAX_TRANS_LEN_MAX)
dev_warn(xdev->dev, "Please ensure that IP supports buffer length > 23 bits\n");
xdev->max_buffer_len =
GENMASK(len_width - 1, 0);
}
}
}
if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
err = of_property_read_u32(node, "xlnx,num-fstores",
@ -2719,8 +2759,10 @@ static int xilinx_dma_probe(struct platform_device *pdev)
/* Initialize the channels */
for_each_child_of_node(node, child) {
err = xilinx_dma_child_probe(xdev, child);
if (err < 0)
goto disable_clks;
if (err < 0) {
of_node_put(child);
goto error;
}
}
if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
@ -2753,12 +2795,12 @@ static int xilinx_dma_probe(struct platform_device *pdev)
return 0;
disable_clks:
xdma_disable_allclks(xdev);
error:
for (i = 0; i < xdev->nr_channels; i++)
if (xdev->chan[i])
xilinx_dma_chan_remove(xdev->chan[i]);
disable_clks:
xdma_disable_allclks(xdev);
return err;
}

@ -34,6 +34,9 @@
static DEFINE_MUTEX(device_ctls_mutex);
static LIST_HEAD(edac_device_list);
/* Default workqueue processing interval on this instance, in msecs */
#define DEFAULT_POLL_INTERVAL 1000
#ifdef CONFIG_EDAC_DEBUG
static void edac_device_dump_device(struct edac_device_ctl_info *edac_dev)
{
@ -366,7 +369,7 @@ static void edac_device_workq_function(struct work_struct *work_req)
* whole one second to save timers firing all over the period
* between integral seconds
*/
if (edac_dev->poll_msec == 1000)
if (edac_dev->poll_msec == DEFAULT_POLL_INTERVAL)
edac_queue_work(&edac_dev->work, round_jiffies_relative(edac_dev->delay));
else
edac_queue_work(&edac_dev->work, edac_dev->delay);
@ -396,7 +399,7 @@ static void edac_device_workq_setup(struct edac_device_ctl_info *edac_dev,
* timers firing on sub-second basis, while they are happy
* to fire together on the 1 second exactly
*/
if (edac_dev->poll_msec == 1000)
if (edac_dev->poll_msec == DEFAULT_POLL_INTERVAL)
edac_queue_work(&edac_dev->work, round_jiffies_relative(edac_dev->delay));
else
edac_queue_work(&edac_dev->work, edac_dev->delay);
@ -430,7 +433,7 @@ void edac_device_reset_delay_period(struct edac_device_ctl_info *edac_dev,
edac_dev->delay = msecs_to_jiffies(msec);
/* See comment in edac_device_workq_setup() above */
if (edac_dev->poll_msec == 1000)
if (edac_dev->poll_msec == DEFAULT_POLL_INTERVAL)
edac_mod_work(&edac_dev->work, round_jiffies_relative(edac_dev->delay));
else
edac_mod_work(&edac_dev->work, edac_dev->delay);
@ -472,11 +475,7 @@ int edac_device_add_device(struct edac_device_ctl_info *edac_dev)
/* This instance is NOW RUNNING */
edac_dev->op_state = OP_RUNNING_POLL;
/*
* enable workq processing on this instance,
* default = 1000 msec
*/
edac_device_workq_setup(edac_dev, 1000);
edac_device_workq_setup(edac_dev, edac_dev->poll_msec ?: DEFAULT_POLL_INTERVAL);
} else {
edac_dev->op_state = OP_RUNNING_INTERRUPT;
}

@ -185,8 +185,10 @@ static int highbank_mc_probe(struct platform_device *pdev)
drvdata = mci->pvt_info;
platform_set_drvdata(pdev, mci);
if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL))
return -ENOMEM;
if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL)) {
res = -ENOMEM;
goto free;
}
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!r) {
@ -254,6 +256,7 @@ err2:
edac_mc_del_mc(&pdev->dev);
err:
devres_release_group(&pdev->dev, NULL);
free:
edac_mc_free(mci);
return res;
}

@ -4116,7 +4116,18 @@ intel_dp_check_mst_status(struct intel_dp *intel_dp)
bool bret;
if (intel_dp->is_mst) {
u8 esi[DP_DPRX_ESI_LEN] = { 0 };
/*
* The +2 is because DP_DPRX_ESI_LEN is 14, but we then
* pass in "esi+10" to drm_dp_channel_eq_ok(), which
* takes a 6-byte array. So we actually need 16 bytes
* here.
*
* Somebody who knows what the limits actually are
* should check this, but for now this is at least
* harmless and avoids a valid compiler warning about
* using more of the array than we have allocated.
*/
u8 esi[DP_DPRX_ESI_LEN+2] = { 0 };
int ret = 0;
int retry;
bool handled;

@ -63,7 +63,6 @@ static int betopff_init(struct hid_device *hid)
struct list_head *report_list =
&hid->report_enum[HID_OUTPUT_REPORT].report_list;
struct input_dev *dev;
int field_count = 0;
int error;
int i, j;
@ -89,19 +88,21 @@ static int betopff_init(struct hid_device *hid)
* -----------------------------------------
* Do init them with default value.
*/
if (report->maxfield < 4) {
hid_err(hid, "not enough fields in the report: %d\n",
report->maxfield);
return -ENODEV;
}
for (i = 0; i < report->maxfield; i++) {
if (report->field[i]->report_count < 1) {
hid_err(hid, "no values in the field\n");
return -ENODEV;
}
for (j = 0; j < report->field[i]->report_count; j++) {
report->field[i]->value[j] = 0x00;
field_count++;
}
}
if (field_count < 4) {
hid_err(hid, "not enough fields in the report: %d\n",
field_count);
return -ENODEV;
}
betopff = kzalloc(sizeof(*betopff), GFP_KERNEL);
if (!betopff)
return -ENOMEM;

@ -980,8 +980,8 @@ struct hid_report *hid_validate_values(struct hid_device *hid,
* Validating on id 0 means we should examine the first
* report in the list.
*/
report = list_entry(
hid->report_enum[type].report_list.next,
report = list_first_entry_or_null(
&hid->report_enum[type].report_list,
struct hid_report, list);
} else {
report = hid->report_enum[type].report_id_hash[id];

@ -113,6 +113,11 @@ void *ishtp_cl_get_dma_send_buf(struct ishtp_device *dev,
int required_slots = (size / DMA_SLOT_SIZE)
+ 1 * (size % DMA_SLOT_SIZE != 0);
if (!dev->ishtp_dma_tx_map) {
dev_err(dev->devc, "Fail to allocate Tx map\n");
return NULL;
}
spin_lock_irqsave(&dev->ishtp_dma_tx_lock, flags);
for (i = 0; i <= (dev->ishtp_dma_num_slots - required_slots); i++) {
free = 1;
@ -159,6 +164,11 @@ void ishtp_cl_release_dma_acked_mem(struct ishtp_device *dev,
return;
}
if (!dev->ishtp_dma_tx_map) {
dev_err(dev->devc, "Fail to allocate Tx map\n");
return;
}
i = (msg_addr - dev->ishtp_host_dma_tx_buf) / DMA_SLOT_SIZE;
spin_lock_irqsave(&dev->ishtp_dma_tx_lock, flags);
for (j = 0; j < acked_slots; j++) {

@ -325,6 +325,8 @@ int hfi1_user_exp_rcv_setup(struct hfi1_filedata *fd,
if (!PAGE_ALIGNED(tinfo->vaddr))
return -EINVAL;
if (tinfo->length == 0)
return -EINVAL;
tidbuf = kzalloc(sizeof(*tidbuf), GFP_KERNEL);
if (!tidbuf)
@ -335,40 +337,38 @@ int hfi1_user_exp_rcv_setup(struct hfi1_filedata *fd,
tidbuf->psets = kcalloc(uctxt->expected_count, sizeof(*tidbuf->psets),
GFP_KERNEL);
if (!tidbuf->psets) {
kfree(tidbuf);
return -ENOMEM;
ret = -ENOMEM;
goto fail_release_mem;
}
pinned = pin_rcv_pages(fd, tidbuf);
if (pinned <= 0) {
kfree(tidbuf->psets);
kfree(tidbuf);
return pinned;
ret = (pinned < 0) ? pinned : -ENOSPC;
goto fail_unpin;
}
/* Find sets of physically contiguous pages */
tidbuf->n_psets = find_phys_blocks(tidbuf, pinned);
/*
* We don't need to access this under a lock since tid_used is per
* process and the same process cannot be in hfi1_user_exp_rcv_clear()
* and hfi1_user_exp_rcv_setup() at the same time.
*/
/* Reserve the number of expected tids to be used. */
spin_lock(&fd->tid_lock);
if (fd->tid_used + tidbuf->n_psets > fd->tid_limit)
pageset_count = fd->tid_limit - fd->tid_used;
else
pageset_count = tidbuf->n_psets;
fd->tid_used += pageset_count;
spin_unlock(&fd->tid_lock);
if (!pageset_count)
goto bail;
if (!pageset_count) {
ret = -ENOSPC;
goto fail_unreserve;
}
ngroups = pageset_count / dd->rcv_entries.group_size;
tidlist = kcalloc(pageset_count, sizeof(*tidlist), GFP_KERNEL);
if (!tidlist) {
ret = -ENOMEM;
goto nomem;
goto fail_unreserve;
}
tididx = 0;
@ -464,43 +464,60 @@ int hfi1_user_exp_rcv_setup(struct hfi1_filedata *fd,
}
unlock:
mutex_unlock(&uctxt->exp_mutex);
nomem:
hfi1_cdbg(TID, "total mapped: tidpairs:%u pages:%u (%d)", tididx,
mapped_pages, ret);
if (tididx) {
spin_lock(&fd->tid_lock);
fd->tid_used += tididx;
spin_unlock(&fd->tid_lock);
tinfo->tidcnt = tididx;
tinfo->length = mapped_pages * PAGE_SIZE;
if (copy_to_user(u64_to_user_ptr(tinfo->tidlist),
tidlist, sizeof(tidlist[0]) * tididx)) {
/*
* On failure to copy to the user level, we need to undo
* everything done so far so we don't leak resources.
*/
tinfo->tidlist = (unsigned long)&tidlist;
hfi1_user_exp_rcv_clear(fd, tinfo);
tinfo->tidlist = 0;
ret = -EFAULT;
goto bail;
}
/* fail if nothing was programmed, set error if none provided */
if (tididx == 0) {
if (ret >= 0)
ret = -ENOSPC;
goto fail_unreserve;
}
/* adjust reserved tid_used to actual count */
spin_lock(&fd->tid_lock);
fd->tid_used -= pageset_count - tididx;
spin_unlock(&fd->tid_lock);
/* unpin all pages not covered by a TID */
unpin_rcv_pages(fd, tidbuf, NULL, mapped_pages, pinned - mapped_pages,
false);
tinfo->tidcnt = tididx;
tinfo->length = mapped_pages * PAGE_SIZE;
if (copy_to_user(u64_to_user_ptr(tinfo->tidlist),
tidlist, sizeof(tidlist[0]) * tididx)) {
ret = -EFAULT;
goto fail_unprogram;
}
/*
* If not everything was mapped (due to insufficient RcvArray entries,
* for example), unpin all unmapped pages so we can pin them next time.
*/
if (mapped_pages != pinned)
unpin_rcv_pages(fd, tidbuf, NULL, mapped_pages,
(pinned - mapped_pages), false);
bail:
kfree(tidbuf->psets);
kfree(tidlist);
kfree(tidbuf->pages);
kfree(tidbuf->psets);
kfree(tidbuf);
return ret > 0 ? 0 : ret;
kfree(tidlist);
return 0;
fail_unprogram:
/* unprogram, unmap, and unpin all allocated TIDs */
tinfo->tidlist = (unsigned long)tidlist;
hfi1_user_exp_rcv_clear(fd, tinfo);
tinfo->tidlist = 0;
pinned = 0; /* nothing left to unpin */
pageset_count = 0; /* nothing left reserved */
fail_unreserve:
spin_lock(&fd->tid_lock);
fd->tid_used -= pageset_count;
spin_unlock(&fd->tid_lock);
fail_unpin:
if (pinned > 0)
unpin_rcv_pages(fd, tidbuf, NULL, 0, pinned, false);
fail_release_mem:
kfree(tidbuf->pages);
kfree(tidbuf->psets);
kfree(tidbuf);
kfree(tidlist);
return ret;
}
int hfi1_user_exp_rcv_clear(struct hfi1_filedata *fd,

@ -192,7 +192,6 @@ static const char * const smbus_pnp_ids[] = {
"SYN3221", /* HP 15-ay000 */
"SYN323d", /* HP Spectre X360 13-w013dx */
"SYN3257", /* HP Envy 13-ad105ng */
"SYN3286", /* HP Laptop 15-da3001TU */
NULL
};

@ -524,19 +524,28 @@ static void xgbe_disable_vxlan(struct xgbe_prv_data *pdata)
netif_dbg(pdata, drv, pdata->netdev, "VXLAN acceleration disabled\n");
}
static unsigned int xgbe_get_fc_queue_count(struct xgbe_prv_data *pdata)
{
unsigned int max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
/* From MAC ver 30H the TFCR is per priority, instead of per queue */
if (XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) >= 0x30)
return max_q_count;
else
return min_t(unsigned int, pdata->tx_q_count, max_q_count);
}
static int xgbe_disable_tx_flow_control(struct xgbe_prv_data *pdata)
{
unsigned int max_q_count, q_count;
unsigned int reg, reg_val;
unsigned int i;
unsigned int i, q_count;
/* Clear MTL flow control */
for (i = 0; i < pdata->rx_q_count; i++)
XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 0);
/* Clear MAC flow control */
max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count);
q_count = xgbe_get_fc_queue_count(pdata);
reg = MAC_Q0TFCR;
for (i = 0; i < q_count; i++) {
reg_val = XGMAC_IOREAD(pdata, reg);
@ -553,9 +562,8 @@ static int xgbe_enable_tx_flow_control(struct xgbe_prv_data *pdata)
{
struct ieee_pfc *pfc = pdata->pfc;
struct ieee_ets *ets = pdata->ets;
unsigned int max_q_count, q_count;
unsigned int reg, reg_val;
unsigned int i;
unsigned int i, q_count;
/* Set MTL flow control */
for (i = 0; i < pdata->rx_q_count; i++) {
@ -579,8 +587,7 @@ static int xgbe_enable_tx_flow_control(struct xgbe_prv_data *pdata)
}
/* Set MAC flow control */
max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count);
q_count = xgbe_get_fc_queue_count(pdata);
reg = MAC_Q0TFCR;
for (i = 0; i < q_count; i++) {
reg_val = XGMAC_IOREAD(pdata, reg);

@ -496,6 +496,7 @@ static enum xgbe_an xgbe_an73_tx_training(struct xgbe_prv_data *pdata,
reg |= XGBE_KR_TRAINING_ENABLE;
reg |= XGBE_KR_TRAINING_START;
XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, reg);
pdata->kr_start_time = jiffies;
netif_dbg(pdata, link, pdata->netdev,
"KR training initiated\n");
@ -632,6 +633,8 @@ static enum xgbe_an xgbe_an73_incompat_link(struct xgbe_prv_data *pdata)
xgbe_switch_mode(pdata);
pdata->an_result = XGBE_AN_READY;
xgbe_an_restart(pdata);
return XGBE_AN_INCOMPAT_LINK;
@ -1275,9 +1278,30 @@ static bool xgbe_phy_aneg_done(struct xgbe_prv_data *pdata)
static void xgbe_check_link_timeout(struct xgbe_prv_data *pdata)
{
unsigned long link_timeout;
unsigned long kr_time;
int wait;
link_timeout = pdata->link_check + (XGBE_LINK_TIMEOUT * HZ);
if (time_after(jiffies, link_timeout)) {
if ((xgbe_cur_mode(pdata) == XGBE_MODE_KR) &&
pdata->phy.autoneg == AUTONEG_ENABLE) {
/* AN restart should not happen while KR training is in progress.
* The while loop ensures no AN restart during KR training,
* waits up to 500ms and AN restart is triggered only if KR
* training has failed.
*/
wait = XGBE_KR_TRAINING_WAIT_ITER;
while (wait--) {
kr_time = pdata->kr_start_time +
msecs_to_jiffies(XGBE_AN_MS_TIMEOUT);
if (time_after(jiffies, kr_time))
break;
/* AN restart is not required, if AN result is COMPLETE */
if (pdata->an_result == XGBE_AN_COMPLETE)
return;
usleep_range(10000, 11000);
}
}
netif_dbg(pdata, link, pdata->netdev, "AN link timeout\n");
xgbe_phy_config_aneg(pdata);
}

@ -290,6 +290,7 @@
/* Auto-negotiation */
#define XGBE_AN_MS_TIMEOUT 500
#define XGBE_LINK_TIMEOUT 5
#define XGBE_KR_TRAINING_WAIT_ITER 50
#define XGBE_SGMII_AN_LINK_STATUS BIT(1)
#define XGBE_SGMII_AN_LINK_SPEED (BIT(2) | BIT(3))
@ -1266,6 +1267,7 @@ struct xgbe_prv_data {
unsigned int parallel_detect;
unsigned int fec_ability;
unsigned long an_start;
unsigned long kr_start_time;
enum xgbe_an_mode an_mode;
/* I2C support */

@ -11189,7 +11189,7 @@ static void tg3_reset_task(struct work_struct *work)
rtnl_lock();
tg3_full_lock(tp, 0);
if (!netif_running(tp->dev)) {
if (tp->pcierr_recovery || !netif_running(tp->dev)) {
tg3_flag_clear(tp, RESET_TASK_PENDING);
tg3_full_unlock(tp);
rtnl_unlock();
@ -18240,6 +18240,9 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
netdev_info(netdev, "PCI I/O error detected\n");
/* Want to make sure that the reset task doesn't run */
tg3_reset_task_cancel(tp);
rtnl_lock();
/* Could be second call or maybe we don't have netdev yet */
@ -18256,9 +18259,6 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
tg3_timer_stop(tp);
/* Want to make sure that the reset task doesn't run */
tg3_reset_task_cancel(tp);
netif_device_detach(netdev);
/* Clean up software state, even if MMIO is blocked */

@ -1738,7 +1738,6 @@ static int macb_pad_and_fcs(struct sk_buff **skb, struct net_device *ndev)
bool cloned = skb_cloned(*skb) || skb_header_cloned(*skb) ||
skb_is_nonlinear(*skb);
int padlen = ETH_ZLEN - (*skb)->len;
int headroom = skb_headroom(*skb);
int tailroom = skb_tailroom(*skb);
struct sk_buff *nskb;
u32 fcs;
@ -1752,9 +1751,6 @@ static int macb_pad_and_fcs(struct sk_buff **skb, struct net_device *ndev)
/* FCS could be appended to tailroom. */
if (tailroom >= ETH_FCS_LEN)
goto add_fcs;
/* FCS could be appended by moving data to headroom. */
else if (!cloned && headroom + tailroom >= ETH_FCS_LEN)
padlen = 0;
/* No room for FCS, need to reallocate skb. */
else
padlen = ETH_FCS_LEN;
@ -1763,10 +1759,7 @@ static int macb_pad_and_fcs(struct sk_buff **skb, struct net_device *ndev)
padlen += ETH_FCS_LEN;
}
if (!cloned && headroom + tailroom >= padlen) {
(*skb)->data = memmove((*skb)->head, (*skb)->data, (*skb)->len);
skb_set_tail_pointer(*skb, (*skb)->len);
} else {
if (cloned || tailroom < padlen) {
nskb = skb_copy_expand(*skb, 0, padlen, GFP_ATOMIC);
if (!nskb)
return -ENOMEM;

@ -1683,7 +1683,7 @@ static void mlx5_core_verify_params(void)
}
}
static int __init init(void)
static int __init mlx5_init(void)
{
int err;
@ -1708,7 +1708,7 @@ err_debug:
return err;
}
static void __exit cleanup(void)
static void __exit mlx5_cleanup(void)
{
#ifdef CONFIG_MLX5_CORE_EN
mlx5e_cleanup();
@ -1717,5 +1717,5 @@ static void __exit cleanup(void)
mlx5_unregister_debugfs();
}
module_init(init);
module_exit(cleanup);
module_init(mlx5_init);
module_exit(mlx5_cleanup);

@ -738,14 +738,14 @@ static void ravb_error_interrupt(struct net_device *ndev)
ravb_write(ndev, ~(EIS_QFS | EIS_RESERVED), EIS);
if (eis & EIS_QFS) {
ris2 = ravb_read(ndev, RIS2);
ravb_write(ndev, ~(RIS2_QFF0 | RIS2_RFFF | RIS2_RESERVED),
ravb_write(ndev, ~(RIS2_QFF0 | RIS2_QFF1 | RIS2_RFFF | RIS2_RESERVED),
RIS2);
/* Receive Descriptor Empty int */
if (ris2 & RIS2_QFF0)
priv->stats[RAVB_BE].rx_over_errors++;
/* Receive Descriptor Empty int */
if (ris2 & RIS2_QFF1)
priv->stats[RAVB_NC].rx_over_errors++;

@ -103,7 +103,12 @@ EXPORT_SYMBOL(mdiobus_unregister_device);
struct phy_device *mdiobus_get_phy(struct mii_bus *bus, int addr)
{
struct mdio_device *mdiodev = bus->mdio_map[addr];
struct mdio_device *mdiodev;
if (addr < 0 || addr >= ARRAY_SIZE(bus->mdio_map))
return NULL;
mdiodev = bus->mdio_map[addr];
if (!mdiodev)
return NULL;

@ -410,7 +410,7 @@ static int sr9700_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
/* ignore the CRC length */
len = (skb->data[1] | (skb->data[2] << 8)) - 4;
if (len > ETH_FRAME_LEN || len > skb->len)
if (len > ETH_FRAME_LEN || len > skb->len || len < 0)
return 0;
/* the last packet of current skb */

@ -712,8 +712,8 @@ static int rndis_query_oid(struct usbnet *dev, u32 oid, void *data, int *len)
struct rndis_query *get;
struct rndis_query_c *get_c;
} u;
int ret, buflen;
int resplen, respoffs, copylen;
int ret;
size_t buflen, resplen, respoffs, copylen;
buflen = *len + sizeof(*u.get);
if (buflen < CONTROL_BUFFER_SIZE)
@ -748,22 +748,15 @@ static int rndis_query_oid(struct usbnet *dev, u32 oid, void *data, int *len)
if (respoffs > buflen) {
/* Device returned data offset outside buffer, error. */
netdev_dbg(dev->net, "%s(%s): received invalid "
"data offset: %d > %d\n", __func__,
oid_to_string(oid), respoffs, buflen);
netdev_dbg(dev->net,
"%s(%s): received invalid data offset: %zu > %zu\n",
__func__, oid_to_string(oid), respoffs, buflen);
ret = -EINVAL;
goto exit_unlock;
}
if ((resplen + respoffs) > buflen) {
/* Device would have returned more data if buffer would
* have been big enough. Copy just the bits that we got.
*/
copylen = buflen - respoffs;
} else {
copylen = resplen;
}
copylen = min(resplen, buflen - respoffs);
if (copylen > *len)
copylen = *len;

@ -477,8 +477,10 @@ static int rockchip_usb2phy_power_on(struct phy *phy)
return ret;
ret = property_enable(base, &rport->port_cfg->phy_sus, false);
if (ret)
if (ret) {
clk_disable_unprepare(rphy->clk480m);
return ret;
}
/* waiting for the utmi_clk to become stable */
usleep_range(1500, 2000);

@ -5771,7 +5771,7 @@ static int hpsa_scsi_host_alloc(struct ctlr_info *h)
{
struct Scsi_Host *sh;
sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h));
sh = scsi_host_alloc(&hpsa_driver_template, sizeof(struct ctlr_info));
if (sh == NULL) {
dev_err(&h->pdev->dev, "scsi_host_alloc failed\n");
return -ENOMEM;

@ -271,6 +271,9 @@ static int __ffs_ep0_queue_wait(struct ffs_data *ffs, char *data, size_t len)
struct usb_request *req = ffs->ep0req;
int ret;
if (!req)
return -EINVAL;
req->zero = len < le16_to_cpu(ffs->ev.setup.wLength);
spin_unlock_irq(&ffs->ev.waitq.lock);
@ -1807,10 +1810,14 @@ static void functionfs_unbind(struct ffs_data *ffs)
ENTER();
if (!WARN_ON(!ffs->gadget)) {
/* dequeue before freeing ep0req */
usb_ep_dequeue(ffs->gadget->ep0, ffs->ep0req);
mutex_lock(&ffs->mutex);
usb_ep_free_request(ffs->gadget->ep0, ffs->ep0req);
ffs->ep0req = NULL;
ffs->gadget = NULL;
clear_bit(FFS_FL_BOUND, &ffs->flags);
mutex_unlock(&ffs->mutex);
ffs_data_put(ffs);
}
}

@ -261,7 +261,7 @@ static int xhci_plat_probe(struct platform_device *pdev)
*priv = *priv_match;
}
device_wakeup_enable(hcd->self.controller);
device_set_wakeup_capable(&pdev->dev, true);
xhci->clk = clk;
xhci->reg_clk = reg_clk;

@ -1140,6 +1140,8 @@ int w1_process(void *data)
/* remainder if it woke up early */
unsigned long jremain = 0;
atomic_inc(&dev->refcnt);
for (;;) {
if (!jremain && dev->search_count) {
@ -1167,8 +1169,10 @@ int w1_process(void *data)
*/
mutex_unlock(&dev->list_mutex);
if (kthread_should_stop())
if (kthread_should_stop()) {
__set_current_state(TASK_RUNNING);
break;
}
/* Only sleep when the search is active. */
if (dev->search_count) {

@ -60,10 +60,9 @@ static struct w1_master *w1_alloc_dev(u32 id, int slave_count, int slave_ttl,
dev->search_count = w1_search_count;
dev->enable_pullup = w1_enable_pullup;
/* 1 for w1_process to decrement
* 1 for __w1_remove_master_device to decrement
/* For __w1_remove_master_device to decrement
*/
atomic_set(&dev->refcnt, 2);
atomic_set(&dev->refcnt, 1);
INIT_LIST_HEAD(&dev->slist);
INIT_LIST_HEAD(&dev->async_list);

@ -878,7 +878,7 @@ affs_truncate(struct inode *inode)
if (inode->i_size > AFFS_I(inode)->mmu_private) {
struct address_space *mapping = inode->i_mapping;
struct page *page;
void *fsdata;
void *fsdata = NULL;
loff_t isize = inode->i_size;
int res;

@ -429,7 +429,8 @@ cifs_reconnect(struct TCP_Server_Info *server)
server->ssocket->state, server->ssocket->flags);
sock_release(server->ssocket);
server->ssocket = NULL;
}
} else if (cifs_rdma_enabled(server))
smbd_destroy(server);
server->sequence_number = 0;
server->session_estab = false;
kfree(server->session_key.response);
@ -799,10 +800,8 @@ static void clean_demultiplex_info(struct TCP_Server_Info *server)
wake_up_all(&server->request_q);
/* give those requests time to exit */
msleep(125);
if (cifs_rdma_enabled(server) && server->smbd_conn) {
smbd_destroy(server->smbd_conn);
server->smbd_conn = NULL;
}
if (cifs_rdma_enabled(server))
smbd_destroy(server);
if (server->ssocket) {
sock_release(server->ssocket);
server->ssocket = NULL;

@ -320,6 +320,9 @@ static int smbd_conn_upcall(
info->transport_status = SMBD_DISCONNECTED;
smbd_process_disconnected(info);
wake_up(&info->disconn_wait);
wake_up_interruptible(&info->wait_reassembly_queue);
wake_up_interruptible_all(&info->wait_send_queue);
break;
default:
@ -1478,21 +1481,102 @@ static void idle_connection_timer(struct work_struct *work)
info->keep_alive_interval*HZ);
}
/* Destroy this SMBD connection, called from upper layer */
void smbd_destroy(struct smbd_connection *info)
/*
* Destroy the transport and related RDMA and memory resources
* Need to go through all the pending counters and make sure no one is using
* the transport while it is destroyed
*/
void smbd_destroy(struct TCP_Server_Info *server)
{
struct smbd_connection *info = server->smbd_conn;
struct smbd_response *response;
unsigned long flags;
if (!info) {
log_rdma_event(INFO, "rdma session already destroyed\n");
return;
}
log_rdma_event(INFO, "destroying rdma session\n");
if (info->transport_status != SMBD_DISCONNECTED) {
rdma_disconnect(server->smbd_conn->id);
log_rdma_event(INFO, "wait for transport being disconnected\n");
wait_event(
info->disconn_wait,
info->transport_status == SMBD_DISCONNECTED);
}
/* Kick off the disconnection process */
smbd_disconnect_rdma_connection(info);
log_rdma_event(INFO, "destroying qp\n");
ib_drain_qp(info->id->qp);
rdma_destroy_qp(info->id);
log_rdma_event(INFO, "wait for transport being destroyed\n");
wait_event(info->wait_destroy,
info->transport_status == SMBD_DESTROYED);
log_rdma_event(INFO, "cancelling idle timer\n");
cancel_delayed_work_sync(&info->idle_timer_work);
log_rdma_event(INFO, "cancelling send immediate work\n");
cancel_delayed_work_sync(&info->send_immediate_work);
log_rdma_event(INFO, "wait for all send posted to IB to finish\n");
wait_event(info->wait_send_pending,
atomic_read(&info->send_pending) == 0);
wait_event(info->wait_send_payload_pending,
atomic_read(&info->send_payload_pending) == 0);
/* It's not possible for upper layer to get to reassembly */
log_rdma_event(INFO, "drain the reassembly queue\n");
do {
spin_lock_irqsave(&info->reassembly_queue_lock, flags);
response = _get_first_reassembly(info);
if (response) {
list_del(&response->list);
spin_unlock_irqrestore(
&info->reassembly_queue_lock, flags);
put_receive_buffer(info, response);
} else
spin_unlock_irqrestore(
&info->reassembly_queue_lock, flags);
} while (response);
info->reassembly_data_length = 0;
log_rdma_event(INFO, "free receive buffers\n");
wait_event(info->wait_receive_queues,
info->count_receive_queue + info->count_empty_packet_queue
== info->receive_credit_max);
destroy_receive_buffers(info);
/*
* For performance reasons, memory registration and deregistration
* are not locked by srv_mutex. It is possible some processes are
* blocked on transport srv_mutex while holding memory registration.
* Release the transport srv_mutex to allow them to hit the failure
* path when sending data, and then release memory registrations.
*/
log_rdma_event(INFO, "freeing mr list\n");
wake_up_interruptible_all(&info->wait_mr);
while (atomic_read(&info->mr_used_count)) {
mutex_unlock(&server->srv_mutex);
msleep(1000);
mutex_lock(&server->srv_mutex);
}
destroy_mr_list(info);
ib_free_cq(info->send_cq);
ib_free_cq(info->recv_cq);
ib_dealloc_pd(info->pd);
rdma_destroy_id(info->id);
/* free mempools */
mempool_destroy(info->request_mempool);
kmem_cache_destroy(info->request_cache);
mempool_destroy(info->response_mempool);
kmem_cache_destroy(info->response_cache);
info->transport_status = SMBD_DESTROYED;
destroy_workqueue(info->workqueue);
log_rdma_event(INFO, "rdma session destroyed\n");
kfree(info);
server->smbd_conn = NULL;
}
/*
@ -1514,17 +1598,9 @@ int smbd_reconnect(struct TCP_Server_Info *server)
*/
if (server->smbd_conn->transport_status == SMBD_CONNECTED) {
log_rdma_event(INFO, "disconnecting transport\n");
smbd_disconnect_rdma_connection(server->smbd_conn);
smbd_destroy(server);
}
/* wait until the transport is destroyed */
if (!wait_event_timeout(server->smbd_conn->wait_destroy,
server->smbd_conn->transport_status == SMBD_DESTROYED, 5*HZ))
return -EAGAIN;
destroy_workqueue(server->smbd_conn->workqueue);
kfree(server->smbd_conn);
create_conn:
log_rdma_event(INFO, "creating rdma session\n");
server->smbd_conn = smbd_get_connection(
@ -1741,12 +1817,13 @@ static struct smbd_connection *_smbd_get_connection(
conn_param.retry_count = SMBD_CM_RETRY;
conn_param.rnr_retry_count = SMBD_CM_RNR_RETRY;
conn_param.flow_control = 0;
init_waitqueue_head(&info->wait_destroy);
log_rdma_event(INFO, "connecting to IP %pI4 port %d\n",
&addr_in->sin_addr, port);
init_waitqueue_head(&info->conn_wait);
init_waitqueue_head(&info->disconn_wait);
init_waitqueue_head(&info->wait_reassembly_queue);
rc = rdma_connect(info->id, &conn_param);
if (rc) {
log_rdma_event(ERR, "rdma_connect() failed with %i\n", rc);
@ -1770,8 +1847,6 @@ static struct smbd_connection *_smbd_get_connection(
}
init_waitqueue_head(&info->wait_send_queue);
init_waitqueue_head(&info->wait_reassembly_queue);
INIT_DELAYED_WORK(&info->idle_timer_work, idle_connection_timer);
INIT_DELAYED_WORK(&info->send_immediate_work, send_immediate_work);
queue_delayed_work(info->workqueue, &info->idle_timer_work,
@ -1812,7 +1887,7 @@ static struct smbd_connection *_smbd_get_connection(
allocate_mr_failed:
/* At this point, need to a full transport shutdown */
smbd_destroy(info);
smbd_destroy(server);
return NULL;
negotiation_failed:

@ -71,6 +71,7 @@ struct smbd_connection {
struct completion ri_done;
wait_queue_head_t conn_wait;
wait_queue_head_t wait_destroy;
wait_queue_head_t disconn_wait;
struct completion negotiate_completion;
bool negotiate_done;
@ -288,7 +289,7 @@ struct smbd_connection *smbd_get_connection(
/* Reconnect SMBDirect session */
int smbd_reconnect(struct TCP_Server_Info *server);
/* Destroy SMBDirect session */
void smbd_destroy(struct smbd_connection *info);
void smbd_destroy(struct TCP_Server_Info *server);
/* Interface for carrying upper layer I/O through send/recv */
int smbd_recv(struct smbd_connection *info, struct msghdr *msg);
@ -331,7 +332,7 @@ struct smbd_connection {};
static inline void *smbd_get_connection(
struct TCP_Server_Info *server, struct sockaddr *dstaddr) {return NULL;}
static inline int smbd_reconnect(struct TCP_Server_Info *server) {return -1; }
static inline void smbd_destroy(struct smbd_connection *info) {}
static inline void smbd_destroy(struct TCP_Server_Info *server) {}
static inline int smbd_recv(struct smbd_connection *info, struct msghdr *msg) {return -1; }
static inline int smbd_send(struct TCP_Server_Info *server, int num_rqst, struct smb_rqst *rqst) {return -1; }
#endif

@ -13,6 +13,7 @@
#include <linux/namei.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/kmemleak.h>
#include "internal.h"
static const struct dentry_operations proc_sys_dentry_operations;
@ -1376,6 +1377,38 @@ struct ctl_table_header *register_sysctl(const char *path, struct ctl_table *tab
}
EXPORT_SYMBOL(register_sysctl);
/**
* __register_sysctl_init() - register sysctl table to path
* @path: path name for sysctl base
* @table: This is the sysctl table that needs to be registered to the path
* @table_name: The name of sysctl table, only used for log printing when
* registration fails
*
* The sysctl interface is used by userspace to query or modify at runtime
* a predefined value set on a variable. These variables however have default
* values pre-set. Code which depends on these variables will always work even
* if register_sysctl() fails. If register_sysctl() fails you'd just loose the
* ability to query or modify the sysctls dynamically at run time. Chances of
* register_sysctl() failing on init are extremely low, and so for both reasons
* this function does not return any error as it is used by initialization code.
*
* Context: Can only be called after your respective sysctl base path has been
* registered. So for instance, most base directories are registered early on
* init before init levels are processed through proc_sys_init() and
* sysctl_init().
*/
void __init __register_sysctl_init(const char *path, struct ctl_table *table,
const char *table_name)
{
struct ctl_table_header *hdr = register_sysctl(path, table);
if (unlikely(!hdr)) {
pr_err("failed when register_sysctl %s to %s\n", table_name, path);
return;
}
kmemleak_not_leak(hdr);
}
static char *append_path(const char *path, char *pos, const char *name)
{
int namelen;
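
A minimal usage sketch for the new interface (hypothetical table and value names; the kernel/exit.c hunk later in this patch registers kernel.oops_limit with exactly this pattern):

static unsigned int example_limit = 100;

static struct ctl_table example_table[] = {
	{
		.procname	= "example_limit",
		.data		= &example_limit,
		.maxlen		= sizeof(example_limit),
		.mode		= 0644,
		.proc_handler	= proc_douintvec,
	},
	{ }
};

static int __init example_sysctls_init(void)
{
	/* No return value to check: a failure only loses runtime tunability. */
	register_sysctl_init("kernel", example_table);
	return 0;
}
late_initcall(example_sysctls_init);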

@ -1443,7 +1443,6 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
unsigned long safe_mask = 0;
unsigned int commit_max_age = (unsigned int)-1;
struct reiserfs_journal *journal = SB_JOURNAL(s);
char *new_opts;
int err;
char *qf_names[REISERFS_MAXQUOTAS];
unsigned int qfmt = 0;
@ -1451,10 +1450,6 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
int i;
#endif
new_opts = kstrdup(arg, GFP_KERNEL);
if (arg && !new_opts)
return -ENOMEM;
sync_filesystem(s);
reiserfs_write_lock(s);
@ -1605,7 +1600,6 @@ out_ok_unlocked:
out_err_unlock:
reiserfs_write_unlock(s);
out_err:
kfree(new_opts);
return err;
}

@ -300,6 +300,7 @@ extern long (*panic_blink)(int state);
__printf(1, 2)
void panic(const char *fmt, ...) __noreturn __cold;
void nmi_panic(struct pt_regs *regs, const char *msg);
void check_panic_on_warn(const char *origin);
extern void oops_enter(void);
extern void oops_exit(void);
void print_oops_end_marker(void);

@@ -36,6 +36,7 @@ extern int sched_fork(unsigned long clone_flags, struct task_struct *p);
extern void sched_dead(struct task_struct *p);

void __noreturn do_task_dead(void);
+void __noreturn make_task_dead(int signr);
extern void proc_caches_init(void);


@@ -198,6 +198,9 @@ struct ctl_table_header *register_sysctl_paths(const struct ctl_path *path,
void unregister_sysctl_table(struct ctl_table_header * table);

extern int sysctl_init(void);
+extern void __register_sysctl_init(const char *path, struct ctl_table *table,
+				   const char *table_name);
+#define register_sysctl_init(path, table) __register_sysctl_init(path, table, #table)
extern struct ctl_table sysctl_mount_point[];


@@ -1012,7 +1012,9 @@ static int check_stack_write(struct bpf_verifier_env *env,
	bool sanitize = reg && is_spillable_regtype(reg->type);

	for (i = 0; i < size; i++) {
-		if (state->stack[spi].slot_type[i] == STACK_INVALID) {
+		u8 type = state->stack[spi].slot_type[i];
+
+		if (type != STACK_MISC && type != STACK_ZERO) {
sanitize = true;
break;
}


@@ -62,12 +62,59 @@
#include <linux/random.h>
#include <linux/rcuwait.h>
#include <linux/compat.h>
+#include <linux/sysfs.h>

#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>

+/*
+ * The default value should be high enough to not crash a system that randomly
+ * crashes its kernel from time to time, but low enough to at least not permit
+ * overflowing 32-bit refcounts or the ldsem writer count.
+ */
+static unsigned int oops_limit = 10000;
+
+#ifdef CONFIG_SYSCTL
+static struct ctl_table kern_exit_table[] = {
+	{
+		.procname	= "oops_limit",
+		.data		= &oops_limit,
+		.maxlen		= sizeof(oops_limit),
+		.mode		= 0644,
+		.proc_handler	= proc_douintvec,
+	},
+	{ }
+};
+
+static __init int kernel_exit_sysctls_init(void)
+{
+	register_sysctl_init("kernel", kern_exit_table);
+	return 0;
+}
+late_initcall(kernel_exit_sysctls_init);
+#endif
+
+static atomic_t oops_count = ATOMIC_INIT(0);
+
+#ifdef CONFIG_SYSFS
+static ssize_t oops_count_show(struct kobject *kobj, struct kobj_attribute *attr,
+			       char *page)
+{
+	return sysfs_emit(page, "%d\n", atomic_read(&oops_count));
+}
+
+static struct kobj_attribute oops_count_attr = __ATTR_RO(oops_count);
+
+static __init int kernel_exit_sysfs_init(void)
+{
+	sysfs_add_file_to_group(kernel_kobj, &oops_count_attr.attr, NULL);
+	return 0;
+}
+late_initcall(kernel_exit_sysfs_init);
+#endif
static void __unhash_process(struct task_struct *p, bool group_dead)
{
nr_threads--;
@@ -923,6 +970,31 @@ void __noreturn do_exit(long code)
}
EXPORT_SYMBOL_GPL(do_exit);

+void __noreturn make_task_dead(int signr)
+{
+	/*
+	 * Take the task off the cpu after something catastrophic has
+	 * happened.
+	 */
+	unsigned int limit;
+
+	/*
+	 * Every time the system oopses, if the oops happens while a reference
+	 * to an object was held, the reference leaks.
+	 * If the oops doesn't also leak memory, repeated oopsing can cause
+	 * reference counters to wrap around (if they're not using refcount_t).
+	 * This means that repeated oopsing can make unexploitable-looking bugs
+	 * exploitable through repeated oopsing.
+	 * To make sure this can't happen, place an upper bound on how often the
+	 * kernel may oops without panic().
+	 */
+	limit = READ_ONCE(oops_limit);
+	if (atomic_inc_return(&oops_count) >= limit && limit)
+		panic("Oopsed too often (kernel.oops_limit is %d)", limit);
+
+	do_exit(signr);
+}
+
void complete_and_exit(struct completion *comp, long code)
{
if (comp)

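The new accounting is user-visible: kernel.oops_limit via sysctl and oops_count via sysfs, per the registrations above. A small userspace sketch that reads both (a limit of 0 means no panic is ever forced):

    /* Print the oops budget: how many oopses have happened so far and
     * the limit at which the kernel will panic.
     */
    #include <stdio.h>

    static long read_long(const char *path)
    {
    	FILE *f = fopen(path, "r");
    	long v = -1;

    	if (f) {
    		if (fscanf(f, "%ld", &v) != 1)
    			v = -1;
    		fclose(f);
    	}
    	return v;
    }

    int main(void)
    {
    	printf("oops_count: %ld\n", read_long("/sys/kernel/oops_count"));
    	printf("oops_limit: %ld\n", read_long("/proc/sys/kernel/oops_limit"));
    	return 0;
    }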

@@ -3462,7 +3462,8 @@ static bool finished_loading(const char *name)
	sched_annotate_sleep();
	mutex_lock(&module_mutex);
	mod = find_module_all(name, strlen(name), true);
-	ret = !mod || mod->state == MODULE_STATE_LIVE;
+	ret = !mod || mod->state == MODULE_STATE_LIVE
+		|| mod->state == MODULE_STATE_GOING;
mutex_unlock(&module_mutex);
return ret;
@@ -3616,20 +3617,35 @@ static int add_unformed_module(struct module *mod)

	mod->state = MODULE_STATE_UNFORMED;

-again:
	mutex_lock(&module_mutex);
	old = find_module_all(mod->name, strlen(mod->name), true);
	if (old != NULL) {
-		if (old->state != MODULE_STATE_LIVE) {
+		if (old->state == MODULE_STATE_COMING
+		    || old->state == MODULE_STATE_UNFORMED) {
			/* Wait in case it fails to load. */
			mutex_unlock(&module_mutex);
			err = wait_event_interruptible(module_wq,
					       finished_loading(mod->name));
			if (err)
				goto out_unlocked;
-			goto again;
+
+			/* The module might have gone in the meantime. */
+			mutex_lock(&module_mutex);
+			old = find_module_all(mod->name, strlen(mod->name),
+					      true);
		}
-		err = -EEXIST;
+
+		/*
+		 * We are here only when the same module was being loaded. Do
+		 * not try to load it again right now. It prevents long delays
+		 * caused by serialized module load failures. It might happen
+		 * when more devices of the same type trigger load of
+		 * a particular module.
+		 */
+		if (old && old->state == MODULE_STATE_LIVE)
+			err = -EEXIST;
+		else
+			err = -EBUSY;
goto out;
}
mod_update_bounds(mod);

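One observable effect of the change above: a loader racing a COMING/UNFORMED module still waits, but a module stuck GOING now fails fast with EBUSY instead of EEXIST. A hedged userspace sketch using the raw finit_module(2) syscall (the module path is a placeholder):

    #define _GNU_SOURCE
    #include <errno.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    int main(void)
    {
    	int fd = open("/lib/modules/example.ko", O_RDONLY); /* placeholder */

    	if (fd < 0)
    		return 1;
    	if (syscall(SYS_finit_module, fd, "", 0) < 0) {
    		if (errno == EEXIST)
    			fprintf(stderr, "module already loaded and live\n");
    		else if (errno == EBUSY)
    			fprintf(stderr, "same module still unloading; retry later\n");
    	}
    	close(fd);
    	return 0;
    }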

@@ -29,6 +29,7 @@
#include <linux/bug.h>
#include <linux/ratelimit.h>
#include <linux/debugfs.h>
+#include <linux/sysfs.h>
#include <asm/sections.h>
#define PANIC_TIMER_STEP 100
@@ -42,6 +43,7 @@ static int pause_on_oops_flag;
static DEFINE_SPINLOCK(pause_on_oops_lock);
bool crash_kexec_post_notifiers;
int panic_on_warn __read_mostly;
+static unsigned int warn_limit __read_mostly;
int panic_timeout = CONFIG_PANIC_TIMEOUT;
EXPORT_SYMBOL_GPL(panic_timeout);
@@ -52,6 +54,45 @@ EXPORT_SYMBOL(panic_notifier_list);
void (*vendor_panic_cb)(u64 sp);
EXPORT_SYMBOL_GPL(vendor_panic_cb);

+#ifdef CONFIG_SYSCTL
+static struct ctl_table kern_panic_table[] = {
+	{
+		.procname	= "warn_limit",
+		.data		= &warn_limit,
+		.maxlen		= sizeof(warn_limit),
+		.mode		= 0644,
+		.proc_handler	= proc_douintvec,
+	},
+	{ }
+};
+
+static __init int kernel_panic_sysctls_init(void)
+{
+	register_sysctl_init("kernel", kern_panic_table);
+	return 0;
+}
+late_initcall(kernel_panic_sysctls_init);
+#endif
+
+static atomic_t warn_count = ATOMIC_INIT(0);
+
+#ifdef CONFIG_SYSFS
+static ssize_t warn_count_show(struct kobject *kobj, struct kobj_attribute *attr,
+			       char *page)
+{
+	return sysfs_emit(page, "%d\n", atomic_read(&warn_count));
+}
+
+static struct kobj_attribute warn_count_attr = __ATTR_RO(warn_count);
+
+static __init int kernel_panic_sysfs_init(void)
+{
+	sysfs_add_file_to_group(kernel_kobj, &warn_count_attr.attr, NULL);
+	return 0;
+}
+late_initcall(kernel_panic_sysfs_init);
+#endif
static long no_blink(int state)
{
return 0;
@@ -127,6 +168,19 @@ void nmi_panic(struct pt_regs *regs, const char *msg)
}
EXPORT_SYMBOL(nmi_panic);

+void check_panic_on_warn(const char *origin)
+{
+	unsigned int limit;
+
+	if (panic_on_warn)
+		panic("%s: panic_on_warn set ...\n", origin);
+
+	limit = READ_ONCE(warn_limit);
+	if (atomic_inc_return(&warn_count) >= limit && limit)
+		panic("%s: system warned too often (kernel.warn_limit is %d)",
+		      origin, limit);
+}
/**
* panic - halt the system
* @fmt: The text string to print
@@ -144,6 +198,16 @@ void panic(const char *fmt, ...)
	int old_cpu, this_cpu;
	bool _crash_kexec_post_notifiers = crash_kexec_post_notifiers;

+	if (panic_on_warn) {
+		/*
+		 * This thread may hit another WARN() in the panic path.
+		 * Resetting this prevents additional WARN() from panicking the
+		 * system on this thread. Other threads are blocked by the
+		 * panic_mutex in panic().
+		 */
+		panic_on_warn = 0;
+	}
+
/*
* Disable local interrupts. This will prevent panic_smp_self_stop
* from deadlocking the first cpu that invokes the panic, since
@@ -536,16 +600,7 @@ void __warn(const char *file, int line, void *caller, unsigned taint,
	if (args)
		vprintk(args->fmt, args->args);

-	if (panic_on_warn) {
-		/*
-		 * This thread may hit another WARN() in the panic path.
-		 * Resetting this prevents additional WARN() from panicking the
-		 * system on this thread. Other threads are blocked by the
-		 * panic_mutex in panic().
-		 */
-		panic_on_warn = 0;
-		panic("panic_on_warn set ...\n");
-	}
+	check_panic_on_warn("kernel");
print_modules();

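The warn accounting mirrors the oops accounting: panic once a nonzero kernel.warn_limit is reached, with panic_on_warn acting as the limit-of-one special case. A standalone userspace model of the arithmetic in check_panic_on_warn() (assertions illustrative):

    #include <assert.h>
    #include <stdbool.h>

    static unsigned int warn_limit;	/* 0 = unlimited */
    static unsigned int warn_count;

    /* Same test as the kernel code: increment first, then compare;
     * a zero limit short-circuits the panic entirely.
     */
    static bool warn_should_panic(void)
    {
    	unsigned int limit = warn_limit;

    	return ++warn_count >= limit && limit;
    }

    int main(void)
    {
    	warn_limit = 3;
    	assert(!warn_should_panic());	/* count = 1 */
    	assert(!warn_should_panic());	/* count = 2 */
    	assert(warn_should_panic());	/* count = 3: at the limit */

    	warn_count = 0;
    	warn_limit = 0;			/* unlimited: never panics */
    	assert(!warn_should_panic());
    	return 0;
    }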

@@ -4006,8 +4006,7 @@ static noinline void __schedule_bug(struct task_struct *prev)
		print_ip_sym(preempt_disable_ip);
		pr_cont("\n");
	}
-	if (panic_on_warn)
-		panic("scheduling while atomic\n");
+	check_panic_on_warn("scheduling while atomic");
dump_stack();
add_taint(TAINT_WARN, LOCKDEP_STILL_OK);


@@ -8674,6 +8674,8 @@ void __init early_trace_init(void)
		static_key_enable(&tracepoint_printk_key.key);
	}
	tracer_alloc_buffers();
+
+	init_events();
}
void __init trace_init(void)


@@ -1530,6 +1530,7 @@ extern void trace_event_enable_cmd_record(bool enable);
extern void trace_event_enable_tgid_record(bool enable);

extern int event_trace_init(void);
+extern int init_events(void);
extern int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr);
extern int event_trace_del_tracer(struct trace_array *tr);


@@ -2314,6 +2314,8 @@ static struct hist_field *create_hist_field(struct hist_trigger_data *hist_data,
		unsigned long fl = flags & ~HIST_FIELD_FL_LOG2;
		hist_field->fn = hist_field_log2;
		hist_field->operands[0] = create_hist_field(hist_data, field, fl, NULL);
+		if (!hist_field->operands[0])
+			goto free;
hist_field->size = hist_field->operands[0]->size;
hist_field->type = kstrdup(hist_field->operands[0]->type, GFP_KERNEL);
if (!hist_field->type)


@@ -1395,7 +1395,7 @@ static struct trace_event *events[] __initdata = {
	NULL
};

-__init static int init_events(void)
+__init int init_events(void)
{
struct trace_event *event;
int i, ret;
@@ -1413,4 +1413,3 @@ __init static int init_events(void)
	return 0;
}

-early_initcall(init_events);


@@ -91,8 +91,7 @@ static void end_report(unsigned long *flags)
	pr_err("==================================================================\n");
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
	spin_unlock_irqrestore(&report_lock, *flags);
-	if (panic_on_warn)
-		panic("panic_on_warn set ...\n");
+	check_panic_on_warn("KASAN");
kasan_enable_current();
}


@@ -1519,6 +1519,7 @@ static int hci_dev_do_open(struct hci_dev *hdev)
		hdev->flush(hdev);

	if (hdev->sent_cmd) {
+		cancel_delayed_work_sync(&hdev->cmd_timer);
kfree_skb(hdev->sent_cmd);
hdev->sent_cmd = NULL;
}


@@ -132,12 +132,12 @@ static int ops_init(const struct pernet_operations *ops, struct net *net)
		return 0;

	if (ops->id && ops->size) {
-cleanup:
		ng = rcu_dereference_protected(net->gen,
					       lockdep_is_held(&pernet_ops_rwsem));
		ng->ptr[*ops->id] = NULL;
	}

+cleanup:
kfree(data);
out:


@@ -579,8 +579,20 @@ bool inet_ehash_insert(struct sock *sk, struct sock *osk, bool *found_dup_sk)
	spin_lock(lock);
	if (osk) {
		WARN_ON_ONCE(sk->sk_hash != osk->sk_hash);
-		ret = sk_nulls_del_node_init_rcu(osk);
-	} else if (found_dup_sk) {
+		ret = sk_hashed(osk);
+		if (ret) {
+			/* Before deleting the node, we insert a new one to make
+			 * sure that the look-up-sk process would not miss either
+			 * of them and that at least one node would exist in ehash
+			 * table all the time. Otherwise there's a tiny chance
+			 * that lookup process could find nothing in ehash table.
+			 */
+			__sk_nulls_add_node_tail_rcu(sk, list);
+			sk_nulls_del_node_init_rcu(osk);
+		}
+		goto unlock;
+	}
+	if (found_dup_sk) {
*found_dup_sk = inet_ehash_lookup_by_sk(sk, list);
if (*found_dup_sk)
ret = false;
@@ -589,6 +601,7 @@ bool inet_ehash_insert(struct sock *sk, struct sock *osk, bool *found_dup_sk)
	if (ret)
		__sk_nulls_add_node_rcu(sk, list);
+unlock:
spin_unlock(lock);
return ret;

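The comment above is the heart of the fix: the replacement socket is inserted before the old one is unlinked, so a lockless lookup never observes an empty chain. A toy, single-threaded model of that ordering invariant (the real code relies on RCU nulls-list primitives; this only illustrates the insert-then-delete sequencing):

    #include <assert.h>
    #include <stddef.h>

    struct node { struct node *next; };

    static struct node *head;

    static void add_tail(struct node *n)
    {
    	struct node **pp = &head;

    	while (*pp)
    		pp = &(*pp)->next;
    	n->next = NULL;
    	*pp = n;
    }

    static void del(struct node *n)
    {
    	struct node **pp = &head;

    	while (*pp && *pp != n)
    		pp = &(*pp)->next;
    	if (*pp)
    		*pp = n->next;
    }

    int main(void)
    {
    	struct node osk = { 0 }, sk = { 0 };

    	add_tail(&osk);
    	add_tail(&sk);		/* step 1: new node goes in first */
    	assert(head != NULL);	/* chain is never empty in between */
    	del(&osk);		/* step 2: only now drop the old node */
    	assert(head == &sk);
    	return 0;
    }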

@@ -80,10 +80,10 @@ void inet_twsk_put(struct inet_timewait_sock *tw)
}
EXPORT_SYMBOL_GPL(inet_twsk_put);

-static void inet_twsk_add_node_rcu(struct inet_timewait_sock *tw,
-				   struct hlist_nulls_head *list)
+static void inet_twsk_add_node_tail_rcu(struct inet_timewait_sock *tw,
+					struct hlist_nulls_head *list)
{
-	hlist_nulls_add_head_rcu(&tw->tw_node, list);
+	hlist_nulls_add_tail_rcu(&tw->tw_node, list);
}
static void inet_twsk_add_bind_node(struct inet_timewait_sock *tw,
@@ -119,7 +119,7 @@ void inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
	spin_lock(lock);

-	inet_twsk_add_node_rcu(tw, &ehead->chain);
+	inet_twsk_add_node_tail_rcu(tw, &ehead->chain);
/* Step 3: Remove SK from hash chain */
if (__sk_nulls_del_node_init_rcu(sk))


@@ -1,4 +1,5 @@
#include <linux/netlink.h>
+#include <linux/nospec.h>
#include <linux/rtnetlink.h>
#include <linux/types.h>
#include <net/ip.h>
@@ -24,6 +25,7 @@ int ip_metrics_convert(struct net *net, struct nlattr *fc_mx, int fc_mx_len,
		if (type > RTAX_MAX)
			return -EINVAL;

+		type = array_index_nospec(type, RTAX_MAX + 1);
if (type == RTAX_CC_ALGO) {
char tmp[TCP_CA_NAME_MAX];

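array_index_nospec() clamps an already-bounds-checked index so that a mispredicted branch cannot index out of range under speculation. A simplified model of the pattern (the kernel's version is arch-specific and branch-free; this sketch is an illustration, not a real mitigation):

    #include <stdio.h>

    #define RTAX_MAX 15	/* illustrative bound */

    static unsigned int index_nospec(unsigned int idx, unsigned int size)
    {
    	/* all-ones mask when idx < size, zero otherwise; even on a
    	 * mispredicted path the result stays in bounds
    	 */
    	unsigned int mask = 0U - (idx < size);

    	return idx & mask;
    }

    int main(void)
    {
    	int metrics[RTAX_MAX + 1] = { 0 };
    	unsigned int type = 10;

    	if (type > RTAX_MAX)	/* architectural check, as in the patch */
    		return 1;
    	type = index_nospec(type, RTAX_MAX + 1);
    	printf("%d\n", metrics[type]);
    	return 0;
    }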

@@ -1154,14 +1154,16 @@ static void ip6gre_tnl_link_config_route(struct ip6_tnl *t, int set_mtu,
			dev->needed_headroom = dst_len;

		if (set_mtu) {
-			dev->mtu = rt->dst.dev->mtu - t_hlen;
-			if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
-				dev->mtu -= 8;
-			if (dev->type == ARPHRD_ETHER)
-				dev->mtu -= ETH_HLEN;
+			int mtu = rt->dst.dev->mtu - t_hlen;

-			if (dev->mtu < IPV6_MIN_MTU)
-				dev->mtu = IPV6_MIN_MTU;
+			if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
+				mtu -= 8;
+			if (dev->type == ARPHRD_ETHER)
+				mtu -= ETH_HLEN;
+
+			if (mtu < IPV6_MIN_MTU)
+				mtu = IPV6_MIN_MTU;
+			WRITE_ONCE(dev->mtu, mtu);
}
}
ip6_rt_put(rt);

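The MTU derivation in these tunnel hunks is mechanical. Worked through with illustrative numbers (a 1500-byte underlay, a 48-byte tunnel header, the 8-byte encap-limit option, and an Ethernet-type device):

    #include <stdio.h>

    #define IPV6_MIN_MTU	1280
    #define ETH_HLEN	14

    int main(void)
    {
    	int link_mtu = 1500, t_hlen = 48;	/* t_hlen is illustrative */
    	int mtu = link_mtu - t_hlen;		/* 1452 */

    	mtu -= 8;				/* encap-limit option: 1444 */
    	mtu -= ETH_HLEN;			/* ARPHRD_ETHER device: 1430 */
    	if (mtu < IPV6_MIN_MTU)			/* clamp to the IPv6 floor */
    		mtu = IPV6_MIN_MTU;
    	printf("tunnel mtu = %d\n", mtu);	/* prints 1430 */
    	return 0;
    }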

@@ -1435,6 +1435,7 @@ static void ip6_tnl_link_config(struct ip6_tnl *t)
	struct __ip6_tnl_parm *p = &t->parms;
	struct flowi6 *fl6 = &t->fl.u.ip6;
	int t_hlen;
+	int mtu;
memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr));
memcpy(dev->broadcast, &p->raddr, sizeof(struct in6_addr));
@@ -1477,12 +1478,13 @@ static void ip6_tnl_link_config(struct ip6_tnl *t)
			dev->hard_header_len = rt->dst.dev->hard_header_len +
				t_hlen;

-			dev->mtu = rt->dst.dev->mtu - t_hlen;
+			mtu = rt->dst.dev->mtu - t_hlen;
			if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
-				dev->mtu -= 8;
+				mtu -= 8;

-			if (dev->mtu < IPV6_MIN_MTU)
-				dev->mtu = IPV6_MIN_MTU;
+			if (mtu < IPV6_MIN_MTU)
+				mtu = IPV6_MIN_MTU;
+			WRITE_ONCE(dev->mtu, mtu);
}
ip6_rt_put(rt);
}


@@ -1082,10 +1082,12 @@ static void ipip6_tunnel_bind_dev(struct net_device *dev)

	if (tdev && !netif_is_l3_master(tdev)) {
		int t_hlen = tunnel->hlen + sizeof(struct iphdr);
+		int mtu;

-		dev->mtu = tdev->mtu - t_hlen;
-		if (dev->mtu < IPV6_MIN_MTU)
-			dev->mtu = IPV6_MIN_MTU;
+		mtu = tdev->mtu - t_hlen;
+		if (mtu < IPV6_MIN_MTU)
+			mtu = IPV6_MIN_MTU;
+		WRITE_ONCE(dev->mtu, mtu);
}
}


@@ -317,22 +317,29 @@ static int sctp_packet(struct nf_conn *ct,
	for_each_sctp_chunk (skb, sch, _sch, offset, dataoff, count) {
		/* Special cases of Verification tag check (Sec 8.5.1) */
		if (sch->type == SCTP_CID_INIT) {
-			/* Sec 8.5.1 (A) */
+			/* (A) vtag MUST be zero */
			if (sh->vtag != 0)
				goto out_unlock;
		} else if (sch->type == SCTP_CID_ABORT) {
-			/* Sec 8.5.1 (B) */
-			if (sh->vtag != ct->proto.sctp.vtag[dir] &&
-			    sh->vtag != ct->proto.sctp.vtag[!dir])
+			/* (B) vtag MUST match own vtag if T flag is unset OR
+			 * MUST match peer's vtag if T flag is set
+			 */
+			if ((!(sch->flags & SCTP_CHUNK_FLAG_T) &&
+			     sh->vtag != ct->proto.sctp.vtag[dir]) ||
+			    ((sch->flags & SCTP_CHUNK_FLAG_T) &&
+			     sh->vtag != ct->proto.sctp.vtag[!dir]))
				goto out_unlock;
		} else if (sch->type == SCTP_CID_SHUTDOWN_COMPLETE) {
-			/* Sec 8.5.1 (C) */
-			if (sh->vtag != ct->proto.sctp.vtag[dir] &&
-			    sh->vtag != ct->proto.sctp.vtag[!dir] &&
-			    sch->flags & SCTP_CHUNK_FLAG_T)
+			/* (C) vtag MUST match own vtag if T flag is unset OR
+			 * MUST match peer's vtag if T flag is set
+			 */
+			if ((!(sch->flags & SCTP_CHUNK_FLAG_T) &&
+			     sh->vtag != ct->proto.sctp.vtag[dir]) ||
+			    ((sch->flags & SCTP_CHUNK_FLAG_T) &&
+			     sh->vtag != ct->proto.sctp.vtag[!dir]))
				goto out_unlock;
		} else if (sch->type == SCTP_CID_COOKIE_ECHO) {
-			/* Sec 8.5.1 (D) */
+			/* (D) vtag must be same as init_vtag as found in INIT_ACK */
			if (sh->vtag != ct->proto.sctp.vtag[dir])
goto out_unlock;
} else if (sch->type == SCTP_CID_HEARTBEAT) {

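The rewritten (B)/(C) checks make the T flag select which side's verification tag must match, instead of accepting either tag. A standalone model of the accept condition (values illustrative):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* T clear: must match our vtag; T set: must match the peer's vtag. */
    static bool vtag_ok(bool t_flag, uint32_t vtag,
    		    uint32_t own, uint32_t peer)
    {
    	return t_flag ? vtag == peer : vtag == own;
    }

    int main(void)
    {
    	/* the old code accepted either tag regardless of the T flag */
    	assert(vtag_ok(false, 0x1111, 0x1111, 0x2222));
    	assert(!vtag_ok(false, 0x2222, 0x1111, 0x2222));
    	assert(vtag_ok(true, 0x2222, 0x1111, 0x2222));
    	assert(!vtag_ok(true, 0x1111, 0x1111, 0x2222));
    	return 0;
    }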

@@ -1094,6 +1094,16 @@ static int tcp_packet(struct nf_conn *ct,
			nf_ct_kill_acct(ct, ctinfo, skb);
			return NF_ACCEPT;
		}
+
+		if (index == TCP_SYN_SET && old_state == TCP_CONNTRACK_SYN_SENT) {
+			/* do not renew timeout on SYN retransmit.
+			 *
+			 * Else port reuse by client or NAT middlebox can keep
+			 * entry alive indefinitely (including nat info).
+			 */
+			return NF_ACCEPT;
+		}
+
/* ESTABLISHED without SEEN_REPLY, i.e. mid-connection
* pickup with loose=1. Avoid large ESTABLISHED timeout.
*/
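The guard added above reduces to one predicate: a SYN seen while the entry is still in SYN_SENT is a retransmit and must not refresh the timeout, so the half-open entry can finally age out. A tiny model (enum values are stand-ins for the kernel's):

    #include <assert.h>
    #include <stdbool.h>

    enum { TCP_SYN_SET, TCP_ACK_SET };
    enum { TCP_CONNTRACK_SYN_SENT, TCP_CONNTRACK_ESTABLISHED };

    static bool refreshes_timeout(int index, int old_state)
    {
    	/* retransmitted SYN: keep the old deadline */
    	if (index == TCP_SYN_SET && old_state == TCP_CONNTRACK_SYN_SENT)
    		return false;
    	return true;
    }

    int main(void)
    {
    	assert(!refreshes_timeout(TCP_SYN_SET, TCP_CONNTRACK_SYN_SENT));
    	assert(refreshes_timeout(TCP_ACK_SET, TCP_CONNTRACK_ESTABLISHED));
    	return 0;
    }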

Some files were not shown because too many files have changed in this diff.