This is the 4.19.18 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAlxMGy0ACgkQONu9yGCS
 aT5ppQ/8COjyZg1aTrCrd0ttMHYotw3Lb4B6E/SCf2ub4X38SxGz9irhQ7r2FKdK
 w0ZXlLOF2ddqWe6BUnIfWago4Pk1GBpg3bgnp5XyYTjlJbfI2yZ9ggiO0iNYBPaL
 fN2JwM9eze/7cDlpYbhwGpF4+Wz8wTrzh+NIputcvC6n3SQH/cTGmOUa9rlamQju
 uukkvLanAYY3sqDCl4B415Ds44ROU4filqHYIkvZC81jc3Q0YZ8M7cTmpLcDQKGz
 8Z+Veil07jEM9bF2W8iX79nwxMT+edFC62HMuRCoxJKq+1kccw1TVMWpQ8TWbv13
 zeLOqXxNP6VcNaC251q3QzlInRDp1dtr8KtzA/OG0WFnZBTEDng/iChhiL8qZt0R
 9+Sz7n9uZ5pMRK3tr03Ccjg3AneKWRqad2iaTB/kOwAdu7Uqxz8U9qUuRDFPV7OY
 KTMCCfdS8XpMHl/S+Cvg2dqSNiBEkNmowYO6NvQClG0aoN4/6wH+m2TZ0hCl6PVq
 pNFOTJmp7FOaztEZC4rqW8DoOGeGaNo5DP9A2XKKDR20F7EiAE437ApEQ4p5QGVk
 ek4uslZkwJWU/UOzXRl/Hoz0OlI0ixsdZy1vw88HCl7SD1E7xHJpnRUkOjigTT1Q
 nbCt0Nm/A2+c1tKbzU+PVW8FtIbutZhW1BtrqaIbbHr9NBTICR0=
 =Yg+/
 -----END PGP SIGNATURE-----

Merge 4.19.18 into android-4.19

Changes in 4.19.18
	ipv6: Consider sk_bound_dev_if when binding a socket to a v4 mapped address
	mlxsw: spectrum: Disable lag port TX before removing it
	mlxsw: spectrum_switchdev: Set PVID correctly during VLAN deletion
	net: dsa: mv88x6xxx: mv88e6390 errata
	net, skbuff: do not prefer skb allocation fails early
	qmi_wwan: add MTU default to qmap network interface
	r8169: Add support for new Realtek Ethernet
	ipv6: Take rcu_read_lock in __inet6_bind for mapped addresses
	net: clear skb->tstamp in bridge forwarding path
	netfilter: ipset: Allow matching on destination MAC address for mac and ipmac sets
	gpio: pl061: Move irq_chip definition inside struct pl061
	drm/amd/display: Guard against null stream_state in set_crc_source
	drm/amdkfd: fix interrupt spin lock
	ixgbe: allow IPsec Tx offload in VEPA mode
	platform/x86: asus-wmi: Tell the EC the OS will handle the display off hotkey
	e1000e: allow non-monotonic SYSTIM readings
	usb: typec: tcpm: Do not disconnect link for self powered devices
	selftests/bpf: enable (uncomment) all tests in test_libbpf.sh
	of: overlay: add missing of_node_put() after add new node to changeset
	writeback: don't decrement wb->refcnt if !wb->bdi
	serial: set suppress_bind_attrs flag only if builtin
	bpf: Allow narrow loads with offset > 0
	ALSA: oxfw: add support for APOGEE duet FireWire
	x86/mce: Fix -Wmissing-prototypes warnings
	MIPS: SiByte: Enable swiotlb for SWARM, LittleSur and BigSur
	crypto: ecc - regularize scalar for scalar multiplication
	arm64: perf: set suppress_bind_attrs flag to true
	drm/atomic-helper: Complete fake_commit->flip_done potentially earlier
	clk: meson: meson8b: fix incorrect divider mapping in cpu_scale_table
	samples: bpf: fix: error handling regarding kprobe_events
	usb: gadget: udc: renesas_usb3: add a safety connection way for forced_b_device
	fpga: altera-cvp: fix probing for multiple FPGAs on the bus
	selinux: always allow mounting submounts
	ASoC: pcm3168a: Don't disable pcm3168a when CONFIG_PM defined
	scsi: qedi: Check for session online before getting iSCSI TLV data.
	drm/amdgpu: Reorder uvd ring init before uvd resume
	rxe: IB_WR_REG_MR does not capture MR's iova field
	efi/libstub: Disable some warnings for x86{,_64}
	jffs2: Fix use of uninitialized delayed_work, lockdep breakage
	clk: imx: make mux parent strings const
	pstore/ram: Do not treat empty buffers as valid
	media: uvcvideo: Refactor teardown of uvc on USB disconnect
	powerpc/xmon: Fix invocation inside lock region
	powerpc/pseries/cpuidle: Fix preempt warning
	media: firewire: Fix app_info parameter type in avc_ca{,_app}_info
	ASoC: use dma_ops of parent device for acp_audio_dma
	media: venus: core: Set dma maximum segment size
	staging: erofs: fix use-after-free of on-stack `z_erofs_vle_unzip_io'
	net: call sk_dst_reset when set SO_DONTROUTE
	scsi: target: use consistent left-aligned ASCII INQUIRY data
	scsi: target/core: Make sure that target_wait_for_sess_cmds() waits long enough
	selftests: do not macro-expand failed assertion expressions
	arm64: kasan: Increase stack size for KASAN_EXTRA
	clk: imx6q: reset exclusive gates on init
	arm64: Fix minor issues with the dcache_by_line_op macro
	bpf: relax verifier restriction on BPF_MOV | BPF_ALU
	kconfig: fix file name and line number of warn_ignored_character()
	kconfig: fix memory leak when EOF is encountered in quotation
	mmc: atmel-mci: do not assume idle after atmci_request_end
	btrfs: volumes: Make sure there is no overlap of dev extents at mount time
	btrfs: alloc_chunk: fix more DUP stripe size handling
	btrfs: fix use-after-free due to race between replace start and cancel
	btrfs: improve error handling of btrfs_add_link
	tty/serial: do not free trasnmit buffer page under port lock
	perf intel-pt: Fix error with config term "pt=0"
	perf tests ARM: Disable breakpoint tests 32-bit
	perf svghelper: Fix unchecked usage of strncpy()
	perf parse-events: Fix unchecked usage of strncpy()
	perf vendor events intel: Fix Load_Miss_Real_Latency on SKL/SKX
	netfilter: ipt_CLUSTERIP: check MAC address when duplicate config is set
	netfilter: ipt_CLUSTERIP: remove wrong WARN_ON_ONCE in netns exit routine
	netfilter: ipt_CLUSTERIP: fix deadlock in netns exit routine
	x86/topology: Use total_cpus for max logical packages calculation
	dm crypt: use u64 instead of sector_t to store iv_offset
	dm kcopyd: Fix bug causing workqueue stalls
	perf stat: Avoid segfaults caused by negated options
	tools lib subcmd: Don't add the kernel sources to the include path
	dm snapshot: Fix excessive memory usage and workqueue stalls
	perf cs-etm: Correct packets swapping in cs_etm__flush()
	perf tools: Add missing sigqueue() prototype for systems lacking it
	perf tools: Add missing open_memstream() prototype for systems lacking it
	quota: Lock s_umount in exclusive mode for Q_XQUOTA{ON,OFF} quotactls.
	clocksource/drivers/integrator-ap: Add missing of_node_put()
	dm: Check for device sector overflow if CONFIG_LBDAF is not set
	Bluetooth: btusb: Add support for Intel bluetooth device 8087:0029
	ALSA: bebob: fix model-id of unit for Apogee Ensemble
	sysfs: Disable lockdep for driver bind/unbind files
	IB/usnic: Fix potential deadlock
	scsi: mpt3sas: fix memory ordering on 64bit writes
	scsi: smartpqi: correct lun reset issues
	ath10k: fix peer stats null pointer dereference
	scsi: smartpqi: call pqi_free_interrupts() in pqi_shutdown()
	scsi: megaraid: fix out-of-bound array accesses
	iomap: don't search past page end in iomap_is_partially_uptodate
	ocfs2: fix panic due to unrecovered local alloc
	mm/page-writeback.c: don't break integrity writeback on ->writepage() error
	mm/swap: use nr_node_ids for avail_lists in swap_info_struct
	userfaultfd: clear flag if remap event not enabled
	mm, proc: be more verbose about unstable VMA flags in /proc/<pid>/smaps
	iwlwifi: mvm: Send LQ command as async when necessary
	Bluetooth: Fix unnecessary error message for HCI request completion
	ipmi: fix use-after-free of user->release_barrier.rda
	ipmi: msghandler: Fix potential Spectre v1 vulnerabilities
	ipmi: Prevent use-after-free in deliver_response
	ipmi:ssif: Fix handling of multi-part return messages
	ipmi: Don't initialize anything in the core until something uses it
	Linux 4.19.18

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
commit 26bf816608
Committer: Greg Kroah-Hartman, 2019-01-26 11:58:37 +01:00
138 changed files with 1190 additions and 555 deletions


@ -499,7 +499,9 @@ manner. The codes are the following:
Note that there is no guarantee that every flag and associated mnemonic will
be present in all further kernel releases. Things get changed, the flags may
be vanished or the reverse -- new added.
be vanished or the reverse -- new added. Interpretation of their meaning
might change in future as well. So each consumer of these flags has to
follow each specific kernel version for the exact semantic.
The "Name" field will only be present on a mapping that has been named by
userspace, and will show the name passed in by userspace.
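(Illustrative sketch, not part of the patch: a minimal userspace reader for the VmFlags line documented above. Per the note, the mnemonic set is not a stable ABI, so this reader only prints whatever mnemonics the running kernel reports.)

#include <stdio.h>
#include <string.h>

int main(void)
{
	FILE *f = fopen("/proc/self/smaps", "r");
	char line[512];

	if (!f) {
		perror("fopen");
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		/* Each mapping's entry ends with a "VmFlags:" line of mnemonics. */
		if (strncmp(line, "VmFlags:", 8) == 0)
			fputs(line, stdout);
	}
	fclose(f);
	return 0;
}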


@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 4
PATCHLEVEL = 19
SUBLEVEL = 17
SUBLEVEL = 18
EXTRAVERSION =
NAME = "People's Front"


@ -378,27 +378,33 @@ alternative_endif
* size: size of the region
* Corrupts: kaddr, size, tmp1, tmp2
*/
.macro __dcache_op_workaround_clean_cache, op, kaddr
alternative_if_not ARM64_WORKAROUND_CLEAN_CACHE
dc \op, \kaddr
alternative_else
dc civac, \kaddr
alternative_endif
.endm
.macro dcache_by_line_op op, domain, kaddr, size, tmp1, tmp2
dcache_line_size \tmp1, \tmp2
add \size, \kaddr, \size
sub \tmp2, \tmp1, #1
bic \kaddr, \kaddr, \tmp2
9998:
.if (\op == cvau || \op == cvac)
alternative_if_not ARM64_WORKAROUND_CLEAN_CACHE
dc \op, \kaddr
alternative_else
dc civac, \kaddr
alternative_endif
.elseif (\op == cvap)
alternative_if ARM64_HAS_DCPOP
.ifc \op, cvau
__dcache_op_workaround_clean_cache \op, \kaddr
.else
.ifc \op, cvac
__dcache_op_workaround_clean_cache \op, \kaddr
.else
.ifc \op, cvap
sys 3, c7, c12, 1, \kaddr // dc cvap
alternative_else
dc cvac, \kaddr
alternative_endif
.else
dc \op, \kaddr
.endif
.endif
.endif
add \kaddr, \kaddr, \tmp1
cmp \kaddr, \size
b.lo 9998b


@ -76,12 +76,17 @@
/*
* KASAN requires 1/8th of the kernel virtual address space for the shadow
* region. KASAN can bloat the stack significantly, so double the (minimum)
* stack size when KASAN is in use.
* stack size when KASAN is in use, and then double it again if KASAN_EXTRA is
* on.
*/
#ifdef CONFIG_KASAN
#define KASAN_SHADOW_SCALE_SHIFT 3
#define KASAN_SHADOW_SIZE (UL(1) << (VA_BITS - KASAN_SHADOW_SCALE_SHIFT))
#ifdef CONFIG_KASAN_EXTRA
#define KASAN_THREAD_SHIFT 2
#else
#define KASAN_THREAD_SHIFT 1
#endif /* CONFIG_KASAN_EXTRA */
#else
#define KASAN_SHADOW_SIZE (0)
#define KASAN_THREAD_SHIFT 0


@ -1274,6 +1274,7 @@ static struct platform_driver armv8_pmu_driver = {
.driver = {
.name = ARMV8_PMU_PDEV_NAME,
.of_match_table = armv8_pmu_of_device_ids,
.suppress_bind_attrs = true,
},
.probe = armv8_pmu_device_probe,
};


@ -212,6 +212,9 @@ ENDPROC(__dma_clean_area)
* - size - size in question
*/
ENTRY(__clean_dcache_area_pop)
alternative_if_not ARM64_HAS_DCPOP
b __clean_dcache_area_poc
alternative_else_nop_endif
dcache_by_line_op cvap, sy, x0, x1, x2, x3
ret
ENDPIPROC(__clean_dcache_area_pop)


@ -794,6 +794,7 @@ config SIBYTE_SWARM
select SYS_SUPPORTS_HIGHMEM
select SYS_SUPPORTS_LITTLE_ENDIAN
select ZONE_DMA32 if 64BIT
select SWIOTLB if ARCH_DMA_ADDR_T_64BIT && PCI
config SIBYTE_LITTLESUR
bool "Sibyte BCM91250C2-LittleSur"
@ -814,6 +815,7 @@ config SIBYTE_SENTOSA
select SYS_HAS_CPU_SB1
select SYS_SUPPORTS_BIG_ENDIAN
select SYS_SUPPORTS_LITTLE_ENDIAN
select SWIOTLB if ARCH_DMA_ADDR_T_64BIT && PCI
config SIBYTE_BIGSUR
bool "Sibyte BCM91480B-BigSur"
@ -826,6 +828,7 @@ config SIBYTE_BIGSUR
select SYS_SUPPORTS_HIGHMEM
select SYS_SUPPORTS_LITTLE_ENDIAN
select ZONE_DMA32 if 64BIT
select SWIOTLB if ARCH_DMA_ADDR_T_64BIT && PCI
config SNI_RM
bool "SNI RM200/300/400"


@ -1,4 +1,5 @@
obj-y := cfe.o
obj-$(CONFIG_SWIOTLB) += dma.o
obj-$(CONFIG_SIBYTE_BUS_WATCHER) += bus_watcher.o
obj-$(CONFIG_SIBYTE_CFE_CONSOLE) += cfe_console.o
obj-$(CONFIG_SIBYTE_TBPROF) += sb_tbprof.o


@ -0,0 +1,14 @@
// SPDX-License-Identifier: GPL-2.0+
/*
* DMA support for Broadcom SiByte platforms.
*
* Copyright (c) 2018 Maciej W. Rozycki
*/
#include <linux/swiotlb.h>
#include <asm/bootinfo.h>
void __init plat_swiotlb_setup(void)
{
swiotlb_init(1);
}


@ -75,6 +75,9 @@ static int xmon_gate;
#define xmon_owner 0
#endif /* CONFIG_SMP */
#ifdef CONFIG_PPC_PSERIES
static int set_indicator_token = RTAS_UNKNOWN_SERVICE;
#endif
static unsigned long in_xmon __read_mostly = 0;
static int xmon_on = IS_ENABLED(CONFIG_XMON_DEFAULT);
@ -358,7 +361,6 @@ static inline void disable_surveillance(void)
#ifdef CONFIG_PPC_PSERIES
/* Since this can't be a module, args should end up below 4GB. */
static struct rtas_args args;
int token;
/*
* At this point we have got all the cpus we can into
@ -367,11 +369,11 @@ static inline void disable_surveillance(void)
* If we did try to take rtas.lock there would be a
* real possibility of deadlock.
*/
token = rtas_token("set-indicator");
if (token == RTAS_UNKNOWN_SERVICE)
if (set_indicator_token == RTAS_UNKNOWN_SERVICE)
return;
rtas_call_unlocked(&args, token, 3, 1, NULL, SURVEILLANCE_TOKEN, 0, 0);
rtas_call_unlocked(&args, set_indicator_token, 3, 1, NULL,
SURVEILLANCE_TOKEN, 0, 0);
#endif /* CONFIG_PPC_PSERIES */
}
@ -3672,6 +3674,14 @@ static void xmon_init(int enable)
__debugger_iabr_match = xmon_iabr_match;
__debugger_break_match = xmon_break_match;
__debugger_fault_handler = xmon_fault_handler;
#ifdef CONFIG_PPC_PSERIES
/*
* Get the token here to avoid trying to get a lock
* during the crash, causing a deadlock.
*/
set_indicator_token = rtas_token("set-indicator");
#endif
} else {
__debugger = NULL;
__debugger_ipi = NULL;


@ -104,9 +104,9 @@ extern int panic_on_unrecovered_nmi;
void math_emulate(struct math_emu_info *);
#ifndef CONFIG_X86_32
asmlinkage void smp_thermal_interrupt(void);
asmlinkage void smp_threshold_interrupt(void);
asmlinkage void smp_deferred_error_interrupt(void);
asmlinkage void smp_thermal_interrupt(struct pt_regs *regs);
asmlinkage void smp_threshold_interrupt(struct pt_regs *regs);
asmlinkage void smp_deferred_error_interrupt(struct pt_regs *regs);
#endif
extern void ist_enter(struct pt_regs *regs);


@ -23,6 +23,7 @@
#include <linux/string.h>
#include <asm/amd_nb.h>
#include <asm/traps.h>
#include <asm/apic.h>
#include <asm/mce.h>
#include <asm/msr.h>
@ -99,7 +100,7 @@ static u32 smca_bank_addrs[MAX_NR_BANKS][NR_BLOCKS] __ro_after_init =
[0 ... MAX_NR_BANKS - 1] = { [0 ... NR_BLOCKS - 1] = -1 }
};
const char *smca_get_name(enum smca_bank_types t)
static const char *smca_get_name(enum smca_bank_types t)
{
if (t >= N_SMCA_BANK_TYPES)
return NULL;
@ -824,7 +825,7 @@ static void __log_error(unsigned int bank, u64 status, u64 addr, u64 misc)
mce_log(&m);
}
asmlinkage __visible void __irq_entry smp_deferred_error_interrupt(void)
asmlinkage __visible void __irq_entry smp_deferred_error_interrupt(struct pt_regs *regs)
{
entering_irq();
trace_deferred_error_apic_entry(DEFERRED_ERROR_VECTOR);


@ -25,6 +25,7 @@
#include <linux/cpu.h>
#include <asm/processor.h>
#include <asm/traps.h>
#include <asm/apic.h>
#include <asm/mce.h>
#include <asm/msr.h>
@ -390,7 +391,7 @@ static void unexpected_thermal_interrupt(void)
static void (*smp_thermal_vector)(void) = unexpected_thermal_interrupt;
asmlinkage __visible void __irq_entry smp_thermal_interrupt(struct pt_regs *r)
asmlinkage __visible void __irq_entry smp_thermal_interrupt(struct pt_regs *regs)
{
entering_irq();
trace_thermal_apic_entry(THERMAL_APIC_VECTOR);


@ -6,6 +6,7 @@
#include <linux/kernel.h>
#include <asm/irq_vectors.h>
#include <asm/traps.h>
#include <asm/apic.h>
#include <asm/mce.h>
#include <asm/trace/irq_vectors.h>
@ -18,7 +19,7 @@ static void default_threshold_interrupt(void)
void (*mce_threshold_vector)(void) = default_threshold_interrupt;
asmlinkage __visible void __irq_entry smp_threshold_interrupt(void)
asmlinkage __visible void __irq_entry smp_threshold_interrupt(struct pt_regs *regs)
{
entering_irq();
trace_threshold_apic_entry(THRESHOLD_APIC_VECTOR);


@ -1346,7 +1346,7 @@ void __init calculate_max_logical_packages(void)
* extrapolate the boot cpu's data to all packages.
*/
ncpus = cpu_data(0).booted_cores * topology_max_smt_threads();
__max_logical_packages = DIV_ROUND_UP(nr_cpu_ids, ncpus);
__max_logical_packages = DIV_ROUND_UP(total_cpus, ncpus);
pr_info("Max logical packages: %u\n", __max_logical_packages);
}


@ -842,15 +842,23 @@ static void xycz_add_c(u64 *x1, u64 *y1, u64 *x2, u64 *y2, u64 *curve_prime,
static void ecc_point_mult(struct ecc_point *result,
const struct ecc_point *point, const u64 *scalar,
u64 *initial_z, u64 *curve_prime,
u64 *initial_z, const struct ecc_curve *curve,
unsigned int ndigits)
{
/* R0 and R1 */
u64 rx[2][ECC_MAX_DIGITS];
u64 ry[2][ECC_MAX_DIGITS];
u64 z[ECC_MAX_DIGITS];
u64 sk[2][ECC_MAX_DIGITS];
u64 *curve_prime = curve->p;
int i, nb;
int num_bits = vli_num_bits(scalar, ndigits);
int num_bits;
int carry;
carry = vli_add(sk[0], scalar, curve->n, ndigits);
vli_add(sk[1], sk[0], curve->n, ndigits);
scalar = sk[!carry];
num_bits = sizeof(u64) * ndigits * 8 + 1;
vli_set(rx[1], point->x, ndigits);
vli_set(ry[1], point->y, ndigits);
@ -1004,7 +1012,7 @@ int ecc_make_pub_key(unsigned int curve_id, unsigned int ndigits,
goto out;
}
ecc_point_mult(pk, &curve->g, priv, NULL, curve->p, ndigits);
ecc_point_mult(pk, &curve->g, priv, NULL, curve, ndigits);
if (ecc_point_is_zero(pk)) {
ret = -EAGAIN;
goto err_free_point;
@ -1090,7 +1098,7 @@ int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits,
goto err_alloc_product;
}
ecc_point_mult(product, pk, priv, rand_z, curve->p, ndigits);
ecc_point_mult(product, pk, priv, rand_z, curve, ndigits);
ecc_swap_digits(product->x, secret, ndigits);


@ -31,6 +31,9 @@ static struct kset *system_kset;
#define to_drv_attr(_attr) container_of(_attr, struct driver_attribute, attr)
#define DRIVER_ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store) \
struct driver_attribute driver_attr_##_name = \
__ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store)
static int __must_check bus_rescan_devices_helper(struct device *dev,
void *data);
@ -195,7 +198,7 @@ static ssize_t unbind_store(struct device_driver *drv, const char *buf,
bus_put(bus);
return err;
}
static DRIVER_ATTR_WO(unbind);
static DRIVER_ATTR_IGNORE_LOCKDEP(unbind, S_IWUSR, NULL, unbind_store);
/*
* Manually attach a device to a driver.
@ -231,7 +234,7 @@ static ssize_t bind_store(struct device_driver *drv, const char *buf,
bus_put(bus);
return err;
}
static DRIVER_ATTR_WO(bind);
static DRIVER_ATTR_IGNORE_LOCKDEP(bind, S_IWUSR, NULL, bind_store);
static ssize_t show_drivers_autoprobe(struct bus_type *bus, char *buf)
{


@ -343,6 +343,7 @@ static const struct usb_device_id blacklist_table[] = {
/* Intel Bluetooth devices */
{ USB_DEVICE(0x8087, 0x0025), .driver_info = BTUSB_INTEL_NEW },
{ USB_DEVICE(0x8087, 0x0026), .driver_info = BTUSB_INTEL_NEW },
{ USB_DEVICE(0x8087, 0x0029), .driver_info = BTUSB_INTEL_NEW },
{ USB_DEVICE(0x8087, 0x07da), .driver_info = BTUSB_CSR },
{ USB_DEVICE(0x8087, 0x07dc), .driver_info = BTUSB_INTEL },
{ USB_DEVICE(0x8087, 0x0a2a), .driver_info = BTUSB_INTEL },
@ -2054,6 +2055,35 @@ static int btusb_send_frame_intel(struct hci_dev *hdev, struct sk_buff *skb)
return -EILSEQ;
}
static bool btusb_setup_intel_new_get_fw_name(struct intel_version *ver,
struct intel_boot_params *params,
char *fw_name, size_t len,
const char *suffix)
{
switch (ver->hw_variant) {
case 0x0b: /* SfP */
case 0x0c: /* WsP */
snprintf(fw_name, len, "intel/ibt-%u-%u.%s",
le16_to_cpu(ver->hw_variant),
le16_to_cpu(params->dev_revid),
suffix);
break;
case 0x11: /* JfP */
case 0x12: /* ThP */
case 0x13: /* HrP */
case 0x14: /* CcP */
snprintf(fw_name, len, "intel/ibt-%u-%u-%u.%s",
le16_to_cpu(ver->hw_variant),
le16_to_cpu(ver->hw_revision),
le16_to_cpu(ver->fw_revision),
suffix);
break;
default:
return false;
}
return true;
}
static int btusb_setup_intel_new(struct hci_dev *hdev)
{
struct btusb_data *data = hci_get_drvdata(hdev);
@ -2105,7 +2135,7 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
case 0x11: /* JfP */
case 0x12: /* ThP */
case 0x13: /* HrP */
case 0x14: /* QnJ, IcP */
case 0x14: /* CcP */
break;
default:
bt_dev_err(hdev, "Unsupported Intel hardware variant (%u)",
@ -2189,23 +2219,9 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
* ibt-<hw_variant>-<hw_revision>-<fw_revision>.sfi.
*
*/
switch (ver.hw_variant) {
case 0x0b: /* SfP */
case 0x0c: /* WsP */
snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u.sfi",
le16_to_cpu(ver.hw_variant),
le16_to_cpu(params.dev_revid));
break;
case 0x11: /* JfP */
case 0x12: /* ThP */
case 0x13: /* HrP */
case 0x14: /* QnJ, IcP */
snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u-%u.sfi",
le16_to_cpu(ver.hw_variant),
le16_to_cpu(ver.hw_revision),
le16_to_cpu(ver.fw_revision));
break;
default:
err = btusb_setup_intel_new_get_fw_name(&ver, &params, fwname,
sizeof(fwname), "sfi");
if (!err) {
bt_dev_err(hdev, "Unsupported Intel firmware naming");
return -EINVAL;
}
@ -2221,23 +2237,9 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
/* Save the DDC file name for later use to apply once the firmware
* downloading is done.
*/
switch (ver.hw_variant) {
case 0x0b: /* SfP */
case 0x0c: /* WsP */
snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u.ddc",
le16_to_cpu(ver.hw_variant),
le16_to_cpu(params.dev_revid));
break;
case 0x11: /* JfP */
case 0x12: /* ThP */
case 0x13: /* HrP */
case 0x14: /* QnJ, IcP */
snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u-%u.ddc",
le16_to_cpu(ver.hw_variant),
le16_to_cpu(ver.hw_revision),
le16_to_cpu(ver.fw_revision));
break;
default:
err = btusb_setup_intel_new_get_fw_name(&ver, &params, fwname,
sizeof(fwname), "ddc");
if (!err) {
bt_dev_err(hdev, "Unsupported Intel firmware naming");
return -EINVAL;
}


@ -29,6 +29,7 @@
#include <linux/moduleparam.h>
#include <linux/workqueue.h>
#include <linux/uuid.h>
#include <linux/nospec.h>
#define PFX "IPMI message handler: "
@ -61,7 +62,8 @@ static void ipmi_debug_msg(const char *title, unsigned char *data,
{ }
#endif
static int initialized;
static bool initialized;
static bool drvregistered;
enum ipmi_panic_event_op {
IPMI_SEND_PANIC_EVENT_NONE,
@ -611,7 +613,7 @@ static DEFINE_MUTEX(ipmidriver_mutex);
static LIST_HEAD(ipmi_interfaces);
static DEFINE_MUTEX(ipmi_interfaces_mutex);
DEFINE_STATIC_SRCU(ipmi_interfaces_srcu);
struct srcu_struct ipmi_interfaces_srcu;
/*
* List of watchers that want to know when smi's are added and deleted.
@ -719,7 +721,15 @@ struct watcher_entry {
int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
{
struct ipmi_smi *intf;
int index;
int index, rv;
/*
* Make sure the driver is actually initialized, this handles
* problems with initialization order.
*/
rv = ipmi_init_msghandler();
if (rv)
return rv;
mutex_lock(&smi_watchers_mutex);
@ -883,7 +893,7 @@ static int deliver_response(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
if (user) {
user->handler->ipmi_recv_hndl(msg, user->handler_data);
release_ipmi_user(msg->user, index);
release_ipmi_user(user, index);
} else {
/* User went away, give up. */
ipmi_free_recv_msg(msg);
@ -1075,7 +1085,7 @@ int ipmi_create_user(unsigned int if_num,
{
unsigned long flags;
struct ipmi_user *new_user;
int rv = 0, index;
int rv, index;
struct ipmi_smi *intf;
/*
@ -1093,19 +1103,10 @@ int ipmi_create_user(unsigned int if_num,
* Make sure the driver is actually initialized, this handles
* problems with initialization order.
*/
if (!initialized) {
rv = ipmi_init_msghandler();
if (rv)
return rv;
/*
* The init code doesn't return an error if it was turned
* off, but it won't initialize. Check that.
*/
if (!initialized)
return -ENODEV;
}
new_user = kmalloc(sizeof(*new_user), GFP_KERNEL);
if (!new_user)
return -ENOMEM;
@ -1182,6 +1183,7 @@ EXPORT_SYMBOL(ipmi_get_smi_info);
static void free_user(struct kref *ref)
{
struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount);
cleanup_srcu_struct(&user->release_barrier);
kfree(user);
}
@ -1258,7 +1260,6 @@ int ipmi_destroy_user(struct ipmi_user *user)
{
_ipmi_destroy_user(user);
cleanup_srcu_struct(&user->release_barrier);
kref_put(&user->refcount, free_user);
return 0;
@ -1297,10 +1298,12 @@ int ipmi_set_my_address(struct ipmi_user *user,
if (!user)
return -ENODEV;
if (channel >= IPMI_MAX_CHANNELS)
if (channel >= IPMI_MAX_CHANNELS) {
rv = -EINVAL;
else
} else {
channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
user->intf->addrinfo[channel].address = address;
}
release_ipmi_user(user, index);
return rv;
@ -1317,10 +1320,12 @@ int ipmi_get_my_address(struct ipmi_user *user,
if (!user)
return -ENODEV;
if (channel >= IPMI_MAX_CHANNELS)
if (channel >= IPMI_MAX_CHANNELS) {
rv = -EINVAL;
else
} else {
channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
*address = user->intf->addrinfo[channel].address;
}
release_ipmi_user(user, index);
return rv;
@ -1337,10 +1342,12 @@ int ipmi_set_my_LUN(struct ipmi_user *user,
if (!user)
return -ENODEV;
if (channel >= IPMI_MAX_CHANNELS)
if (channel >= IPMI_MAX_CHANNELS) {
rv = -EINVAL;
else
} else {
channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
user->intf->addrinfo[channel].lun = LUN & 0x3;
}
release_ipmi_user(user, index);
return 0;
@ -1357,10 +1364,12 @@ int ipmi_get_my_LUN(struct ipmi_user *user,
if (!user)
return -ENODEV;
if (channel >= IPMI_MAX_CHANNELS)
if (channel >= IPMI_MAX_CHANNELS) {
rv = -EINVAL;
else
} else {
channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
*address = user->intf->addrinfo[channel].lun;
}
release_ipmi_user(user, index);
return rv;
@ -2184,6 +2193,7 @@ static int check_addr(struct ipmi_smi *intf,
{
if (addr->channel >= IPMI_MAX_CHANNELS)
return -EINVAL;
addr->channel = array_index_nospec(addr->channel, IPMI_MAX_CHANNELS);
*lun = intf->addrinfo[addr->channel].lun;
*saddr = intf->addrinfo[addr->channel].address;
return 0;
@ -3294,17 +3304,9 @@ int ipmi_register_smi(const struct ipmi_smi_handlers *handlers,
* Make sure the driver is actually initialized, this handles
* problems with initialization order.
*/
if (!initialized) {
rv = ipmi_init_msghandler();
if (rv)
return rv;
/*
* The init code doesn't return an error if it was turned
* off, but it won't initialize. Check that.
*/
if (!initialized)
return -ENODEV;
}
intf = kzalloc(sizeof(*intf), GFP_KERNEL);
if (!intf)
@ -5020,6 +5022,22 @@ static int panic_event(struct notifier_block *this,
return NOTIFY_DONE;
}
/* Must be called with ipmi_interfaces_mutex held. */
static int ipmi_register_driver(void)
{
int rv;
if (drvregistered)
return 0;
rv = driver_register(&ipmidriver.driver);
if (rv)
pr_err("Could not register IPMI driver\n");
else
drvregistered = true;
return rv;
}
static struct notifier_block panic_block = {
.notifier_call = panic_event,
.next = NULL,
@ -5030,41 +5048,47 @@ static int ipmi_init_msghandler(void)
{
int rv;
mutex_lock(&ipmi_interfaces_mutex);
rv = ipmi_register_driver();
if (rv)
goto out;
if (initialized)
return 0;
goto out;
rv = driver_register(&ipmidriver.driver);
if (rv) {
pr_err(PFX "Could not register IPMI driver\n");
return rv;
}
pr_info("ipmi message handler version " IPMI_DRIVER_VERSION "\n");
init_srcu_struct(&ipmi_interfaces_srcu);
timer_setup(&ipmi_timer, ipmi_timeout, 0);
mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
initialized = 1;
initialized = true;
return 0;
out:
mutex_unlock(&ipmi_interfaces_mutex);
return rv;
}
static int __init ipmi_init_msghandler_mod(void)
{
ipmi_init_msghandler();
return 0;
int rv;
pr_info("version " IPMI_DRIVER_VERSION "\n");
mutex_lock(&ipmi_interfaces_mutex);
rv = ipmi_register_driver();
mutex_unlock(&ipmi_interfaces_mutex);
return rv;
}
static void __exit cleanup_ipmi(void)
{
int count;
if (!initialized)
return;
atomic_notifier_chain_unregister(&panic_notifier_list, &panic_block);
if (initialized) {
atomic_notifier_chain_unregister(&panic_notifier_list,
&panic_block);
/*
* This can't be called if any interfaces exist, so no worry
@ -5079,9 +5103,7 @@ static void __exit cleanup_ipmi(void)
atomic_inc(&stop_operation);
del_timer_sync(&ipmi_timer);
driver_unregister(&ipmidriver.driver);
initialized = 0;
initialized = false;
/* Check for buffer leaks. */
count = atomic_read(&smi_msg_inuse_count);
@ -5090,6 +5112,10 @@ static void __exit cleanup_ipmi(void)
count = atomic_read(&recv_msg_inuse_count);
if (count != 0)
pr_warn(PFX "recv message count %d at exit\n", count);
cleanup_srcu_struct(&ipmi_interfaces_srcu);
}
if (drvregistered)
driver_unregister(&ipmidriver.driver);
}
module_exit(cleanup_ipmi);


@ -630,8 +630,9 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
/* Remove the multi-part read marker. */
len -= 2;
data += 2;
for (i = 0; i < len; i++)
ssif_info->data[i] = data[i+2];
ssif_info->data[i] = data[i];
ssif_info->multi_len = len;
ssif_info->multi_pos = 1;
@ -659,8 +660,19 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
}
blocknum = data[0];
len--;
data++;
if (ssif_info->multi_len + len - 1 > IPMI_MAX_MSG_LENGTH) {
if (blocknum != 0xff && len != 31) {
/* All blocks but the last must have 31 data bytes. */
result = -EIO;
if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
pr_info("Received middle message <31\n");
goto continue_op;
}
if (ssif_info->multi_len + len > IPMI_MAX_MSG_LENGTH) {
/* Received message too big, abort the operation. */
result = -E2BIG;
if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
@ -669,16 +681,14 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
goto continue_op;
}
/* Remove the blocknum from the data. */
len--;
for (i = 0; i < len; i++)
ssif_info->data[i + ssif_info->multi_len] = data[i + 1];
ssif_info->data[i + ssif_info->multi_len] = data[i];
ssif_info->multi_len += len;
if (blocknum == 0xff) {
/* End of read */
len = ssif_info->multi_len;
data = ssif_info->data;
} else if (blocknum + 1 != ssif_info->multi_pos) {
} else if (blocknum != ssif_info->multi_pos) {
/*
* Out of sequence block, just abort. Block
* numbers start at zero for the second block,
@ -706,6 +716,7 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
}
}
continue_op:
if (result < 0) {
ssif_inc_stat(ssif_info, receive_errors);
} else {
@ -713,8 +724,6 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
ssif_inc_stat(ssif_info, received_message_parts);
}
continue_op:
if (ssif_info->ssif_debug & SSIF_DEBUG_STATE)
pr_info(PFX "DONE 1: state = %d, result=%d.\n",
ssif_info->ssif_state, result);


@ -154,7 +154,7 @@ static const struct clk_ops clk_busy_mux_ops = {
struct clk *imx_clk_busy_mux(const char *name, void __iomem *reg, u8 shift,
u8 width, void __iomem *busy_reg, u8 busy_shift,
const char **parent_names, int num_parents)
const char * const *parent_names, int num_parents)
{
struct clk_busy_mux *busy;
struct clk *clk;


@ -70,7 +70,7 @@ static const struct clk_ops clk_fixup_mux_ops = {
};
struct clk *imx_clk_fixup_mux(const char *name, void __iomem *reg,
u8 shift, u8 width, const char **parents,
u8 shift, u8 width, const char * const *parents,
int num_parents, void (*fixup)(u32 *val))
{
struct clk_fixup_mux *fixup_mux;


@ -508,8 +508,12 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
* lvds1_gate and lvds2_gate are pseudo-gates. Both can be
* independently configured as clock inputs or outputs. We treat
* the "output_enable" bit as a gate, even though it's really just
* enabling clock output.
* enabling clock output. Initially the gate bits are cleared, as
* otherwise the exclusive configuration gets locked in the setup done
* by software running before the clock driver, with no way to change
* it.
*/
writel(readl(base + 0x160) & ~0x3c00, base + 0x160);
clk[IMX6QDL_CLK_LVDS1_GATE] = imx_clk_gate_exclusive("lvds1_gate", "lvds1_sel", base + 0x160, 10, BIT(12));
clk[IMX6QDL_CLK_LVDS2_GATE] = imx_clk_gate_exclusive("lvds2_gate", "lvds2_sel", base + 0x160, 11, BIT(13));


@ -63,14 +63,14 @@ struct clk *imx_clk_busy_divider(const char *name, const char *parent_name,
struct clk *imx_clk_busy_mux(const char *name, void __iomem *reg, u8 shift,
u8 width, void __iomem *busy_reg, u8 busy_shift,
const char **parent_names, int num_parents);
const char * const *parent_names, int num_parents);
struct clk *imx_clk_fixup_divider(const char *name, const char *parent,
void __iomem *reg, u8 shift, u8 width,
void (*fixup)(u32 *val));
struct clk *imx_clk_fixup_mux(const char *name, void __iomem *reg,
u8 shift, u8 width, const char **parents,
u8 shift, u8 width, const char * const *parents,
int num_parents, void (*fixup)(u32 *val));
static inline struct clk *imx_clk_fixed(const char *name, int rate)
@ -79,7 +79,8 @@ static inline struct clk *imx_clk_fixed(const char *name, int rate)
}
static inline struct clk *imx_clk_mux_ldb(const char *name, void __iomem *reg,
u8 shift, u8 width, const char **parents, int num_parents)
u8 shift, u8 width, const char * const *parents,
int num_parents)
{
return clk_register_mux(NULL, name, parents, num_parents,
CLK_SET_RATE_NO_REPARENT | CLK_SET_RATE_PARENT, reg,
@ -192,7 +193,8 @@ static inline struct clk *imx_clk_gate4(const char *name, const char *parent,
}
static inline struct clk *imx_clk_mux(const char *name, void __iomem *reg,
u8 shift, u8 width, const char **parents, int num_parents)
u8 shift, u8 width, const char * const *parents,
int num_parents)
{
return clk_register_mux(NULL, name, parents, num_parents,
CLK_SET_RATE_NO_REPARENT, reg, shift,
@ -200,7 +202,8 @@ static inline struct clk *imx_clk_mux(const char *name, void __iomem *reg,
}
static inline struct clk *imx_clk_mux2(const char *name, void __iomem *reg,
u8 shift, u8 width, const char **parents, int num_parents)
u8 shift, u8 width, const char * const *parents,
int num_parents)
{
return clk_register_mux(NULL, name, parents, num_parents,
CLK_SET_RATE_NO_REPARENT | CLK_OPS_PARENT_ENABLE,
@ -208,8 +211,9 @@ static inline struct clk *imx_clk_mux2(const char *name, void __iomem *reg,
}
static inline struct clk *imx_clk_mux_flags(const char *name,
void __iomem *reg, u8 shift, u8 width, const char **parents,
int num_parents, unsigned long flags)
void __iomem *reg, u8 shift, u8 width,
const char * const *parents, int num_parents,
unsigned long flags)
{
return clk_register_mux(NULL, name, parents, num_parents,
flags | CLK_SET_RATE_NO_REPARENT, reg, shift, width, 0,


@ -568,13 +568,14 @@ static struct clk_fixed_factor meson8b_cpu_div3 = {
};
static const struct clk_div_table cpu_scale_table[] = {
{ .val = 2, .div = 4 },
{ .val = 3, .div = 6 },
{ .val = 4, .div = 8 },
{ .val = 5, .div = 10 },
{ .val = 6, .div = 12 },
{ .val = 7, .div = 14 },
{ .val = 8, .div = 16 },
{ .val = 1, .div = 4 },
{ .val = 2, .div = 6 },
{ .val = 3, .div = 8 },
{ .val = 4, .div = 10 },
{ .val = 5, .div = 12 },
{ .val = 6, .div = 14 },
{ .val = 7, .div = 16 },
{ .val = 8, .div = 18 },
{ /* sentinel */ },
};


@ -181,8 +181,7 @@ static int __init integrator_ap_timer_init_of(struct device_node *node)
int irq;
struct clk *clk;
unsigned long rate;
struct device_node *pri_node;
struct device_node *sec_node;
struct device_node *alias_node;
base = of_io_request_and_map(node, 0, "integrator-timer");
if (IS_ERR(base))
@ -204,7 +203,18 @@ static int __init integrator_ap_timer_init_of(struct device_node *node)
return err;
}
pri_node = of_find_node_by_path(path);
alias_node = of_find_node_by_path(path);
/*
* The pointer is used as an identifier not as a pointer, we
* can drop the refcount on the of__node immediately after
* getting it.
*/
of_node_put(alias_node);
if (node == alias_node)
/* The primary timer lacks IRQ, use as clocksource */
return integrator_clocksource_init(rate, base);
err = of_property_read_string(of_aliases,
"arm,timer-secondary", &path);
@ -213,14 +223,11 @@ static int __init integrator_ap_timer_init_of(struct device_node *node)
return err;
}
alias_node = of_find_node_by_path(path);
sec_node = of_find_node_by_path(path);
of_node_put(alias_node);
if (node == pri_node)
/* The primary timer lacks IRQ, use as clocksource */
return integrator_clocksource_init(rate, base);
if (node == sec_node) {
if (node == alias_node) {
/* The secondary timer will drive the clock event */
irq = irq_of_parse_and_map(node, 0);
return integrator_clockevent_init(rate, base, irq);


@ -247,7 +247,13 @@ static int pseries_idle_probe(void)
return -ENODEV;
if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
if (lppaca_shared_proc(get_lppaca())) {
/*
* Use local_paca instead of get_lppaca() since
* preemption is not disabled, and it is not required in
* fact, since lppaca_ptr does not need to be the value
* associated to the current CPU, it can be from any CPU.
*/
if (lppaca_shared_proc(local_paca->lppaca_ptr)) {
cpuidle_state_table = shared_states;
max_idle_state = ARRAY_SIZE(shared_states);
} else {


@ -9,7 +9,10 @@ cflags-$(CONFIG_X86_32) := -march=i386
cflags-$(CONFIG_X86_64) := -mcmodel=small
cflags-$(CONFIG_X86) += -m$(BITS) -D__KERNEL__ -O2 \
-fPIC -fno-strict-aliasing -mno-red-zone \
-mno-mmx -mno-sse -fshort-wchar
-mno-mmx -mno-sse -fshort-wchar \
-Wno-pointer-sign \
$(call cc-disable-warning, address-of-packed-member) \
$(call cc-disable-warning, gnu)
# arm64 uses the full KBUILD_CFLAGS so it's necessary to explicitly
# disable the stackleak plugin


@ -468,14 +468,6 @@ static int altera_cvp_probe(struct pci_dev *pdev,
goto err_unmap;
}
ret = driver_create_file(&altera_cvp_driver.driver,
&driver_attr_chkcfg);
if (ret) {
dev_err(&pdev->dev, "Can't create sysfs chkcfg file\n");
fpga_mgr_unregister(mgr);
goto err_unmap;
}
return 0;
err_unmap:
@ -493,7 +485,6 @@ static void altera_cvp_remove(struct pci_dev *pdev)
struct altera_cvp_conf *conf = mgr->priv;
u16 cmd;
driver_remove_file(&altera_cvp_driver.driver, &driver_attr_chkcfg);
fpga_mgr_unregister(mgr);
pci_iounmap(pdev, conf->map);
pci_release_region(pdev, CVP_BAR);
@ -502,7 +493,30 @@ static void altera_cvp_remove(struct pci_dev *pdev)
pci_write_config_word(pdev, PCI_COMMAND, cmd);
}
module_pci_driver(altera_cvp_driver);
static int __init altera_cvp_init(void)
{
int ret;
ret = pci_register_driver(&altera_cvp_driver);
if (ret)
return ret;
ret = driver_create_file(&altera_cvp_driver.driver,
&driver_attr_chkcfg);
if (ret)
pr_warn("Can't create sysfs chkcfg file\n");
return 0;
}
static void __exit altera_cvp_exit(void)
{
driver_remove_file(&altera_cvp_driver.driver, &driver_attr_chkcfg);
pci_unregister_driver(&altera_cvp_driver);
}
module_init(altera_cvp_init);
module_exit(altera_cvp_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Anatolij Gustschin <agust@denx.de>");


@ -54,6 +54,7 @@ struct pl061 {
void __iomem *base;
struct gpio_chip gc;
struct irq_chip irq_chip;
int parent_irq;
#ifdef CONFIG_PM
@ -281,15 +282,6 @@ static int pl061_irq_set_wake(struct irq_data *d, unsigned int state)
return irq_set_irq_wake(pl061->parent_irq, state);
}
static struct irq_chip pl061_irqchip = {
.name = "pl061",
.irq_ack = pl061_irq_ack,
.irq_mask = pl061_irq_mask,
.irq_unmask = pl061_irq_unmask,
.irq_set_type = pl061_irq_type,
.irq_set_wake = pl061_irq_set_wake,
};
static int pl061_probe(struct amba_device *adev, const struct amba_id *id)
{
struct device *dev = &adev->dev;
@ -328,6 +320,13 @@ static int pl061_probe(struct amba_device *adev, const struct amba_id *id)
/*
* irq_chip support
*/
pl061->irq_chip.name = dev_name(dev);
pl061->irq_chip.irq_ack = pl061_irq_ack;
pl061->irq_chip.irq_mask = pl061_irq_mask;
pl061->irq_chip.irq_unmask = pl061_irq_unmask;
pl061->irq_chip.irq_set_type = pl061_irq_type;
pl061->irq_chip.irq_set_wake = pl061_irq_set_wake;
writeb(0, pl061->base + GPIOIE); /* disable irqs */
irq = adev->irq[0];
if (irq < 0) {
@ -336,14 +335,14 @@ static int pl061_probe(struct amba_device *adev, const struct amba_id *id)
}
pl061->parent_irq = irq;
ret = gpiochip_irqchip_add(&pl061->gc, &pl061_irqchip,
ret = gpiochip_irqchip_add(&pl061->gc, &pl061->irq_chip,
0, handle_bad_irq,
IRQ_TYPE_NONE);
if (ret) {
dev_info(&adev->dev, "could not add irqchip\n");
return ret;
}
gpiochip_set_chained_irqchip(&pl061->gc, &pl061_irqchip,
gpiochip_set_chained_irqchip(&pl061->gc, &pl061->irq_chip,
irq, pl061_irq_handler);
amba_set_drvdata(adev, pl061);


@ -116,16 +116,16 @@ static int uvd_v4_2_sw_init(void *handle)
if (r)
return r;
r = amdgpu_uvd_resume(adev);
if (r)
return r;
ring = &adev->uvd.inst->ring;
sprintf(ring->name, "uvd");
r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
if (r)
return r;
r = amdgpu_uvd_resume(adev);
if (r)
return r;
r = amdgpu_uvd_entity_init(adev);
return r;


@ -113,16 +113,16 @@ static int uvd_v5_0_sw_init(void *handle)
if (r)
return r;
r = amdgpu_uvd_resume(adev);
if (r)
return r;
ring = &adev->uvd.inst->ring;
sprintf(ring->name, "uvd");
r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
if (r)
return r;
r = amdgpu_uvd_resume(adev);
if (r)
return r;
r = amdgpu_uvd_entity_init(adev);
return r;


@ -420,16 +420,16 @@ static int uvd_v6_0_sw_init(void *handle)
DRM_INFO("UVD ENC is disabled\n");
}
r = amdgpu_uvd_resume(adev);
if (r)
return r;
ring = &adev->uvd.inst->ring;
sprintf(ring->name, "uvd");
r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
if (r)
return r;
r = amdgpu_uvd_resume(adev);
if (r)
return r;
if (uvd_v6_0_enc_support(adev)) {
for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
ring = &adev->uvd.inst->ring_enc[i];

View file

@ -444,10 +444,6 @@ static int uvd_v7_0_sw_init(void *handle)
DRM_INFO("PSP loading UVD firmware\n");
}
r = amdgpu_uvd_resume(adev);
if (r)
return r;
for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
if (adev->uvd.harvest_config & (1 << j))
continue;
@ -479,6 +475,10 @@ static int uvd_v7_0_sw_init(void *handle)
}
}
r = amdgpu_uvd_resume(adev);
if (r)
return r;
r = amdgpu_uvd_entity_init(adev);
if (r)
return r;


@ -661,6 +661,7 @@ void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
{
uint32_t patched_ihre[KFD_MAX_RING_ENTRY_SIZE];
bool is_patched = false;
unsigned long flags;
if (!kfd->init_complete)
return;
@ -670,7 +671,7 @@ void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
return;
}
spin_lock(&kfd->interrupt_lock);
spin_lock_irqsave(&kfd->interrupt_lock, flags);
if (kfd->interrupts_active
&& interrupt_is_wanted(kfd, ih_ring_entry,
@ -679,7 +680,7 @@ void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
is_patched ? patched_ihre : ih_ring_entry))
queue_work(kfd->ih_wq, &kfd->interrupt_work);
spin_unlock(&kfd->interrupt_lock);
spin_unlock_irqrestore(&kfd->interrupt_lock, flags);
}
int kgd2kfd_quiesce_mm(struct mm_struct *mm)


@ -60,6 +60,11 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name,
return -EINVAL;
}
if (!stream_state) {
DRM_ERROR("No stream state for CRTC%d\n", crtc->index);
return -EINVAL;
}
/* When enabling CRC, we should also disable dithering. */
if (source == AMDGPU_DM_PIPE_CRC_SOURCE_AUTO) {
if (dc_stream_configure_crc(stream_state->ctx->dc,


@ -1425,6 +1425,9 @@ void drm_atomic_helper_wait_for_flip_done(struct drm_device *dev,
DRM_ERROR("[CRTC:%d:%s] flip_done timed out\n",
crtc->base.id, crtc->name);
}
if (old_state->fake_commit)
complete_all(&old_state->fake_commit->flip_done);
}
EXPORT_SYMBOL(drm_atomic_helper_wait_for_flip_done);


@ -334,13 +334,16 @@ int usnic_ib_query_port(struct ib_device *ibdev, u8 port,
usnic_dbg("\n");
mutex_lock(&us_ibdev->usdev_lock);
if (ib_get_eth_speed(ibdev, port, &props->active_speed,
&props->active_width)) {
mutex_unlock(&us_ibdev->usdev_lock);
&props->active_width))
return -EINVAL;
}
/*
* usdev_lock is acquired after (and not before) ib_get_eth_speed call
* because acquiring rtnl_lock in ib_get_eth_speed, while holding
* usdev_lock could lead to a deadlock.
*/
mutex_lock(&us_ibdev->usdev_lock);
/* props being zeroed by the caller, avoid zeroing it here */
props->lid = 0;


@ -640,6 +640,7 @@ next_wqe:
rmr->access = wqe->wr.wr.reg.access;
rmr->lkey = wqe->wr.wr.reg.key;
rmr->rkey = wqe->wr.wr.reg.key;
rmr->iova = wqe->wr.wr.reg.mr->iova;
wqe->state = wqe_state_done;
wqe->status = IB_WC_SUCCESS;
} else {


@ -49,7 +49,7 @@ struct convert_context {
struct bio *bio_out;
struct bvec_iter iter_in;
struct bvec_iter iter_out;
sector_t cc_sector;
u64 cc_sector;
atomic_t cc_pending;
union {
struct skcipher_request *req;
@ -81,7 +81,7 @@ struct dm_crypt_request {
struct convert_context *ctx;
struct scatterlist sg_in[4];
struct scatterlist sg_out[4];
sector_t iv_sector;
u64 iv_sector;
};
struct crypt_config;
@ -160,7 +160,7 @@ struct crypt_config {
struct iv_lmk_private lmk;
struct iv_tcw_private tcw;
} iv_gen_private;
sector_t iv_offset;
u64 iv_offset;
unsigned int iv_size;
unsigned short int sector_size;
unsigned char sector_shift;
@ -2789,7 +2789,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
}
ret = -EINVAL;
if (sscanf(argv[4], "%llu%c", &tmpll, &dummy) != 1) {
if (sscanf(argv[4], "%llu%c", &tmpll, &dummy) != 1 || tmpll != (sector_t)tmpll) {
ti->error = "Invalid device sector";
goto bad;
}


@ -141,7 +141,7 @@ static int delay_class_ctr(struct dm_target *ti, struct delay_class *c, char **a
unsigned long long tmpll;
char dummy;
if (sscanf(argv[1], "%llu%c", &tmpll, &dummy) != 1) {
if (sscanf(argv[1], "%llu%c", &tmpll, &dummy) != 1 || tmpll != (sector_t)tmpll) {
ti->error = "Invalid device sector";
return -EINVAL;
}


@ -213,7 +213,7 @@ static int flakey_ctr(struct dm_target *ti, unsigned int argc, char **argv)
devname = dm_shift_arg(&as);
r = -EINVAL;
if (sscanf(dm_shift_arg(&as), "%llu%c", &tmpll, &dummy) != 1) {
if (sscanf(dm_shift_arg(&as), "%llu%c", &tmpll, &dummy) != 1 || tmpll != (sector_t)tmpll) {
ti->error = "Invalid device sector";
goto bad;
}


@ -56,15 +56,17 @@ struct dm_kcopyd_client {
atomic_t nr_jobs;
/*
* We maintain three lists of jobs:
* We maintain four lists of jobs:
*
* i) jobs waiting for pages
* ii) jobs that have pages, and are waiting for the io to be issued.
* iii) jobs that have completed.
* iii) jobs that don't need to do any IO and just run a callback
* iv) jobs that have completed.
*
* All three of these are protected by job_lock.
* All four of these are protected by job_lock.
*/
spinlock_t job_lock;
struct list_head callback_jobs;
struct list_head complete_jobs;
struct list_head io_jobs;
struct list_head pages_jobs;
@ -625,6 +627,7 @@ static void do_work(struct work_struct *work)
struct dm_kcopyd_client *kc = container_of(work,
struct dm_kcopyd_client, kcopyd_work);
struct blk_plug plug;
unsigned long flags;
/*
* The order that these are called is *very* important.
@ -633,6 +636,10 @@ static void do_work(struct work_struct *work)
* list. io jobs call wake when they complete and it all
* starts again.
*/
spin_lock_irqsave(&kc->job_lock, flags);
list_splice_tail_init(&kc->callback_jobs, &kc->complete_jobs);
spin_unlock_irqrestore(&kc->job_lock, flags);
blk_start_plug(&plug);
process_jobs(&kc->complete_jobs, kc, run_complete_job);
process_jobs(&kc->pages_jobs, kc, run_pages_job);
@ -650,7 +657,7 @@ static void dispatch_job(struct kcopyd_job *job)
struct dm_kcopyd_client *kc = job->kc;
atomic_inc(&kc->nr_jobs);
if (unlikely(!job->source.count))
push(&kc->complete_jobs, job);
push(&kc->callback_jobs, job);
else if (job->pages == &zero_page_list)
push(&kc->io_jobs, job);
else
@ -858,7 +865,7 @@ void dm_kcopyd_do_callback(void *j, int read_err, unsigned long write_err)
job->read_err = read_err;
job->write_err = write_err;
push(&kc->complete_jobs, job);
push(&kc->callback_jobs, job);
wake(kc);
}
EXPORT_SYMBOL(dm_kcopyd_do_callback);
@ -888,6 +895,7 @@ struct dm_kcopyd_client *dm_kcopyd_client_create(struct dm_kcopyd_throttle *thro
return ERR_PTR(-ENOMEM);
spin_lock_init(&kc->job_lock);
INIT_LIST_HEAD(&kc->callback_jobs);
INIT_LIST_HEAD(&kc->complete_jobs);
INIT_LIST_HEAD(&kc->io_jobs);
INIT_LIST_HEAD(&kc->pages_jobs);
@ -939,6 +947,7 @@ void dm_kcopyd_client_destroy(struct dm_kcopyd_client *kc)
/* Wait for completion of all jobs submitted by this client. */
wait_event(kc->destroyq, !atomic_read(&kc->nr_jobs));
BUG_ON(!list_empty(&kc->callback_jobs));
BUG_ON(!list_empty(&kc->complete_jobs));
BUG_ON(!list_empty(&kc->io_jobs));
BUG_ON(!list_empty(&kc->pages_jobs));


@ -45,7 +45,7 @@ static int linear_ctr(struct dm_target *ti, unsigned int argc, char **argv)
}
ret = -EINVAL;
if (sscanf(argv[1], "%llu%c", &tmp, &dummy) != 1) {
if (sscanf(argv[1], "%llu%c", &tmp, &dummy) != 1 || tmp != (sector_t)tmp) {
ti->error = "Invalid device sector";
goto bad;
}


@ -943,7 +943,8 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
char dummy;
int ret;
if (sscanf(argv[1], "%llu%c", &offset, &dummy) != 1) {
if (sscanf(argv[1], "%llu%c", &offset, &dummy) != 1 ||
offset != (sector_t)offset) {
ti->error = "Invalid offset";
return -EINVAL;
}


@ -19,6 +19,7 @@
#include <linux/vmalloc.h>
#include <linux/log2.h>
#include <linux/dm-kcopyd.h>
#include <linux/semaphore.h>
#include "dm.h"
@ -105,6 +106,9 @@ struct dm_snapshot {
/* The on disk metadata handler */
struct dm_exception_store *store;
/* Maximum number of in-flight COW jobs. */
struct semaphore cow_count;
struct dm_kcopyd_client *kcopyd_client;
/* Wait for events based on state_bits */
@ -145,6 +149,19 @@ struct dm_snapshot {
#define RUNNING_MERGE 0
#define SHUTDOWN_MERGE 1
/*
* Maximum number of chunks being copied on write.
*
* The value was decided experimentally as a trade-off between memory
* consumption, stalling the kernel's workqueues and maintaining a high enough
* throughput.
*/
#define DEFAULT_COW_THRESHOLD 2048
static int cow_threshold = DEFAULT_COW_THRESHOLD;
module_param_named(snapshot_cow_threshold, cow_threshold, int, 0644);
MODULE_PARM_DESC(snapshot_cow_threshold, "Maximum number of chunks being copied on write");
DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
"A percentage of time allocated for copy on write");
@ -1190,6 +1207,8 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto bad_hash_tables;
}
sema_init(&s->cow_count, (cow_threshold > 0) ? cow_threshold : INT_MAX);
s->kcopyd_client = dm_kcopyd_client_create(&dm_kcopyd_throttle);
if (IS_ERR(s->kcopyd_client)) {
r = PTR_ERR(s->kcopyd_client);
@ -1575,6 +1594,7 @@ static void copy_callback(int read_err, unsigned long write_err, void *context)
rb_link_node(&pe->out_of_order_node, parent, p);
rb_insert_color(&pe->out_of_order_node, &s->out_of_order_tree);
}
up(&s->cow_count);
}
/*
@ -1598,6 +1618,7 @@ static void start_copy(struct dm_snap_pending_exception *pe)
dest.count = src.count;
/* Hand over to kcopyd */
down(&s->cow_count);
dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, copy_callback, pe);
}
@ -1617,6 +1638,7 @@ static void start_full_bio(struct dm_snap_pending_exception *pe,
pe->full_bio = bio;
pe->full_bio_end_io = bio->bi_end_io;
down(&s->cow_count);
callback_data = dm_kcopyd_prepare_callback(s->kcopyd_client,
copy_callback, pe);
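(Illustrative sketch, not part of the patch: the cow_count semaphore added above bounds the number of in-flight COW jobs by taking a slot before each copy is handed to kcopyd and releasing it in the completion callback. The same pattern, written as standalone userspace C with POSIX semaphores and a made-up THRESHOLD value, looks like this.)

#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>

#define THRESHOLD 4	/* stands in for the snapshot_cow_threshold limit */

static sem_t inflight;

static void *job(void *arg)
{
	/* ... the copy work would happen here ... */
	printf("job %ld done\n", (long)arg);
	sem_post(&inflight);	/* completion path: release the slot */
	return NULL;
}

int main(void)
{
	pthread_t t[16];
	long i;

	sem_init(&inflight, 0, THRESHOLD);
	for (i = 0; i < 16; i++) {
		sem_wait(&inflight);	/* dispatch path: wait for a free slot */
		pthread_create(&t[i], NULL, job, (void *)i);
	}
	for (i = 0; i < 16; i++)
		pthread_join(t[i], NULL);
	sem_destroy(&inflight);
	return 0;
}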


@ -78,7 +78,7 @@ static int unstripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto err;
}
if (sscanf(argv[4], "%llu%c", &start, &dummy) != 1) {
if (sscanf(argv[4], "%llu%c", &start, &dummy) != 1 || start != (sector_t)start) {
ti->error = "Invalid striped device offset";
goto err;
}


@ -968,7 +968,8 @@ static int get_ca_object_length(struct avc_response_frame *r)
return r->operand[7];
}
int avc_ca_app_info(struct firedtv *fdtv, char *app_info, unsigned int *len)
int avc_ca_app_info(struct firedtv *fdtv, unsigned char *app_info,
unsigned int *len)
{
struct avc_command_frame *c = (void *)fdtv->avc_data;
struct avc_response_frame *r = (void *)fdtv->avc_data;
@ -1009,7 +1010,8 @@ out:
return ret;
}
int avc_ca_info(struct firedtv *fdtv, char *app_info, unsigned int *len)
int avc_ca_info(struct firedtv *fdtv, unsigned char *app_info,
unsigned int *len)
{
struct avc_command_frame *c = (void *)fdtv->avc_data;
struct avc_response_frame *r = (void *)fdtv->avc_data;


@ -124,8 +124,10 @@ int avc_lnb_control(struct firedtv *fdtv, char voltage, char burst,
struct dvb_diseqc_master_cmd *diseqcmd);
void avc_remote_ctrl_work(struct work_struct *work);
int avc_register_remote_control(struct firedtv *fdtv);
int avc_ca_app_info(struct firedtv *fdtv, char *app_info, unsigned int *len);
int avc_ca_info(struct firedtv *fdtv, char *app_info, unsigned int *len);
int avc_ca_app_info(struct firedtv *fdtv, unsigned char *app_info,
unsigned int *len);
int avc_ca_info(struct firedtv *fdtv, unsigned char *app_info,
unsigned int *len);
int avc_ca_reset(struct firedtv *fdtv);
int avc_ca_pmt(struct firedtv *fdtv, char *app_info, int length);
int avc_ca_get_time_date(struct firedtv *fdtv, int *interval);


@ -264,6 +264,14 @@ static int venus_probe(struct platform_device *pdev)
if (ret)
return ret;
if (!dev->dma_parms) {
dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms),
GFP_KERNEL);
if (!dev->dma_parms)
return -ENOMEM;
}
dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
INIT_LIST_HEAD(&core->instances);
mutex_init(&core->lock);
INIT_DELAYED_WORK(&core->work, venus_sys_error_handler);


@ -1824,11 +1824,7 @@ static void uvc_delete(struct kref *kref)
usb_put_intf(dev->intf);
usb_put_dev(dev->udev);
if (dev->vdev.dev)
v4l2_device_unregister(&dev->vdev);
#ifdef CONFIG_MEDIA_CONTROLLER
if (media_devnode_is_registered(dev->mdev.devnode))
media_device_unregister(&dev->mdev);
media_device_cleanup(&dev->mdev);
#endif
@ -1885,6 +1881,15 @@ static void uvc_unregister_video(struct uvc_device *dev)
uvc_debugfs_cleanup_stream(stream);
}
uvc_status_unregister(dev);
if (dev->vdev.dev)
v4l2_device_unregister(&dev->vdev);
#ifdef CONFIG_MEDIA_CONTROLLER
if (media_devnode_is_registered(dev->mdev.devnode))
media_device_unregister(&dev->mdev);
#endif
}
int uvc_register_video_device(struct uvc_device *dev,


@ -54,7 +54,7 @@ error:
return ret;
}
static void uvc_input_cleanup(struct uvc_device *dev)
static void uvc_input_unregister(struct uvc_device *dev)
{
if (dev->input)
input_unregister_device(dev->input);
@ -71,7 +71,7 @@ static void uvc_input_report_key(struct uvc_device *dev, unsigned int code,
#else
#define uvc_input_init(dev)
#define uvc_input_cleanup(dev)
#define uvc_input_unregister(dev)
#define uvc_input_report_key(dev, code, value)
#endif /* CONFIG_USB_VIDEO_CLASS_INPUT_EVDEV */
@ -292,12 +292,16 @@ int uvc_status_init(struct uvc_device *dev)
return 0;
}
void uvc_status_cleanup(struct uvc_device *dev)
void uvc_status_unregister(struct uvc_device *dev)
{
usb_kill_urb(dev->int_urb);
uvc_input_unregister(dev);
}
void uvc_status_cleanup(struct uvc_device *dev)
{
usb_free_urb(dev->int_urb);
kfree(dev->status);
uvc_input_cleanup(dev);
}
int uvc_status_start(struct uvc_device *dev, gfp_t flags)


@ -750,6 +750,7 @@ int uvc_register_video_device(struct uvc_device *dev,
/* Status */
int uvc_status_init(struct uvc_device *dev);
void uvc_status_unregister(struct uvc_device *dev);
void uvc_status_cleanup(struct uvc_device *dev);
int uvc_status_start(struct uvc_device *dev, gfp_t flags);
void uvc_status_stop(struct uvc_device *dev);


@ -1954,13 +1954,14 @@ static void atmci_tasklet_func(unsigned long priv)
}
atmci_request_end(host, host->mrq);
state = STATE_IDLE;
goto unlock; /* atmci_request_end() sets host->state */
break;
}
} while (state != prev_state);
host->state = state;
unlock:
spin_unlock(&host->lock);
}


@ -2391,6 +2391,107 @@ static int mv88e6xxx_stats_setup(struct mv88e6xxx_chip *chip)
return mv88e6xxx_g1_stats_clear(chip);
}
/* The mv88e6390 has some hidden registers used for debug and
* development. The errata also makes use of them.
*/
static int mv88e6390_hidden_write(struct mv88e6xxx_chip *chip, int port,
int reg, u16 val)
{
u16 ctrl;
int err;
err = mv88e6xxx_port_write(chip, PORT_RESERVED_1A_DATA_PORT,
PORT_RESERVED_1A, val);
if (err)
return err;
ctrl = PORT_RESERVED_1A_BUSY | PORT_RESERVED_1A_WRITE |
PORT_RESERVED_1A_BLOCK | port << PORT_RESERVED_1A_PORT_SHIFT |
reg;
return mv88e6xxx_port_write(chip, PORT_RESERVED_1A_CTRL_PORT,
PORT_RESERVED_1A, ctrl);
}
static int mv88e6390_hidden_wait(struct mv88e6xxx_chip *chip)
{
return mv88e6xxx_wait(chip, PORT_RESERVED_1A_CTRL_PORT,
PORT_RESERVED_1A, PORT_RESERVED_1A_BUSY);
}
static int mv88e6390_hidden_read(struct mv88e6xxx_chip *chip, int port,
int reg, u16 *val)
{
u16 ctrl;
int err;
ctrl = PORT_RESERVED_1A_BUSY | PORT_RESERVED_1A_READ |
PORT_RESERVED_1A_BLOCK | port << PORT_RESERVED_1A_PORT_SHIFT |
reg;
err = mv88e6xxx_port_write(chip, PORT_RESERVED_1A_CTRL_PORT,
PORT_RESERVED_1A, ctrl);
if (err)
return err;
err = mv88e6390_hidden_wait(chip);
if (err)
return err;
return mv88e6xxx_port_read(chip, PORT_RESERVED_1A_DATA_PORT,
PORT_RESERVED_1A, val);
}
/* Check if the errata has already been applied. */
static bool mv88e6390_setup_errata_applied(struct mv88e6xxx_chip *chip)
{
int port;
int err;
u16 val;
for (port = 0; port < mv88e6xxx_num_ports(chip); port++) {
err = mv88e6390_hidden_read(chip, port, 0, &val);
if (err) {
dev_err(chip->dev,
"Error reading hidden register: %d\n", err);
return false;
}
if (val != 0x01c0)
return false;
}
return true;
}
/* The 6390 copper ports have an errata which require poking magic
* values into undocumented hidden registers and then performing a
* software reset.
*/
static int mv88e6390_setup_errata(struct mv88e6xxx_chip *chip)
{
int port;
int err;
if (mv88e6390_setup_errata_applied(chip))
return 0;
/* Set the ports into blocking mode */
for (port = 0; port < mv88e6xxx_num_ports(chip); port++) {
err = mv88e6xxx_port_set_state(chip, port, BR_STATE_DISABLED);
if (err)
return err;
}
for (port = 0; port < mv88e6xxx_num_ports(chip); port++) {
err = mv88e6390_hidden_write(chip, port, 0, 0x01c0);
if (err)
return err;
}
return mv88e6xxx_software_reset(chip);
}
static int mv88e6xxx_setup(struct dsa_switch *ds)
{
struct mv88e6xxx_chip *chip = ds->priv;
@ -2403,6 +2504,12 @@ static int mv88e6xxx_setup(struct dsa_switch *ds)
mutex_lock(&chip->reg_lock);
if (chip->info->ops->setup_errata) {
err = chip->info->ops->setup_errata(chip);
if (err)
goto unlock;
}
/* Cache the cmode of each port. */
for (i = 0; i < mv88e6xxx_num_ports(chip); i++) {
if (chip->info->ops->port_get_cmode) {
@ -3201,6 +3308,7 @@ static const struct mv88e6xxx_ops mv88e6185_ops = {
static const struct mv88e6xxx_ops mv88e6190_ops = {
/* MV88E6XXX_FAMILY_6390 */
.setup_errata = mv88e6390_setup_errata,
.irl_init_all = mv88e6390_g2_irl_init_all,
.get_eeprom = mv88e6xxx_g2_get_eeprom8,
.set_eeprom = mv88e6xxx_g2_set_eeprom8,
@ -3243,6 +3351,7 @@ static const struct mv88e6xxx_ops mv88e6190_ops = {
static const struct mv88e6xxx_ops mv88e6190x_ops = {
/* MV88E6XXX_FAMILY_6390 */
.setup_errata = mv88e6390_setup_errata,
.irl_init_all = mv88e6390_g2_irl_init_all,
.get_eeprom = mv88e6xxx_g2_get_eeprom8,
.set_eeprom = mv88e6xxx_g2_set_eeprom8,
@ -3285,6 +3394,7 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = {
static const struct mv88e6xxx_ops mv88e6191_ops = {
/* MV88E6XXX_FAMILY_6390 */
.setup_errata = mv88e6390_setup_errata,
.irl_init_all = mv88e6390_g2_irl_init_all,
.get_eeprom = mv88e6xxx_g2_get_eeprom8,
.set_eeprom = mv88e6xxx_g2_set_eeprom8,
@ -3374,6 +3484,7 @@ static const struct mv88e6xxx_ops mv88e6240_ops = {
static const struct mv88e6xxx_ops mv88e6290_ops = {
/* MV88E6XXX_FAMILY_6390 */
.setup_errata = mv88e6390_setup_errata,
.irl_init_all = mv88e6390_g2_irl_init_all,
.get_eeprom = mv88e6xxx_g2_get_eeprom8,
.set_eeprom = mv88e6xxx_g2_set_eeprom8,
@ -3675,6 +3786,7 @@ static const struct mv88e6xxx_ops mv88e6352_ops = {
static const struct mv88e6xxx_ops mv88e6390_ops = {
/* MV88E6XXX_FAMILY_6390 */
.setup_errata = mv88e6390_setup_errata,
.irl_init_all = mv88e6390_g2_irl_init_all,
.get_eeprom = mv88e6xxx_g2_get_eeprom8,
.set_eeprom = mv88e6xxx_g2_set_eeprom8,
@ -3722,6 +3834,7 @@ static const struct mv88e6xxx_ops mv88e6390_ops = {
static const struct mv88e6xxx_ops mv88e6390x_ops = {
/* MV88E6XXX_FAMILY_6390 */
.setup_errata = mv88e6390_setup_errata,
.irl_init_all = mv88e6390_g2_irl_init_all,
.get_eeprom = mv88e6xxx_g2_get_eeprom8,
.set_eeprom = mv88e6xxx_g2_set_eeprom8,


@ -300,6 +300,11 @@ struct mv88e6xxx_mdio_bus {
};
struct mv88e6xxx_ops {
/* Switch Setup Errata, called early in the switch setup to
* allow any errata actions to be performed
*/
int (*setup_errata)(struct mv88e6xxx_chip *chip);
int (*ieee_pri_map)(struct mv88e6xxx_chip *chip);
int (*ip_pri_map)(struct mv88e6xxx_chip *chip);


@ -251,6 +251,16 @@
/* Offset 0x19: Port IEEE Priority Remapping Registers (4-7) */
#define MV88E6095_PORT_IEEE_PRIO_REMAP_4567 0x19
/* Offset 0x1a: Magic undocumented errata register */
#define PORT_RESERVED_1A 0x1a
#define PORT_RESERVED_1A_BUSY BIT(15)
#define PORT_RESERVED_1A_WRITE BIT(14)
#define PORT_RESERVED_1A_READ 0
#define PORT_RESERVED_1A_PORT_SHIFT 5
#define PORT_RESERVED_1A_BLOCK (0xf << 10)
#define PORT_RESERVED_1A_CTRL_PORT 4
#define PORT_RESERVED_1A_DATA_PORT 5
int mv88e6xxx_port_read(struct mv88e6xxx_chip *chip, int port, int reg,
u16 *val);
int mv88e6xxx_port_write(struct mv88e6xxx_chip *chip, int port, int reg,

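For illustration only, not part of the patch: the PORT_RESERVED_1A defines above and the mv88e6390_hidden_read()/write() helpers earlier in this diff build a 16-bit control word that is written to the hidden control port (port 4) after the data has been staged in the hidden data port (port 5). A minimal stand-alone C sketch of that control-word layout; the macro and function names here are invented for the example:

#include <stdint.h>
#include <stdio.h>

/* Same bit layout as the PORT_RESERVED_1A defines in the patch. */
#define RSVD_1A_BUSY            (1u << 15)
#define RSVD_1A_WRITE           (1u << 14)
#define RSVD_1A_READ            0u
#define RSVD_1A_BLOCK           (0xfu << 10)
#define RSVD_1A_PORT_SHIFT      5

/* Assemble the control word for a hidden-register access. */
static uint16_t hidden_ctrl(int is_write, int port, int reg)
{
        return RSVD_1A_BUSY |
               (is_write ? RSVD_1A_WRITE : RSVD_1A_READ) |
               RSVD_1A_BLOCK |
               (uint16_t)(port << RSVD_1A_PORT_SHIFT) |
               (uint16_t)reg;
}

int main(void)
{
        /* The errata loop writes register 0 of each port with 0x01c0. */
        printf("write ctrl for port 2: 0x%04x\n", hidden_ctrl(1, 2, 0));
        printf("read  ctrl for port 2: 0x%04x\n", hidden_ctrl(0, 2, 0));
        return 0;
}

The setup_errata hook then amounts to: put every port in BR_STATE_DISABLED, stage 0x01c0 and issue the write control word for register 0 of each port, and finish with a software reset.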

@ -173,10 +173,14 @@ static int e1000e_phc_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
struct e1000_adapter *adapter = container_of(ptp, struct e1000_adapter,
ptp_clock_info);
unsigned long flags;
u64 ns;
u64 cycles, ns;
spin_lock_irqsave(&adapter->systim_lock, flags);
ns = timecounter_read(&adapter->tc);
/* Use timecounter_cyc2time() to allow non-monotonic SYSTIM readings */
cycles = adapter->cc.read(&adapter->cc);
ns = timecounter_cyc2time(&adapter->tc, cycles);
spin_unlock_irqrestore(&adapter->systim_lock, flags);
*ts = ns_to_timespec64(ns);
@ -232,9 +236,12 @@ static void e1000e_systim_overflow_work(struct work_struct *work)
systim_overflow_work.work);
struct e1000_hw *hw = &adapter->hw;
struct timespec64 ts;
u64 ns;
adapter->ptp_clock_info.gettime64(&adapter->ptp_clock_info, &ts);
/* Update the timecounter */
ns = timecounter_read(&adapter->tc);
ts = ns_to_timespec64(ns);
e_dbg("SYSTIM overflow check at %lld.%09lu\n",
(long long) ts.tv_sec, ts.tv_nsec);

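The e1000e change above switches from timecounter_read(), which assumes SYSTIM only moves forward, to reading the cycle counter and converting it with timecounter_cyc2time(), which can also translate a snapshot slightly behind the cached state. A simplified stand-alone model of that conversion; the structure fields and scaling below are illustrative, not the kernel implementation:

#include <stdint.h>
#include <stdio.h>

/* Convert an arbitrary counter snapshot against a cached
 * (cycle_last, nsec_base) pair without advancing that state, treating a
 * snapshot slightly behind cycle_last as a small step backwards instead
 * of a huge step forwards.
 */
struct tc_model {
        uint64_t mask;          /* counter width mask */
        uint64_t cycle_last;    /* counter value at nsec_base */
        uint64_t nsec_base;     /* nanoseconds at cycle_last */
        uint64_t mult, shift;   /* cycles -> ns scale factor */
};

static uint64_t cyc2time(const struct tc_model *tc, uint64_t cycles)
{
        uint64_t delta = (cycles - tc->cycle_last) & tc->mask;

        if (delta > tc->mask / 2) {
                /* counter appears to have gone backwards */
                delta = (tc->cycle_last - cycles) & tc->mask;
                return tc->nsec_base - ((delta * tc->mult) >> tc->shift);
        }
        return tc->nsec_base + ((delta * tc->mult) >> tc->shift);
}

int main(void)
{
        struct tc_model tc = {
                .mask = ~0ull, .cycle_last = 1000,
                .nsec_base = 500000, .mult = 8, .shift = 3,
        };

        printf("%llu\n", (unsigned long long)cyc2time(&tc, 1010)); /* ahead  */
        printf("%llu\n", (unsigned long long)cyc2time(&tc, 995));  /* behind */
        return 0;
}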

@ -4,6 +4,7 @@
#include "ixgbe.h"
#include <net/xfrm.h>
#include <crypto/aead.h>
#include <linux/if_bridge.h>
/**
* ixgbe_ipsec_set_tx_sa - set the Tx SA registers
@ -676,7 +677,8 @@ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs)
} else {
struct tx_sa tsa;
if (adapter->num_vfs)
if (adapter->num_vfs &&
adapter->bridge_mode != BRIDGE_MODE_VEPA)
return -EOPNOTSUPP;
/* find the first unused index */


@ -4635,12 +4635,15 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
lower_dev,
upper_dev);
} else if (netif_is_lag_master(upper_dev)) {
if (info->linking)
if (info->linking) {
err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
upper_dev);
else
} else {
mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port,
false);
mlxsw_sp_port_lag_leave(mlxsw_sp_port,
upper_dev);
}
} else if (netif_is_ovs_master(upper_dev)) {
if (info->linking)
err = mlxsw_sp_port_ovs_join(mlxsw_sp_port);


@ -1761,7 +1761,7 @@ static void
mlxsw_sp_bridge_port_vlan_del(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
{
u16 pvid = mlxsw_sp_port->pvid == vid ? 0 : vid;
u16 pvid = mlxsw_sp_port->pvid == vid ? 0 : mlxsw_sp_port->pvid;
struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);


@ -214,6 +214,8 @@ enum cfg_version {
};
static const struct pci_device_id rtl8169_pci_tbl[] = {
{ PCI_VDEVICE(REALTEK, 0x2502), RTL_CFG_1 },
{ PCI_VDEVICE(REALTEK, 0x2600), RTL_CFG_1 },
{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8129), 0, 0, RTL_CFG_0 },
{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8136), 0, 0, RTL_CFG_2 },
{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8161), 0, 0, RTL_CFG_1 },


@ -123,6 +123,7 @@ static void qmimux_setup(struct net_device *dev)
dev->addr_len = 0;
dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
dev->netdev_ops = &qmimux_netdev_ops;
dev->mtu = 1500;
dev->needs_free_netdev = true;
}


@ -71,7 +71,7 @@ void ath10k_sta_update_rx_tid_stats_ampdu(struct ath10k *ar, u16 peer_id, u8 tid
spin_lock_bh(&ar->data_lock);
peer = ath10k_peer_find_by_id(ar, peer_id);
if (!peer)
if (!peer || !peer->sta)
goto out;
arsta = (struct ath10k_sta *)peer->sta->drv_priv;


@ -2589,7 +2589,7 @@ static void ath10k_htt_fetch_peer_stats(struct ath10k *ar,
rcu_read_lock();
spin_lock_bh(&ar->data_lock);
peer = ath10k_peer_find_by_id(ar, peer_id);
if (!peer) {
if (!peer || !peer->sta) {
ath10k_warn(ar, "Invalid peer id %d peer stats buffer\n",
peer_id);
goto out;
@ -2642,7 +2642,7 @@ static void ath10k_fetch_10_2_tx_stats(struct ath10k *ar, u8 *data)
rcu_read_lock();
spin_lock_bh(&ar->data_lock);
peer = ath10k_peer_find_by_id(ar, peer_id);
if (!peer) {
if (!peer || !peer->sta) {
ath10k_warn(ar, "Invalid peer id %d in peer stats buffer\n",
peer_id);
goto out;


@ -2938,7 +2938,8 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
}
iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band);
iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band,
false);
ret = iwl_mvm_update_sta(mvm, vif, sta);
} else if (old_state == IEEE80211_STA_ASSOC &&
new_state == IEEE80211_STA_AUTHORIZED) {
@ -2954,7 +2955,8 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
/* enable beacon filtering */
WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));
iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band);
iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band,
true);
ret = 0;
} else if (old_state == IEEE80211_STA_AUTHORIZED &&


@ -1685,7 +1685,7 @@ iwl_mvm_vif_dbgfs_clean(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
#endif /* CONFIG_IWLWIFI_DEBUGFS */
/* rate scaling */
int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool init);
int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool sync);
void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, u32 rate, bool agg);
int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate);
void rs_update_last_rssi(struct iwl_mvm *mvm,


@ -1280,7 +1280,7 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
(unsigned long)(lq_sta->last_tx +
(IWL_MVM_RS_IDLE_TIMEOUT * HZ)))) {
IWL_DEBUG_RATE(mvm, "Tx idle for too long. reinit rs\n");
iwl_mvm_rs_rate_init(mvm, sta, info->band);
iwl_mvm_rs_rate_init(mvm, sta, info->band, true);
return;
}
lq_sta->last_tx = jiffies;
@ -2870,9 +2870,8 @@ void rs_update_last_rssi(struct iwl_mvm *mvm,
static void rs_initialize_lq(struct iwl_mvm *mvm,
struct ieee80211_sta *sta,
struct iwl_lq_sta *lq_sta,
enum nl80211_band band)
enum nl80211_band band, bool update)
{
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
struct iwl_scale_tbl_info *tbl;
struct rs_rate *rate;
u8 active_tbl = 0;
@ -2901,8 +2900,7 @@ static void rs_initialize_lq(struct iwl_mvm *mvm,
rs_set_expected_tpt_table(lq_sta, tbl);
rs_fill_lq_cmd(mvm, sta, lq_sta, rate);
/* TODO restore station should remember the lq cmd */
iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq,
mvmsta->sta_state < IEEE80211_STA_AUTHORIZED);
iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, !update);
}
static void rs_drv_get_rate(void *mvm_r, struct ieee80211_sta *sta,
@ -3155,7 +3153,7 @@ void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, u32 rate, bool agg)
* Called after adding a new station to initialize rate scaling
*/
static void rs_drv_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
enum nl80211_band band)
enum nl80211_band band, bool update)
{
int i, j;
struct ieee80211_hw *hw = mvm->hw;
@ -3235,7 +3233,7 @@ static void rs_drv_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
#ifdef CONFIG_IWLWIFI_DEBUGFS
iwl_mvm_reset_frame_stats(mvm);
#endif
rs_initialize_lq(mvm, sta, lq_sta, band);
rs_initialize_lq(mvm, sta, lq_sta, band, update);
}
static void rs_drv_rate_update(void *mvm_r,
@ -3255,7 +3253,7 @@ static void rs_drv_rate_update(void *mvm_r,
for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++)
ieee80211_stop_tx_ba_session(sta, tid);
iwl_mvm_rs_rate_init(mvm, sta, sband->band);
iwl_mvm_rs_rate_init(mvm, sta, sband->band, true);
}
#ifdef CONFIG_MAC80211_DEBUGFS
@ -4112,12 +4110,12 @@ static const struct rate_control_ops rs_mvm_ops_drv = {
};
void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
enum nl80211_band band)
enum nl80211_band band, bool update)
{
if (iwl_mvm_has_tlc_offload(mvm))
rs_fw_rate_init(mvm, sta, band);
else
rs_drv_rate_init(mvm, sta, band);
rs_drv_rate_init(mvm, sta, band, update);
}
int iwl_mvm_rate_control_register(void)


@ -420,7 +420,7 @@ struct iwl_lq_sta {
/* Initialize station's rate scaling information after adding station */
void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
enum nl80211_band band);
enum nl80211_band band, bool init);
/* Notify RS about Tx status */
void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,


@ -900,20 +900,19 @@ int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
/**
* iwl_mvm_send_lq_cmd() - Send link quality command
* @init: This command is sent as part of station initialization right
* after station has been added.
* @sync: This command can be sent synchronously.
*
* The link quality command is sent as the last step of station creation.
* This is the special case in which init is set and we call a callback in
* this case to clear the state indicating that station creation is in
* progress.
*/
int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool init)
int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool sync)
{
struct iwl_host_cmd cmd = {
.id = LQ_CMD,
.len = { sizeof(struct iwl_lq_cmd), },
.flags = init ? 0 : CMD_ASYNC,
.flags = sync ? 0 : CMD_ASYNC,
.data = { lq, },
};


@ -378,7 +378,9 @@ static int add_changeset_node(struct overlay_changeset *ovcs,
if (ret)
return ret;
return build_changeset_next_level(ovcs, tchild, node);
ret = build_changeset_next_level(ovcs, tchild, node);
of_node_put(tchild);
return ret;
}
if (node->phandle && tchild->phandle)


@ -2231,7 +2231,8 @@ static int asus_wmi_add(struct platform_device *pdev)
err = asus_wmi_backlight_init(asus);
if (err && err != -ENODEV)
goto fail_backlight;
}
} else
err = asus_wmi_set_devstate(ASUS_WMI_DEVID_BACKLIGHT, 2, NULL);
status = wmi_install_notify_handler(asus->driver->event_guid,
asus_wmi_notify, asus);


@ -1266,7 +1266,7 @@ void mr_update_load_balance_params(struct MR_DRV_RAID_MAP_ALL *drv_map,
for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES_EXT; ldCount++) {
ld = MR_TargetIdToLdGet(ldCount, drv_map);
if (ld >= MAX_LOGICAL_DRIVES_EXT) {
if (ld >= MAX_LOGICAL_DRIVES_EXT - 1) {
lbInfo[ldCount].loadBalanceFlag = 0;
continue;
}


@ -2832,7 +2832,7 @@ static void megasas_build_ld_nonrw_fusion(struct megasas_instance *instance,
device_id < instance->fw_supported_vd_count)) {
ld = MR_TargetIdToLdGet(device_id, local_map_ptr);
if (ld >= instance->fw_supported_vd_count)
if (ld >= instance->fw_supported_vd_count - 1)
fp_possible = 0;
else {
raid = MR_LdRaidGet(ld, local_map_ptr);


@ -3344,8 +3344,9 @@ _base_mpi_ep_writeq(__u64 b, volatile void __iomem *addr,
static inline void
_base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
{
wmb();
__raw_writeq(b, addr);
mmiowb();
barrier();
}
#else
static inline void


@ -952,6 +952,9 @@ static int qedi_find_boot_info(struct qedi_ctx *qedi,
cls_sess = iscsi_conn_to_session(cls_conn);
sess = cls_sess->dd_data;
if (!iscsi_is_session_online(cls_sess))
continue;
if (pri_ctrl_flags) {
if (!strcmp(pri_tgt->iscsi_name, sess->targetname) &&
!strcmp(pri_tgt->ip_addr, ep_ip_addr)) {


@ -2720,6 +2720,9 @@ static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info,
switch (response->header.iu_type) {
case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS:
case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS:
if (io_request->scmd)
io_request->scmd->result = 0;
/* fall through */
case PQI_RESPONSE_IU_GENERAL_MANAGEMENT:
break;
case PQI_RESPONSE_IU_TASK_MANAGEMENT:
@ -6686,6 +6689,7 @@ static void pqi_shutdown(struct pci_dev *pci_dev)
* storage.
*/
rc = pqi_flush_cache(ctrl_info, SHUTDOWN);
pqi_free_interrupts(ctrl_info);
pqi_reset(ctrl_info);
if (rc == 0)
return;


@ -724,13 +724,18 @@ static void z_erofs_vle_unzip_kickoff(void *ptr, int bios)
struct z_erofs_vle_unzip_io *io = tagptr_unfold_ptr(t);
bool background = tagptr_unfold_tags(t);
if (atomic_add_return(bios, &io->pending_bios))
return;
if (!background) {
unsigned long flags;
if (background)
spin_lock_irqsave(&io->u.wait.lock, flags);
if (!atomic_add_return(bios, &io->pending_bios))
wake_up_locked(&io->u.wait);
spin_unlock_irqrestore(&io->u.wait.lock, flags);
return;
}
if (!atomic_add_return(bios, &io->pending_bios))
queue_work(z_erofs_workqueue, &io->u.work);
else
wake_up(&io->u.wait);
}
static inline void z_erofs_vle_read_endio(struct bio *bio)


@ -108,12 +108,17 @@ spc_emulate_inquiry_std(struct se_cmd *cmd, unsigned char *buf)
buf[7] = 0x2; /* CmdQue=1 */
memcpy(&buf[8], "LIO-ORG ", 8);
memset(&buf[16], 0x20, 16);
/*
* ASCII data fields described as being left-aligned shall have any
* unused bytes at the end of the field (i.e., highest offset) and the
* unused bytes shall be filled with ASCII space characters (20h).
*/
memset(&buf[8], 0x20, 8 + 16 + 4);
memcpy(&buf[8], "LIO-ORG", sizeof("LIO-ORG") - 1);
memcpy(&buf[16], dev->t10_wwn.model,
min_t(size_t, strlen(dev->t10_wwn.model), 16));
strnlen(dev->t10_wwn.model, 16));
memcpy(&buf[32], dev->t10_wwn.revision,
min_t(size_t, strlen(dev->t10_wwn.revision), 4));
strnlen(dev->t10_wwn.revision, 4));
buf[4] = 31; /* Set additional length to 31 */
return 0;
@ -251,7 +256,9 @@ check_t10_vend_desc:
buf[off] = 0x2; /* ASCII */
buf[off+1] = 0x1; /* T10 Vendor ID */
buf[off+2] = 0x0;
memcpy(&buf[off+4], "LIO-ORG", 8);
/* left align Vendor ID and pad with spaces */
memset(&buf[off+4], 0x20, 8);
memcpy(&buf[off+4], "LIO-ORG", sizeof("LIO-ORG") - 1);
/* Extra Byte for NULL Terminator */
id_len++;
/* Identifier Length */

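The comment added above spells out the SPC rule: ASCII INQUIRY fields are left-aligned, their unused tail is filled with spaces (20h), and the copy length is clamped with strnlen() so a short or unterminated source cannot overrun the field. The same idea stand-alone, with an example field width:

#include <stdio.h>
#include <string.h>

/* Left-align an ASCII field and pad the unused tail with spaces,
 * clamping the copy to the field width with strnlen().
 */
static void fill_ascii_field(unsigned char *field, size_t width, const char *src)
{
        memset(field, 0x20, width);
        memcpy(field, src, strnlen(src, width));
}

int main(void)
{
        unsigned char vendor[8];

        fill_ascii_field(vendor, sizeof(vendor), "LIO-ORG");
        printf("[%.8s]\n", vendor);     /* prints "[LIO-ORG ]" */
        return 0;
}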

@ -224,19 +224,28 @@ void transport_subsystem_check_init(void)
sub_api_initialized = 1;
}
static void target_release_sess_cmd_refcnt(struct percpu_ref *ref)
{
struct se_session *sess = container_of(ref, typeof(*sess), cmd_count);
wake_up(&sess->cmd_list_wq);
}
/**
* transport_init_session - initialize a session object
* @se_sess: Session object pointer.
*
* The caller must have zero-initialized @se_sess before calling this function.
*/
void transport_init_session(struct se_session *se_sess)
int transport_init_session(struct se_session *se_sess)
{
INIT_LIST_HEAD(&se_sess->sess_list);
INIT_LIST_HEAD(&se_sess->sess_acl_list);
INIT_LIST_HEAD(&se_sess->sess_cmd_list);
spin_lock_init(&se_sess->sess_cmd_lock);
init_waitqueue_head(&se_sess->cmd_list_wq);
return percpu_ref_init(&se_sess->cmd_count,
target_release_sess_cmd_refcnt, 0, GFP_KERNEL);
}
EXPORT_SYMBOL(transport_init_session);
@ -247,6 +256,7 @@ EXPORT_SYMBOL(transport_init_session);
struct se_session *transport_alloc_session(enum target_prot_op sup_prot_ops)
{
struct se_session *se_sess;
int ret;
se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL);
if (!se_sess) {
@ -254,7 +264,11 @@ struct se_session *transport_alloc_session(enum target_prot_op sup_prot_ops)
" se_sess_cache\n");
return ERR_PTR(-ENOMEM);
}
transport_init_session(se_sess);
ret = transport_init_session(se_sess);
if (ret < 0) {
kfree(se_sess);
return ERR_PTR(ret);
}
se_sess->sup_prot_ops = sup_prot_ops;
return se_sess;
@ -581,6 +595,7 @@ void transport_free_session(struct se_session *se_sess)
sbitmap_queue_free(&se_sess->sess_tag_pool);
kvfree(se_sess->sess_cmd_map);
}
percpu_ref_exit(&se_sess->cmd_count);
kmem_cache_free(se_sess_cache, se_sess);
}
EXPORT_SYMBOL(transport_free_session);
@ -2724,6 +2739,7 @@ int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref)
}
se_cmd->transport_state |= CMD_T_PRE_EXECUTE;
list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list);
percpu_ref_get(&se_sess->cmd_count);
out:
spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
@ -2754,8 +2770,6 @@ static void target_release_cmd_kref(struct kref *kref)
if (se_sess) {
spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
list_del_init(&se_cmd->se_cmd_list);
if (se_sess->sess_tearing_down && list_empty(&se_sess->sess_cmd_list))
wake_up(&se_sess->cmd_list_wq);
spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
}
@ -2763,6 +2777,8 @@ static void target_release_cmd_kref(struct kref *kref)
se_cmd->se_tfo->release_cmd(se_cmd);
if (compl)
complete(compl);
percpu_ref_put(&se_sess->cmd_count);
}
/**
@ -2891,6 +2907,8 @@ void target_sess_cmd_list_set_waiting(struct se_session *se_sess)
spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
se_sess->sess_tearing_down = 1;
spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
percpu_ref_kill(&se_sess->cmd_count);
}
EXPORT_SYMBOL(target_sess_cmd_list_set_waiting);
@ -2905,17 +2923,14 @@ void target_wait_for_sess_cmds(struct se_session *se_sess)
WARN_ON_ONCE(!se_sess->sess_tearing_down);
spin_lock_irq(&se_sess->sess_cmd_lock);
do {
ret = wait_event_lock_irq_timeout(
se_sess->cmd_list_wq,
list_empty(&se_sess->sess_cmd_list),
se_sess->sess_cmd_lock, 180 * HZ);
ret = wait_event_timeout(se_sess->cmd_list_wq,
percpu_ref_is_zero(&se_sess->cmd_count),
180 * HZ);
list_for_each_entry(cmd, &se_sess->sess_cmd_list, se_cmd_list)
target_show_cmd("session shutdown: still waiting for ",
cmd);
} while (ret <= 0);
spin_unlock_irq(&se_sess->sess_cmd_lock);
}
EXPORT_SYMBOL(target_wait_for_sess_cmds);

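The target-core changes above replace "wait until sess_cmd_list is empty" with a reference count (cmd_count): each command takes a reference, command release drops it, and session teardown kills the counter and then waits for it to reach zero. A toy single-threaded C model of that get/put/kill/is-zero shape; it only mirrors the counting pattern, not the percpu_ref API:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* One "alive" bias reference plus one reference per in-flight command. */
struct session_model {
        atomic_long cmd_count;
};

static void session_init(struct session_model *s)
{
        atomic_init(&s->cmd_count, 1);          /* like percpu_ref_init() */
}

static void cmd_get(struct session_model *s)
{
        atomic_fetch_add(&s->cmd_count, 1);     /* like percpu_ref_get() */
}

static void cmd_put(struct session_model *s)
{
        atomic_fetch_sub(&s->cmd_count, 1);     /* like percpu_ref_put() */
}

static void session_kill(struct session_model *s)
{
        atomic_fetch_sub(&s->cmd_count, 1);     /* drop the bias, like percpu_ref_kill() */
}

static bool session_drained(struct session_model *s)
{
        return atomic_load(&s->cmd_count) == 0; /* like percpu_ref_is_zero() */
}

int main(void)
{
        struct session_model s;

        session_init(&s);
        cmd_get(&s);                                    /* command submitted */
        session_kill(&s);                               /* teardown starts */
        printf("drained: %d\n", session_drained(&s));   /* 0: must keep waiting */
        cmd_put(&s);                                    /* command completes */
        printf("drained: %d\n", session_drained(&s));   /* 1: wait can finish */
        return 0;
}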

@ -480,6 +480,8 @@ static const struct target_core_fabric_ops xcopy_pt_tfo = {
int target_xcopy_setup_pt(void)
{
int ret;
xcopy_wq = alloc_workqueue("xcopy_wq", WQ_MEM_RECLAIM, 0);
if (!xcopy_wq) {
pr_err("Unable to allocate xcopy_wq\n");
@ -497,7 +499,9 @@ int target_xcopy_setup_pt(void)
INIT_LIST_HEAD(&xcopy_pt_nacl.acl_list);
INIT_LIST_HEAD(&xcopy_pt_nacl.acl_sess_list);
memset(&xcopy_pt_sess, 0, sizeof(struct se_session));
transport_init_session(&xcopy_pt_sess);
ret = transport_init_session(&xcopy_pt_sess);
if (ret < 0)
return ret;
xcopy_pt_nacl.se_tpg = &xcopy_pt_tpg;
xcopy_pt_nacl.nacl_sess = &xcopy_pt_sess;


@ -2780,6 +2780,7 @@ static struct platform_driver arm_sbsa_uart_platform_driver = {
.name = "sbsa-uart",
.of_match_table = of_match_ptr(sbsa_uart_of_match),
.acpi_match_table = ACPI_PTR(sbsa_uart_acpi_match),
.suppress_bind_attrs = IS_BUILTIN(CONFIG_SERIAL_AMBA_PL011),
},
};
@ -2808,6 +2809,7 @@ static struct amba_driver pl011_driver = {
.drv = {
.name = "uart-pl011",
.pm = &pl011_dev_pm_ops,
.suppress_bind_attrs = IS_BUILTIN(CONFIG_SERIAL_AMBA_PL011),
},
.id_table = pl011_ids,
.probe = pl011_probe,


@ -919,6 +919,7 @@ static struct platform_driver pic32_uart_platform_driver = {
.driver = {
.name = PIC32_DEV_NAME,
.of_match_table = of_match_ptr(pic32_serial_dt_ids),
.suppress_bind_attrs = IS_BUILTIN(CONFIG_SERIAL_PIC32),
},
};


@ -205,10 +205,15 @@ static int uart_port_startup(struct tty_struct *tty, struct uart_state *state,
if (!state->xmit.buf) {
state->xmit.buf = (unsigned char *) page;
uart_circ_clear(&state->xmit);
uart_port_unlock(uport, flags);
} else {
uart_port_unlock(uport, flags);
/*
* Do not free() the page under the port lock, see
* uart_shutdown().
*/
free_page(page);
}
uart_port_unlock(uport, flags);
retval = uport->ops->startup(uport);
if (retval == 0) {
@ -268,6 +273,7 @@ static void uart_shutdown(struct tty_struct *tty, struct uart_state *state)
struct uart_port *uport = uart_port_check(state);
struct tty_port *port = &state->port;
unsigned long flags = 0;
char *xmit_buf = NULL;
/*
* Set the TTY IO error marker
@ -298,14 +304,18 @@ static void uart_shutdown(struct tty_struct *tty, struct uart_state *state)
tty_port_set_suspended(port, 0);
/*
* Free the transmit buffer page.
* Do not free() the transmit buffer page under the port lock since
* this can create various circular locking scenarios. For instance,
* console driver may need to allocate/free a debug object, which
* can endup in printk() recursion.
*/
uart_port_lock(state, flags);
if (state->xmit.buf) {
free_page((unsigned long)state->xmit.buf);
xmit_buf = state->xmit.buf;
state->xmit.buf = NULL;
}
uart_port_unlock(uport, flags);
if (xmit_buf)
free_page((unsigned long)xmit_buf);
}
/**

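The uart_shutdown()/uart_port_startup() changes above are the usual "detach under the lock, free after the lock is dropped" pattern, so freeing the page can never re-enter code that needs the port lock. A stand-alone sketch of the same shape with a pthread mutex; the struct and names are illustrative:

#include <pthread.h>
#include <stdlib.h>

struct port_model {
        pthread_mutex_t lock;
        char *xmit_buf;
};

static void shutdown_port(struct port_model *p)
{
        char *buf;

        pthread_mutex_lock(&p->lock);
        buf = p->xmit_buf;              /* detach the buffer under the lock */
        p->xmit_buf = NULL;
        pthread_mutex_unlock(&p->lock);

        free(buf);                      /* free only after the lock is dropped */
}

int main(void)
{
        struct port_model p = {
                .lock = PTHREAD_MUTEX_INITIALIZER,
                .xmit_buf = malloc(4096),
        };

        shutdown_port(&p);
        return 0;
}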

@ -1608,6 +1608,7 @@ static struct platform_driver cdns_uart_platform_driver = {
.name = CDNS_UART_NAME,
.of_match_table = cdns_uart_of_match,
.pm = &cdns_uart_dev_pm_ops,
.suppress_bind_attrs = IS_BUILTIN(CONFIG_SERIAL_XILINX_PS_UART),
},
};


@ -358,6 +358,7 @@ struct renesas_usb3 {
bool extcon_host; /* check id and set EXTCON_USB_HOST */
bool extcon_usb; /* check vbus and set EXTCON_USB */
bool forced_b_device;
bool start_to_connect;
};
#define gadget_to_renesas_usb3(_gadget) \
@ -476,6 +477,7 @@ static void usb3_init_axi_bridge(struct renesas_usb3 *usb3)
static void usb3_init_epc_registers(struct renesas_usb3 *usb3)
{
usb3_write(usb3, ~0, USB3_USB_INT_STA_1);
if (!usb3->workaround_for_vbus)
usb3_enable_irq_1(usb3, USB_INT_1_VBUS_CNG);
}
@ -700,8 +702,7 @@ static void usb3_mode_config(struct renesas_usb3 *usb3, bool host, bool a_dev)
usb3_set_mode_by_role_sw(usb3, host);
usb3_vbus_out(usb3, a_dev);
/* for A-Peripheral or forced B-device mode */
if ((!host && a_dev) ||
(usb3->workaround_for_vbus && usb3->forced_b_device))
if ((!host && a_dev) || usb3->start_to_connect)
usb3_connect(usb3);
spin_unlock_irqrestore(&usb3->lock, flags);
}
@ -2432,7 +2433,11 @@ static ssize_t renesas_usb3_b_device_write(struct file *file,
if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
return -EFAULT;
if (!strncmp(buf, "1", 1))
usb3->start_to_connect = false;
if (usb3->workaround_for_vbus && usb3->forced_b_device &&
!strncmp(buf, "2", 1))
usb3->start_to_connect = true;
else if (!strncmp(buf, "1", 1))
usb3->forced_b_device = true;
else
usb3->forced_b_device = false;
@ -2440,7 +2445,7 @@ static ssize_t renesas_usb3_b_device_write(struct file *file,
if (usb3->workaround_for_vbus)
usb3_disconnect(usb3);
/* Let this driver call usb3_connect() anyway */
/* Let this driver call usb3_connect() if needed */
usb3_check_id(usb3);
return count;


@ -317,6 +317,9 @@ struct tcpm_port {
/* Deadline in jiffies to exit src_try_wait state */
unsigned long max_wait;
/* port belongs to a self powered device */
bool self_powered;
#ifdef CONFIG_DEBUG_FS
struct dentry *dentry;
struct mutex logbuffer_lock; /* log buffer access lock */
@ -3257,7 +3260,8 @@ static void run_state_machine(struct tcpm_port *port)
case SRC_HARD_RESET_VBUS_OFF:
tcpm_set_vconn(port, true);
tcpm_set_vbus(port, false);
tcpm_set_roles(port, false, TYPEC_SOURCE, TYPEC_HOST);
tcpm_set_roles(port, port->self_powered, TYPEC_SOURCE,
TYPEC_HOST);
tcpm_set_state(port, SRC_HARD_RESET_VBUS_ON, PD_T_SRC_RECOVER);
break;
case SRC_HARD_RESET_VBUS_ON:
@ -3270,7 +3274,8 @@ static void run_state_machine(struct tcpm_port *port)
memset(&port->pps_data, 0, sizeof(port->pps_data));
tcpm_set_vconn(port, false);
tcpm_set_charge(port, false);
tcpm_set_roles(port, false, TYPEC_SINK, TYPEC_DEVICE);
tcpm_set_roles(port, port->self_powered, TYPEC_SINK,
TYPEC_DEVICE);
/*
* VBUS may or may not toggle, depending on the adapter.
* If it doesn't toggle, transition to SNK_HARD_RESET_SINK_ON
@ -4415,6 +4420,8 @@ sink:
return -EINVAL;
port->operating_snk_mw = mw / 1000;
port->self_powered = fwnode_property_read_bool(fwnode, "self-powered");
return 0;
}
@ -4723,6 +4730,7 @@ static int tcpm_copy_caps(struct tcpm_port *port,
port->typec_caps.prefer_role = tcfg->default_role;
port->typec_caps.type = tcfg->type;
port->typec_caps.data = tcfg->data;
port->self_powered = port->tcpc->config->self_powered;
return 0;
}


@ -800,20 +800,36 @@ int btrfs_dev_replace_cancel(struct btrfs_fs_info *fs_info)
case BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED:
result = BTRFS_IOCTL_DEV_REPLACE_RESULT_NOT_STARTED;
btrfs_dev_replace_write_unlock(dev_replace);
goto leave;
break;
case BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED:
result = BTRFS_IOCTL_DEV_REPLACE_RESULT_NO_ERROR;
tgt_device = dev_replace->tgtdev;
src_device = dev_replace->srcdev;
btrfs_dev_replace_write_unlock(dev_replace);
btrfs_scrub_cancel(fs_info);
/* btrfs_dev_replace_finishing() will handle the cleanup part */
btrfs_info_in_rcu(fs_info,
"dev_replace from %s (devid %llu) to %s canceled",
btrfs_dev_name(src_device), src_device->devid,
btrfs_dev_name(tgt_device));
break;
case BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED:
/*
* Scrub doing the replace isn't running so we need to do the
* cleanup step of btrfs_dev_replace_finishing() here
*/
result = BTRFS_IOCTL_DEV_REPLACE_RESULT_NO_ERROR;
tgt_device = dev_replace->tgtdev;
src_device = dev_replace->srcdev;
dev_replace->tgtdev = NULL;
dev_replace->srcdev = NULL;
break;
}
dev_replace->replace_state = BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED;
dev_replace->replace_state =
BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED;
dev_replace->time_stopped = ktime_get_real_seconds();
dev_replace->item_needs_writeback = 1;
btrfs_dev_replace_write_unlock(dev_replace);
btrfs_scrub_cancel(fs_info);
trans = btrfs_start_transaction(root, 0);
@ -825,14 +841,17 @@ int btrfs_dev_replace_cancel(struct btrfs_fs_info *fs_info)
WARN_ON(ret);
btrfs_info_in_rcu(fs_info,
"dev_replace from %s (devid %llu) to %s canceled",
"suspended dev_replace from %s (devid %llu) to %s canceled",
btrfs_dev_name(src_device), src_device->devid,
btrfs_dev_name(tgt_device));
if (tgt_device)
btrfs_destroy_dev_replace_tgtdev(tgt_device);
break;
default:
result = -EINVAL;
}
leave:
mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
return result;
}


@ -6440,14 +6440,19 @@ fail_dir_item:
err = btrfs_del_root_ref(trans, key.objectid,
root->root_key.objectid, parent_ino,
&local_index, name, name_len);
if (err)
btrfs_abort_transaction(trans, err);
} else if (add_backref) {
u64 local_index;
int err;
err = btrfs_del_inode_ref(trans, root, name, name_len,
ino, parent_ino, &local_index);
if (err)
btrfs_abort_transaction(trans, err);
}
/* Return the original error code */
return ret;
}


@ -4768,19 +4768,17 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
/*
* Use the number of data stripes to figure out how big this chunk
* is really going to be in terms of logical address space,
* and compare that answer with the max chunk size
* and compare that answer with the max chunk size. If it's higher,
* we try to reduce stripe_size.
*/
if (stripe_size * data_stripes > max_chunk_size) {
stripe_size = div_u64(max_chunk_size, data_stripes);
/* bump the answer up to a 16MB boundary */
stripe_size = round_up(stripe_size, SZ_16M);
/*
* But don't go higher than the limits we found while searching
* for free extents
* Reduce stripe_size, round it up to a 16MB boundary again and
* then use it, unless it ends up being even bigger than the
* previous value we had already.
*/
stripe_size = min(devices_info[ndevs - 1].max_avail,
stripe_size = min(round_up(div_u64(max_chunk_size,
data_stripes), SZ_16M),
stripe_size);
}
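The reworded comment above describes the clamping rule: when the chunk would exceed max_chunk_size, recompute stripe_size as max_chunk_size / data_stripes rounded up to 16 MiB, but never let it grow past the value already found. The same arithmetic stand-alone, with made-up sizes:

#include <stdint.h>
#include <stdio.h>

static uint64_t round_up_u64(uint64_t x, uint64_t align)
{
        return (x + align - 1) / align * align;
}

int main(void)
{
        const uint64_t SZ_16M = 16ull << 20;
        uint64_t stripe_size = 4ull << 30;      /* from the free-space search */
        uint64_t max_chunk_size = 10ull << 30;  /* chunk size cap */
        uint64_t data_stripes = 3;

        if (stripe_size * data_stripes > max_chunk_size) {
                uint64_t reduced = round_up_u64(max_chunk_size / data_stripes,
                                                SZ_16M);

                if (reduced < stripe_size)
                        stripe_size = reduced;  /* never grow past the old value */
        }
        printf("stripe_size = %llu MiB\n",
               (unsigned long long)(stripe_size >> 20));
        return 0;
}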
@ -7474,6 +7472,8 @@ int btrfs_verify_dev_extents(struct btrfs_fs_info *fs_info)
struct btrfs_path *path;
struct btrfs_root *root = fs_info->dev_root;
struct btrfs_key key;
u64 prev_devid = 0;
u64 prev_dev_ext_end = 0;
int ret = 0;
key.objectid = 1;
@ -7518,10 +7518,22 @@ int btrfs_verify_dev_extents(struct btrfs_fs_info *fs_info)
chunk_offset = btrfs_dev_extent_chunk_offset(leaf, dext);
physical_len = btrfs_dev_extent_length(leaf, dext);
/* Check if this dev extent overlaps with the previous one */
if (devid == prev_devid && physical_offset < prev_dev_ext_end) {
btrfs_err(fs_info,
"dev extent devid %llu physical offset %llu overlap with previous dev extent end %llu",
devid, physical_offset, prev_dev_ext_end);
ret = -EUCLEAN;
goto out;
}
ret = verify_one_dev_extent(fs_info, chunk_offset, devid,
physical_offset, physical_len);
if (ret < 0)
goto out;
prev_devid = devid;
prev_dev_ext_end = physical_offset + physical_len;
ret = btrfs_next_item(root, path);
if (ret < 0)
goto out;

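The overlap check added to btrfs_verify_dev_extents() relies on dev extents being visited in (devid, physical offset) order, so comparing each start against the end of the previous extent on the same device is sufficient. The same test in stand-alone form, with made-up extents:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct dev_extent {
        uint64_t devid;
        uint64_t offset;
        uint64_t len;
};

/* Extents are assumed sorted by (devid, offset), as in the dev tree walk. */
static bool extents_sane(const struct dev_extent *ext, int n)
{
        uint64_t prev_devid = 0, prev_end = 0;

        for (int i = 0; i < n; i++) {
                if (ext[i].devid == prev_devid && ext[i].offset < prev_end)
                        return false;   /* overlaps the previous extent */
                prev_devid = ext[i].devid;
                prev_end = ext[i].offset + ext[i].len;
        }
        return true;
}

int main(void)
{
        struct dev_extent ok[]  = { {1, 0, 16}, {1, 16, 16}, {2, 0, 16} };
        struct dev_extent bad[] = { {1, 0, 16}, {1, 8, 16} };

        printf("%d %d\n", extents_sane(ok, 3), extents_sane(bad, 2)); /* 1 0 */
        return 0;
}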

@ -488,16 +488,29 @@ done:
}
EXPORT_SYMBOL_GPL(iomap_readpages);
/*
* iomap_is_partially_uptodate checks whether blocks within a page are
* uptodate or not.
*
* Returns true if all blocks which correspond to a file portion
* we want to read within the page are uptodate.
*/
int
iomap_is_partially_uptodate(struct page *page, unsigned long from,
unsigned long count)
{
struct iomap_page *iop = to_iomap_page(page);
struct inode *inode = page->mapping->host;
unsigned first = from >> inode->i_blkbits;
unsigned last = (from + count - 1) >> inode->i_blkbits;
unsigned len, first, last;
unsigned i;
/* Limit range to one page */
len = min_t(unsigned, PAGE_SIZE - from, count);
/* First and last blocks in range within page */
first = from >> inode->i_blkbits;
last = (from + len - 1) >> inode->i_blkbits;
if (iop) {
for (i = first; i <= last; i++)
if (!test_bit(i, iop->uptodate))

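The iomap fix above clamps the byte range to the current page before converting it to block indices, so a read that spills into the next page cannot index past this page's uptodate bitmap. The arithmetic in isolation, with the page and block sizes hard-coded for the example:

#include <stdio.h>

#define EX_PAGE_SIZE 4096u

/* Clamp the byte range to one page, then convert to block indices. */
static void range_to_blocks(unsigned from, unsigned count, unsigned blkbits,
                            unsigned *first, unsigned *last)
{
        unsigned len = EX_PAGE_SIZE - from;

        if (count < len)
                len = count;            /* limit range to one page */
        *first = from >> blkbits;
        *last = (from + len - 1) >> blkbits;
}

int main(void)
{
        unsigned first, last;

        /* 1k blocks (blkbits = 10), read crossing into the next page */
        range_to_blocks(3500, 2000, 10, &first, &last);
        printf("first %u last %u\n", first, last);      /* 3 .. 3, not 3 .. 5 */
        return 0;
}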

@ -101,6 +101,7 @@ static int jffs2_sync_fs(struct super_block *sb, int wait)
struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);
#ifdef CONFIG_JFFS2_FS_WRITEBUFFER
if (jffs2_is_writebuffered(c))
cancel_delayed_work_sync(&c->wbuf_dwork);
#endif


@ -345,13 +345,18 @@ int ocfs2_load_local_alloc(struct ocfs2_super *osb)
if (num_used
|| alloc->id1.bitmap1.i_used
|| alloc->id1.bitmap1.i_total
|| la->la_bm_off)
mlog(ML_ERROR, "Local alloc hasn't been recovered!\n"
|| la->la_bm_off) {
mlog(ML_ERROR, "inconsistent detected, clean journal with"
" unrecovered local alloc, please run fsck.ocfs2!\n"
"found = %u, set = %u, taken = %u, off = %u\n",
num_used, le32_to_cpu(alloc->id1.bitmap1.i_used),
le32_to_cpu(alloc->id1.bitmap1.i_total),
OCFS2_LOCAL_ALLOC(alloc)->la_bm_off);
status = -EINVAL;
goto bail;
}
osb->local_alloc_bh = alloc_bh;
osb->local_alloc_state = OCFS2_LA_ENABLED;


@ -496,6 +496,11 @@ static int persistent_ram_post_init(struct persistent_ram_zone *prz, u32 sig,
sig ^= PERSISTENT_RAM_SIG;
if (prz->buffer->sig == sig) {
if (buffer_size(prz) == 0) {
pr_debug("found existing empty buffer\n");
return 0;
}
if (buffer_size(prz) > prz->buffer_size ||
buffer_start(prz) > buffer_size(prz))
pr_info("found existing invalid buffer, size %zu, start %zu\n",


@ -791,7 +791,8 @@ static int quotactl_cmd_write(int cmd)
/* Return true if quotactl command is manipulating quota on/off state */
static bool quotactl_cmd_onoff(int cmd)
{
return (cmd == Q_QUOTAON) || (cmd == Q_QUOTAOFF);
return (cmd == Q_QUOTAON) || (cmd == Q_QUOTAOFF) ||
(cmd == Q_XQUOTAON) || (cmd == Q_XQUOTAOFF);
}
/*


@ -736,10 +736,18 @@ void mremap_userfaultfd_prep(struct vm_area_struct *vma,
struct userfaultfd_ctx *ctx;
ctx = vma->vm_userfaultfd_ctx.ctx;
if (ctx && (ctx->features & UFFD_FEATURE_EVENT_REMAP)) {
if (!ctx)
return;
if (ctx->features & UFFD_FEATURE_EVENT_REMAP) {
vm_ctx->ctx = ctx;
userfaultfd_ctx_get(ctx);
WRITE_ONCE(ctx->mmap_changing, true);
} else {
/* Drop uffd context if remap feature not enabled */
vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
vma->vm_flags &= ~(VM_UFFD_WP | VM_UFFD_MISSING);
}
}


@ -258,6 +258,14 @@ static inline void wb_get(struct bdi_writeback *wb)
*/
static inline void wb_put(struct bdi_writeback *wb)
{
if (WARN_ON_ONCE(!wb->bdi)) {
/*
* A driver bug might cause a file to be removed before bdi was
* initialized.
*/
return;
}
if (wb != &wb->bdi->wb)
percpu_ref_put(&wb->refcnt);
}


@ -665,24 +665,10 @@ static inline u32 bpf_ctx_off_adjust_machine(u32 size)
return size;
}
static inline bool bpf_ctx_narrow_align_ok(u32 off, u32 size_access,
u32 size_default)
{
size_default = bpf_ctx_off_adjust_machine(size_default);
size_access = bpf_ctx_off_adjust_machine(size_access);
#ifdef __LITTLE_ENDIAN
return (off & (size_default - 1)) == 0;
#else
return (off & (size_default - 1)) + size_access == size_default;
#endif
}
static inline bool
bpf_ctx_narrow_access_ok(u32 off, u32 size, u32 size_default)
{
return bpf_ctx_narrow_align_ok(off, size, size_default) &&
size <= size_default && (size & (size - 1)) == 0;
return size <= size_default && (size & (size - 1)) == 0;
}
#define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0]))

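With bpf_ctx_narrow_align_ok() removed, the narrow-access check above reduces to "a power-of-two size no larger than the field". The same predicate stand-alone, with a few example sizes:

#include <stdbool.h>
#include <stdio.h>

/* Narrow context access is acceptable if it is a power-of-two size that
 * fits in the field; offset alignment is no longer checked here.
 */
static bool narrow_access_ok(unsigned size, unsigned size_default)
{
        return size <= size_default && (size & (size - 1)) == 0;
}

int main(void)
{
        printf("%d %d %d\n",
               narrow_access_ok(1, 4),  /* 1-byte load from a 4-byte field: ok */
               narrow_access_ok(3, 4),  /* not a power of two: rejected */
               narrow_access_ok(8, 4)); /* wider than the field: rejected */
        return 0;
}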

@ -232,7 +232,6 @@ struct swap_info_struct {
unsigned long flags; /* SWP_USED etc: see above */
signed short prio; /* swap priority of this type */
struct plist_node list; /* entry in swap_active_head */
struct plist_node avail_lists[MAX_NUMNODES];/* entry in swap_avail_heads */
signed char type; /* strange name for an index */
unsigned int max; /* extent of the swap_map */
unsigned char *swap_map; /* vmalloc'ed array of usage counts */
@ -273,6 +272,16 @@ struct swap_info_struct {
*/
struct work_struct discard_work; /* discard worker */
struct swap_cluster_list discard_clusters; /* discard clusters list */
struct plist_node avail_lists[0]; /*
* entries in swap_avail_heads, one
* entry per node.
* Must be last as the number of the
* array is nr_node_ids, which is not
* a fixed value so have to allocate
* dynamically.
* And it has to be an array so that
* plist_for_each_* can work.
*/
};
#ifdef CONFIG_64BIT

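Moving avail_lists to the end of swap_info_struct turns it into a flexible array member that is sized at allocation time (nr_node_ids entries in the kernel). A minimal stand-alone example of that allocation pattern; the struct below is a stand-in, not the real swap_info_struct:

#include <stdio.h>
#include <stdlib.h>

struct node_entry {
        int prio;
};

struct swap_like {
        unsigned long flags;
        struct node_entry avail[];      /* must be the last member */
};

int main(void)
{
        int nr_nodes = 4;               /* stands in for nr_node_ids */
        struct swap_like *si;

        si = malloc(sizeof(*si) + nr_nodes * sizeof(si->avail[0]));
        if (!si)
                return 1;
        for (int i = 0; i < nr_nodes; i++)
                si->avail[i].prio = -i;
        printf("node 3 prio %d\n", si->avail[3].prio);
        free(si);
        return 0;
}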

@ -89,6 +89,7 @@ struct tcpc_config {
enum typec_port_data data;
enum typec_role default_role;
bool try_role_hw; /* try.{src,snk} implemented in hardware */
bool self_powered; /* port belongs to a self powered device */
const struct typec_altmode_desc *alt_modes;
};

Some files were not shown because too many files have changed in this diff Show more