This is the 4.19.284 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmR14SUACgkQONu9yGCS
 aT5twA/9FzPtWeYCa9WdaW3YrlwXAwUSX+Q749XupcGbrXS1cljiB7XzvSQ48Ce9
 FrI+b4UNRmq1sjBq39GMVTCpVcis1PhI5uthvP/eNIazFvAb8Ksidsv10cGKtmi6
 dhe8+z6InAut46npKt+YHLTrgu+rkQ7nWk5thT52JLD2VsTf/AwNvy2wDVrtpwND
 XldYW/jP6GErmPXVdy2nBzP5kFKWpd6DIVrnKrP0g+G1UF6mV1mg2Bt9aoMyWenK
 TU9cv+FwAr40EmPSn6ooJbo0oOgJrkOidaoJEIgzOw4MWv/lNd6dijuKlkfKg56s
 elIa+TAlQBkkfXWNDSg8RCT0Im6iw+qVMmuIUvn4Y6zyFhQS2kBPZHavOHrIdYK3
 HKkEjl1l24z/k7HSkPVS+FR7YxF9EeQunJBJjA3NGLx4woFVoqCgCp5C5cAyC1D1
 lVE8lAPq/R5oIPgsL7WwYCdwvlnoA4R8HFmS/53ySRxQ839A0Ea1vQB96ISPdoGk
 AxU1DarM+BxLQbYVaW+HmDctox0wlhV9pmlSmRNzGDno0OsME9e7grUSxBC96ogf
 GFFYs2zTKE8y9/1LzBQSrJdXHjofOrupIEVHHcb8Bit6tuQ/hAIYl4erzIrJUc1e
 c0OuIcevfYbyUFYzYnWFkPWo0akRWcAIUKba5rzQV4lPpCGOfJc=
 =0ViV
 -----END PGP SIGNATURE-----

Merge 4.19.284 into android-4.19-stable

Changes in 4.19.284
	net: Fix load-tearing on sk->sk_stamp in sock_recv_cmsgs().
	netlink: annotate accesses to nlk->cb_running
	net: annotate sk->sk_err write from do_recvmmsg()
	tcp: reduce POLLOUT events caused by TCP_NOTSENT_LOWAT
	tcp: return EPOLLOUT from tcp_poll only when notsent_bytes is half the limit
	tcp: factor out __tcp_close() helper
	tcp: add annotations around sk->sk_shutdown accesses
	ipvlan:Fix out-of-bounds caused by unclear skb->cb
	net: datagram: fix data-races in datagram_poll()
	af_unix: Fix a data race of sk->sk_receive_queue->qlen.
	af_unix: Fix data races around sk->sk_shutdown.
	fs: hfsplus: remove WARN_ON() from hfsplus_cat_{read,write}_inode()
	drm/amd/display: Use DC_LOG_DC in the trasform pixel function
	regmap: cache: Return error in cache sync operations for REGCACHE_NONE
	memstick: r592: Fix UAF bug in r592_remove due to race condition
	firmware: arm_sdei: Fix sleep from invalid context BUG
	ACPI: EC: Fix oops when removing custom query handlers
	drm/tegra: Avoid potential 32-bit integer overflow
	ACPICA: Avoid undefined behavior: applying zero offset to null pointer
	ACPICA: ACPICA: check null return of ACPI_ALLOCATE_ZEROED in acpi_db_display_objects
	wifi: brcmfmac: cfg80211: Pass the PMK in binary instead of hex
	ext2: Check block size validity during mount
	net: pasemi: Fix return type of pasemi_mac_start_tx()
	net: Catch invalid index in XPS mapping
	lib: cpu_rmap: Avoid use after free on rmap->obj array entries
	scsi: message: mptlan: Fix use after free bug in mptlan_remove() due to race condition
	gfs2: Fix inode height consistency check
	ext4: set goal start correctly in ext4_mb_normalize_request
	ext4: Fix best extent lstart adjustment logic in ext4_mb_new_inode_pa()
	f2fs: fix to drop all dirty pages during umount() if cp_error is set
	wifi: iwlwifi: dvm: Fix memcpy: detected field-spanning write backtrace
	Bluetooth: L2CAP: fix "bad unlock balance" in l2cap_disconnect_rsp
	staging: rtl8192e: Replace macro RTL_PCI_DEVICE with PCI_DEVICE
	HID: logitech-hidpp: Don't use the USB serial for USB devices
	HID: logitech-hidpp: Reconcile USB and Unifying serials
	spi: spi-imx: fix MX51_ECSPI_* macros when cs > 3
	HID: wacom: generic: Set battery quirk only when we see battery data
	usb: typec: tcpm: fix multiple times discover svids error
	serial: 8250: Reinit port->pm on port specific driver unbind
	mcb-pci: Reallocate memory region to avoid memory overlapping
	sched: Fix KCSAN noinstr violation
	recordmcount: Fix memory leaks in the uwrite function
	clk: tegra20: fix gcc-7 constant overflow warning
	Input: xpad - add constants for GIP interface numbers
	phy: st: miphy28lp: use _poll_timeout functions for waits
	mfd: dln2: Fix memory leak in dln2_probe()
	btrfs: replace calls to btrfs_find_free_ino with btrfs_find_free_objectid
	btrfs: fix space cache inconsistency after error loading it from disk
	cpupower: Make TSC read per CPU for Mperf monitor
	af_key: Reject optional tunnel/BEET mode templates in outbound policies
	net: fec: Better handle pm_runtime_get() failing in .remove()
	vsock: avoid to close connected socket after the timeout
	drivers: provide devm_platform_ioremap_resource()
	serial: arc_uart: fix of_iomap leak in `arc_serial_probe`
	ip6_gre: Fix skb_under_panic in __gre6_xmit()
	ip6_gre: Make o_seqno start from 0 in native mode
	ip_gre, ip6_gre: Fix race condition on o_seqno in collect_md mode
	erspan: get the proto with the md version for collect_md
	media: netup_unidvb: fix use-after-free at del_timer()
	drm/exynos: fix g2d_open/close helper function definitions
	net: nsh: Use correct mac_offset to unwind gso skb in nsh_gso_segment()
	net: bcmgenet: Remove phy_stop() from bcmgenet_netif_stop()
	net: bcmgenet: Restore phy_stop() depending upon suspend/close
	cassini: Fix a memory leak in the error handling path of cas_init_one()
	igb: fix bit_shift to be in [1..8] range
	vlan: fix a potential uninit-value in vlan_dev_hard_start_xmit()
	usb-storage: fix deadlock when a scsi command timeouts more than once
	usb: typec: altmodes/displayport: fix pin_assignment_show
	ALSA: hda: Fix Oops by 9.1 surround channel names
	ALSA: hda: Add NVIDIA codec IDs a3 through a7 to patch table
	statfs: enforce statfs[64] structure initialization
	serial: Add support for Advantech PCI-1611U card
	ceph: force updating the msg pointer in non-split case
	tpm/tpm_tis: Disable interrupts for more Lenovo devices
	nilfs2: fix use-after-free bug of nilfs_root in nilfs_evict_inode()
	netfilter: nftables: add nft_parse_register_load() and use it
	netfilter: nftables: add nft_parse_register_store() and use it
	netfilter: nftables: statify nft_parse_register()
	netfilter: nf_tables: validate registers coming from userspace.
	netfilter: nf_tables: add nft_setelem_parse_key()
	netfilter: nf_tables: allow up to 64 bytes in the set element data area
	netfilter: nf_tables: stricter validation of element data
	netfilter: nf_tables: validate NFTA_SET_ELEM_OBJREF based on NFT_SET_OBJECT flag
	netfilter: nf_tables: do not allow RULE_ID to refer to another chain
	HID: wacom: Force pen out of prox if no events have been received in a while
	Add Acer Aspire Ethos 8951G model quirk
	ALSA: hda/realtek - More constifications
	ALSA: hda/realtek - Add Headset Mic supported for HP cPC
	ALSA: hda/realtek - Enable headset mic of Acer X2660G with ALC662
	ALSA: hda/realtek - Enable the headset of Acer N50-600 with ALC662
	ALSA: hda/realtek - The front Mic on a HP machine doesn't work
	ALSA: hda/realtek: Fix the mic type detection issue for ASUS G551JW
	ALSA: hda/realtek - Add headset Mic support for Lenovo ALC897 platform
	ALSA: hda/realtek - ALC897 headset MIC no sound
	ALSA: hda/realtek: Add a quirk for HP EliteDesk 805
	lib/string_helpers: Introduce string_upper() and string_lower() helpers
	usb: gadget: u_ether: Convert prints to device prints
	usb: gadget: u_ether: Fix host MAC address case
	vc_screen: rewrite vcs_size to accept vc, not inode
	vc_screen: reload load of struct vc_data pointer in vcs_write() to avoid UAF
	s390/qdio: get rid of register asm
	s390/qdio: fix do_sqbs() inline assembly constraint
	spi: spi-fsl-spi: automatically adapt bits-per-word in cpu mode
	spi: fsl-spi: Re-organise transfer bits_per_word adaptation
	spi: fsl-cpm: Use 16 bit mode for large transfers with even size
	ALSA: hda/ca0132: add quirk for EVGA X299 DARK
	m68k: Move signal frame following exception on 68020/030
	parisc: Allow to reboot machine after system halt
	btrfs: use nofs when cleaning up aborted transactions
	x86/mm: Avoid incomplete Global INVLPG flushes
	selftests/memfd: Fix unknown type name build failure
	parisc: Fix flush_dcache_page() for usage from irq context
	ALSA: hda/realtek - Fixed one of HP ALC671 platform Headset Mic supported
	ALSA: hda/realtek - Fix inverted bass GPIO pin on Acer 8951G
	udplite: Fix NULL pointer dereference in __sk_mem_raise_allocated().
	USB: core: Add routines for endpoint checks in old drivers
	USB: sisusbvga: Add endpoint checks
	media: radio-shark: Add endpoint checks
	net: fix skb leak in __skb_tstamp_tx()
	bpf: Fix mask generation for 32-bit narrow loads of 64-bit fields
	ipv6: Fix out-of-bounds access in ipv6_find_tlv()
	power: supply: leds: Fix blink to LED on transition
	power: supply: bq27xxx: Fix bq27xxx_battery_update() race condition
	power: supply: bq27xxx: Fix I2C IRQ race on remove
	power: supply: bq27xxx: Fix poll_interval handling and races on remove
	power: supply: sbs-charger: Fix INHIBITED bit for Status reg
	coresight: Fix signedness bug in tmc_etr_buf_insert_barrier_packet()
	xen/pvcalls-back: fix double frees with pvcalls_new_active_socket()
	x86/show_trace_log_lvl: Ensure stack pointer is aligned, again
	ASoC: Intel: Skylake: Fix declaration of enum skl_ch_cfg
	forcedeth: Fix an error handling path in nv_probe()
	3c589_cs: Fix an error handling path in tc589_probe()
	drivers: depend on HAS_IOMEM for devm_platform_ioremap_resource()
	Linux 4.19.284

Change-Id: I88843be551e748e295ea608158a2db7ab4486a65
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Greg Kroah-Hartman 2023-06-08 11:16:01 +00:00
commit 4e2cad2c2a
152 changed files with 1632 additions and 795 deletions


@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 4
PATCHLEVEL = 19
SUBLEVEL = 283
SUBLEVEL = 284
EXTRAVERSION =
NAME = "People's Front"


@ -882,11 +882,17 @@ static inline int rt_setup_ucontext(struct ucontext __user *uc, struct pt_regs *
}
static inline void __user *
get_sigframe(struct ksignal *ksig, size_t frame_size)
get_sigframe(struct ksignal *ksig, struct pt_regs *tregs, size_t frame_size)
{
unsigned long usp = sigsp(rdusp(), ksig);
unsigned long gap = 0;
return (void __user *)((usp - frame_size) & -8UL);
if (CPU_IS_020_OR_030 && tregs->format == 0xb) {
/* USP is unreliable so use worst-case value */
gap = 256;
}
return (void __user *)((usp - gap - frame_size) & -8UL);
}
static int setup_frame(struct ksignal *ksig, sigset_t *set,
@ -904,7 +910,7 @@ static int setup_frame(struct ksignal *ksig, sigset_t *set,
return -EFAULT;
}
frame = get_sigframe(ksig, sizeof(*frame) + fsize);
frame = get_sigframe(ksig, tregs, sizeof(*frame) + fsize);
if (fsize)
err |= copy_to_user (frame + 1, regs + 1, fsize);
@ -975,7 +981,7 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
return -EFAULT;
}
frame = get_sigframe(ksig, sizeof(*frame));
frame = get_sigframe(ksig, tregs, sizeof(*frame));
if (fsize)
err |= copy_to_user (&frame->uc.uc_extra, regs + 1, fsize);


@ -57,6 +57,11 @@ extern void flush_dcache_page(struct page *page);
#define flush_dcache_mmap_lock(mapping) xa_lock_irq(&mapping->i_pages)
#define flush_dcache_mmap_unlock(mapping) xa_unlock_irq(&mapping->i_pages)
#define flush_dcache_mmap_lock_irqsave(mapping, flags) \
xa_lock_irqsave(&mapping->i_pages, flags)
#define flush_dcache_mmap_unlock_irqrestore(mapping, flags) \
xa_unlock_irqrestore(&mapping->i_pages, flags)
#define flush_icache_page(vma,page) do { \
flush_kernel_dcache_page(page); \


@ -309,6 +309,7 @@ void flush_dcache_page(struct page *page)
struct vm_area_struct *mpnt;
unsigned long offset;
unsigned long addr, old_addr = 0;
unsigned long flags;
pgoff_t pgoff;
if (mapping && !mapping_mapped(mapping)) {
@ -328,7 +329,7 @@ void flush_dcache_page(struct page *page)
* declared as MAP_PRIVATE or MAP_SHARED), so we only need
* to flush one address here for them all to become coherent */
flush_dcache_mmap_lock(mapping);
flush_dcache_mmap_lock_irqsave(mapping, flags);
vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
addr = mpnt->vm_start + offset;
@ -351,7 +352,7 @@ void flush_dcache_page(struct page *page)
old_addr = addr;
}
}
flush_dcache_mmap_unlock(mapping);
flush_dcache_mmap_unlock_irqrestore(mapping, flags);
}
EXPORT_SYMBOL(flush_dcache_page);


@ -138,13 +138,18 @@ void machine_power_off(void)
/* It seems we have no way to power the system off via
* software. The user has to press the button himself. */
printk(KERN_EMERG "System shut down completed.\n"
"Please power this system off now.");
printk("Power off or press RETURN to reboot.\n");
/* prevent soft lockup/stalled CPU messages for endless loop. */
rcu_sysrq_start();
lockup_detector_soft_poweroff();
for (;;);
while (1) {
/* reboot if user presses RETURN key */
if (pdc_iodc_getc() == 13) {
printk("Rebooting...\n");
machine_restart(NULL);
}
}
}
void (*pm_power_off)(void);


@ -74,6 +74,11 @@
#define INTEL_FAM6_LAKEFIELD 0x8A
#define INTEL_FAM6_ALDERLAKE 0x97
#define INTEL_FAM6_ALDERLAKE_L 0x9A
#define INTEL_FAM6_ALDERLAKE_N 0xBE
#define INTEL_FAM6_RAPTORLAKE 0xB7
#define INTEL_FAM6_RAPTORLAKE_P 0xBA
#define INTEL_FAM6_RAPTORLAKE_S 0xBF
/* "Small Core" Processors (Atom) */


@ -171,7 +171,6 @@ void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
printk("%sCall Trace:\n", log_lvl);
unwind_start(&state, task, regs, stack);
stack = stack ? : get_stack_pointer(task, regs);
regs = unwind_get_entry_regs(&state, &partial);
/*
@ -190,9 +189,13 @@ void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
* - hardirq stack
* - entry stack
*/
for ( ; stack; stack = PTR_ALIGN(stack_info.next_sp, sizeof(long))) {
for (stack = stack ?: get_stack_pointer(task, regs);
stack;
stack = stack_info.next_sp) {
const char *stack_name;
stack = PTR_ALIGN(stack, sizeof(long));
if (get_stack_info(stack, task, &stack_info, &visit_mask)) {
/*
* We weren't on a valid stack. It's possible that


@ -9,6 +9,7 @@
#include <linux/kmemleak.h>
#include <asm/set_memory.h>
#include <asm/cpu_device_id.h>
#include <asm/e820/api.h>
#include <asm/init.h>
#include <asm/page.h>
@ -207,6 +208,24 @@ static void __init probe_page_size_mask(void)
}
}
#define INTEL_MATCH(_model) { .vendor = X86_VENDOR_INTEL, \
.family = 6, \
.model = _model, \
}
/*
* INVLPG may not properly flush Global entries
* on these CPUs when PCIDs are enabled.
*/
static const struct x86_cpu_id invlpg_miss_ids[] = {
INTEL_MATCH(INTEL_FAM6_ALDERLAKE ),
INTEL_MATCH(INTEL_FAM6_ALDERLAKE_L ),
INTEL_MATCH(INTEL_FAM6_ALDERLAKE_N ),
INTEL_MATCH(INTEL_FAM6_RAPTORLAKE ),
INTEL_MATCH(INTEL_FAM6_RAPTORLAKE_P),
INTEL_MATCH(INTEL_FAM6_RAPTORLAKE_S),
{}
};
static void setup_pcid(void)
{
if (!IS_ENABLED(CONFIG_X86_64))
@ -215,6 +234,12 @@ static void setup_pcid(void)
if (!boot_cpu_has(X86_FEATURE_PCID))
return;
if (x86_match_cpu(invlpg_miss_ids)) {
pr_info("Incomplete global flushes, disabling PCID");
setup_clear_cpu_cap(X86_FEATURE_PCID);
return;
}
if (boot_cpu_has(X86_FEATURE_PGE)) {
/*
* This can't be cr4_set_bits_and_update_boot() -- the


@ -571,6 +571,9 @@ acpi_status acpi_db_display_objects(char *obj_type_arg, char *display_count_arg)
object_info =
ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_object_info));
if (!object_info)
return (AE_NO_MEMORY);
/* Walk the namespace from the root */
(void)acpi_walk_namespace(ACPI_TYPE_ANY, ACPI_ROOT_OBJECT,


@ -576,9 +576,14 @@ acpi_ds_init_aml_walk(struct acpi_walk_state *walk_state,
ACPI_FUNCTION_TRACE(ds_init_aml_walk);
walk_state->parser_state.aml =
walk_state->parser_state.aml_start = aml_start;
walk_state->parser_state.aml_end =
walk_state->parser_state.pkg_end = aml_start + aml_length;
walk_state->parser_state.aml_start =
walk_state->parser_state.aml_end =
walk_state->parser_state.pkg_end = aml_start;
/* Avoid undefined behavior: applying zero offset to null pointer */
if (aml_length != 0) {
walk_state->parser_state.aml_end += aml_length;
walk_state->parser_state.pkg_end += aml_length;
}
/* The next_op of the next_walk will be the beginning of the method */


@ -1153,6 +1153,7 @@ static void acpi_ec_remove_query_handlers(struct acpi_ec *ec,
void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit)
{
acpi_ec_remove_query_handlers(ec, false, query_bit);
flush_workqueue(ec_query_wq);
}
EXPORT_SYMBOL_GPL(acpi_ec_remove_query_handler);


@ -80,6 +80,26 @@ struct resource *platform_get_resource(struct platform_device *dev,
}
EXPORT_SYMBOL_GPL(platform_get_resource);
/**
* devm_platform_ioremap_resource - call devm_ioremap_resource() for a platform
* device
*
* @pdev: platform device to use both for memory resource lookup as well as
* resource managemend
* @index: resource index
*/
#ifdef CONFIG_HAS_IOMEM
void __iomem *devm_platform_ioremap_resource(struct platform_device *pdev,
unsigned int index)
{
struct resource *res;
res = platform_get_resource(pdev, IORESOURCE_MEM, index);
return devm_ioremap_resource(&pdev->dev, res);
}
EXPORT_SYMBOL_GPL(devm_platform_ioremap_resource);
#endif /* CONFIG_HAS_IOMEM */
/**
* platform_get_irq - get an IRQ for a device
* @dev: platform device

View file

@ -347,6 +347,9 @@ int regcache_sync(struct regmap *map)
const char *name;
bool bypass;
if (WARN_ON(map->cache_type == REGCACHE_NONE))
return -EINVAL;
BUG_ON(!map->cache_ops);
map->lock(map->lock_arg);
@ -416,6 +419,9 @@ int regcache_sync_region(struct regmap *map, unsigned int min,
const char *name;
bool bypass;
if (WARN_ON(map->cache_type == REGCACHE_NONE))
return -EINVAL;
BUG_ON(!map->cache_ops);
map->lock(map->lock_arg);


@ -87,6 +87,22 @@ static const struct dmi_system_id tpm_tis_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T490s"),
},
},
{
.callback = tpm_tis_disable_irq,
.ident = "ThinkStation P360 Tiny",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkStation P360 Tiny"),
},
},
{
.callback = tpm_tis_disable_irq,
.ident = "ThinkPad L490",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L490"),
},
},
{}
};


@ -29,24 +29,24 @@
#define MISC_CLK_ENB 0x48
#define OSC_CTRL 0x50
#define OSC_CTRL_OSC_FREQ_MASK (3<<30)
#define OSC_CTRL_OSC_FREQ_13MHZ (0<<30)
#define OSC_CTRL_OSC_FREQ_19_2MHZ (1<<30)
#define OSC_CTRL_OSC_FREQ_12MHZ (2<<30)
#define OSC_CTRL_OSC_FREQ_26MHZ (3<<30)
#define OSC_CTRL_MASK (0x3f2 | OSC_CTRL_OSC_FREQ_MASK)
#define OSC_CTRL_OSC_FREQ_MASK (3u<<30)
#define OSC_CTRL_OSC_FREQ_13MHZ (0u<<30)
#define OSC_CTRL_OSC_FREQ_19_2MHZ (1u<<30)
#define OSC_CTRL_OSC_FREQ_12MHZ (2u<<30)
#define OSC_CTRL_OSC_FREQ_26MHZ (3u<<30)
#define OSC_CTRL_MASK (0x3f2u | OSC_CTRL_OSC_FREQ_MASK)
#define OSC_CTRL_PLL_REF_DIV_MASK (3<<28)
#define OSC_CTRL_PLL_REF_DIV_1 (0<<28)
#define OSC_CTRL_PLL_REF_DIV_2 (1<<28)
#define OSC_CTRL_PLL_REF_DIV_4 (2<<28)
#define OSC_CTRL_PLL_REF_DIV_MASK (3u<<28)
#define OSC_CTRL_PLL_REF_DIV_1 (0u<<28)
#define OSC_CTRL_PLL_REF_DIV_2 (1u<<28)
#define OSC_CTRL_PLL_REF_DIV_4 (2u<<28)
#define OSC_FREQ_DET 0x58
#define OSC_FREQ_DET_TRIG (1<<31)
#define OSC_FREQ_DET_TRIG (1u<<31)
#define OSC_FREQ_DET_STATUS 0x5c
#define OSC_FREQ_DET_BUSY (1<<31)
#define OSC_FREQ_DET_CNT_MASK 0xFFFF
#define OSC_FREQ_DET_BUSY (1u<<31)
#define OSC_FREQ_DET_CNT_MASK 0xFFFFu
#define TEGRA20_CLK_PERIPH_BANKS 3


@ -43,6 +43,8 @@ static asmlinkage void (*sdei_firmware_call)(unsigned long function_id,
/* entry point from firmware to arch asm code */
static unsigned long sdei_entry_point;
static int sdei_hp_state;
struct sdei_event {
/* These three are protected by the sdei_list_lock */
struct list_head list;
@ -303,8 +305,6 @@ int sdei_mask_local_cpu(void)
{
int err;
WARN_ON_ONCE(preemptible());
err = invoke_sdei_fn(SDEI_1_0_FN_SDEI_PE_MASK, 0, 0, 0, 0, 0, NULL);
if (err && err != -EIO) {
pr_warn_once("failed to mask CPU[%u]: %d\n",
@ -317,6 +317,7 @@ int sdei_mask_local_cpu(void)
static void _ipi_mask_cpu(void *ignored)
{
WARN_ON_ONCE(preemptible());
sdei_mask_local_cpu();
}
@ -324,8 +325,6 @@ int sdei_unmask_local_cpu(void)
{
int err;
WARN_ON_ONCE(preemptible());
err = invoke_sdei_fn(SDEI_1_0_FN_SDEI_PE_UNMASK, 0, 0, 0, 0, 0, NULL);
if (err && err != -EIO) {
pr_warn_once("failed to unmask CPU[%u]: %d\n",
@ -338,6 +337,7 @@ int sdei_unmask_local_cpu(void)
static void _ipi_unmask_cpu(void *ignored)
{
WARN_ON_ONCE(preemptible());
sdei_unmask_local_cpu();
}
@ -345,6 +345,8 @@ static void _ipi_private_reset(void *ignored)
{
int err;
WARN_ON_ONCE(preemptible());
err = invoke_sdei_fn(SDEI_1_0_FN_SDEI_PRIVATE_RESET, 0, 0, 0, 0, 0,
NULL);
if (err && err != -EIO)
@ -391,8 +393,6 @@ static void _local_event_enable(void *data)
int err;
struct sdei_crosscall_args *arg = data;
WARN_ON_ONCE(preemptible());
err = sdei_api_event_enable(arg->event->event_num);
sdei_cross_call_return(arg, err);
@ -483,8 +483,6 @@ static void _local_event_unregister(void *data)
int err;
struct sdei_crosscall_args *arg = data;
WARN_ON_ONCE(preemptible());
err = sdei_api_event_unregister(arg->event->event_num);
sdei_cross_call_return(arg, err);
@ -573,8 +571,6 @@ static void _local_event_register(void *data)
struct sdei_registered_event *reg;
struct sdei_crosscall_args *arg = data;
WARN_ON(preemptible());
reg = per_cpu_ptr(arg->event->private_registered, smp_processor_id());
err = sdei_api_event_register(arg->event->event_num, sdei_entry_point,
reg, 0, 0);
@ -754,6 +750,8 @@ static int sdei_pm_notifier(struct notifier_block *nb, unsigned long action,
{
int rv;
WARN_ON_ONCE(preemptible());
switch (action) {
case CPU_PM_ENTER:
rv = sdei_mask_local_cpu();
@ -802,7 +800,7 @@ static int sdei_device_freeze(struct device *dev)
int err;
/* unregister private events */
cpuhp_remove_state(CPUHP_AP_ARM_SDEI_STARTING);
cpuhp_remove_state(sdei_entry_point);
err = sdei_unregister_shared();
if (err)
@ -823,12 +821,15 @@ static int sdei_device_thaw(struct device *dev)
return err;
}
err = cpuhp_setup_state(CPUHP_AP_ARM_SDEI_STARTING, "SDEI",
err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "SDEI",
&sdei_cpuhp_up, &sdei_cpuhp_down);
if (err)
if (err < 0) {
pr_warn("Failed to re-register CPU hotplug notifier...\n");
return err;
}
return err;
sdei_hp_state = err;
return 0;
}
static int sdei_device_restore(struct device *dev)
@ -860,7 +861,7 @@ static int sdei_reboot_notifier(struct notifier_block *nb, unsigned long action,
* We are going to reset the interface, after this there is no point
* doing work when we take CPUs offline.
*/
cpuhp_remove_state(CPUHP_AP_ARM_SDEI_STARTING);
cpuhp_remove_state(sdei_hp_state);
sdei_platform_reset();
@ -973,13 +974,15 @@ static int sdei_probe(struct platform_device *pdev)
goto remove_cpupm;
}
err = cpuhp_setup_state(CPUHP_AP_ARM_SDEI_STARTING, "SDEI",
err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "SDEI",
&sdei_cpuhp_up, &sdei_cpuhp_down);
if (err) {
if (err < 0) {
pr_warn("Failed to register CPU hotplug notifier...\n");
goto remove_reboot;
}
sdei_hp_state = err;
return 0;
remove_reboot:


@ -778,7 +778,7 @@ static void dce_transform_set_pixel_storage_depth(
color_depth = COLOR_DEPTH_101010;
pixel_depth = 0;
expan_mode = 1;
BREAK_TO_DEBUGGER();
DC_LOG_DC("The pixel depth %d is not valid, set COLOR_DEPTH_101010 instead.", depth);
break;
}
@ -792,8 +792,7 @@ static void dce_transform_set_pixel_storage_depth(
if (!(xfm_dce->lb_pixel_depth_supported & depth)) {
/*we should use unsupported capabilities
* unless it is required by w/a*/
DC_LOG_WARNING("%s: Capability not supported",
__func__);
DC_LOG_DC("%s: Capability not supported", __func__);
}
}


@ -37,11 +37,11 @@ static inline int exynos_g2d_exec_ioctl(struct drm_device *dev, void *data,
return -ENODEV;
}
int g2d_open(struct drm_device *drm_dev, struct drm_file *file)
static inline int g2d_open(struct drm_device *drm_dev, struct drm_file *file)
{
return 0;
}
void g2d_close(struct drm_device *drm_dev, struct drm_file *file)
static inline void g2d_close(struct drm_device *drm_dev, struct drm_file *file)
{ }
#endif


@ -829,7 +829,7 @@ static int tegra_sor_compute_config(struct tegra_sor *sor,
struct drm_dp_link *link)
{
const u64 f = 100000, link_rate = link->rate * 1000;
const u64 pclk = mode->clock * 1000;
const u64 pclk = (u64)mode->clock * 1000;
u64 input, output, watermark, num;
struct tegra_sor_params params;
u32 num_syms_per_line;


@ -675,8 +675,7 @@ static int hidpp_unifying_init(struct hidpp_device *hidpp)
if (ret)
return ret;
snprintf(hdev->uniq, sizeof(hdev->uniq), "%04x-%4phD",
hdev->product, &serial);
snprintf(hdev->uniq, sizeof(hdev->uniq), "%4phD", &serial);
dbg_hid("HID++ Unifying: Got serial: %s\n", hdev->uniq);
name = hidpp_unifying_get_name(hidpp);
@ -777,6 +776,54 @@ static bool hidpp_is_connected(struct hidpp_device *hidpp)
return ret == 0;
}
/* -------------------------------------------------------------------------- */
/* 0x0003: Device Information */
/* -------------------------------------------------------------------------- */
#define HIDPP_PAGE_DEVICE_INFORMATION 0x0003
#define CMD_GET_DEVICE_INFO 0x00
static int hidpp_get_serial(struct hidpp_device *hidpp, u32 *serial)
{
struct hidpp_report response;
u8 feature_type;
u8 feature_index;
int ret;
ret = hidpp_root_get_feature(hidpp, HIDPP_PAGE_DEVICE_INFORMATION,
&feature_index,
&feature_type);
if (ret)
return ret;
ret = hidpp_send_fap_command_sync(hidpp, feature_index,
CMD_GET_DEVICE_INFO,
NULL, 0, &response);
if (ret)
return ret;
/* See hidpp_unifying_get_serial() */
*serial = *((u32 *)&response.rap.params[1]);
return 0;
}
static int hidpp_serial_init(struct hidpp_device *hidpp)
{
struct hid_device *hdev = hidpp->hid_dev;
u32 serial;
int ret;
ret = hidpp_get_serial(hidpp, &serial);
if (ret)
return ret;
snprintf(hdev->uniq, sizeof(hdev->uniq), "%4phD", &serial);
dbg_hid("HID++ DeviceInformation: Got serial: %s\n", hdev->uniq);
return 0;
}
/* -------------------------------------------------------------------------- */
/* 0x0005: GetDeviceNameType */
/* -------------------------------------------------------------------------- */
@ -3040,6 +3087,8 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
if (hidpp->quirks & HIDPP_QUIRK_UNIFYING)
hidpp_unifying_init(hidpp);
else if (hid_is_usb(hidpp->hid_dev))
hidpp_serial_init(hidpp);
connected = hidpp_is_connected(hidpp);
atomic_set(&hidpp->connected, connected);


@ -94,6 +94,7 @@
#include <linux/leds.h>
#include <linux/usb/input.h>
#include <linux/power_supply.h>
#include <linux/timer.h>
#include <asm/unaligned.h>
/*
@ -170,6 +171,7 @@ struct wacom {
struct delayed_work init_work;
struct wacom_remote *remote;
struct work_struct mode_change_work;
struct timer_list idleprox_timer;
bool generic_has_leds;
struct wacom_leds {
struct wacom_group_leds *groups;
@ -242,4 +244,5 @@ struct wacom_led *wacom_led_find(struct wacom *wacom, unsigned int group,
struct wacom_led *wacom_led_next(struct wacom *wacom, struct wacom_led *cur);
int wacom_equivalent_usage(int usage);
int wacom_initialize_leds(struct wacom *wacom);
void wacom_idleprox_timeout(struct timer_list *list);
#endif


@ -2754,6 +2754,7 @@ static int wacom_probe(struct hid_device *hdev,
INIT_WORK(&wacom->battery_work, wacom_battery_work);
INIT_WORK(&wacom->remote_work, wacom_remote_work);
INIT_WORK(&wacom->mode_change_work, wacom_mode_change_work);
timer_setup(&wacom->idleprox_timer, &wacom_idleprox_timeout, TIMER_DEFERRABLE);
/* ask for the report descriptor to be loaded by HID */
error = hid_parse(hdev);
@ -2802,6 +2803,7 @@ static void wacom_remove(struct hid_device *hdev)
cancel_work_sync(&wacom->battery_work);
cancel_work_sync(&wacom->remote_work);
cancel_work_sync(&wacom->mode_change_work);
del_timer_sync(&wacom->idleprox_timer);
if (hdev->bus == BUS_BLUETOOTH)
device_remove_file(&hdev->dev, &dev_attr_speed);


@ -15,6 +15,7 @@
#include "wacom_wac.h"
#include "wacom.h"
#include <linux/input/mt.h>
#include <linux/jiffies.h>
/* resolution for penabled devices */
#define WACOM_PL_RES 20
@ -45,6 +46,43 @@ static int wacom_numbered_button_to_key(int n);
static void wacom_update_led(struct wacom *wacom, int button_count, int mask,
int group);
static void wacom_force_proxout(struct wacom_wac *wacom_wac)
{
struct input_dev *input = wacom_wac->pen_input;
wacom_wac->shared->stylus_in_proximity = 0;
input_report_key(input, BTN_TOUCH, 0);
input_report_key(input, BTN_STYLUS, 0);
input_report_key(input, BTN_STYLUS2, 0);
input_report_key(input, BTN_STYLUS3, 0);
input_report_key(input, wacom_wac->tool[0], 0);
if (wacom_wac->serial[0]) {
input_report_abs(input, ABS_MISC, 0);
}
input_report_abs(input, ABS_PRESSURE, 0);
wacom_wac->tool[0] = 0;
wacom_wac->id[0] = 0;
wacom_wac->serial[0] = 0;
input_sync(input);
}
void wacom_idleprox_timeout(struct timer_list *list)
{
struct wacom *wacom = from_timer(wacom, list, idleprox_timer);
struct wacom_wac *wacom_wac = &wacom->wacom_wac;
if (!wacom_wac->hid_data.sense_state) {
return;
}
hid_warn(wacom->hdev, "%s: tool appears to be hung in-prox. forcing it out.\n", __func__);
wacom_force_proxout(wacom_wac);
}
/*
* Percent of battery capacity for Graphire.
* 8th value means AC online and show 100% capacity.
@ -1839,18 +1877,7 @@ static void wacom_map_usage(struct input_dev *input, struct hid_usage *usage,
static void wacom_wac_battery_usage_mapping(struct hid_device *hdev,
struct hid_field *field, struct hid_usage *usage)
{
struct wacom *wacom = hid_get_drvdata(hdev);
struct wacom_wac *wacom_wac = &wacom->wacom_wac;
struct wacom_features *features = &wacom_wac->features;
unsigned equivalent_usage = wacom_equivalent_usage(usage->hid);
switch (equivalent_usage) {
case HID_DG_BATTERYSTRENGTH:
case WACOM_HID_WD_BATTERY_LEVEL:
case WACOM_HID_WD_BATTERY_CHARGING:
features->quirks |= WACOM_QUIRK_BATTERY;
break;
}
return;
}
static void wacom_wac_battery_event(struct hid_device *hdev, struct hid_field *field,
@ -1871,18 +1898,21 @@ static void wacom_wac_battery_event(struct hid_device *hdev, struct hid_field *f
wacom_wac->hid_data.bat_connected = 1;
wacom_wac->hid_data.bat_status = WACOM_POWER_SUPPLY_STATUS_AUTO;
}
wacom_wac->features.quirks |= WACOM_QUIRK_BATTERY;
break;
case WACOM_HID_WD_BATTERY_LEVEL:
value = value * 100 / (field->logical_maximum - field->logical_minimum);
wacom_wac->hid_data.battery_capacity = value;
wacom_wac->hid_data.bat_connected = 1;
wacom_wac->hid_data.bat_status = WACOM_POWER_SUPPLY_STATUS_AUTO;
wacom_wac->features.quirks |= WACOM_QUIRK_BATTERY;
break;
case WACOM_HID_WD_BATTERY_CHARGING:
wacom_wac->hid_data.bat_charging = value;
wacom_wac->hid_data.ps_connected = value;
wacom_wac->hid_data.bat_connected = 1;
wacom_wac->hid_data.bat_status = WACOM_POWER_SUPPLY_STATUS_AUTO;
wacom_wac->features.quirks |= WACOM_QUIRK_BATTERY;
break;
}
}
@ -1898,18 +1928,15 @@ static void wacom_wac_battery_report(struct hid_device *hdev,
{
struct wacom *wacom = hid_get_drvdata(hdev);
struct wacom_wac *wacom_wac = &wacom->wacom_wac;
struct wacom_features *features = &wacom_wac->features;
if (features->quirks & WACOM_QUIRK_BATTERY) {
int status = wacom_wac->hid_data.bat_status;
int capacity = wacom_wac->hid_data.battery_capacity;
bool charging = wacom_wac->hid_data.bat_charging;
bool connected = wacom_wac->hid_data.bat_connected;
bool powered = wacom_wac->hid_data.ps_connected;
int status = wacom_wac->hid_data.bat_status;
int capacity = wacom_wac->hid_data.battery_capacity;
bool charging = wacom_wac->hid_data.bat_charging;
bool connected = wacom_wac->hid_data.bat_connected;
bool powered = wacom_wac->hid_data.ps_connected;
wacom_notify_battery(wacom_wac, status, capacity, charging,
connected, powered);
}
wacom_notify_battery(wacom_wac, status, capacity, charging,
connected, powered);
}
static void wacom_wac_pad_usage_mapping(struct hid_device *hdev,
@ -2266,6 +2293,7 @@ static void wacom_wac_pen_event(struct hid_device *hdev, struct hid_field *field
value = field->logical_maximum - value;
break;
case HID_DG_INRANGE:
mod_timer(&wacom->idleprox_timer, jiffies + msecs_to_jiffies(100));
wacom_wac->hid_data.inrange_state = value;
if (!(features->quirks & WACOM_QUIRK_SENSE))
wacom_wac->hid_data.sense_state = value;


@ -901,7 +901,7 @@ tmc_etr_buf_insert_barrier_packet(struct etr_buf *etr_buf, u64 offset)
len = tmc_etr_buf_get_data(etr_buf, offset,
CORESIGHT_BARRIER_PKT_SIZE, &bufp);
if (WARN_ON(len < CORESIGHT_BARRIER_PKT_SIZE))
if (WARN_ON(len < 0 || len < CORESIGHT_BARRIER_PKT_SIZE))
return -EINVAL;
coresight_insert_barrier_packet(bufp);
return offset + CORESIGHT_BARRIER_PKT_SIZE;


@ -503,6 +503,9 @@ struct xboxone_init_packet {
}
#define GIP_WIRED_INTF_DATA 0
#define GIP_WIRED_INTF_AUDIO 1
/*
* This packet is required for all Xbox One pads with 2015
* or later firmware installed (or present from the factory).
@ -1827,7 +1830,7 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id
}
if (xpad->xtype == XTYPE_XBOXONE &&
intf->cur_altsetting->desc.bInterfaceNumber != 0) {
intf->cur_altsetting->desc.bInterfaceNumber != GIP_WIRED_INTF_DATA) {
/*
* The Xbox One controller lists three interfaces all with the
* same interface class, subclass and protocol. Differentiate by


@ -34,7 +34,7 @@ static int mcb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct resource *res;
struct priv *priv;
int ret;
int ret, table_size;
unsigned long flags;
priv = devm_kzalloc(&pdev->dev, sizeof(struct priv), GFP_KERNEL);
@ -93,7 +93,30 @@ static int mcb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (ret < 0)
goto out_mcb_bus;
dev_dbg(&pdev->dev, "Found %d cells\n", ret);
table_size = ret;
if (table_size < CHAM_HEADER_SIZE) {
/* Release the previous resources */
devm_iounmap(&pdev->dev, priv->base);
devm_release_mem_region(&pdev->dev, priv->mapbase, CHAM_HEADER_SIZE);
/* Then, allocate it again with the actual chameleon table size */
res = devm_request_mem_region(&pdev->dev, priv->mapbase,
table_size,
KBUILD_MODNAME);
if (!res) {
dev_err(&pdev->dev, "Failed to request PCI memory\n");
ret = -EBUSY;
goto out_mcb_bus;
}
priv->base = devm_ioremap(&pdev->dev, priv->mapbase, table_size);
if (!priv->base) {
dev_err(&pdev->dev, "Cannot ioremap\n");
ret = -ENOMEM;
goto out_mcb_bus;
}
}
mcb_bus_add_devices(priv->bus);


@ -706,7 +706,7 @@ static void netup_unidvb_dma_fini(struct netup_unidvb_dev *ndev, int num)
netup_unidvb_dma_enable(dma, 0);
msleep(50);
cancel_work_sync(&dma->work);
del_timer(&dma->timeout);
del_timer_sync(&dma->timeout);
}
static int netup_unidvb_dma_setup(struct netup_unidvb_dev *ndev)


@ -316,6 +316,16 @@ static int usb_shark_probe(struct usb_interface *intf,
{
struct shark_device *shark;
int retval = -ENOMEM;
static const u8 ep_addresses[] = {
SHARK_IN_EP | USB_DIR_IN,
SHARK_OUT_EP | USB_DIR_OUT,
0};
/* Are the expected endpoints present? */
if (!usb_check_int_endpoints(intf, ep_addresses)) {
dev_err(&intf->dev, "Invalid radioSHARK device\n");
return -EINVAL;
}
shark = kzalloc(sizeof(struct shark_device), GFP_KERNEL);
if (!shark)


@ -282,6 +282,16 @@ static int usb_shark_probe(struct usb_interface *intf,
{
struct shark_device *shark;
int retval = -ENOMEM;
static const u8 ep_addresses[] = {
SHARK_IN_EP | USB_DIR_IN,
SHARK_OUT_EP | USB_DIR_OUT,
0};
/* Are the expected endpoints present? */
if (!usb_check_int_endpoints(intf, ep_addresses)) {
dev_err(&intf->dev, "Invalid radioSHARK2 device\n");
return -EINVAL;
}
shark = kzalloc(sizeof(struct shark_device), GFP_KERNEL);
if (!shark)


@ -831,7 +831,7 @@ static void r592_remove(struct pci_dev *pdev)
/* Stop the processing thread.
That ensures that we won't take any more requests */
kthread_stop(dev->io_thread);
del_timer_sync(&dev->detect_timer);
r592_enable_device(dev, false);
while (!error && dev->req) {


@ -1430,7 +1430,9 @@ mptlan_remove(struct pci_dev *pdev)
{
MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
struct net_device *dev = ioc->netdev;
struct mpt_lan_priv *priv = netdev_priv(dev);
cancel_delayed_work_sync(&priv->post_buckets_task);
if(dev != NULL) {
unregister_netdev(dev);
free_netdev(dev);


@ -800,6 +800,7 @@ out_stop_rx:
dln2_stop_rx_urbs(dln2);
out_free:
usb_put_dev(dln2->usb_dev);
dln2_free(dln2);
return ret;


@ -196,6 +196,7 @@ static int tc589_probe(struct pcmcia_device *link)
{
struct el3_private *lp;
struct net_device *dev;
int ret;
dev_dbg(&link->dev, "3c589_attach()\n");
@ -219,7 +220,15 @@ static int tc589_probe(struct pcmcia_device *link)
dev->ethtool_ops = &netdev_ethtool_ops;
return tc589_config(link);
ret = tc589_config(link);
if (ret)
goto err_free_netdev;
return 0;
err_free_netdev:
free_netdev(dev);
return ret;
}
static void tc589_detach(struct pcmcia_device *link)


@ -2980,7 +2980,7 @@ err_clk_disable:
return ret;
}
static void bcmgenet_netif_stop(struct net_device *dev)
static void bcmgenet_netif_stop(struct net_device *dev, bool stop_phy)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
@ -2995,7 +2995,8 @@ static void bcmgenet_netif_stop(struct net_device *dev)
/* Disable MAC transmit. TX DMA disabled must be done before this */
umac_enable_set(priv, CMD_TX_EN, false);
phy_stop(dev->phydev);
if (stop_phy)
phy_stop(dev->phydev);
bcmgenet_disable_rx_napi(priv);
bcmgenet_intr_disable(priv);
@ -3021,7 +3022,7 @@ static int bcmgenet_close(struct net_device *dev)
netif_dbg(priv, ifdown, dev, "bcmgenet_close\n");
bcmgenet_netif_stop(dev);
bcmgenet_netif_stop(dev, false);
/* Really kill the PHY state machine and disconnect from it */
phy_disconnect(dev->phydev);
@ -3721,7 +3722,7 @@ static int bcmgenet_suspend(struct device *d)
netif_device_detach(dev);
bcmgenet_netif_stop(dev);
bcmgenet_netif_stop(dev, true);
if (!device_may_wakeup(d))
phy_suspend(dev->phydev);


@ -3721,7 +3721,9 @@ fec_drv_remove(struct platform_device *pdev)
ret = pm_runtime_get_sync(&pdev->dev);
if (ret < 0)
return ret;
dev_err(&pdev->dev,
"Failed to resume device in remove callback (%pe)\n",
ERR_PTR(ret));
cancel_work_sync(&fep->tx_timeout_work);
fec_ptp_stop(pdev);
@ -3734,8 +3736,13 @@ fec_drv_remove(struct platform_device *pdev)
of_phy_deregister_fixed_link(np);
of_node_put(fep->phy_node);
clk_disable_unprepare(fep->clk_ahb);
clk_disable_unprepare(fep->clk_ipg);
/* After pm_runtime_get_sync() failed, the clks are still off, so skip
* disabling them again.
*/
if (ret >= 0) {
clk_disable_unprepare(fep->clk_ahb);
clk_disable_unprepare(fep->clk_ipg);
}
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_disable(&pdev->dev);


@ -425,7 +425,7 @@ void igb_mta_set(struct e1000_hw *hw, u32 hash_value)
static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
{
u32 hash_value, hash_mask;
u8 bit_shift = 0;
u8 bit_shift = 1;
/* Register count multiplied by bits per register */
hash_mask = (hw->mac.mta_reg_count * 32) - 1;
@ -433,7 +433,7 @@ static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
/* For a mc_filter_type of 0, bit_shift is the number of left-shifts
* where 0xFF would still fall within the hash mask.
*/
while (hash_mask >> bit_shift != 0xFF)
while (hash_mask >> bit_shift != 0xFF && bit_shift < 4)
bit_shift++;
/* The portion of the address that is used for the hash table


@ -6061,6 +6061,7 @@ static int nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
return 0;
out_error:
nv_mgmt_release_sema(dev);
if (phystate_orig)
writel(phystate|NVREG_ADAPTCTL_RUNNING, base + NvRegAdapterControl);
out_freering:


@ -1435,7 +1435,7 @@ static void pasemi_mac_queue_csdesc(const struct sk_buff *skb,
write_dma_reg(PAS_DMA_TXCHAN_INCR(txring->chan.chno), 2);
}
static int pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev)
static netdev_tx_t pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev)
{
struct pasemi_mac * const mac = netdev_priv(dev);
struct pasemi_mac_txring * const txring = tx_ring(mac);


@ -5138,6 +5138,8 @@ err_out_iounmap:
cas_shutdown(cp);
mutex_unlock(&cp->pm_mutex);
vfree(cp->fw_data);
pci_iounmap(pdev, cp->regs);


@ -443,6 +443,9 @@ static int ipvlan_process_v4_outbound(struct sk_buff *skb)
goto err;
}
skb_dst_set(skb, &rt->dst);
memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
err = ip_local_out(net, skb->sk, skb);
if (unlikely(net_xmit_eval(err)))
dev->stats.tx_errors++;
@ -481,6 +484,9 @@ static int ipvlan_process_v6_outbound(struct sk_buff *skb)
goto err;
}
skb_dst_set(skb, dst);
memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
err = ip6_local_out(net, skb->sk, skb);
if (unlikely(net_xmit_eval(err)))
dev->stats.tx_errors++;


@ -1238,13 +1238,14 @@ static u16 brcmf_map_fw_linkdown_reason(const struct brcmf_event_msg *e)
static int brcmf_set_pmk(struct brcmf_if *ifp, const u8 *pmk_data, u16 pmk_len)
{
struct brcmf_wsec_pmk_le pmk;
int i, err;
int err;
/* convert to firmware key format */
pmk.key_len = cpu_to_le16(pmk_len << 1);
pmk.flags = cpu_to_le16(BRCMF_WSEC_PASSPHRASE);
for (i = 0; i < pmk_len; i++)
snprintf(&pmk.key[2 * i], 3, "%02x", pmk_data[i]);
memset(&pmk, 0, sizeof(pmk));
/* pass pmk directly */
pmk.key_len = cpu_to_le16(pmk_len);
pmk.flags = cpu_to_le16(0);
memcpy(pmk.key, pmk_data, pmk_len);
/* store psk in firmware */
err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_WSEC_PMK,


@ -1101,6 +1101,7 @@ static int iwlagn_send_sta_key(struct iwl_priv *priv,
{
__le16 key_flags;
struct iwl_addsta_cmd sta_cmd;
size_t to_copy;
int i;
spin_lock_bh(&priv->sta_lock);
@ -1120,7 +1121,9 @@ static int iwlagn_send_sta_key(struct iwl_priv *priv,
sta_cmd.key.tkip_rx_tsc_byte2 = tkip_iv32;
for (i = 0; i < 5; i++)
sta_cmd.key.tkip_rx_ttak[i] = cpu_to_le16(tkip_p1k[i]);
memcpy(sta_cmd.key.key, keyconf->key, keyconf->keylen);
/* keyconf may contain MIC rx/tx keys which iwl does not use */
to_copy = min_t(size_t, sizeof(sta_cmd.key.key), keyconf->keylen);
memcpy(sta_cmd.key.key, keyconf->key, to_copy);
break;
case WLAN_CIPHER_SUITE_WEP104:
key_flags |= STA_KEY_FLG_KEY_SIZE_MSK;


@ -13,6 +13,7 @@
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
@ -488,19 +489,11 @@ static inline void miphy28lp_pcie_config_gen(struct miphy28lp_phy *miphy_phy)
static inline int miphy28lp_wait_compensation(struct miphy28lp_phy *miphy_phy)
{
unsigned long finish = jiffies + 5 * HZ;
u8 val;
/* Waiting for Compensation to complete */
do {
val = readb_relaxed(miphy_phy->base + MIPHY_COMP_FSM_6);
if (time_after_eq(jiffies, finish))
return -EBUSY;
cpu_relax();
} while (!(val & COMP_DONE));
return 0;
return readb_relaxed_poll_timeout(miphy_phy->base + MIPHY_COMP_FSM_6,
val, val & COMP_DONE, 1, 5 * USEC_PER_SEC);
}
@ -809,7 +802,6 @@ static inline void miphy28lp_configure_usb3(struct miphy28lp_phy *miphy_phy)
static inline int miphy_is_ready(struct miphy28lp_phy *miphy_phy)
{
unsigned long finish = jiffies + 5 * HZ;
u8 mask = HFC_PLL | HFC_RDY;
u8 val;
@ -820,21 +812,14 @@ static inline int miphy_is_ready(struct miphy28lp_phy *miphy_phy)
if (miphy_phy->type == PHY_TYPE_SATA)
mask |= PHY_RDY;
do {
val = readb_relaxed(miphy_phy->base + MIPHY_STATUS_1);
if ((val & mask) != mask)
cpu_relax();
else
return 0;
} while (!time_after_eq(jiffies, finish));
return -EBUSY;
return readb_relaxed_poll_timeout(miphy_phy->base + MIPHY_STATUS_1,
val, (val & mask) == mask, 1,
5 * USEC_PER_SEC);
}
static int miphy_osc_is_ready(struct miphy28lp_phy *miphy_phy)
{
struct miphy28lp_dev *miphy_dev = miphy_phy->phydev;
unsigned long finish = jiffies + 5 * HZ;
u32 val;
if (!miphy_phy->osc_rdy)
@ -843,17 +828,10 @@ static int miphy_osc_is_ready(struct miphy28lp_phy *miphy_phy)
if (!miphy_phy->syscfg_reg[SYSCFG_STATUS])
return -EINVAL;
do {
regmap_read(miphy_dev->regmap,
miphy_phy->syscfg_reg[SYSCFG_STATUS], &val);
if ((val & MIPHY_OSC_RDY) != MIPHY_OSC_RDY)
cpu_relax();
else
return 0;
} while (!time_after_eq(jiffies, finish));
return -EBUSY;
return regmap_read_poll_timeout(miphy_dev->regmap,
miphy_phy->syscfg_reg[SYSCFG_STATUS],
val, val & MIPHY_OSC_RDY, 1,
5 * USEC_PER_SEC);
}
static int miphy28lp_get_resource_byname(struct device_node *child,


@ -1551,7 +1551,7 @@ static int bq27xxx_battery_read_health(struct bq27xxx_device_info *di)
return POWER_SUPPLY_HEALTH_GOOD;
}
void bq27xxx_battery_update(struct bq27xxx_device_info *di)
static void bq27xxx_battery_update_unlocked(struct bq27xxx_device_info *di)
{
struct bq27xxx_reg_cache cache = {0, };
bool has_ci_flag = di->opts & BQ27XXX_O_ZERO;
@ -1599,6 +1599,16 @@ void bq27xxx_battery_update(struct bq27xxx_device_info *di)
di->cache = cache;
di->last_update = jiffies;
if (!di->removed && poll_interval > 0)
mod_delayed_work(system_wq, &di->work, poll_interval * HZ);
}
void bq27xxx_battery_update(struct bq27xxx_device_info *di)
{
mutex_lock(&di->lock);
bq27xxx_battery_update_unlocked(di);
mutex_unlock(&di->lock);
}
EXPORT_SYMBOL_GPL(bq27xxx_battery_update);
@ -1609,9 +1619,6 @@ static void bq27xxx_battery_poll(struct work_struct *work)
work.work);
bq27xxx_battery_update(di);
if (poll_interval > 0)
schedule_delayed_work(&di->work, poll_interval * HZ);
}
/*
@ -1772,10 +1779,8 @@ static int bq27xxx_battery_get_property(struct power_supply *psy,
struct bq27xxx_device_info *di = power_supply_get_drvdata(psy);
mutex_lock(&di->lock);
if (time_is_before_jiffies(di->last_update + 5 * HZ)) {
cancel_delayed_work_sync(&di->work);
bq27xxx_battery_poll(&di->work.work);
}
if (time_is_before_jiffies(di->last_update + 5 * HZ))
bq27xxx_battery_update_unlocked(di);
mutex_unlock(&di->lock);
if (psp != POWER_SUPPLY_PROP_PRESENT && di->cache.flags < 0)
@ -1912,22 +1917,18 @@ EXPORT_SYMBOL_GPL(bq27xxx_battery_setup);
void bq27xxx_battery_teardown(struct bq27xxx_device_info *di)
{
/*
* power_supply_unregister call bq27xxx_battery_get_property which
* call bq27xxx_battery_poll.
* Make sure that bq27xxx_battery_poll will not call
* schedule_delayed_work again after unregister (which cause OOPS).
*/
poll_interval = 0;
cancel_delayed_work_sync(&di->work);
power_supply_unregister(di->bat);
mutex_lock(&bq27xxx_list_lock);
list_del(&di->list);
mutex_unlock(&bq27xxx_list_lock);
/* Set removed to avoid bq27xxx_battery_update() re-queuing the work */
mutex_lock(&di->lock);
di->removed = true;
mutex_unlock(&di->lock);
cancel_delayed_work_sync(&di->work);
power_supply_unregister(di->bat);
mutex_destroy(&di->lock);
}
EXPORT_SYMBOL_GPL(bq27xxx_battery_teardown);


@ -187,7 +187,7 @@ static int bq27xxx_battery_i2c_probe(struct i2c_client *client,
i2c_set_clientdata(client, di);
if (client->irq) {
ret = devm_request_threaded_irq(&client->dev, client->irq,
ret = request_threaded_irq(client->irq,
NULL, bq27xxx_battery_irq_handler_thread,
IRQF_ONESHOT,
di->name, di);
@ -217,6 +217,7 @@ static int bq27xxx_battery_i2c_remove(struct i2c_client *client)
{
struct bq27xxx_device_info *di = i2c_get_clientdata(client);
free_irq(client->irq, di);
bq27xxx_battery_teardown(di);
mutex_lock(&battery_mutex);


@ -35,8 +35,9 @@ static void power_supply_update_bat_leds(struct power_supply *psy)
led_trigger_event(psy->charging_full_trig, LED_FULL);
led_trigger_event(psy->charging_trig, LED_OFF);
led_trigger_event(psy->full_trig, LED_FULL);
led_trigger_event(psy->charging_blink_full_solid_trig,
LED_FULL);
/* Going from blink to LED on requires a LED_OFF event to stop blink */
led_trigger_event(psy->charging_blink_full_solid_trig, LED_OFF);
led_trigger_event(psy->charging_blink_full_solid_trig, LED_FULL);
break;
case POWER_SUPPLY_STATUS_CHARGING:
led_trigger_event(psy->charging_full_trig, LED_FULL);


@ -29,7 +29,7 @@
#define SBS_CHARGER_REG_STATUS 0x13
#define SBS_CHARGER_REG_ALARM_WARNING 0x16
#define SBS_CHARGER_STATUS_CHARGE_INHIBITED BIT(1)
#define SBS_CHARGER_STATUS_CHARGE_INHIBITED BIT(0)
#define SBS_CHARGER_STATUS_RES_COLD BIT(9)
#define SBS_CHARGER_STATUS_RES_HOT BIT(10)
#define SBS_CHARGER_STATUS_BATTERY_PRESENT BIT(14)


@ -88,15 +88,15 @@ enum qdio_irq_states {
static inline int do_sqbs(u64 token, unsigned char state, int queue,
int *start, int *count)
{
register unsigned long _ccq asm ("0") = *count;
register unsigned long _token asm ("1") = token;
unsigned long _queuestart = ((unsigned long)queue << 32) | *start;
unsigned long _ccq = *count;
asm volatile(
" .insn rsy,0xeb000000008A,%1,0,0(%2)"
: "+d" (_ccq), "+d" (_queuestart)
: "d" ((unsigned long)state), "d" (_token)
: "memory", "cc");
" lgr 1,%[token]\n"
" .insn rsy,0xeb000000008a,%[qs],%[ccq],0(%[state])"
: [ccq] "+&d" (_ccq), [qs] "+&d" (_queuestart)
: [state] "a" ((unsigned long)state), [token] "d" (token)
: "memory", "cc", "1");
*count = _ccq & 0xff;
*start = _queuestart & 0xff;
@ -106,16 +106,17 @@ static inline int do_sqbs(u64 token, unsigned char state, int queue,
static inline int do_eqbs(u64 token, unsigned char *state, int queue,
int *start, int *count, int ack)
{
register unsigned long _ccq asm ("0") = *count;
register unsigned long _token asm ("1") = token;
unsigned long _queuestart = ((unsigned long)queue << 32) | *start;
unsigned long _state = (unsigned long)ack << 63;
unsigned long _ccq = *count;
asm volatile(
" .insn rrf,0xB99c0000,%1,%2,0,0"
: "+d" (_ccq), "+d" (_queuestart), "+d" (_state)
: "d" (_token)
: "memory", "cc");
" lgr 1,%[token]\n"
" .insn rrf,0xb99c0000,%[qs],%[state],%[ccq],0"
: [ccq] "+&d" (_ccq), [qs] "+&d" (_queuestart),
[state] "+&d" (_state)
: [token] "d" (token)
: "memory", "cc", "1");
*count = _ccq & 0xff;
*start = _queuestart & 0xff;
*state = _state & 0xff;


@ -31,38 +31,41 @@ MODULE_DESCRIPTION("QDIO base support");
MODULE_LICENSE("GPL");
static inline int do_siga_sync(unsigned long schid,
unsigned int out_mask, unsigned int in_mask,
unsigned long out_mask, unsigned long in_mask,
unsigned int fc)
{
register unsigned long __fc asm ("0") = fc;
register unsigned long __schid asm ("1") = schid;
register unsigned long out asm ("2") = out_mask;
register unsigned long in asm ("3") = in_mask;
int cc;
asm volatile(
" lgr 0,%[fc]\n"
" lgr 1,%[schid]\n"
" lgr 2,%[out]\n"
" lgr 3,%[in]\n"
" siga 0\n"
" ipm %0\n"
" srl %0,28\n"
: "=d" (cc)
: "d" (__fc), "d" (__schid), "d" (out), "d" (in) : "cc");
" ipm %[cc]\n"
" srl %[cc],28\n"
: [cc] "=&d" (cc)
: [fc] "d" (fc), [schid] "d" (schid),
[out] "d" (out_mask), [in] "d" (in_mask)
: "cc", "0", "1", "2", "3");
return cc;
}
static inline int do_siga_input(unsigned long schid, unsigned int mask,
unsigned int fc)
static inline int do_siga_input(unsigned long schid, unsigned long mask,
unsigned long fc)
{
register unsigned long __fc asm ("0") = fc;
register unsigned long __schid asm ("1") = schid;
register unsigned long __mask asm ("2") = mask;
int cc;
asm volatile(
" lgr 0,%[fc]\n"
" lgr 1,%[schid]\n"
" lgr 2,%[mask]\n"
" siga 0\n"
" ipm %0\n"
" srl %0,28\n"
: "=d" (cc)
: "d" (__fc), "d" (__schid), "d" (__mask) : "cc");
" ipm %[cc]\n"
" srl %[cc],28\n"
: [cc] "=&d" (cc)
: [fc] "d" (fc), [schid] "d" (schid), [mask] "d" (mask)
: "cc", "0", "1", "2");
return cc;
}
@ -78,23 +81,24 @@ static inline int do_siga_input(unsigned long schid, unsigned int mask,
* Note: For IQDC unicast queues only the highest priority queue is processed.
*/
static inline int do_siga_output(unsigned long schid, unsigned long mask,
unsigned int *bb, unsigned int fc,
unsigned int *bb, unsigned long fc,
unsigned long aob)
{
register unsigned long __fc asm("0") = fc;
register unsigned long __schid asm("1") = schid;
register unsigned long __mask asm("2") = mask;
register unsigned long __aob asm("3") = aob;
int cc;
asm volatile(
" lgr 0,%[fc]\n"
" lgr 1,%[schid]\n"
" lgr 2,%[mask]\n"
" lgr 3,%[aob]\n"
" siga 0\n"
" ipm %0\n"
" srl %0,28\n"
: "=d" (cc), "+d" (__fc), "+d" (__aob)
: "d" (__schid), "d" (__mask)
: "cc");
*bb = __fc >> 31;
" lgr %[fc],0\n"
" ipm %[cc]\n"
" srl %[cc],28\n"
: [cc] "=&d" (cc), [fc] "+&d" (fc)
: [schid] "d" (schid), [mask] "d" (mask), [aob] "d" (aob)
: "cc", "0", "1", "2", "3");
*bb = fc >> 31;
return cc;
}


@ -25,6 +25,7 @@
#include <linux/spi/spi.h>
#include <linux/types.h>
#include <linux/platform_device.h>
#include <linux/byteorder/generic.h>
#include "spi-fsl-cpm.h"
#include "spi-fsl-lib.h"
@ -124,6 +125,21 @@ int fsl_spi_cpm_bufs(struct mpc8xxx_spi *mspi,
mspi->rx_dma = mspi->dma_dummy_rx;
mspi->map_rx_dma = 0;
}
if (t->bits_per_word == 16 && t->tx_buf) {
const u16 *src = t->tx_buf;
u16 *dst;
int i;
dst = kmalloc(t->len, GFP_KERNEL);
if (!dst)
return -ENOMEM;
for (i = 0; i < t->len >> 1; i++)
dst[i] = cpu_to_le16p(src + i);
mspi->tx = dst;
mspi->map_tx_dma = 1;
}
if (mspi->map_tx_dma) {
void *nonconst_tx = (void *)mspi->tx; /* shut up gcc */
@ -177,6 +193,13 @@ void fsl_spi_cpm_bufs_complete(struct mpc8xxx_spi *mspi)
if (mspi->map_rx_dma)
dma_unmap_single(dev, mspi->rx_dma, t->len, DMA_FROM_DEVICE);
mspi->xfer_in_progress = NULL;
if (t->bits_per_word == 16 && t->rx_buf) {
int i;
for (i = 0; i < t->len; i += 2)
le16_to_cpus(t->rx_buf + i);
}
}
EXPORT_SYMBOL_GPL(fsl_spi_cpm_bufs_complete);


@ -201,26 +201,6 @@ static int mspi_apply_cpu_mode_quirks(struct spi_mpc8xxx_cs *cs,
return bits_per_word;
}
static int mspi_apply_qe_mode_quirks(struct spi_mpc8xxx_cs *cs,
struct spi_device *spi,
int bits_per_word)
{
/* CPM/QE uses Little Endian for words > 8
* so transform 16 and 32 bits words into 8 bits
* Unfortnatly that doesn't work for LSB so
* reject these for now */
/* Note: 32 bits word, LSB works iff
* tfcr/rfcr is set to CPMFCR_GBL */
if (spi->mode & SPI_LSB_FIRST &&
bits_per_word > 8)
return -EINVAL;
if (bits_per_word <= 8)
return bits_per_word;
if (bits_per_word == 16 || bits_per_word == 32)
return 8; /* pretend its 8 bits */
return -EINVAL;
}
static int fsl_spi_setup_transfer(struct spi_device *spi,
struct spi_transfer *t)
{
@ -248,9 +228,6 @@ static int fsl_spi_setup_transfer(struct spi_device *spi,
bits_per_word = mspi_apply_cpu_mode_quirks(cs, spi,
mpc8xxx_spi,
bits_per_word);
else
bits_per_word = mspi_apply_qe_mode_quirks(cs, spi,
bits_per_word);
if (bits_per_word < 0)
return bits_per_word;
@ -357,12 +334,44 @@ static int fsl_spi_bufs(struct spi_device *spi, struct spi_transfer *t,
static int fsl_spi_do_one_msg(struct spi_master *master,
struct spi_message *m)
{
struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(master);
struct spi_device *spi = m->spi;
struct spi_transfer *t, *first;
unsigned int cs_change;
const int nsecs = 50;
int status;
/*
* In CPU mode, optimize large byte transfers to use larger
* bits_per_word values to reduce number of interrupts taken.
*/
list_for_each_entry(t, &m->transfers, transfer_list) {
if (!(mpc8xxx_spi->flags & SPI_CPM_MODE)) {
if (t->len < 256 || t->bits_per_word != 8)
continue;
if ((t->len & 3) == 0)
t->bits_per_word = 32;
else if ((t->len & 1) == 0)
t->bits_per_word = 16;
} else {
/*
* CPM/QE uses Little Endian for words > 8
* so transform 16 and 32 bits words into 8 bits
* Unfortnatly that doesn't work for LSB so
* reject these for now
* Note: 32 bits word, LSB works iff
* tfcr/rfcr is set to CPMFCR_GBL
*/
if (m->spi->mode & SPI_LSB_FIRST && t->bits_per_word > 8)
return -EINVAL;
if (t->bits_per_word == 16 || t->bits_per_word == 32)
t->bits_per_word = 8; /* pretend its 8 bits */
if (t->bits_per_word == 8 && t->len >= 256 &&
(mpc8xxx_spi->flags & SPI_CPM1))
t->bits_per_word = 16;
}
}
/* Don't allow changes if CS is active */
first = list_first_entry(&m->transfers, struct spi_transfer,
transfer_list);
@ -642,8 +651,14 @@ static struct spi_master * fsl_spi_probe(struct device *dev,
if (mpc8xxx_spi->type == TYPE_GRLIB)
fsl_spi_grlib_probe(dev);
master->bits_per_word_mask =
(SPI_BPW_RANGE_MASK(4, 16) | SPI_BPW_MASK(32)) &
if (mpc8xxx_spi->flags & SPI_CPM_MODE)
master->bits_per_word_mask =
(SPI_BPW_RANGE_MASK(4, 8) | SPI_BPW_MASK(16) | SPI_BPW_MASK(32));
else
master->bits_per_word_mask =
(SPI_BPW_RANGE_MASK(4, 16) | SPI_BPW_MASK(32));
master->bits_per_word_mask &=
SPI_BPW_RANGE_MASK(1, mpc8xxx_spi->max_bits_per_word);
if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE)

View file

@ -237,6 +237,18 @@ static bool spi_imx_can_dma(struct spi_master *master, struct spi_device *spi,
return true;
}
/*
* Note the number of natively supported chip selects for MX51 is 4. Some
* devices may have less actual SS pins but the register map supports 4. When
* using gpio chip selects the cs values passed into the macros below can go
* outside the range 0 - 3. We therefore need to limit the cs value to avoid
* corrupting bits outside the allocated locations.
*
* The simplest way to do this is to just mask the cs bits to 2 bits. This
* still allows all 4 native chip selects to work as well as gpio chip selects
* (which can use any of the 4 chip select configurations).
*/
#define MX51_ECSPI_CTRL 0x08
#define MX51_ECSPI_CTRL_ENABLE (1 << 0)
#define MX51_ECSPI_CTRL_XCH (1 << 2)
@ -245,16 +257,16 @@ static bool spi_imx_can_dma(struct spi_master *master, struct spi_device *spi,
#define MX51_ECSPI_CTRL_DRCTL(drctl) ((drctl) << 16)
#define MX51_ECSPI_CTRL_POSTDIV_OFFSET 8
#define MX51_ECSPI_CTRL_PREDIV_OFFSET 12
#define MX51_ECSPI_CTRL_CS(cs) ((cs) << 18)
#define MX51_ECSPI_CTRL_CS(cs) ((cs & 3) << 18)
#define MX51_ECSPI_CTRL_BL_OFFSET 20
#define MX51_ECSPI_CTRL_BL_MASK (0xfff << 20)
#define MX51_ECSPI_CONFIG 0x0c
#define MX51_ECSPI_CONFIG_SCLKPHA(cs) (1 << ((cs) + 0))
#define MX51_ECSPI_CONFIG_SCLKPOL(cs) (1 << ((cs) + 4))
#define MX51_ECSPI_CONFIG_SBBCTRL(cs) (1 << ((cs) + 8))
#define MX51_ECSPI_CONFIG_SSBPOL(cs) (1 << ((cs) + 12))
#define MX51_ECSPI_CONFIG_SCLKCTL(cs) (1 << ((cs) + 20))
#define MX51_ECSPI_CONFIG_SCLKPHA(cs) (1 << ((cs & 3) + 0))
#define MX51_ECSPI_CONFIG_SCLKPOL(cs) (1 << ((cs & 3) + 4))
#define MX51_ECSPI_CONFIG_SBBCTRL(cs) (1 << ((cs & 3) + 8))
#define MX51_ECSPI_CONFIG_SSBPOL(cs) (1 << ((cs & 3) + 12))
#define MX51_ECSPI_CONFIG_SCLKCTL(cs) (1 << ((cs & 3) + 20))
#define MX51_ECSPI_INT 0x10
#define MX51_ECSPI_INT_TEEN (1 << 0)
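As a quick illustration of the masking rationale in the comment above (a standalone user-space sketch, not part of the patch; the chip-select value is made up), an unmasked GPIO chip select such as 5 would shift a bit into the burst-length field above the CS bits, while the new (cs & 3) form keeps the write inside the two allocated CS bits at [19:18]:

#include <stdio.h>

/* Mirrors the patched and unpatched macro shapes, with a hypothetical cs value. */
#define ECSPI_CTRL_CS_UNMASKED(cs)	((cs) << 18)
#define ECSPI_CTRL_CS_MASKED(cs)	(((cs) & 3) << 18)

int main(void)
{
	unsigned int gpio_cs = 5;	/* GPIO chip select outside 0-3 */

	/* Bit 20 (the lowest burst-length bit) leaks out without the mask. */
	printf("unmasked: 0x%08x\n", ECSPI_CTRL_CS_UNMASKED(gpio_cs));
	/* Stays inside the two CS bits at [19:18] with the mask. */
	printf("masked:   0x%08x\n", ECSPI_CTRL_CS_MASKED(gpio_cs));
	return 0;
}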


@ -61,9 +61,9 @@ static const struct rtl819x_ops rtl819xp_ops = {
};
static struct pci_device_id rtl8192_pci_id_tbl[] = {
{RTL_PCI_DEVICE(0x10ec, 0x8192, rtl819xp_ops)},
{RTL_PCI_DEVICE(0x07aa, 0x0044, rtl819xp_ops)},
{RTL_PCI_DEVICE(0x07aa, 0x0047, rtl819xp_ops)},
{PCI_DEVICE(0x10ec, 0x8192)},
{PCI_DEVICE(0x07aa, 0x0044)},
{PCI_DEVICE(0x07aa, 0x0047)},
{}
};


@ -67,11 +67,6 @@
#define IS_HARDWARE_TYPE_8192SE(_priv) \
(((struct r8192_priv *)rtllib_priv(dev))->card_8192 == NIC_8192SE)
#define RTL_PCI_DEVICE(vend, dev, cfg) \
.vendor = (vend), .device = (dev), \
.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, \
.driver_data = (kernel_ulong_t)&(cfg)
#define TOTAL_CAM_ENTRY 32
#define CAM_CONTENT_COUNT 8


@ -1125,6 +1125,7 @@ void serial8250_unregister_port(int line)
uart->port.type = PORT_UNKNOWN;
uart->port.dev = &serial8250_isa_devs->dev;
uart->capabilities = 0;
serial8250_init_port(uart);
serial8250_apply_quirks(uart);
uart_add_one_port(&serial8250_reg, &uart->port);
} else {


@ -1648,6 +1648,8 @@ pci_wch_ch38x_setup(struct serial_private *priv,
#define PCI_SUBDEVICE_ID_SIIG_DUAL_30 0x2530
#define PCI_VENDOR_ID_ADVANTECH 0x13fe
#define PCI_DEVICE_ID_INTEL_CE4100_UART 0x2e66
#define PCI_DEVICE_ID_ADVANTECH_PCI1600 0x1600
#define PCI_DEVICE_ID_ADVANTECH_PCI1600_1611 0x1611
#define PCI_DEVICE_ID_ADVANTECH_PCI3620 0x3620
#define PCI_DEVICE_ID_ADVANTECH_PCI3618 0x3618
#define PCI_DEVICE_ID_ADVANTECH_PCIf618 0xf618
@ -3840,6 +3842,9 @@ static SIMPLE_DEV_PM_OPS(pciserial_pm_ops, pciserial_suspend_one,
pciserial_resume_one);
static const struct pci_device_id serial_pci_tbl[] = {
{ PCI_VENDOR_ID_ADVANTECH, PCI_DEVICE_ID_ADVANTECH_PCI1600,
PCI_DEVICE_ID_ADVANTECH_PCI1600_1611, PCI_ANY_ID, 0, 0,
pbn_b0_4_921600 },
/* Advantech use PCI_DEVICE_ID_ADVANTECH_PCI3620 (0x3620) as 'PCI_SUBVENDOR_ID' */
{ PCI_VENDOR_ID_ADVANTECH, PCI_DEVICE_ID_ADVANTECH_PCI3620,
PCI_DEVICE_ID_ADVANTECH_PCI3620, 0x0001, 0, 0,


@ -613,10 +613,11 @@ static int arc_serial_probe(struct platform_device *pdev)
}
uart->baud = val;
port->membase = of_iomap(np, 0);
if (!port->membase)
port->membase = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(port->membase)) {
/* No point of dev_err since UART itself is hosed here */
return -ENXIO;
return PTR_ERR(port->membase);
}
port->irq = irq_of_parse_and_map(np, 0);


@ -182,39 +182,47 @@ vcs_vc(struct inode *inode, int *viewed)
return vc_cons[currcons].d;
}
/*
* Returns size for VC carried by inode.
/**
* vcs_size -- return size for a VC in @vc
* @vc: which VC
* @attr: does it use attributes?
* @unicode: is it unicode?
*
* Must be called with console_lock.
*/
static int
vcs_size(struct inode *inode)
static int vcs_size(const struct vc_data *vc, bool attr, bool unicode)
{
int size;
struct vc_data *vc;
WARN_CONSOLE_UNLOCKED();
vc = vcs_vc(inode, NULL);
if (!vc)
return -ENXIO;
size = vc->vc_rows * vc->vc_cols;
if (use_attributes(inode)) {
if (use_unicode(inode))
if (attr) {
if (unicode)
return -EOPNOTSUPP;
size = 2*size + HEADER_SIZE;
} else if (use_unicode(inode))
size = 2 * size + HEADER_SIZE;
} else if (unicode)
size *= 4;
return size;
}
static loff_t vcs_lseek(struct file *file, loff_t offset, int orig)
{
struct inode *inode = file_inode(file);
struct vc_data *vc;
int size;
console_lock();
size = vcs_size(file_inode(file));
vc = vcs_vc(inode, NULL);
if (!vc) {
console_unlock();
return -ENXIO;
}
size = vcs_size(vc, use_attributes(inode), use_unicode(inode));
console_unlock();
if (size < 0)
return size;
@ -276,7 +284,7 @@ vcs_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
* as copy_to_user at the end of this loop
* could sleep.
*/
size = vcs_size(inode);
size = vcs_size(vc, attr, uni_mode);
if (size < 0) {
ret = size;
break;
@ -457,7 +465,11 @@ vcs_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
if (!vc)
goto unlock_out;
size = vcs_size(inode);
size = vcs_size(vc, attr, false);
if (size < 0) {
ret = size;
goto unlock_out;
}
ret = -EINVAL;
if (pos < 0 || pos > size)
goto unlock_out;
@ -492,11 +504,18 @@ vcs_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
}
}
/* The vcs_size might have changed while we slept to grab
* the user buffer, so recheck.
/* The vc might have been freed or vcs_size might have changed
* while we slept to grab the user buffer, so recheck.
* Return data written up to now on failure.
*/
size = vcs_size(inode);
vc = vcs_vc(inode, &viewed);
if (!vc) {
if (written)
break;
ret = -ENXIO;
goto unlock_out;
}
size = vcs_size(vc, attr, false);
if (size < 0) {
if (written)
break;


@ -209,6 +209,82 @@ int usb_find_common_endpoints_reverse(struct usb_host_interface *alt,
}
EXPORT_SYMBOL_GPL(usb_find_common_endpoints_reverse);
/**
* usb_find_endpoint() - Given an endpoint address, search for the endpoint's
* usb_host_endpoint structure in an interface's current altsetting.
* @intf: the interface whose current altsetting should be searched
* @ep_addr: the endpoint address (number and direction) to find
*
* Search the altsetting's list of endpoints for one with the specified address.
*
* Return: Pointer to the usb_host_endpoint if found, %NULL otherwise.
*/
static const struct usb_host_endpoint *usb_find_endpoint(
const struct usb_interface *intf, unsigned int ep_addr)
{
int n;
const struct usb_host_endpoint *ep;
n = intf->cur_altsetting->desc.bNumEndpoints;
ep = intf->cur_altsetting->endpoint;
for (; n > 0; (--n, ++ep)) {
if (ep->desc.bEndpointAddress == ep_addr)
return ep;
}
return NULL;
}
/**
* usb_check_bulk_endpoints - Check whether an interface's current altsetting
* contains a set of bulk endpoints with the given addresses.
* @intf: the interface whose current altsetting should be searched
* @ep_addrs: 0-terminated array of the endpoint addresses (number and
* direction) to look for
*
* Search for endpoints with the specified addresses and check their types.
*
* Return: %true if all the endpoints are found and are bulk, %false otherwise.
*/
bool usb_check_bulk_endpoints(
const struct usb_interface *intf, const u8 *ep_addrs)
{
const struct usb_host_endpoint *ep;
for (; *ep_addrs; ++ep_addrs) {
ep = usb_find_endpoint(intf, *ep_addrs);
if (!ep || !usb_endpoint_xfer_bulk(&ep->desc))
return false;
}
return true;
}
EXPORT_SYMBOL_GPL(usb_check_bulk_endpoints);
/**
* usb_check_int_endpoints - Check whether an interface's current altsetting
* contains a set of interrupt endpoints with the given addresses.
* @intf: the interface whose current altsetting should be searched
* @ep_addrs: 0-terminated array of the endpoint addresses (number and
* direction) to look for
*
* Search for endpoints with the specified addresses and check their types.
*
* Return: %true if all the endpoints are found and are interrupt,
* %false otherwise.
*/
bool usb_check_int_endpoints(
const struct usb_interface *intf, const u8 *ep_addrs)
{
const struct usb_host_endpoint *ep;
for (; *ep_addrs; ++ep_addrs) {
ep = usb_find_endpoint(intf, *ep_addrs);
if (!ep || !usb_endpoint_xfer_int(&ep->desc))
return false;
}
return true;
}
EXPORT_SYMBOL_GPL(usb_check_int_endpoints);
/**
* usb_find_alt_setting() - Given a configuration, find the alternate setting
* for the given interface.

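The sisusb hunk further down shows usb_check_bulk_endpoints() guarding a probe routine; a comparable probe-time check with the interrupt variant could look like the sketch below (the driver, interface layout and endpoint addresses here are hypothetical, not taken from this series):

#include <linux/usb.h>

/* Hypothetical layout: interrupt IN on EP1, interrupt OUT on EP2. */
static const u8 demo_int_eps[] = { 0x81, 0x02, 0 };	/* 0-terminated */

static int demo_probe(struct usb_interface *intf,
		      const struct usb_device_id *id)
{
	/* Reject interfaces whose current altsetting lacks the expected
	 * interrupt endpoints, mirroring the bulk check added to sisusb.
	 */
	if (!usb_check_int_endpoints(intf, demo_int_eps))
		return -ENODEV;

	return 0;
}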

@ -17,6 +17,8 @@
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/string_helpers.h>
#include <linux/usb/composite.h>
#include "u_ether.h"
@ -102,41 +104,6 @@ static inline int qlen(struct usb_gadget *gadget, unsigned qmult)
/*-------------------------------------------------------------------------*/
/* REVISIT there must be a better way than having two sets
* of debug calls ...
*/
#undef DBG
#undef VDBG
#undef ERROR
#undef INFO
#define xprintk(d, level, fmt, args...) \
printk(level "%s: " fmt , (d)->net->name , ## args)
#ifdef DEBUG
#undef DEBUG
#define DBG(dev, fmt, args...) \
xprintk(dev , KERN_DEBUG , fmt , ## args)
#else
#define DBG(dev, fmt, args...) \
do { } while (0)
#endif /* DEBUG */
#ifdef VERBOSE_DEBUG
#define VDBG DBG
#else
#define VDBG(dev, fmt, args...) \
do { } while (0)
#endif /* DEBUG */
#define ERROR(dev, fmt, args...) \
xprintk(dev , KERN_ERR , fmt , ## args)
#define INFO(dev, fmt, args...) \
xprintk(dev , KERN_INFO , fmt , ## args)
/*-------------------------------------------------------------------------*/
/* NETWORK DRIVER HOOKUP (to the layer above this driver) */
static void eth_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *p)
@ -974,6 +941,8 @@ int gether_get_host_addr_cdc(struct net_device *net, char *host_addr, int len)
dev = netdev_priv(net);
snprintf(host_addr, len, "%pm", dev->host_mac);
string_upper(host_addr, host_addr);
return strlen(host_addr);
}
EXPORT_SYMBOL_GPL(gether_get_host_addr_cdc);


@ -3016,6 +3016,20 @@ static int sisusb_probe(struct usb_interface *intf,
struct usb_device *dev = interface_to_usbdev(intf);
struct sisusb_usb_data *sisusb;
int retval = 0, i;
static const u8 ep_addresses[] = {
SISUSB_EP_GFX_IN | USB_DIR_IN,
SISUSB_EP_GFX_OUT | USB_DIR_OUT,
SISUSB_EP_GFX_BULK_OUT | USB_DIR_OUT,
SISUSB_EP_GFX_LBULK_OUT | USB_DIR_OUT,
SISUSB_EP_BRIDGE_IN | USB_DIR_IN,
SISUSB_EP_BRIDGE_OUT | USB_DIR_OUT,
0};
/* Are the expected endpoints present? */
if (!usb_check_bulk_endpoints(intf, ep_addresses)) {
dev_err(&intf->dev, "Invalid USB2VGA device\n");
return -EINVAL;
}
dev_info(&dev->dev, "USB2VGA dongle found at address %d\n",
dev->devnum);


@ -392,22 +392,25 @@ static DEF_SCSI_QCMD(queuecommand)
***********************************************************************/
/* Command timeout and abort */
static int command_abort(struct scsi_cmnd *srb)
static int command_abort_matching(struct us_data *us, struct scsi_cmnd *srb_match)
{
struct us_data *us = host_to_us(srb->device->host);
usb_stor_dbg(us, "%s called\n", __func__);
/*
* us->srb together with the TIMED_OUT, RESETTING, and ABORTING
* bits are protected by the host lock.
*/
scsi_lock(us_to_host(us));
/* Is this command still active? */
if (us->srb != srb) {
/* is there any active pending command to abort ? */
if (!us->srb) {
scsi_unlock(us_to_host(us));
usb_stor_dbg(us, "-- nothing to abort\n");
return SUCCESS;
}
/* Does the command match the passed srb if any ? */
if (srb_match && us->srb != srb_match) {
scsi_unlock(us_to_host(us));
usb_stor_dbg(us, "-- pending command mismatch\n");
return FAILED;
}
@ -430,6 +433,14 @@ static int command_abort(struct scsi_cmnd *srb)
return SUCCESS;
}
static int command_abort(struct scsi_cmnd *srb)
{
struct us_data *us = host_to_us(srb->device->host);
usb_stor_dbg(us, "%s called\n", __func__);
return command_abort_matching(us, srb);
}
/*
* This invokes the transport reset mechanism to reset the state of the
* device
@ -441,6 +452,9 @@ static int device_reset(struct scsi_cmnd *srb)
usb_stor_dbg(us, "%s called\n", __func__);
/* abort any pending command before reset */
command_abort_matching(us, NULL);
/* lock the device pointers and do the reset */
mutex_lock(&(us->dev_mutex));
result = us->transport_reset(us);


@ -501,6 +501,10 @@ static ssize_t pin_assignment_show(struct device *dev,
mutex_unlock(&dp->lock);
/* get_current_pin_assignments can return 0 when no matching pin assignments are found */
if (len == 0)
len++;
buf[len - 1] = '\n';
return len;
}


@ -1006,7 +1006,21 @@ static bool svdm_consume_svids(struct tcpm_port *port, const __le32 *payload,
pmdata->svids[pmdata->nsvids++] = svid;
tcpm_log(port, "SVID %d: 0x%x", pmdata->nsvids, svid);
}
return true;
/*
* PD3.0 Spec 6.4.4.3.2: The SVIDs are returned 2 per VDO (see Table
* 6-43), and can be returned maximum 6 VDOs per response (see Figure
* 6-19). If the Responder supports 12 or more SVID then the Discover
* SVIDs Command Shall be executed multiple times until a Discover
* SVIDs VDO is returned ending either with a SVID value of 0x0000 in
* the last part of the last VDO or with a VDO containing two SVIDs
* with values of 0x0000.
*
* However, some odd dockers support SVIDs less than 12 but without
* 0x0000 in the last VDO, so we need to break the Discover SVIDs
* request and return false here.
*/
return cnt == 7;
abort:
tcpm_log(port, "SVID_DISCOVERY_MAX(%d) too low!", SVID_DISCOVERY_MAX);
return false;


@ -330,8 +330,10 @@ static struct sock_mapping *pvcalls_new_active_socket(
void *page;
map = kzalloc(sizeof(*map), GFP_KERNEL);
if (map == NULL)
if (map == NULL) {
sock_release(sock);
return NULL;
}
map->fedata = fedata;
map->sock = sock;
@ -423,10 +425,8 @@ static int pvcalls_back_connect(struct xenbus_device *dev,
req->u.connect.ref,
req->u.connect.evtchn,
sock);
if (!map) {
if (!map)
ret = -EFAULT;
sock_release(sock);
}
out:
rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
@ -567,7 +567,6 @@ static void __pvcalls_back_accept(struct work_struct *work)
sock);
if (!map) {
ret = -EFAULT;
sock_release(sock);
goto out_error;
}


@ -4348,7 +4348,11 @@ static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root)
*/
inode = igrab(&btrfs_inode->vfs_inode);
if (inode) {
unsigned int nofs_flag;
nofs_flag = memalloc_nofs_save();
invalidate_inode_pages2(inode->i_mapping);
memalloc_nofs_restore(nofs_flag);
iput(inode);
}
spin_lock(&root->delalloc_lock);
@ -4466,7 +4470,12 @@ static void btrfs_cleanup_bg_io(struct btrfs_block_group_cache *cache)
inode = cache->io_ctl.inode;
if (inode) {
unsigned int nofs_flag;
nofs_flag = memalloc_nofs_save();
invalidate_inode_pages2(inode->i_mapping);
memalloc_nofs_restore(nofs_flag);
BTRFS_I(inode)->generation = 0;
cache->io_ctl.inode = NULL;
iput(inode);


@ -783,15 +783,16 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
}
spin_lock(&ctl->tree_lock);
ret = link_free_space(ctl, e);
ctl->total_bitmaps++;
ctl->op->recalc_thresholds(ctl);
spin_unlock(&ctl->tree_lock);
if (ret) {
spin_unlock(&ctl->tree_lock);
btrfs_err(fs_info,
"Duplicate entries in free space cache, dumping");
kmem_cache_free(btrfs_free_space_cachep, e);
goto free_cache;
}
ctl->total_bitmaps++;
ctl->op->recalc_thresholds(ctl);
spin_unlock(&ctl->tree_lock);
list_add_tail(&e->list, &bitmaps);
}


@ -6620,7 +6620,7 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
if (IS_ERR(trans))
return PTR_ERR(trans);
err = btrfs_find_free_ino(root, &objectid);
err = btrfs_find_free_objectid(root, &objectid);
if (err)
goto out_unlock;
@ -6684,7 +6684,7 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
if (IS_ERR(trans))
return PTR_ERR(trans);
err = btrfs_find_free_ino(root, &objectid);
err = btrfs_find_free_objectid(root, &objectid);
if (err)
goto out_unlock;
@ -6837,7 +6837,7 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
if (IS_ERR(trans))
return PTR_ERR(trans);
err = btrfs_find_free_ino(root, &objectid);
err = btrfs_find_free_objectid(root, &objectid);
if (err)
goto out_fail;
@ -9819,7 +9819,7 @@ static int btrfs_whiteout_for_rename(struct btrfs_trans_handle *trans,
u64 objectid;
u64 index;
ret = btrfs_find_free_ino(root, &objectid);
ret = btrfs_find_free_objectid(root, &objectid);
if (ret)
return ret;
@ -10316,7 +10316,7 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
if (IS_ERR(trans))
return PTR_ERR(trans);
err = btrfs_find_free_ino(root, &objectid);
err = btrfs_find_free_objectid(root, &objectid);
if (err)
goto out_unlock;
@ -10600,7 +10600,7 @@ static int btrfs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
if (IS_ERR(trans))
return PTR_ERR(trans);
ret = btrfs_find_free_ino(root, &objectid);
ret = btrfs_find_free_objectid(root, &objectid);
if (ret)
goto out;


@ -976,6 +976,19 @@ skip_inode:
continue;
adjust_snap_realm_parent(mdsc, child, realm->ino);
}
} else {
/*
* In the non-split case both 'num_split_inos' and
* 'num_split_realms' should be 0, making this a no-op.
* However the MDS happens to populate 'split_realms' list
* in one of the UPDATE op cases by mistake.
*
* Skip both lists just in case to ensure that 'p' is
* positioned at the start of realm info, as expected by
* ceph_update_snap_trace().
*/
p += sizeof(u64) * num_split_inos;
p += sizeof(u64) * num_split_realms;
}
/*


@ -177,6 +177,7 @@ static inline struct ext2_sb_info *EXT2_SB(struct super_block *sb)
#define EXT2_MIN_BLOCK_SIZE 1024
#define EXT2_MAX_BLOCK_SIZE 4096
#define EXT2_MIN_BLOCK_LOG_SIZE 10
#define EXT2_MAX_BLOCK_LOG_SIZE 16
#define EXT2_BLOCK_SIZE(s) ((s)->s_blocksize)
#define EXT2_ADDR_PER_BLOCK(s) (EXT2_BLOCK_SIZE(s) / sizeof (__u32))
#define EXT2_BLOCK_SIZE_BITS(s) ((s)->s_blocksize_bits)


@ -978,6 +978,13 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
goto failed_mount;
}
if (le32_to_cpu(es->s_log_block_size) >
(EXT2_MAX_BLOCK_LOG_SIZE - BLOCK_SIZE_BITS)) {
ext2_msg(sb, KERN_ERR,
"Invalid log block size: %u",
le32_to_cpu(es->s_log_block_size));
goto failed_mount;
}
blocksize = BLOCK_SIZE << le32_to_cpu(sbi->s_es->s_log_block_size);
if (sbi->s_mount_opt & EXT2_MOUNT_DAX) {

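For reference on the new bound: with the kernel's BLOCK_SIZE_BITS of 10 (BLOCK_SIZE = 1024), EXT2_MAX_BLOCK_LOG_SIZE = 16 means s_log_block_size values above 6 (block sizes above 64 KiB) are now rejected before the shift on the following line can misbehave. A standalone sanity check of that arithmetic (plain C, constants mirrored from the hunks above):

#include <stdio.h>

#define BLOCK_SIZE_BITS		10
#define BLOCK_SIZE		(1 << BLOCK_SIZE_BITS)
#define EXT2_MAX_BLOCK_LOG_SIZE	16

int main(void)
{
	unsigned int log_sz;

	for (log_sz = 0; log_sz <= 8; log_sz++) {
		int ok = log_sz <= (EXT2_MAX_BLOCK_LOG_SIZE - BLOCK_SIZE_BITS);

		printf("s_log_block_size=%u -> blocksize=%u (%s)\n",
		       log_sz, BLOCK_SIZE << log_sz,
		       ok ? "accepted" : "rejected");
	}
	return 0;
}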

@ -3089,6 +3089,7 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac,
struct ext4_allocation_request *ar)
{
struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
struct ext4_super_block *es = sbi->s_es;
int bsbits, max;
ext4_lblk_t end;
loff_t size, start_off;
@ -3269,18 +3270,21 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac,
ac->ac_g_ex.fe_len = EXT4_NUM_B2C(sbi, size);
/* define goal start in order to merge */
if (ar->pright && (ar->lright == (start + size))) {
if (ar->pright && (ar->lright == (start + size)) &&
ar->pright >= size &&
ar->pright - size >= le32_to_cpu(es->s_first_data_block)) {
/* merge to the right */
ext4_get_group_no_and_offset(ac->ac_sb, ar->pright - size,
&ac->ac_f_ex.fe_group,
&ac->ac_f_ex.fe_start);
&ac->ac_g_ex.fe_group,
&ac->ac_g_ex.fe_start);
ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
}
if (ar->pleft && (ar->lleft + 1 == start)) {
if (ar->pleft && (ar->lleft + 1 == start) &&
ar->pleft + 1 < ext4_blocks_count(es)) {
/* merge to the left */
ext4_get_group_no_and_offset(ac->ac_sb, ar->pleft + 1,
&ac->ac_f_ex.fe_group,
&ac->ac_f_ex.fe_start);
&ac->ac_g_ex.fe_group,
&ac->ac_g_ex.fe_start);
ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
}
@ -3372,6 +3376,7 @@ static void ext4_mb_use_inode_pa(struct ext4_allocation_context *ac,
BUG_ON(start < pa->pa_pstart);
BUG_ON(end > pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len));
BUG_ON(pa->pa_free < len);
BUG_ON(ac->ac_b_ex.fe_len <= 0);
pa->pa_free -= len;
mb_debug(1, "use %llu/%u from inode pa %p\n", start, len, pa);
@ -3676,10 +3681,8 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
return -ENOMEM;
if (ac->ac_b_ex.fe_len < ac->ac_g_ex.fe_len) {
int winl;
int wins;
int win;
int offs;
int new_bex_start;
int new_bex_end;
/* we can't allocate as much as normalizer wants.
* so, found space must get proper lstart
@ -3687,26 +3690,40 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
BUG_ON(ac->ac_g_ex.fe_logical > ac->ac_o_ex.fe_logical);
BUG_ON(ac->ac_g_ex.fe_len < ac->ac_o_ex.fe_len);
/* we're limited by original request in that
* logical block must be covered any way
* winl is window we can move our chunk within */
winl = ac->ac_o_ex.fe_logical - ac->ac_g_ex.fe_logical;
/*
* Use the below logic for adjusting best extent as it keeps
* fragmentation in check while ensuring logical range of best
* extent doesn't overflow out of goal extent:
*
* 1. Check if best ex can be kept at end of goal and still
* cover original start
* 2. Else, check if best ex can be kept at start of goal and
* still cover original start
* 3. Else, keep the best ex at start of original request.
*/
new_bex_end = ac->ac_g_ex.fe_logical +
EXT4_C2B(sbi, ac->ac_g_ex.fe_len);
new_bex_start = new_bex_end - EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
if (ac->ac_o_ex.fe_logical >= new_bex_start)
goto adjust_bex;
/* also, we should cover whole original request */
wins = EXT4_C2B(sbi, ac->ac_b_ex.fe_len - ac->ac_o_ex.fe_len);
new_bex_start = ac->ac_g_ex.fe_logical;
new_bex_end =
new_bex_start + EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
if (ac->ac_o_ex.fe_logical < new_bex_end)
goto adjust_bex;
/* the smallest one defines real window */
win = min(winl, wins);
new_bex_start = ac->ac_o_ex.fe_logical;
new_bex_end =
new_bex_start + EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
offs = ac->ac_o_ex.fe_logical %
EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
if (offs && offs < win)
win = offs;
adjust_bex:
ac->ac_b_ex.fe_logical = new_bex_start;
ac->ac_b_ex.fe_logical = ac->ac_o_ex.fe_logical -
EXT4_NUM_B2C(sbi, win);
BUG_ON(ac->ac_o_ex.fe_logical < ac->ac_b_ex.fe_logical);
BUG_ON(ac->ac_o_ex.fe_len > ac->ac_b_ex.fe_len);
BUG_ON(new_bex_end > (ac->ac_g_ex.fe_logical +
EXT4_C2B(sbi, ac->ac_g_ex.fe_len)));
}
/* preallocation can change ac_b_ex, thus we store actually


@ -301,8 +301,15 @@ static int __f2fs_write_meta_page(struct page *page,
trace_f2fs_writepage(page, META);
if (unlikely(f2fs_cp_error(sbi)))
if (unlikely(f2fs_cp_error(sbi))) {
if (is_sbi_flag_set(sbi, SBI_IS_CLOSE)) {
ClearPageUptodate(page);
dec_page_count(sbi, F2FS_DIRTY_META);
unlock_page(page);
return 0;
}
goto redirty_out;
}
if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
goto redirty_out;
if (wbc->for_reclaim && page->index < GET_SUM_BLOCK(sbi, 0))
@ -1269,7 +1276,8 @@ void f2fs_wait_on_all_pages(struct f2fs_sb_info *sbi, int type)
if (!get_pages(sbi, type))
break;
if (unlikely(f2fs_cp_error(sbi)))
if (unlikely(f2fs_cp_error(sbi) &&
!is_sbi_flag_set(sbi, SBI_IS_CLOSE)))
break;
if (type == F2FS_DIRTY_META)


@ -2792,7 +2792,8 @@ int f2fs_write_single_data_page(struct page *page, int *submitted,
* don't drop any dirty dentry pages for keeping latest
* directory structure.
*/
if (S_ISDIR(inode->i_mode))
if (S_ISDIR(inode->i_mode) &&
!is_sbi_flag_set(sbi, SBI_IS_CLOSE))
goto redirty_out;
goto out;
}


@ -339,6 +339,7 @@ static int inode_go_demote_ok(const struct gfs2_glock *gl)
static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
const struct gfs2_dinode *str = buf;
struct timespec64 atime;
u16 height, depth;
@ -378,7 +379,7 @@ static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
/* i_diskflags and i_eattr must be set before gfs2_set_inode_flags() */
gfs2_set_inode_flags(&ip->i_inode);
height = be16_to_cpu(str->di_height);
if (unlikely(height > GFS2_MAX_META_HEIGHT))
if (unlikely(height > sdp->sd_max_height))
goto corrupt;
ip->i_height = (u8)height;


@ -476,7 +476,11 @@ int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd)
if (type == HFSPLUS_FOLDER) {
struct hfsplus_cat_folder *folder = &entry.folder;
WARN_ON(fd->entrylength < sizeof(struct hfsplus_cat_folder));
if (fd->entrylength < sizeof(struct hfsplus_cat_folder)) {
pr_err("bad catalog folder entry\n");
res = -EIO;
goto out;
}
hfs_bnode_read(fd->bnode, &entry, fd->entryoffset,
sizeof(struct hfsplus_cat_folder));
hfsplus_get_perms(inode, &folder->permissions, 1);
@ -496,7 +500,11 @@ int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd)
} else if (type == HFSPLUS_FILE) {
struct hfsplus_cat_file *file = &entry.file;
WARN_ON(fd->entrylength < sizeof(struct hfsplus_cat_file));
if (fd->entrylength < sizeof(struct hfsplus_cat_file)) {
pr_err("bad catalog file entry\n");
res = -EIO;
goto out;
}
hfs_bnode_read(fd->bnode, &entry, fd->entryoffset,
sizeof(struct hfsplus_cat_file));
@ -527,6 +535,7 @@ int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd)
pr_err("bad catalog entry used to create inode\n");
res = -EIO;
}
out:
return res;
}
@ -535,6 +544,7 @@ int hfsplus_cat_write_inode(struct inode *inode)
struct inode *main_inode = inode;
struct hfs_find_data fd;
hfsplus_cat_entry entry;
int res = 0;
if (HFSPLUS_IS_RSRC(inode))
main_inode = HFSPLUS_I(inode)->rsrc_inode;
@ -553,7 +563,11 @@ int hfsplus_cat_write_inode(struct inode *inode)
if (S_ISDIR(main_inode->i_mode)) {
struct hfsplus_cat_folder *folder = &entry.folder;
WARN_ON(fd.entrylength < sizeof(struct hfsplus_cat_folder));
if (fd.entrylength < sizeof(struct hfsplus_cat_folder)) {
pr_err("bad catalog folder entry\n");
res = -EIO;
goto out;
}
hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
sizeof(struct hfsplus_cat_folder));
/* simple node checks? */
@ -578,7 +592,11 @@ int hfsplus_cat_write_inode(struct inode *inode)
} else {
struct hfsplus_cat_file *file = &entry.file;
WARN_ON(fd.entrylength < sizeof(struct hfsplus_cat_file));
if (fd.entrylength < sizeof(struct hfsplus_cat_file)) {
pr_err("bad catalog file entry\n");
res = -EIO;
goto out;
}
hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
sizeof(struct hfsplus_cat_file));
hfsplus_inode_write_fork(inode, &file->data_fork);
@ -599,5 +617,5 @@ int hfsplus_cat_write_inode(struct inode *inode)
set_bit(HFSPLUS_I_CAT_DIRTY, &HFSPLUS_I(inode)->flags);
out:
hfs_find_exit(&fd);
return 0;
return res;
}


@ -930,6 +930,7 @@ void nilfs_evict_inode(struct inode *inode)
struct nilfs_transaction_info ti;
struct super_block *sb = inode->i_sb;
struct nilfs_inode_info *ii = NILFS_I(inode);
struct the_nilfs *nilfs;
int ret;
if (inode->i_nlink || !ii->i_root || unlikely(is_bad_inode(inode))) {
@ -942,6 +943,23 @@ void nilfs_evict_inode(struct inode *inode)
truncate_inode_pages_final(&inode->i_data);
nilfs = sb->s_fs_info;
if (unlikely(sb_rdonly(sb) || !nilfs->ns_writer)) {
/*
* If this inode is about to be disposed after the file system
* has been degraded to read-only due to file system corruption
* or after the writer has been detached, do not make any
* changes that cause writes, just clear it.
* Do this check after read-locking ns_segctor_sem by
* nilfs_transaction_begin() in order to avoid a race with
* the writer detach operation.
*/
clear_inode(inode);
nilfs_clear_inode(inode);
nilfs_transaction_abort(sb);
return;
}
/* TODO: some of the following operations may fail. */
nilfs_truncate_bmap(ii, 0);
nilfs_mark_inode_dirty(inode);


@ -114,6 +114,7 @@ static int do_statfs_native(struct kstatfs *st, struct statfs __user *p)
if (sizeof(buf) == sizeof(*st))
memcpy(&buf, st, sizeof(*st));
else {
memset(&buf, 0, sizeof(buf));
if (sizeof buf.f_blocks == 4) {
if ((st->f_blocks | st->f_bfree | st->f_bavail |
st->f_bsize | st->f_frsize) &
@ -142,7 +143,6 @@ static int do_statfs_native(struct kstatfs *st, struct statfs __user *p)
buf.f_namelen = st->f_namelen;
buf.f_frsize = st->f_frsize;
buf.f_flags = st->f_flags;
memset(buf.f_spare, 0, sizeof(buf.f_spare));
}
if (copy_to_user(p, &buf, sizeof(buf)))
return -EFAULT;
@ -155,6 +155,7 @@ static int do_statfs64(struct kstatfs *st, struct statfs64 __user *p)
if (sizeof(buf) == sizeof(*st))
memcpy(&buf, st, sizeof(*st));
else {
memset(&buf, 0, sizeof(buf));
buf.f_type = st->f_type;
buf.f_bsize = st->f_bsize;
buf.f_blocks = st->f_blocks;
@ -166,7 +167,6 @@ static int do_statfs64(struct kstatfs *st, struct statfs64 __user *p)
buf.f_namelen = st->f_namelen;
buf.f_frsize = st->f_frsize;
buf.f_flags = st->f_flags;
memset(buf.f_spare, 0, sizeof(buf.f_spare));
}
if (copy_to_user(p, &buf, sizeof(buf)))
return -EFAULT;


@ -113,7 +113,6 @@ enum cpuhp_state {
CPUHP_AP_PERF_X86_CSTATE_STARTING,
CPUHP_AP_PERF_XTENSA_STARTING,
CPUHP_AP_MIPS_OP_LOONGSON3_STARTING,
CPUHP_AP_ARM_SDEI_STARTING,
CPUHP_AP_ARM_VFP_STARTING,
CPUHP_AP_ARM64_DEBUG_MONITORS_STARTING,
CPUHP_AP_PERF_ARM_HW_BREAKPOINT_STARTING,


@ -51,6 +51,9 @@ extern struct device platform_bus;
extern void arch_setup_pdev_archdata(struct platform_device *);
extern struct resource *platform_get_resource(struct platform_device *,
unsigned int, unsigned int);
extern void __iomem *
devm_platform_ioremap_resource(struct platform_device *pdev,
unsigned int index);
extern int platform_get_irq(struct platform_device *, unsigned int);
extern int platform_irq_count(struct platform_device *);
extern struct resource *platform_get_resource_byname(struct platform_device *,


@ -63,6 +63,7 @@ struct bq27xxx_device_info {
struct bq27xxx_access_methods bus;
struct bq27xxx_reg_cache cache;
int charge_design_full;
bool removed;
unsigned long last_update;
struct delayed_work work;
struct power_supply *bat;


@ -23,7 +23,7 @@ static inline void *task_stack_page(const struct task_struct *task)
#define setup_thread_stack(new,old) do { } while(0)
static inline unsigned long *end_of_stack(const struct task_struct *task)
static __always_inline unsigned long *end_of_stack(const struct task_struct *task)
{
#ifdef CONFIG_STACK_GROWSUP
return (unsigned long *)((unsigned long)task->stack + THREAD_SIZE) - 1;


@ -2,6 +2,7 @@
#ifndef _LINUX_STRING_HELPERS_H_
#define _LINUX_STRING_HELPERS_H_
#include <linux/ctype.h>
#include <linux/types.h>
struct file;
@ -72,6 +73,20 @@ static inline int string_escape_str_any_np(const char *src, char *dst,
return string_escape_str(src, dst, sz, ESCAPE_ANY_NP, only);
}
static inline void string_upper(char *dst, const char *src)
{
do {
*dst++ = toupper(*src);
} while (*src++);
}
static inline void string_lower(char *dst, const char *src)
{
do {
*dst++ = tolower(*src);
} while (*src++);
}
char *kstrdup_quotable(const char *src, gfp_t gfp);
char *kstrdup_quotable_cmdline(struct task_struct *task, gfp_t gfp);
char *kstrdup_quotable_file(struct file *file, gfp_t gfp);

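The u_ether hunk earlier in this series calls string_upper() with the same buffer as source and destination to capitalise a freshly formatted MAC address; that works because the copy runs forward one byte at a time and includes the terminating NUL. A minimal standalone sketch of the same idea (plain C re-implementation for illustration; the cast is only needed for the user-space ctype.h API):

#include <ctype.h>
#include <stdio.h>

/* Same shape as the new inline helper in string_helpers.h. */
static void demo_string_upper(char *dst, const char *src)
{
	do {
		*dst++ = toupper((unsigned char)*src);
	} while (*src++);
}

int main(void)
{
	char host_addr[] = "4a6d520e99af";	/* hypothetical CDC host MAC */

	demo_string_upper(host_addr, host_addr);	/* in-place, as in u_ether */
	printf("%s\n", host_addr);			/* prints 4A6D520E99AF */
	return 0;
}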

@ -285,6 +285,11 @@ void usb_put_intf(struct usb_interface *intf);
#define USB_MAXINTERFACES 32
#define USB_MAXIADS (USB_MAXINTERFACES/2)
bool usb_check_bulk_endpoints(
const struct usb_interface *intf, const u8 *ep_addrs);
bool usb_check_int_endpoints(
const struct usb_interface *intf, const u8 *ep_addrs);
/*
* USB Resume Timer: Every Host controller driver should drive the resume
* signalling on the bus for the amount of time defined by this macro.


@ -57,7 +57,7 @@ struct ip6_tnl {
/* These fields used only by GRE */
__u32 i_seqno; /* The last seen seqno */
__u32 o_seqno; /* The last output seqno */
atomic_t o_seqno; /* The last output seqno */
int hlen; /* tun_hlen + encap_hlen */
int tun_hlen; /* Precalculated header length */
int encap_hlen; /* Encap header length (FOU,GUE) */


@ -113,7 +113,7 @@ struct ip_tunnel {
/* These four fields used only by GRE */
u32 i_seqno; /* The last seen seqno */
u32 o_seqno; /* The last output seqno */
atomic_t o_seqno; /* The last output seqno */
int tun_hlen; /* Precalculated header length */
/* These four fields used only by ERSPAN */


@ -191,14 +191,13 @@ static inline enum nft_registers nft_type_to_reg(enum nft_data_types type)
}
int nft_parse_u32_check(const struct nlattr *attr, int max, u32 *dest);
unsigned int nft_parse_register(const struct nlattr *attr);
int nft_dump_register(struct sk_buff *skb, unsigned int attr, unsigned int reg);
int nft_validate_register_load(enum nft_registers reg, unsigned int len);
int nft_validate_register_store(const struct nft_ctx *ctx,
enum nft_registers reg,
const struct nft_data *data,
enum nft_data_types type, unsigned int len);
int nft_parse_register_load(const struct nlattr *attr, u8 *sreg, u32 len);
int nft_parse_register_store(const struct nft_ctx *ctx,
const struct nlattr *attr, u8 *dreg,
const struct nft_data *data,
enum nft_data_types type, unsigned int len);
/**
* struct nft_userdata - user defined data associated with an object
@ -226,6 +225,10 @@ struct nft_set_elem {
u32 buf[NFT_DATA_VALUE_MAXLEN / sizeof(u32)];
struct nft_data val;
} key;
union {
u32 buf[NFT_DATA_VALUE_MAXLEN / sizeof(u32)];
struct nft_data val;
} data;
void *priv;
};


@ -21,13 +21,14 @@ void nf_tables_core_module_exit(void);
struct nft_cmp_fast_expr {
u32 data;
enum nft_registers sreg:8;
u32 mask;
u8 sreg;
u8 len;
};
struct nft_immediate_expr {
struct nft_data data;
enum nft_registers dreg:8;
u8 dreg;
u8 dlen;
};
@ -47,14 +48,14 @@ struct nft_payload {
enum nft_payload_bases base:8;
u8 offset;
u8 len;
enum nft_registers dreg:8;
u8 dreg;
};
struct nft_payload_set {
enum nft_payload_bases base:8;
u8 offset;
u8 len;
enum nft_registers sreg:8;
u8 sreg;
u8 csum_type;
u8 csum_offset;
u8 csum_flags;


@ -3,7 +3,7 @@
#define _NFT_FIB_H_
struct nft_fib {
enum nft_registers dreg:8;
u8 dreg;
u8 result;
u32 flags;
};


@ -4,8 +4,8 @@
struct nft_masq {
u32 flags;
enum nft_registers sreg_proto_min:8;
enum nft_registers sreg_proto_max:8;
u8 sreg_proto_min;
u8 sreg_proto_max;
};
extern const struct nla_policy nft_masq_policy[];


@ -3,8 +3,8 @@
#define _NFT_REDIR_H_
struct nft_redir {
enum nft_registers sreg_proto_min:8;
enum nft_registers sreg_proto_max:8;
u8 sreg_proto_min;
u8 sreg_proto_max;
u16 flags;
};


@ -1148,7 +1148,7 @@ struct proto {
unsigned int inuse_idx;
#endif
bool (*stream_memory_free)(const struct sock *sk);
bool (*stream_memory_free)(const struct sock *sk, int wake);
bool (*stream_memory_read)(const struct sock *sk);
/* Memory pressure */
void (*enter_memory_pressure)(struct sock *sk);
@ -1230,19 +1230,29 @@ static inline void sk_refcnt_debug_release(const struct sock *sk)
#define sk_refcnt_debug_release(sk) do { } while (0)
#endif /* SOCK_REFCNT_DEBUG */
static inline bool sk_stream_memory_free(const struct sock *sk)
static inline bool __sk_stream_memory_free(const struct sock *sk, int wake)
{
if (sk->sk_wmem_queued >= sk->sk_sndbuf)
return false;
return sk->sk_prot->stream_memory_free ?
sk->sk_prot->stream_memory_free(sk) : true;
sk->sk_prot->stream_memory_free(sk, wake) : true;
}
static inline bool sk_stream_memory_free(const struct sock *sk)
{
return __sk_stream_memory_free(sk, 0);
}
static inline bool __sk_stream_is_writeable(const struct sock *sk, int wake)
{
return sk_stream_wspace(sk) >= sk_stream_min_wspace(sk) &&
__sk_stream_memory_free(sk, wake);
}
static inline bool sk_stream_is_writeable(const struct sock *sk)
{
return sk_stream_wspace(sk) >= sk_stream_min_wspace(sk) &&
sk_stream_memory_free(sk);
return __sk_stream_is_writeable(sk, 0);
}
static inline int sk_under_cgroup_hierarchy(struct sock *sk,
@ -2424,7 +2434,7 @@ static inline void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
__sock_recv_ts_and_drops(msg, sk, skb);
else if (unlikely(sock_flag(sk, SOCK_TIMESTAMP)))
sock_write_timestamp(sk, skb->tstamp);
else if (unlikely(sk->sk_stamp == SK_DEFAULT_STAMP))
else if (unlikely(sock_read_timestamp(sk) == SK_DEFAULT_STAMP))
sock_write_timestamp(sk, 0);
}


@ -389,6 +389,7 @@ void tcp_update_metrics(struct sock *sk);
void tcp_init_metrics(struct sock *sk);
void tcp_metrics_init(void);
bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst);
void __tcp_close(struct sock *sk, long timeout);
void tcp_close(struct sock *sk, long timeout);
void tcp_init_sock(struct sock *sk);
void tcp_init_transfer(struct sock *sk, int bpf_op);
@ -1882,12 +1883,16 @@ static inline u32 tcp_notsent_lowat(const struct tcp_sock *tp)
return tp->notsent_lowat ?: READ_ONCE(net->ipv4.sysctl_tcp_notsent_lowat);
}
static inline bool tcp_stream_memory_free(const struct sock *sk)
/* @wake is one when sk_stream_write_space() calls us.
* This sends EPOLLOUT only if notsent_bytes is half the limit.
* This mimics the strategy used in sock_def_write_space().
*/
static inline bool tcp_stream_memory_free(const struct sock *sk, int wake)
{
const struct tcp_sock *tp = tcp_sk(sk);
u32 notsent_bytes = READ_ONCE(tp->write_seq) - tp->snd_nxt;
return notsent_bytes < tcp_notsent_lowat(tp);
return (notsent_bytes << wake) < tcp_notsent_lowat(tp);
}
#ifdef CONFIG_PROC_FS

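A quick worked example of the (notsent_bytes << wake) < tcp_notsent_lowat(tp) test above (plain C, hypothetical numbers): with a 64 KiB lowat, the poll path (wake == 0) reports writability once unsent data is below 64 KiB, while the write-space wakeup path (wake == 1) waits until it falls below 32 KiB, which is the halved-limit behaviour the comment describes:

#include <stdbool.h>
#include <stdio.h>

/* Mirrors the tcp_stream_memory_free() test with made-up values. */
static bool memory_free(unsigned int notsent_bytes, unsigned int lowat, int wake)
{
	return (notsent_bytes << wake) < lowat;
}

int main(void)
{
	const unsigned int lowat = 64 * 1024;	/* assumed notsent_lowat */
	const unsigned int samples[] = { 20 * 1024, 40 * 1024, 70 * 1024 };
	unsigned int i;

	for (i = 0; i < 3; i++)
		printf("notsent=%2u KiB  poll(wake=0):%d  wakeup(wake=1):%d\n",
		       samples[i] / 1024,
		       memory_free(samples[i], lowat, 0),
		       memory_free(samples[i], lowat, 1));
	return 0;
}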

@ -64,7 +64,8 @@ enum skl_ch_cfg {
SKL_CH_CFG_DUAL_MONO = 9,
SKL_CH_CFG_I2S_DUAL_STEREO_0 = 10,
SKL_CH_CFG_I2S_DUAL_STEREO_1 = 11,
SKL_CH_CFG_4_CHANNEL = 12,
SKL_CH_CFG_7_1 = 12,
SKL_CH_CFG_4_CHANNEL = SKL_CH_CFG_7_1,
SKL_CH_CFG_INVALID
};


@ -5936,7 +5936,7 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
insn_buf[cnt++] = BPF_ALU64_IMM(BPF_RSH,
insn->dst_reg,
shift);
insn_buf[cnt++] = BPF_ALU64_IMM(BPF_AND, insn->dst_reg,
insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg,
(1ULL << size * 8) - 1);
}
}


@ -235,7 +235,8 @@ void free_irq_cpu_rmap(struct cpu_rmap *rmap)
for (index = 0; index < rmap->used; index++) {
glue = rmap->obj[index];
irq_set_affinity_notifier(glue->notify.irq, NULL);
if (glue)
irq_set_affinity_notifier(glue->notify.irq, NULL);
}
cpu_rmap_put(rmap);
@ -271,6 +272,7 @@ static void irq_cpu_rmap_release(struct kref *ref)
container_of(ref, struct irq_glue, notify.kref);
cpu_rmap_put(glue->rmap);
glue->rmap->obj[glue->index] = NULL;
kfree(glue);
}
@ -300,6 +302,7 @@ int irq_cpu_rmap_add(struct cpu_rmap *rmap, int irq)
rc = irq_set_affinity_notifier(irq, &glue->notify);
if (rc) {
cpu_rmap_put(glue->rmap);
rmap->obj[glue->index] = NULL;
kfree(glue);
}
return rc;


@ -115,8 +115,8 @@ static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb,
* NOTE: THIS ASSUMES DIX ETHERNET, SPECIFICALLY NOT SUPPORTING
* OTHER THINGS LIKE FDDI/TokenRing/802.3 SNAPs...
*/
if (veth->h_vlan_proto != vlan->vlan_proto ||
vlan->flags & VLAN_FLAG_REORDER_HDR) {
if (vlan->flags & VLAN_FLAG_REORDER_HDR ||
veth->h_vlan_proto != vlan->vlan_proto) {
u16 vlan_tci;
vlan_tci = vlan->vlan_id;
vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb->priority);


@ -4392,7 +4392,6 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
chan = l2cap_get_chan_by_scid(conn, scid);
if (!chan) {
mutex_unlock(&conn->chan_lock);
return 0;
}


@ -837,18 +837,21 @@ __poll_t datagram_poll(struct file *file, struct socket *sock,
{
struct sock *sk = sock->sk;
__poll_t mask;
u8 shutdown;
sock_poll_wait(file, sock, wait);
mask = 0;
/* exceptional events? */
if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue))
if (READ_ONCE(sk->sk_err) ||
!skb_queue_empty_lockless(&sk->sk_error_queue))
mask |= EPOLLERR |
(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);
if (sk->sk_shutdown & RCV_SHUTDOWN)
shutdown = READ_ONCE(sk->sk_shutdown);
if (shutdown & RCV_SHUTDOWN)
mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
if (sk->sk_shutdown == SHUTDOWN_MASK)
if (shutdown == SHUTDOWN_MASK)
mask |= EPOLLHUP;
/* readable? */
@ -857,10 +860,12 @@ __poll_t datagram_poll(struct file *file, struct socket *sock,
/* Connection-based need to check for termination and startup */
if (connection_based(sk)) {
if (sk->sk_state == TCP_CLOSE)
int state = READ_ONCE(sk->sk_state);
if (state == TCP_CLOSE)
mask |= EPOLLHUP;
/* connection hasn't started yet? */
if (sk->sk_state == TCP_SYN_SENT)
if (state == TCP_SYN_SENT)
return mask;
}


@ -2303,6 +2303,8 @@ int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
bool active = false;
unsigned int nr_ids;
WARN_ON_ONCE(index >= dev->num_tx_queues);
if (dev->num_tc) {
/* Do not allow XPS on subordinate device directly */
num_tc = dev->num_tc;