This is the 4.19.296 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmUlqccACgkQONu9yGCS
 aT4gqQ/+MgTjBf9wQ4KfZI5RZaQJhRVuubL58TmHD0HMkGhaQhPgsswQ7Sil5nQ2
 889+PhxTJ6p9tNFYdYz/urv0qM197vWFOpWvkKqBlLLkHEIU14e7OiLdZuydYPVV
 iyFXNrEr5xVerYo7tTsmuOYNzgArwVxmEa/GNlpy/AJl7uP/wxt5g8sbChziM1k2
 erSmRrBp0tCG2xVjLWx1LEIWqmB11rTuP0Kl5j86THnS5czzCmdQyvWypMDB+M1o
 UX6SF1bFMdvh59ultJQN+SYfq+HSo66xxKNDCRRiqvBi2BvBOYKwnDZYwLuf9H8/
 ELOQ//RbWv42wrhosoj637748CwWlgJQCNYR1RiV09CA/bHqlKDwfZM7sUbzeebM
 5/Z+ODM/WtJ1/jdbvu1KkkurVLFaKGOmDKefiosZt+4KMXPbyy6jg6J6/moLZqJ8
 hbym4x8n6KWYMBrvxQt9Ukyo/SBkcoFAJfCdks1hqtkEL7L+VAxaC1mfUqcNzhlY
 RXopvFhEoMlBQ2pOQzK1lDy2m3rZS+md5UUO8G+DZ0keerK7oKVLKVstBTBzx++k
 d2SZ7ijRHqqvSfCYbtNrzgBdc06Ou9zT5vOK9KuWR5CQxIwW3NTu23umg7AmMcdT
 WkdxqcpO1YZCCbH9oK40ynbP4Ap0fYzZ0SGIoNuclknGX+NJ1E0=
 =LBDu
 -----END PGP SIGNATURE-----

Merge 4.19.296 into android-4.19-stable

Changes in 4.19.296
	NFS/pNFS: Report EINVAL errors from connect() to the server
	ata: ahci: Drop pointless VPRINTK() calls and convert the remaining ones
	ata: libahci: clear pending interrupt status
	netfilter: nf_tables: disallow element removal on anonymous sets
	selftests/tls: Add {} to avoid static checker warning
	selftests: tls: swap the TX and RX sockets in some tests
	ipv4: fix null-deref in ipv4_link_failure
	powerpc/perf/hv-24x7: Update domain value check
	net: hns3: add 5ms delay before clear firmware reset irq source
	net: add atomic_long_t to net_device_stats fields
	net: bridge: use DEV_STATS_INC()
	team: fix null-ptr-deref when team device type is changed
	gpio: tb10x: Fix an error handling path in tb10x_gpio_probe()
	i2c: mux: demux-pinctrl: check the return value of devm_kstrdup()
	Input: i8042 - add quirk for TUXEDO Gemini 17 Gen1/Clevo PD70PN
	scsi: qla2xxx: Add protection mask module parameters
	scsi: qla2xxx: Remove unsupported ql2xenabledif option
	scsi: megaraid_sas: Load balance completions across all MSI-X
	scsi: megaraid_sas: Fix deadlock on firmware crashdump
	ext4: remove the 'group' parameter of ext4_trim_extent
	ext4: add new helper interface ext4_try_to_trim_range()
	ext4: scope ret locally in ext4_try_to_trim_range()
	ext4: change s_last_trim_minblks type to unsigned long
	ext4: mark group as trimmed only if it was fully scanned
	ext4: replace the traditional ternary conditional operator with max()/min()
	ext4: move setting of trimmed bit into ext4_try_to_trim_range()
	ext4: do not let fstrim block system suspend
	MIPS: Alchemy: only build mmc support helpers if au1xmmc is enabled
	clk: tegra: fix error return case for recalc_rate
	ARM: dts: ti: omap: motorola-mapphone: Fix abe_clkctrl warning on boot
	gpio: pmic-eic-sprd: Add can_sleep flag for PMIC EIC chip
	parisc: sba: Fix compile warning wrt list of SBA devices
	parisc: iosapic.c: Fix sparse warnings
	parisc: drivers: Fix sparse warning
	parisc: irq: Make irq_stack_union static to avoid sparse warning
	selftests/ftrace: Correctly enable event in instance-event.tc
	ring-buffer: Avoid softlockup in ring_buffer_resize()
	ata: libata-eh: do not clear ATA_PFLAG_EH_PENDING in ata_eh_reset()
	bpf: Clarify error expectations from bpf_clone_redirect
	fbdev/sh7760fb: Depend on FB=y
	nvme-pci: do not set the NUMA node of device if it has none
	watchdog: iTCO_wdt: No need to stop the timer in probe
	watchdog: iTCO_wdt: Set NO_REBOOT if the watchdog is not already running
	net: Fix unwanted sign extension in netdev_stats_to_stats64()
	scsi: megaraid_sas: Enable msix_load_balance for Invader and later controllers
	Smack:- Use overlay inode label in smack_inode_copy_up()
	smack: Retrieve transmuting information in smack_inode_getsecurity()
	smack: Record transmuting in smk_transmuted
	serial: 8250_port: Check IRQ data before use
	nilfs2: fix potential use after free in nilfs_gccache_submit_read_data()
	ALSA: hda: Disable power save for solving pop issue on Lenovo ThinkCentre M70q
	ata: libata-scsi: ignore reserved bits for REPORT SUPPORTED OPERATION CODES
	i2c: i801: unregister tco_pdev in i801_probe() error path
	btrfs: properly report 0 avail for very full file systems
	net: thunderbolt: Fix TCPv6 GSO checksum calculation
	ata: libata-core: Fix ata_port_request_pm() locking
	ata: libata-core: Fix port and device removal
	ata: libata-core: Do not register PM operations for SAS ports
	ata: libata-sata: increase PMP SRST timeout to 10s
	fs: binfmt_elf_efpic: fix personality for ELF-FDPIC
	ext4: fix rec_len verify error
	ata: libata: disallow dev-initiated LPM transitions to unsupported states
	Revert "drivers core: Use sysfs_emit and sysfs_emit_at for show(device *...) functions"
	media: dvb: symbol fixup for dvb_attach() - again
	Revert "PCI: qcom: Disable write access to read only registers for IP v2.3.3"
	scsi: zfcp: Fix a double put in zfcp_port_enqueue()
	qed/red_ll2: Fix undefined behavior bug in struct qed_ll2_info
	wifi: mwifiex: Fix tlv_buf_left calculation
	net: replace calls to sock->ops->connect() with kernel_connect()
	ubi: Refuse attaching if mtd's erasesize is 0
	wifi: mwifiex: Fix oob check condition in mwifiex_process_rx_packet
	drivers/net: process the result of hdlc_open() and add call of hdlc_close() in uhdlc_close()
	regmap: rbtree: Fix wrong register marked as in-cache when creating new node
	scsi: target: core: Fix deadlock due to recursive locking
	modpost: add missing else to the "of" check
	ipv4, ipv6: Fix handling of transhdrlen in __ip{,6}_append_data()
	net: usb: smsc75xx: Fix uninit-value access in __smsc75xx_read_reg
	net: stmmac: dwmac-stm32: fix resume on STM32 MCU
	tcp: fix quick-ack counting to count actual ACKs of new data
	tcp: fix delayed ACKs for MSS boundary condition
	sctp: update transport state when processing a dupcook packet
	sctp: update hb timer immediately after users change hb_interval
	cpupower: add Makefile dependencies for install targets
	IB/mlx4: Fix the size of a buffer in add_port_entries()
	gpio: aspeed: fix the GPIO number passed to pinctrl_gpio_set_config()
	gpio: pxa: disable pinctrl calls for MMP_GPIO
	RDMA/cma: Fix truncation compilation warning in make_cma_ports
	RDMA/mlx5: Fix NULL string error
	parisc: Restore __ldcw_align for PA-RISC 2.0 processors
	dccp: fix dccp_v4_err()/dccp_v6_err() again
	Revert "rtnetlink: Reject negative ifindexes in RTM_NEWLINK"
	rtnetlink: Reject negative ifindexes in RTM_NEWLINK
	xen/events: replace evtchn_rwlock with RCU
	Linux 4.19.296

Change-Id: I4f76959ead91691fe9a242cb5b158fc8dc67bf39
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
commit 7f2e810705
Greg Kroah-Hartman 2023-10-11 19:17:39 +00:00
109 changed files with 749 additions and 492 deletions

@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 4
PATCHLEVEL = 19
SUBLEVEL = 295
SUBLEVEL = 296
EXTRAVERSION =
NAME = "People's Front"

@@ -647,12 +647,12 @@
/* Configure pwm clock source for timers 8 & 9 */
&timer8 {
assigned-clocks = <&abe_clkctrl OMAP4_TIMER8_CLKCTRL 24>;
assigned-clock-parents = <&sys_clkin_ck>;
assigned-clock-parents = <&sys_32k_ck>;
};
&timer9 {
assigned-clocks = <&l4_per_clkctrl OMAP4_TIMER9_CLKCTRL 24>;
assigned-clock-parents = <&sys_clkin_ck>;
assigned-clock-parents = <&sys_32k_ck>;
};
/*

@@ -173,6 +173,7 @@ static struct platform_device db1x00_audio_dev = {
/******************************************************************************/
#ifdef CONFIG_MMC_AU1X
static irqreturn_t db1100_mmc_cd(int irq, void *ptr)
{
mmc_detect_change(ptr, msecs_to_jiffies(500));
@@ -380,6 +381,7 @@ static struct platform_device db1100_mmc1_dev = {
.num_resources = ARRAY_SIZE(au1100_mmc1_res),
.resource = au1100_mmc1_res,
};
#endif /* CONFIG_MMC_AU1X */
/******************************************************************************/
@@ -497,9 +499,11 @@ static struct platform_device *db1000_devs[] = {
static struct platform_device *db1100_devs[] = {
&au1100_lcd_device,
#ifdef CONFIG_MMC_AU1X
&db1100_mmc0_dev,
&db1100_mmc1_dev,
&db1000_irda_dev,
#endif
};
int __init db1000_dev_setup(void)

@@ -341,6 +341,7 @@ static struct platform_device db1200_ide_dev = {
/**********************************************************************/
#ifdef CONFIG_MMC_AU1X
/* SD carddetects: they're supposed to be edge-triggered, but ack
* doesn't seem to work (CPLD Rev 2). Instead, the screaming one
* is disabled and its counterpart enabled. The 200ms timeout is
@@ -601,6 +602,7 @@ static struct platform_device pb1200_mmc1_dev = {
.num_resources = ARRAY_SIZE(au1200_mmc1_res),
.resource = au1200_mmc1_res,
};
#endif /* CONFIG_MMC_AU1X */
/**********************************************************************/
@@ -768,7 +770,9 @@ static struct platform_device db1200_audiodma_dev = {
static struct platform_device *db1200_devs[] __initdata = {
NULL, /* PSC0, selected by S6.8 */
&db1200_ide_dev,
#ifdef CONFIG_MMC_AU1X
&db1200_mmc0_dev,
#endif
&au1200_lcd_dev,
&db1200_eth_dev,
&db1200_nand_dev,
@@ -779,7 +783,9 @@ static struct platform_device *db1200_devs[] __initdata = {
};
static struct platform_device *pb1200_devs[] __initdata = {
#ifdef CONFIG_MMC_AU1X
&pb1200_mmc1_dev,
#endif
};
/* Some peripheral base addresses differ on the PB1200 */

@@ -448,6 +448,7 @@ static struct platform_device db1300_ide_dev = {
/**********************************************************************/
#ifdef CONFIG_MMC_AU1X
static irqreturn_t db1300_mmc_cd(int irq, void *ptr)
{
disable_irq_nosync(irq);
@@ -626,6 +627,7 @@ static struct platform_device db1300_sd0_dev = {
.resource = au1300_sd0_res,
.num_resources = ARRAY_SIZE(au1300_sd0_res),
};
#endif /* CONFIG_MMC_AU1X */
/**********************************************************************/
@@ -756,8 +758,10 @@ static struct platform_device *db1300_dev[] __initdata = {
&db1300_5waysw_dev,
&db1300_nand_dev,
&db1300_ide_dev,
#ifdef CONFIG_MMC_AU1X
&db1300_sd0_dev,
&db1300_sd1_dev,
#endif
&db1300_lcd_dev,
&db1300_ac97_dev,
&db1300_i2s_dev,

@@ -2,14 +2,28 @@
#ifndef __PARISC_LDCW_H
#define __PARISC_LDCW_H
#ifndef CONFIG_PA20
/* Because kmalloc only guarantees 8-byte alignment for kmalloc'd data,
and GCC only guarantees 8-byte alignment for stack locals, we can't
be assured of 16-byte alignment for atomic lock data even if we
specify "__attribute ((aligned(16)))" in the type declaration. So,
we use a struct containing an array of four ints for the atomic lock
type and dynamically select the 16-byte aligned int from the array
for the semaphore. */
for the semaphore. */
/* From: "Jim Hull" <jim.hull of hp.com>
I've attached a summary of the change, but basically, for PA 2.0, as
long as the ",CO" (coherent operation) completer is implemented, then the
16-byte alignment requirement for ldcw and ldcd is relaxed, and instead
they only require "natural" alignment (4-byte for ldcw, 8-byte for
ldcd).
Although the cache control hint is accepted by all PA 2.0 processors,
it is only implemented on PA8800/PA8900 CPUs. Prior PA8X00 CPUs still
require 16-byte alignment. If the address is unaligned, the operation
of the instruction is undefined. The ldcw instruction does not generate
unaligned data reference traps so misaligned accesses are not detected.
This hid the problem for years. So, restore the 16-byte alignment dropped
by Kyle McMartin in "Remove __ldcw_align for PA-RISC 2.0 processors". */
#define __PA_LDCW_ALIGNMENT 16
#define __PA_LDCW_ALIGN_ORDER 4
@@ -19,22 +33,12 @@
& ~(__PA_LDCW_ALIGNMENT - 1); \
(volatile unsigned int *) __ret; \
})
#define __LDCW "ldcw"
#else /*CONFIG_PA20*/
/* From: "Jim Hull" <jim.hull of hp.com>
I've attached a summary of the change, but basically, for PA 2.0, as
long as the ",CO" (coherent operation) completer is specified, then the
16-byte alignment requirement for ldcw and ldcd is relaxed, and instead
they only require "natural" alignment (4-byte for ldcw, 8-byte for
ldcd). */
#define __PA_LDCW_ALIGNMENT 4
#define __PA_LDCW_ALIGN_ORDER 2
#define __ldcw_align(a) (&(a)->slock)
#ifdef CONFIG_PA20
#define __LDCW "ldcw,co"
#endif /*!CONFIG_PA20*/
#else
#define __LDCW "ldcw"
#endif
/* LDCW, the only atomic read-write operation PA-RISC has. *sigh*.
We don't explicitly expose that "*a" may be written as reload

@@ -86,6 +86,9 @@ struct sba_device {
struct ioc ioc[MAX_IOC];
};
/* list of SBA's in system, see drivers/parisc/sba_iommu.c */
extern struct sba_device *sba_list;
#define ASTRO_RUNWAY_PORT 0x582
#define IKE_MERCED_PORT 0x803
#define REO_MERCED_PORT 0x804

@@ -3,13 +3,8 @@
#define __ASM_SPINLOCK_TYPES_H
typedef struct {
#ifdef CONFIG_PA20
volatile unsigned int slock;
# define __ARCH_SPIN_LOCK_UNLOCKED { 1 }
#else
volatile unsigned int lock[4];
# define __ARCH_SPIN_LOCK_UNLOCKED { { 1, 1, 1, 1 } }
#endif
} arch_spinlock_t;
typedef struct {

@@ -903,9 +903,9 @@ static __init void qemu_header(void)
pr_info("#define PARISC_MODEL \"%s\"\n\n",
boot_cpu_data.pdc.sys_model_name);
#define p ((unsigned long *)&boot_cpu_data.pdc.model)
pr_info("#define PARISC_PDC_MODEL 0x%lx, 0x%lx, 0x%lx, "
"0x%lx, 0x%lx, 0x%lx, 0x%lx, 0x%lx, 0x%lx\n\n",
#define p ((unsigned long *)&boot_cpu_data.pdc.model)
p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7], p[8]);
#undef p

@@ -392,7 +392,7 @@ union irq_stack_union {
volatile unsigned int lock[1];
};
DEFINE_PER_CPU(union irq_stack_union, irq_stack_union) = {
static DEFINE_PER_CPU(union irq_stack_union, irq_stack_union) = {
.slock = { 1,1,1,1 },
};
#endif

@@ -1326,7 +1326,7 @@ static int h_24x7_event_init(struct perf_event *event)
}
domain = event_get_domain(event);
if (domain >= HV_PERF_DOMAIN_MAX) {
if (domain == 0 || domain >= HV_PERF_DOMAIN_MAX) {
pr_devel("invalid domain %d\n", domain);
return -EINVAL;
}

@@ -694,7 +694,7 @@ static void ahci_pci_init_controller(struct ata_host *host)
/* clear port IRQ */
tmp = readl(port_mmio + PORT_IRQ_STAT);
VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
dev_dbg(&pdev->dev, "PORT_IRQ_STAT 0x%x\n", tmp);
if (tmp)
writel(tmp, port_mmio + PORT_IRQ_STAT);
}
@@ -1504,7 +1504,6 @@ static irqreturn_t ahci_thunderx_irq_handler(int irq, void *dev_instance)
u32 irq_stat, irq_masked;
unsigned int handled = 1;
VPRINTK("ENTER\n");
hpriv = host->private_data;
mmio = hpriv->mmio;
irq_stat = readl(mmio + HOST_IRQ_STAT);
@@ -1521,7 +1520,6 @@
irq_stat = readl(mmio + HOST_IRQ_STAT);
spin_unlock(&host->lock);
} while (irq_stat);
VPRINTK("EXIT\n");
return IRQ_RETVAL(handled);
}
@@ -1866,6 +1864,15 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
else
dev_info(&pdev->dev, "SSS flag set, parallel bus scan disabled\n");
if (!(hpriv->cap & HOST_CAP_PART))
host->flags |= ATA_HOST_NO_PART;
if (!(hpriv->cap & HOST_CAP_SSC))
host->flags |= ATA_HOST_NO_SSC;
if (!(hpriv->cap2 & HOST_CAP2_SDS))
host->flags |= ATA_HOST_NO_DEVSLP;
if (pi.flags & ATA_FLAG_EM)
ahci_reset_em(host);

@@ -601,8 +601,6 @@ static irqreturn_t xgene_ahci_irq_intr(int irq, void *dev_instance)
void __iomem *mmio;
u32 irq_stat, irq_masked;
VPRINTK("ENTER\n");
hpriv = host->private_data;
mmio = hpriv->mmio;
@@ -625,8 +623,6 @@
spin_unlock(&host->lock);
VPRINTK("EXIT\n");
return IRQ_RETVAL(rc);
}

@@ -1210,6 +1210,26 @@ static ssize_t ahci_activity_show(struct ata_device *dev, char *buf)
return sprintf(buf, "%d\n", emp->blink_policy);
}
static void ahci_port_clear_pending_irq(struct ata_port *ap)
{
struct ahci_host_priv *hpriv = ap->host->private_data;
void __iomem *port_mmio = ahci_port_base(ap);
u32 tmp;
/* clear SError */
tmp = readl(port_mmio + PORT_SCR_ERR);
dev_dbg(ap->host->dev, "PORT_SCR_ERR 0x%x\n", tmp);
writel(tmp, port_mmio + PORT_SCR_ERR);
/* clear port IRQ */
tmp = readl(port_mmio + PORT_IRQ_STAT);
dev_dbg(ap->host->dev, "PORT_IRQ_STAT 0x%x\n", tmp);
if (tmp)
writel(tmp, port_mmio + PORT_IRQ_STAT);
writel(1 << ap->port_no, hpriv->mmio + HOST_IRQ_STAT);
}
static void ahci_port_init(struct device *dev, struct ata_port *ap,
int port_no, void __iomem *mmio,
void __iomem *port_mmio)
@@ -1224,18 +1244,7 @@ static void ahci_port_init(struct device *dev, struct ata_port *ap,
if (rc)
dev_warn(dev, "%s (%d)\n", emsg, rc);
/* clear SError */
tmp = readl(port_mmio + PORT_SCR_ERR);
VPRINTK("PORT_SCR_ERR 0x%x\n", tmp);
writel(tmp, port_mmio + PORT_SCR_ERR);
/* clear port IRQ */
tmp = readl(port_mmio + PORT_IRQ_STAT);
VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
if (tmp)
writel(tmp, port_mmio + PORT_IRQ_STAT);
writel(1 << port_no, mmio + HOST_IRQ_STAT);
ahci_port_clear_pending_irq(ap);
/* mark esata ports */
tmp = readl(port_mmio + PORT_CMD);
@@ -1262,10 +1271,10 @@ void ahci_init_controller(struct ata_host *host)
}
tmp = readl(mmio + HOST_CTL);
VPRINTK("HOST_CTL 0x%x\n", tmp);
dev_dbg(host->dev, "HOST_CTL 0x%x\n", tmp);
writel(tmp | HOST_IRQ_EN, mmio + HOST_CTL);
tmp = readl(mmio + HOST_CTL);
VPRINTK("HOST_CTL 0x%x\n", tmp);
dev_dbg(host->dev, "HOST_CTL 0x%x\n", tmp);
}
EXPORT_SYMBOL_GPL(ahci_init_controller);
@@ -1565,6 +1574,8 @@ int ahci_do_hardreset(struct ata_link *link, unsigned int *class,
tf.command = ATA_BUSY;
ata_tf_to_fis(&tf, 0, 0, d2h_fis);
ahci_port_clear_pending_irq(ap);
rc = sata_link_hardreset(link, timing, deadline, online,
ahci_check_ready);
@@ -1916,8 +1927,6 @@ static irqreturn_t ahci_multi_irqs_intr_hard(int irq, void *dev_instance)
void __iomem *port_mmio = ahci_port_base(ap);
u32 status;
VPRINTK("ENTER\n");
status = readl(port_mmio + PORT_IRQ_STAT);
writel(status, port_mmio + PORT_IRQ_STAT);
@@ -1925,8 +1934,6 @@
ahci_handle_port_interrupt(ap, port_mmio, status);
spin_unlock(ap->lock);
VPRINTK("EXIT\n");
return IRQ_HANDLED;
}
@@ -1943,9 +1950,7 @@ u32 ahci_handle_port_intr(struct ata_host *host, u32 irq_masked)
ap = host->ports[i];
if (ap) {
ahci_port_intr(ap);
VPRINTK("port %u\n", i);
} else {
VPRINTK("port %u (no irq)\n", i);
if (ata_ratelimit())
dev_warn(host->dev,
"interrupt on disabled port %u\n", i);
@@ -1966,8 +1971,6 @@ static irqreturn_t ahci_single_level_irq_intr(int irq, void *dev_instance)
void __iomem *mmio;
u32 irq_stat, irq_masked;
VPRINTK("ENTER\n");
hpriv = host->private_data;
mmio = hpriv->mmio;
@@ -1995,8 +1998,6 @@
spin_unlock(&host->lock);
VPRINTK("EXIT\n");
return IRQ_RETVAL(rc);
}

@@ -3997,10 +3997,23 @@ int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy,
case ATA_LPM_MED_POWER_WITH_DIPM:
case ATA_LPM_MIN_POWER_WITH_PARTIAL:
case ATA_LPM_MIN_POWER:
if (ata_link_nr_enabled(link) > 0)
/* no restrictions on LPM transitions */
if (ata_link_nr_enabled(link) > 0) {
/* assume no restrictions on LPM transitions */
scontrol &= ~(0x7 << 8);
else {
/*
* If the controller does not support partial, slumber,
* or devsleep, then disallow these transitions.
*/
if (link->ap->host->flags & ATA_HOST_NO_PART)
scontrol |= (0x1 << 8);
if (link->ap->host->flags & ATA_HOST_NO_SSC)
scontrol |= (0x2 << 8);
if (link->ap->host->flags & ATA_HOST_NO_DEVSLP)
scontrol |= (0x4 << 8);
} else {
/* empty port, power off */
scontrol &= ~0xf;
scontrol |= (0x1 << 2);
@@ -5756,17 +5769,19 @@ static void ata_port_request_pm(struct ata_port *ap, pm_message_t mesg,
struct ata_link *link;
unsigned long flags;
/* Previous resume operation might still be in
* progress. Wait for PM_PENDING to clear.
*/
if (ap->pflags & ATA_PFLAG_PM_PENDING) {
ata_port_wait_eh(ap);
WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
}
/* request PM ops to EH */
spin_lock_irqsave(ap->lock, flags);
/*
* A previous PM operation might still be in progress. Wait for
* ATA_PFLAG_PM_PENDING to clear.
*/
if (ap->pflags & ATA_PFLAG_PM_PENDING) {
spin_unlock_irqrestore(ap->lock, flags);
ata_port_wait_eh(ap);
spin_lock_irqsave(ap->lock, flags);
}
/* Request PM operation to EH */
ap->pm_mesg = mesg;
ap->pflags |= ATA_PFLAG_PM_PENDING;
ata_for_each_link(link, ap, HOST_FIRST) {
@@ -5778,10 +5793,8 @@ static void ata_port_request_pm(struct ata_port *ap, pm_message_t mesg,
spin_unlock_irqrestore(ap->lock, flags);
if (!async) {
if (!async)
ata_port_wait_eh(ap);
WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
}
}
/*
@@ -5947,7 +5960,7 @@ void ata_host_resume(struct ata_host *host)
#endif
const struct device_type ata_port_type = {
.name = "ata_port",
.name = ATA_PORT_TYPE_NAME,
#ifdef CONFIG_PM
.pm = &ata_port_pm_ops,
#endif
@@ -6750,11 +6763,30 @@ static void ata_port_detach(struct ata_port *ap)
if (!ap->ops->error_handler)
goto skip_eh;
/* tell EH we're leaving & flush EH */
/* Wait for any ongoing EH */
ata_port_wait_eh(ap);
mutex_lock(&ap->scsi_scan_mutex);
spin_lock_irqsave(ap->lock, flags);
/* Remove scsi devices */
ata_for_each_link(link, ap, HOST_FIRST) {
ata_for_each_dev(dev, link, ALL) {
if (dev->sdev) {
spin_unlock_irqrestore(ap->lock, flags);
scsi_remove_device(dev->sdev);
spin_lock_irqsave(ap->lock, flags);
dev->sdev = NULL;
}
}
}
/* Tell EH to disable all devices */
ap->pflags |= ATA_PFLAG_UNLOADING;
ata_port_schedule_eh(ap);
spin_unlock_irqrestore(ap->lock, flags);
mutex_unlock(&ap->scsi_scan_mutex);
/* wait till EH commits suicide */
ata_port_wait_eh(ap);

@@ -2922,18 +2922,11 @@ int ata_eh_reset(struct ata_link *link, int classify,
postreset(slave, classes);
}
/*
* Some controllers can't be frozen very well and may set spurious
* error conditions during reset. Clear accumulated error
* information and re-thaw the port if frozen. As reset is the
* final recovery action and we cross check link onlineness against
* device classification later, no hotplug event is lost by this.
*/
/* clear cached SError */
spin_lock_irqsave(link->ap->lock, flags);
memset(&link->eh_info, 0, sizeof(link->eh_info));
link->eh_info.serror = 0;
if (slave)
memset(&slave->eh_info, 0, sizeof(link->eh_info));
ap->pflags &= ~ATA_PFLAG_EH_PENDING;
slave->eh_info.serror = 0;
spin_unlock_irqrestore(link->ap->lock, flags);
if (ap->pflags & ATA_PFLAG_FROZEN)

@@ -4561,7 +4561,7 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd)
break;
case MAINTENANCE_IN:
if (scsicmd[1] == MI_REPORT_SUPPORTED_OPERATION_CODES)
if ((scsicmd[1] & 0x1f) == MI_REPORT_SUPPORTED_OPERATION_CODES)
ata_scsi_rbuf_fill(&args, ata_scsiop_maint_in);
else
ata_scsi_set_invalid_field(dev, cmd, 1, 0xff);

@@ -266,6 +266,10 @@ void ata_tport_delete(struct ata_port *ap)
put_device(dev);
}
static const struct device_type ata_port_sas_type = {
.name = ATA_PORT_TYPE_NAME,
};
/** ata_tport_add - initialize a transport ATA port structure
*
* @parent: parent device
@@ -283,7 +287,10 @@ int ata_tport_add(struct device *parent,
struct device *dev = &ap->tdev;
device_initialize(dev);
dev->type = &ata_port_type;
if (ap->flags & ATA_FLAG_SAS_HOST)
dev->type = &ata_port_sas_type;
else
dev->type = &ata_port_type;
dev->parent = parent;
ata_host_get(ap->host);

@@ -46,6 +46,8 @@ enum {
ATA_DNXFER_QUIET = (1 << 31),
};
#define ATA_PORT_TYPE_NAME "ata_port"
extern atomic_t ata_print_id;
extern int atapi_passthru16;
extern int libata_fua;

@@ -68,7 +68,7 @@ static ssize_t cpu_capacity_show(struct device *dev,
{
struct cpu *cpu = container_of(dev, struct cpu, dev);
return sysfs_emit(buf, "%lu\n", topology_get_cpu_scale(NULL, cpu->dev.id));
return sprintf(buf, "%lu\n", topology_get_cpu_scale(NULL, cpu->dev.id));
}
static void update_topology_flags_workfn(struct work_struct *work);

@@ -372,7 +372,7 @@ static ssize_t size_show(struct device *dev,
{
struct cacheinfo *this_leaf = dev_get_drvdata(dev);
return sysfs_emit(buf, "%uK\n", this_leaf->size >> 10);
return sprintf(buf, "%uK\n", this_leaf->size >> 10);
}
static ssize_t shared_cpumap_show_func(struct device *dev, bool list, char *buf)
@@ -402,11 +402,11 @@ static ssize_t type_show(struct device *dev,
switch (this_leaf->type) {
case CACHE_TYPE_DATA:
return sysfs_emit(buf, "Data\n");
return sprintf(buf, "Data\n");
case CACHE_TYPE_INST:
return sysfs_emit(buf, "Instruction\n");
return sprintf(buf, "Instruction\n");
case CACHE_TYPE_UNIFIED:
return sysfs_emit(buf, "Unified\n");
return sprintf(buf, "Unified\n");
default:
return -EINVAL;
}
@@ -420,11 +420,11 @@ static ssize_t allocation_policy_show(struct device *dev,
int n = 0;
if ((ci_attr & CACHE_READ_ALLOCATE) && (ci_attr & CACHE_WRITE_ALLOCATE))
n = sysfs_emit(buf, "ReadWriteAllocate\n");
n = sprintf(buf, "ReadWriteAllocate\n");
else if (ci_attr & CACHE_READ_ALLOCATE)
n = sysfs_emit(buf, "ReadAllocate\n");
n = sprintf(buf, "ReadAllocate\n");
else if (ci_attr & CACHE_WRITE_ALLOCATE)
n = sysfs_emit(buf, "WriteAllocate\n");
n = sprintf(buf, "WriteAllocate\n");
return n;
}
@@ -436,9 +436,9 @@ static ssize_t write_policy_show(struct device *dev,
int n = 0;
if (ci_attr & CACHE_WRITE_THROUGH)
n = sysfs_emit(buf, "WriteThrough\n");
n = sprintf(buf, "WriteThrough\n");
else if (ci_attr & CACHE_WRITE_BACK)
n = sysfs_emit(buf, "WriteBack\n");
n = sprintf(buf, "WriteBack\n");
return n;
}

@@ -1416,7 +1416,7 @@ ssize_t device_show_ulong(struct device *dev,
char *buf)
{
struct dev_ext_attribute *ea = to_ext_attr(attr);
return sysfs_emit(buf, "%lx\n", *(unsigned long *)(ea->var));
return snprintf(buf, PAGE_SIZE, "%lx\n", *(unsigned long *)(ea->var));
}
EXPORT_SYMBOL_GPL(device_show_ulong);
@@ -1441,7 +1441,7 @@ ssize_t device_show_int(struct device *dev,
{
struct dev_ext_attribute *ea = to_ext_attr(attr);
return sysfs_emit(buf, "%d\n", *(int *)(ea->var));
return snprintf(buf, PAGE_SIZE, "%d\n", *(int *)(ea->var));
}
EXPORT_SYMBOL_GPL(device_show_int);
@@ -1462,7 +1462,7 @@ ssize_t device_show_bool(struct device *dev, struct device_attribute *attr,
{
struct dev_ext_attribute *ea = to_ext_attr(attr);
return sysfs_emit(buf, "%d\n", *(bool *)(ea->var));
return snprintf(buf, PAGE_SIZE, "%d\n", *(bool *)(ea->var));
}
EXPORT_SYMBOL_GPL(device_show_bool);
@@ -1695,7 +1695,7 @@ static ssize_t online_show(struct device *dev, struct device_attribute *attr,
device_lock(dev);
val = !dev->offline;
device_unlock(dev);
return sysfs_emit(buf, "%u\n", val);
return sprintf(buf, "%u\n", val);
}
static ssize_t online_store(struct device *dev, struct device_attribute *attr,

@@ -156,7 +156,7 @@ static ssize_t show_crash_notes(struct device *dev, struct device_attribute *att
* operation should be safe. No locking required.
*/
addr = per_cpu_ptr_to_phys(per_cpu_ptr(crash_notes, cpunum));
rc = sysfs_emit(buf, "%Lx\n", addr);
rc = sprintf(buf, "%Lx\n", addr);
return rc;
}
static DEVICE_ATTR(crash_notes, 0400, show_crash_notes, NULL);
@@ -167,7 +167,7 @@ static ssize_t show_crash_notes_size(struct device *dev,
{
ssize_t rc;
rc = sysfs_emit(buf, "%zu\n", sizeof(note_buf_t));
rc = sprintf(buf, "%zu\n", sizeof(note_buf_t));
return rc;
}
static DEVICE_ATTR(crash_notes_size, 0400, show_crash_notes_size, NULL);
@@ -264,7 +264,7 @@ static ssize_t print_cpus_offline(struct device *dev,
nr_cpu_ids, total_cpus-1);
}
n += sysfs_emit(&buf[n], "\n");
n += snprintf(&buf[n], len - n, "\n");
return n;
}
static DEVICE_ATTR(offline, 0444, print_cpus_offline, NULL);
@@ -272,7 +272,7 @@ static DEVICE_ATTR(offline, 0444, print_cpus_offline, NULL);
static ssize_t print_cpus_isolated(struct device *dev,
struct device_attribute *attr, char *buf)
{
int n = 0;
int n = 0, len = PAGE_SIZE-2;
cpumask_var_t isolated;
if (!alloc_cpumask_var(&isolated, GFP_KERNEL))
@@ -280,7 +280,7 @@ static ssize_t print_cpus_isolated(struct device *dev,
cpumask_andnot(isolated, cpu_possible_mask,
housekeeping_cpumask(HK_FLAG_DOMAIN));
n = sysfs_emit(buf, "%*pbl\n", cpumask_pr_args(isolated));
n = scnprintf(buf, len, "%*pbl\n", cpumask_pr_args(isolated));
free_cpumask_var(isolated);
@@ -292,9 +292,9 @@ static DEVICE_ATTR(isolated, 0444, print_cpus_isolated, NULL);
static ssize_t print_cpus_nohz_full(struct device *dev,
struct device_attribute *attr, char *buf)
{
int n = 0;
int n = 0, len = PAGE_SIZE-2;
n = sysfs_emit(buf, "%*pbl\n", cpumask_pr_args(tick_nohz_full_mask));
n = scnprintf(buf, len, "%*pbl\n", cpumask_pr_args(tick_nohz_full_mask));
return n;
}
@@ -328,7 +328,7 @@ static ssize_t print_cpu_modalias(struct device *dev,
ssize_t n;
u32 i;
n = sysfs_emit(buf, "cpu:type:" CPU_FEATURE_TYPEFMT ":feature:",
n = sprintf(buf, "cpu:type:" CPU_FEATURE_TYPEFMT ":feature:",
CPU_FEATURE_TYPEVAL);
for (i = 0; i < MAX_CPU_FEATURES; i++)
@@ -521,56 +521,56 @@ static void __init cpu_dev_register_generic(void)
ssize_t __weak cpu_show_meltdown(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sysfs_emit(buf, "Not affected\n");
return sprintf(buf, "Not affected\n");
}
ssize_t __weak cpu_show_spectre_v1(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sysfs_emit(buf, "Not affected\n");
return sprintf(buf, "Not affected\n");
}
ssize_t __weak cpu_show_spectre_v2(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sysfs_emit(buf, "Not affected\n");
return sprintf(buf, "Not affected\n");
}
ssize_t __weak cpu_show_spec_store_bypass(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sysfs_emit(buf, "Not affected\n");
return sprintf(buf, "Not affected\n");
}
ssize_t __weak cpu_show_l1tf(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sysfs_emit(buf, "Not affected\n");
return sprintf(buf, "Not affected\n");
}
ssize_t __weak cpu_show_mds(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sysfs_emit(buf, "Not affected\n");
return sprintf(buf, "Not affected\n");
}
ssize_t __weak cpu_show_tsx_async_abort(struct device *dev,
struct device_attribute *attr,
char *buf)
{
return sysfs_emit(buf, "Not affected\n");
return sprintf(buf, "Not affected\n");
}
ssize_t __weak cpu_show_itlb_multihit(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sysfs_emit(buf, "Not affected\n");
return sprintf(buf, "Not affected\n");
}
ssize_t __weak cpu_show_srbds(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sysfs_emit(buf, "Not affected\n");
return sprintf(buf, "Not affected\n");
}
ssize_t __weak cpu_show_mmio_stale_data(struct device *dev,

@@ -215,7 +215,7 @@ static ssize_t firmware_loading_show(struct device *dev,
loading = fw_sysfs_loading(fw_sysfs->fw_priv);
mutex_unlock(&fw_lock);
return sysfs_emit(buf, "%d\n", loading);
return sprintf(buf, "%d\n", loading);
}
/* one pages buffer should be mapped/unmapped only once */

@@ -121,7 +121,7 @@ static ssize_t show_mem_start_phys_index(struct device *dev,
unsigned long phys_index;
phys_index = mem->start_section_nr / sections_per_block;
return sysfs_emit(buf, "%08lx\n", phys_index);
return sprintf(buf, "%08lx\n", phys_index);
}
/*
@@ -145,7 +145,7 @@ static ssize_t show_mem_removable(struct device *dev,
}
out:
return sysfs_emit(buf, "%d\n", ret);
return sprintf(buf, "%d\n", ret);
}
/*
@@ -163,17 +163,17 @@ static ssize_t show_mem_state(struct device *dev,
*/
switch (mem->state) {
case MEM_ONLINE:
len = sysfs_emit(buf, "online\n");
len = sprintf(buf, "online\n");
break;
case MEM_OFFLINE:
len = sysfs_emit(buf, "offline\n");
len = sprintf(buf, "offline\n");
break;
case MEM_GOING_OFFLINE:
len = sysfs_emit(buf, "going-offline\n");
len = sprintf(buf, "going-offline\n");
break;
default:
len = sysfs_emit(buf, "ERROR-UNKNOWN-%ld\n",
mem->state);
len = sprintf(buf, "ERROR-UNKNOWN-%ld\n",
mem->state);
WARN_ON(1);
break;
}
@@ -384,7 +384,7 @@ static ssize_t show_phys_device(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct memory_block *mem = to_memory_block(dev);
return sysfs_emit(buf, "%d\n", mem->phys_device);
return sprintf(buf, "%d\n", mem->phys_device);
}
#ifdef CONFIG_MEMORY_HOTREMOVE
@@ -422,7 +422,7 @@ static ssize_t show_valid_zones(struct device *dev,
*/
if (!test_pages_in_a_zone(start_pfn, start_pfn + nr_pages,
&valid_start_pfn, &valid_end_pfn))
return sysfs_emit(buf, "none\n");
return sprintf(buf, "none\n");
start_pfn = valid_start_pfn;
strcat(buf, page_zone(pfn_to_page(start_pfn))->name);
goto out;
@@ -456,7 +456,7 @@ static ssize_t
print_block_size(struct device *dev, struct device_attribute *attr,
char *buf)
{
return sysfs_emit(buf, "%lx\n", get_memory_block_size());
return sprintf(buf, "%lx\n", get_memory_block_size());
}
static DEVICE_ATTR(block_size_bytes, 0444, print_block_size, NULL);
@@ -470,9 +470,9 @@ show_auto_online_blocks(struct device *dev, struct device_attribute *attr,
char *buf)
{
if (memhp_auto_online)
return sysfs_emit(buf, "online\n");
return sprintf(buf, "online\n");
else
return sysfs_emit(buf, "offline\n");
return sprintf(buf, "offline\n");
}
static ssize_t

@@ -99,7 +99,7 @@ static ssize_t node_read_meminfo(struct device *dev,
nid, K(sum_zone_node_page_state(nid, NR_MLOCK)));
#ifdef CONFIG_HIGHMEM
n += sysfs_emit(buf + n,
n += sprintf(buf + n,
"Node %d HighTotal: %8lu kB\n"
"Node %d HighFree: %8lu kB\n"
"Node %d LowTotal: %8lu kB\n"
@@ -109,7 +109,7 @@ static ssize_t node_read_meminfo(struct device *dev,
nid, K(i.totalram - i.totalhigh),
nid, K(i.freeram - i.freehigh));
#endif
n += sysfs_emit(buf + n,
n += sprintf(buf + n,
"Node %d Dirty: %8lu kB\n"
"Node %d Writeback: %8lu kB\n"
"Node %d FilePages: %8lu kB\n"
@@ -173,19 +173,19 @@ static DEVICE_ATTR(meminfo, S_IRUGO, node_read_meminfo, NULL);
static ssize_t node_read_numastat(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sysfs_emit(buf,
"numa_hit %lu\n"
"numa_miss %lu\n"
"numa_foreign %lu\n"
"interleave_hit %lu\n"
"local_node %lu\n"
"other_node %lu\n",
sum_zone_numa_state(dev->id, NUMA_HIT),
sum_zone_numa_state(dev->id, NUMA_MISS),
sum_zone_numa_state(dev->id, NUMA_FOREIGN),
sum_zone_numa_state(dev->id, NUMA_INTERLEAVE_HIT),
sum_zone_numa_state(dev->id, NUMA_LOCAL),
sum_zone_numa_state(dev->id, NUMA_OTHER));
return sprintf(buf,
"numa_hit %lu\n"
"numa_miss %lu\n"
"numa_foreign %lu\n"
"interleave_hit %lu\n"
"local_node %lu\n"
"other_node %lu\n",
sum_zone_numa_state(dev->id, NUMA_HIT),
sum_zone_numa_state(dev->id, NUMA_MISS),
sum_zone_numa_state(dev->id, NUMA_FOREIGN),
sum_zone_numa_state(dev->id, NUMA_INTERLEAVE_HIT),
sum_zone_numa_state(dev->id, NUMA_LOCAL),
sum_zone_numa_state(dev->id, NUMA_OTHER));
}
static DEVICE_ATTR(numastat, S_IRUGO, node_read_numastat, NULL);
@@ -623,7 +623,7 @@ static ssize_t print_nodes_state(enum node_states state, char *buf)
{
int n;
n = sysfs_emit(buf, "%*pbl",
n = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
nodemask_pr_args(&node_states[state]));
buf[n++] = '\n';
buf[n] = '\0';

@@ -927,7 +927,7 @@ static ssize_t driver_override_show(struct device *dev,
ssize_t len;
device_lock(dev);
len = sysfs_emit(buf, "%s\n", pdev->driver_override);
len = sprintf(buf, "%s\n", pdev->driver_override);
device_unlock(dev);
return len;
}

@@ -102,7 +102,7 @@ static const char ctrl_on[] = "on";
static ssize_t control_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
return sysfs_emit(buf, "%s\n",
return sprintf(buf, "%s\n",
dev->power.runtime_auto ? ctrl_auto : ctrl_on);
}
@@ -128,7 +128,7 @@ static ssize_t runtime_active_time_show(struct device *dev,
int ret;
spin_lock_irq(&dev->power.lock);
update_pm_runtime_accounting(dev);
ret = sysfs_emit(buf, "%i\n", jiffies_to_msecs(dev->power.active_jiffies));
ret = sprintf(buf, "%i\n", jiffies_to_msecs(dev->power.active_jiffies));
spin_unlock_irq(&dev->power.lock);
return ret;
}
@@ -141,7 +141,7 @@ static ssize_t runtime_suspended_time_show(struct device *dev,
int ret;
spin_lock_irq(&dev->power.lock);
update_pm_runtime_accounting(dev);
ret = sysfs_emit(buf, "%i\n",
ret = sprintf(buf, "%i\n",
jiffies_to_msecs(dev->power.suspended_jiffies));
spin_unlock_irq(&dev->power.lock);
return ret;
@@ -176,7 +176,7 @@ static ssize_t runtime_status_show(struct device *dev,
return -EIO;
}
}
return sysfs_emit(buf, p);
return sprintf(buf, p);
}
static DEVICE_ATTR_RO(runtime_status);
@@ -186,7 +186,7 @@ static ssize_t autosuspend_delay_ms_show(struct device *dev,
{
if (!dev->power.use_autosuspend)
return -EIO;
return sysfs_emit(buf, "%d\n", dev->power.autosuspend_delay);
return sprintf(buf, "%d\n", dev->power.autosuspend_delay);
}
static ssize_t autosuspend_delay_ms_store(struct device *dev,
@@ -215,11 +215,11 @@ static ssize_t pm_qos_resume_latency_us_show(struct device *dev,
s32 value = dev_pm_qos_requested_resume_latency(dev);
if (value == 0)
return sysfs_emit(buf, "n/a\n");
return sprintf(buf, "n/a\n");
if (value == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT)
value = 0;
return sysfs_emit(buf, "%d\n", value);
return sprintf(buf, "%d\n", value);
}
static ssize_t pm_qos_resume_latency_us_store(struct device *dev,
@@ -259,11 +259,11 @@ static ssize_t pm_qos_latency_tolerance_us_show(struct device *dev,
s32 value = dev_pm_qos_get_user_latency_tolerance(dev);
if (value < 0)
return sysfs_emit(buf, "auto\n");
return sprintf(buf, "auto\n");
if (value == PM_QOS_LATENCY_ANY)
return sysfs_emit(buf, "any\n");
return sprintf(buf, "any\n");
return sysfs_emit(buf, "%d\n", value);
return sprintf(buf, "%d\n", value);
}
static ssize_t pm_qos_latency_tolerance_us_store(struct device *dev,
@@ -295,8 +295,8 @@ static ssize_t pm_qos_no_power_off_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
return sysfs_emit(buf, "%d\n", !!(dev_pm_qos_requested_flags(dev)
& PM_QOS_FLAG_NO_POWER_OFF));
return sprintf(buf, "%d\n", !!(dev_pm_qos_requested_flags(dev)
& PM_QOS_FLAG_NO_POWER_OFF));
}
static ssize_t pm_qos_no_power_off_store(struct device *dev,
@@ -324,9 +324,9 @@ static const char _disabled[] = "disabled";
static ssize_t wakeup_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
return sysfs_emit(buf, "%s\n", device_can_wakeup(dev)
? (device_may_wakeup(dev) ? _enabled : _disabled)
: "");
return sprintf(buf, "%s\n", device_can_wakeup(dev)
? (device_may_wakeup(dev) ? _enabled : _disabled)
: "");
}
static ssize_t wakeup_store(struct device *dev, struct device_attribute *attr,
@@ -512,7 +512,7 @@ static DEVICE_ATTR_RO(wakeup_prevent_sleep_time_ms);
static ssize_t runtime_usage_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sysfs_emit(buf, "%d\n", atomic_read(&dev->power.usage_count));
return sprintf(buf, "%d\n", atomic_read(&dev->power.usage_count));
}
static DEVICE_ATTR_RO(runtime_usage);
@@ -520,8 +520,8 @@ static ssize_t runtime_active_kids_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
return sysfs_emit(buf, "%d\n", dev->power.ignore_children ?
0 : atomic_read(&dev->power.child_count));
return sprintf(buf, "%d\n", dev->power.ignore_children ?
0 : atomic_read(&dev->power.child_count));
}
static DEVICE_ATTR_RO(runtime_active_kids);
@@ -529,12 +529,12 @@ static ssize_t runtime_enabled_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
if (dev->power.disable_depth && (dev->power.runtime_auto == false))
return sysfs_emit(buf, "disabled & forbidden\n");
return sprintf(buf, "disabled & forbidden\n");
if (dev->power.disable_depth)
return sysfs_emit(buf, "disabled\n");
return sprintf(buf, "disabled\n");
if (dev->power.runtime_auto == false)
return sysfs_emit(buf, "forbidden\n");
return sysfs_emit(buf, "enabled\n");
return sprintf(buf, "forbidden\n");
return sprintf(buf, "enabled\n");
}
static DEVICE_ATTR_RO(runtime_enabled);
@@ -542,9 +542,9 @@ static DEVICE_ATTR_RO(runtime_enabled);
static ssize_t async_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
return sysfs_emit(buf, "%s\n",
device_async_suspend_enabled(dev) ?
_enabled : _disabled);
return sprintf(buf, "%s\n",
device_async_suspend_enabled(dev) ?
_enabled : _disabled);
}
static ssize_t async_store(struct device *dev, struct device_attribute *attr,

@@ -467,7 +467,8 @@ static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
if (!rbnode)
return -ENOMEM;
regcache_rbtree_set_register(map, rbnode,
reg - rbnode->base_reg, value);
(reg - rbnode->base_reg) / map->reg_stride,
value);
regcache_rbtree_insert(map, &rbtree_ctx->root, rbnode);
rbtree_ctx->cached_rbnode = rbnode;
}

@@ -73,13 +73,13 @@ static ssize_t soc_info_get(struct device *dev,
struct soc_device *soc_dev = container_of(dev, struct soc_device, dev);
if (attr == &dev_attr_machine)
return sysfs_emit(buf, "%s\n", soc_dev->attr->machine);
return sprintf(buf, "%s\n", soc_dev->attr->machine);
if (attr == &dev_attr_family)
return sysfs_emit(buf, "%s\n", soc_dev->attr->family);
return sprintf(buf, "%s\n", soc_dev->attr->family);
if (attr == &dev_attr_revision)
return sysfs_emit(buf, "%s\n", soc_dev->attr->revision);
return sprintf(buf, "%s\n", soc_dev->attr->revision);
if (attr == &dev_attr_soc_id)
return sysfs_emit(buf, "%s\n", soc_dev->attr->soc_id);
return sprintf(buf, "%s\n", soc_dev->attr->soc_id);
return -EINVAL;

@@ -385,8 +385,6 @@
static int __init
parisc_agp_init(void)
{
extern struct sba_device *sba_list;
int err = -1;
struct parisc_device *sba = NULL, *lba = NULL;
struct lba_device *lbadev = NULL;

@@ -162,7 +162,7 @@ static unsigned long tegra_bpmp_clk_recalc_rate(struct clk_hw *hw,
err = tegra_bpmp_clk_transfer(clk->bpmp, &msg);
if (err < 0)
return err;
return 0;
return response.rate;
}

@@ -999,7 +999,7 @@ static int aspeed_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
else if (param == PIN_CONFIG_BIAS_DISABLE ||
param == PIN_CONFIG_BIAS_PULL_DOWN ||
param == PIN_CONFIG_DRIVE_STRENGTH)
return pinctrl_gpio_set_config(offset, config);
return pinctrl_gpio_set_config(chip->base + offset, config);
else if (param == PIN_CONFIG_DRIVE_OPEN_DRAIN ||
param == PIN_CONFIG_DRIVE_OPEN_SOURCE)
/* Return -ENOTSUPP to trigger emulation, as per datasheet */

@@ -341,6 +341,7 @@ static int sprd_pmic_eic_probe(struct platform_device *pdev)
pmic_eic->chip.set_config = sprd_pmic_eic_set_config;
pmic_eic->chip.set = sprd_pmic_eic_set;
pmic_eic->chip.get = sprd_pmic_eic_get;
pmic_eic->chip.can_sleep = true;
pmic_eic->intc.name = dev_name(&pdev->dev);
pmic_eic->intc.irq_mask = sprd_pmic_eic_irq_mask;

@@ -246,6 +246,7 @@ static bool pxa_gpio_has_pinctrl(void)
switch (gpio_type) {
case PXA3XX_GPIO:
case MMP2_GPIO:
case MMP_GPIO:
return false;
default:

@@ -246,7 +246,7 @@ static int tb10x_gpio_probe(struct platform_device *pdev)
handle_edge_irq, IRQ_NOREQUEST, IRQ_NOPROBE,
IRQ_GC_INIT_MASK_CACHE);
if (ret)
return ret;
goto err_remove_domain;
gc = tb10x_gpio->domain->gc->gc[0];
gc->reg_base = tb10x_gpio->base;
@@ -260,6 +260,10 @@ static int tb10x_gpio_probe(struct platform_device *pdev)
}
return 0;
err_remove_domain:
irq_domain_remove(tb10x_gpio->domain);
return ret;
}
static int tb10x_gpio_remove(struct platform_device *pdev)

@@ -1679,6 +1679,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
"SMBus I801 adapter at %04lx", priv->smba);
err = i2c_add_adapter(&priv->adapter);
if (err) {
platform_device_unregister(priv->tco_pdev);
i801_acpi_remove(priv);
return err;
}

@@ -244,6 +244,10 @@ static int i2c_demux_pinctrl_probe(struct platform_device *pdev)
props[i].name = devm_kstrdup(&pdev->dev, "status", GFP_KERNEL);
props[i].value = devm_kstrdup(&pdev->dev, "ok", GFP_KERNEL);
if (!props[i].name || !props[i].value) {
err = -ENOMEM;
goto err_rollback;
}
props[i].length = 3;
of_changeset_init(&priv->chan[i].chgset);

@@ -215,7 +215,7 @@ static int make_cma_ports(struct cma_dev_group *cma_dev_group,
}
for (i = 0; i < ports_num; i++) {
char port_str[10];
char port_str[11];
ports[i].port_num = i + 1;
snprintf(port_str, sizeof(port_str), "%u", i + 1);

@@ -221,7 +221,7 @@ void del_sysfs_port_mcg_attr(struct mlx4_ib_dev *device, int port_num,
static int add_port_entries(struct mlx4_ib_dev *device, int port_num)
{
int i;
char buff[11];
char buff[12];
struct mlx4_ib_iov_port *port = NULL;
int ret = 0 ;
struct ib_port_attr attr;

@@ -2005,7 +2005,7 @@ static inline char *mmap_cmd2str(enum mlx5_ib_mmap_cmd cmd)
case MLX5_IB_MMAP_DEVICE_MEM:
return "Device Memory";
default:
return NULL;
return "Unknown";
}
}

@@ -1188,6 +1188,13 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
.driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
},
/* See comment on TUXEDO InfinityBook S17 Gen6 / Clevo NS70MU above */
{
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "PD5x_7xPNP_PNR_PNN_PNT"),
},
.driver_data = (void *)(SERIO_QUIRK_NOAUX)
},
{
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "X170SM"),

@@ -619,4 +619,4 @@ MODULE_DESCRIPTION("Spase SP8870 DVB-T Demodulator driver");
MODULE_AUTHOR("Juergen Peitz");
MODULE_LICENSE("GPL");
EXPORT_SYMBOL(sp8870_attach);
EXPORT_SYMBOL_GPL(sp8870_attach);

@@ -1513,7 +1513,7 @@ fail:
return NULL;
}
EXPORT_SYMBOL(xc2028_attach);
EXPORT_SYMBOL_GPL(xc2028_attach);
MODULE_DESCRIPTION("Xceive xc2028/xc3028 tuner driver");
MODULE_AUTHOR("Michel Ludwig <michel.ludwig@gmail.com>");

@@ -878,6 +878,13 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
return -EINVAL;
}
/* UBI cannot work on flashes with zero erasesize. */
if (!mtd->erasesize) {
pr_err("ubi: refuse attaching mtd%d - zero erasesize flash is not supported\n",
mtd->index);
return -EINVAL;
}
if (ubi_num == UBI_DEV_NUM_AUTO) {
/* Search for an empty slot in the @ubi_devices array */
for (ubi_num = 0; ubi_num < UBI_MAX_DEVICES; ubi_num++)

@@ -2517,8 +2517,13 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
u32 regclr)
{
#define HCLGE_IMP_RESET_DELAY 5
switch (event_type) {
case HCLGE_VECTOR0_EVENT_RST:
if (regclr == BIT(HCLGE_VECTOR0_IMPRESET_INT_B))
mdelay(HCLGE_IMP_RESET_DELAY);
hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
break;
case HCLGE_VECTOR0_EVENT_MBX:

@@ -122,9 +122,9 @@ struct qed_ll2_info {
enum core_tx_dest tx_dest;
u8 tx_stats_en;
bool main_func_queue;
struct qed_ll2_cbs cbs;
struct qed_ll2_rx_queue rx_queue;
struct qed_ll2_tx_queue tx_queue;
struct qed_ll2_cbs cbs;
};
/**

@@ -57,6 +57,7 @@ struct stm32_ops {
int (*parse_data)(struct stm32_dwmac *dwmac,
struct device *dev);
u32 syscfg_eth_mask;
bool clk_rx_enable_in_suspend;
};
static int stm32_dwmac_init(struct plat_stmmacenet_data *plat_dat)
@@ -74,7 +75,8 @@ static int stm32_dwmac_init(struct plat_stmmacenet_data *plat_dat)
if (ret)
return ret;
if (!dwmac->dev->power.is_suspended) {
if (!dwmac->ops->clk_rx_enable_in_suspend ||
!dwmac->dev->power.is_suspended) {
ret = clk_prepare_enable(dwmac->clk_rx);
if (ret) {
clk_disable_unprepare(dwmac->clk_tx);
@@ -413,7 +415,8 @@ static struct stm32_ops stm32mp1_dwmac_data = {
.suspend = stm32mp1_suspend,
.resume = stm32mp1_resume,
.parse_data = stm32mp1_parse_data,
.syscfg_eth_mask = SYSCFG_MP1_ETH_MASK
.syscfg_eth_mask = SYSCFG_MP1_ETH_MASK,
.clk_rx_enable_in_suspend = true
};
static const struct of_device_id stm32_dwmac_match[] = {

@@ -2095,7 +2095,12 @@ static const struct ethtool_ops team_ethtool_ops = {
static void team_setup_by_port(struct net_device *dev,
struct net_device *port_dev)
{
dev->header_ops = port_dev->header_ops;
struct team *team = netdev_priv(dev);
if (port_dev->type == ARPHRD_ETHER)
dev->header_ops = team->header_ops_cache;
else
dev->header_ops = port_dev->header_ops;
dev->type = port_dev->type;
dev->hard_header_len = port_dev->hard_header_len;
dev->needed_headroom = port_dev->needed_headroom;
@@ -2142,8 +2147,11 @@ static int team_dev_type_check_change(struct net_device *dev,
static void team_setup(struct net_device *dev)
{
struct team *team = netdev_priv(dev);
ether_setup(dev);
dev->max_mtu = ETH_MAX_MTU;
team->header_ops_cache = dev->header_ops;
dev->netdev_ops = &team_netdev_ops;
dev->ethtool_ops = &team_ethtool_ops;

@@ -961,12 +961,11 @@ static bool tbnet_xmit_csum_and_map(struct tbnet *net, struct sk_buff *skb,
*tucso = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
ip_hdr(skb)->daddr, 0,
ip_hdr(skb)->protocol, 0);
} else if (skb_is_gso_v6(skb)) {
} else if (skb_is_gso(skb) && skb_is_gso_v6(skb)) {
tucso = dest + ((void *)&(tcp_hdr(skb)->check) - data);
*tucso = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
&ipv6_hdr(skb)->daddr, 0,
IPPROTO_TCP, 0);
return false;
} else if (protocol == htons(ETH_P_IPV6)) {
tucso = dest + skb_checksum_start_offset(skb) + skb->csum_offset;
*tucso = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,

@@ -102,7 +102,9 @@ static int __must_check __smsc75xx_read_reg(struct usbnet *dev, u32 index,
ret = fn(dev, USB_VENDOR_REQUEST_READ_REGISTER, USB_DIR_IN
| USB_TYPE_VENDOR | USB_RECIP_DEVICE,
0, index, &buf, 4);
if (unlikely(ret < 0)) {
if (unlikely(ret < 4)) {
ret = ret < 0 ? ret : -ENODATA;
netdev_warn(dev->net, "Failed to read reg index 0x%08x: %d\n",
index, ret);
return ret;

@@ -37,6 +37,8 @@
#define TDM_PPPOHT_SLIC_MAXIN
static int uhdlc_close(struct net_device *dev);
static struct ucc_tdm_info utdm_primary_info = {
.uf_info = {
.tsa = 0,
@@ -661,6 +663,7 @@ static int uhdlc_open(struct net_device *dev)
hdlc_device *hdlc = dev_to_hdlc(dev);
struct ucc_hdlc_private *priv = hdlc->priv;
struct ucc_tdm *utdm = priv->utdm;
int rc = 0;
if (priv->hdlc_busy != 1) {
if (request_irq(priv->ut_info->uf_info.irq,
@@ -683,10 +686,13 @@ static int uhdlc_open(struct net_device *dev)
netif_device_attach(priv->ndev);
napi_enable(&priv->napi);
netif_start_queue(dev);
hdlc_open(dev);
rc = hdlc_open(dev);
if (rc)
uhdlc_close(dev);
}
return 0;
return rc;
}
static void uhdlc_memclean(struct ucc_hdlc_private *priv)
@@ -775,6 +781,8 @@ static int uhdlc_close(struct net_device *dev)
netif_stop_queue(dev);
priv->hdlc_busy = 0;
hdlc_close(dev);
return 0;
}

@@ -986,8 +986,8 @@ void mwifiex_11n_rxba_sync_event(struct mwifiex_private *priv,
}
}
tlv_buf_left -= (sizeof(*tlv_rxba) + tlv_len);
tmp = (u8 *)tlv_rxba + tlv_len + sizeof(*tlv_rxba);
tlv_buf_left -= (sizeof(tlv_rxba->header) + tlv_len);
tmp = (u8 *)tlv_rxba + sizeof(tlv_rxba->header) + tlv_len;
tlv_rxba = (struct mwifiex_ie_types_rxba_sync *)tmp;
}
}

@@ -98,7 +98,8 @@ int mwifiex_process_rx_packet(struct mwifiex_private *priv,
rx_pkt_len = le16_to_cpu(local_rx_pd->rx_pkt_length);
rx_pkt_hdr = (void *)local_rx_pd + rx_pkt_off;
if (sizeof(*rx_pkt_hdr) + rx_pkt_off > skb->len) {
if (sizeof(rx_pkt_hdr->eth803_hdr) + sizeof(rfc1042_header) +
rx_pkt_off > skb->len) {
mwifiex_dbg(priv->adapter, ERROR,
"wrong rx packet offset: len=%d, rx_pkt_off=%d\n",
skb->len, rx_pkt_off);
@@ -107,12 +108,13 @@ int mwifiex_process_rx_packet(struct mwifiex_private *priv,
return -1;
}
if ((!memcmp(&rx_pkt_hdr->rfc1042_hdr, bridge_tunnel_header,
sizeof(bridge_tunnel_header))) ||
(!memcmp(&rx_pkt_hdr->rfc1042_hdr, rfc1042_header,
sizeof(rfc1042_header)) &&
ntohs(rx_pkt_hdr->rfc1042_hdr.snap_type) != ETH_P_AARP &&
ntohs(rx_pkt_hdr->rfc1042_hdr.snap_type) != ETH_P_IPX)) {
if (sizeof(*rx_pkt_hdr) + rx_pkt_off <= skb->len &&
((!memcmp(&rx_pkt_hdr->rfc1042_hdr, bridge_tunnel_header,
sizeof(bridge_tunnel_header))) ||
(!memcmp(&rx_pkt_hdr->rfc1042_hdr, rfc1042_header,
sizeof(rfc1042_header)) &&
ntohs(rx_pkt_hdr->rfc1042_hdr.snap_type) != ETH_P_AARP &&
ntohs(rx_pkt_hdr->rfc1042_hdr.snap_type) != ETH_P_IPX))) {
/*
* Replace the 803 header and rfc1042 header (llc/snap) with an
* EthernetII header, keep the src/dst and snap_type

@@ -2501,8 +2501,6 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
size_t alloc_size;
node = dev_to_node(&pdev->dev);
if (node == NUMA_NO_NODE)
set_dev_node(&pdev->dev, first_memory_node);
dev = kzalloc_node(sizeof(*dev), GFP_KERNEL, node);
if (!dev)

@@ -216,9 +216,9 @@ static inline void iosapic_write(void __iomem *iosapic, unsigned int reg, u32 va
static DEFINE_SPINLOCK(iosapic_lock);
static inline void iosapic_eoi(void __iomem *addr, unsigned int data)
static inline void iosapic_eoi(__le32 __iomem *addr, __le32 data)
{
__raw_writel(data, addr);
__raw_writel((__force u32)data, addr);
}
/*

@@ -132,8 +132,8 @@ struct iosapic_irt {
struct vector_info {
struct iosapic_info *iosapic; /* I/O SAPIC this vector is on */
struct irt_entry *irte; /* IRT entry */
u32 __iomem *eoi_addr; /* precalculate EOI reg address */
u32 eoi_data; /* IA64: ? PA: swapped txn_data */
__le32 __iomem *eoi_addr; /* precalculate EOI reg address */
__le32 eoi_data; /* IA64: ? PA: swapped txn_data */
int txn_irq; /* virtual IRQ number for processor */
ulong txn_addr; /* IA64: id_eid PA: partial HPA */
u32 txn_data; /* CPU interrupt bit */

@@ -758,8 +758,6 @@ static int qcom_pcie_get_resources_2_4_0(struct qcom_pcie *pcie)
if (IS_ERR(res->phy_ahb_reset))
return PTR_ERR(res->phy_ahb_reset);
dw_pcie_dbi_ro_wr_dis(pci);
return 0;
}

@@ -493,12 +493,12 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
if (port) {
put_device(&port->dev);
retval = -EEXIST;
goto err_out;
goto err_put;
}
port = kzalloc(sizeof(struct zfcp_port), GFP_KERNEL);
if (!port)
goto err_out;
goto err_put;
rwlock_init(&port->unit_list_lock);
INIT_LIST_HEAD(&port->unit_list);
@@ -521,7 +521,7 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
if (dev_set_name(&port->dev, "0x%016llx", (unsigned long long)wwpn)) {
kfree(port);
goto err_out;
goto err_put;
}
retval = -EINVAL;
@@ -538,8 +538,9 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
return port;
err_out:
err_put:
zfcp_ccw_adapter_put(adapter);
err_out:
return ERR_PTR(retval);
}

@@ -2193,7 +2193,8 @@ struct megasas_instance {
u32 secure_jbod_support;
u32 support_morethan256jbod; /* FW support for more than 256 PD/JBOD */
bool use_seqnum_jbod_fp; /* Added for PD sequence */
spinlock_t crashdump_lock;
bool smp_affinity_enable;
struct mutex crashdump_lock;
struct megasas_register_set __iomem *reg_set;
u32 __iomem *reply_post_host_index_addr[MR_MAX_MSIX_REG_ARRAY];
@@ -2210,6 +2211,7 @@
u16 ldio_threshold;
u16 cur_can_queue;
u32 max_sectors_per_req;
bool msix_load_balance;
struct megasas_aen_event *ev;
struct megasas_cmd **cmd_list;
@@ -2246,6 +2248,7 @@ struct megasas_instance {
atomic_t sge_holes_type1;
atomic_t sge_holes_type2;
atomic_t sge_holes_type3;
atomic64_t total_io_count;
struct megasas_instance_template *instancet;
struct tasklet_struct isr_tasklet;

@@ -3004,14 +3004,13 @@ megasas_fw_crash_buffer_store(struct device *cdev,
struct megasas_instance *instance =
(struct megasas_instance *) shost->hostdata;
int val = 0;
unsigned long flags;
if (kstrtoint(buf, 0, &val) != 0)
return -EINVAL;
spin_lock_irqsave(&instance->crashdump_lock, flags);
mutex_lock(&instance->crashdump_lock);
instance->fw_crash_buffer_offset = val;
spin_unlock_irqrestore(&instance->crashdump_lock, flags);
mutex_unlock(&instance->crashdump_lock);
return strlen(buf);
}
@@ -3027,17 +3026,16 @@ megasas_fw_crash_buffer_show(struct device *cdev,
unsigned long dmachunk = CRASH_DMA_BUF_SIZE;
unsigned long chunk_left_bytes;
unsigned long src_addr;
unsigned long flags;
u32 buff_offset;
spin_lock_irqsave(&instance->crashdump_lock, flags);
mutex_lock(&instance->crashdump_lock);
buff_offset = instance->fw_crash_buffer_offset;
if (!instance->crash_dump_buf ||
!((instance->fw_crash_state == AVAILABLE) ||
(instance->fw_crash_state == COPYING))) {
dev_err(&instance->pdev->dev,
"Firmware crash dump is not available\n");
spin_unlock_irqrestore(&instance->crashdump_lock, flags);
mutex_unlock(&instance->crashdump_lock);
return -EINVAL;
}
@@ -3046,7 +3044,7 @@ megasas_fw_crash_buffer_show(struct device *cdev,
if (buff_offset > (instance->fw_crash_buffer_size * dmachunk)) {
dev_err(&instance->pdev->dev,
"Firmware crash dump offset is out of range\n");
spin_unlock_irqrestore(&instance->crashdump_lock, flags);
mutex_unlock(&instance->crashdump_lock);
return 0;
}
@@ -3058,7 +3056,7 @@ megasas_fw_crash_buffer_show(struct device *cdev,
src_addr = (unsigned long)instance->crash_buf[buff_offset / dmachunk] +
(buff_offset % dmachunk);
memcpy(buf, (void *)src_addr, size);
spin_unlock_irqrestore(&instance->crashdump_lock, flags);
mutex_unlock(&instance->crashdump_lock);
return size;
}
@@ -3083,7 +3081,6 @@ megasas_fw_crash_state_store(struct device *cdev,
struct megasas_instance *instance =
(struct megasas_instance *) shost->hostdata;
int val = 0;
unsigned long flags;
if (kstrtoint(buf, 0, &val) != 0)
return -EINVAL;
@@ -3097,9 +3094,9 @@ megasas_fw_crash_state_store(struct device *cdev,
instance->fw_crash_state = val;
if ((val == COPIED) || (val == COPY_ERROR)) {
spin_lock_irqsave(&instance->crashdump_lock, flags);
mutex_lock(&instance->crashdump_lock);
megasas_free_host_crash_buffer(instance);
spin_unlock_irqrestore(&instance->crashdump_lock, flags);
mutex_unlock(&instance->crashdump_lock);
if (val == COPY_ERROR)
dev_info(&instance->pdev->dev, "application failed to "
"copy Firmware crash dump\n");
@@ -5101,6 +5098,7 @@ megasas_setup_irqs_msix(struct megasas_instance *instance, u8 is_probe)
&instance->irq_context[j]);
/* Retry irq register for IO_APIC*/
instance->msix_vectors = 0;
instance->msix_load_balance = false;
if (is_probe) {
pci_free_irq_vectors(instance->pdev);
return megasas_setup_irqs_ioapic(instance);
@@ -5109,6 +5107,7 @@ megasas_setup_irqs_msix(struct megasas_instance *instance, u8 is_probe)
}
}
}
return 0;
}
@@ -5364,6 +5363,13 @@ static int megasas_init_fw(struct megasas_instance *instance)
if (rdpq_enable)
instance->is_rdpq = (scratch_pad_2 & MR_RDPQ_MODE_OFFSET) ?
1 : 0;
if (instance->adapter_type >= INVADER_SERIES &&
!instance->msix_combined) {
instance->msix_load_balance = true;
instance->smp_affinity_enable = false;
}
fw_msix_count = instance->msix_vectors;
/* Save 1-15 reply post index address to local memory
* Index 0 is already saved from reg offset
@@ -5382,17 +5388,20 @@ static int megasas_init_fw(struct megasas_instance *instance)
instance->msix_vectors);
} else /* MFI adapters */
instance->msix_vectors = 1;
/* Don't bother allocating more MSI-X vectors than cpus */
instance->msix_vectors = min(instance->msix_vectors,
(unsigned int)num_online_cpus());
if (smp_affinity_enable)
if (instance->smp_affinity_enable)
irq_flags |= PCI_IRQ_AFFINITY;
i = pci_alloc_irq_vectors(instance->pdev, 1,
instance->msix_vectors, irq_flags);
if (i > 0)
if (i > 0) {
instance->msix_vectors = i;
else
} else {
instance->msix_vectors = 0;
instance->msix_load_balance = false;
}
}
/*
* MSI-X host index 0 is common for all adapter.
@@ -6447,11 +6456,12 @@ static inline void megasas_init_ctrl_params(struct megasas_instance *instance)
INIT_LIST_HEAD(&instance->internal_reset_pending_q);
atomic_set(&instance->fw_outstanding, 0);
atomic64_set(&instance->total_io_count, 0);
init_waitqueue_head(&instance->int_cmd_wait_q);
init_waitqueue_head(&instance->abort_cmd_wait_q);
spin_lock_init(&instance->crashdump_lock);
mutex_init(&instance->crashdump_lock);
spin_lock_init(&instance->mfi_pool_lock);
spin_lock_init(&instance->hba_lock);
spin_lock_init(&instance->stream_lock);
@@ -6469,6 +6479,8 @@ static inline void megasas_init_ctrl_params(struct megasas_instance *instance)
instance->last_time = 0;
instance->disableOnlineCtrlReset = 1;
instance->UnevenSpanSupport = 0;
instance->smp_affinity_enable = smp_affinity_enable ? true : false;
instance->msix_load_balance = false;
if (instance->adapter_type != MFI_SERIES) {
INIT_WORK(&instance->work_init, megasas_fusion_ocr_wq);
@@ -6818,7 +6830,7 @@ megasas_resume(struct pci_dev *pdev)
/* Now re-enable MSI-X */
if (instance->msix_vectors) {
irq_flags = PCI_IRQ_MSIX;
if (smp_affinity_enable)
if (instance->smp_affinity_enable)
irq_flags |= PCI_IRQ_AFFINITY;
}
rval = pci_alloc_irq_vectors(instance->pdev, 1,


@@ -2641,8 +2641,13 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
fp_possible = (io_info.fpOkForIo > 0) ? true : false;
}
cmd->request_desc->SCSIIO.MSIxIndex =
instance->reply_map[raw_smp_processor_id()];
if (instance->msix_load_balance)
cmd->request_desc->SCSIIO.MSIxIndex =
(mega_mod64(atomic64_add_return(1, &instance->total_io_count),
instance->msix_vectors));
else
cmd->request_desc->SCSIIO.MSIxIndex =
instance->reply_map[raw_smp_processor_id()];
praid_context = &io_request->RaidContext;
@@ -2969,8 +2974,13 @@ megasas_build_syspd_fusion(struct megasas_instance *instance,
cmd->request_desc->SCSIIO.DevHandle = io_request->DevHandle;
cmd->request_desc->SCSIIO.MSIxIndex =
instance->reply_map[raw_smp_processor_id()];
if (instance->msix_load_balance)
cmd->request_desc->SCSIIO.MSIxIndex =
(mega_mod64(atomic64_add_return(1, &instance->total_io_count),
instance->msix_vectors));
else
cmd->request_desc->SCSIIO.MSIxIndex =
instance->reply_map[raw_smp_processor_id()];
if (!fp_possible) {
/* system pd firmware path */


@@ -2088,8 +2088,6 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
vha->flags.difdix_supported = 1;
ql_dbg(ql_dbg_user, vha, 0x7082,
"Registered for DIF/DIX type 1 and 3 protection.\n");
if (ql2xenabledif == 1)
prot = SHOST_DIX_TYPE0_PROTECTION;
scsi_host_set_prot(vha->host,
prot | SHOST_DIF_TYPE1_PROTECTION
| SHOST_DIF_TYPE2_PROTECTION


@@ -22,7 +22,7 @@
* | Queue Command and IO tracing | 0x3074 | 0x300b |
* | | | 0x3027-0x3028 |
* | | | 0x303d-0x3041 |
* | | | 0x302d,0x3033 |
* | | | 0x302e,0x3033 |
* | | | 0x3036,0x3038 |
* | | | 0x303a |
* | DPC Thread | 0x4023 | 0x4002,0x4013 |


@@ -277,6 +277,20 @@ MODULE_PARM_DESC(qla2xuseresexchforels,
"Reserve 1/2 of emergency exchanges for ELS.\n"
" 0 (default): disabled");
int ql2xprotmask;
module_param(ql2xprotmask, int, 0644);
MODULE_PARM_DESC(ql2xprotmask,
"Override DIF/DIX protection capabilities mask\n"
"Default is 0 which sets protection mask based on "
"capabilities reported by HBA firmware.\n");
int ql2xprotguard;
module_param(ql2xprotguard, int, 0644);
MODULE_PARM_DESC(ql2xprotguard, "Override choice of DIX checksum\n"
" 0 -- Let HBA firmware decide\n"
" 1 -- Force T10 CRC\n"
" 2 -- Force IP checksum\n");
/*
* SCSI host template entry points
*/
@@ -3055,6 +3069,13 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
host->max_id = ha->max_fibre_devices;
host->cmd_per_lun = 3;
host->unique_id = host->host_no;
if (ql2xenabledif && ql2xenabledif != 2) {
ql_log(ql_log_warn, base_vha, 0x302d,
"Invalid value for ql2xenabledif, resetting it to default (2)\n");
ql2xenabledif = 2;
}
if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif)
host->max_cmd_len = 32;
else
@@ -3291,15 +3312,16 @@ skip_dpc:
base_vha->flags.difdix_supported = 1;
ql_dbg(ql_dbg_init, base_vha, 0x00f1,
"Registering for DIF/DIX type 1 and 3 protection.\n");
if (ql2xenabledif == 1)
prot = SHOST_DIX_TYPE0_PROTECTION;
scsi_host_set_prot(host,
prot | SHOST_DIF_TYPE1_PROTECTION
| SHOST_DIF_TYPE2_PROTECTION
| SHOST_DIF_TYPE3_PROTECTION
| SHOST_DIX_TYPE1_PROTECTION
| SHOST_DIX_TYPE2_PROTECTION
| SHOST_DIX_TYPE3_PROTECTION);
if (ql2xprotmask)
scsi_host_set_prot(host, ql2xprotmask);
else
scsi_host_set_prot(host,
prot | SHOST_DIF_TYPE1_PROTECTION
| SHOST_DIF_TYPE2_PROTECTION
| SHOST_DIF_TYPE3_PROTECTION
| SHOST_DIX_TYPE1_PROTECTION
| SHOST_DIX_TYPE2_PROTECTION
| SHOST_DIX_TYPE3_PROTECTION);
guard = SHOST_DIX_GUARD_CRC;
@@ -3307,7 +3329,10 @@ skip_dpc:
(ql2xenabledif > 1 || IS_PI_DIFB_DIX0_CAPABLE(ha)))
guard |= SHOST_DIX_GUARD_IP;
scsi_host_set_guard(host, guard);
if (ql2xprotguard)
scsi_host_set_guard(host, ql2xprotguard);
else
scsi_host_set_guard(host, guard);
} else
base_vha->flags.difdix_supported = 0;
}


@@ -881,7 +881,6 @@ sector_t target_to_linux_sector(struct se_device *dev, sector_t lb)
EXPORT_SYMBOL(target_to_linux_sector);
struct devices_idr_iter {
struct config_item *prev_item;
int (*fn)(struct se_device *dev, void *data);
void *data;
};
@@ -891,11 +890,9 @@ static int target_devices_idr_iter(int id, void *p, void *data)
{
struct devices_idr_iter *iter = data;
struct se_device *dev = p;
struct config_item *item;
int ret;
config_item_put(iter->prev_item);
iter->prev_item = NULL;
/*
* We add the device early to the idr, so it can be used
* by backend modules during configuration. We do not want
@@ -905,12 +902,13 @@ static int target_devices_idr_iter(int id, void *p, void *data)
if (!target_dev_configured(dev))
return 0;
iter->prev_item = config_item_get_unless_zero(&dev->dev_group.cg_item);
if (!iter->prev_item)
item = config_item_get_unless_zero(&dev->dev_group.cg_item);
if (!item)
return 0;
mutex_unlock(&device_mutex);
ret = iter->fn(dev, iter->data);
config_item_put(item);
mutex_lock(&device_mutex);
return ret;
@@ -933,7 +931,6 @@ int target_for_each_device(int (*fn)(struct se_device *dev, void *data),
mutex_lock(&device_mutex);
ret = idr_for_each(&devices_idr, target_devices_idr_iter, &iter);
mutex_unlock(&device_mutex);
config_item_put(iter.prev_item);
return ret;
}


@@ -1910,7 +1910,10 @@ int serial8250_handle_irq(struct uart_port *port, unsigned int iir)
skip_rx = true;
if (status & (UART_LSR_DR | UART_LSR_BI) && !skip_rx) {
if (irqd_is_wakeup_set(irq_get_irq_data(port->irq)))
struct irq_data *d;
d = irq_get_irq_data(port->irq);
if (d && irqd_is_wakeup_set(d))
pm_wakeup_event(tport->tty->dev, 0);
if (!up->dma || handle_rx_dma(up, iir))
status = serial8250_rx_chars(up, status);


@@ -2085,7 +2085,7 @@ config FB_COBALT
config FB_SH7760
bool "SH7760/SH7763/SH7720/SH7721 LCDC support"
depends on FB && (CPU_SUBTYPE_SH7760 || CPU_SUBTYPE_SH7763 \
depends on FB=y && (CPU_SUBTYPE_SH7760 || CPU_SUBTYPE_SH7763 \
|| CPU_SUBTYPE_SH7720 || CPU_SUBTYPE_SH7721)
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA


@@ -401,6 +401,20 @@ static unsigned int iTCO_wdt_get_timeleft(struct watchdog_device *wd_dev)
return time_left;
}
/* Returns true if the watchdog was running */
static bool iTCO_wdt_set_running(struct iTCO_wdt_private *p)
{
u16 val;
/* Bit 11: TCO Timer Halt -> 0 = The TCO timer is enabled */
val = inw(TCO1_CNT(p));
if (!(val & BIT(11))) {
set_bit(WDOG_HW_RUNNING, &p->wddev.status);
return true;
}
return false;
}
/*
* Kernel Interfaces
*/
@@ -476,9 +490,6 @@ static int iTCO_wdt_probe(struct platform_device *pdev)
return -ENODEV; /* Cannot reset NO_REBOOT bit */
}
/* Set the NO_REBOOT bit to prevent later reboots, just for sure */
p->update_no_reboot_bit(p->no_reboot_priv, true);
/* The TCO logic uses the TCO_EN bit in the SMI_EN register */
if (!devm_request_region(dev, p->smi_res->start,
resource_size(p->smi_res),
@@ -537,8 +548,13 @@ static int iTCO_wdt_probe(struct platform_device *pdev)
watchdog_set_drvdata(&p->wddev, p);
platform_set_drvdata(pdev, p);
/* Make sure the watchdog is not running */
iTCO_wdt_stop(&p->wddev);
if (!iTCO_wdt_set_running(p)) {
/*
* If the watchdog was not running set NO_REBOOT now to
* prevent later reboots.
*/
p->update_no_reboot_bit(p->no_reboot_priv, true);
}
/* Check that the heartbeat value is within its range;
if not reset to the default */


@@ -82,23 +82,13 @@ const struct evtchn_ops *evtchn_ops;
*/
static DEFINE_MUTEX(irq_mapping_update_lock);
/*
* Lock protecting event handling loop against removing event channels.
* Adding of event channels is no issue as the associated IRQ becomes active
* only after everything is setup (before request_[threaded_]irq() the handler
* can't be entered for an event, as the event channel will be unmasked only
* then).
*/
static DEFINE_RWLOCK(evtchn_rwlock);
/*
* Lock hierarchy:
*
* irq_mapping_update_lock
* evtchn_rwlock
* IRQ-desc lock
* percpu eoi_list_lock
* irq_info->lock
* IRQ-desc lock
* percpu eoi_list_lock
* irq_info->lock
*/
static LIST_HEAD(xen_irq_list_head);
@@ -213,6 +203,22 @@ static void set_info_for_irq(unsigned int irq, struct irq_info *info)
irq_set_chip_data(irq, info);
}
static void delayed_free_irq(struct work_struct *work)
{
struct irq_info *info = container_of(to_rcu_work(work), struct irq_info,
rwork);
unsigned int irq = info->irq;
/* Remove the info pointer only now, with no potential users left. */
set_info_for_irq(irq, NULL);
kfree(info);
/* Legacy IRQ descriptors are managed by the arch. */
if (irq >= nr_legacy_irqs())
irq_free_desc(irq);
}
/* Constructors for packed IRQ information. */
static int xen_irq_info_common_setup(struct irq_info *info,
unsigned irq,
@@ -547,33 +553,36 @@ static void xen_irq_lateeoi_worker(struct work_struct *work)
eoi = container_of(to_delayed_work(work), struct lateeoi_work, delayed);
read_lock_irqsave(&evtchn_rwlock, flags);
rcu_read_lock();
while (true) {
spin_lock(&eoi->eoi_list_lock);
spin_lock_irqsave(&eoi->eoi_list_lock, flags);
info = list_first_entry_or_null(&eoi->eoi_list, struct irq_info,
eoi_list);
if (info == NULL || now < info->eoi_time) {
spin_unlock(&eoi->eoi_list_lock);
if (info == NULL)
break;
if (now < info->eoi_time) {
mod_delayed_work_on(info->eoi_cpu, system_wq,
&eoi->delayed,
info->eoi_time - now);
break;
}
list_del_init(&info->eoi_list);
spin_unlock(&eoi->eoi_list_lock);
spin_unlock_irqrestore(&eoi->eoi_list_lock, flags);
info->eoi_time = 0;
xen_irq_lateeoi_locked(info, false);
}
if (info)
mod_delayed_work_on(info->eoi_cpu, system_wq,
&eoi->delayed, info->eoi_time - now);
spin_unlock_irqrestore(&eoi->eoi_list_lock, flags);
read_unlock_irqrestore(&evtchn_rwlock, flags);
rcu_read_unlock();
}
static void xen_cpu_init_eoi(unsigned int cpu)
@@ -588,16 +597,15 @@ static void xen_cpu_init_eoi(unsigned int cpu)
void xen_irq_lateeoi(unsigned int irq, unsigned int eoi_flags)
{
struct irq_info *info;
unsigned long flags;
read_lock_irqsave(&evtchn_rwlock, flags);
rcu_read_lock();
info = info_for_irq(irq);
if (info)
xen_irq_lateeoi_locked(info, eoi_flags & XEN_EOI_FLAG_SPURIOUS);
read_unlock_irqrestore(&evtchn_rwlock, flags);
rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(xen_irq_lateeoi);
@@ -616,6 +624,7 @@ static void xen_irq_init(unsigned irq)
info->type = IRQT_UNBOUND;
info->refcnt = -1;
INIT_RCU_WORK(&info->rwork, delayed_free_irq);
set_info_for_irq(irq, info);
@@ -668,31 +677,18 @@ static int __must_check xen_allocate_irq_gsi(unsigned gsi)
static void xen_free_irq(unsigned irq)
{
struct irq_info *info = info_for_irq(irq);
unsigned long flags;
if (WARN_ON(!info))
return;
write_lock_irqsave(&evtchn_rwlock, flags);
if (!list_empty(&info->eoi_list))
lateeoi_list_del(info);
list_del(&info->list);
set_info_for_irq(irq, NULL);
WARN_ON(info->refcnt > 0);
write_unlock_irqrestore(&evtchn_rwlock, flags);
kfree(info);
/* Legacy IRQ descriptors are managed by the arch. */
if (irq < nr_legacy_irqs())
return;
irq_free_desc(irq);
queue_rcu_work(system_wq, &info->rwork);
}
static void xen_evtchn_close(unsigned int port)
@@ -1603,7 +1599,14 @@ static void __xen_evtchn_do_upcall(void)
unsigned count;
struct evtchn_loop_ctrl ctrl = { 0 };
read_lock(&evtchn_rwlock);
/*
* When closing an event channel the associated IRQ must not be freed
* until all cpus have left the event handling loop. This is ensured
* by taking the rcu_read_lock() while handling events, as freeing of
* the IRQ is handled via queue_rcu_work() _after_ closing the event
* channel.
*/
rcu_read_lock();
do {
vcpu_info->evtchn_upcall_pending = 0;
@@ -1620,7 +1623,7 @@ static void __xen_evtchn_do_upcall(void)
} while (count != 1 || vcpu_info->evtchn_upcall_pending);
out:
read_unlock(&evtchn_rwlock);
rcu_read_unlock();
/*
* Increment irq_epoch only now to defer EOIs only for


@@ -8,6 +8,7 @@
*/
#ifndef __EVENTS_INTERNAL_H__
#define __EVENTS_INTERNAL_H__
#include <linux/rcupdate.h>
/* Interrupt types. */
enum xen_irq_type {
@@ -33,6 +34,7 @@ enum xen_irq_type {
struct irq_info {
struct list_head list;
struct list_head eoi_list;
struct rcu_work rwork;
short refcnt;
short spurious_cnt;
short type; /* type */


@@ -349,10 +349,9 @@ static int load_elf_fdpic_binary(struct linux_binprm *bprm)
/* there's now no turning back... the old userspace image is dead,
* defunct, deceased, etc.
*/
SET_PERSONALITY(exec_params.hdr);
if (elf_check_fdpic(&exec_params.hdr))
set_personality(PER_LINUX_FDPIC);
else
set_personality(PER_LINUX);
current->personality |= PER_LINUX_FDPIC;
if (elf_read_implies_exec(&exec_params.hdr, executable_stack))
current->personality |= READ_IMPLIES_EXEC;


@@ -2196,7 +2196,7 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
* calculated f_bavail.
*/
if (!mixed && block_rsv->space_info->full &&
total_free_meta - thresh < block_rsv->size)
(total_free_meta < thresh || total_free_meta - thresh < block_rsv->size))
buf->f_bavail = 0;
buf->f_type = BTRFS_SUPER_MAGIC;


@@ -1507,7 +1507,7 @@ struct ext4_sb_info {
struct task_struct *s_mmp_tsk;
/* record the last minlen when FITRIM is called. */
atomic_t s_last_trim_minblks;
unsigned long s_last_trim_minblks;
/* Reference to checksum algorithm driver via cryptoapi */
struct crypto_shash *s_chksum_driver;


@@ -16,6 +16,7 @@
#include <linux/slab.h>
#include <linux/nospec.h>
#include <linux/backing-dev.h>
#include <linux/freezer.h>
#include <trace/events/ext4.h>
#ifdef CONFIG_EXT4_DEBUG
@@ -5149,19 +5150,19 @@ error_return:
* @sb: super block for the file system
* @start: starting block of the free extent in the alloc. group
* @count: number of blocks to TRIM
* @group: alloc. group we are working with
* @e4b: ext4 buddy for the group
*
* Trim "count" blocks starting at "start" in the "group". To assure that no
* one will allocate those blocks, mark it as used in buddy bitmap. This must
* be called under the group lock.
*/
static int ext4_trim_extent(struct super_block *sb, int start, int count,
ext4_group_t group, struct ext4_buddy *e4b)
static int ext4_trim_extent(struct super_block *sb,
int start, int count, struct ext4_buddy *e4b)
__releases(bitlock)
__acquires(bitlock)
{
struct ext4_free_extent ex;
ext4_group_t group = e4b->bd_group;
int ret = 0;
trace_ext4_trim_extent(sb, group, start, count);
@@ -5184,6 +5185,71 @@ __acquires(bitlock)
return ret;
}
static ext4_grpblk_t ext4_last_grp_cluster(struct super_block *sb,
ext4_group_t grp)
{
if (grp < ext4_get_groups_count(sb))
return EXT4_CLUSTERS_PER_GROUP(sb) - 1;
return (ext4_blocks_count(EXT4_SB(sb)->s_es) -
ext4_group_first_block_no(sb, grp) - 1) >>
EXT4_CLUSTER_BITS(sb);
}
static bool ext4_trim_interrupted(void)
{
return fatal_signal_pending(current) || freezing(current);
}
static int ext4_try_to_trim_range(struct super_block *sb,
struct ext4_buddy *e4b, ext4_grpblk_t start,
ext4_grpblk_t max, ext4_grpblk_t minblocks)
{
ext4_grpblk_t next, count, free_count;
bool set_trimmed = false;
void *bitmap;
bitmap = e4b->bd_bitmap;
if (start == 0 && max >= ext4_last_grp_cluster(sb, e4b->bd_group))
set_trimmed = true;
start = max(e4b->bd_info->bb_first_free, start);
count = 0;
free_count = 0;
while (start <= max) {
start = mb_find_next_zero_bit(bitmap, max + 1, start);
if (start > max)
break;
next = mb_find_next_bit(bitmap, max + 1, start);
if ((next - start) >= minblocks) {
int ret = ext4_trim_extent(sb, start, next - start, e4b);
if (ret && ret != -EOPNOTSUPP)
return count;
count += next - start;
}
free_count += next - start;
start = next + 1;
if (ext4_trim_interrupted())
return count;
if (need_resched()) {
ext4_unlock_group(sb, e4b->bd_group);
cond_resched();
ext4_lock_group(sb, e4b->bd_group);
}
if ((e4b->bd_info->bb_free - free_count) < minblocks)
break;
}
if (set_trimmed)
EXT4_MB_GRP_SET_TRIMMED(e4b->bd_info);
return count;
}
/**
* ext4_trim_all_free -- function to trim all free space in alloc. group
* @sb: super block for file system
@@ -5207,10 +5273,8 @@ ext4_trim_all_free(struct super_block *sb, ext4_group_t group,
ext4_grpblk_t start, ext4_grpblk_t max,
ext4_grpblk_t minblocks)
{
void *bitmap;
ext4_grpblk_t next, count = 0, free_count = 0;
struct ext4_buddy e4b;
int ret = 0;
int ret;
trace_ext4_trim_all_free(sb, group, start, max);
@@ -5220,58 +5284,20 @@ ext4_trim_all_free(struct super_block *sb, ext4_group_t group,
ret, group);
return ret;
}
bitmap = e4b.bd_bitmap;
ext4_lock_group(sb, group);
if (EXT4_MB_GRP_WAS_TRIMMED(e4b.bd_info) &&
minblocks >= atomic_read(&EXT4_SB(sb)->s_last_trim_minblks))
goto out;
start = (e4b.bd_info->bb_first_free > start) ?
e4b.bd_info->bb_first_free : start;
if (!EXT4_MB_GRP_WAS_TRIMMED(e4b.bd_info) ||
minblocks < EXT4_SB(sb)->s_last_trim_minblks)
ret = ext4_try_to_trim_range(sb, &e4b, start, max, minblocks);
else
ret = 0;
while (start <= max) {
start = mb_find_next_zero_bit(bitmap, max + 1, start);
if (start > max)
break;
next = mb_find_next_bit(bitmap, max + 1, start);
if ((next - start) >= minblocks) {
ret = ext4_trim_extent(sb, start,
next - start, group, &e4b);
if (ret && ret != -EOPNOTSUPP)
break;
ret = 0;
count += next - start;
}
free_count += next - start;
start = next + 1;
if (fatal_signal_pending(current)) {
count = -ERESTARTSYS;
break;
}
if (need_resched()) {
ext4_unlock_group(sb, group);
cond_resched();
ext4_lock_group(sb, group);
}
if ((e4b.bd_info->bb_free - free_count) < minblocks)
break;
}
if (!ret) {
ret = count;
EXT4_MB_GRP_SET_TRIMMED(e4b.bd_info);
}
out:
ext4_unlock_group(sb, group);
ext4_mb_unload_buddy(&e4b);
ext4_debug("trimmed %d blocks in the group %d\n",
count, group);
ret, group);
return ret;
}
@@ -5316,7 +5342,7 @@ int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
if (minlen > EXT4_CLUSTERS_PER_GROUP(sb))
goto out;
}
if (end >= max_blks)
if (end >= max_blks - 1)
end = max_blks - 1;
if (end <= first_data_blk)
goto out;
@@ -5333,6 +5359,8 @@ int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
end = EXT4_CLUSTERS_PER_GROUP(sb) - 1;
for (group = first_group; group <= last_group; group++) {
if (ext4_trim_interrupted())
break;
grp = ext4_get_group_info(sb, group);
/* We only do this if the grp has never been initialized */
if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
@@ -5349,10 +5377,9 @@ int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
*/
if (group == last_group)
end = last_cluster;
if (grp->bb_free >= minlen) {
cnt = ext4_trim_all_free(sb, group, first_cluster,
end, minlen);
end, minlen);
if (cnt < 0) {
ret = cnt;
break;
@@ -5368,7 +5395,7 @@ int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
}
if (!ret)
atomic_set(&EXT4_SB(sb)->s_last_trim_minblks, minlen);
EXT4_SB(sb)->s_last_trim_minblks = minlen;
out:
range->len = EXT4_C2B(EXT4_SB(sb), trimmed) << sb->s_blocksize_bits;
@@ -5397,8 +5424,7 @@ ext4_mballoc_query_range(
ext4_lock_group(sb, group);
start = (e4b.bd_info->bb_first_free > start) ?
e4b.bd_info->bb_first_free : start;
start = max(e4b.bd_info->bb_first_free, start);
if (end >= EXT4_CLUSTERS_PER_GROUP(sb))
end = EXT4_CLUSTERS_PER_GROUP(sb) - 1;


@@ -325,17 +325,17 @@ static struct ext4_dir_entry_tail *get_dirent_tail(struct inode *inode,
struct ext4_dir_entry *de)
{
struct ext4_dir_entry_tail *t;
int blocksize = EXT4_BLOCK_SIZE(inode->i_sb);
#ifdef PARANOID
struct ext4_dir_entry *d, *top;
d = de;
top = (struct ext4_dir_entry *)(((void *)de) +
(EXT4_BLOCK_SIZE(inode->i_sb) -
sizeof(struct ext4_dir_entry_tail)));
while (d < top && d->rec_len)
(blocksize - sizeof(struct ext4_dir_entry_tail)));
while (d < top && ext4_rec_len_from_disk(d->rec_len, blocksize))
d = (struct ext4_dir_entry *)(((void *)d) +
le16_to_cpu(d->rec_len));
ext4_rec_len_from_disk(d->rec_len, blocksize));
if (d != top)
return NULL;
@@ -346,7 +346,8 @@ static struct ext4_dir_entry_tail *get_dirent_tail(struct inode *inode,
#endif
if (t->det_reserved_zero1 ||
le16_to_cpu(t->det_rec_len) != sizeof(struct ext4_dir_entry_tail) ||
(ext4_rec_len_from_disk(t->det_rec_len, blocksize) !=
sizeof(struct ext4_dir_entry_tail)) ||
t->det_reserved_zero2 ||
t->det_reserved_ft != EXT4_FT_DIR_CSUM)
return NULL;
@@ -428,13 +429,14 @@ static struct dx_countlimit *get_dx_countlimit(struct inode *inode,
struct ext4_dir_entry *dp;
struct dx_root_info *root;
int count_offset;
int blocksize = EXT4_BLOCK_SIZE(inode->i_sb);
unsigned int rlen = ext4_rec_len_from_disk(dirent->rec_len, blocksize);
if (le16_to_cpu(dirent->rec_len) == EXT4_BLOCK_SIZE(inode->i_sb))
if (rlen == blocksize)
count_offset = 8;
else if (le16_to_cpu(dirent->rec_len) == 12) {
else if (rlen == 12) {
dp = (struct ext4_dir_entry *)(((void *)dirent) + 12);
if (le16_to_cpu(dp->rec_len) !=
EXT4_BLOCK_SIZE(inode->i_sb) - 12)
if (ext4_rec_len_from_disk(dp->rec_len, blocksize) != blocksize - 12)
return NULL;
root = (struct dx_root_info *)(((void *)dp + 12));
if (root->reserved_zero ||
@@ -1285,6 +1287,7 @@ static int dx_make_map(struct inode *dir, struct buffer_head *bh,
unsigned int buflen = bh->b_size;
char *base = bh->b_data;
struct dx_hash_info h = *hinfo;
int blocksize = EXT4_BLOCK_SIZE(dir->i_sb);
if (ext4_has_metadata_csum(dir->i_sb))
buflen -= sizeof(struct ext4_dir_entry_tail);
@@ -1301,11 +1304,12 @@ static int dx_make_map(struct inode *dir, struct buffer_head *bh,
map_tail--;
map_tail->hash = h.hash;
map_tail->offs = ((char *) de - base)>>2;
map_tail->size = le16_to_cpu(de->rec_len);
map_tail->size = ext4_rec_len_from_disk(de->rec_len,
blocksize);
count++;
cond_resched();
}
de = ext4_next_entry(de, dir->i_sb->s_blocksize);
de = ext4_next_entry(de, blocksize);
}
return count;
}


@@ -1189,6 +1189,7 @@ static void ff_layout_io_track_ds_error(struct pnfs_layout_segment *lseg,
case -EPFNOSUPPORT:
case -EPROTONOSUPPORT:
case -EOPNOTSUPP:
case -EINVAL:
case -ECONNREFUSED:
case -ECONNRESET:
case -EHOSTDOWN:


@@ -73,10 +73,8 @@ int nilfs_gccache_submit_read_data(struct inode *inode, sector_t blkoff,
struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
err = nilfs_dat_translate(nilfs->ns_dat, vbn, &pbn);
if (unlikely(err)) { /* -EIO, -ENOMEM, -ENOENT */
brelse(bh);
if (unlikely(err)) /* -EIO, -ENOMEM, -ENOENT */
goto failed;
}
}
lock_buffer(bh);
@@ -102,6 +100,8 @@ int nilfs_gccache_submit_read_data(struct inode *inode, sector_t blkoff,
failed:
unlock_page(bh->b_page);
put_page(bh->b_page);
if (unlikely(err))
brelse(bh);
return err;
}


@@ -196,6 +196,8 @@ struct team {
struct net_device *dev; /* associated netdevice */
struct team_pcpu_stats __percpu *pcpu_stats;
const struct header_ops *header_ops_cache;
struct mutex lock; /* used for overall locking, e.g. port lists write */
/*


@@ -278,6 +278,10 @@ enum {
ATA_HOST_PARALLEL_SCAN = (1 << 2), /* Ports on this host can be scanned in parallel */
ATA_HOST_IGNORE_ATA = (1 << 3), /* Ignore ATA devices on this host. */
ATA_HOST_NO_PART = (1 << 4), /* Host does not support partial */
ATA_HOST_NO_SSC = (1 << 5), /* Host does not support slumber */
ATA_HOST_NO_DEVSLP = (1 << 6), /* Host does not support devslp */
/* bits 24:31 of host->flags are reserved for LLD specific flags */
/* various lengths of time */
@@ -311,7 +315,7 @@
* advised to wait only for the following duration before
* doing SRST.
*/
ATA_TMOUT_PMP_SRST_WAIT = 5000,
ATA_TMOUT_PMP_SRST_WAIT = 10000,
/* When the LPM policy is set to ATA_LPM_MAX_POWER, there might
* be a spurious PHY event, so ignore the first PHY event that


@@ -163,31 +163,38 @@ static inline bool dev_xmit_complete(int rc)
* (unsigned long) so they can be read and written atomically.
*/
#define NET_DEV_STAT(FIELD) \
union { \
unsigned long FIELD; \
atomic_long_t __##FIELD; \
}
struct net_device_stats {
unsigned long rx_packets;
unsigned long tx_packets;
unsigned long rx_bytes;
unsigned long tx_bytes;
unsigned long rx_errors;
unsigned long tx_errors;
unsigned long rx_dropped;
unsigned long tx_dropped;
unsigned long multicast;
unsigned long collisions;
unsigned long rx_length_errors;
unsigned long rx_over_errors;
unsigned long rx_crc_errors;
unsigned long rx_frame_errors;
unsigned long rx_fifo_errors;
unsigned long rx_missed_errors;
unsigned long tx_aborted_errors;
unsigned long tx_carrier_errors;
unsigned long tx_fifo_errors;
unsigned long tx_heartbeat_errors;
unsigned long tx_window_errors;
unsigned long rx_compressed;
unsigned long tx_compressed;
NET_DEV_STAT(rx_packets);
NET_DEV_STAT(tx_packets);
NET_DEV_STAT(rx_bytes);
NET_DEV_STAT(tx_bytes);
NET_DEV_STAT(rx_errors);
NET_DEV_STAT(tx_errors);
NET_DEV_STAT(rx_dropped);
NET_DEV_STAT(tx_dropped);
NET_DEV_STAT(multicast);
NET_DEV_STAT(collisions);
NET_DEV_STAT(rx_length_errors);
NET_DEV_STAT(rx_over_errors);
NET_DEV_STAT(rx_crc_errors);
NET_DEV_STAT(rx_frame_errors);
NET_DEV_STAT(rx_fifo_errors);
NET_DEV_STAT(rx_missed_errors);
NET_DEV_STAT(tx_aborted_errors);
NET_DEV_STAT(tx_carrier_errors);
NET_DEV_STAT(tx_fifo_errors);
NET_DEV_STAT(tx_heartbeat_errors);
NET_DEV_STAT(tx_window_errors);
NET_DEV_STAT(rx_compressed);
NET_DEV_STAT(tx_compressed);
};
#undef NET_DEV_STAT
#include <linux/cache.h>
@@ -4889,4 +4896,9 @@ do { \
#define PTYPE_HASH_SIZE (16)
#define PTYPE_HASH_MASK (PTYPE_HASH_SIZE - 1)
/* Note: Avoid these macros in fast path, prefer per-cpu or per-queue counters. */
#define DEV_STATS_INC(DEV, FIELD) atomic_long_inc(&(DEV)->stats.__##FIELD)
#define DEV_STATS_ADD(DEV, FIELD, VAL) \
atomic_long_add((VAL), &(DEV)->stats.__##FIELD)
#endif /* _LINUX_NETDEVICE_H */


@@ -362,9 +362,8 @@ static inline void __skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev,
static inline void skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev,
struct net *net)
{
/* TODO : stats should be SMP safe */
dev->stats.rx_packets++;
dev->stats.rx_bytes += skb->len;
DEV_STATS_INC(dev, rx_packets);
DEV_STATS_ADD(dev, rx_bytes, skb->len);
__skb_tunnel_rx(skb, dev, net);
}


@@ -346,12 +346,14 @@ ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
struct pipe_inode_info *pipe, size_t len,
unsigned int flags);
static inline void tcp_dec_quickack_mode(struct sock *sk,
const unsigned int pkts)
static inline void tcp_dec_quickack_mode(struct sock *sk)
{
struct inet_connection_sock *icsk = inet_csk(sk);
if (icsk->icsk_ack.quick) {
/* How many ACKs S/ACKing new data have we sent? */
const unsigned int pkts = inet_csk_ack_scheduled(sk) ? 1 : 0;
if (pkts >= icsk->icsk_ack.quick) {
icsk->icsk_ack.quick = 0;
/* Leaving quickack mode we deflate ATO. */


@@ -695,7 +695,9 @@ union bpf_attr {
* performed again, if the helper is used in combination with
* direct packet access.
* Return
* 0 on success, or a negative error in case of failure.
* 0 on success, or a negative error in case of failure. Positive
* error indicates a potential drop or congestion in the target
* device. The particular positive error codes are not defined.
*
* u64 bpf_get_current_pid_tgid(void)
* Return


@@ -1753,6 +1753,8 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
err = -ENOMEM;
goto out_err;
}
cond_resched();
}
get_online_cpus();


@@ -122,7 +122,7 @@ static int deliver_clone(const struct net_bridge_port *prev,
skb = skb_clone(skb, GFP_ATOMIC);
if (!skb) {
dev->stats.tx_dropped++;
DEV_STATS_INC(dev, tx_dropped);
return -ENOMEM;
}
@@ -261,7 +261,7 @@ static void maybe_deliver_addr(struct net_bridge_port *p, struct sk_buff *skb,
skb = skb_copy(skb, GFP_ATOMIC);
if (!skb) {
dev->stats.tx_dropped++;
DEV_STATS_INC(dev, tx_dropped);
return;
}


@@ -146,12 +146,12 @@ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb
if ((mdst && mdst->host_joined) ||
br_multicast_is_router(br)) {
local_rcv = true;
br->dev->stats.multicast++;
DEV_STATS_INC(br->dev, multicast);
}
mcast_hit = true;
} else {
local_rcv = true;
br->dev->stats.multicast++;
DEV_STATS_INC(br->dev, multicast);
}
break;
case BR_PKT_UNICAST:


@@ -9050,24 +9050,16 @@ void netdev_run_todo(void)
void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
const struct net_device_stats *netdev_stats)
{
#if BITS_PER_LONG == 64
BUILD_BUG_ON(sizeof(*stats64) < sizeof(*netdev_stats));
memcpy(stats64, netdev_stats, sizeof(*netdev_stats));
/* zero out counters that only exist in rtnl_link_stats64 */
memset((char *)stats64 + sizeof(*netdev_stats), 0,
sizeof(*stats64) - sizeof(*netdev_stats));
#else
size_t i, n = sizeof(*netdev_stats) / sizeof(unsigned long);
const unsigned long *src = (const unsigned long *)netdev_stats;
size_t i, n = sizeof(*netdev_stats) / sizeof(atomic_long_t);
const atomic_long_t *src = (atomic_long_t *)netdev_stats;
u64 *dst = (u64 *)stats64;
BUILD_BUG_ON(n > sizeof(*stats64) / sizeof(u64));
for (i = 0; i < n; i++)
dst[i] = src[i];
dst[i] = (unsigned long)atomic_long_read(&src[i]);
/* zero out counters that only exist in rtnl_link_stats64 */
memset((char *)stats64 + n * sizeof(u64), 0,
sizeof(*stats64) - n * sizeof(u64));
#endif
}
EXPORT_SYMBOL(netdev_stats_to_stats64);


@@ -2702,10 +2702,7 @@ static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
ifm = nlmsg_data(nlh);
if (ifm->ifi_index > 0)
dev = __dev_get_by_index(net, ifm->ifi_index);
else if (ifm->ifi_index < 0) {
NL_SET_ERR_MSG(extack, "ifindex can't be negative");
return -EINVAL;
} else if (tb[IFLA_IFNAME])
else if (tb[IFLA_IFNAME])
dev = __dev_get_by_name(net, ifname);
else
goto errout;
@@ -2973,9 +2970,12 @@ replay:
ifname[0] = '\0';
ifm = nlmsg_data(nlh);
if (ifm->ifi_index > 0)
if (ifm->ifi_index > 0) {
dev = __dev_get_by_index(net, ifm->ifi_index);
else {
} else if (ifm->ifi_index < 0) {
NL_SET_ERR_MSG(extack, "ifindex can't be negative");
return -EINVAL;
} else {
if (ifname[0])
dev = __dev_get_by_name(net, ifname);
else


@@ -247,13 +247,8 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info)
int err;
struct net *net = dev_net(skb->dev);
/* For the first __dccp_basic_hdr_len() check, we only need dh->dccph_x,
* which is in byte 7 of the dccp header.
* Our caller (icmp_socket_deliver()) already pulled 8 bytes for us.
*
* Later on, we want to access the sequence number fields, which are
* beyond 8 bytes, so we have to pskb_may_pull() ourselves.
*/
if (!pskb_may_pull(skb, offset + sizeof(*dh)))
return;
dh = (struct dccp_hdr *)(skb->data + offset);
if (!pskb_may_pull(skb, offset + __dccp_basic_hdr_len(dh)))
return;


@@ -80,13 +80,8 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
__u64 seq;
struct net *net = dev_net(skb->dev);
/* For the first __dccp_basic_hdr_len() check, we only need dh->dccph_x,
* which is in byte 7 of the dccp header.
* Our caller (icmpv6_notify()) already pulled 8 bytes for us.
*
* Later on, we want to access the sequence number fields, which are
* beyond 8 bytes, so we have to pskb_may_pull() ourselves.
*/
if (!pskb_may_pull(skb, offset + sizeof(*dh)))
return;
dh = (struct dccp_hdr *)(skb->data + offset);
if (!pskb_may_pull(skb, offset + __dccp_basic_hdr_len(dh)))
return;


@@ -1215,6 +1215,7 @@ static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
static void ipv4_send_dest_unreach(struct sk_buff *skb)
{
struct net_device *dev;
struct ip_options opt;
int res;
@@ -1232,7 +1233,8 @@ static void ipv4_send_dest_unreach(struct sk_buff *skb)
opt.optlen = ip_hdr(skb)->ihl * 4 - sizeof(struct iphdr);
rcu_read_lock();
res = __ip_options_compile(dev_net(skb->dev), &opt, skb, NULL);
dev = skb->dev ? skb->dev : skb_rtable(skb)->dst.dev;
res = __ip_options_compile(dev_net(dev), &opt, skb, NULL);
rcu_read_unlock();
if (res)


@@ -172,6 +172,19 @@ static void tcp_measure_rcv_mss(struct sock *sk, const struct sk_buff *skb)
if (unlikely(len > icsk->icsk_ack.rcv_mss +
MAX_TCP_OPTION_SPACE))
tcp_gro_dev_warn(sk, skb, len);
/* If the skb has a len of exactly 1*MSS and has the PSH bit
* set then it is likely the end of an application write. So
* more data may not be arriving soon, and yet the data sender
* may be waiting for an ACK if cwnd-bound or using TX zero
* copy. So we set ICSK_ACK_PUSHED here so that
* tcp_cleanup_rbuf() will send an ACK immediately if the app
* reads all of the data and is not ping-pong. If len > MSS
* then this logic does not matter (and does not hurt) because
* tcp_cleanup_rbuf() will always ACK immediately if the app
* reads data and there is more than an MSS of unACKed data.
*/
if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_PSH)
icsk->icsk_ack.pending |= ICSK_ACK_PUSHED;
} else {
/* Otherwise, we make more careful check taking into account,
* that SACKs block is variable.


@@ -164,8 +164,7 @@ static void tcp_event_data_sent(struct tcp_sock *tp,
}
/* Account for an ACK we sent. */
static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts,
u32 rcv_nxt)
static inline void tcp_event_ack_sent(struct sock *sk, u32 rcv_nxt)
{
struct tcp_sock *tp = tcp_sk(sk);
@@ -179,7 +178,7 @@ static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts,
if (unlikely(rcv_nxt != tp->rcv_nxt))
return; /* Special ACK sent by DCTCP to reflect ECN */
tcp_dec_quickack_mode(sk, pkts);
tcp_dec_quickack_mode(sk);
inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
}
@@ -1139,7 +1138,7 @@ static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb,
icsk->icsk_af_ops->send_check(sk, skb);
if (likely(tcb->tcp_flags & TCPHDR_ACK))
tcp_event_ack_sent(sk, tcp_skb_pcount(skb), rcv_nxt);
tcp_event_ack_sent(sk, rcv_nxt);
if (skb->len != tcp_header_size) {
tcp_event_data_sent(tp, sk);


@@ -525,7 +525,6 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
*/
if (len > INT_MAX - transhdrlen)
return -EMSGSIZE;
ulen = len + transhdrlen;
/* Mirror BSD error message compatibility */
if (msg->msg_flags & MSG_OOB)
@@ -649,6 +648,7 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
back_from_confirm:
lock_sock(sk);
ulen = len + (skb_queue_empty(&sk->sk_write_queue) ? transhdrlen : 0);
err = ip6_append_data(sk, ip_generic_getfrag, msg,
ulen, transhdrlen, &ipc6,
&fl6, (struct rt6_info *)dst,


@@ -1510,8 +1510,8 @@ static int make_send_sock(struct netns_ipvs *ipvs, int id,
}
get_mcast_sockaddr(&mcast_addr, &salen, &ipvs->mcfg, id);
result = sock->ops->connect(sock, (struct sockaddr *) &mcast_addr,
salen, 0);
result = kernel_connect(sock, (struct sockaddr *)&mcast_addr,
salen, 0);
if (result < 0) {
pr_err("Error connecting to the multicast addr\n");
goto error;


@@ -993,8 +993,7 @@ static int nft_flush_table(struct nft_ctx *ctx)
if (!nft_is_active_next(ctx->net, set))
continue;
if (nft_set_is_anonymous(set) &&
!list_empty(&set->bindings))
if (nft_set_is_anonymous(set))
continue;
err = nft_delset(ctx, set);
@@ -4902,8 +4901,10 @@ static int nf_tables_delsetelem(struct net *net, struct sock *nlsk,
if (IS_ERR(set))
return PTR_ERR(set);
if (!list_empty(&set->bindings) &&
(set->flags & (NFT_SET_CONSTANT | NFT_SET_ANONYMOUS)))
if (nft_set_is_anonymous(set))
return -EOPNOTSUPP;
if (!list_empty(&set->bindings) && (set->flags & NFT_SET_CONSTANT))
return -EBUSY;
if (nla[NFTA_SET_ELEM_LIST_ELEMENTS] == NULL) {


@@ -169,7 +169,7 @@ int rds_tcp_conn_path_connect(struct rds_conn_path *cp)
* own the socket
*/
rds_tcp_set_callbacks(sock, cp);
ret = sock->ops->connect(sock, addr, addrlen, O_NONBLOCK);
ret = kernel_connect(sock, addr, addrlen, O_NONBLOCK);
rdsdebug("connect to address %pI6c returned %d\n", &conn->c_faddr, ret);
if (ret == -EINPROGRESS)


@@ -1181,8 +1181,7 @@ int sctp_assoc_update(struct sctp_association *asoc,
/* Add any peer addresses from the new association. */
list_for_each_entry(trans, &new->peer.transport_addr_list,
transports)
if (!sctp_assoc_lookup_paddr(asoc, &trans->ipaddr) &&
!sctp_assoc_add_peer(asoc, &trans->ipaddr,
if (!sctp_assoc_add_peer(asoc, &trans->ipaddr,
GFP_ATOMIC, trans->state))
return -ENOMEM;


@@ -2578,6 +2578,7 @@ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params,
if (trans) {
trans->hbinterval =
msecs_to_jiffies(params->spp_hbinterval);
sctp_transport_reset_hb_timer(trans);
} else if (asoc) {
asoc->hbinterval =
msecs_to_jiffies(params->spp_hbinterval);
