This is the 4.19.173 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmAbIscACgkQONu9yGCS
 aT7PvhAAt5DPifFfFpsdbdQ5/H3tuQvgnl/WUu2bXEId0WJ3k9onw3sXbFbG0JvE
 Ar+xB8ODKlsutHGx9+CzfowQSOdHqrtvSpyaKJ9fy6+shEUJaFEKXqLICic/N2t+
 5ngkj29mhqCqeG7VuSQ5aJyg9zabzUiXwETuZxcJY43oA7zK1/ajNoSLM/TQUkMF
 HoK1LXd1fcOQrO0Rf4ZsRqPI9tHNho+7rtIFyJscO24JJRqi9f+qWCOZFHA0+NQG
 2aQr/nECkaF1kWZEzz8zIUt6e7edjY+47I0tiIwg3uR2ZDo5tJUUU94y4DAahBwZ
 41hkS6gWw6/c38KtircKtACfe8qsSLFrUKF+2MDv9i5Ao1g5+EDINqi2fKbE0xpn
 avs+r0gnyyW29LcyAfEu3foIHAi4pCppnNjmLjmAG1HZK9w6KcjrcBDO6o2GWbur
 eD6VcvndDJGreOXtS1eLG6Y6JhmCBsZtUKZUp7yV5oDL+lSIIAH00hu3aGpjTG7K
 DP9364bqJIHVLgBkoMqmocnWBuraMFs65SAr3//UDwbXBky6oX9YC5ytmsRKSmyQ
 BePmrzHxCM+2GNXhZ9BTAnvKngmnRVWSVH7WlQzlVugliuKdvUYokhtba/TmjIUw
 1R5vo3Y1NM2LpmD1xRaeSm7QD1dgYrkfNdv/I8M2UY9/a8dzngU=
 =w3Q7
 -----END PGP SIGNATURE-----

Merge 4.19.173 into android-4.19-stable

Changes in 4.19.173
	nbd: freeze the queue while we're adding connections
	ACPI: sysfs: Prefer "compatible" modalias
	kernel: kexec: remove the lock operation of system_transition_mutex
	xen/privcmd: allow fetching resource sizes
	ALSA: hda/via: Apply the workaround generically for Clevo machines
	media: rc: ensure that uevent can be read directly after rc device register
	ARM: dts: imx6qdl-gw52xx: fix duplicate regulator naming
	wext: fix NULL-ptr-dereference with cfg80211's lack of commit()
	net: usb: qmi_wwan: added support for Thales Cinterion PLSx3 modem family
	PM: hibernate: flush swap writer after marking
	drivers: soc: atmel: Avoid calling at91_soc_init on non AT91 SoCs
	drivers: soc: atmel: add null entry at the end of at91_soc_allowed_list[]
	KVM: x86/pmu: Fix HW_REF_CPU_CYCLES event pseudo-encoding in intel_arch_events[]
	KVM: x86: get smi pending status correctly
	xen: Fix XenStore initialisation for XS_LOCAL
	leds: trigger: fix potential deadlock with libata
	mt7601u: fix kernel crash unplugging the device
	mt7601u: fix rx buffer refcounting
	xen-blkfront: allow discard-* nodes to be optional
	ARM: imx: build suspend-imx6.S with arm instruction set
	netfilter: nft_dynset: add timeout extension to template
	xfrm: Fix oops in xfrm_replay_advance_bmp
	xfrm: fix disable_xfrm sysctl when used on xfrm interfaces
	RDMA/cxgb4: Fix the reported max_recv_sge value
	pNFS/NFSv4: Fix a layout segment leak in pnfs_layout_process()
	iwlwifi: pcie: use jiffies for memory read spin time limit
	iwlwifi: pcie: reschedule in long-running memory reads
	mac80211: pause TX while changing interface type
	net/mlx5: Fix memory leak on flow table creation error flow
	can: dev: prevent potential information leak in can_fill_info()
	iommu/vt-d: Gracefully handle DMAR units with no supported address widths
	iommu/vt-d: Don't dereference iommu_device if IOMMU_API is not built
	rxrpc: Fix memory leak in rxrpc_lookup_local
	NFC: fix resource leak when target index is invalid
	NFC: fix possible resource leak
	team: protect features update by RCU to avoid deadlock
	tcp: fix TLP timer not set when CA_STATE changes from DISORDER to OPEN
	Linux 4.19.173

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: If4b2010c599fb95520fd0c28f9f389f35ac196cd
Committed by Greg Kroah-Hartman on 2021-02-03 23:49:39 +01:00 as commit ba65a97a3b.
38 changed files with 183 additions and 88 deletions.

View file

@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 4
PATCHLEVEL = 19
SUBLEVEL = 172
SUBLEVEL = 173
EXTRAVERSION =
NAME = "People's Front"

View file

@ -278,7 +278,7 @@
/* VDD_AUD_1P8: Audio codec */
reg_aud_1p8v: ldo3 {
regulator-name = "vdd1p8";
regulator-name = "vdd1p8a";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
regulator-boot-on;

View file

@ -73,6 +73,7 @@
#define MX6Q_CCM_CCR 0x0
.align 3
.arm
.macro sync_l2_cache

View file

@ -29,7 +29,7 @@ static struct kvm_event_hw_type_mapping intel_arch_events[] = {
[4] = { 0x2e, 0x41, PERF_COUNT_HW_CACHE_MISSES },
[5] = { 0xc4, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
[6] = { 0xc5, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
[7] = { 0x00, 0x30, PERF_COUNT_HW_REF_CPU_CYCLES },
[7] = { 0x00, 0x03, PERF_COUNT_HW_REF_CPU_CYCLES },
};
/* mapping between fixed pmc index and intel_arch_events array */
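
Note on the fix above: the architectural event is identified by an (event select, unit mask) pair that lands in bits 7:0 and 15:8 of IA32_PERFEVTSELx, and REF_CPU_CYCLES is defined as event select 0x00 with unit mask 0x03, so the old table entry simply had the two values transposed. A minimal user-space sketch of the packing (perfevtsel_encode is a made-up helper, not a kernel or KVM function):

    #include <stdint.h>
    #include <stdio.h>

    /* Bits 7:0 carry the event select, bits 15:8 the unit mask. */
    static uint64_t perfevtsel_encode(uint8_t eventsel, uint8_t umask)
    {
            return (uint64_t)eventsel | ((uint64_t)umask << 8);
    }

    int main(void)
    {
            /* REF_CPU_CYCLES pseudo-encoding: eventsel 0x00, umask 0x03 (not 0x30). */
            printf("config = 0x%04llx\n",
                   (unsigned long long)perfevtsel_encode(0x00, 0x03));
            return 0;
    }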

View file

@ -102,6 +102,7 @@ static u64 __read_mostly cr4_reserved_bits = CR4_RESERVED_BITS;
static void update_cr8_intercept(struct kvm_vcpu *vcpu);
static void process_nmi(struct kvm_vcpu *vcpu);
static void process_smi(struct kvm_vcpu *vcpu);
static void enter_smm(struct kvm_vcpu *vcpu);
static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
static void store_regs(struct kvm_vcpu *vcpu);
@ -3499,6 +3500,10 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
struct kvm_vcpu_events *events)
{
process_nmi(vcpu);
if (kvm_check_request(KVM_REQ_SMI, vcpu))
process_smi(vcpu);
/*
* FIXME: pass injected and pending separately. This is only
* needed for nested virtualization, whose state cannot be

View file

@ -259,20 +259,12 @@ int __acpi_device_uevent_modalias(struct acpi_device *adev,
if (add_uevent_var(env, "MODALIAS="))
return -ENOMEM;
len = create_pnp_modalias(adev, &env->buf[env->buflen - 1],
sizeof(env->buf) - env->buflen);
if (len < 0)
return len;
env->buflen += len;
if (!adev->data.of_compatible)
return 0;
if (len > 0 && add_uevent_var(env, "MODALIAS="))
return -ENOMEM;
len = create_of_modalias(adev, &env->buf[env->buflen - 1],
sizeof(env->buf) - env->buflen);
if (adev->data.of_compatible)
len = create_of_modalias(adev, &env->buf[env->buflen - 1],
sizeof(env->buf) - env->buflen);
else
len = create_pnp_modalias(adev, &env->buf[env->buflen - 1],
sizeof(env->buf) - env->buflen);
if (len < 0)
return len;

View file

@ -966,6 +966,12 @@ static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
if (!sock)
return err;
/*
* We need to make sure we don't get any errant requests while we're
* reallocating the ->socks array.
*/
blk_mq_freeze_queue(nbd->disk->queue);
if (!netlink && !nbd->task_setup &&
!test_bit(NBD_BOUND, &config->runtime_flags))
nbd->task_setup = current;
@ -1004,10 +1010,12 @@ static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
nsock->cookie = 0;
socks[config->num_connections++] = nsock;
atomic_inc(&config->live_connections);
blk_mq_unfreeze_queue(nbd->disk->queue);
return 0;
put_socket:
blk_mq_unfreeze_queue(nbd->disk->queue);
sockfd_put(sock);
return err;
}
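
Note on the fix above: freezing the blk-mq queue guarantees no requests are in flight while the ->socks array is reallocated. A kernel-style sketch of the general pattern only (reconfigure_under_freeze is a made-up name, not the nbd code):

    #include <linux/blk-mq.h>

    static void reconfigure_under_freeze(struct request_queue *q)
    {
            blk_mq_freeze_queue(q);      /* wait for in-flight requests to drain */
            /* ... safely swap or reallocate per-device state here ... */
            blk_mq_unfreeze_queue(q);    /* let new requests through again */
    }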

View file

@ -936,7 +936,8 @@ static void blkif_set_queue_limits(struct blkfront_info *info)
if (info->feature_discard) {
blk_queue_flag_set(QUEUE_FLAG_DISCARD, rq);
blk_queue_max_discard_sectors(rq, get_capacity(gd));
rq->limits.discard_granularity = info->discard_granularity;
rq->limits.discard_granularity = info->discard_granularity ?:
info->physical_sector_size;
rq->limits.discard_alignment = info->discard_alignment;
if (info->feature_secdiscard)
blk_queue_flag_set(QUEUE_FLAG_SECERASE, rq);
@ -2169,19 +2170,12 @@ static void blkfront_closing(struct blkfront_info *info)
static void blkfront_setup_discard(struct blkfront_info *info)
{
int err;
unsigned int discard_granularity;
unsigned int discard_alignment;
info->feature_discard = 1;
err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
"discard-granularity", "%u", &discard_granularity,
"discard-alignment", "%u", &discard_alignment,
NULL);
if (!err) {
info->discard_granularity = discard_granularity;
info->discard_alignment = discard_alignment;
}
info->discard_granularity = xenbus_read_unsigned(info->xbdev->otherend,
"discard-granularity",
0);
info->discard_alignment = xenbus_read_unsigned(info->xbdev->otherend,
"discard-alignment", 0);
info->feature_secdiscard =
!!xenbus_read_unsigned(info->xbdev->otherend, "discard-secure",
0);
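
Note on the fix above: xenbus_read_unsigned() returns the supplied default (here 0) when a node is absent, so the discard-* nodes no longer have to exist, and the GNU "?:" operator falls back to the physical sector size when the reported granularity is 0. A tiny sketch of that operator (granularity_or_default is a made-up helper):

    /* GNU C extension: "a ?: b" yields a when a is non-zero, else b,
     * evaluating a only once. */
    unsigned int granularity_or_default(unsigned int reported,
                                        unsigned int sector_size)
    {
            return reported ?: sector_size;
    }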

View file

@ -2485,7 +2485,7 @@ int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
init_attr->cap.max_send_wr = qhp->attr.sq_num_entries;
init_attr->cap.max_recv_wr = qhp->attr.rq_num_entries;
init_attr->cap.max_send_sge = qhp->attr.sq_max_sges;
init_attr->cap.max_recv_sge = qhp->attr.sq_max_sges;
init_attr->cap.max_recv_sge = qhp->attr.rq_max_sges;
init_attr->cap.max_inline_data = T4_MAX_SEND_INLINE;
init_attr->sq_sig_type = qhp->sq_sig_all ? IB_SIGNAL_ALL_WR : 0;
return 0;

View file

@ -1029,8 +1029,8 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)
{
struct intel_iommu *iommu;
u32 ver, sts;
int agaw = 0;
int msagaw = 0;
int agaw = -1;
int msagaw = -1;
int err;
if (!drhd->reg_base_addr) {
@ -1055,17 +1055,28 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)
}
err = -EINVAL;
agaw = iommu_calculate_agaw(iommu);
if (agaw < 0) {
pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n",
iommu->seq_id);
goto err_unmap;
if (cap_sagaw(iommu->cap) == 0) {
pr_info("%s: No supported address widths. Not attempting DMA translation.\n",
iommu->name);
drhd->ignored = 1;
}
msagaw = iommu_calculate_max_sagaw(iommu);
if (msagaw < 0) {
pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n",
iommu->seq_id);
goto err_unmap;
if (!drhd->ignored) {
agaw = iommu_calculate_agaw(iommu);
if (agaw < 0) {
pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n",
iommu->seq_id);
drhd->ignored = 1;
}
}
if (!drhd->ignored) {
msagaw = iommu_calculate_max_sagaw(iommu);
if (msagaw < 0) {
pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n",
iommu->seq_id);
drhd->ignored = 1;
agaw = -1;
}
}
iommu->agaw = agaw;
iommu->msagaw = msagaw;
@ -1092,7 +1103,12 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)
raw_spin_lock_init(&iommu->register_lock);
if (intel_iommu_enabled) {
/*
* This is only for hotplug; at boot time intel_iommu_enabled won't
* be set yet. When intel_iommu_init() runs, it registers the units
* present at boot time, then sets intel_iommu_enabled.
*/
if (intel_iommu_enabled && !drhd->ignored) {
err = iommu_device_sysfs_add(&iommu->iommu, NULL,
intel_iommu_groups,
"%s", iommu->name);
@ -1107,6 +1123,7 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)
}
drhd->iommu = iommu;
iommu->drhd = drhd;
return 0;
@ -1121,7 +1138,7 @@ error:
static void free_iommu(struct intel_iommu *iommu)
{
if (intel_iommu_enabled) {
if (intel_iommu_enabled && !iommu->drhd->ignored) {
iommu_device_unregister(&iommu->iommu);
iommu_device_sysfs_remove(&iommu->iommu);
}

View file

@ -317,14 +317,15 @@ void led_trigger_event(struct led_trigger *trig,
enum led_brightness brightness)
{
struct led_classdev *led_cdev;
unsigned long flags;
if (!trig)
return;
read_lock(&trig->leddev_list_lock);
read_lock_irqsave(&trig->leddev_list_lock, flags);
list_for_each_entry(led_cdev, &trig->led_cdevs, trig_list)
led_set_brightness(led_cdev, brightness);
read_unlock(&trig->leddev_list_lock);
read_unlock_irqrestore(&trig->leddev_list_lock, flags);
}
EXPORT_SYMBOL_GPL(led_trigger_event);
@ -335,11 +336,12 @@ static void led_trigger_blink_setup(struct led_trigger *trig,
int invert)
{
struct led_classdev *led_cdev;
unsigned long flags;
if (!trig)
return;
read_lock(&trig->leddev_list_lock);
read_lock_irqsave(&trig->leddev_list_lock, flags);
list_for_each_entry(led_cdev, &trig->led_cdevs, trig_list) {
if (oneshot)
led_blink_set_oneshot(led_cdev, delay_on, delay_off,
@ -347,7 +349,7 @@ static void led_trigger_blink_setup(struct led_trigger *trig,
else
led_blink_set(led_cdev, delay_on, delay_off);
}
read_unlock(&trig->leddev_list_lock);
read_unlock_irqrestore(&trig->leddev_list_lock, flags);
}
void led_trigger_blink(struct led_trigger *trig,
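
Note on the fix above: these trigger paths can run from hard-IRQ context (e.g. via libata completions), so the list lock has to be taken with interrupts disabled; otherwise an interrupt arriving while the lock is held on the same CPU can try to take it again and deadlock. A kernel-style sketch of the idiom, with made-up names (example_list_lock, walk_list_from_any_context):

    #include <linux/spinlock.h>

    static DEFINE_RWLOCK(example_list_lock);

    static void walk_list_from_any_context(void)
    {
            unsigned long flags;

            read_lock_irqsave(&example_list_lock, flags);
            /* ... iterate the protected list and act on each entry ... */
            read_unlock_irqrestore(&example_list_lock, flags);
    }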

View file

@ -1875,6 +1875,8 @@ int rc_register_device(struct rc_dev *dev)
goto out_raw;
}
dev->registered = true;
rc = device_add(&dev->dev);
if (rc)
goto out_rx_free;
@ -1884,8 +1886,6 @@ int rc_register_device(struct rc_dev *dev)
dev->device_name ?: "Unspecified device", path ?: "N/A");
kfree(path);
dev->registered = true;
/*
* once the the input device is registered in rc_setup_rx_device,
* userspace can open the input device and rc_open() will be called

View file

@ -1142,7 +1142,7 @@ static int can_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
struct can_priv *priv = netdev_priv(dev);
struct can_ctrlmode cm = {.flags = priv->ctrlmode};
struct can_berr_counter bec;
struct can_berr_counter bec = { };
enum can_state state = priv->state;
if (priv->do_get_state)
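
Note on the fix above: the counter struct is copied out to user space via netlink, and when the driver provides no berr-counter callback nothing fills it, so "= { }" is needed to avoid leaking uninitialized stack bytes. A small user-space sketch of the same idea (struct berr_counter and get_counters are made up):

    #include <stdio.h>
    #include <string.h>

    struct berr_counter { unsigned short txerr, rxerr; };

    /* Zero-initialize before the optional fill so the bytes copied out
     * never contain leftover stack contents. */
    static void get_counters(int have_callback, struct berr_counter *out)
    {
            struct berr_counter bec = { 0 };   /* same effect as "= { }" */

            if (have_callback) {
                    bec.txerr = 1;
                    bec.rxerr = 2;
            }
            memcpy(out, &bec, sizeof(bec));
    }

    int main(void)
    {
            struct berr_counter c;

            get_counters(0, &c);
            printf("%u %u\n", c.txerr, c.rxerr);   /* always "0 0", never garbage */
            return 0;
    }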

View file

@ -1004,6 +1004,7 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa
destroy_ft:
root->cmds->destroy_flow_table(root->dev, ft);
free_ft:
rhltable_destroy(&ft->fgs_hash);
kfree(ft);
unlock_root:
mutex_unlock(&root->chain_lock);

View file

@ -998,7 +998,8 @@ static void __team_compute_features(struct team *team)
unsigned int dst_release_flag = IFF_XMIT_DST_RELEASE |
IFF_XMIT_DST_RELEASE_PERM;
list_for_each_entry(port, &team->port_list, list) {
rcu_read_lock();
list_for_each_entry_rcu(port, &team->port_list, list) {
vlan_features = netdev_increment_features(vlan_features,
port->dev->vlan_features,
TEAM_VLAN_FEATURES);
@ -1012,6 +1013,7 @@ static void __team_compute_features(struct team *team)
if (port->dev->hard_header_len > max_hard_header_len)
max_hard_header_len = port->dev->hard_header_len;
}
rcu_read_unlock();
team->dev->vlan_features = vlan_features;
team->dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL |
@ -1027,9 +1029,7 @@ static void __team_compute_features(struct team *team)
static void team_compute_features(struct team *team)
{
mutex_lock(&team->lock);
__team_compute_features(team);
mutex_unlock(&team->lock);
netdev_change_features(team->dev);
}
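
Note on the fix above: walking team->port_list under rcu_read_lock() lets the features be recomputed without holding team->lock, which breaks the lock dependency with netdev_change_features() that could deadlock. A generic kernel-style sketch of an RCU-protected list walk (struct port, min_mtu and the field names are made up):

    #include <linux/kernel.h>
    #include <linux/rculist.h>

    struct port { struct list_head list; unsigned int mtu; };

    /* Readers traverse under rcu_read_lock(); writers must use the list_*_rcu()
     * helpers and wait a grace period before freeing removed entries. */
    static unsigned int min_mtu(struct list_head *ports)
    {
            struct port *p;
            unsigned int mtu = ~0U;

            rcu_read_lock();
            list_for_each_entry_rcu(p, ports, list)
                    mtu = min(mtu, p->mtu);
            rcu_read_unlock();

            return mtu;
    }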

View file

@ -1284,6 +1284,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x0b3c, 0xc00a, 6)}, /* Olivetti Olicard 160 */
{QMI_FIXED_INTF(0x0b3c, 0xc00b, 4)}, /* Olivetti Olicard 500 */
{QMI_FIXED_INTF(0x1e2d, 0x0060, 4)}, /* Cinterion PLxx */
{QMI_QUIRK_SET_DTR(0x1e2d, 0x006f, 8)}, /* Cinterion PLS83/PLS63 */
{QMI_FIXED_INTF(0x1e2d, 0x0053, 4)}, /* Cinterion PHxx,PXxx */
{QMI_FIXED_INTF(0x1e2d, 0x0063, 10)}, /* Cinterion ALASxx (1 RmNet) */
{QMI_FIXED_INTF(0x1e2d, 0x0082, 4)}, /* Cinterion PHxx,PXxx (2 RmNet) */

View file

@ -2126,7 +2126,8 @@ static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr,
while (offs < dwords) {
/* limit the time we spin here under lock to 1/2s */
ktime_t timeout = ktime_add_us(ktime_get(), 500 * USEC_PER_MSEC);
unsigned long end = jiffies + HZ / 2;
bool resched = false;
if (iwl_trans_grab_nic_access(trans, &flags)) {
iwl_write32(trans, HBUS_TARG_MEM_RADDR,
@ -2137,14 +2138,15 @@ static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr,
HBUS_TARG_MEM_RDAT);
offs++;
/* calling ktime_get is expensive so
* do it once in 128 reads
*/
if (offs % 128 == 0 && ktime_after(ktime_get(),
timeout))
if (time_after(jiffies, end)) {
resched = true;
break;
}
}
iwl_trans_release_nic_access(trans, &flags);
if (resched)
cond_resched();
} else {
return -EBUSY;
}
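
Note on the fix above: the spin limit now uses a jiffies deadline instead of ktime, and cond_resched() runs between bursts so long memory dumps no longer monopolize the CPU. A kernel-style sketch of the bounded-poll idiom (poll_chunk and poll_once are made-up names):

    #include <linux/jiffies.h>
    #include <linux/sched.h>
    #include <linux/types.h>

    static void poll_chunk(bool (*poll_once)(void))
    {
            unsigned long end = jiffies + HZ / 2;   /* at most ~0.5 s per burst */

            while (poll_once()) {
                    if (time_after(jiffies, end)) {
                            cond_resched();          /* give other tasks a turn */
                            end = jiffies + HZ / 2;
                    }
            }
    }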

View file

@ -160,8 +160,7 @@ mt7601u_rx_process_entry(struct mt7601u_dev *dev, struct mt7601u_dma_buf_rx *e)
if (new_p) {
/* we have one extra ref from the allocator */
__free_pages(e->p, MT_RX_ORDER);
put_page(e->p);
e->p = new_p;
}
}
@ -318,7 +317,6 @@ static int mt7601u_dma_submit_tx(struct mt7601u_dev *dev,
}
e = &q->e[q->end];
e->skb = skb;
usb_fill_bulk_urb(e->urb, usb_dev, snd_pipe, skb->data, skb->len,
mt7601u_complete_tx, q);
ret = usb_submit_urb(e->urb, GFP_ATOMIC);
@ -336,6 +334,7 @@ static int mt7601u_dma_submit_tx(struct mt7601u_dev *dev,
q->end = (q->end + 1) % q->entries;
q->used++;
e->skb = skb;
if (q->used >= q->entries)
ieee80211_stop_queue(dev->hw, skb_get_queue_mapping(skb));

View file

@ -254,8 +254,21 @@ struct soc_device * __init at91_soc_init(const struct at91_soc *socs)
return soc_dev;
}
static const struct of_device_id at91_soc_allowed_list[] __initconst = {
{ .compatible = "atmel,at91rm9200", },
{ .compatible = "atmel,at91sam9", },
{ .compatible = "atmel,sama5", },
{ .compatible = "atmel,samv7", },
{ }
};
static int __init atmel_soc_device_init(void)
{
struct device_node *np = of_find_node_by_path("/");
if (!of_match_node(at91_soc_allowed_list, np))
return 0;
at91_soc_init(socs);
return 0;
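
Note on the fixes above: of_match_node() walks the table until it hits an all-zero entry, so at91_soc_allowed_list[] must end with the "{ }" sentinel (the second of the two soc/atmel patches adds exactly that). A kernel-style sketch of the idiom with made-up compatible strings and helper name:

    #include <linux/init.h>
    #include <linux/of.h>

    static const struct of_device_id example_allowed_list[] __initconst = {
            { .compatible = "vendor,board-a", },
            { .compatible = "vendor,board-b", },
            { /* sentinel */ }
    };

    static bool __init machine_is_allowed(void)
    {
            struct device_node *root = of_find_node_by_path("/");
            bool allowed = of_match_node(example_allowed_list, root) != NULL;

            of_node_put(root);
            return allowed;
    }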

View file

@ -743,14 +743,15 @@ static int remap_pfn_fn(pte_t *ptep, pgtable_t token, unsigned long addr,
return 0;
}
static long privcmd_ioctl_mmap_resource(struct file *file, void __user *udata)
static long privcmd_ioctl_mmap_resource(struct file *file,
struct privcmd_mmap_resource __user *udata)
{
struct privcmd_data *data = file->private_data;
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
struct privcmd_mmap_resource kdata;
xen_pfn_t *pfns = NULL;
struct xen_mem_acquire_resource xdata;
struct xen_mem_acquire_resource xdata = { };
int rc;
if (copy_from_user(&kdata, udata, sizeof(kdata)))
@ -760,6 +761,22 @@ static long privcmd_ioctl_mmap_resource(struct file *file, void __user *udata)
if (data->domid != DOMID_INVALID && data->domid != kdata.dom)
return -EPERM;
/* Both fields must be set or unset */
if (!!kdata.addr != !!kdata.num)
return -EINVAL;
xdata.domid = kdata.dom;
xdata.type = kdata.type;
xdata.id = kdata.id;
if (!kdata.addr && !kdata.num) {
/* Query the size of the resource. */
rc = HYPERVISOR_memory_op(XENMEM_acquire_resource, &xdata);
if (rc)
return rc;
return __put_user(xdata.nr_frames, &udata->num);
}
down_write(&mm->mmap_sem);
vma = find_vma(mm, kdata.addr);
@ -793,10 +810,6 @@ static long privcmd_ioctl_mmap_resource(struct file *file, void __user *udata)
} else
vma->vm_private_data = PRIV_VMA_LOCKED;
memset(&xdata, 0, sizeof(xdata));
xdata.domid = kdata.dom;
xdata.type = kdata.type;
xdata.id = kdata.id;
xdata.frame = kdata.idx;
xdata.nr_frames = kdata.num;
set_xen_guest_handle(xdata.frame_list, pfns);
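
Note on the fix above: kdata.addr and kdata.num must be given together; when both are zero the ioctl now issues the hypercall with nr_frames left at 0 and reports the resource size back through __put_user(). The "both or neither" test relies on "!!" collapsing any non-zero value to 1; a tiny user-space sketch of that check (check_addr_num is a made-up helper):

    #include <errno.h>

    int check_addr_num(unsigned long addr, unsigned int num)
    {
            if (!!addr != !!num)
                    return -EINVAL;    /* one was given without the other */
            return 0;                  /* both set (map) or both clear (size query) */
    }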

View file

@ -714,6 +714,23 @@ static bool xs_hvm_defer_init_for_callback(void)
#endif
}
static int xenbus_probe_thread(void *unused)
{
DEFINE_WAIT(w);
/*
* We actually just want to wait for *any* trigger of xb_waitq,
* and run xenbus_probe() the moment it occurs.
*/
prepare_to_wait(&xb_waitq, &w, TASK_INTERRUPTIBLE);
schedule();
finish_wait(&xb_waitq, &w);
DPRINTK("probing");
xenbus_probe();
return 0;
}
static int __init xenbus_probe_initcall(void)
{
/*
@ -725,6 +742,20 @@ static int __init xenbus_probe_initcall(void)
!xs_hvm_defer_init_for_callback()))
xenbus_probe();
/*
* For XS_LOCAL, spawn a thread which will wait for xenstored
* or a xenstore-stubdom to be started, then probe. It will be
* triggered when communication starts happening, by waiting
* on xb_waitq.
*/
if (xen_store_domain_type == XS_LOCAL) {
struct task_struct *probe_task;
probe_task = kthread_run(xenbus_probe_thread, NULL,
"xenbus_probe");
if (IS_ERR(probe_task))
return PTR_ERR(probe_task);
}
return 0;
}
device_initcall(xenbus_probe_initcall);
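
Note on the fix above: for XS_LOCAL the probe cannot run from the initcall because xenstored is not up yet, so a kernel thread sleeps on xb_waitq and runs xenbus_probe() the first time the ring wakes it. A generic kernel-style sketch of that one-shot wait pattern (example_waitq and deferred_probe_thread are made-up names):

    #include <linux/kthread.h>
    #include <linux/sched.h>
    #include <linux/wait.h>

    static DECLARE_WAIT_QUEUE_HEAD(example_waitq);

    static int deferred_probe_thread(void *unused)
    {
            DEFINE_WAIT(w);

            prepare_to_wait(&example_waitq, &w, TASK_INTERRUPTIBLE);
            schedule();                 /* sleep until wake_up(&example_waitq) */
            finish_wait(&example_waitq, &w);

            /* ... run the deferred probe here ... */
            return 0;
    }

    /* started with: kthread_run(deferred_probe_thread, NULL, "example_probe"); */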

View file

@ -2320,6 +2320,7 @@ out_forget:
spin_unlock(&ino->i_lock);
lseg->pls_layout = lo;
NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
pnfs_free_lseg_list(&free_me);
return ERR_PTR(-EAGAIN);
}

View file

@ -472,6 +472,8 @@ struct intel_iommu {
struct iommu_device iommu; /* IOMMU core code handle */
int node;
u32 flags; /* Software defined flags */
struct dmar_drhd_unit *drhd;
};
/* PCI domain-device relationship */

View file

@ -1961,7 +1961,7 @@ void tcp_mark_skb_lost(struct sock *sk, struct sk_buff *skb);
void tcp_newreno_mark_lost(struct sock *sk, bool snd_una_advanced);
extern s32 tcp_rack_skb_timeout(struct tcp_sock *tp, struct sk_buff *skb,
u32 reo_wnd);
extern void tcp_rack_mark_lost(struct sock *sk);
extern bool tcp_rack_mark_lost(struct sock *sk);
extern void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
u64 xmit_time);
extern void tcp_rack_reo_timeout(struct sock *sk);

View file

@ -1130,7 +1130,6 @@ int kernel_kexec(void)
#ifdef CONFIG_KEXEC_JUMP
if (kexec_image->preserve_context) {
lock_system_sleep();
pm_prepare_console();
error = freeze_processes();
if (error) {
@ -1193,7 +1192,6 @@ int kernel_kexec(void)
thaw_processes();
Restore_console:
pm_restore_console();
unlock_system_sleep();
}
#endif

View file

@ -491,10 +491,10 @@ static int swap_writer_finish(struct swap_map_handle *handle,
unsigned int flags, int error)
{
if (!error) {
flush_swap_writer(handle);
pr_info("S");
error = mark_swapfiles(handle, flags);
pr_cont("|\n");
flush_swap_writer(handle);
}
if (error)

View file

@ -2750,7 +2750,8 @@ static void tcp_identify_packet_loss(struct sock *sk, int *ack_flag)
} else if (tcp_is_rack(sk)) {
u32 prior_retrans = tp->retrans_out;
tcp_rack_mark_lost(sk);
if (tcp_rack_mark_lost(sk))
*ack_flag &= ~FLAG_SET_XMIT_TIMER;
if (prior_retrans > tp->retrans_out)
*ack_flag |= FLAG_LOST_RETRANS;
}
@ -3693,9 +3694,6 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
if (tp->tlp_high_seq)
tcp_process_tlp_ack(sk, ack, flag);
/* If needed, reset TLP/RTO timer; RACK may later override this. */
if (flag & FLAG_SET_XMIT_TIMER)
tcp_set_xmit_timer(sk);
if (tcp_ack_is_dubious(sk, flag)) {
is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP));
@ -3703,6 +3701,10 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
&rexmit);
}
/* If needed, reset TLP/RTO timer when RACK doesn't set. */
if (flag & FLAG_SET_XMIT_TIMER)
tcp_set_xmit_timer(sk);
if ((flag & FLAG_FORWARD_PROGRESS) || !(flag & FLAG_NOT_DUP))
sk_dst_confirm(sk);
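
Note on the fix above: tcp_rack_mark_lost() now reports whether it armed the RACK reorder timer; when it did, FLAG_SET_XMIT_TIMER is cleared so the timer-reset block, which has moved after loss detection, does not overwrite that reorder timer with the RTO/TLP timer. A small user-space sketch of the flag handshake (the helper signatures and the 0x01 flag value are simplified stand-ins, not the real TCP code):

    #include <stdbool.h>
    #include <stdio.h>

    #define FLAG_SET_XMIT_TIMER 0x01

    /* The loss-marking helper reports whether it armed its own timer. */
    static bool rack_mark_lost(bool reo_timer_armed)
    {
            return reo_timer_armed;
    }

    static void handle_ack(int *flag, bool reo_timer_armed)
    {
            if (rack_mark_lost(reo_timer_armed))
                    *flag &= ~FLAG_SET_XMIT_TIMER;   /* keep the reorder timer */

            if (*flag & FLAG_SET_XMIT_TIMER)
                    printf("re-arm RTO/TLP timer\n");
    }

    int main(void)
    {
            int flag = FLAG_SET_XMIT_TIMER;

            handle_ack(&flag, true);    /* RACK armed its timer: RTO left alone */
            return 0;
    }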

View file

@ -109,13 +109,13 @@ static void tcp_rack_detect_loss(struct sock *sk, u32 *reo_timeout)
}
}
void tcp_rack_mark_lost(struct sock *sk)
bool tcp_rack_mark_lost(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
u32 timeout;
if (!tp->rack.advanced)
return;
return false;
/* Reset the advanced flag to avoid unnecessary queue scanning */
tp->rack.advanced = 0;
@ -125,6 +125,7 @@ void tcp_rack_mark_lost(struct sock *sk)
inet_csk_reset_xmit_timer(sk, ICSK_TIME_REO_TIMEOUT,
timeout, inet_csk(sk)->icsk_rto);
}
return !!timeout;
}
/* Record the most recently (re)sent time among the (s)acked packets

View file

@ -1051,6 +1051,7 @@ enum queue_stop_reason {
IEEE80211_QUEUE_STOP_REASON_FLUSH,
IEEE80211_QUEUE_STOP_REASON_TDLS_TEARDOWN,
IEEE80211_QUEUE_STOP_REASON_RESERVE_TID,
IEEE80211_QUEUE_STOP_REASON_IFTYPE_CHANGE,
IEEE80211_QUEUE_STOP_REASONS,
};

View file

@ -1542,6 +1542,10 @@ static int ieee80211_runtime_change_iftype(struct ieee80211_sub_if_data *sdata,
if (ret)
return ret;
ieee80211_stop_vif_queues(local, sdata,
IEEE80211_QUEUE_STOP_REASON_IFTYPE_CHANGE);
synchronize_net();
ieee80211_do_stop(sdata, false);
ieee80211_teardown_sdata(sdata);
@ -1562,6 +1566,8 @@ static int ieee80211_runtime_change_iftype(struct ieee80211_sub_if_data *sdata,
err = ieee80211_do_open(&sdata->wdev, false);
WARN(err, "type change: do_open returned %d", err);
ieee80211_wake_vif_queues(local, sdata,
IEEE80211_QUEUE_STOP_REASON_IFTYPE_CHANGE);
return ret;
}

View file

@ -213,8 +213,10 @@ static int nft_dynset_init(const struct nft_ctx *ctx,
nft_set_ext_add_length(&priv->tmpl, NFT_SET_EXT_EXPR,
priv->expr->ops->size);
if (set->flags & NFT_SET_TIMEOUT) {
if (timeout || set->timeout)
if (timeout || set->timeout) {
nft_set_ext_add(&priv->tmpl, NFT_SET_EXT_TIMEOUT);
nft_set_ext_add(&priv->tmpl, NFT_SET_EXT_EXPIRATION);
}
}
priv->timeout = timeout;

View file

@ -871,6 +871,7 @@ static int nfc_genl_stop_poll(struct sk_buff *skb, struct genl_info *info)
if (!dev->polling) {
device_unlock(&dev->dev);
nfc_put_device(dev);
return -EINVAL;
}

View file

@ -117,7 +117,7 @@ static int rawsock_connect(struct socket *sock, struct sockaddr *_addr,
if (addr->target_idx > dev->target_next_idx - 1 ||
addr->target_idx < dev->target_next_idx - dev->n_targets) {
rc = -EINVAL;
goto error;
goto put_dev;
}
rc = nfc_activate_target(dev, addr->target_idx, addr->nfc_protocol);

View file

@ -211,6 +211,7 @@ void rxrpc_discard_prealloc(struct rxrpc_sock *rx)
tail = b->peer_backlog_tail;
while (CIRC_CNT(head, tail, size) > 0) {
struct rxrpc_peer *peer = b->peer_backlog[tail];
rxrpc_put_local(peer->local);
kfree(peer);
tail = (tail + 1) & (size - 1);
}

View file

@ -896,8 +896,9 @@ out:
int call_commit_handler(struct net_device *dev)
{
#ifdef CONFIG_WIRELESS_EXT
if ((netif_running(dev)) &&
(dev->wireless_handlers->standard[0] != NULL))
if (netif_running(dev) &&
dev->wireless_handlers &&
dev->wireless_handlers->standard[0])
/* Call the commit handler on the driver */
return dev->wireless_handlers->standard[0](dev, NULL,
NULL, NULL);

View file

@ -420,7 +420,7 @@ resume:
/* only the first xfrm gets the encap type */
encap_type = 0;
if (async && x->repl->recheck(x, skb, seq)) {
if (x->repl->recheck(x, skb, seq)) {
XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR);
goto drop_unlock;
}

View file

@ -2101,8 +2101,8 @@ struct dst_entry *xfrm_lookup_with_ifid(struct net *net,
xflo.flags = flags;
/* To accelerate a bit... */
if ((dst_orig->flags & DST_NOXFRM) ||
!net->xfrm.policy_count[XFRM_POLICY_OUT])
if (!if_id && ((dst_orig->flags & DST_NOXFRM) ||
!net->xfrm.policy_count[XFRM_POLICY_OUT]))
goto nopol;
xdst = xfrm_bundle_lookup(net, fl, family, dir, &xflo, if_id);

View file

@ -1056,7 +1056,7 @@ static const struct hda_fixup via_fixups[] = {
static const struct snd_pci_quirk vt2002p_fixups[] = {
SND_PCI_QUIRK(0x1043, 0x1487, "Asus G75", VIA_FIXUP_ASUS_G75),
SND_PCI_QUIRK(0x1043, 0x8532, "Asus X202E", VIA_FIXUP_INTMIC_BOOST),
SND_PCI_QUIRK(0x1558, 0x3501, "Clevo W35xSS_370SS", VIA_FIXUP_POWER_SAVE),
SND_PCI_QUIRK_VENDOR(0x1558, "Clevo", VIA_FIXUP_POWER_SAVE),
{}
};