Merge tag 'ASB-2023-01-05_4.19-stable' of https://android.googlesource.com/kernel/common into android13-4.19-kona

https://source.android.com/docs/security/bulletin/2023-01-01
CVE-2022-42719
CVE-2022-42720
CVE-2022-42721
CVE-2022-2959
CVE-2022-41674
CVE-2023-20928

* tag 'ASB-2023-01-05_4.19-stable' of https://android.googlesource.com/kernel/common:
  ANDROID: Add allowed symbols required from Qualcomm drivers
  BACKPORT: lib: introduce copy_struct_from_user() helper
  ANDROID: fix BIT() redefinition
  Linux 4.19.269
  can: esd_usb: Allow REC and TEC to return to zero
  net: mvneta: Fix an out of bounds check
  ipv6: avoid use-after-free in ip6_fragment()
  net: plip: don't call kfree_skb/dev_kfree_skb() under spin_lock_irq()
  xen/netback: fix build warning
  ethernet: aeroflex: fix potential skb leak in greth_init_rings()
  tipc: Fix potential OOB in tipc_link_proto_rcv()
  net: hisilicon: Fix potential use-after-free in hix5hd2_rx()
  net: hisilicon: Fix potential use-after-free in hisi_femac_rx()
  net: stmmac: fix "snps,axi-config" node property parsing
  nvme initialize core quirks before calling nvme_init_subsystem
  NFC: nci: Bounds check struct nfc_target arrays
  i40e: Disallow ip4 and ip6 l4_4_bytes
  i40e: Fix for VF MAC address 0
  i40e: Fix not setting default xps_cpus after reset
  net: mvneta: Prevent out of bounds read in mvneta_config_rss()
  xen-netfront: Fix NULL sring after live migration
  net: encx24j600: Fix invalid logic in reading of MISTAT register
  net: encx24j600: Add parentheses to fix precedence
  mac802154: fix missing INIT_LIST_HEAD in ieee802154_if_add()
  selftests: rtnetlink: correct xfrm policy rule in kci_test_ipsec_offload
  Bluetooth: Fix not cleanup led when bt_init fails
  Bluetooth: 6LoWPAN: add missing hci_dev_put() in get_l2cap_conn()
  igb: Allocate MSI-X vector when testing
  e1000e: Fix TX dispatch condition
  gpio: amd8111: Fix PCI device reference count leak
  ca8210: Fix crash by zero initializing data
  ieee802154: cc2520: Fix error return code in cc2520_hw_init()
  HID: core: fix shift-out-of-bounds in hid_report_raw_event
  HID: hid-lg4ff: Add check for empty lbuf
  KVM: s390: vsie: Fix the initialization of the epoch extension (epdx) field
  memcg: fix possible use-after-free in memcg_write_event_control()
  media: v4l2-dv-timings.c: fix too strict blanking sanity checks
  rcutorture: Automatically create initrd directory
  xen/netback: don't call kfree_skb() with interrupts disabled
  xen/netback: do some code cleanup
  xen/netback: Ensure protocol headers don't fall in the non-linear area
  net: usb: qmi_wwan: add u-blox 0x1342 composition
  9p/xen: check logical size for buffer size
  fbcon: Use kzalloc() in fbcon_prepare_logo()
  regulator: twl6030: fix get status of twl6032 regulators
  ASoC: soc-pcm: Add NULL check in BE reparenting
  ALSA: seq: Fix function prototype mismatch in snd_seq_expand_var_event
  9p/fd: Use P9_HDRSZ for header size
  ARM: dts: rockchip: disable arm_global_timer on rk3066 and rk3188
  ARM: 9266/1: mm: fix no-MMU ZERO_PAGE() implementation
  ARM: 9251/1: perf: Fix stacktraces for tracepoint events in THUMB2 kernels
  ARM: dts: rockchip: fix ir-receiver node names
  arm: dts: rockchip: fix node name for hym8563 rtc
  ANDROID: Add allowed symbols required from Qualcomm drivers

Change-Id: I95885c78b0a71492bb368fc15a4416f643f3d7ca
Michael Bestas 2023-01-06 17:20:12 +02:00
commit a83a355de3
GPG key ID: CC95044519BE6669
62 changed files with 72119 additions and 70893 deletions


@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 4
PATCHLEVEL = 19
SUBLEVEL = 268
SUBLEVEL = 269
EXTRAVERSION =
NAME = "People's Front"

File diff suppressed because it is too large.


@ -1579,6 +1579,7 @@
# required by msm_adreno.ko
bpf_trace_run10
check_zeroed_user
__clk_get_name
devfreq_cooling_unregister
device_show_int
@ -2102,6 +2103,7 @@
# required by qcom_glink_native.ko
__irq_set_affinity
sched_setaffinity
strscpy_pad
# required by qcom_hwspinlock.ko
devm_regmap_field_alloc
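
check_zeroed_user above is pulled in by the copy_struct_from_user() backport listed in the shortlog. A minimal sketch of the pattern that helper serves (the struct and ioctl below are hypothetical): copying an extensible struct whose userspace size may be smaller or larger than the kernel's.

struct my_ext_args {			/* hypothetical, append-only struct */
	__u64 flags;
	__u64 addr;
};

static long my_ioctl(void __user *uarg, size_t usize)
{
	struct my_ext_args args;
	int err;

	/* Copies min(sizeof(args), usize) bytes and zero-fills the rest.
	 * If userspace passed a larger struct, check_zeroed_user() verifies
	 * the trailing bytes are all zero and -E2BIG is returned otherwise.
	 */
	err = copy_struct_from_user(&args, sizeof(args), uarg, usize);
	if (err)
		return err;

	return 0;
}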


@ -31,7 +31,7 @@
&i2c1 {
status = "okay";
hym8563: hym8563@51 {
hym8563: rtc@51 {
compatible = "haoyu,hym8563";
reg = <0x51>;
#clock-cells = <0>;


@ -67,7 +67,7 @@
#sound-dai-cells = <0>;
};
ir_recv: gpio-ir-receiver {
ir_recv: ir-receiver {
compatible = "gpio-ir-receiver";
gpios = <&gpio0 RK_PB2 GPIO_ACTIVE_LOW>;
pinctrl-names = "default";


@ -509,7 +509,6 @@
&global_timer {
interrupts = <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_EDGE_RISING)>;
status = "disabled";
};
&local_timer {


@ -53,7 +53,7 @@
vin-supply = <&vcc_sys>;
};
hym8563@51 {
rtc@51 {
compatible = "haoyu,hym8563";
reg = <0x51>;


@ -233,7 +233,7 @@
vin-supply = <&vcc_sys>;
};
hym8563: hym8563@51 {
hym8563: rtc@51 {
compatible = "haoyu,hym8563";
reg = <0x51>;
#clock-cells = <0>;


@ -146,7 +146,7 @@
vin-supply = <&vcc_sys>;
};
hym8563: hym8563@51 {
hym8563: rtc@51 {
compatible = "haoyu,hym8563";
reg = <0x51>;
#clock-cells = <0>;


@ -166,7 +166,7 @@
};
&i2c0 {
hym8563: hym8563@51 {
hym8563: rtc@51 {
compatible = "haoyu,hym8563";
reg = <0x51>;
#clock-cells = <0>;


@ -108,6 +108,13 @@
reg = <0x1013c200 0x20>;
interrupts = <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_EDGE_RISING)>;
clocks = <&cru CORE_PERI>;
status = "disabled";
/* The clock source and the sched_clock provided by the arm_global_timer
* on Rockchip rk3066a/rk3188 are quite unstable because their rates
* depend on the CPU frequency.
* Keep the arm_global_timer disabled in order to have the
* DW_APB_TIMER (rk3066a) or ROCKCHIP_TIMER (rk3188) selected by default.
*/
};
local_timer: local-timer@1013c600 {


@ -21,7 +21,7 @@ extern unsigned long perf_misc_flags(struct pt_regs *regs);
#define perf_arch_fetch_caller_regs(regs, __ip) { \
(regs)->ARM_pc = (__ip); \
(regs)->ARM_fp = (unsigned long) __builtin_frame_address(0); \
frame_pointer((regs)) = (unsigned long) __builtin_frame_address(0); \
(regs)->ARM_sp = current_stack_pointer; \
(regs)->ARM_cpsr = SVC_MODE; \
}


@ -54,12 +54,6 @@
typedef pte_t *pte_addr_t;
/*
* ZERO_PAGE is a global shared page that is always zero: used
* for zero-mapped memory areas etc..
*/
#define ZERO_PAGE(vaddr) (virt_to_page(0))
/*
* Mark the prot value as uncacheable and unbufferable.
*/


@ -13,6 +13,15 @@
#include <linux/const.h>
#include <asm/proc-fns.h>
#ifndef __ASSEMBLY__
/*
* ZERO_PAGE is a global shared page that is always zero: used
* for zero-mapped memory areas etc..
*/
extern struct page *empty_zero_page;
#define ZERO_PAGE(vaddr) (empty_zero_page)
#endif
#ifndef CONFIG_MMU
#include <asm-generic/4level-fixup.h>
@ -166,13 +175,6 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
#define __S111 __PAGE_SHARED_EXEC
#ifndef __ASSEMBLY__
/*
* ZERO_PAGE is a global shared page that is always zero: used
* for zero-mapped memory areas etc..
*/
extern struct page *empty_zero_page;
#define ZERO_PAGE(vaddr) (empty_zero_page)
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];


@ -25,6 +25,13 @@
unsigned long vectors_base;
/*
* empty_zero_page is a special page that is used for
* zero-initialized data and COW.
*/
struct page *empty_zero_page;
EXPORT_SYMBOL(empty_zero_page);
#ifdef CONFIG_ARM_MPU
struct mpu_rgn_info mpu_rgn_info;
#endif
@ -147,9 +154,21 @@ void __init adjust_lowmem_bounds(void)
*/
void __init paging_init(const struct machine_desc *mdesc)
{
void *zero_page;
early_trap_init((void *)vectors_base);
mpu_setup();
/* allocate the zero page. */
zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
if (!zero_page)
panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
__func__, PAGE_SIZE, PAGE_SIZE);
bootmem_init();
empty_zero_page = virt_to_page(zero_page);
flush_dcache_page(empty_zero_page);
}
/*


@ -376,8 +376,10 @@ static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_CEI))
scb_s->eca |= scb_o->eca & ECA_CEI;
/* Epoch Extension */
if (test_kvm_facility(vcpu->kvm, 139))
if (test_kvm_facility(vcpu->kvm, 139)) {
scb_s->ecd |= scb_o->ecd & ECD_MEF;
scb_s->epdx = scb_o->epdx;
}
/* etoken */
if (test_kvm_facility(vcpu->kvm, 156))


@ -231,7 +231,10 @@ found:
ioport_unmap(gp.pm);
goto out;
}
return 0;
out:
pci_dev_put(pdev);
return err;
}
@ -239,6 +242,7 @@ static void __exit amd_gpio_exit(void)
{
gpiochip_remove(&gp.chip);
ioport_unmap(gp.pm);
pci_dev_put(gp.pdev);
}
module_init(amd_gpio_init);
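
The leak pattern being fixed: pci_get_device(), which the for_each_pci_dev() iterator is built on, returns each device with its reference count raised, so any path that stops iterating must either keep that reference deliberately or drop it with pci_dev_put(). A condensed sketch, with the match and setup steps hypothetical:

static struct pci_dev *claimed;

static int find_and_claim(void)
{
	struct pci_dev *pdev = NULL;

	for_each_pci_dev(pdev) {	/* each step holds a ref on pdev */
		if (!is_ours(pdev))	/* hypothetical match check */
			continue;	/* iterator drops this ref itself */
		if (claim(pdev)) {	/* hypothetical setup */
			pci_dev_put(pdev);	/* failure: drop the ref */
			return -ENODEV;
		}
		claimed = pdev;		/* success: keep the ref */
		return 0;
	}
	return -ENODEV;
}

/* ...and the exit path balances the kept reference: */
static void release_claimed(void)
{
	pci_dev_put(claimed);
}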


@ -1131,6 +1131,9 @@ static s32 snto32(__u32 value, unsigned n)
if (!value || !n)
return 0;
if (n > 32)
n = 32;
switch (n) {
case 8: return ((__s8)value);
case 16: return ((__s16)value);
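
The added clamp matters because, past the cases shown here, snto32() falls through to a generic sign-extension that shifts by n; with a malformed report declaring a field wider than 32 bits, a shift such as ~0U << n is undefined behaviour. A standalone sketch of the same idea:

#include <stdint.h>

/* Sign-extend the low n bits of value. Clamping n, as the fix above
 * does, keeps the shifts defined even for hostile field sizes.
 */
static int32_t sign_extend(uint32_t value, unsigned int n)
{
	if (n == 0)
		return 0;
	if (n >= 32)			/* clamp: shifting by >= 32 is UB */
		return (int32_t)value;
	if (value & (1u << (n - 1)))	/* sign bit of the n-bit field */
		return (int32_t)(value | (~0u << n));
	return (int32_t)value;
}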


@ -878,6 +878,12 @@ static ssize_t lg4ff_alternate_modes_store(struct device *dev, struct device_att
return -ENOMEM;
i = strlen(lbuf);
if (i == 0) {
kfree(lbuf);
return -EINVAL;
}
if (lbuf[i-1] == '\n') {
if (i == 1) {
kfree(lbuf);


@ -145,6 +145,8 @@ bool v4l2_valid_dv_timings(const struct v4l2_dv_timings *t,
const struct v4l2_bt_timings *bt = &t->bt;
const struct v4l2_bt_timings_cap *cap = &dvcap->bt;
u32 caps = cap->capabilities;
const u32 max_vert = 10240;
u32 max_hor = 3 * bt->width;
if (t->type != V4L2_DV_BT_656_1120)
return false;
@ -166,14 +168,20 @@ bool v4l2_valid_dv_timings(const struct v4l2_dv_timings *t,
if (!bt->interlaced &&
(bt->il_vbackporch || bt->il_vsync || bt->il_vfrontporch))
return false;
if (bt->hfrontporch > 2 * bt->width ||
bt->hsync > 1024 || bt->hbackporch > 1024)
/*
* Some video receivers cannot properly separate the frontporch,
* backporch and sync values, and instead they only have the total
* blanking. That can be assigned to any of these three fields.
* So just check that none of these are way out of range.
*/
if (bt->hfrontporch > max_hor ||
bt->hsync > max_hor || bt->hbackporch > max_hor)
return false;
if (bt->vfrontporch > 4096 ||
bt->vsync > 128 || bt->vbackporch > 4096)
if (bt->vfrontporch > max_vert ||
bt->vsync > max_vert || bt->vbackporch > max_vert)
return false;
if (bt->interlaced && (bt->il_vfrontporch > 4096 ||
bt->il_vsync > 128 || bt->il_vbackporch > 4096))
if (bt->interlaced && (bt->il_vfrontporch > max_vert ||
bt->il_vsync > max_vert || bt->il_vbackporch > max_vert))
return false;
return fnc == NULL || fnc(t, fnc_handle);
}


@ -239,6 +239,10 @@ static void esd_usb2_rx_event(struct esd_usb2_net_priv *priv,
u8 rxerr = msg->msg.rx.data[2];
u8 txerr = msg->msg.rx.data[3];
netdev_dbg(priv->netdev,
"CAN_ERR_EV_EXT: dlc=%#02x state=%02x ecc=%02x rec=%02x tec=%02x\n",
msg->msg.rx.dlc, state, ecc, rxerr, txerr);
skb = alloc_can_err_skb(priv->netdev, &cf);
if (skb == NULL) {
stats->rx_dropped++;
@ -265,6 +269,8 @@ static void esd_usb2_rx_event(struct esd_usb2_net_priv *priv,
break;
default:
priv->can.state = CAN_STATE_ERROR_ACTIVE;
txerr = 0;
rxerr = 0;
break;
}
} else {


@ -262,6 +262,7 @@ static int greth_init_rings(struct greth_private *greth)
if (dma_mapping_error(greth->dev, dma_addr)) {
if (netif_msg_ifup(greth))
dev_err(greth->dev, "Could not create initial DMA mapping\n");
dev_kfree_skb(skb);
goto cleanup;
}
greth->rx_skbuff[i] = skb;


@ -295,7 +295,7 @@ static int hisi_femac_rx(struct net_device *dev, int limit)
skb->protocol = eth_type_trans(skb, dev);
napi_gro_receive(&priv->napi, skb);
dev->stats.rx_packets++;
dev->stats.rx_bytes += skb->len;
dev->stats.rx_bytes += len;
next:
pos = (pos + 1) % rxq->num;
if (rx_pkts_num >= limit)


@ -554,7 +554,7 @@ static int hix5hd2_rx(struct net_device *dev, int limit)
skb->protocol = eth_type_trans(skb, dev);
napi_gro_receive(&priv->napi, skb);
dev->stats.rx_packets++;
dev->stats.rx_bytes += skb->len;
dev->stats.rx_bytes += len;
next:
pos = dma_ring_incr(pos, RX_DESC_NUM);
}


@ -5877,9 +5877,9 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
e1000_tx_queue(tx_ring, tx_flags, count);
/* Make sure there is space in the ring for the next send. */
e1000_maybe_stop_tx(tx_ring,
(MAX_SKB_FRAGS *
((MAX_SKB_FRAGS + 1) *
DIV_ROUND_UP(PAGE_SIZE,
adapter->tx_fifo_limit) + 2));
adapter->tx_fifo_limit) + 4));
if (!skb->xmit_more ||
netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) {


@ -3850,11 +3850,7 @@ static int i40e_check_fdir_input_set(struct i40e_vsi *vsi,
return -EOPNOTSUPP;
/* First 4 bytes of L4 header */
if (usr_ip4_spec->l4_4_bytes == htonl(0xFFFFFFFF))
new_mask |= I40E_L4_SRC_MASK | I40E_L4_DST_MASK;
else if (!usr_ip4_spec->l4_4_bytes)
new_mask &= ~(I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
else
if (usr_ip4_spec->l4_4_bytes)
return -EOPNOTSUPP;
/* Filtering on Type of Service is not supported. */


@ -9367,6 +9367,21 @@ static int i40e_rebuild_channels(struct i40e_vsi *vsi)
return 0;
}
/**
* i40e_clean_xps_state - clean xps state for every tx_ring
* @vsi: ptr to the VSI
**/
static void i40e_clean_xps_state(struct i40e_vsi *vsi)
{
int i;
if (vsi->tx_rings)
for (i = 0; i < vsi->num_queue_pairs; i++)
if (vsi->tx_rings[i])
clear_bit(__I40E_TX_XPS_INIT_DONE,
vsi->tx_rings[i]->state);
}
/**
* i40e_prep_for_reset - prep for the core to reset
* @pf: board private structure
@ -9398,8 +9413,10 @@ static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired)
rtnl_unlock();
for (v = 0; v < pf->num_alloc_vsi; v++) {
if (pf->vsi[v])
if (pf->vsi[v]) {
i40e_clean_xps_state(pf->vsi[v]);
pf->vsi[v]->seid = 0;
}
}
i40e_shutdown_adminq(&pf->hw);


@ -1269,6 +1269,7 @@ bool i40e_reset_vf(struct i40e_vf *vf, bool flr)
i40e_cleanup_reset_vf(vf);
i40e_flush(hw);
usleep_range(20000, 40000);
clear_bit(I40E_VF_STATE_RESETTING, &vf->vf_states);
return true;
@ -1392,6 +1393,7 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
}
i40e_flush(hw);
usleep_range(20000, 40000);
clear_bit(__I40E_VF_DISABLE, pf->state);
return true;


@ -1399,6 +1399,8 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
*data = 1;
return -1;
}
wr32(E1000_IVAR_MISC, E1000_IVAR_VALID << 8);
wr32(E1000_EIMS, BIT(0));
} else if (adapter->flags & IGB_FLAG_HAS_MSI) {
shared_int = false;
if (request_irq(irq,


@ -3620,7 +3620,7 @@ static void mvneta_percpu_elect(struct mvneta_port *pp)
/* Use the cpu associated to the rxq when it is online, in all
* the other cases, use the cpu 0 which can't be offline.
*/
if (cpu_online(pp->rxq_def))
if (pp->rxq_def < nr_cpu_ids && cpu_online(pp->rxq_def))
elected_cpu = pp->rxq_def;
max_cpu = num_present_cpus();


@ -367,7 +367,7 @@ static int regmap_encx24j600_phy_reg_read(void *context, unsigned int reg,
goto err_out;
usleep_range(26, 100);
while ((ret = regmap_read(ctx->regmap, MISTAT, &mistat) != 0) &&
while (((ret = regmap_read(ctx->regmap, MISTAT, &mistat)) == 0) &&
(mistat & BUSY))
cpu_relax();
@ -405,7 +405,7 @@ static int regmap_encx24j600_phy_reg_write(void *context, unsigned int reg,
goto err_out;
usleep_range(26, 100);
while ((ret = regmap_read(ctx->regmap, MISTAT, &mistat) != 0) &&
while (((ret = regmap_read(ctx->regmap, MISTAT, &mistat)) == 0) &&
(mistat & BUSY))
cpu_relax();
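
Both hunks fix the same operator-precedence slip: != binds tighter than =, so the old condition assigned a boolean to ret and the loop body only ran while the register read failed. Distilled, with read_reg() standing in for the regmap_read() call:

int ret;
unsigned int mistat;

/* Buggy: parses as ret = (read_reg(&mistat) != 0), so BUSY is only
 * polled when the read itself fails.
 */
while ((ret = read_reg(&mistat) != 0) && (mistat & BUSY))
	cpu_relax();

/* Fixed: assign the return code first, then keep polling while the
 * read succeeds and the device is still busy.
 */
while (((ret = read_reg(&mistat)) == 0) && (mistat & BUSY))
	cpu_relax();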


@ -114,10 +114,10 @@ static struct stmmac_axi *stmmac_axi_setup(struct platform_device *pdev)
axi->axi_lpi_en = of_property_read_bool(np, "snps,lpi_en");
axi->axi_xit_frm = of_property_read_bool(np, "snps,xit_frm");
axi->axi_kbbe = of_property_read_bool(np, "snps,axi_kbbe");
axi->axi_fb = of_property_read_bool(np, "snps,axi_fb");
axi->axi_mb = of_property_read_bool(np, "snps,axi_mb");
axi->axi_rb = of_property_read_bool(np, "snps,axi_rb");
axi->axi_kbbe = of_property_read_bool(np, "snps,kbbe");
axi->axi_fb = of_property_read_bool(np, "snps,fb");
axi->axi_mb = of_property_read_bool(np, "snps,mb");
axi->axi_rb = of_property_read_bool(np, "snps,rb");
if (of_property_read_u32(np, "snps,wr_osr_lmt", &axi->axi_wr_osr_lmt))
axi->axi_wr_osr_lmt = 1;


@ -925,7 +925,7 @@ static int ca8210_spi_transfer(
dev_dbg(&spi->dev, "%s called\n", __func__);
cas_ctl = kmalloc(sizeof(*cas_ctl), GFP_ATOMIC);
cas_ctl = kzalloc(sizeof(*cas_ctl), GFP_ATOMIC);
if (!cas_ctl)
return -ENOMEM;


@ -978,7 +978,7 @@ static int cc2520_hw_init(struct cc2520_private *priv)
if (timeout-- <= 0) {
dev_err(&priv->spi->dev, "oscillator start failed!\n");
return ret;
return -ETIMEDOUT;
}
udelay(1);
} while (!(status & CC2520_STATUS_XOSC32M_STABLE));


@ -448,12 +448,12 @@ plip_bh_timeout_error(struct net_device *dev, struct net_local *nl,
}
rcv->state = PLIP_PK_DONE;
if (rcv->skb) {
kfree_skb(rcv->skb);
dev_kfree_skb_irq(rcv->skb);
rcv->skb = NULL;
}
snd->state = PLIP_PK_DONE;
if (snd->skb) {
dev_kfree_skb(snd->skb);
dev_consume_skb_irq(snd->skb);
snd->skb = NULL;
}
spin_unlock_irq(&nl->lock);
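
The rule behind the substitution: kfree_skb() and dev_kfree_skb() must not be called with hardware interrupts disabled, which is the state inside spin_lock_irq(). The _irq variants are safe there because they only queue the skb on the per-CPU softnet completion list and raise NET_TX_SOFTIRQ, deferring the real free; dev_consume_skb_irq() is the flavour for packets that were sent rather than dropped. In outline:

spin_lock_irq(&lock);		/* hardirqs off: kfree_skb() is unsafe here */
dev_kfree_skb_irq(skb);		/* defer the actual free to softirq context */
spin_unlock_irq(&lock);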


@ -1377,6 +1377,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x0489, 0xe0b4, 0)}, /* Foxconn T77W968 LTE */
{QMI_FIXED_INTF(0x0489, 0xe0b5, 0)}, /* Foxconn T77W968 LTE with eSIM support*/
{QMI_FIXED_INTF(0x2692, 0x9025, 4)}, /* Cellient MPL200 (rebranded Qualcomm 05c6:9025) */
{QMI_QUIRK_SET_DTR(0x1546, 0x1342, 4)}, /* u-blox LARA-L6 */
/* 4. Gobi 1000 devices */
{QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */


@ -48,7 +48,6 @@
#include <linux/debugfs.h>
typedef unsigned int pending_ring_idx_t;
#define INVALID_PENDING_RING_IDX (~0U)
struct pending_tx_info {
struct xen_netif_tx_request req; /* tx request */
@ -82,8 +81,6 @@ struct xenvif_rx_meta {
/* Discriminate from any valid pending_idx value. */
#define INVALID_PENDING_IDX 0xFFFF
#define MAX_BUFFER_OFFSET XEN_PAGE_SIZE
#define MAX_PENDING_REQS XEN_NETIF_TX_RING_SIZE
/* The maximum number of frags is derived from the size of a grant (same
@ -346,11 +343,6 @@ void xenvif_free(struct xenvif *vif);
int xenvif_xenbus_init(void);
void xenvif_xenbus_fini(void);
int xenvif_schedulable(struct xenvif *vif);
int xenvif_queue_stopped(struct xenvif_queue *queue);
void xenvif_wake_queue(struct xenvif_queue *queue);
/* (Un)Map communication rings. */
void xenvif_unmap_frontend_data_rings(struct xenvif_queue *queue);
int xenvif_map_frontend_data_rings(struct xenvif_queue *queue,
@ -373,17 +365,13 @@ int xenvif_dealloc_kthread(void *data);
irqreturn_t xenvif_ctrl_irq_fn(int irq, void *data);
bool xenvif_have_rx_work(struct xenvif_queue *queue, bool test_kthread);
void xenvif_rx_action(struct xenvif_queue *queue);
void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb);
bool xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb);
void xenvif_carrier_on(struct xenvif *vif);
/* Callback from stack when TX packet can be released */
void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success);
/* Unmap a pending page and release it back to the guest */
void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx);
static inline pending_ring_idx_t nr_pending_reqs(struct xenvif_queue *queue)
{
return MAX_PENDING_REQS -


@ -70,7 +70,7 @@ void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue)
wake_up(&queue->dealloc_wq);
}
int xenvif_schedulable(struct xenvif *vif)
static int xenvif_schedulable(struct xenvif *vif)
{
return netif_running(vif->dev) &&
test_bit(VIF_STATUS_CONNECTED, &vif->status) &&
@ -178,20 +178,6 @@ irqreturn_t xenvif_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
int xenvif_queue_stopped(struct xenvif_queue *queue)
{
struct net_device *dev = queue->vif->dev;
unsigned int id = queue->id;
return netif_tx_queue_stopped(netdev_get_tx_queue(dev, id));
}
void xenvif_wake_queue(struct xenvif_queue *queue)
{
struct net_device *dev = queue->vif->dev;
unsigned int id = queue->id;
netif_tx_wake_queue(netdev_get_tx_queue(dev, id));
}
static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev,
select_queue_fallback_t fallback)
@ -269,14 +255,16 @@ xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
skb_clear_hash(skb);
xenvif_rx_queue_tail(queue, skb);
if (!xenvif_rx_queue_tail(queue, skb))
goto drop;
xenvif_kick_thread(queue);
return NETDEV_TX_OK;
drop:
vif->dev->stats.tx_dropped++;
dev_kfree_skb(skb);
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}


@ -105,6 +105,8 @@ static void make_tx_response(struct xenvif_queue *queue,
s8 st);
static void push_tx_responses(struct xenvif_queue *queue);
static void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx);
static inline int tx_work_todo(struct xenvif_queue *queue);
static inline unsigned long idx_to_pfn(struct xenvif_queue *queue,
@ -323,10 +325,13 @@ static int xenvif_count_requests(struct xenvif_queue *queue,
struct xenvif_tx_cb {
u16 pending_idx;
u16 copy_pending_idx[XEN_NETBK_LEGACY_SLOTS_MAX + 1];
u8 copy_count;
};
#define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb)
#define copy_pending_idx(skb, i) (XENVIF_TX_CB(skb)->copy_pending_idx[i])
#define copy_count(skb) (XENVIF_TX_CB(skb)->copy_count)
static inline void xenvif_tx_create_map_op(struct xenvif_queue *queue,
u16 pending_idx,
@ -361,31 +366,93 @@ static inline struct sk_buff *xenvif_alloc_skb(unsigned int size)
return skb;
}
static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *queue,
struct sk_buff *skb,
struct xen_netif_tx_request *txp,
struct gnttab_map_grant_ref *gop,
unsigned int frag_overflow,
struct sk_buff *nskb)
static void xenvif_get_requests(struct xenvif_queue *queue,
struct sk_buff *skb,
struct xen_netif_tx_request *first,
struct xen_netif_tx_request *txfrags,
unsigned *copy_ops,
unsigned *map_ops,
unsigned int frag_overflow,
struct sk_buff *nskb,
unsigned int extra_count,
unsigned int data_len)
{
struct skb_shared_info *shinfo = skb_shinfo(skb);
skb_frag_t *frags = shinfo->frags;
u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
int start;
u16 pending_idx;
pending_ring_idx_t index;
unsigned int nr_slots;
struct gnttab_copy *cop = queue->tx_copy_ops + *copy_ops;
struct gnttab_map_grant_ref *gop = queue->tx_map_ops + *map_ops;
struct xen_netif_tx_request *txp = first;
nr_slots = shinfo->nr_frags;
nr_slots = shinfo->nr_frags + 1;
/* Skip first skb fragment if it is on same page as header fragment. */
start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
copy_count(skb) = 0;
for (shinfo->nr_frags = start; shinfo->nr_frags < nr_slots;
shinfo->nr_frags++, txp++, gop++) {
/* Create copy ops for exactly data_len bytes into the skb head. */
__skb_put(skb, data_len);
while (data_len > 0) {
int amount = data_len > txp->size ? txp->size : data_len;
cop->source.u.ref = txp->gref;
cop->source.domid = queue->vif->domid;
cop->source.offset = txp->offset;
cop->dest.domid = DOMID_SELF;
cop->dest.offset = (offset_in_page(skb->data +
skb_headlen(skb) -
data_len)) & ~XEN_PAGE_MASK;
cop->dest.u.gmfn = virt_to_gfn(skb->data + skb_headlen(skb)
- data_len);
cop->len = amount;
cop->flags = GNTCOPY_source_gref;
index = pending_index(queue->pending_cons);
pending_idx = queue->pending_ring[index];
callback_param(queue, pending_idx).ctx = NULL;
copy_pending_idx(skb, copy_count(skb)) = pending_idx;
copy_count(skb)++;
cop++;
data_len -= amount;
if (amount == txp->size) {
/* The copy op covered the full tx_request */
memcpy(&queue->pending_tx_info[pending_idx].req,
txp, sizeof(*txp));
queue->pending_tx_info[pending_idx].extra_count =
(txp == first) ? extra_count : 0;
if (txp == first)
txp = txfrags;
else
txp++;
queue->pending_cons++;
nr_slots--;
} else {
/* The copy op partially covered the tx_request.
* The remainder will be mapped.
*/
txp->offset += amount;
txp->size -= amount;
}
}
for (shinfo->nr_frags = 0; shinfo->nr_frags < nr_slots;
shinfo->nr_frags++, gop++) {
index = pending_index(queue->pending_cons++);
pending_idx = queue->pending_ring[index];
xenvif_tx_create_map_op(queue, pending_idx, txp, 0, gop);
xenvif_tx_create_map_op(queue, pending_idx, txp,
txp == first ? extra_count : 0, gop);
frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx);
if (txp == first)
txp = txfrags;
else
txp++;
}
if (frag_overflow) {
@ -406,7 +473,8 @@ static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *que
skb_shinfo(skb)->frag_list = nskb;
}
return gop;
(*copy_ops) = cop - queue->tx_copy_ops;
(*map_ops) = gop - queue->tx_map_ops;
}
static inline void xenvif_grant_handle_set(struct xenvif_queue *queue,
@ -442,7 +510,7 @@ static int xenvif_tx_check_gop(struct xenvif_queue *queue,
struct gnttab_copy **gopp_copy)
{
struct gnttab_map_grant_ref *gop_map = *gopp_map;
u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
u16 pending_idx;
/* This always points to the shinfo of the skb being checked, which
* could be either the first or the one on the frag_list
*/
@ -453,24 +521,37 @@ static int xenvif_tx_check_gop(struct xenvif_queue *queue,
struct skb_shared_info *first_shinfo = NULL;
int nr_frags = shinfo->nr_frags;
const bool sharedslot = nr_frags &&
frag_get_pending_idx(&shinfo->frags[0]) == pending_idx;
int i, err;
frag_get_pending_idx(&shinfo->frags[0]) ==
copy_pending_idx(skb, copy_count(skb) - 1);
int i, err = 0;
/* Check status of header. */
err = (*gopp_copy)->status;
if (unlikely(err)) {
if (net_ratelimit())
netdev_dbg(queue->vif->dev,
"Grant copy of header failed! status: %d pending_idx: %u ref: %u\n",
(*gopp_copy)->status,
pending_idx,
(*gopp_copy)->source.u.ref);
/* The first frag might still have this slot mapped */
if (!sharedslot)
xenvif_idx_release(queue, pending_idx,
XEN_NETIF_RSP_ERROR);
for (i = 0; i < copy_count(skb); i++) {
int newerr;
/* Check status of header. */
pending_idx = copy_pending_idx(skb, i);
newerr = (*gopp_copy)->status;
if (likely(!newerr)) {
/* The first frag might still have this slot mapped */
if (i < copy_count(skb) - 1 || !sharedslot)
xenvif_idx_release(queue, pending_idx,
XEN_NETIF_RSP_OKAY);
} else {
err = newerr;
if (net_ratelimit())
netdev_dbg(queue->vif->dev,
"Grant copy of header failed! status: %d pending_idx: %u ref: %u\n",
(*gopp_copy)->status,
pending_idx,
(*gopp_copy)->source.u.ref);
/* The first frag might still have this slot mapped */
if (i < copy_count(skb) - 1 || !sharedslot)
xenvif_idx_release(queue, pending_idx,
XEN_NETIF_RSP_ERROR);
}
(*gopp_copy)++;
}
(*gopp_copy)++;
check_frags:
for (i = 0; i < nr_frags; i++, gop_map++) {
@ -517,14 +598,6 @@ check_frags:
if (err)
continue;
/* First error: if the header haven't shared a slot with the
* first frag, release it as well.
*/
if (!sharedslot)
xenvif_idx_release(queue,
XENVIF_TX_CB(skb)->pending_idx,
XEN_NETIF_RSP_OKAY);
/* Invalidate preceding fragments of this skb. */
for (j = 0; j < i; j++) {
pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
@ -794,7 +867,6 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
unsigned *copy_ops,
unsigned *map_ops)
{
struct gnttab_map_grant_ref *gop = queue->tx_map_ops;
struct sk_buff *skb, *nskb;
int ret;
unsigned int frag_overflow;
@ -876,8 +948,12 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
continue;
}
data_len = (txreq.size > XEN_NETBACK_TX_COPY_LEN) ?
XEN_NETBACK_TX_COPY_LEN : txreq.size;
ret = xenvif_count_requests(queue, &txreq, extra_count,
txfrags, work_to_do);
if (unlikely(ret < 0))
break;
@ -903,9 +979,8 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
index = pending_index(queue->pending_cons);
pending_idx = queue->pending_ring[index];
data_len = (txreq.size > XEN_NETBACK_TX_COPY_LEN &&
ret < XEN_NETBK_LEGACY_SLOTS_MAX) ?
XEN_NETBACK_TX_COPY_LEN : txreq.size;
if (ret >= XEN_NETBK_LEGACY_SLOTS_MAX - 1 && data_len < txreq.size)
data_len = txreq.size;
skb = xenvif_alloc_skb(data_len);
if (unlikely(skb == NULL)) {
@ -916,8 +991,6 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
}
skb_shinfo(skb)->nr_frags = ret;
if (data_len < txreq.size)
skb_shinfo(skb)->nr_frags++;
/* At this point shinfo->nr_frags is in fact the number of
* slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
*/
@ -979,54 +1052,19 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
type);
}
XENVIF_TX_CB(skb)->pending_idx = pending_idx;
__skb_put(skb, data_len);
queue->tx_copy_ops[*copy_ops].source.u.ref = txreq.gref;
queue->tx_copy_ops[*copy_ops].source.domid = queue->vif->domid;
queue->tx_copy_ops[*copy_ops].source.offset = txreq.offset;
queue->tx_copy_ops[*copy_ops].dest.u.gmfn =
virt_to_gfn(skb->data);
queue->tx_copy_ops[*copy_ops].dest.domid = DOMID_SELF;
queue->tx_copy_ops[*copy_ops].dest.offset =
offset_in_page(skb->data) & ~XEN_PAGE_MASK;
queue->tx_copy_ops[*copy_ops].len = data_len;
queue->tx_copy_ops[*copy_ops].flags = GNTCOPY_source_gref;
(*copy_ops)++;
if (data_len < txreq.size) {
frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
pending_idx);
xenvif_tx_create_map_op(queue, pending_idx, &txreq,
extra_count, gop);
gop++;
} else {
frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
INVALID_PENDING_IDX);
memcpy(&queue->pending_tx_info[pending_idx].req,
&txreq, sizeof(txreq));
queue->pending_tx_info[pending_idx].extra_count =
extra_count;
}
queue->pending_cons++;
gop = xenvif_get_requests(queue, skb, txfrags, gop,
frag_overflow, nskb);
xenvif_get_requests(queue, skb, &txreq, txfrags, copy_ops,
map_ops, frag_overflow, nskb, extra_count,
data_len);
__skb_queue_tail(&queue->tx_queue, skb);
queue->tx.req_cons = idx;
if (((gop-queue->tx_map_ops) >= ARRAY_SIZE(queue->tx_map_ops)) ||
if ((*map_ops >= ARRAY_SIZE(queue->tx_map_ops)) ||
(*copy_ops >= ARRAY_SIZE(queue->tx_copy_ops)))
break;
}
(*map_ops) = gop - queue->tx_map_ops;
return;
}
@ -1105,9 +1143,8 @@ static int xenvif_tx_submit(struct xenvif_queue *queue)
while ((skb = __skb_dequeue(&queue->tx_queue)) != NULL) {
struct xen_netif_tx_request *txp;
u16 pending_idx;
unsigned data_len;
pending_idx = XENVIF_TX_CB(skb)->pending_idx;
pending_idx = copy_pending_idx(skb, 0);
txp = &queue->pending_tx_info[pending_idx].req;
/* Check the remap error code. */
@ -1126,18 +1163,6 @@ static int xenvif_tx_submit(struct xenvif_queue *queue)
continue;
}
data_len = skb->len;
callback_param(queue, pending_idx).ctx = NULL;
if (data_len < txp->size) {
/* Append the packet payload as a fragment. */
txp->offset += data_len;
txp->size -= data_len;
} else {
/* Schedule a response immediately. */
xenvif_idx_release(queue, pending_idx,
XEN_NETIF_RSP_OKAY);
}
if (txp->flags & XEN_NETTXF_csum_blank)
skb->ip_summed = CHECKSUM_PARTIAL;
else if (txp->flags & XEN_NETTXF_data_validated)
@ -1314,7 +1339,7 @@ static inline void xenvif_tx_dealloc_action(struct xenvif_queue *queue)
/* Called after netfront has transmitted */
int xenvif_tx_action(struct xenvif_queue *queue, int budget)
{
unsigned nr_mops, nr_cops = 0;
unsigned nr_mops = 0, nr_cops = 0;
int work_done, ret;
if (unlikely(!tx_work_todo(queue)))
@ -1401,7 +1426,7 @@ static void push_tx_responses(struct xenvif_queue *queue)
notify_remote_via_irq(queue->tx_irq);
}
void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx)
static void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx)
{
int ret;
struct gnttab_unmap_grant_ref tx_unmap_op;


@ -82,9 +82,10 @@ static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
return false;
}
void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)
bool xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)
{
unsigned long flags;
bool ret = true;
spin_lock_irqsave(&queue->rx_queue.lock, flags);
@ -92,8 +93,7 @@ void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)
struct net_device *dev = queue->vif->dev;
netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));
kfree_skb(skb);
queue->vif->dev->stats.rx_dropped++;
ret = false;
} else {
if (skb_queue_empty(&queue->rx_queue))
xenvif_update_needed_slots(queue, skb);
@ -104,6 +104,8 @@ void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)
}
spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
return ret;
}
static struct sk_buff *xenvif_rx_dequeue(struct xenvif_queue *queue)
@ -473,7 +475,7 @@ static void xenvif_rx_skb(struct xenvif_queue *queue)
#define RX_BATCH_SIZE 64
void xenvif_rx_action(struct xenvif_queue *queue)
static void xenvif_rx_action(struct xenvif_queue *queue)
{
struct sk_buff_head completed_skbs;
unsigned int work_done = 0;


@ -1624,6 +1624,12 @@ static int netfront_resume(struct xenbus_device *dev)
netif_tx_unlock_bh(info->netdev);
xennet_disconnect_backend(info);
rtnl_lock();
if (info->queues)
xennet_destroy_queues(info);
rtnl_unlock();
return 0;
}


@ -2463,10 +2463,6 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
if (!ctrl->identified) {
int i;
ret = nvme_init_subsystem(ctrl, id);
if (ret)
goto out_free;
/*
* Check for quirks. Quirk can depend on firmware version,
* so, in principle, the set of quirks present can change
@ -2479,6 +2475,10 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
if (quirk_matches(id, &core_quirks[i]))
ctrl->quirks |= core_quirks[i].quirks;
}
ret = nvme_init_subsystem(ctrl, id);
if (ret)
goto out_free;
}
memcpy(ctrl->subsys->firmware_rev, id->fr,
sizeof(ctrl->subsys->firmware_rev));


@ -71,6 +71,7 @@ struct twlreg_info {
#define TWL6030_CFG_STATE_SLEEP 0x03
#define TWL6030_CFG_STATE_GRP_SHIFT 5
#define TWL6030_CFG_STATE_APP_SHIFT 2
#define TWL6030_CFG_STATE_MASK 0x03
#define TWL6030_CFG_STATE_APP_MASK (0x03 << TWL6030_CFG_STATE_APP_SHIFT)
#define TWL6030_CFG_STATE_APP(v) (((v) & TWL6030_CFG_STATE_APP_MASK) >>\
TWL6030_CFG_STATE_APP_SHIFT)
@ -131,13 +132,14 @@ static int twl6030reg_is_enabled(struct regulator_dev *rdev)
if (grp < 0)
return grp;
grp &= P1_GRP_6030;
val = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_STATE);
val = TWL6030_CFG_STATE_APP(val);
} else {
val = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_STATE);
val &= TWL6030_CFG_STATE_MASK;
grp = 1;
}
val = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_STATE);
val = TWL6030_CFG_STATE_APP(val);
return grp && (val == TWL6030_CFG_STATE_ON);
}
@ -190,7 +192,12 @@ static int twl6030reg_get_status(struct regulator_dev *rdev)
val = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_STATE);
switch (TWL6030_CFG_STATE_APP(val)) {
if (info->features & TWL6032_SUBCLASS)
val &= TWL6030_CFG_STATE_MASK;
else
val = TWL6030_CFG_STATE_APP(val);
switch (val) {
case TWL6030_CFG_STATE_ON:
return REGULATOR_STATUS_NORMAL;


@ -579,7 +579,7 @@ static void fbcon_prepare_logo(struct vc_data *vc, struct fb_info *info,
if (scr_readw(r) != vc->vc_video_erase_char)
break;
if (r != q && new_rows >= rows + logo_lines) {
save = kmalloc(array3_size(logo_lines, new_cols, 2),
save = kzalloc(array3_size(logo_lines, new_cols, 2),
GFP_KERNEL);
if (save) {
int i = cols < new_cols ? cols : new_cols;


@ -3,8 +3,8 @@
#define __LINUX_BITS_H
#include <linux/const.h>
#ifdef __GENKSYMS__
#include <vdso/bits.h>
#ifdef __GENKSYMS__
/*
* Old version of this macro to preserve the CRC signatures of some drm symbols.
* Crazy but true...
@ -14,7 +14,6 @@
#else
#include <asm/bitsperlong.h>
#define BIT(nr) (UL(1) << (nr))
#define BIT_ULL(nr) (ULL(1) << (nr))
#define BIT_MASK(nr) (UL(1) << ((nr) % BITS_PER_LONG))
#endif


@ -69,6 +69,7 @@ struct css_task_iter {
struct list_head iters_node; /* css_set->task_iters */
};
extern struct file_system_type cgroup_fs_type;
extern struct cgroup_root cgrp_dfl_root;
extern struct css_set init_css_set;


@ -2,6 +2,16 @@
#ifndef __VDSO_BITS_H
#define __VDSO_BITS_H
#ifdef __GENKSYMS__
/*
* Old version of this macro to preserve the CRC signatures of some drm symbols.
* Crazy but true...
*/
#define BIT(nr) (1UL << (nr))
#else
#include <vdso/const.h>
#define BIT(nr) (UL(1) << (nr))
#endif
#endif /* __VDSO_BITS_H */


@ -148,7 +148,6 @@ extern struct mutex cgroup_mutex;
extern spinlock_t css_set_lock;
extern struct cgroup_subsys *cgroup_subsys[];
extern struct list_head cgroup_roots;
extern struct file_system_type cgroup_fs_type;
/* iterate across the hierarchies */
#define for_each_root(root) \


@ -4120,6 +4120,7 @@ static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
unsigned int efd, cfd;
struct fd efile;
struct fd cfile;
struct dentry *cdentry;
const char *name;
char *endp;
int ret;
@ -4170,6 +4171,16 @@ static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
if (ret < 0)
goto out_put_cfile;
/*
* The control file must be a regular cgroup1 file. As a regular cgroup
* file can't be renamed, it's safe to access its name afterwards.
*/
cdentry = cfile.file->f_path.dentry;
if (cdentry->d_sb->s_type != &cgroup_fs_type || !d_is_reg(cdentry)) {
ret = -EINVAL;
goto out_put_cfile;
}
/*
* Determine the event callbacks and set them in @event. This used
* to be done via struct cftype but cgroup core no longer knows
@ -4178,7 +4189,7 @@ static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
*
* DO NOT ADD NEW FILES.
*/
name = cfile.file->f_path.dentry->d_name.name;
name = cdentry->d_name.name;
if (!strcmp(name, "memory.usage_in_bytes")) {
event->register_event = mem_cgroup_usage_register_event;
@ -4202,7 +4213,7 @@ static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
* automatically removed on cgroup destruction but the removal is
* asynchronous, so take an extra ref on @css.
*/
cfile_css = css_tryget_online_from_dir(cfile.file->f_path.dentry->d_parent,
cfile_css = css_tryget_online_from_dir(cdentry->d_parent,
&memory_cgrp_subsys);
ret = -EINVAL;
if (IS_ERR(cfile_css))


@ -133,7 +133,7 @@ struct p9_conn {
struct list_head unsent_req_list;
struct p9_req_t *rreq;
struct p9_req_t *wreq;
char tmp_buf[7];
char tmp_buf[P9_HDRSZ];
struct p9_fcall rc;
int wpos;
int wsize;
@ -306,7 +306,7 @@ static void p9_read_work(struct work_struct *work)
if (!m->rc.sdata) {
m->rc.sdata = m->tmp_buf;
m->rc.offset = 0;
m->rc.capacity = 7; /* start by reading header */
m->rc.capacity = P9_HDRSZ; /* start by reading header */
}
clear_bit(Rpending, &m->wsched);
@ -329,7 +329,7 @@ static void p9_read_work(struct work_struct *work)
p9_debug(P9_DEBUG_TRANS, "got new header\n");
/* Header size */
m->rc.size = 7;
m->rc.size = P9_HDRSZ;
err = p9_parse_header(&m->rc, &m->rc.size, NULL, NULL, 0);
if (err) {
p9_debug(P9_DEBUG_ERROR,


@ -230,6 +230,14 @@ static void p9_xen_response(struct work_struct *work)
continue;
}
if (h.size > req->rc.capacity) {
dev_warn(&priv->dev->dev,
"requested packet size too big: %d for tag %d with capacity %zd\n",
h.size, h.tag, req->rc.capacity);
req->status = REQ_STATUS_ERROR;
goto recv_error;
}
memcpy(&req->rc, &h, sizeof(h));
req->rc.offset = 0;
@ -239,6 +247,7 @@ static void p9_xen_response(struct work_struct *work)
masked_prod, &masked_cons,
XEN_9PFS_RING_SIZE);
recv_error:
virt_mb();
cons += h.size;
ring->intf->in_cons = cons;


@ -1014,6 +1014,7 @@ static int get_l2cap_conn(char *buf, bdaddr_t *addr, u8 *addr_type,
hci_dev_lock(hdev);
hcon = hci_conn_hash_lookup_le(hdev, addr, *addr_type);
hci_dev_unlock(hdev);
hci_dev_put(hdev);
if (!hcon)
return -ENOENT;


@ -772,7 +772,7 @@ static int __init bt_init(void)
err = bt_sysfs_init();
if (err < 0)
return err;
goto cleanup_led;
err = sock_register(&bt_sock_family_ops);
if (err)
@ -808,6 +808,8 @@ unregister_socket:
sock_unregister(PF_BLUETOOTH);
cleanup_sysfs:
bt_sysfs_cleanup();
cleanup_led:
bt_leds_cleanup();
return err;
}


@ -734,6 +734,9 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
ipv6_hdr(skb)->payload_len = htons(first_len -
sizeof(struct ipv6hdr));
/* We prevent @rt from being freed. */
rcu_read_lock();
for (;;) {
/* Prepare header of the next frame,
* before previous one went down. */
@ -776,6 +779,7 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
if (err == 0) {
IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
IPSTATS_MIB_FRAGOKS);
rcu_read_unlock();
return 0;
}
@ -783,6 +787,7 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
IPSTATS_MIB_FRAGFAILS);
rcu_read_unlock();
return err;
slow_path_clean:


@ -669,6 +669,7 @@ ieee802154_if_add(struct ieee802154_local *local, const char *name,
sdata->dev = ndev;
sdata->wpan_dev.wpan_phy = local->hw.phy;
sdata->local = local;
INIT_LIST_HEAD(&sdata->wpan_dev.list);
/* setup type-dependent data */
ret = ieee802154_setup_sdata(sdata, type);


@ -230,6 +230,8 @@ static int nci_add_new_protocol(struct nci_dev *ndev,
target->sens_res = nfca_poll->sens_res;
target->sel_res = nfca_poll->sel_res;
target->nfcid1_len = nfca_poll->nfcid1_len;
if (target->nfcid1_len > ARRAY_SIZE(target->nfcid1))
return -EPROTO;
if (target->nfcid1_len > 0) {
memcpy(target->nfcid1, nfca_poll->nfcid1,
target->nfcid1_len);
@ -238,6 +240,8 @@ static int nci_add_new_protocol(struct nci_dev *ndev,
nfcb_poll = (struct rf_tech_specific_params_nfcb_poll *)params;
target->sensb_res_len = nfcb_poll->sensb_res_len;
if (target->sensb_res_len > ARRAY_SIZE(target->sensb_res))
return -EPROTO;
if (target->sensb_res_len > 0) {
memcpy(target->sensb_res, nfcb_poll->sensb_res,
target->sensb_res_len);
@ -246,6 +250,8 @@ static int nci_add_new_protocol(struct nci_dev *ndev,
nfcf_poll = (struct rf_tech_specific_params_nfcf_poll *)params;
target->sensf_res_len = nfcf_poll->sensf_res_len;
if (target->sensf_res_len > ARRAY_SIZE(target->sensf_res))
return -EPROTO;
if (target->sensf_res_len > 0) {
memcpy(target->sensf_res, nfcf_poll->sensf_res,
target->sensf_res_len);
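
All three hunks apply the same defence: a length parsed from the radio is validated against the fixed-size destination array before the memcpy(). The shape of the fix, with illustrative names and sizes:

#include <errno.h>
#include <string.h>

#define NFCID_MAX 10			/* illustrative, not the NCI limit */

struct my_target {
	unsigned char nfcid[NFCID_MAX];
	unsigned int nfcid_len;
};

static int store_nfcid(struct my_target *t, const unsigned char *src,
		       unsigned int len)	/* device-controlled */
{
	if (len > sizeof(t->nfcid))
		return -EPROTO;		/* reject rather than overflow */
	memcpy(t->nfcid, src, len);
	t->nfcid_len = len;
	return 0;
}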


@ -1595,7 +1595,9 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
if (tipc_own_addr(l->net) > msg_prevnode(hdr))
l->net_plane = msg_net_plane(hdr);
skb_linearize(skb);
if (skb_linearize(skb))
goto exit;
hdr = buf_msg(skb);
data = msg_data(hdr);


@ -126,15 +126,19 @@ EXPORT_SYMBOL(snd_seq_dump_var_event);
* expand the variable length event to linear buffer space.
*/
static int seq_copy_in_kernel(char **bufptr, const void *src, int size)
static int seq_copy_in_kernel(void *ptr, void *src, int size)
{
char **bufptr = ptr;
memcpy(*bufptr, src, size);
*bufptr += size;
return 0;
}
static int seq_copy_in_user(char __user **bufptr, const void *src, int size)
static int seq_copy_in_user(void *ptr, void *src, int size)
{
char __user **bufptr = ptr;
if (copy_to_user(*bufptr, src, size))
return -EFAULT;
*bufptr += size;
@ -163,8 +167,7 @@ int snd_seq_expand_var_event(const struct snd_seq_event *event, int count, char
return newlen;
}
err = snd_seq_dump_var_event(event,
in_kernel ? (snd_seq_dump_func_t)seq_copy_in_kernel :
(snd_seq_dump_func_t)seq_copy_in_user,
in_kernel ? seq_copy_in_kernel : seq_copy_in_user,
&buf);
return err < 0 ? err : newlen;
}
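
The motivation here: snd_seq_dump_var_event() invokes its callback through a snd_seq_dump_func_t pointer, and casting functions with different parameter types to that pointer produces an indirect call whose signature no longer matches, which kernel CFI traps at runtime. The fix gives each callback the exact expected prototype and moves the cast onto the argument instead; generically:

typedef int (*dump_func_t)(void *ptr, void *buf, int count);	/* illustrative */

/* Matching prototype: the indirect call through dump_func_t now has an
 * identical signature, so CFI is satisfied; the real type is recovered
 * inside the body rather than by casting the function pointer.
 */
static int copy_cb(void *ptr, void *buf, int count)
{
	char **dst = ptr;
	memcpy(*dst, buf, count);
	*dst += count;
	return 0;
}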


@ -1369,6 +1369,8 @@ static void dpcm_be_reparent(struct snd_soc_pcm_runtime *fe,
return;
be_substream = snd_soc_dpcm_get_substream(be, stream);
if (!be_substream)
return;
list_for_each_entry(dpcm, &be->dpcm[stream].fe_clients, list_fe) {
if (dpcm->fe == fe)


@ -681,7 +681,7 @@ kci_test_ipsec_offload()
tmpl proto esp src $srcip dst $dstip spi 9 \
mode transport reqid 42
check_err $?
ip x p add dir out src $dstip/24 dst $srcip/24 \
ip x p add dir in src $dstip/24 dst $srcip/24 \
tmpl proto esp src $dstip dst $srcip spi 9 \
mode transport reqid 42
check_err $?


@ -194,6 +194,14 @@ do
shift
done
if test -z "$TORTURE_INITRD" || tools/testing/selftests/rcutorture/bin/mkinitrd.sh
then
:
else
echo No initrd and unable to create one, aborting test >&2
exit 1
fi
CONFIGFRAG=${KVM}/configs/${TORTURE_SUITE}; export CONFIGFRAG
if test -z "$configs"


@ -0,0 +1,60 @@
#!/bin/bash
#
# Create an initrd directory if one does not already exist.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, you can access it online at
# http://www.gnu.org/licenses/gpl-2.0.html.
#
# Copyright (C) IBM Corporation, 2013
#
# Author: Connor Shu <Connor.Shu@ibm.com>
D=tools/testing/selftests/rcutorture
# Prerequisite checks
[ -z "$D" ] && echo >&2 "No argument supplied" && exit 1
if [ ! -d "$D" ]; then
echo >&2 "$D does not exist: Malformed kernel source tree?"
exit 1
fi
if [ -d "$D/initrd" ]; then
echo "$D/initrd already exists, no need to create it"
exit 0
fi
T=${TMPDIR-/tmp}/mkinitrd.sh.$$
trap 'rm -rf $T' 0 2
mkdir $T
cat > $T/init << '__EOF___'
#!/bin/sh
while :
do
sleep 1000000
done
__EOF___
# Try using dracut to create initrd
command -v dracut >/dev/null 2>&1 || { echo >&2 "Dracut not installed"; exit 1; }
echo Creating $D/initrd using dracut.
# Filesystem creation
dracut --force --no-hostonly --no-hostonly-cmdline --module "base" $T/initramfs.img
cd $D
mkdir initrd
cd initrd
zcat $T/initramfs.img | cpio -id
cp $T/init init
echo Done creating $D/initrd using dracut
exit 0