This is the 4.19.221 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmG4YVYACgkQONu9yGCS
 aT5XZBAAsAbvO7xnyOVCN9+hwMOVJXMDUAsTm94JKdeP1wcnySgkxaELRvX3w2jD
 wVHoyysfEZskaHfvET0gnvEKIOZ/eQCR7ugI7Ry+8tOW0CBtg7vk/4Lcpyk2eHBa
 iadAzTyeCSG3y0Usn26m7CyoBJNd/GGy1tOR0D8Rwx05DMOHO3z6frjQ5yFHkFha
 /WBgWnjqh2SZ3gZ00vE/rlqsgQWiO88NGAPWstktHm4ImC4NQP5Teo9gdmUM41v1
 M4IRMBkTlBW9vyAlHMHChYOobeKUlhP9j7WCTzB0RlE9to01Eb23bL3Sq+7s+an8
 5N5j7tOyxrOUU3wOmR6M9WpkA6vIite0Ux9FLray89f5yM4sotxtFj7gY51Udc/2
 spAsp6NigEjyIJ1kVSnPRsu2iIeY4fc9xJLfhW3s110d3HvEq/9B+lcttX3KqBUm
 qr1M8lUvyy8dmYfTzfvc9bIl63db9c8Tz1d/9VmT1+zEReolafpr5LM2aII5vrLo
 VnKSvU+v9vy7AKaPnxHB9HG3EA+V56+ehoFDxyps5tGQ9VCtxIUZHPM6/b3V6c7W
 tjj/eUxKl3nzb7aHMt3rcL/VNzXuSjCjElUgQUH8Jfn9NcfnvVPPVplsxsQzTqag
 82vx+p4DoWbjrgzxHLj7MX7WFZnZTHn0qQeRG+ZqmQONEefRmgU=
 =VKlL
 -----END PGP SIGNATURE-----

Merge 4.19.221 into android-4.19-stable

Changes in 4.19.221
	HID: google: add eel USB id
	HID: add hid_is_usb() function to make it simpler for USB detection
	HID: add USB_HID dependency to hid-prodikeys
	HID: add USB_HID dependency to hid-chicony
	HID: add USB_HID dependancy on some USB HID drivers
	HID: wacom: fix problems when device is not a valid USB device
	HID: check for valid USB device for many HID drivers
	can: kvaser_usb: get CAN clock frequency from device
	can: sja1000: fix use after free in ems_pcmcia_add_card()
	net: core: netlink: add helper refcount dec and lock function
	net: sched: rename qdisc_destroy() to qdisc_put()
	net: sched: extend Qdisc with rcu
	net: sched: add helper function to take reference to Qdisc
	net: sched: use Qdisc rcu API instead of relying on rtnl lock
	nfc: fix potential NULL pointer deref in nfc_genl_dump_ses_done
	bpf: Fix the off-by-two error in range markings
	ice: ignore dropped packets during init
	bonding: make tx_rebalance_counter an atomic
	nfp: Fix memory leak in nfp_cpp_area_cache_add()
	seg6: fix the iif in the IPv6 socket control block
	udp: using datalen to cap max gso segments
	IB/hfi1: Correct guard on eager buffer deallocation
	mm: bdi: initialize bdi_min_ratio when bdi is unregistered
	ALSA: ctl: Fix copy of updated id with element read/write
	ALSA: pcm: oss: Fix negative period/buffer sizes
	ALSA: pcm: oss: Limit the period size to 16MB
	ALSA: pcm: oss: Handle missing errors in snd_pcm_oss_change_params*()
	tracefs: Have new files inherit the ownership of their parent
	clk: qcom: regmap-mux: fix parent clock lookup
	can: pch_can: pch_can_rx_normal: fix use after free
	can: m_can: Disable and ignore ELO interrupt
	libata: add horkage for ASMedia 1092
	wait: add wake_up_pollfree()
	binder: use wake_up_pollfree()
	signalfd: use wake_up_pollfree()
	aio: keep poll requests on waitqueue until completed
	aio: fix use-after-free due to missing POLLFREE handling
	tracefs: Set all files to the same group ownership as the mount option
	block: fix ioprio_get(IOPRIO_WHO_PGRP) vs setuid(2)
	qede: validate non LSO skb length
	ASoC: qdsp6: q6routing: Fix return value from msm_routing_put_audio_mixer
	i40e: Fix pre-set max number of queues for VF
	mtd: rawnand: fsmc: Take instruction delay into account
	tools build: Remove needless libpython-version feature check that breaks test-all fast path
	net: cdc_ncm: Allow for dwNtbOutMaxSize to be unset or zero
	net: altera: set a couple error code in probe()
	net: fec: only clear interrupt of handling queue in fec_enet_rx_queue()
	net, neigh: clear whole pneigh_entry at alloc time
	net/qla3xxx: fix an error code in ql_adapter_up()
	USB: gadget: detect too-big endpoint 0 requests
	USB: gadget: zero allocate endpoint 0 buffers
	usb: core: config: fix validation of wMaxPacketSize entries
	xhci: Remove CONFIG_USB_DEFAULT_PERSIST to prevent xHCI from runtime suspending
	usb: core: config: using bit mask instead of individual bits
	xhci: avoid race between disable slot command and host runtime suspend
	iio: trigger: Fix reference counting
	iio: trigger: stm32-timer: fix MODULE_ALIAS
	iio: stk3310: Don't return error code in interrupt handler
	iio: mma8452: Fix trigger reference counting
	iio: ltr501: Don't return error code in trigger handler
	iio: kxsd9: Don't return error code in trigger handler
	iio: itg3200: Call iio_trigger_notify_done() on error
	iio: dln2-adc: Fix lockdep complaint
	iio: dln2: Check return value of devm_iio_trigger_register()
	iio: at91-sama5d2: Fix incorrect sign extension
	iio: adc: axp20x_adc: fix charging current reporting on AXP22x
	iio: accel: kxcjk-1013: Fix possible memory leak in probe and remove
	irqchip/armada-370-xp: Fix return value of armada_370_xp_msi_alloc()
	irqchip/armada-370-xp: Fix support for Multi-MSI interrupts
	irqchip/irq-gic-v3-its.c: Force synchronisation when issuing INVALL
	irqchip: nvic: Fix offset for Interrupt Priority Offsets
	net_sched: fix a crash in tc_new_tfilter()
	net: sched: make function qdisc_free_cb() static
	Linux 4.19.221

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: Ie58fef73a6ccfbd581bac4a655548f92816f1cbd

commit 965798c6c9
Greg Kroah-Hartman  2021-12-14 10:41:13 +01:00

112 files changed, 833 insertions(+), 315 deletions(-)


@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 4
PATCHLEVEL = 19
SUBLEVEL = 220
SUBLEVEL = 221
EXTRAVERSION =
NAME = "People's Front"


@ -206,6 +206,7 @@ SYSCALL_DEFINE2(ioprio_get, int, which, int, who)
pgrp = task_pgrp(current);
else
pgrp = find_vpid(who);
read_lock(&tasklist_lock);
do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
tmpio = get_task_ioprio(p);
if (tmpio < 0)
@ -215,6 +216,8 @@ SYSCALL_DEFINE2(ioprio_get, int, which, int, who)
else
ret = ioprio_best(ret, tmpio);
} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
read_unlock(&tasklist_lock);
break;
case IOPRIO_WHO_USER:
uid = make_kuid(current_user_ns(), who);


@ -4736,23 +4736,20 @@ static int binder_thread_release(struct binder_proc *proc,
}
/*
* If this thread used poll, make sure we remove the waitqueue
* from any epoll data structures holding it with POLLFREE.
* waitqueue_active() is safe to use here because we're holding
* the inner lock.
* If this thread used poll, make sure we remove the waitqueue from any
* poll data structures holding it.
*/
if ((thread->looper & BINDER_LOOPER_STATE_POLL) &&
waitqueue_active(&thread->wait)) {
wake_up_poll(&thread->wait, EPOLLHUP | POLLFREE);
}
if (thread->looper & BINDER_LOOPER_STATE_POLL)
wake_up_pollfree(&thread->wait);
binder_inner_proc_unlock(thread->proc);
/*
* This is needed to avoid races between wake_up_poll() above and
* and ep_remove_waitqueue() called for other reasons (eg the epoll file
* descriptor being closed); ep_remove_waitqueue() holds an RCU read
* lock, so we can be sure it's done after calling synchronize_rcu().
* This is needed to avoid races between wake_up_pollfree() above and
* someone else removing the last entry from the queue for other reasons
* (e.g. ep_remove_wait_queue() being called due to an epoll file
* descriptor being closed). Such other users hold an RCU read lock, so
* we can be sure they're done after we call synchronize_rcu().
*/
if (thread->looper & BINDER_LOOPER_STATE_POLL)
synchronize_rcu();


@ -4453,6 +4453,8 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
{ "VRFDFC22048UCHC-TE*", NULL, ATA_HORKAGE_NODMA },
/* Odd clown on sil3726/4726 PMPs */
{ "Config Disk", NULL, ATA_HORKAGE_DISABLE },
/* Similar story with ASMedia 1092 */
{ "ASMT109x- Config", NULL, ATA_HORKAGE_DISABLE },
/* Weird ATAPI devices */
{ "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 },


@ -36,7 +36,7 @@ static u8 mux_get_parent(struct clk_hw *hw)
val &= mask;
if (mux->parent_map)
return qcom_find_src_index(hw, mux->parent_map, val);
return qcom_find_cfg_index(hw, mux->parent_map, val);
return val;
}


@ -69,6 +69,18 @@ int qcom_find_src_index(struct clk_hw *hw, const struct parent_map *map, u8 src)
}
EXPORT_SYMBOL_GPL(qcom_find_src_index);
int qcom_find_cfg_index(struct clk_hw *hw, const struct parent_map *map, u8 cfg)
{
int i, num_parents = clk_hw_get_num_parents(hw);
for (i = 0; i < num_parents; i++)
if (cfg == map[i].cfg)
return i;
return -ENOENT;
}
EXPORT_SYMBOL_GPL(qcom_find_cfg_index);
struct regmap *
qcom_cc_map(struct platform_device *pdev, const struct qcom_cc_desc *desc)
{


@ -47,6 +47,8 @@ extern void
qcom_pll_set_fsm_mode(struct regmap *m, u32 reg, u8 bias_count, u8 lock_count);
extern int qcom_find_src_index(struct clk_hw *hw, const struct parent_map *map,
u8 src);
extern int qcom_find_cfg_index(struct clk_hw *hw, const struct parent_map *map,
u8 cfg);
extern int qcom_cc_register_board_clk(struct device *dev, const char *path,
const char *name, unsigned long rate);


@ -191,14 +191,14 @@ config HID_CHERRY
config HID_CHICONY
tristate "Chicony devices"
depends on HID
depends on USB_HID
default !EXPERT
---help---
Support for Chicony Tactical pad and special keys on Chicony keyboards.
config HID_CORSAIR
tristate "Corsair devices"
depends on HID && USB && LEDS_CLASS
depends on USB_HID && LEDS_CLASS
---help---
Support for Corsair devices that are not fully compliant with the
HID standard.
@ -219,7 +219,7 @@ config HID_COUGAR
config HID_PRODIKEYS
tristate "Prodikeys PC-MIDI Keyboard support"
depends on HID && SND
depends on USB_HID && SND
select SND_RAWMIDI
---help---
Support for Prodikeys PC-MIDI Keyboard device support.
@ -484,7 +484,7 @@ config HID_LENOVO
config HID_LOGITECH
tristate "Logitech devices"
depends on HID
depends on USB_HID
default !EXPERT
---help---
Support for Logitech devices that are not fully compliant with HID standard.
@ -833,7 +833,7 @@ config HID_SAITEK
config HID_SAMSUNG
tristate "Samsung InfraRed remote control or keyboards"
depends on HID
depends on USB_HID
---help---
Support for Samsung InfraRed remote control or keyboards.


@ -622,7 +622,7 @@ static int asus_probe(struct hid_device *hdev, const struct hid_device_id *id)
if (drvdata->quirks & QUIRK_IS_MULTITOUCH)
drvdata->tp = &asus_i2c_tp;
if (drvdata->quirks & QUIRK_T100_KEYBOARD) {
if ((drvdata->quirks & QUIRK_T100_KEYBOARD) && hid_is_usb(hdev)) {
struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
if (intf->altsetting->desc.bInterfaceNumber == T100_TPAD_INTF) {


@ -61,8 +61,12 @@ static int ch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
static __u8 *ch_switch12_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
struct usb_interface *intf;
if (!hid_is_usb(hdev))
return rdesc;
intf = to_usb_interface(hdev->dev.parent);
if (intf->cur_altsetting->desc.bInterfaceNumber == 1) {
/* Change usage maximum and logical maximum from 0x7fff to
* 0x2fff, so they don't exceed HID_MAX_USAGES */


@ -556,7 +556,12 @@ static int corsair_probe(struct hid_device *dev, const struct hid_device_id *id)
int ret;
unsigned long quirks = id->driver_data;
struct corsair_drvdata *drvdata;
struct usb_interface *usbif = to_usb_interface(dev->dev.parent);
struct usb_interface *usbif;
if (!hid_is_usb(dev))
return -EINVAL;
usbif = to_usb_interface(dev->dev.parent);
drvdata = devm_kzalloc(&dev->dev, sizeof(struct corsair_drvdata),
GFP_KERNEL);


@ -54,7 +54,7 @@ struct elan_drvdata {
static int is_not_elan_touchpad(struct hid_device *hdev)
{
if (hdev->bus == BUS_USB) {
if (hid_is_usb(hdev)) {
struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
return (intf->altsetting->desc.bInterfaceNumber !=


@ -230,6 +230,9 @@ static int elo_probe(struct hid_device *hdev, const struct hid_device_id *id)
struct elo_priv *priv;
int ret;
if (!hid_is_usb(hdev))
return -EINVAL;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;


@ -120,6 +120,8 @@ static int hammer_input_configured(struct hid_device *hdev,
static const struct hid_device_id hammer_devices[] = {
{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_DON) },
{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_EEL) },
{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_HAMMER) },
{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,


@ -143,12 +143,17 @@ static int holtek_kbd_input_event(struct input_dev *dev, unsigned int type,
static int holtek_kbd_probe(struct hid_device *hdev,
const struct hid_device_id *id)
{
struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
int ret = hid_parse(hdev);
struct usb_interface *intf;
int ret;
if (!hid_is_usb(hdev))
return -EINVAL;
ret = hid_parse(hdev);
if (!ret)
ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
intf = to_usb_interface(hdev->dev.parent);
if (!ret && intf->cur_altsetting->desc.bInterfaceNumber == 1) {
struct hid_input *hidinput;
list_for_each_entry(hidinput, &hdev->inputs, list) {


@ -65,6 +65,14 @@ static __u8 *holtek_mouse_report_fixup(struct hid_device *hdev, __u8 *rdesc,
return rdesc;
}
static int holtek_mouse_probe(struct hid_device *hdev,
const struct hid_device_id *id)
{
if (!hid_is_usb(hdev))
return -EINVAL;
return 0;
}
static const struct hid_device_id holtek_mouse_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT,
USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A067) },
@ -86,6 +94,7 @@ static struct hid_driver holtek_mouse_driver = {
.name = "holtek_mouse",
.id_table = holtek_mouse_devices,
.report_fixup = holtek_mouse_report_fixup,
.probe = holtek_mouse_probe,
};
module_hid_driver(holtek_mouse_driver);


@ -479,6 +479,7 @@
#define USB_DEVICE_ID_GOOGLE_MAGNEMITE 0x503d
#define USB_DEVICE_ID_GOOGLE_MOONBALL 0x5044
#define USB_DEVICE_ID_GOOGLE_DON 0x5050
#define USB_DEVICE_ID_GOOGLE_EEL 0x5057
#define USB_VENDOR_ID_GOTOP 0x08f2
#define USB_DEVICE_ID_SUPER_Q2 0x007f


@ -714,12 +714,18 @@ static int lg_raw_event(struct hid_device *hdev, struct hid_report *report,
static int lg_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
struct usb_interface *iface = to_usb_interface(hdev->dev.parent);
__u8 iface_num = iface->cur_altsetting->desc.bInterfaceNumber;
struct usb_interface *iface;
__u8 iface_num;
unsigned int connect_mask = HID_CONNECT_DEFAULT;
struct lg_drv_data *drv_data;
int ret;
if (!hid_is_usb(hdev))
return -EINVAL;
iface = to_usb_interface(hdev->dev.parent);
iface_num = iface->cur_altsetting->desc.bInterfaceNumber;
/* G29 only work with the 1st interface */
if ((hdev->product == USB_DEVICE_ID_LOGITECH_G29_WHEEL) &&
(iface_num != 0)) {


@ -802,12 +802,18 @@ static int pk_raw_event(struct hid_device *hdev, struct hid_report *report,
static int pk_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
int ret;
struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
unsigned short ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
struct usb_interface *intf;
unsigned short ifnum;
unsigned long quirks = id->driver_data;
struct pk_device *pk;
struct pcmidi_snd *pm = NULL;
if (!hid_is_usb(hdev))
return -EINVAL;
intf = to_usb_interface(hdev->dev.parent);
ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
pk = kzalloc(sizeof(*pk), GFP_KERNEL);
if (pk == NULL) {
hid_err(hdev, "can't alloc descriptor\n");


@ -347,6 +347,9 @@ static int arvo_probe(struct hid_device *hdev,
{
int retval;
if (!hid_is_usb(hdev))
return -EINVAL;
retval = hid_parse(hdev);
if (retval) {
hid_err(hdev, "parse failed\n");


@ -327,6 +327,9 @@ static int isku_probe(struct hid_device *hdev,
{
int retval;
if (!hid_is_usb(hdev))
return -EINVAL;
retval = hid_parse(hdev);
if (retval) {
hid_err(hdev, "parse failed\n");


@ -752,6 +752,9 @@ static int kone_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
int retval;
if (!hid_is_usb(hdev))
return -EINVAL;
retval = hid_parse(hdev);
if (retval) {
hid_err(hdev, "parse failed\n");


@ -434,6 +434,9 @@ static int koneplus_probe(struct hid_device *hdev,
{
int retval;
if (!hid_is_usb(hdev))
return -EINVAL;
retval = hid_parse(hdev);
if (retval) {
hid_err(hdev, "parse failed\n");


@ -136,6 +136,9 @@ static int konepure_probe(struct hid_device *hdev,
{
int retval;
if (!hid_is_usb(hdev))
return -EINVAL;
retval = hid_parse(hdev);
if (retval) {
hid_err(hdev, "parse failed\n");


@ -504,6 +504,9 @@ static int kovaplus_probe(struct hid_device *hdev,
{
int retval;
if (!hid_is_usb(hdev))
return -EINVAL;
retval = hid_parse(hdev);
if (retval) {
hid_err(hdev, "parse failed\n");


@ -163,6 +163,9 @@ static int lua_probe(struct hid_device *hdev,
{
int retval;
if (!hid_is_usb(hdev))
return -EINVAL;
retval = hid_parse(hdev);
if (retval) {
hid_err(hdev, "parse failed\n");


@ -452,6 +452,9 @@ static int pyra_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
int retval;
if (!hid_is_usb(hdev))
return -EINVAL;
retval = hid_parse(hdev);
if (retval) {
hid_err(hdev, "parse failed\n");


@ -144,6 +144,9 @@ static int ryos_probe(struct hid_device *hdev,
{
int retval;
if (!hid_is_usb(hdev))
return -EINVAL;
retval = hid_parse(hdev);
if (retval) {
hid_err(hdev, "parse failed\n");


@ -116,6 +116,9 @@ static int savu_probe(struct hid_device *hdev,
{
int retval;
if (!hid_is_usb(hdev))
return -EINVAL;
retval = hid_parse(hdev);
if (retval) {
hid_err(hdev, "parse failed\n");


@ -157,6 +157,9 @@ static int samsung_probe(struct hid_device *hdev,
int ret;
unsigned int cmask = HID_CONNECT_DEFAULT;
if (!hid_is_usb(hdev))
return -EINVAL;
ret = hid_parse(hdev);
if (ret) {
hid_err(hdev, "parse failed\n");


@ -791,6 +791,9 @@ static int uclogic_tablet_enable(struct hid_device *hdev)
__u8 *p;
s32 v;
if (!hid_is_usb(hdev))
return -EINVAL;
/*
* Read string descriptor containing tablet parameters. The specific
* string descriptor and data were discovered by sniffing the Windows


@ -697,7 +697,7 @@ static void wacom_retrieve_hid_descriptor(struct hid_device *hdev,
* Skip the query for this type and modify defaults based on
* interface number.
*/
if (features->type == WIRELESS) {
if (features->type == WIRELESS && intf) {
if (intf->cur_altsetting->desc.bInterfaceNumber == 0)
features->device_type = WACOM_DEVICETYPE_WL_MONITOR;
else
@ -2188,7 +2188,7 @@ static void wacom_update_name(struct wacom *wacom, const char *suffix)
if ((features->type == HID_GENERIC) && !strcmp("Wacom HID", features->name)) {
char *product_name = wacom->hdev->name;
if (hid_is_using_ll_driver(wacom->hdev, &usb_hid_driver)) {
if (hid_is_usb(wacom->hdev)) {
struct usb_interface *intf = to_usb_interface(wacom->hdev->dev.parent);
struct usb_device *dev = interface_to_usbdev(intf);
product_name = dev->product;
@ -2419,6 +2419,9 @@ static void wacom_wireless_work(struct work_struct *work)
wacom_destroy_battery(wacom);
if (!usbdev)
return;
/* Stylus interface */
hdev1 = usb_get_intfdata(usbdev->config->interface[1]);
wacom1 = hid_get_drvdata(hdev1);
@ -2698,8 +2701,6 @@ static void wacom_mode_change_work(struct work_struct *work)
static int wacom_probe(struct hid_device *hdev,
const struct hid_device_id *id)
{
struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
struct usb_device *dev = interface_to_usbdev(intf);
struct wacom *wacom;
struct wacom_wac *wacom_wac;
struct wacom_features *features;
@ -2736,8 +2737,14 @@ static int wacom_probe(struct hid_device *hdev,
wacom_wac->hid_data.inputmode = -1;
wacom_wac->mode_report = -1;
wacom->usbdev = dev;
wacom->intf = intf;
if (hid_is_usb(hdev)) {
struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
struct usb_device *dev = interface_to_usbdev(intf);
wacom->usbdev = dev;
wacom->intf = intf;
}
mutex_init(&wacom->lock);
INIT_DELAYED_WORK(&wacom->init_work, wacom_init_work);
INIT_WORK(&wacom->wireless_work, wacom_wireless_work);


@ -1423,8 +1423,7 @@ static int kxcjk1013_probe(struct i2c_client *client,
return 0;
err_buffer_cleanup:
if (data->dready_trig)
iio_triggered_buffer_cleanup(indio_dev);
iio_triggered_buffer_cleanup(indio_dev);
err_trigger_unregister:
if (data->dready_trig)
iio_trigger_unregister(data->dready_trig);
@ -1447,8 +1446,8 @@ static int kxcjk1013_remove(struct i2c_client *client)
pm_runtime_set_suspended(&client->dev);
pm_runtime_put_noidle(&client->dev);
iio_triggered_buffer_cleanup(indio_dev);
if (data->dready_trig) {
iio_triggered_buffer_cleanup(indio_dev);
iio_trigger_unregister(data->dready_trig);
iio_trigger_unregister(data->motion_trig);
}


@ -227,14 +227,14 @@ static irqreturn_t kxsd9_trigger_handler(int irq, void *p)
hw_values.chan,
sizeof(hw_values.chan));
if (ret) {
dev_err(st->dev,
"error reading data\n");
return ret;
dev_err(st->dev, "error reading data: %d\n", ret);
goto out;
}
iio_push_to_buffers_with_timestamp(indio_dev,
&hw_values,
iio_get_time_ns(indio_dev));
out:
iio_trigger_notify_done(indio_dev->trig);
return IRQ_HANDLED;


@ -1470,7 +1470,7 @@ static int mma8452_trigger_setup(struct iio_dev *indio_dev)
if (ret)
return ret;
indio_dev->trig = trig;
indio_dev->trig = iio_trigger_get(trig);
return 0;
}


@ -1375,7 +1375,8 @@ static int at91_adc_read_info_raw(struct iio_dev *indio_dev,
*val = st->conversion_value;
ret = at91_adc_adjust_val_osr(st, val);
if (chan->scan_type.sign == 's')
*val = sign_extend32(*val, 11);
*val = sign_extend32(*val,
chan->scan_type.realbits - 1);
st->conversion_done = false;
}


@ -254,19 +254,8 @@ static int axp22x_adc_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan, int *val)
{
struct axp20x_adc_iio *info = iio_priv(indio_dev);
int size;
/*
* N.B.: Unlike the Chinese datasheets tell, the charging current is
* stored on 12 bits, not 13 bits. Only discharging current is on 13
* bits.
*/
if (chan->type == IIO_CURRENT && chan->channel == AXP22X_BATT_DISCHRG_I)
size = 13;
else
size = 12;
*val = axp20x_read_variable_width(info->regmap, chan->address, size);
*val = axp20x_read_variable_width(info->regmap, chan->address, 12);
if (*val < 0)
return *val;
@ -389,9 +378,8 @@ static int axp22x_adc_scale(struct iio_chan_spec const *chan, int *val,
return IIO_VAL_INT_PLUS_MICRO;
case IIO_CURRENT:
*val = 0;
*val2 = 500000;
return IIO_VAL_INT_PLUS_MICRO;
*val = 1;
return IIO_VAL_INT;
case IIO_TEMP:
*val = 100;


@ -251,7 +251,6 @@ static int dln2_adc_set_chan_period(struct dln2_adc *dln2,
static int dln2_adc_read(struct dln2_adc *dln2, unsigned int channel)
{
int ret, i;
struct iio_dev *indio_dev = platform_get_drvdata(dln2->pdev);
u16 conflict;
__le16 value;
int olen = sizeof(value);
@ -260,13 +259,9 @@ static int dln2_adc_read(struct dln2_adc *dln2, unsigned int channel)
.chan = channel,
};
ret = iio_device_claim_direct_mode(indio_dev);
if (ret < 0)
return ret;
ret = dln2_adc_set_chan_enabled(dln2, channel, true);
if (ret < 0)
goto release_direct;
return ret;
ret = dln2_adc_set_port_enabled(dln2, true, &conflict);
if (ret < 0) {
@ -303,8 +298,6 @@ disable_port:
dln2_adc_set_port_enabled(dln2, false, NULL);
disable_chan:
dln2_adc_set_chan_enabled(dln2, channel, false);
release_direct:
iio_device_release_direct_mode(indio_dev);
return ret;
}
@ -340,10 +333,16 @@ static int dln2_adc_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
ret = iio_device_claim_direct_mode(indio_dev);
if (ret < 0)
return ret;
mutex_lock(&dln2->mutex);
ret = dln2_adc_read(dln2, chan->channel);
mutex_unlock(&dln2->mutex);
iio_device_release_direct_mode(indio_dev);
if (ret < 0)
return ret;
@ -669,7 +668,11 @@ static int dln2_adc_probe(struct platform_device *pdev)
return -ENOMEM;
}
iio_trigger_set_drvdata(dln2->trig, dln2);
devm_iio_trigger_register(dev, dln2->trig);
ret = devm_iio_trigger_register(dev, dln2->trig);
if (ret) {
dev_err(dev, "failed to register trigger: %d\n", ret);
return ret;
}
iio_trigger_set_immutable(indio_dev, dln2->trig);
ret = devm_iio_triggered_buffer_setup(dev, indio_dev, NULL,


@ -64,9 +64,9 @@ static irqreturn_t itg3200_trigger_handler(int irq, void *p)
iio_push_to_buffers_with_timestamp(indio_dev, &scan, pf->timestamp);
error_ret:
iio_trigger_notify_done(indio_dev->trig);
error_ret:
return IRQ_HANDLED;
}


@ -549,7 +549,6 @@ static struct iio_trigger *viio_trigger_alloc(const char *fmt, va_list vargs)
irq_modify_status(trig->subirq_base + i,
IRQ_NOREQUEST | IRQ_NOAUTOEN, IRQ_NOPROBE);
}
get_device(&trig->dev);
return trig;


@ -1275,7 +1275,7 @@ static irqreturn_t ltr501_trigger_handler(int irq, void *p)
ret = regmap_bulk_read(data->regmap, LTR501_ALS_DATA1,
(u8 *)als_buf, sizeof(als_buf));
if (ret < 0)
return ret;
goto done;
if (test_bit(0, indio_dev->active_scan_mask))
scan.channels[j++] = le16_to_cpu(als_buf[1]);
if (test_bit(1, indio_dev->active_scan_mask))


@ -545,9 +545,8 @@ static irqreturn_t stk3310_irq_event_handler(int irq, void *private)
mutex_lock(&data->lock);
ret = regmap_field_read(data->reg_flag_nf, &dir);
if (ret < 0) {
dev_err(&data->client->dev, "register read failed\n");
mutex_unlock(&data->lock);
return ret;
dev_err(&data->client->dev, "register read failed: %d\n", ret);
goto out;
}
event = IIO_UNMOD_EVENT_CODE(IIO_PROXIMITY, 1,
IIO_EV_TYPE_THRESH,
@ -559,6 +558,7 @@ static irqreturn_t stk3310_irq_event_handler(int irq, void *private)
ret = regmap_field_write(data->reg_flag_psint, 0);
if (ret < 0)
dev_err(&data->client->dev, "failed to reset interrupts\n");
out:
mutex_unlock(&data->lock);
return IRQ_HANDLED;


@ -884,6 +884,6 @@ static struct platform_driver stm32_timer_trigger_driver = {
};
module_platform_driver(stm32_timer_trigger_driver);
MODULE_ALIAS("platform: stm32-timer-trigger");
MODULE_ALIAS("platform:stm32-timer-trigger");
MODULE_DESCRIPTION("STMicroelectronics STM32 Timer Trigger driver");
MODULE_LICENSE("GPL v2");


@ -1146,7 +1146,7 @@ void hfi1_free_ctxtdata(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
rcd->egrbufs.rcvtids = NULL;
for (e = 0; e < rcd->egrbufs.alloced; e++) {
if (rcd->egrbufs.buffers[e].dma)
if (rcd->egrbufs.buffers[e].addr)
dma_free_coherent(&dd->pcidev->dev,
rcd->egrbufs.buffers[e].len,
rcd->egrbufs.buffers[e].addr,


@ -232,17 +232,13 @@ static int armada_370_xp_msi_alloc(struct irq_domain *domain, unsigned int virq,
int hwirq, i;
mutex_lock(&msi_used_lock);
hwirq = bitmap_find_next_zero_area(msi_used, PCI_MSI_DOORBELL_NR,
0, nr_irqs, 0);
if (hwirq >= PCI_MSI_DOORBELL_NR) {
mutex_unlock(&msi_used_lock);
return -ENOSPC;
}
bitmap_set(msi_used, hwirq, nr_irqs);
hwirq = bitmap_find_free_region(msi_used, PCI_MSI_DOORBELL_NR,
order_base_2(nr_irqs));
mutex_unlock(&msi_used_lock);
if (hwirq < 0)
return -ENOSPC;
for (i = 0; i < nr_irqs; i++) {
irq_domain_set_info(domain, virq + i, hwirq + i,
&armada_370_xp_msi_bottom_irq_chip,
@ -250,7 +246,7 @@ static int armada_370_xp_msi_alloc(struct irq_domain *domain, unsigned int virq,
NULL, NULL);
}
return hwirq;
return 0;
}
static void armada_370_xp_msi_free(struct irq_domain *domain,
@ -259,7 +255,7 @@ static void armada_370_xp_msi_free(struct irq_domain *domain,
struct irq_data *d = irq_domain_get_irq_data(domain, virq);
mutex_lock(&msi_used_lock);
bitmap_clear(msi_used, d->hwirq, nr_irqs);
bitmap_release_region(msi_used, d->hwirq, order_base_2(nr_irqs));
mutex_unlock(&msi_used_lock);
}


@ -581,7 +581,7 @@ static struct its_collection *its_build_invall_cmd(struct its_node *its,
its_fixup_cmd(cmd);
return NULL;
return desc->its_invall_cmd.col;
}
static struct its_vpe *its_build_vinvall_cmd(struct its_node *its,


@ -29,7 +29,7 @@
#define NVIC_ISER 0x000
#define NVIC_ICER 0x080
#define NVIC_IPR 0x300
#define NVIC_IPR 0x400
#define NVIC_MAX_BANKS 16
/*


@ -18,6 +18,7 @@
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
@ -700,6 +701,9 @@ static int fsmc_exec_op(struct nand_chip *chip, const struct nand_operation *op,
instr->ctx.waitrdy.timeout_ms);
break;
}
if (instr->delay_ns)
ndelay(instr->delay_ns);
}
return ret;


@ -1530,14 +1530,14 @@ void bond_alb_monitor(struct work_struct *work)
struct slave *slave;
if (!bond_has_slaves(bond)) {
bond_info->tx_rebalance_counter = 0;
atomic_set(&bond_info->tx_rebalance_counter, 0);
bond_info->lp_counter = 0;
goto re_arm;
}
rcu_read_lock();
bond_info->tx_rebalance_counter++;
atomic_inc(&bond_info->tx_rebalance_counter);
bond_info->lp_counter++;
/* send learning packets */
@ -1559,7 +1559,7 @@ void bond_alb_monitor(struct work_struct *work)
}
/* rebalance tx traffic */
if (bond_info->tx_rebalance_counter >= BOND_TLB_REBALANCE_TICKS) {
if (atomic_read(&bond_info->tx_rebalance_counter) >= BOND_TLB_REBALANCE_TICKS) {
bond_for_each_slave_rcu(bond, slave, iter) {
tlb_clear_slave(bond, slave, 1);
if (slave == rcu_access_pointer(bond->curr_active_slave)) {
@ -1569,7 +1569,7 @@ void bond_alb_monitor(struct work_struct *work)
bond_info->unbalanced_load = 0;
}
}
bond_info->tx_rebalance_counter = 0;
atomic_set(&bond_info->tx_rebalance_counter, 0);
}
if (bond_info->rlb_enabled) {
@ -1639,7 +1639,8 @@ int bond_alb_init_slave(struct bonding *bond, struct slave *slave)
tlb_init_slave(slave);
/* order a rebalance ASAP */
bond->alb_info.tx_rebalance_counter = BOND_TLB_REBALANCE_TICKS;
atomic_set(&bond->alb_info.tx_rebalance_counter,
BOND_TLB_REBALANCE_TICKS);
if (bond->alb_info.rlb_enabled)
bond->alb_info.rlb_rebalance = 1;
@ -1676,7 +1677,8 @@ void bond_alb_handle_link_change(struct bonding *bond, struct slave *slave, char
rlb_clear_slave(bond, slave);
} else if (link == BOND_LINK_UP) {
/* order a rebalance ASAP */
bond_info->tx_rebalance_counter = BOND_TLB_REBALANCE_TICKS;
atomic_set(&bond_info->tx_rebalance_counter,
BOND_TLB_REBALANCE_TICKS);
if (bond->alb_info.rlb_enabled) {
bond->alb_info.rlb_rebalance = 1;
/* If the updelay module parameter is smaller than the


@ -233,15 +233,15 @@ enum m_can_mram_cfg {
/* Interrupts for version 3.0.x */
#define IR_ERR_LEC_30X (IR_STE | IR_FOE | IR_ACKE | IR_BE | IR_CRCE)
#define IR_ERR_BUS_30X (IR_ERR_LEC_30X | IR_WDI | IR_ELO | IR_BEU | \
IR_BEC | IR_TOO | IR_MRAF | IR_TSW | IR_TEFL | \
IR_RF1L | IR_RF0L)
#define IR_ERR_BUS_30X (IR_ERR_LEC_30X | IR_WDI | IR_BEU | IR_BEC | \
IR_TOO | IR_MRAF | IR_TSW | IR_TEFL | IR_RF1L | \
IR_RF0L)
#define IR_ERR_ALL_30X (IR_ERR_STATE | IR_ERR_BUS_30X)
/* Interrupts for version >= 3.1.x */
#define IR_ERR_LEC_31X (IR_PED | IR_PEA)
#define IR_ERR_BUS_31X (IR_ERR_LEC_31X | IR_WDI | IR_ELO | IR_BEU | \
IR_BEC | IR_TOO | IR_MRAF | IR_TSW | IR_TEFL | \
IR_RF1L | IR_RF0L)
#define IR_ERR_BUS_31X (IR_ERR_LEC_31X | IR_WDI | IR_BEU | IR_BEC | \
IR_TOO | IR_MRAF | IR_TSW | IR_TEFL | IR_RF1L | \
IR_RF0L)
#define IR_ERR_ALL_31X (IR_ERR_STATE | IR_ERR_BUS_31X)
/* Interrupt Line Select (ILS) */
@ -769,8 +769,6 @@ static void m_can_handle_other_err(struct net_device *dev, u32 irqstatus)
{
if (irqstatus & IR_WDI)
netdev_err(dev, "Message RAM Watchdog event due to missing READY\n");
if (irqstatus & IR_ELO)
netdev_err(dev, "Error Logging Overflow\n");
if (irqstatus & IR_BEU)
netdev_err(dev, "Bit Error Uncorrected\n");
if (irqstatus & IR_BEC)


@ -703,11 +703,11 @@ static int pch_can_rx_normal(struct net_device *ndev, u32 obj_num, int quota)
cf->data[i + 1] = data_reg >> 8;
}
netif_receive_skb(skb);
rcv_pkts++;
stats->rx_packets++;
quota--;
stats->rx_bytes += cf->can_dlc;
netif_receive_skb(skb);
pch_fifo_thresh(priv, obj_num);
obj_num++;


@ -243,7 +243,12 @@ static int ems_pcmcia_add_card(struct pcmcia_device *pdev, unsigned long base)
free_sja1000dev(dev);
}
err = request_irq(dev->irq, &ems_pcmcia_interrupt, IRQF_SHARED,
if (!card->channels) {
err = -ENODEV;
goto failure_cleanup;
}
err = request_irq(pdev->irq, &ems_pcmcia_interrupt, IRQF_SHARED,
DRV_NAME, card);
if (!err)
return 0;


@ -28,10 +28,6 @@
#include "kvaser_usb.h"
/* Forward declaration */
static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg;
#define CAN_USB_CLOCK 8000000
#define MAX_USBCAN_NET_DEVICES 2
/* Command header size */
@ -80,6 +76,12 @@ static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg;
#define CMD_LEAF_LOG_MESSAGE 106
/* Leaf frequency options */
#define KVASER_USB_LEAF_SWOPTION_FREQ_MASK 0x60
#define KVASER_USB_LEAF_SWOPTION_FREQ_16_MHZ_CLK 0
#define KVASER_USB_LEAF_SWOPTION_FREQ_32_MHZ_CLK BIT(5)
#define KVASER_USB_LEAF_SWOPTION_FREQ_24_MHZ_CLK BIT(6)
/* error factors */
#define M16C_EF_ACKE BIT(0)
#define M16C_EF_CRCE BIT(1)
@ -340,6 +342,50 @@ struct kvaser_usb_err_summary {
};
};
static const struct can_bittiming_const kvaser_usb_leaf_bittiming_const = {
.name = "kvaser_usb",
.tseg1_min = KVASER_USB_TSEG1_MIN,
.tseg1_max = KVASER_USB_TSEG1_MAX,
.tseg2_min = KVASER_USB_TSEG2_MIN,
.tseg2_max = KVASER_USB_TSEG2_MAX,
.sjw_max = KVASER_USB_SJW_MAX,
.brp_min = KVASER_USB_BRP_MIN,
.brp_max = KVASER_USB_BRP_MAX,
.brp_inc = KVASER_USB_BRP_INC,
};
static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg_8mhz = {
.clock = {
.freq = 8000000,
},
.timestamp_freq = 1,
.bittiming_const = &kvaser_usb_leaf_bittiming_const,
};
static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg_16mhz = {
.clock = {
.freq = 16000000,
},
.timestamp_freq = 1,
.bittiming_const = &kvaser_usb_leaf_bittiming_const,
};
static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg_24mhz = {
.clock = {
.freq = 24000000,
},
.timestamp_freq = 1,
.bittiming_const = &kvaser_usb_leaf_bittiming_const,
};
static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg_32mhz = {
.clock = {
.freq = 32000000,
},
.timestamp_freq = 1,
.bittiming_const = &kvaser_usb_leaf_bittiming_const,
};
static void *
kvaser_usb_leaf_frame_to_cmd(const struct kvaser_usb_net_priv *priv,
const struct sk_buff *skb, int *frame_len,
@ -471,6 +517,27 @@ static int kvaser_usb_leaf_send_simple_cmd(const struct kvaser_usb *dev,
return rc;
}
static void kvaser_usb_leaf_get_software_info_leaf(struct kvaser_usb *dev,
const struct leaf_cmd_softinfo *softinfo)
{
u32 sw_options = le32_to_cpu(softinfo->sw_options);
dev->fw_version = le32_to_cpu(softinfo->fw_version);
dev->max_tx_urbs = le16_to_cpu(softinfo->max_outstanding_tx);
switch (sw_options & KVASER_USB_LEAF_SWOPTION_FREQ_MASK) {
case KVASER_USB_LEAF_SWOPTION_FREQ_16_MHZ_CLK:
dev->cfg = &kvaser_usb_leaf_dev_cfg_16mhz;
break;
case KVASER_USB_LEAF_SWOPTION_FREQ_24_MHZ_CLK:
dev->cfg = &kvaser_usb_leaf_dev_cfg_24mhz;
break;
case KVASER_USB_LEAF_SWOPTION_FREQ_32_MHZ_CLK:
dev->cfg = &kvaser_usb_leaf_dev_cfg_32mhz;
break;
}
}
static int kvaser_usb_leaf_get_software_info_inner(struct kvaser_usb *dev)
{
struct kvaser_cmd cmd;
@ -486,14 +553,13 @@ static int kvaser_usb_leaf_get_software_info_inner(struct kvaser_usb *dev)
switch (dev->card_data.leaf.family) {
case KVASER_LEAF:
dev->fw_version = le32_to_cpu(cmd.u.leaf.softinfo.fw_version);
dev->max_tx_urbs =
le16_to_cpu(cmd.u.leaf.softinfo.max_outstanding_tx);
kvaser_usb_leaf_get_software_info_leaf(dev, &cmd.u.leaf.softinfo);
break;
case KVASER_USBCAN:
dev->fw_version = le32_to_cpu(cmd.u.usbcan.softinfo.fw_version);
dev->max_tx_urbs =
le16_to_cpu(cmd.u.usbcan.softinfo.max_outstanding_tx);
dev->cfg = &kvaser_usb_leaf_dev_cfg_8mhz;
break;
}
@ -1225,24 +1291,11 @@ static int kvaser_usb_leaf_init_card(struct kvaser_usb *dev)
{
struct kvaser_usb_dev_card_data *card_data = &dev->card_data;
dev->cfg = &kvaser_usb_leaf_dev_cfg;
card_data->ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
return 0;
}
static const struct can_bittiming_const kvaser_usb_leaf_bittiming_const = {
.name = "kvaser_usb",
.tseg1_min = KVASER_USB_TSEG1_MIN,
.tseg1_max = KVASER_USB_TSEG1_MAX,
.tseg2_min = KVASER_USB_TSEG2_MIN,
.tseg2_max = KVASER_USB_TSEG2_MAX,
.sjw_max = KVASER_USB_SJW_MAX,
.brp_min = KVASER_USB_BRP_MIN,
.brp_max = KVASER_USB_BRP_MAX,
.brp_inc = KVASER_USB_BRP_INC,
};
static int kvaser_usb_leaf_set_bittiming(struct net_device *netdev)
{
struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
@ -1348,11 +1401,3 @@ const struct kvaser_usb_dev_ops kvaser_usb_leaf_dev_ops = {
.dev_read_bulk_callback = kvaser_usb_leaf_read_bulk_callback,
.dev_frame_to_cmd = kvaser_usb_leaf_frame_to_cmd,
};
static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg = {
.clock = {
.freq = CAN_USB_CLOCK,
},
.timestamp_freq = 1,
.bittiming_const = &kvaser_usb_leaf_bittiming_const,
};


@ -1445,16 +1445,19 @@ static int altera_tse_probe(struct platform_device *pdev)
priv->rxdescmem_busaddr = dma_res->start;
} else {
ret = -ENODEV;
goto err_free_netdev;
}
if (!dma_set_mask(priv->device, DMA_BIT_MASK(priv->dmaops->dmamask)))
if (!dma_set_mask(priv->device, DMA_BIT_MASK(priv->dmaops->dmamask))) {
dma_set_coherent_mask(priv->device,
DMA_BIT_MASK(priv->dmaops->dmamask));
else if (!dma_set_mask(priv->device, DMA_BIT_MASK(32)))
} else if (!dma_set_mask(priv->device, DMA_BIT_MASK(32))) {
dma_set_coherent_mask(priv->device, DMA_BIT_MASK(32));
else
} else {
ret = -EIO;
goto err_free_netdev;
}
/* MAC address space */
ret = request_and_map(pdev, "control_port", &control_port,


@ -373,6 +373,9 @@ struct bufdesc_ex {
#define FEC_ENET_WAKEUP ((uint)0x00020000) /* Wakeup request */
#define FEC_ENET_TXF (FEC_ENET_TXF_0 | FEC_ENET_TXF_1 | FEC_ENET_TXF_2)
#define FEC_ENET_RXF (FEC_ENET_RXF_0 | FEC_ENET_RXF_1 | FEC_ENET_RXF_2)
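/* Map a queue id (0, 1 or 2) to the RXF interrupt bit for that queue. */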
#define FEC_ENET_RXF_GET(X) (((X) == 0) ? FEC_ENET_RXF_0 : \
(((X) == 1) ? FEC_ENET_RXF_1 : \
FEC_ENET_RXF_2))
#define FEC_ENET_TS_AVAIL ((uint)0x00010000)
#define FEC_ENET_TS_TIMER ((uint)0x00008000)


@ -1441,7 +1441,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
break;
pkt_received++;
writel(FEC_ENET_RXF, fep->hwp + FEC_IEVENT);
writel(FEC_ENET_RXF_GET(queue_id), fep->hwp + FEC_IEVENT);
/* Check for errors. */
status ^= BD_ENET_RX_LAST;


@ -3580,11 +3580,6 @@ static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg)
/* set this flag only after making sure all inputs are sane */
vf->adq_enabled = true;
/* num_req_queues is set when user changes number of queues via ethtool
* and this causes issue for default VSI(which depends on this variable)
* when ADq is enabled, hence reset it.
*/
vf->num_req_queues = 0;
/* reset the VF in order to allocate resources */
i40e_vc_notify_vf_reset(vf);


@ -4404,6 +4404,9 @@ static int ice_up_complete(struct ice_vsi *vsi)
netif_carrier_on(vsi->netdev);
}
/* clear this now, and the first stats read will be used as baseline */
vsi->stat_offsets_loaded = false;
ice_service_task_schedule(pf);
return err;


@ -803,8 +803,10 @@ int nfp_cpp_area_cache_add(struct nfp_cpp *cpp, size_t size)
return -ENOMEM;
cache = kzalloc(sizeof(*cache), GFP_KERNEL);
if (!cache)
if (!cache) {
nfp_cpp_area_free(area);
return -ENOMEM;
}
cache->id = 0;
cache->addr = 0;


@ -1606,6 +1606,13 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev)
data_split = true;
}
} else {
if (unlikely(skb->len > ETH_TX_MAX_NON_LSO_PKT_LEN)) {
DP_ERR(edev, "Unexpected non LSO skb length = 0x%x\n", skb->len);
qede_free_failed_tx_pkt(txq, first_bd, 0, false);
qede_update_tx_producer(txq);
return NETDEV_TX_OK;
}
val |= ((skb->len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) <<
ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT);
}


@ -3496,20 +3496,19 @@ static int ql_adapter_up(struct ql3_adapter *qdev)
spin_lock_irqsave(&qdev->hw_lock, hw_flags);
err = ql_wait_for_drvr_lock(qdev);
if (err) {
err = ql_adapter_initialize(qdev);
if (err) {
netdev_err(ndev, "Unable to initialize adapter\n");
goto err_init;
}
netdev_err(ndev, "Releasing driver lock\n");
ql_sem_unlock(qdev, QL_DRVR_SEM_MASK);
} else {
if (!ql_wait_for_drvr_lock(qdev)) {
netdev_err(ndev, "Could not acquire driver lock\n");
err = -ENODEV;
goto err_lock;
}
err = ql_adapter_initialize(qdev);
if (err) {
netdev_err(ndev, "Unable to initialize adapter\n");
goto err_init;
}
ql_sem_unlock(qdev, QL_DRVR_SEM_MASK);
spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
set_bit(QL_ADAPTER_UP, &qdev->flags);


@ -177,6 +177,8 @@ static u32 cdc_ncm_check_tx_max(struct usbnet *dev, u32 new_tx)
/* clamp new_tx to sane values */
min = ctx->max_datagram_size + ctx->max_ndp_size + sizeof(struct usb_cdc_ncm_nth16);
max = min_t(u32, CDC_NCM_NTB_MAX_SIZE_TX, le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize));
if (max == 0)
max = CDC_NCM_NTB_MAX_SIZE_TX; /* dwNtbOutMaxSize not set */
/* some devices set dwNtbOutMaxSize too low for the above default */
min = min(min, max);


@ -409,7 +409,7 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno,
* the USB-2 spec requires such endpoints to have wMaxPacketSize = 0
* (see the end of section 5.6.3), so don't warn about them.
*/
maxp = usb_endpoint_maxp(&endpoint->desc);
maxp = le16_to_cpu(endpoint->desc.wMaxPacketSize);
if (maxp == 0 && !(usb_endpoint_xfer_isoc(d) && asnum == 0)) {
dev_warn(ddev, "config %d interface %d altsetting %d endpoint 0x%X has invalid wMaxPacketSize 0\n",
cfgno, inum, asnum, d->bEndpointAddress);
@ -425,9 +425,9 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno,
maxpacket_maxes = full_speed_maxpacket_maxes;
break;
case USB_SPEED_HIGH:
/* Bits 12..11 are allowed only for HS periodic endpoints */
/* Multiple-transactions bits are allowed only for HS periodic endpoints */
if (usb_endpoint_xfer_int(d) || usb_endpoint_xfer_isoc(d)) {
i = maxp & (BIT(12) | BIT(11));
i = maxp & USB_EP_MAXP_MULT_MASK;
maxp &= ~i;
}
/* fallthrough */


@ -625,6 +625,7 @@ static int xhci_enter_test_mode(struct xhci_hcd *xhci,
continue;
retval = xhci_disable_slot(xhci, i);
xhci_free_virt_device(xhci, i);
if (retval)
xhci_err(xhci, "Failed to disable slot %d, %d. Enter test mode anyway\n",
i, retval);


@ -1236,7 +1236,6 @@ static void xhci_handle_cmd_disable_slot(struct xhci_hcd *xhci, int slot_id)
if (xhci->quirks & XHCI_EP_LIMIT_QUIRK)
/* Delete default control endpoint resources */
xhci_free_device_endpoint_resources(xhci, virt_dev, true);
xhci_free_virt_device(xhci, slot_id);
}
static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id,


@ -3810,7 +3810,6 @@ static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
struct xhci_slot_ctx *slot_ctx;
int i, ret;
#ifndef CONFIG_USB_DEFAULT_PERSIST
/*
* We called pm_runtime_get_noresume when the device was attached.
* Decrement the counter here to allow controller to runtime suspend
@ -3818,7 +3817,6 @@ static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
*/
if (xhci->quirks & XHCI_RESET_ON_RESUME)
pm_runtime_put_noidle(hcd->self.controller);
#endif
ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
/* If the host is halted due to driver unload, we still need to free the
@ -3838,9 +3836,8 @@ static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
}
xhci_debugfs_remove_slot(xhci, udev->slot_id);
virt_dev->udev = NULL;
ret = xhci_disable_slot(xhci, udev->slot_id);
if (ret)
xhci_free_virt_device(xhci, udev->slot_id);
xhci_disable_slot(xhci, udev->slot_id);
xhci_free_virt_device(xhci, udev->slot_id);
}
int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id)
@ -3850,7 +3847,7 @@ int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id)
u32 state;
int ret = 0;
command = xhci_alloc_command(xhci, false, GFP_KERNEL);
command = xhci_alloc_command(xhci, true, GFP_KERNEL);
if (!command)
return -ENOMEM;
@ -3873,6 +3870,15 @@ int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id)
}
xhci_ring_cmd_db(xhci);
spin_unlock_irqrestore(&xhci->lock, flags);
wait_for_completion(command->completion);
if (command->status != COMP_SUCCESS)
xhci_warn(xhci, "Unsuccessful disable slot %u command, status %d\n",
slot_id, command->status);
xhci_free_command(xhci, command);
return ret;
}
@ -3969,23 +3975,20 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
xhci_debugfs_create_slot(xhci, slot_id);
#ifndef CONFIG_USB_DEFAULT_PERSIST
/*
* If resetting upon resume, we can't put the controller into runtime
* suspend if there is a device attached.
*/
if (xhci->quirks & XHCI_RESET_ON_RESUME)
pm_runtime_get_noresume(hcd->self.controller);
#endif
/* Is this a LS or FS device under a HS hub? */
/* Hub or peripheral? */
return 1;
disable_slot:
ret = xhci_disable_slot(xhci, udev->slot_id);
if (ret)
xhci_free_virt_device(xhci, udev->slot_id);
xhci_disable_slot(xhci, udev->slot_id);
xhci_free_virt_device(xhci, udev->slot_id);
return 0;
}
@ -4114,6 +4117,7 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
mutex_unlock(&xhci->mutex);
ret = xhci_disable_slot(xhci, udev->slot_id);
xhci_free_virt_device(xhci, udev->slot_id);
if (!ret)
xhci_alloc_dev(hcd, udev);
kfree(command->completion);

fs/aio.c (184 lines changed)

@ -176,8 +176,9 @@ struct poll_iocb {
struct file *file;
struct wait_queue_head *head;
__poll_t events;
bool done;
bool cancelled;
bool work_scheduled;
bool work_need_resched;
struct wait_queue_entry wait;
struct work_struct work;
};
@ -1616,6 +1617,51 @@ static void aio_poll_put_work(struct work_struct *work)
iocb_put(iocb);
}
/*
* Safely lock the waitqueue which the request is on, synchronizing with the
* case where the ->poll() provider decides to free its waitqueue early.
*
* Returns true on success, meaning that req->head->lock was locked, req->wait
* is on req->head, and an RCU read lock was taken. Returns false if the
* request was already removed from its waitqueue (which might no longer exist).
*/
static bool poll_iocb_lock_wq(struct poll_iocb *req)
{
wait_queue_head_t *head;
/*
* While we hold the waitqueue lock and the waitqueue is nonempty,
* wake_up_pollfree() will wait for us. However, taking the waitqueue
* lock in the first place can race with the waitqueue being freed.
*
* We solve this as eventpoll does: by taking advantage of the fact that
* all users of wake_up_pollfree() will RCU-delay the actual free. If
* we enter rcu_read_lock() and see that the pointer to the queue is
* non-NULL, we can then lock it without the memory being freed out from
* under us, then check whether the request is still on the queue.
*
* Keep holding rcu_read_lock() as long as we hold the queue lock, in
* case the caller deletes the entry from the queue, leaving it empty.
* In that case, only RCU prevents the queue memory from being freed.
*/
rcu_read_lock();
head = smp_load_acquire(&req->head);
if (head) {
spin_lock(&head->lock);
if (!list_empty(&req->wait.entry))
return true;
spin_unlock(&head->lock);
}
rcu_read_unlock();
return false;
}
static void poll_iocb_unlock_wq(struct poll_iocb *req)
{
spin_unlock(&req->head->lock);
rcu_read_unlock();
}
static void aio_poll_complete_work(struct work_struct *work)
{
struct poll_iocb *req = container_of(work, struct poll_iocb, work);
@ -1635,14 +1681,27 @@ static void aio_poll_complete_work(struct work_struct *work)
* avoid further branches in the fast path.
*/
spin_lock_irq(&ctx->ctx_lock);
if (!mask && !READ_ONCE(req->cancelled)) {
add_wait_queue(req->head, &req->wait);
spin_unlock_irq(&ctx->ctx_lock);
return;
}
if (poll_iocb_lock_wq(req)) {
if (!mask && !READ_ONCE(req->cancelled)) {
/*
* The request isn't actually ready to be completed yet.
* Reschedule completion if another wakeup came in.
*/
if (req->work_need_resched) {
schedule_work(&req->work);
req->work_need_resched = false;
} else {
req->work_scheduled = false;
}
poll_iocb_unlock_wq(req);
spin_unlock_irq(&ctx->ctx_lock);
return;
}
list_del_init(&req->wait.entry);
poll_iocb_unlock_wq(req);
} /* else, POLLFREE has freed the waitqueue, so we must complete */
list_del_init(&iocb->ki_list);
iocb->ki_res.res = mangle_poll(mask);
req->done = true;
spin_unlock_irq(&ctx->ctx_lock);
iocb_put(iocb);
@ -1654,13 +1713,14 @@ static int aio_poll_cancel(struct kiocb *iocb)
struct aio_kiocb *aiocb = container_of(iocb, struct aio_kiocb, rw);
struct poll_iocb *req = &aiocb->poll;
spin_lock(&req->head->lock);
WRITE_ONCE(req->cancelled, true);
if (!list_empty(&req->wait.entry)) {
list_del_init(&req->wait.entry);
schedule_work(&aiocb->poll.work);
}
spin_unlock(&req->head->lock);
if (poll_iocb_lock_wq(req)) {
WRITE_ONCE(req->cancelled, true);
if (!req->work_scheduled) {
schedule_work(&aiocb->poll.work);
req->work_scheduled = true;
}
poll_iocb_unlock_wq(req);
} /* else, the request was force-cancelled by POLLFREE already */
return 0;
}
@ -1677,20 +1737,26 @@ static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
if (mask && !(mask & req->events))
return 0;
list_del_init(&req->wait.entry);
if (mask && spin_trylock_irqsave(&iocb->ki_ctx->ctx_lock, flags)) {
/*
* Complete the request inline if possible. This requires that three
* conditions be met:
* 1. An event mask must have been passed. If a plain wakeup was done
* instead, then mask == 0 and we have to call vfs_poll() to get
* the events, so inline completion isn't possible.
* 2. The completion work must not have already been scheduled.
* 3. ctx_lock must not be busy. We have to use trylock because we
* already hold the waitqueue lock, so this inverts the normal
* locking order. Use irqsave/irqrestore because not all
* filesystems (e.g. fuse) call this function with IRQs disabled,
* yet IRQs have to be disabled before ctx_lock is obtained.
*/
if (mask && !req->work_scheduled &&
spin_trylock_irqsave(&iocb->ki_ctx->ctx_lock, flags)) {
struct kioctx *ctx = iocb->ki_ctx;
/*
* Try to complete the iocb inline if we can. Use
* irqsave/irqrestore because not all filesystems (e.g. fuse)
* call this function with IRQs disabled and because IRQs
* have to be disabled before ctx_lock is obtained.
*/
list_del_init(&req->wait.entry);
list_del(&iocb->ki_list);
iocb->ki_res.res = mangle_poll(mask);
req->done = true;
if (iocb->ki_eventfd && eventfd_signal_count()) {
iocb = NULL;
INIT_WORK(&req->work, aio_poll_put_work);
@ -1700,7 +1766,43 @@ static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
if (iocb)
iocb_put(iocb);
} else {
schedule_work(&req->work);
/*
* Schedule the completion work if needed. If it was already
* scheduled, record that another wakeup came in.
*
* Don't remove the request from the waitqueue here, as it might
* not actually be complete yet (we won't know until vfs_poll()
* is called), and we must not miss any wakeups. POLLFREE is an
* exception to this; see below.
*/
if (req->work_scheduled) {
req->work_need_resched = true;
} else {
schedule_work(&req->work);
req->work_scheduled = true;
}
/*
* If the waitqueue is being freed early but we can't complete
* the request inline, we have to tear down the request as best
* we can. That means immediately removing the request from its
* waitqueue and preventing all further accesses to the
* waitqueue via the request. We also need to schedule the
* completion work (done above). Also mark the request as
* cancelled, to potentially skip an unneeded call to ->poll().
*/
if (mask & POLLFREE) {
WRITE_ONCE(req->cancelled, true);
list_del_init(&req->wait.entry);
/*
* Careful: this *must* be the last step, since as soon
* as req->head is NULL'ed out, the request can be
* completed and freed, since aio_poll_complete_work()
* will no longer need to take the waitqueue lock.
*/
smp_store_release(&req->head, NULL);
}
}
return 1;
}
@ -1708,6 +1810,7 @@ static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
struct aio_poll_table {
struct poll_table_struct pt;
struct aio_kiocb *iocb;
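/* set once the request has been queued on a waitqueue */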
bool queued;
int error;
};
@ -1718,11 +1821,12 @@ aio_poll_queue_proc(struct file *file, struct wait_queue_head *head,
struct aio_poll_table *pt = container_of(p, struct aio_poll_table, pt);
/* multiple wait queues per file are not supported */
if (unlikely(pt->iocb->poll.head)) {
if (unlikely(pt->queued)) {
pt->error = -EINVAL;
return;
}
pt->queued = true;
pt->error = 0;
pt->iocb->poll.head = head;
add_wait_queue(head, &pt->iocb->poll.wait);
@ -1747,12 +1851,14 @@ static ssize_t aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb)
req->events = demangle_poll(iocb->aio_buf) | EPOLLERR | EPOLLHUP;
req->head = NULL;
req->done = false;
req->cancelled = false;
req->work_scheduled = false;
req->work_need_resched = false;
apt.pt._qproc = aio_poll_queue_proc;
apt.pt._key = req->events;
apt.iocb = aiocb;
apt.queued = false;
apt.error = -EINVAL; /* same as no support for IOCB_CMD_POLL */
/* initialized the list so that we can do list_empty checks */
@ -1761,23 +1867,35 @@ static ssize_t aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb)
mask = vfs_poll(req->file, &apt.pt) & req->events;
spin_lock_irq(&ctx->ctx_lock);
if (likely(req->head)) {
spin_lock(&req->head->lock);
if (unlikely(list_empty(&req->wait.entry))) {
if (apt.error)
if (likely(apt.queued)) {
bool on_queue = poll_iocb_lock_wq(req);
if (!on_queue || req->work_scheduled) {
/*
* aio_poll_wake() already either scheduled the async
* completion work, or completed the request inline.
*/
if (apt.error) /* unsupported case: multiple queues */
cancel = true;
apt.error = 0;
mask = 0;
}
if (mask || apt.error) {
/* Steal to complete synchronously. */
list_del_init(&req->wait.entry);
} else if (cancel) {
/* Cancel if possible (may be too late though). */
WRITE_ONCE(req->cancelled, true);
} else if (!req->done) { /* actually waiting for an event */
} else if (on_queue) {
/*
* Actually waiting for an event, so add the request to
* active_reqs so that it can be cancelled if needed.
*/
list_add_tail(&aiocb->ki_list, &ctx->active_reqs);
aiocb->ki_cancel = aio_poll_cancel;
}
spin_unlock(&req->head->lock);
if (on_queue)
poll_iocb_unlock_wq(req);
}
if (mask) { /* no async, we'd stolen it */
aiocb->ki_res.res = mangle_poll(mask);


@ -35,17 +35,7 @@
void signalfd_cleanup(struct sighand_struct *sighand)
{
wait_queue_head_t *wqh = &sighand->signalfd_wqh;
/*
* The lockless check can race with remove_wait_queue() in progress,
* but in this case its caller should run under rcu_read_lock() and
* sighand_cachep is SLAB_TYPESAFE_BY_RCU, we can safely return.
*/
if (likely(!waitqueue_active(wqh)))
return;
/* wait_queue_entry_t->func(POLLFREE) should do remove_wait_queue() */
wake_up_poll(wqh, EPOLLHUP | POLLFREE);
wake_up_pollfree(&sighand->signalfd_wqh);
}
struct signalfd_ctx {


@ -162,6 +162,77 @@ struct tracefs_fs_info {
struct tracefs_mount_opts mount_opts;
};
static void change_gid(struct dentry *dentry, kgid_t gid)
{
if (!dentry->d_inode)
return;
dentry->d_inode->i_gid = gid;
}
/*
* Taken from d_walk, but without the need for handling renames.
* Nothing can be renamed while walking the list, as tracefs
* does not support renames. This is only called when mounting
* or remounting the file system, to set all the files to
* the given gid.
*/
static void set_gid(struct dentry *parent, kgid_t gid)
{
struct dentry *this_parent;
struct list_head *next;
this_parent = parent;
spin_lock(&this_parent->d_lock);
change_gid(this_parent, gid);
repeat:
next = this_parent->d_subdirs.next;
resume:
while (next != &this_parent->d_subdirs) {
struct list_head *tmp = next;
struct dentry *dentry = list_entry(tmp, struct dentry, d_child);
next = tmp->next;
spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
change_gid(dentry, gid);
if (!list_empty(&dentry->d_subdirs)) {
spin_unlock(&this_parent->d_lock);
spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_);
this_parent = dentry;
spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
goto repeat;
}
spin_unlock(&dentry->d_lock);
}
/*
* All done at this level ... ascend and resume the search.
*/
rcu_read_lock();
ascend:
if (this_parent != parent) {
struct dentry *child = this_parent;
this_parent = child->d_parent;
spin_unlock(&child->d_lock);
spin_lock(&this_parent->d_lock);
/* go into the first sibling still alive */
do {
next = child->d_child.next;
if (next == &this_parent->d_subdirs)
goto ascend;
child = list_entry(next, struct dentry, d_child);
} while (unlikely(child->d_flags & DCACHE_DENTRY_KILLED));
rcu_read_unlock();
goto resume;
}
rcu_read_unlock();
spin_unlock(&this_parent->d_lock);
return;
}
static int tracefs_parse_options(char *data, struct tracefs_mount_opts *opts)
{
substring_t args[MAX_OPT_ARGS];
@ -194,6 +265,7 @@ static int tracefs_parse_options(char *data, struct tracefs_mount_opts *opts)
if (!gid_valid(gid))
return -EINVAL;
opts->gid = gid;
set_gid(tracefs_mount->mnt_root, gid);
break;
case Opt_mode:
if (match_octal(&args[0], &option))
@ -409,6 +481,8 @@ struct dentry *tracefs_create_file(const char *name, umode_t mode,
inode->i_mode = mode;
inode->i_fop = fops ? fops : &tracefs_file_operations;
inode->i_private = data;
inode->i_uid = d_inode(dentry->d_parent)->i_uid;
inode->i_gid = d_inode(dentry->d_parent)->i_gid;
d_instantiate(dentry, inode);
fsnotify_create(dentry->d_parent->d_inode, dentry);
return end_creating(dentry);
@ -431,6 +505,8 @@ static struct dentry *__create_dir(const char *name, struct dentry *parent,
inode->i_mode = S_IFDIR | S_IRWXU | S_IRUSR| S_IRGRP | S_IXUSR | S_IXGRP;
inode->i_op = ops;
inode->i_fop = &simple_dir_operations;
inode->i_uid = d_inode(dentry->d_parent)->i_uid;
inode->i_gid = d_inode(dentry->d_parent)->i_gid;
/* directory inodes start off with i_nlink == 2 (for "." entry) */
inc_nlink(inode);
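
Taken together, these tracefs hunks make newly created files and directories inherit i_uid/i_gid from their parent inode, while set_gid() above retroactively applies a gid= mount option to files that already exist. As a hedged usage sketch, assuming the usual mount point, a remount such as 'mount -o remount,gid=<gid> /sys/kernel/tracing' now re-owns the whole tree rather than only the mount root.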


@@ -834,6 +834,11 @@ static inline bool hid_is_using_ll_driver(struct hid_device *hdev,
return hdev->ll_driver == driver;
}
static inline bool hid_is_usb(struct hid_device *hdev)
{
return hid_is_using_ll_driver(hdev, &usb_hid_driver);
}
#define PM_HINT_FULLON 1<<5
#define PM_HINT_NORMAL 1<<1
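
hid_is_usb() gives HID drivers that assume a USB transport a safe test before casting the parent device to a USB interface. A hedged sketch of the call pattern (everything except hid_is_usb() and to_usb_interface() is illustrative; the real conversions are in the individual HID driver patches of this series):

	static int example_probe(struct hid_device *hdev,
				 const struct hid_device_id *id)
	{
		struct usb_interface *intf;

		if (!hid_is_usb(hdev))
			return -EINVAL;	/* not USB-backed: refuse to bind */

		intf = to_usb_interface(hdev->dev.parent);	/* now a valid cast */
		/* ... USB-specific setup ... */
		return 0;
	}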


@@ -6,6 +6,7 @@
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/wait.h>
#include <linux/refcount.h>
#include <uapi/linux/rtnetlink.h>
extern int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, u32 group, int echo);
@@ -34,6 +35,7 @@ extern void rtnl_unlock(void);
extern int rtnl_trylock(void);
extern int rtnl_is_locked(void);
extern int rtnl_lock_killable(void);
extern bool refcount_dec_and_rtnl_lock(refcount_t *r);
extern wait_queue_head_t netdev_unregistering_wq;
extern struct rw_semaphore pernet_ops_rwsem;
@@ -83,6 +85,11 @@ static inline struct netdev_queue *dev_ingress_queue(struct net_device *dev)
return rtnl_dereference(dev->ingress_queue);
}
static inline struct netdev_queue *dev_ingress_queue_rcu(struct net_device *dev)
{
return rcu_dereference(dev->ingress_queue);
}
struct netdev_queue *dev_ingress_queue_create(struct net_device *dev);
#ifdef CONFIG_NET_INGRESS


@@ -192,6 +192,7 @@ void __wake_up_locked_key_bookmark(struct wait_queue_head *wq_head,
void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode, int nr, void *key);
void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr);
void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode, int nr);
void __wake_up_pollfree(struct wait_queue_head *wq_head);
#define wake_up(x) __wake_up(x, TASK_NORMAL, 1, NULL)
#define wake_up_nr(x, nr) __wake_up(x, TASK_NORMAL, nr, NULL)
@@ -218,6 +219,31 @@ void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode, int nr);
#define wake_up_interruptible_sync_poll(x, m) \
__wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, poll_to_key(m))
/**
* wake_up_pollfree - signal that a polled waitqueue is going away
* @wq_head: the wait queue head
*
* In the very rare cases where a ->poll() implementation uses a waitqueue whose
* lifetime is tied to a task rather than to the 'struct file' being polled,
* this function must be called before the waitqueue is freed so that
* non-blocking polls (e.g. epoll) are notified that the queue is going away.
*
* The caller must also RCU-delay the freeing of the wait_queue_head, e.g. via
* an explicit synchronize_rcu() or call_rcu(), or via SLAB_TYPESAFE_BY_RCU.
*/
static inline void wake_up_pollfree(struct wait_queue_head *wq_head)
{
/*
* For performance reasons, we don't always take the queue lock here.
* Therefore, we might race with someone removing the last entry from
* the queue, and proceed while they still hold the queue lock.
* However, rcu_read_lock() is required to be held in such cases, so we
* can safely proceed with an RCU-delayed free.
*/
if (waitqueue_active(wq_head))
__wake_up_pollfree(wq_head);
}
#define ___wait_cond_timeout(condition) \
({ \
bool __cond = (condition); \
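
A hedged sketch of the calling convention the kerneldoc above describes, with a hypothetical context object (binder, signalfd and aio are the real users converted in this release):

	struct example_ctx {			/* hypothetical */
		wait_queue_head_t wqh;		/* handed out via ->poll() */
		struct rcu_head rcu;
	};

	static void example_ctx_destroy(struct example_ctx *ctx)
	{
		/* force any epoll/aio waiters off the queue first ... */
		wake_up_pollfree(&ctx->wqh);
		/* ... then RCU-delay the free, as the comment requires */
		kfree_rcu(ctx, rcu);
	}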


@@ -142,7 +142,7 @@ struct tlb_slave_info {
 struct alb_bond_info {
 	struct tlb_client_info *tx_hashtbl; /* Dynamically allocated */
 	u32 unbalanced_load;
-	int tx_rebalance_counter;
+	atomic_t tx_rebalance_counter;
 	int lp_counter;
 	/* -------- rlb parameters -------- */
 	int rlb_enabled;
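
With tx_rebalance_counter now an atomic_t, the accounting in bond_alb.c (whose hunks are not shown in this view) has to move from plain int arithmetic to atomic operations. A hedged, illustrative sketch of that conversion (field name as in the struct above, values illustrative):

	/* arm the counter without taking a lock */
	atomic_set(&bond_info->tx_rebalance_counter, 0);

	/* periodic work: read and modify safely against concurrent writers */
	if (atomic_read(&bond_info->tx_rebalance_counter) > 0)
		atomic_dec(&bond_info->tx_rebalance_counter);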


@@ -103,6 +103,7 @@ int qdisc_set_default(const char *id);
void qdisc_hash_add(struct Qdisc *q, bool invisible);
void qdisc_hash_del(struct Qdisc *q);
struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle);
struct Qdisc *qdisc_lookup_rcu(struct net_device *dev, u32 handle);
struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
struct nlattr *tab,
struct netlink_ext_ack *extack);


@@ -108,6 +108,7 @@ struct Qdisc {
spinlock_t busylock ____cacheline_aligned_in_smp;
spinlock_t seqlock;
struct rcu_head rcu;
};
static inline void qdisc_refcount_inc(struct Qdisc *qdisc)
@@ -117,6 +118,19 @@ static inline void qdisc_refcount_inc(struct Qdisc *qdisc)
refcount_inc(&qdisc->refcnt);
}
/* Intended to be used by unlocked users, when concurrent qdisc release is
* possible.
*/
static inline struct Qdisc *qdisc_refcount_inc_nz(struct Qdisc *qdisc)
{
if (qdisc->flags & TCQ_F_BUILTIN)
return qdisc;
if (refcount_inc_not_zero(&qdisc->refcnt))
return qdisc;
return NULL;
}
static inline bool qdisc_is_running(struct Qdisc *qdisc)
{
if (qdisc->flags & TCQ_F_NOLOCK)
@@ -555,7 +569,8 @@ void dev_deactivate_many(struct list_head *head);
 struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
 			      struct Qdisc *qdisc);
 void qdisc_reset(struct Qdisc *qdisc);
-void qdisc_destroy(struct Qdisc *qdisc);
+void qdisc_put(struct Qdisc *qdisc);
+void qdisc_put_unlocked(struct Qdisc *qdisc);
 void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, unsigned int n,
 			       unsigned int len);
 struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
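
Together with the rcu head added to struct Qdisc above, these helpers let control-path code take a qdisc reference without holding the rtnl mutex. A hedged sketch of the lookup pattern this enables (tcf_block_find() later in this diff is the in-tree user; 'dev' and 'handle' are illustrative):

	rcu_read_lock();
	q = qdisc_lookup_rcu(dev, handle);	/* added earlier in this diff */
	if (q)
		q = qdisc_refcount_inc_nz(q);	/* NULL if a concurrent release won */
	rcu_read_unlock();
	if (q) {
		/* ... use q ... */
		qdisc_put_unlocked(q);		/* takes rtnl only on the final ref */
	}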


@@ -29,7 +29,7 @@
 #define POLLRDHUP 0x2000
 #endif
 
-#define POLLFREE (__force __poll_t)0x4000 /* currently only for epoll */
+#define POLLFREE (__force __poll_t)0x4000
 
 #define POLL_BUSY_LOOP (__force __poll_t)0x8000


@@ -3761,7 +3761,7 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *vstate,
 	new_range = dst_reg->off;
 	if (range_right_open)
-		new_range--;
+		new_range++;
 
 	/* Examples for register markings:
	 *


@@ -209,6 +209,13 @@ void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode, int nr_e
}
EXPORT_SYMBOL_GPL(__wake_up_sync); /* For internal use only */
void __wake_up_pollfree(struct wait_queue_head *wq_head)
{
__wake_up(wq_head, TASK_NORMAL, 0, poll_to_key(EPOLLHUP | POLLFREE));
/* POLLFREE must have cleared the queue. */
WARN_ON_ONCE(waitqueue_active(wq_head));
}
/*
* Note: we use "set_current_state()" _after_ the wait-queue add,
* because we need a memory barrier there on SMP, so that any


@@ -945,6 +945,13 @@ void bdi_unregister(struct backing_dev_info *bdi)
wb_shutdown(&bdi->wb);
cgwb_bdi_unregister(bdi);
/*
* If this BDI's min ratio has been set, use bdi_set_min_ratio() to
* update the global bdi_min_ratio.
*/
if (bdi->min_ratio)
bdi_set_min_ratio(bdi, 0);
if (bdi->dev) {
bdi_debug_unregister(bdi);
device_unregister(bdi->dev);


@@ -635,7 +635,7 @@ struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl,
 
 	ASSERT_RTNL();
 
-	n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL);
+	n = kzalloc(sizeof(*n) + key_len, GFP_KERNEL);
 	if (!n)
 		goto out;


@@ -130,6 +130,12 @@ int rtnl_is_locked(void)
}
EXPORT_SYMBOL(rtnl_is_locked);
bool refcount_dec_and_rtnl_lock(refcount_t *r)
{
return refcount_dec_and_mutex_lock(r, &rtnl_mutex);
}
EXPORT_SYMBOL(refcount_dec_and_rtnl_lock);
#ifdef CONFIG_PROVE_LOCKING
bool lockdep_rtnl_is_held(void)
{
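
refcount_dec_and_rtnl_lock() mirrors refcount_dec_and_mutex_lock(): it returns true, with the rtnl mutex held, only when the final reference was just dropped. A hedged sketch of the intended caller pattern ('obj' and example_teardown() are hypothetical; qdisc_put_unlocked() later in this diff is the real user):

	if (!refcount_dec_and_rtnl_lock(&obj->refcnt))
		return;			/* not the last reference: rtnl was never taken */
	example_teardown(obj);		/* teardown runs under rtnl */
	rtnl_unlock();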


@@ -798,7 +798,7 @@ static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4,
 			kfree_skb(skb);
 			return -EINVAL;
 		}
-		if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS) {
+		if (datalen > cork->gso_size * UDP_MAX_SEGMENTS) {
 			kfree_skb(skb);
 			return -EINVAL;
 		}


@@ -148,6 +148,14 @@ int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh, int proto)
hdr->hop_limit = ip6_dst_hoplimit(skb_dst(skb));
memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
/* the control block has been erased, so we have to set the
* iif once again.
* We read the receiving interface index directly from the
* skb->skb_iif as it is done in the IPv4 receiving path (i.e.:
* ip_rcv_core(...)).
*/
IP6CB(skb)->iif = skb->skb_iif;
}
hdr->nexthdr = NEXTHDR_ROUTING;


@@ -1410,8 +1410,10 @@ static int nfc_genl_dump_ses_done(struct netlink_callback *cb)
 {
 	struct class_dev_iter *iter = (struct class_dev_iter *) cb->args[0];
 
-	nfc_device_iter_exit(iter);
-	kfree(iter);
+	if (iter) {
+		nfc_device_iter_exit(iter);
+		kfree(iter);
+	}
 
 	return 0;
 }


@@ -539,6 +539,7 @@ static struct tcf_block *tcf_block_find(struct net *net, struct Qdisc **q,
 					struct netlink_ext_ack *extack)
 {
 	struct tcf_block *block;
+	int err = 0;
 
 	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
 		block = tcf_block_lookup(net, block_index);
@@ -550,55 +551,95 @@ static struct tcf_block *tcf_block_find(struct net *net, struct Qdisc **q,
 		const struct Qdisc_class_ops *cops;
 		struct net_device *dev;
 
+		rcu_read_lock();
+
 		/* Find link */
-		dev = __dev_get_by_index(net, ifindex);
-		if (!dev)
+		dev = dev_get_by_index_rcu(net, ifindex);
+		if (!dev) {
+			rcu_read_unlock();
 			return ERR_PTR(-ENODEV);
+		}
 
 		/* Find qdisc */
 		if (!*parent) {
 			*q = dev->qdisc;
 			*parent = (*q)->handle;
 		} else {
-			*q = qdisc_lookup(dev, TC_H_MAJ(*parent));
+			*q = qdisc_lookup_rcu(dev, TC_H_MAJ(*parent));
 			if (!*q) {
 				NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exists");
-				return ERR_PTR(-EINVAL);
+				err = -EINVAL;
+				goto errout_rcu;
 			}
 		}
 
+		*q = qdisc_refcount_inc_nz(*q);
+		if (!*q) {
+			NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exists");
+			err = -EINVAL;
+			goto errout_rcu;
+		}
+
 		/* Is it classful? */
 		cops = (*q)->ops->cl_ops;
 		if (!cops) {
 			NL_SET_ERR_MSG(extack, "Qdisc not classful");
-			return ERR_PTR(-EINVAL);
+			err = -EINVAL;
+			goto errout_rcu;
 		}
 
 		if (!cops->tcf_block) {
 			NL_SET_ERR_MSG(extack, "Class doesn't support blocks");
-			return ERR_PTR(-EOPNOTSUPP);
+			err = -EOPNOTSUPP;
+			goto errout_rcu;
 		}
 
+		/* At this point we know that qdisc is not noop_qdisc,
+		 * which means that qdisc holds a reference to net_device
+		 * and we hold a reference to qdisc, so it is safe to release
+		 * rcu read lock.
+		 */
+		rcu_read_unlock();
+
 		/* Do we search for filter, attached to class? */
 		if (TC_H_MIN(*parent)) {
 			*cl = cops->find(*q, *parent);
 			if (*cl == 0) {
 				NL_SET_ERR_MSG(extack, "Specified class doesn't exist");
-				return ERR_PTR(-ENOENT);
+				err = -ENOENT;
+				goto errout_qdisc;
 			}
 		}
 
 		/* And the last stroke */
 		block = cops->tcf_block(*q, *cl, extack);
-		if (!block)
-			return ERR_PTR(-EINVAL);
+		if (!block) {
+			err = -EINVAL;
+			goto errout_qdisc;
+		}
 		if (tcf_block_shared(block)) {
 			NL_SET_ERR_MSG(extack, "This filter block is shared. Please use the block index to manipulate the filters");
-			return ERR_PTR(-EOPNOTSUPP);
+			err = -EOPNOTSUPP;
+			goto errout_qdisc;
 		}
 	}
 
 	return block;
+
+errout_rcu:
+	rcu_read_unlock();
+errout_qdisc:
+	if (*q) {
+		qdisc_put(*q);
+		*q = NULL;
+	}
+	return ERR_PTR(err);
 }
 
+static void tcf_block_release(struct Qdisc *q, struct tcf_block *block)
+{
+	if (q)
+		qdisc_put(q);
+}
+
struct tcf_block_owner_item {
@@ -1336,6 +1377,7 @@ replay:
 errout:
 	if (chain)
 		tcf_chain_put(chain);
+	tcf_block_release(q, block);
 	if (err == -EAGAIN)
 		/* Replay the request. */
 		goto replay;
@@ -1457,6 +1499,7 @@ static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
 errout:
 	if (chain)
 		tcf_chain_put(chain);
+	tcf_block_release(q, block);
 	return err;
 }
@@ -1542,6 +1585,7 @@ static int tc_get_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
 errout:
 	if (chain)
 		tcf_chain_put(chain);
+	tcf_block_release(q, block);
 	return err;
 }
@@ -1858,7 +1902,8 @@ replay:
 	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
 	if (chain_index > TC_ACT_EXT_VAL_MASK) {
 		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
-		return -EINVAL;
+		err = -EINVAL;
+		goto errout_block;
 	}
chain = tcf_chain_lookup(block, chain_index);
if (n->nlmsg_type == RTM_NEWCHAIN) {
@@ -1870,23 +1915,27 @@
 				tcf_chain_hold(chain);
 			} else {
 				NL_SET_ERR_MSG(extack, "Filter chain already exists");
-				return -EEXIST;
+				err = -EEXIST;
+				goto errout_block;
 			}
 		} else {
 			if (!(n->nlmsg_flags & NLM_F_CREATE)) {
 				NL_SET_ERR_MSG(extack, "Need both RTM_NEWCHAIN and NLM_F_CREATE to create a new chain");
-				return -ENOENT;
+				err = -ENOENT;
+				goto errout_block;
 			}
 			chain = tcf_chain_create(block, chain_index);
 			if (!chain) {
 				NL_SET_ERR_MSG(extack, "Failed to create filter chain");
-				return -ENOMEM;
+				err = -ENOMEM;
+				goto errout_block;
 			}
 		}
 	} else {
 		if (!chain || tcf_chain_held_by_acts_only(chain)) {
 			NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
-			return -EINVAL;
+			err = -EINVAL;
+			goto errout_block;
 		}
 		tcf_chain_hold(chain);
 	}
@@ -1930,6 +1979,8 @@ replay:
 errout:
 	tcf_chain_put(chain);
+errout_block:
+	tcf_block_release(q, block);
 	if (err == -EAGAIN)
 		/* Replay the request. */
 		goto replay;


@@ -315,6 +315,24 @@ out:
return q;
}
struct Qdisc *qdisc_lookup_rcu(struct net_device *dev, u32 handle)
{
struct netdev_queue *nq;
struct Qdisc *q;
if (!handle)
return NULL;
q = qdisc_match_from_root(dev->qdisc, handle);
if (q)
goto out;
nq = dev_ingress_queue_rcu(dev);
if (nq)
q = qdisc_match_from_root(nq->qdisc_sleeping, handle);
out:
return q;
}
static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid)
{
unsigned long cl;
@@ -928,7 +946,7 @@ static void notify_and_destroy(struct net *net, struct sk_buff *skb,
 		qdisc_notify(net, skb, n, clid, old, new);
 
 	if (old)
-		qdisc_destroy(old);
+		qdisc_put(old);
 }
/* Graft qdisc "new" to class "classid" of qdisc "parent" or
@@ -981,7 +999,7 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
 			qdisc_refcount_inc(new);
 
 		if (!ingress)
-			qdisc_destroy(old);
+			qdisc_put(old);
 	}
 
 skip:
@@ -1589,7 +1607,7 @@ graft:
 	err = qdisc_graft(dev, p, skb, n, clid, q, NULL, extack);
 	if (err) {
 		if (q)
-			qdisc_destroy(q);
+			qdisc_put(q);
 		return err;
 	}


@@ -150,7 +150,7 @@ static void atm_tc_put(struct Qdisc *sch, unsigned long cl)
 	pr_debug("atm_tc_put: destroying\n");
 	list_del_init(&flow->list);
 	pr_debug("atm_tc_put: qdisc %p\n", flow->q);
-	qdisc_destroy(flow->q);
+	qdisc_put(flow->q);
 	tcf_block_put(flow->block);
 	if (flow->sock) {
 		pr_debug("atm_tc_put: f_count %ld\n",


@@ -1439,7 +1439,7 @@ static void cbq_destroy_class(struct Qdisc *sch, struct cbq_class *cl)
 	WARN_ON(cl->filters);
 
 	tcf_block_put(cl->block);
-	qdisc_destroy(cl->q);
+	qdisc_put(cl->q);
 	qdisc_put_rtab(cl->R_tab);
 	gen_kill_estimator(&cl->rate_est);
 	if (cl != &q->link)


@@ -452,7 +452,7 @@ static void cbs_destroy(struct Qdisc *sch)
 	cbs_disable_offload(dev, q);
 
 	if (q->qdisc)
-		qdisc_destroy(q->qdisc);
+		qdisc_put(q->qdisc);
 }
 
 static int cbs_dump(struct Qdisc *sch, struct sk_buff *skb)


@@ -134,7 +134,7 @@ static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
 					    tca[TCA_RATE]);
 		if (err) {
 			NL_SET_ERR_MSG(extack, "Failed to replace estimator");
-			qdisc_destroy(cl->qdisc);
+			qdisc_put(cl->qdisc);
 			kfree(cl);
 			return err;
 		}
@@ -153,7 +153,7 @@ static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
 static void drr_destroy_class(struct Qdisc *sch, struct drr_class *cl)
 {
 	gen_kill_estimator(&cl->rate_est);
-	qdisc_destroy(cl->qdisc);
+	qdisc_put(cl->qdisc);
 	kfree(cl);
 }


@@ -415,7 +415,7 @@ static void dsmark_destroy(struct Qdisc *sch)
 	pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p);
 
 	tcf_block_put(p->block);
-	qdisc_destroy(p->q);
+	qdisc_put(p->q);
 	if (p->mv != p->embedded)
 		kfree(p->mv);
 }


@@ -180,7 +180,7 @@ struct Qdisc *fifo_create_dflt(struct Qdisc *sch, struct Qdisc_ops *ops,
 	if (q) {
 		err = fifo_set_limit(q, limit);
 		if (err < 0) {
-			qdisc_destroy(q);
+			qdisc_put(q);
 			q = NULL;
 		}
 	}


@@ -918,7 +918,7 @@ struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
 	if (!ops->init || ops->init(sch, NULL, extack) == 0)
 		return sch;
 
-	qdisc_destroy(sch);
+	qdisc_put(sch);
 	return NULL;
 }
 EXPORT_SYMBOL(qdisc_create_dflt);
@@ -958,7 +958,14 @@ void qdisc_free(struct Qdisc *qdisc)
 	kfree((char *) qdisc - qdisc->padded);
 }
 
-void qdisc_destroy(struct Qdisc *qdisc)
+static void qdisc_free_cb(struct rcu_head *head)
+{
+	struct Qdisc *q = container_of(head, struct Qdisc, rcu);
+
+	qdisc_free(q);
+}
+
+static void qdisc_destroy(struct Qdisc *qdisc)
 {
 	const struct Qdisc_ops *ops;
 	struct sk_buff *skb, *tmp;
@@ -967,10 +974,6 @@
 		return;
 
 	ops = qdisc->ops;
 
-	if (qdisc->flags & TCQ_F_BUILTIN ||
-	    !refcount_dec_and_test(&qdisc->refcnt))
-		return;
-
 #ifdef CONFIG_NET_SCHED
 	qdisc_hash_del(qdisc);
@@ -995,9 +998,34 @@
 		kfree_skb_list(skb);
 	}
 
-	qdisc_free(qdisc);
+	call_rcu(&qdisc->rcu, qdisc_free_cb);
 }
-EXPORT_SYMBOL(qdisc_destroy);
+
+void qdisc_put(struct Qdisc *qdisc)
+{
+	if (qdisc->flags & TCQ_F_BUILTIN ||
+	    !refcount_dec_and_test(&qdisc->refcnt))
+		return;
+
+	qdisc_destroy(qdisc);
+}
+EXPORT_SYMBOL(qdisc_put);
+
+/* Version of qdisc_put() that is called with rtnl mutex unlocked.
+ * Intended to be used as optimization, this function only takes rtnl lock if
+ * qdisc reference counter reached zero.
+ */
+void qdisc_put_unlocked(struct Qdisc *qdisc)
+{
+	if (qdisc->flags & TCQ_F_BUILTIN ||
+	    !refcount_dec_and_rtnl_lock(&qdisc->refcnt))
+		return;
+
+	qdisc_destroy(qdisc);
+	rtnl_unlock();
+}
+EXPORT_SYMBOL(qdisc_put_unlocked);
 
 /* Attach toplevel qdisc to device queue. */
 struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
@@ -1309,7 +1337,7 @@ static void shutdown_scheduler_queue(struct net_device *dev,
 		rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
 		dev_queue->qdisc_sleeping = qdisc_default;
 
-		qdisc_destroy(qdisc);
+		qdisc_put(qdisc);
 	}
 }
@@ -1318,7 +1346,7 @@ void dev_shutdown(struct net_device *dev)
 	netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc);
 	if (dev_ingress_queue(dev))
 		shutdown_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc);
 
-	qdisc_destroy(dev->qdisc);
+	qdisc_put(dev->qdisc);
 	dev->qdisc = &noop_qdisc;
 
 	WARN_ON(timer_pending(&dev->watchdog_timer));


@@ -1092,7 +1092,7 @@ hfsc_destroy_class(struct Qdisc *sch, struct hfsc_class *cl)
 	struct hfsc_sched *q = qdisc_priv(sch);
 
 	tcf_block_put(cl->block);
-	qdisc_destroy(cl->qdisc);
+	qdisc_put(cl->qdisc);
 	gen_kill_estimator(&cl->rate_est);
 	if (cl != &q->root)
 		kfree(cl);


@@ -1224,7 +1224,7 @@ static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl)
 {
 	if (!cl->level) {
 		WARN_ON(!cl->un.leaf.q);
-		qdisc_destroy(cl->un.leaf.q);
+		qdisc_put(cl->un.leaf.q);
 	}
 	gen_kill_estimator(&cl->rate_est);
 	tcf_block_put(cl->block);
@@ -1425,7 +1425,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
 		/* turn parent into inner node */
 		qdisc_reset(parent->un.leaf.q);
 		qdisc_tree_reduce_backlog(parent->un.leaf.q, qlen, backlog);
-		qdisc_destroy(parent->un.leaf.q);
+		qdisc_put(parent->un.leaf.q);
 		if (parent->prio_activity)
 			htb_deactivate(q, parent);


@@ -65,7 +65,7 @@ static void mq_destroy(struct Qdisc *sch)
 	if (!priv->qdiscs)
 		return;
 	for (ntx = 0; ntx < dev->num_tx_queues && priv->qdiscs[ntx]; ntx++)
-		qdisc_destroy(priv->qdiscs[ntx]);
+		qdisc_put(priv->qdiscs[ntx]);
 	kfree(priv->qdiscs);
 }
@@ -119,7 +119,7 @@ static void mq_attach(struct Qdisc *sch)
 		qdisc = priv->qdiscs[ntx];
 		old = dev_graft_qdisc(qdisc->dev_queue, qdisc);
 		if (old)
-			qdisc_destroy(old);
+			qdisc_put(old);
 #ifdef CONFIG_NET_SCHED
 		if (ntx < dev->real_num_tx_queues)
 			qdisc_hash_add(qdisc, false);


@@ -40,7 +40,7 @@ static void mqprio_destroy(struct Qdisc *sch)
 		for (ntx = 0;
 		     ntx < dev->num_tx_queues && priv->qdiscs[ntx];
 		     ntx++)
-			qdisc_destroy(priv->qdiscs[ntx]);
+			qdisc_put(priv->qdiscs[ntx]);
 		kfree(priv->qdiscs);
 	}
@@ -300,7 +300,7 @@ static void mqprio_attach(struct Qdisc *sch)
 		qdisc = priv->qdiscs[ntx];
 		old = dev_graft_qdisc(qdisc->dev_queue, qdisc);
 		if (old)
-			qdisc_destroy(old);
+			qdisc_put(old);
 		if (ntx < dev->real_num_tx_queues)
 			qdisc_hash_add(qdisc, false);
 	}


@@ -175,7 +175,7 @@ multiq_destroy(struct Qdisc *sch)
 	tcf_block_put(q->block);
 	for (band = 0; band < q->bands; band++)
-		qdisc_destroy(q->queues[band]);
+		qdisc_put(q->queues[band]);
 	kfree(q->queues);
 }
@@ -204,7 +204,7 @@ static int multiq_tune(struct Qdisc *sch, struct nlattr *opt,
 			q->queues[i] = &noop_qdisc;
 			qdisc_tree_reduce_backlog(child, child->q.qlen,
 						  child->qstats.backlog);
-			qdisc_destroy(child);
+			qdisc_put(child);
 		}
 	}
@@ -228,7 +228,7 @@ static int multiq_tune(struct Qdisc *sch, struct nlattr *opt,
 				qdisc_tree_reduce_backlog(old,
 							  old->q.qlen,
 							  old->qstats.backlog);
-				qdisc_destroy(old);
+				qdisc_put(old);
 			}
 			sch_tree_unlock(sch);
 		}


@@ -1054,7 +1054,7 @@ static void netem_destroy(struct Qdisc *sch)
 	qdisc_watchdog_cancel(&q->watchdog);
 	if (q->qdisc)
-		qdisc_destroy(q->qdisc);
+		qdisc_put(q->qdisc);
 	dist_free(q->delay_dist);
 	dist_free(q->slot_dist);
 }


@@ -175,7 +175,7 @@ prio_destroy(struct Qdisc *sch)
 	tcf_block_put(q->block);
 	prio_offload(sch, NULL);
 	for (prio = 0; prio < q->bands; prio++)
-		qdisc_destroy(q->queues[prio]);
+		qdisc_put(q->queues[prio]);
 }
 
 static int prio_tune(struct Qdisc *sch, struct nlattr *opt,
@@ -205,7 +205,7 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt,
 					      extack);
 		if (!queues[i]) {
 			while (i > oldbands)
-				qdisc_destroy(queues[--i]);
+				qdisc_put(queues[--i]);
 			return -ENOMEM;
 		}
 	}
@@ -220,7 +220,7 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt,
 			qdisc_tree_reduce_backlog(child, child->q.qlen,
 						  child->qstats.backlog);
-			qdisc_destroy(child);
+			qdisc_put(child);
 		}
 
 	for (i = oldbands; i < q->bands; i++) {

Some files were not shown because too many files have changed in this diff.