Merge tag 'ASB-2022-12-05_4.19-stable' of https://android.googlesource.com/kernel/common into android13-4.19-kona

https://source.android.com/docs/security/bulletin/2022-12-01
CVE-2022-23960

# By Yang Yingliang (18) and others
# Via Greg Kroah-Hartman
* tag 'ASB-2022-12-05_4.19-stable' of https://android.googlesource.com/kernel/common:
  Linux 4.19.268
  ipc/sem: Fix dangling sem_array access in semtimedop race
  mmc: sdhci: Fix voltage switch delay
  mmc: sdhci: use FIELD_GET for preset value bit masks
  x86/ioremap: Fix page aligned size calculation in __ioremap_caller()
  Bluetooth: L2CAP: Fix accepting connection request for invalid SPSM
  x86/pm: Add enumeration check before spec MSRs save/restore setup
  x86/tsx: Add a feature bit for TSX control MSR support
  nvme: restrict management ioctls to admin
  tcp/udp: Fix memory leak in ipv6_renew_options().
  Kconfig.debug: provide a little extra FRAME_WARN leeway when KASAN is enabled
  parisc: Increase FRAME_WARN to 2048 bytes on parisc
  xtensa: increase size of gcc stack frame check
  parisc: Increase size of gcc stack frame check
  iommu/vt-d: Fix PCI device refcount leak in dmar_dev_scope_init()
  pinctrl: single: Fix potential division by zero
  ASoC: ops: Fix bounds check for _sx controls
  mm: Fix '.data.once' orphan section warning
  arm64: errata: Fix KVM Spectre-v2 mitigation selection for Cortex-A57/A72
  arm64: Fix panic() when Spectre-v2 causes Spectre-BHB to re-allocate KVM vectors
  pinctrl: intel: Save and restore pins in "direct IRQ" mode
  x86/bugs: Make sure MSR_SPEC_CTRL is updated properly upon resume from S3
  nilfs2: fix NULL pointer dereference in nilfs_palloc_commit_free_entry()
  tools/vm/slabinfo-gnuplot: use "grep -E" instead of "egrep"
  error-injection: Add prompt for function error injection
  btrfs: qgroup: fix sleep from invalid context bug in btrfs_qgroup_inherit()
  hwmon: (coretemp) fix pci device refcount leak in nv1a_ram_new()
  hwmon: (coretemp) Check for null before removing sysfs attrs
  net: ethernet: renesas: ravb: Fix promiscuous mode after system resumed
  packet: do not set TP_STATUS_CSUM_VALID on CHECKSUM_COMPLETE
  net: tun: Fix use-after-free in tun_detach()
  net: hsr: Fix potential use-after-free
  dsa: lan9303: Correct stat name
  net/9p: Fix a potential socket leak in p9_socket_open
  net: net_netdev: Fix error handling in ntb_netdev_init_module()
  net: phy: fix null-ptr-deref while probe() failed
  qlcnic: fix sleep-in-atomic-context bugs caused by msleep
  can: cc770: cc770_isa_probe(): add missing free_cc770dev()
  can: sja1000_isa: sja1000_isa_probe(): add missing free_sja1000dev()
  net/mlx5: Fix uninitialized variable bug in outlen_write()
  of: property: decrement node refcount in of_fwnode_get_reference_args()
  hwmon: (ibmpex) Fix possible UAF when ibmpex_register_bmc() fails
  hwmon: (i5500_temp) fix missing pci_disable_device()
  scripts/faddr2line: Fix regression in name resolution on ppc64le
  iio: light: rpr0521: add missing Kconfig dependencies
  iio: health: afe4404: Fix oob read in afe4404_[read|write]_raw
  iio: health: afe4403: Fix oob read in afe4403_read_raw
  Revert "x86/speculation: Change FILL_RETURN_BUFFER to work with objtool"
  v4l2: don't fall back to follow_pfn() if pin_user_pages_fast() fails
  proc: proc_skip_spaces() shouldn't think it is working on C strings
  proc: avoid integer type confusion in get_proc_long
  spi: spi-imx: Fix spi_bus_clk if requested clock is higher than input clock
  btrfs: free btrfs_path before copying inodes to userspace
  drm/amdgpu: always register an MMU notifier for userptr
  drm/amd/dc/dce120: Fix audio register mapping, stop triggering KASAN
  btrfs: free btrfs_path before copying subvol info to userspace
  btrfs: free btrfs_path before copying fspath to userspace
  btrfs: free btrfs_path before copying root refs to userspace
  dm integrity: flush the journal on suspend
  net: usb: qmi_wwan: add Telit 0x103a composition
  tcp: configurable source port perturb table size
  platform/x86: hp-wmi: Ignore Smart Experience App event
  platform/x86: acer-wmi: Enable SW_TABLET_MODE on Switch V 10 (SW5-017)
  platform/x86: asus-wmi: add missing pci_dev_put() in asus_wmi_set_xusb2pr()
  xen/platform-pci: add missing free_irq() in error path
  serial: 8250: 8250_omap: Avoid RS485 RTS glitch on ->set_termios()
  Input: synaptics - switch touchpad on HP Laptop 15-da3001TU to RMI mode
  nilfs2: fix nilfs_sufile_mark_dirty() not set segment usage as dirty
  ceph: avoid putting the realm twice when decoding snaps fails
  ceph: do not update snapshot context when there is no new snapshot
  iio: pressure: ms5611: fixed value compensation bug
  iio: ms5611: Simplify IO callback parameters
  nios2: add FORCE for vmlinuz.gz
  iio: core: Fix entry not deleted when iio_register_sw_trigger_type() fails
  iio: light: apds9960: fix wrong register for gesture gain
  arm64: dts: rockchip: lower rk3399-puma-haikou SD controller clock frequency
  s390/crashdump: fix TOD programmable field size
  net: thunderx: Fix the ACPI memory leak
  nfc: st-nci: fix memory leaks in EVT_TRANSACTION
  nfc: st-nci: fix incorrect validating logic in EVT_TRANSACTION
  s390/dasd: fix no record found for raw_track_access
  dccp/tcp: Reset saddr on failure after inet6?_hash_connect().
  bnx2x: fix pci device refcount leak in bnx2x_vf_is_pcie_pending()
  NFC: nci: fix memory leak in nci_rx_data_packet()
  xfrm: Fix ignored return value in xfrm6_init()
  tipc: check skb_linearize() return value in tipc_disc_rcv()
  tipc: add an extra conn_get in tipc_conn_alloc
  tipc: set con sock in tipc_conn_alloc
  net/mlx5: Fix FW tracer timestamp calculation
  Drivers: hv: vmbus: fix possible memory leak in vmbus_device_register()
  Drivers: hv: vmbus: fix double free in the error path of vmbus_add_channel_work()
  net: pch_gbe: fix pci device refcount leak while module exiting
  net/qla3xxx: fix potential memleak in ql3xxx_send()
  net/mlx4: Check retval of mlx4_bitmap_init
  ARM: mxs: fix memory leak in mxs_machine_init()
  9p/fd: fix issue of list_del corruption in p9_fd_cancel()
  net: pch_gbe: fix potential memleak in pch_gbe_tx_queue()
  nfc/nci: fix race with opening and closing
  ARM: dts: at91: sam9g20ek: enable udc vbus gpio pinctrl
  bus: sunxi-rsb: Support atomic transfers
  ASoC: sgtl5000: Reset the CHIP_CLK_CTRL reg on remove
  ARM: dts: am335x-pcm-953: Define fixed regulators in root node
  af_key: Fix send_acquire race with pfkey_register
  MIPS: pic32: treat port as signed integer
  RISC-V: vdso: Do not add missing symbols to version section in linker script
  drm: panel-orientation-quirks: Add quirk for Acer Switch V 10 (SW5-017)
  spi: stm32: fix stm32_spi_prepare_mbr() that halves spi clk for every run
  wifi: mac80211: Fix ack frame idr leak when mesh has no route
  audit: fix undefined behavior in bit shift for AUDIT_BIT
  wifi: mac80211_hwsim: fix debugfs attribute ps with rc table support
  Linux 4.19.267
  ntfs: check overflow when iterating ATTR_RECORDs
  ntfs: fix out-of-bounds read in ntfs_attr_find()
  ntfs: fix use-after-free in ntfs_attr_find()
  mm: fs: initialize fsdata passed to write_begin/write_end interface
  9p/trans_fd: always use O_NONBLOCK read/write
  gfs2: Switch from strlcpy to strscpy
  gfs2: Check sb_bsize_shift after reading superblock
  9p: trans_fd/p9_conn_cancel: drop client lock earlier
  kcm: close race conditions on sk_receive_queue
  bpf, test_run: Fix alignment problem in bpf_prog_test_run_skb()
  kcm: avoid potential race in kcm_tx_work
  tcp: cdg: allow tcp_cdg_release() to be called multiple times
  macvlan: enforce a consistent minimal mtu
  serial: 8250: Flush DMA Rx on RLSI
  Input: i8042 - fix leaking of platform device on module removal
  scsi: target: tcm_loop: Fix possible name leak in tcm_loop_setup_hba_bus()
  misc/vmw_vmci: fix an infoleak in vmci_host_do_receive_datagram()
  docs: update mediator contact information in CoC doc
  mmc: sdhci-pci: Fix possible memory leak caused by missing pci_dev_put()
  mmc: core: properly select voltage range without power cycle
  serial: 8250_lpss: Configure DMA also w/o DMA filter
  serial: 8250: Fall back to non-DMA Rx if IIR_RDI occurs
  dm ioctl: fix misbehavior if list_versions races with module loading
  iio: pressure: ms5611: changed hardcoded SPI speed to value limited
  iio: trigger: sysfs: fix possible memory leak in iio_sysfs_trig_init()
  iio: adc: at91_adc: fix possible memory leak in at91_adc_allocate_trigger()
  usb: chipidea: fix deadlock in ci_otg_del_timer
  usb: add NO_LPM quirk for Realforce 87U Keyboard
  USB: serial: option: add Fibocom FM160 0x0111 composition
  USB: serial: option: add u-blox LARA-L6 modem
  USB: serial: option: add u-blox LARA-R6 00B modem
  USB: serial: option: remove old LARA-R6 PID
  USB: serial: option: add Sierra Wireless EM9191
  speakup: fix a segfault caused by switching consoles
  slimbus: stream: correct presence rate frequencies
  ALSA: usb-audio: Drop snd_BUG_ON() from snd_usbmidi_output_open()
  ring_buffer: Do not deactivate non-existant pages
  ftrace: Fix null pointer dereference in ftrace_add_mod()
  ftrace: Optimize the allocation for mcount entries
  ftrace: Fix the possible incorrect kernel message
  net: thunderbolt: Fix error handling in tbnet_init()
  cifs: Fix wrong return value checking when GETFLAGS
  net/x25: Fix skb leak in x25_lapb_receive_frame()
  drbd: use after free in drbd_create_device()
  xen/pcpu: fix possible memory leak in register_pcpu()
  bnxt_en: Remove debugfs when pci_register_driver failed
  net: caif: fix double disconnect client in chnl_net_open()
  mISDN: fix misuse of put_device() in mISDN_register_device()
  mISDN: fix possible memory leak in mISDN_dsp_element_register()
  net: bgmac: Drop free_netdev() from bgmac_enet_remove()
  ata: libata-transport: fix double ata_host_put() in ata_tport_add()
  pinctrl: devicetree: fix null pointer dereferencing in pinctrl_dt_to_map
  parport_pc: Avoid FIFO port location truncation
  siox: fix possible memory leak in siox_device_add()
  block: sed-opal: kmalloc the cmd/resp buffers
  ASoC: soc-utils: Remove __exit for snd_soc_util_exit()
  tty: n_gsm: fix sleep-in-atomic-context bug in gsm_control_send
  serial: imx: Add missing .thaw_noirq hook
  serial: 8250: omap: Flush PM QOS work on remove
  serial: 8250_omap: remove wait loop from Errata i202 workaround
  ASoC: core: Fix use-after-free in snd_soc_exit()
  Bluetooth: L2CAP: Fix l2cap_global_chan_by_psm
  btrfs: remove pointless and double ulist frees in error paths of qgroup tests
  drm/imx: imx-tve: Fix return type of imx_tve_connector_mode_valid
  NFSv4: Retry LOCK on OLD_STATEID during delegation return
  selftests/intel_pstate: fix build for ARCH=x86_64
  selftests/futex: fix build for clang
  spi: intel: Fix the offset to get the 64K erase opcode
  ASoC: wm8997: Revert "ASoC: wm8997: Fix PM disable depth imbalance in wm8997_probe"
  ASoC: wm5110: Revert "ASoC: wm5110: Fix PM disable depth imbalance in wm5110_probe"
  ASoC: wm5102: Revert "ASoC: wm5102: Fix PM disable depth imbalance in wm5102_probe"
  x86/cpu: Restore AMD's DE_CFG MSR after resume
  net: tun: call napi_schedule_prep() to ensure we own a napi
  dmaengine: at_hdmac: Check return code of dma_async_device_register
  dmaengine: at_hdmac: Fix impossible condition
  dmaengine: at_hdmac: Don't allow CPU to reorder channel enable
  dmaengine: at_hdmac: Fix completion of unissued descriptor in case of errors
  dmaengine: at_hdmac: Don't start transactions at tx_submit level
  dmaengine: at_hdmac: Fix at_lli struct definition
  cert host tools: Stop complaining about deprecated OpenSSL functions
  udf: Fix a slab-out-of-bounds write bug in udf_find_entry()
  btrfs: selftests: fix wrong error check in btrfs_free_dummy_root()
  platform/x86: hp_wmi: Fix rfkill causing soft blocked wifi
  drm/i915/dmabuf: fix sg_table handling in map_dma_buf
  nilfs2: fix use-after-free bug of ns_writer on remount
  nilfs2: fix deadlock in nilfs_count_free_blocks()
  vmlinux.lds.h: Fix placement of '.data..decrypted' section
  ALSA: usb-audio: Add DSD support for Accuphase DAC-60
  ALSA: usb-audio: Add quirk entry for M-Audio Micro
  ALSA: hda: fix potential memleak in 'add_widget_node'
  ALSA: hda/ca0132: add quirk for EVGA Z390 DARK
  arm64: efi: Fix handling of misaligned runtime regions and drop warning
  riscv: process: fix kernel info leakage
  net: macvlan: fix memory leaks of macvlan_common_newlink
  net: mv643xx_eth: disable napi when init rxq or txq failed in mv643xx_eth_open()
  ethernet: s2io: disable napi when start nic failed in s2io_card_up()
  net: cxgb3_main: disable napi when bind qsets failed in cxgb_up()
  net: nixge: disable napi when enable interrupts failed in nixge_open()
  drivers: net: xgene: disable napi when register irq failed in xgene_enet_open()
  dmaengine: mv_xor_v2: Fix a resource leak in mv_xor_v2_remove()
  tipc: fix the msg->req tlv len check in tipc_nl_compat_name_table_dump_header
  ipv6: addrlabel: fix infoleak when sending struct ifaddrlblmsg to network
  drm/vc4: Fix missing platform_unregister_drivers() call in vc4_drm_register()
  hamradio: fix issue of dev reference count leakage in bpq_device_event()
  net: lapbether: fix issue of dev reference count leakage in lapbeth_device_event()
  capabilities: fix undefined behavior in bit shift for CAP_TO_MASK
  net: fman: Unregister ethernet device on removal
  bnxt_en: fix potentially incorrect return value for ndo_rx_flow_steer
  net: tun: Fix memory leaks of napi_get_frags
  net: gso: fix panic on frag_list with mixed head alloc types
  HID: hyperv: fix possible memory leak in mousevsc_probe()
  wifi: cfg80211: fix memory leak in query_regdb_file()
  phy: stm32: fix an error code in probe
  Linux 4.19.266
  x86/speculation: Add RSB VM Exit protections
  x86/bugs: Warn when "ibrs" mitigation is selected on Enhanced IBRS parts
  x86/speculation: Use DECLARE_PER_CPU for x86_spec_ctrl_current
  x86/speculation: Disable RRSBA behavior
  x86/bugs: Add Cannon lake to RETBleed affected CPU list
  x86/cpu/amd: Enumerate BTC_NO
  x86/common: Stamp out the stepping madness
  x86/speculation: Fill RSB on vmexit for IBRS
  KVM: VMX: Fix IBRS handling after vmexit
  KVM: VMX: Prevent guest RSB poisoning attacks with eIBRS
  x86/speculation: Remove x86_spec_ctrl_mask
  x86/speculation: Use cached host SPEC_CTRL value for guest entry/exit
  x86/speculation: Fix SPEC_CTRL write on SMT state change
  x86/speculation: Fix firmware entry SPEC_CTRL handling
  x86/speculation: Fix RSB filling with CONFIG_RETPOLINE=n
  x86/speculation: Change FILL_RETURN_BUFFER to work with objtool
  intel_idle: Disable IBRS during long idle
  x86/bugs: Report Intel retbleed vulnerability
  x86/bugs: Split spectre_v2_select_mitigation() and spectre_v2_user_select_mitigation()
  x86/speculation: Add spectre_v2=ibrs option to support Kernel IBRS
  x86/bugs: Optimize SPEC_CTRL MSR writes
  x86/entry: Add kernel IBRS implementation
  x86/entry: Remove skip_r11rcx
  x86/bugs: Keep a per-CPU IA32_SPEC_CTRL value
  x86/bugs: Add AMD retbleed= boot parameter
  x86/bugs: Report AMD retbleed vulnerability
  x86/cpufeatures: Move RETPOLINE flags to word 11
  x86/cpu: Add a steppings field to struct x86_cpu_id
  x86/cpu: Add consistent CPU match macros
  x86/devicetable: Move x86 specific macro out of generic code
  x86/cpufeature: Fix various quality problems in the <asm/cpu_device_hd.h> header
  x86/cpufeature: Add facility to check for min microcode revisions
  Revert "x86/cpu: Add a steppings field to struct x86_cpu_id"
  Revert "x86/speculation: Add RSB VM Exit protections"
  ANDROID: preserve CRC for some DRM functions
  Revert "tcp/udp: Make early_demux back namespacified."
  Linux 4.19.265
  wifi: brcmfmac: Fix potential buffer overflow in brcmf_fweh_event_worker()
  linux/bits.h: make BIT(), GENMASK(), and friends available in assembly
  KVM: x86: emulator: update the emulation mode after CR0 write
  KVM: x86: emulator: introduce emulator_recalc_and_set_mode
  KVM: x86: emulator: em_sysexit should update ctxt->mode
  KVM: x86: Mask off reserved bits in CPUID.80000008H
  ext4: fix warning in 'ext4_da_release_space'
  parisc: Avoid printing the hardware path twice
  parisc: Export iosapic_serial_irq() symbol for serial port driver
  parisc: Make 8250_gsc driver dependend on CONFIG_PARISC
  efi: random: reduce seed size to 32 bytes
  ALSA: usb-audio: Add quirks for MacroSilicon MS2100/MS2106 devices
  capabilities: fix potential memleak on error path from vfs_getxattr_alloc()
  tracing/histogram: Update document for KEYS_MAX size
  kprobe: reverse kp->flags when arm_kprobe failed
  tcp/udp: Make early_demux back namespacified.
  btrfs: fix type of parameter generation in btrfs_get_dentry
  block, bfq: protect 'bfqd->queued' by 'bfqd->lock'
  Bluetooth: L2CAP: Fix attempting to access uninitialized memory
  i2c: xiic: Add platform module alias
  HID: saitek: add madcatz variant of MMO7 mouse device ID
  media: dvb-frontends/drxk: initialize err to 0
  media: cros-ec-cec: limit msg.len to CEC_MAX_MSG_SIZE
  media: s5p_cec: limit msg.len to CEC_MAX_MSG_SIZE
  ipv6: fix WARNING in ip6_route_net_exit_late()
  net, neigh: Fix null-ptr-deref in neigh_table_clear()
  net: mdio: fix undefined behavior in bit shift for __mdiobus_register
  Bluetooth: L2CAP: fix use-after-free in l2cap_conn_del()
  Bluetooth: L2CAP: Fix use-after-free caused by l2cap_reassemble_sdu
  btrfs: fix ulist leaks in error paths of qgroup self tests
  btrfs: fix inode list leak during backref walking at resolve_indirect_refs()
  isdn: mISDN: netjet: fix wrong check of device registration
  mISDN: fix possible memory leak in mISDN_register_device()
  rose: Fix NULL pointer dereference in rose_send_frame()
  ipvs: fix WARNING in ip_vs_app_net_cleanup()
  ipvs: fix WARNING in __ip_vs_cleanup_batch()
  ipvs: use explicitly signed chars
  net: tun: fix bugs for oversize packet when napi frags enabled
  net: sched: Fix use after free in red_enqueue()
  ata: pata_legacy: fix pdc20230_set_piomode()
  net: fec: fix improper use of NETDEV_TX_BUSY
  nfc: nfcmrvl: Fix potential memory leak in nfcmrvl_i2c_nci_send()
  nfc: s3fwrn5: Fix potential memory leak in s3fwrn5_nci_send()
  RDMA/qedr: clean up work queue on failure in qedr_alloc_resources()
  net: dsa: Fix possible memory leaks in dsa_loop_init()
  nfs4: Fix kmemleak when allocate slot failed
  NFSv4.1: We must always send RECLAIM_COMPLETE after a reboot
  NFSv4.1: Handle RECLAIM_COMPLETE trunking errors
  BACKPORT: ARM: 9039/1: assembler: generalize byte swapping macro into rev_l
  BACKPORT: ARM: 9035/1: uncompress: Add be32tocpu macro

 Conflicts:
	drivers/mmc/host/sdhci.c
	drivers/slimbus/stream.c

Change-Id: Ic112be181f3558a83f85d01fb4e25444f14c7548
commit 45bf774877
Author: Michael Bestas
Date:   2022-12-15 21:06:13 +02:00
GPG key ID: CC95044519BE6669
261 changed files with 2262 additions and 949 deletions

Documentation/admin-guide/kernel-parameters.txt

@ -4169,6 +4169,18 @@
retain_initrd [RAM] Keep initrd memory after extraction
retbleed= [X86] Control mitigation of RETBleed (Arbitrary
Speculative Code Execution with Return Instructions)
vulnerability.
off - unconditionally disable
auto - automatically select a mitigation
Selecting 'auto' will choose a mitigation method at run
time according to the CPU.
Not specifying this option is equivalent to retbleed=auto.
rfkill.default_state=
0 "airplane mode". All wifi, bluetooth, wimax, gps, fm,
etc. communication is blocked by default.
@ -4414,6 +4426,7 @@
eibrs - enhanced IBRS
eibrs,retpoline - enhanced IBRS + Retpolines
eibrs,lfence - enhanced IBRS + LFENCE
ibrs - use IBRS to protect kernel
Not specifying this option is equivalent to
spectre_v2=auto.
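
For illustration only (not part of the patch): both switches are ordinary boot
parameters, so a command line enabling them explicitly could look like

	retbleed=auto spectre_v2=ibrs

with omission of either being equivalent to its =auto default, as described above.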

Documentation/process/code-of-conduct-interpretation.rst

@ -51,7 +51,7 @@ the Technical Advisory Board (TAB) or other maintainers if you're
uncertain how to handle situations that come up. It will not be
considered a violation report unless you want it to be. If you are
uncertain about approaching the TAB or any other maintainers, please
-reach out to our conflict mediator, Joanna Lee <joanna.lee@gesmer.com>.
+reach out to our conflict mediator, Joanna Lee <jlee@linuxfoundation.org>.
In the end, "be kind to each other" is really what the end goal is for
everybody. We know everyone is human and we all fail at times, but the

Documentation/trace/histogram.rst

@ -39,7 +39,7 @@ Documentation written by Tom Zanussi
will use the event's kernel stacktrace as the key. The keywords
'keys' or 'key' can be used to specify keys, and the keywords
'values', 'vals', or 'val' can be used to specify values. Compound
-keys consisting of up to two fields can be specified by the 'keys'
+keys consisting of up to three fields can be specified by the 'keys'
keyword. Hashing a compound key produces a unique entry in the
table for each unique combination of component keys, and can be
useful for providing more fine-grained summaries of event data.

Makefile

@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 4
PATCHLEVEL = 19
-SUBLEVEL = 264
+SUBLEVEL = 268
EXTRAVERSION =
NAME = "People's Front"

arch/arm/boot/compressed/head.S

@ -114,6 +114,13 @@
#endif
.endm
.macro be32tocpu, val, tmp
#ifndef __ARMEB__
/* convert to little endian */
rev_l \val, \tmp
#endif
.endm
.section ".start", #alloc, #execinstr
/*
* sort out different calling conventions
@ -305,13 +312,7 @@ restart: adr r0, LC0
/* Get the initial DTB size */
ldr r5, [r6, #4]
-#ifndef __ARMEB__
-/* convert to little endian */
-eor r1, r5, r5, ror #16
-bic r1, r1, #0x00ff0000
-mov r5, r5, ror #8
-eor r5, r5, r1, lsr #8
-#endif
+be32tocpu r5, r1
/* 50% DTB growth should be good enough */
add r5, r5, r5, lsr #1
/* preserve 64-bit alignment */
@ -364,13 +365,7 @@ restart: adr r0, LC0
/* Get the current DTB size */
ldr r5, [r6, #4]
-#ifndef __ARMEB__
-/* convert r5 (dtb size) to little endian */
-eor r1, r5, r5, ror #16
-bic r1, r1, #0x00ff0000
-mov r5, r5, ror #8
-eor r5, r5, r1, lsr #8
-#endif
+be32tocpu r5, r1
/* preserve 64-bit alignment */
add r5, r5, #7

arch/arm/boot/dts/am335x-pcm-953.dtsi

@ -15,22 +15,20 @@
compatible = "phytec,am335x-pcm-953", "phytec,am335x-phycore-som", "ti,am33xx";
/* Power */
regulators {
-vcc3v3: fixedregulator@1 {
-compatible = "regulator-fixed";
-regulator-name = "vcc3v3";
-regulator-min-microvolt = <3300000>;
-regulator-max-microvolt = <3300000>;
-regulator-boot-on;
-};
+vcc3v3: fixedregulator1 {
+compatible = "regulator-fixed";
+regulator-name = "vcc3v3";
+regulator-min-microvolt = <3300000>;
+regulator-max-microvolt = <3300000>;
+regulator-boot-on;
+};
-vcc1v8: fixedregulator@2 {
-compatible = "regulator-fixed";
-regulator-name = "vcc1v8";
-regulator-min-microvolt = <1800000>;
-regulator-max-microvolt = <1800000>;
-regulator-boot-on;
-};
+vcc1v8: fixedregulator2 {
+compatible = "regulator-fixed";
+regulator-name = "vcc1v8";
+regulator-min-microvolt = <1800000>;
+regulator-max-microvolt = <1800000>;
+regulator-boot-on;
+};
/* User IO */

arch/arm/boot/dts/at91sam9g20ek_common.dtsi

@ -39,6 +39,13 @@
};
usb1 {
pinctrl_usb1_vbus_gpio: usb1_vbus_gpio {
atmel,pins =
<AT91_PIOC 5 AT91_PERIPH_GPIO AT91_PINCTRL_DEGLITCH>; /* PC5 GPIO */
};
};
mmc0_slot1 {
pinctrl_board_mmc0_slot1: mmc0_slot1-board {
atmel,pins =
@ -84,6 +91,8 @@
};
usb1: gadget@fffa4000 {
pinctrl-0 = <&pinctrl_usb1_vbus_gpio>;
pinctrl-names = "default";
atmel,vbus-gpio = <&pioC 5 GPIO_ACTIVE_HIGH>;
status = "okay";
};

arch/arm/include/asm/assembler.h

@ -506,4 +506,21 @@ THUMB( orr \reg , \reg , #PSR_T_BIT )
#define _ASM_NOKPROBE(entry)
#endif
/*
* rev_l - byte-swap a 32-bit value
*
* @val: source/destination register
* @tmp: scratch register
*/
.macro rev_l, val:req, tmp:req
.if __LINUX_ARM_ARCH__ < 6
eor \tmp, \val, \val, ror #16
bic \tmp, \tmp, #0x00ff0000
mov \val, \val, ror #8
eor \val, \val, \tmp, lsr #8
.else
rev \val, \val
.endif
.endm
#endif /* __ASM_ASSEMBLER_H__ */
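
The pre-ARMv6 arm of rev_l open-codes the byte swap with the classic
EOR/BIC/ROR sequence. As an illustration only (not part of the patch), the
same steps in portable C:

	#include <stdint.h>
	#include <stdio.h>

	/* rotate right, as the ARM "ror #s" operand modifier does */
	static uint32_t ror32(uint32_t v, unsigned int s)
	{
		return (v >> s) | (v << (32 - s));
	}

	/* C rendition of: eor tmp, val, val, ror #16;
	 * bic tmp, tmp, #0x00ff0000; mov val, val, ror #8;
	 * eor val, val, tmp, lsr #8 */
	static uint32_t rev_l_c(uint32_t val)
	{
		uint32_t tmp = val ^ ror32(val, 16);

		tmp &= ~0x00ff0000u;
		val = ror32(val, 8);
		return val ^ (tmp >> 8);
	}

	int main(void)
	{
		printf("%08x\n", rev_l_c(0x11223344)); /* prints 44332211 */
		return 0;
	}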

arch/arm/mach-mxs/mach-mxs.c

@ -393,8 +393,10 @@ static void __init mxs_machine_init(void)
root = of_find_node_by_path("/");
ret = of_property_read_string(root, "model", &soc_dev_attr->machine);
-if (ret)
+if (ret) {
+kfree(soc_dev_attr);
 return;
+}
soc_dev_attr->family = "Freescale MXS Family";
soc_dev_attr->soc_id = mxs_get_soc_id();

arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts

@ -203,7 +203,7 @@
cap-sd-highspeed;
cd-gpios = <&gpio0 RK_PA7 GPIO_ACTIVE_LOW>;
disable-wp;
-max-frequency = <150000000>;
+max-frequency = <40000000>;
pinctrl-names = "default";
pinctrl-0 = <&sdmmc_clk &sdmmc_cmd &sdmmc_cd &sdmmc_bus4>;
vmmc-supply = <&vcc3v3_baseboard>;

arch/arm64/kernel/cpu_errata.c

@ -143,9 +143,12 @@ static void install_bp_hardening_cb(bp_hardening_cb_t fn,
__copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
}
__this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
__this_cpu_write(bp_hardening_data.fn, fn);
__this_cpu_write(bp_hardening_data.template_start, hyp_vecs_start);
if (fn != __this_cpu_read(bp_hardening_data.fn)) {
__this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
__this_cpu_write(bp_hardening_data.fn, fn);
__this_cpu_write(bp_hardening_data.template_start,
hyp_vecs_start);
}
spin_unlock(&bp_lock);
}
#else
@ -1264,8 +1267,11 @@ static void kvm_setup_bhb_slot(const char *hyp_vecs_start)
__copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
}
-__this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
-__this_cpu_write(bp_hardening_data.template_start, hyp_vecs_start);
+if (hyp_vecs_start != __this_cpu_read(bp_hardening_data.template_start)) {
+__this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
+__this_cpu_write(bp_hardening_data.template_start,
+hyp_vecs_start);
+}
spin_unlock(&bp_lock);
}
#else
@ -1301,7 +1307,13 @@ void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *entry)
} else if (spectre_bhb_loop_affected(SCOPE_LOCAL_CPU)) {
switch (spectre_bhb_loop_affected(SCOPE_SYSTEM)) {
case 8:
-kvm_setup_bhb_slot(__spectre_bhb_loop_k8_start);
+/*
+ * A57/A72-r0 will already have selected the
+ * spectre-indirect vector, which is sufficient
+ * for BHB too.
+ */
+if (!__this_cpu_read(bp_hardening_data.fn))
+kvm_setup_bhb_slot(__spectre_bhb_loop_k8_start);
break;
case 24:
kvm_setup_bhb_slot(__spectre_bhb_loop_k24_start);

arch/arm64/kernel/efi.c

@ -16,6 +16,14 @@
#include <asm/efi.h>
static bool region_is_misaligned(const efi_memory_desc_t *md)
{
if (PAGE_SIZE == EFI_PAGE_SIZE)
return false;
return !PAGE_ALIGNED(md->phys_addr) ||
!PAGE_ALIGNED(md->num_pages << EFI_PAGE_SHIFT);
}
/*
* Only regions of type EFI_RUNTIME_SERVICES_CODE need to be
* executable, everything else can be mapped with the XN bits
@ -29,14 +37,22 @@ static __init pteval_t create_mapping_protection(efi_memory_desc_t *md)
if (type == EFI_MEMORY_MAPPED_IO)
return PROT_DEVICE_nGnRE;
-if (WARN_ONCE(!PAGE_ALIGNED(md->phys_addr),
-"UEFI Runtime regions are not aligned to 64 KB -- buggy firmware?"))
+if (region_is_misaligned(md)) {
+static bool __initdata code_is_misaligned;
 /*
- * If the region is not aligned to the page size of the OS, we
- * can not use strict permissions, since that would also affect
- * the mapping attributes of the adjacent regions.
+ * Regions that are not aligned to the OS page size cannot be
+ * mapped with strict permissions, as those might interfere
+ * with the permissions that are needed by the adjacent
+ * region's mapping. However, if we haven't encountered any
+ * misaligned runtime code regions so far, we can safely use
+ * non-executable permissions for non-code regions.
 */
-return pgprot_val(PAGE_KERNEL_EXEC);
+code_is_misaligned |= (type == EFI_RUNTIME_SERVICES_CODE);
+return code_is_misaligned ? pgprot_val(PAGE_KERNEL_EXEC)
+: pgprot_val(PAGE_KERNEL);
+}
/* R-- */
if ((attr & (EFI_MEMORY_XP | EFI_MEMORY_RO)) ==
@ -66,19 +82,16 @@ int __init efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md)
bool page_mappings_only = (md->type == EFI_RUNTIME_SERVICES_CODE ||
md->type == EFI_RUNTIME_SERVICES_DATA);
-if (!PAGE_ALIGNED(md->phys_addr) ||
-!PAGE_ALIGNED(md->num_pages << EFI_PAGE_SHIFT)) {
-/*
- * If the end address of this region is not aligned to page
- * size, the mapping is rounded up, and may end up sharing a
- * page frame with the next UEFI memory region. If we create
- * a block entry now, we may need to split it again when mapping
- * the next region, and support for that is going to be removed
- * from the MMU routines. So avoid block mappings altogether in
- * that case.
- */
+/*
+ * If this region is not aligned to the page size used by the OS, the
+ * mapping will be rounded outwards, and may end up sharing a page
+ * frame with an adjacent runtime memory region. Given that the page
+ * table descriptor covering the shared page will be rewritten when the
+ * adjacent region gets mapped, we must avoid block mappings here so we
+ * don't have to worry about splitting them when that happens.
+ */
+if (region_is_misaligned(md))
 page_mappings_only = true;
-}
create_pgd_mapping(mm, md->phys_addr, md->virt_addr,
md->num_pages << EFI_PAGE_SHIFT,
@ -106,6 +119,9 @@ int __init efi_set_mapping_permissions(struct mm_struct *mm,
BUG_ON(md->type != EFI_RUNTIME_SERVICES_CODE &&
md->type != EFI_RUNTIME_SERVICES_DATA);
if (region_is_misaligned(md))
return 0;
/*
* Calling apply_to_page_range() is only safe on regions that are
* guaranteed to be mapped down to pages. Since we are only called

arch/mips/include/asm/fw/fw.h

@ -26,6 +26,6 @@ extern char *fw_getcmdline(void);
extern void fw_meminit(void);
extern char *fw_getenv(char *name);
extern unsigned long fw_getenvl(char *name);
-extern void fw_init_early_console(char port);
+extern void fw_init_early_console(void);
#endif /* __ASM_FW_H_ */

arch/mips/pic32/pic32mzda/early_console.c

@ -35,7 +35,7 @@
#define U_BRG(x) (UART_BASE(x) + 0x40)
static void __iomem *uart_base;
-static char console_port = -1;
+static int console_port = -1;
static int __init configure_uart_pins(int port)
{
@ -55,7 +55,7 @@ static int __init configure_uart_pins(int port)
return 0;
}
-static void __init configure_uart(char port, int baud)
+static void __init configure_uart(int port, int baud)
{
u32 pbclk;
@ -68,7 +68,7 @@ static void __init configure_uart(char port, int baud)
uart_base + PIC32_SET(U_STA(port)));
}
-static void __init setup_early_console(char port, int baud)
+static void __init setup_early_console(int port, int baud)
{
if (configure_uart_pins(port))
return;
@ -138,16 +138,15 @@ _out:
return baud;
}
-void __init fw_init_early_console(char port)
+void __init fw_init_early_console(void)
 {
 char *arch_cmdline = pic32_getcmdline();
-int baud = -1;
+int baud, port;
 uart_base = ioremap_nocache(PIC32_BASE_UART, 0xc00);
 baud = get_baud_from_cmdline(arch_cmdline);
-if (port == -1)
-port = get_port_from_cmdline(arch_cmdline);
+port = get_port_from_cmdline(arch_cmdline);
 if (port == -1)
 port = EARLY_CONSOLE_PORT;

arch/mips/pic32/pic32mzda/init.c

@ -68,7 +68,7 @@ void __init plat_mem_setup(void)
strlcpy(arcs_cmdline, boot_command_line, COMMAND_LINE_SIZE);
#ifdef CONFIG_EARLY_PRINTK
-fw_init_early_console(-1);
+fw_init_early_console();
#endif
pic32_config_init();
}

arch/nios2/boot/Makefile

@ -20,7 +20,7 @@ $(obj)/vmlinux.bin: vmlinux FORCE
$(obj)/vmlinux.gz: $(obj)/vmlinux.bin FORCE
$(call if_changed,gzip)
-$(obj)/vmImage: $(obj)/vmlinux.gz
+$(obj)/vmImage: $(obj)/vmlinux.gz FORCE
$(call if_changed,uimage)
@$(kecho) 'Kernel: $@ is ready'

arch/parisc/include/asm/hardware.h

@ -10,12 +10,12 @@
#define SVERSION_ANY_ID PA_SVERSION_ANY_ID
struct hp_hardware {
-unsigned short hw_type:5; /* HPHW_xxx */
-unsigned short hversion;
-unsigned long sversion:28;
-unsigned short opt;
-const char name[80]; /* The hardware description */
-};
+unsigned int hw_type:8; /* HPHW_xxx */
+unsigned int hversion:12;
+unsigned int sversion:12;
+unsigned char opt;
+unsigned char name[59]; /* The hardware description */
+} __packed;
struct parisc_device;

arch/parisc/kernel/drivers.c

@ -861,15 +861,13 @@ void __init walk_central_bus(void)
&root);
}
-static void print_parisc_device(struct parisc_device *dev)
+static __init void print_parisc_device(struct parisc_device *dev)
 {
-char hw_path[64];
-static int count;
+static int count __initdata;
-print_pa_hwpath(dev, hw_path);
-pr_info("%d. %s at %pap [%s] { %d, 0x%x, 0x%.3x, 0x%.5x }",
-++count, dev->name, &(dev->hpa.start), hw_path, dev->id.hw_type,
-dev->id.hversion_rev, dev->id.hversion, dev->id.sversion);
+pr_info("%d. %s at %pap { type:%d, hv:%#x, sv:%#x, rev:%#x }",
+++count, dev->name, &(dev->hpa.start), dev->id.hw_type,
+dev->id.hversion, dev->id.sversion, dev->id.hversion_rev);
if (dev->num_addrs) {
int k;
@ -1058,7 +1056,7 @@ static __init int qemu_print_iodc_data(struct device *lin_dev, void *data)
-static int print_one_device(struct device * dev, void * data)
+static __init int print_one_device(struct device * dev, void * data)
{
struct parisc_device * pdev = to_parisc_device(dev);

arch/riscv/kernel/process.c

@ -104,6 +104,8 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
{
struct pt_regs *childregs = task_pt_regs(p);
memset(&p->thread.s, 0, sizeof(p->thread.s));
/* p->thread holds context to be restored by __switch_to() */
if (unlikely(p->flags & PF_KTHREAD)) {
/* Kernel thread */

arch/riscv/kernel/vdso/Makefile

@ -17,6 +17,9 @@ obj-vdso := $(addprefix $(obj)/, $(obj-vdso))
obj-y += vdso.o vdso-syms.o
CPPFLAGS_vdso.lds += -P -C -U$(ARCH)
ifneq ($(filter vgettimeofday, $(vdso-syms)),)
CPPFLAGS_vdso.lds += -DHAS_VGETTIMEOFDAY
endif
# Disable gcov profiling for VDSO code
GCOV_PROFILE := n

arch/riscv/kernel/vdso/vdso.lds.S

@ -70,9 +70,11 @@ VERSION
LINUX_4.15 {
global:
__vdso_rt_sigreturn;
#ifdef HAS_VGETTIMEOFDAY
__vdso_gettimeofday;
__vdso_clock_gettime;
__vdso_clock_getres;
#endif
__vdso_getcpu;
__vdso_flush_icache;
local: *;

arch/s390/kernel/crash_dump.c

@ -45,7 +45,7 @@ struct save_area {
u64 fprs[16];
u32 fpc;
u32 prefix;
-u64 todpreg;
+u32 todpreg;
u64 timer;
u64 todcmp;
u64 vxrs_low[16];

arch/x86/entry/calling.h

@ -6,6 +6,8 @@
#include <asm/percpu.h>
#include <asm/asm-offsets.h>
#include <asm/processor-flags.h>
#include <asm/msr.h>
#include <asm/nospec-branch.h>
/*
@ -146,27 +148,19 @@ For 32-bit we have the following conventions - kernel is built with
.endm
-.macro POP_REGS pop_rdi=1 skip_r11rcx=0
+.macro POP_REGS pop_rdi=1
popq %r15
popq %r14
popq %r13
popq %r12
popq %rbp
popq %rbx
-.if \skip_r11rcx
-popq %rsi
-.else
 popq %r11
-.endif
popq %r10
popq %r9
popq %r8
popq %rax
-.if \skip_r11rcx
-popq %rsi
-.else
 popq %rcx
-.endif
popq %rdx
popq %rsi
.if \pop_rdi
@ -316,6 +310,62 @@ For 32-bit we have the following conventions - kernel is built with
#endif
/*
* IBRS kernel mitigation for Spectre_v2.
*
* Assumes full context is established (PUSH_REGS, CR3 and GS) and it clobbers
* the regs it uses (AX, CX, DX). Must be called before the first RET
* instruction (NOTE! UNTRAIN_RET includes a RET instruction)
*
* The optional argument is used to save/restore the current value,
* which is used on the paranoid paths.
*
* Assumes x86_spec_ctrl_{base,current} to have SPEC_CTRL_IBRS set.
*/
.macro IBRS_ENTER save_reg
ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_KERNEL_IBRS
movl $MSR_IA32_SPEC_CTRL, %ecx
.ifnb \save_reg
rdmsr
shl $32, %rdx
or %rdx, %rax
mov %rax, \save_reg
test $SPEC_CTRL_IBRS, %eax
jz .Ldo_wrmsr_\@
lfence
jmp .Lend_\@
.Ldo_wrmsr_\@:
.endif
movq PER_CPU_VAR(x86_spec_ctrl_current), %rdx
movl %edx, %eax
shr $32, %rdx
wrmsr
.Lend_\@:
.endm
/*
* Similar to IBRS_ENTER, requires KERNEL GS,CR3 and clobbers (AX, CX, DX)
* regs. Must be called after the last RET.
*/
.macro IBRS_EXIT save_reg
ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_KERNEL_IBRS
movl $MSR_IA32_SPEC_CTRL, %ecx
.ifnb \save_reg
mov \save_reg, %rdx
.else
movq PER_CPU_VAR(x86_spec_ctrl_current), %rdx
andl $(~SPEC_CTRL_IBRS), %edx
.endif
movl %edx, %eax
shr $32, %rdx
wrmsr
.Lend_\@:
.endm
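
In rough pseudo-C (a sketch for orientation, not code from the patch;
rdmsr/wrmsr/lfence stand in for the raw instructions and this_cpu() for the
per-CPU access), the two macros behave like:

	/* IBRS_ENTER [save_reg] */
	if (cpu_feature(X86_FEATURE_KERNEL_IBRS)) {
		if (have_save_reg) {
			saved = rdmsr(MSR_IA32_SPEC_CTRL);
			if (saved & SPEC_CTRL_IBRS) {
				lfence();	/* IBRS already on: just serialize */
				goto out;
			}
		}
		wrmsr(MSR_IA32_SPEC_CTRL, this_cpu(x86_spec_ctrl_current));
	}

	/* IBRS_EXIT [save_reg] */
	if (cpu_feature(X86_FEATURE_KERNEL_IBRS))
		wrmsr(MSR_IA32_SPEC_CTRL, have_save_reg ? saved
			: this_cpu(x86_spec_ctrl_current) & ~SPEC_CTRL_IBRS);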
/*
* Mitigate Spectre v1 for conditional swapgs code paths.
*

arch/x86/entry/entry_32.S

@ -643,7 +643,6 @@ ENTRY(__switch_to_asm)
movl %ebx, PER_CPU_VAR(stack_canary)+stack_canary_offset
#endif
-#ifdef CONFIG_RETPOLINE
/*
* When switching from a shallower to a deeper call stack
* the RSB may either underflow or use entries populated
@ -652,7 +651,6 @@ ENTRY(__switch_to_asm)
* speculative execution to prevent attack.
*/
FILL_RETURN_BUFFER %ebx, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
-#endif
/* restore callee-saved registers */
popfl

arch/x86/entry/entry_64.S

@ -235,6 +235,10 @@ GLOBAL(entry_SYSCALL_64_after_hwframe)
/* IRQs are off. */
movq %rax, %rdi
movq %rsp, %rsi
/* clobbers %rax, make sure it is after saving the syscall nr */
IBRS_ENTER
call do_syscall_64 /* returns with IRQs disabled */
TRACE_IRQS_IRETQ /* we're about to change IF */
@ -311,8 +315,8 @@ GLOBAL(entry_SYSCALL_64_after_hwframe)
* perf profiles. Nothing jumps here.
*/
syscall_return_via_sysret:
-/* rcx and r11 are already restored (see code above) */
-POP_REGS pop_rdi=0 skip_r11rcx=1
+IBRS_EXIT
+POP_REGS pop_rdi=0
/*
* Now all regs are restored except RSP and RDI.
@ -363,7 +367,6 @@ ENTRY(__switch_to_asm)
movq %rbx, PER_CPU_VAR(irq_stack_union)+stack_canary_offset
#endif
-#ifdef CONFIG_RETPOLINE
/*
* When switching from a shallower to a deeper call stack
* the RSB may either underflow or use entries populated
@ -372,7 +375,6 @@ ENTRY(__switch_to_asm)
* speculative execution to prevent attack.
*/
FILL_RETURN_BUFFER %r12, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
-#endif
/* restore callee-saved registers */
popfq
@ -685,6 +687,7 @@ GLOBAL(retint_user)
TRACE_IRQS_IRETQ
GLOBAL(swapgs_restore_regs_and_return_to_usermode)
IBRS_EXIT
#ifdef CONFIG_DEBUG_ENTRY
/* Assert that pt_regs indicates user mode. */
testb $3, CS(%rsp)
@ -1250,7 +1253,13 @@ ENTRY(paranoid_entry)
*/
FENCE_SWAPGS_KERNEL_ENTRY
-ret
+/*
+ * Once we have CR3 and %GS setup save and set SPEC_CTRL. Just like
+ * CR3 above, keep the old value in a callee saved register.
+ */
+IBRS_ENTER save_reg=%r15
+
+RET
END(paranoid_entry)
/*
@ -1278,12 +1287,20 @@ ENTRY(paranoid_exit)
jmp .Lparanoid_exit_restore
.Lparanoid_exit_no_swapgs:
TRACE_IRQS_IRETQ_DEBUG
/*
* Must restore IBRS state before both CR3 and %GS since we need access
* to the per-CPU x86_spec_ctrl_shadow variable.
*/
IBRS_EXIT save_reg=%r15
/* Always restore stashed CR3 value (see paranoid_entry) */
RESTORE_CR3 scratch_reg=%rbx save_reg=%r14
.Lparanoid_exit_restore:
jmp restore_regs_and_return_to_kernel
END(paranoid_exit)
/*
* Save all registers in pt_regs, and switch GS if needed.
*/
@ -1303,6 +1320,7 @@ ENTRY(error_entry)
FENCE_SWAPGS_USER_ENTRY
/* We have user CR3. Change to kernel CR3. */
SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
IBRS_ENTER
.Lerror_entry_from_usermode_after_swapgs:
/* Put us onto the real thread stack. */
@ -1367,6 +1385,7 @@ ENTRY(error_entry)
SWAPGS
FENCE_SWAPGS_USER_ENTRY
SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
IBRS_ENTER
/*
* Pretend that the exception came from user mode: set up pt_regs
@ -1472,6 +1491,8 @@ ENTRY(nmi)
PUSH_AND_CLEAR_REGS rdx=(%rdx)
ENCODE_FRAME_POINTER
IBRS_ENTER
/*
* At this point we no longer need to worry about stack damage
* due to nesting -- we're on the normal thread stack and we're
@ -1695,6 +1716,9 @@ end_repeat_nmi:
movq $-1, %rsi
call do_nmi
/* Always restore stashed SPEC_CTRL value (see paranoid_entry) */
IBRS_EXIT save_reg=%r15
/* Always restore stashed CR3 value (see paranoid_entry) */
RESTORE_CR3 scratch_reg=%r15 save_reg=%r14

arch/x86/entry/entry_64_compat.S

@ -4,7 +4,6 @@
*
* Copyright 2000-2002 Andi Kleen, SuSE Labs.
*/
#include "calling.h"
#include <asm/asm-offsets.h>
#include <asm/current.h>
#include <asm/errno.h>
@ -17,6 +16,8 @@
#include <linux/linkage.h>
#include <linux/err.h>
#include "calling.h"
.section .entry.text, "ax"
/*
@ -106,6 +107,8 @@ ENTRY(entry_SYSENTER_compat)
xorl %r15d, %r15d /* nospec r15 */
cld
IBRS_ENTER
/*
* SYSENTER doesn't filter flags, so we need to clear NT and AC
* ourselves. To save a few cycles, we can check whether
@ -253,6 +256,8 @@ GLOBAL(entry_SYSCALL_compat_after_hwframe)
*/
TRACE_IRQS_OFF
IBRS_ENTER
movq %rsp, %rdi
call do_fast_syscall_32
/* XEN PV guests always use IRET path */
@ -262,6 +267,9 @@ GLOBAL(entry_SYSCALL_compat_after_hwframe)
/* Opportunistic SYSRET */
sysret32_from_system_call:
TRACE_IRQS_ON /* User mode traces as IRQs on. */
IBRS_EXIT
movq RBX(%rsp), %rbx /* pt_regs->rbx */
movq RBP(%rsp), %rbp /* pt_regs->rbp */
movq EFLAGS(%rsp), %r11 /* pt_regs->flags (in r11) */
@ -403,6 +411,7 @@ ENTRY(entry_INT80_compat)
* gate turned them off.
*/
TRACE_IRQS_OFF
IBRS_ENTER
movq %rsp, %rdi
call do_int80_syscall_32

arch/x86/include/asm/cpu_device_id.h

@ -1,16 +1,26 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _CPU_DEVICE_ID
-#define _CPU_DEVICE_ID 1
+#ifndef _ASM_X86_CPU_DEVICE_ID
+#define _ASM_X86_CPU_DEVICE_ID
/*
* Declare drivers belonging to specific x86 CPUs
* Similar in spirit to pci_device_id and related PCI functions
*
* The wildcard initializers are in mod_devicetable.h because
* file2alias needs them. Sigh.
*/
#include <linux/mod_devicetable.h>
/* Get the INTEL_FAM* model defines */
#include <asm/intel-family.h>
/* And the X86_VENDOR_* ones */
#include <asm/processor.h>
/* Centaur FAM6 models */
#define X86_CENTAUR_FAM6_C7_A 0xa
#define X86_CENTAUR_FAM6_C7_D 0xd
#define X86_CENTAUR_FAM6_NANO 0xf
#define X86_STEPPINGS(mins, maxs) GENMASK(maxs, mins)
/**
* X86_MATCH_VENDOR_FAM_MODEL_STEPPINGS_FEATURE - Base macro for CPU matching
* @_vendor: The vendor name, e.g. INTEL, AMD, HYGON, ..., ANY
@ -23,8 +33,11 @@
* format is unsigned long. The supplied value, pointer
* etc. is casted to unsigned long internally.
*
- * Backport version to keep the SRBDS pile consistant. No shorter variants
- * required for this.
+ * Use only if you need all selectors. Otherwise use one of the shorter
+ * macros of the X86_MATCH_* family. If there is no matching shorthand
+ * macro, consider to add one. If you really need to wrap one of the macros
+ * into another macro at the usage site for good reasons, then please
+ * start this local macro with X86_MATCH to allow easy grepping.
*/
#define X86_MATCH_VENDOR_FAM_MODEL_STEPPINGS_FEATURE(_vendor, _family, _model, \
_steppings, _feature, _data) { \
@ -36,6 +49,147 @@
.driver_data = (unsigned long) _data \
}
-extern const struct x86_cpu_id *x86_match_cpu(const struct x86_cpu_id *match);
/**
* X86_MATCH_VENDOR_FAM_MODEL_FEATURE - Macro for CPU matching
* @_vendor: The vendor name, e.g. INTEL, AMD, HYGON, ..., ANY
* The name is expanded to X86_VENDOR_@_vendor
* @_family: The family number or X86_FAMILY_ANY
* @_model: The model number, model constant or X86_MODEL_ANY
* @_feature: A X86_FEATURE bit or X86_FEATURE_ANY
* @_data: Driver specific data or NULL. The internal storage
* format is unsigned long. The supplied value, pointer
* etc. is casted to unsigned long internally.
*
* The steppings arguments of X86_MATCH_VENDOR_FAM_MODEL_STEPPINGS_FEATURE() is
* set to wildcards.
*/
#define X86_MATCH_VENDOR_FAM_MODEL_FEATURE(vendor, family, model, feature, data) \
X86_MATCH_VENDOR_FAM_MODEL_STEPPINGS_FEATURE(vendor, family, model, \
X86_STEPPING_ANY, feature, data)
-#endif
/**
* X86_MATCH_VENDOR_FAM_FEATURE - Macro for matching vendor, family and CPU feature
* @vendor: The vendor name, e.g. INTEL, AMD, HYGON, ..., ANY
* The name is expanded to X86_VENDOR_@vendor
* @family: The family number or X86_FAMILY_ANY
* @feature: A X86_FEATURE bit
* @data: Driver specific data or NULL. The internal storage
* format is unsigned long. The supplied value, pointer
* etc. is casted to unsigned long internally.
*
* All other missing arguments of X86_MATCH_VENDOR_FAM_MODEL_FEATURE() are
* set to wildcards.
*/
#define X86_MATCH_VENDOR_FAM_FEATURE(vendor, family, feature, data) \
X86_MATCH_VENDOR_FAM_MODEL_FEATURE(vendor, family, \
X86_MODEL_ANY, feature, data)
/**
* X86_MATCH_VENDOR_FEATURE - Macro for matching vendor and CPU feature
* @vendor: The vendor name, e.g. INTEL, AMD, HYGON, ..., ANY
* The name is expanded to X86_VENDOR_@vendor
* @feature: A X86_FEATURE bit
* @data: Driver specific data or NULL. The internal storage
* format is unsigned long. The supplied value, pointer
* etc. is casted to unsigned long internally.
*
* All other missing arguments of X86_MATCH_VENDOR_FAM_MODEL_FEATURE() are
* set to wildcards.
*/
#define X86_MATCH_VENDOR_FEATURE(vendor, feature, data) \
X86_MATCH_VENDOR_FAM_FEATURE(vendor, X86_FAMILY_ANY, feature, data)
/**
* X86_MATCH_FEATURE - Macro for matching a CPU feature
* @feature: A X86_FEATURE bit
* @data: Driver specific data or NULL. The internal storage
* format is unsigned long. The supplied value, pointer
* etc. is casted to unsigned long internally.
*
* All other missing arguments of X86_MATCH_VENDOR_FAM_MODEL_FEATURE() are
* set to wildcards.
*/
#define X86_MATCH_FEATURE(feature, data) \
X86_MATCH_VENDOR_FEATURE(ANY, feature, data)
/* Transitional to keep the existing code working */
#define X86_FEATURE_MATCH(feature) X86_MATCH_FEATURE(feature, NULL)
/**
* X86_MATCH_VENDOR_FAM_MODEL - Match vendor, family and model
* @vendor: The vendor name, e.g. INTEL, AMD, HYGON, ..., ANY
* The name is expanded to X86_VENDOR_@vendor
* @family: The family number or X86_FAMILY_ANY
* @model: The model number, model constant or X86_MODEL_ANY
* @data: Driver specific data or NULL. The internal storage
* format is unsigned long. The supplied value, pointer
* etc. is casted to unsigned long internally.
*
* All other missing arguments of X86_MATCH_VENDOR_FAM_MODEL_FEATURE() are
* set to wildcards.
*/
#define X86_MATCH_VENDOR_FAM_MODEL(vendor, family, model, data) \
X86_MATCH_VENDOR_FAM_MODEL_FEATURE(vendor, family, model, \
X86_FEATURE_ANY, data)
/**
* X86_MATCH_VENDOR_FAM - Match vendor and family
* @vendor: The vendor name, e.g. INTEL, AMD, HYGON, ..., ANY
* The name is expanded to X86_VENDOR_@vendor
* @family: The family number or X86_FAMILY_ANY
* @data: Driver specific data or NULL. The internal storage
* format is unsigned long. The supplied value, pointer
* etc. is casted to unsigned long internally.
*
* All other missing arguments to X86_MATCH_VENDOR_FAM_MODEL_FEATURE() are
* set of wildcards.
*/
#define X86_MATCH_VENDOR_FAM(vendor, family, data) \
X86_MATCH_VENDOR_FAM_MODEL(vendor, family, X86_MODEL_ANY, data)
/**
* X86_MATCH_INTEL_FAM6_MODEL - Match vendor INTEL, family 6 and model
* @model: The model name without the INTEL_FAM6_ prefix or ANY
* The model name is expanded to INTEL_FAM6_@model internally
* @data: Driver specific data or NULL. The internal storage
* format is unsigned long. The supplied value, pointer
* etc. is casted to unsigned long internally.
*
* The vendor is set to INTEL, the family to 6 and all other missing
* arguments of X86_MATCH_VENDOR_FAM_MODEL_FEATURE() are set to wildcards.
*
* See X86_MATCH_VENDOR_FAM_MODEL_FEATURE() for further information.
*/
#define X86_MATCH_INTEL_FAM6_MODEL(model, data) \
X86_MATCH_VENDOR_FAM_MODEL(INTEL, 6, INTEL_FAM6_##model, data)
/*
* Match specific microcode revisions.
*
* vendor/family/model/stepping must be all set.
*
* Only checks against the boot CPU. When mixed-stepping configs are
* valid for a CPU model, add a quirk for every valid stepping and
* do the fine-tuning in the quirk handler.
*/
struct x86_cpu_desc {
u8 x86_family;
u8 x86_vendor;
u8 x86_model;
u8 x86_stepping;
u32 x86_microcode_rev;
};
#define INTEL_CPU_DESC(model, stepping, revision) { \
.x86_family = 6, \
.x86_vendor = X86_VENDOR_INTEL, \
.x86_model = (model), \
.x86_stepping = (stepping), \
.x86_microcode_rev = (revision), \
}
extern const struct x86_cpu_id *x86_match_cpu(const struct x86_cpu_id *match);
extern bool x86_cpu_has_min_microcode_rev(const struct x86_cpu_desc *table);
#endif /* _ASM_X86_CPU_DEVICE_ID */
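
A hedged usage sketch of the interfaces this header now declares; the driver
table, microcode revision and init function below are hypothetical, only the
macros, the struct and the two extern functions come from the header:

	static const struct x86_cpu_id example_cpu_ids[] = {
		X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X, NULL),
		X86_MATCH_VENDOR_FAM(AMD, 0x17, NULL),	/* any family-0x17 part */
		{}
	};

	static const struct x86_cpu_desc example_min_ucode[] = {
		INTEL_CPU_DESC(0x55, 4, 0x02000065),	/* hypothetical revision */
		{}
	};

	static int __init example_init(void)
	{
		if (!x86_match_cpu(example_cpu_ids))
			return -ENODEV;
		if (!x86_cpu_has_min_microcode_rev(example_min_ucode))
			return -ENODEV;
		return 0;
	}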

arch/x86/include/asm/cpufeatures.h

@ -202,8 +202,8 @@
#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
#define X86_FEATURE_SME ( 7*32+10) /* AMD Secure Memory Encryption */
#define X86_FEATURE_PTI ( 7*32+11) /* Kernel Page Table Isolation enabled */
-#define X86_FEATURE_RETPOLINE ( 7*32+12) /* "" Generic Retpoline mitigation for Spectre variant 2 */
-#define X86_FEATURE_RETPOLINE_LFENCE ( 7*32+13) /* "" Use LFENCE for Spectre variant 2 */
+#define X86_FEATURE_KERNEL_IBRS ( 7*32+12) /* "" Set/clear IBRS on kernel entry/exit */
+#define X86_FEATURE_RSB_VMEXIT ( 7*32+13) /* "" Fill RSB on VM-Exit */
#define X86_FEATURE_INTEL_PPIN ( 7*32+14) /* Intel Processor Inventory Number */
#define X86_FEATURE_CDP_L2 ( 7*32+15) /* Code and Data Prioritization L2 */
#define X86_FEATURE_MSR_SPEC_CTRL ( 7*32+16) /* "" MSR SPEC_CTRL is implemented */
@ -283,7 +283,16 @@
#define X86_FEATURE_CQM_MBM_LOCAL (11*32+ 3) /* LLC Local MBM monitoring */
#define X86_FEATURE_FENCE_SWAPGS_USER (11*32+ 4) /* "" LFENCE in user entry SWAPGS path */
#define X86_FEATURE_FENCE_SWAPGS_KERNEL (11*32+ 5) /* "" LFENCE in kernel entry SWAPGS path */
-#define X86_FEATURE_RSB_VMEXIT_LITE (11*32+ 6) /* "" Fill RSB on VM exit when EIBRS is enabled */
+/* FREE! (11*32+ 6) */
+/* FREE! (11*32+ 7) */
+/* FREE! (11*32+ 8) */
+/* FREE! (11*32+ 9) */
+/* FREE! (11*32+10) */
+#define X86_FEATURE_RRSBA_CTRL (11*32+11) /* "" RET prediction control */
+#define X86_FEATURE_RETPOLINE (11*32+12) /* "" Generic Retpoline mitigation for Spectre variant 2 */
+#define X86_FEATURE_RETPOLINE_LFENCE (11*32+13) /* "" Use LFENCE for Spectre variant 2 */
+#define X86_FEATURE_RSB_VMEXIT_LITE (11*32+17) /* "" Fill RSB on VM exit when EIBRS is enabled */
+#define X86_FEATURE_MSR_TSX_CTRL (11*32+18) /* "" MSR IA32_TSX_CTRL (Intel) implemented */
/* AMD-defined CPU features, CPUID level 0x80000008 (EBX), word 13 */
#define X86_FEATURE_CLZERO (13*32+ 0) /* CLZERO instruction */
@ -296,6 +305,7 @@
#define X86_FEATURE_AMD_SSBD (13*32+24) /* "" Speculative Store Bypass Disable */
#define X86_FEATURE_VIRT_SSBD (13*32+25) /* Virtualized Speculative Store Bypass Disable */
#define X86_FEATURE_AMD_SSB_NO (13*32+26) /* "" Speculative Store Bypass is fixed in hardware. */
#define X86_FEATURE_BTC_NO (13*32+29) /* "" Not vulnerable to Branch Type Confusion */
/* Thermal and Power Management Leaf, CPUID level 0x00000006 (EAX), word 14 */
#define X86_FEATURE_DTHERM (14*32+ 0) /* Digital Thermal Sensor */
@ -397,6 +407,7 @@
#define X86_BUG_SRBDS X86_BUG(24) /* CPU may leak RNG bits if not mitigated */
#define X86_BUG_MMIO_STALE_DATA X86_BUG(25) /* CPU is affected by Processor MMIO Stale Data vulnerabilities */
#define X86_BUG_MMIO_UNKNOWN X86_BUG(26) /* CPU is too old and its MMIO Stale Data status is unknown */
-#define X86_BUG_EIBRS_PBRSB X86_BUG(27) /* EIBRS is vulnerable to Post Barrier RSB Predictions */
+#define X86_BUG_RETBLEED X86_BUG(27) /* CPU is affected by RETBleed */
+#define X86_BUG_EIBRS_PBRSB X86_BUG(28) /* EIBRS is vulnerable to Post Barrier RSB Predictions */
#endif /* _ASM_X86_CPUFEATURES_H */
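
For orientation (illustrative, not part of the patch): every X86_FEATURE_* and
X86_BUG_* value above encodes "capability word * 32 + bit", which is why the
renumbering moves flags between words without changing the macro shape:

	/* Illustration of the encoding, e.g. X86_FEATURE_KERNEL_IBRS = 7*32+12 */
	#define EXAMPLE_FEATURE_WORD(f)	((f) / 32)	/* index into x86_capability[] */
	#define EXAMPLE_FEATURE_BIT(f)	((f) % 32)	/* bit inside that word */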

arch/x86/include/asm/intel-family.h

@ -13,6 +13,9 @@
* that group keep the CPUID for the variants sorted by model number.
*/
/* Wildcard match for FAM6 so X86_MATCH_INTEL_FAM6_MODEL(ANY) works */
#define INTEL_FAM6_ANY X86_MODEL_ANY
#define INTEL_FAM6_CORE_YONAH 0x0E
#define INTEL_FAM6_CORE2_MEROM 0x0F
@ -101,6 +104,9 @@
#define INTEL_FAM6_XEON_PHI_KNL 0x57 /* Knights Landing */
#define INTEL_FAM6_XEON_PHI_KNM 0x85 /* Knights Mill */
/* Family 5 */
#define INTEL_FAM5_QUARK_X1000 0x09 /* Quark X1000 SoC */
/* Useful macros */
#define INTEL_CPU_FAM_ANY(_family, _model, _driver_data) \
{ \

arch/x86/include/asm/msr-index.h

@ -47,6 +47,8 @@
#define SPEC_CTRL_STIBP BIT(SPEC_CTRL_STIBP_SHIFT) /* STIBP mask */
#define SPEC_CTRL_SSBD_SHIFT 2 /* Speculative Store Bypass Disable bit */
#define SPEC_CTRL_SSBD BIT(SPEC_CTRL_SSBD_SHIFT) /* Speculative Store Bypass Disable */
#define SPEC_CTRL_RRSBA_DIS_S_SHIFT 6 /* Disable RRSBA behavior */
#define SPEC_CTRL_RRSBA_DIS_S BIT(SPEC_CTRL_RRSBA_DIS_S_SHIFT)
#define MSR_IA32_PRED_CMD 0x00000049 /* Prediction Command */
#define PRED_CMD_IBPB BIT(0) /* Indirect Branch Prediction Barrier */
@ -73,6 +75,7 @@
#define MSR_IA32_ARCH_CAPABILITIES 0x0000010a
#define ARCH_CAP_RDCL_NO BIT(0) /* Not susceptible to Meltdown */
#define ARCH_CAP_IBRS_ALL BIT(1) /* Enhanced IBRS support */
#define ARCH_CAP_RSBA BIT(2) /* RET may use alternative branch predictors */
#define ARCH_CAP_SKIP_VMENTRY_L1DFLUSH BIT(3) /* Skip L1D flush on vmentry */
#define ARCH_CAP_SSB_NO BIT(4) /*
* Not susceptible to Speculative Store Bypass
@ -120,6 +123,13 @@
* bit available to control VERW
* behavior.
*/
#define ARCH_CAP_RRSBA BIT(19) /*
* Indicates RET may use predictors
* other than the RSB. With eIBRS
* enabled predictions in kernel mode
* are restricted to targets in
* kernel.
*/
#define ARCH_CAP_PBRSB_NO BIT(24) /*
* Not susceptible to Post-Barrier
* Return Stack Buffer Predictions.
@ -389,6 +399,11 @@
#define MSR_AMD64_OSVW_STATUS 0xc0010141
#define MSR_AMD64_LS_CFG 0xc0011020
#define MSR_AMD64_DC_CFG 0xc0011022
#define MSR_AMD64_DE_CFG 0xc0011029
#define MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT 1
#define MSR_AMD64_DE_CFG_LFENCE_SERIALIZE BIT_ULL(MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT)
#define MSR_AMD64_BU_CFG2 0xc001102a
#define MSR_AMD64_IBSFETCHCTL 0xc0011030
#define MSR_AMD64_IBSFETCHLINAD 0xc0011031
@ -457,9 +472,6 @@
#define FAM10H_MMIO_CONF_BASE_MASK 0xfffffffULL
#define FAM10H_MMIO_CONF_BASE_SHIFT 20
#define MSR_FAM10H_NODE_ID 0xc001100c
-#define MSR_F10H_DECFG 0xc0011029
-#define MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT 1
-#define MSR_F10H_DECFG_LFENCE_SERIALIZE BIT_ULL(MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT)
/* K8 MSRs */
#define MSR_K8_TOP_MEM1 0xc001001a

arch/x86/include/asm/nospec-branch.h

@ -9,6 +9,7 @@
#include <asm/alternative-asm.h>
#include <asm/cpufeatures.h>
#include <asm/msr-index.h>
#include <asm/percpu.h>
/*
* Fill the CPU return stack buffer.
@ -70,12 +71,11 @@
add $(BITS_PER_LONG/8) * nr, sp;
#endif
-/* Sequence to mitigate PBRSB on eIBRS CPUs */
-#define __ISSUE_UNBALANCED_RET_GUARD(sp) \
-call 881f; \
+#define ISSUE_UNBALANCED_RET_GUARD(sp) \
+call 992f; \
 int3; \
-881: \
-add $(BITS_PER_LONG/8), sp; \
+992: \
+add $(BITS_PER_LONG/8), sp; \
 lfence;
#ifdef __ASSEMBLY__
@ -165,13 +165,11 @@
* monstrosity above, manually.
*/
 .macro FILL_RETURN_BUFFER reg:req nr:req ftr:req
-#ifdef CONFIG_RETPOLINE
 ANNOTATE_NOSPEC_ALTERNATIVE
 ALTERNATIVE "jmp .Lskip_rsb_\@", \
 __stringify(__FILL_RETURN_BUFFER(\reg,\nr,%_ASM_SP)) \
 \ftr
 .Lskip_rsb_\@:
-#endif
 .endm
#else /* __ASSEMBLY__ */
@ -252,6 +250,7 @@ enum spectre_v2_mitigation {
SPECTRE_V2_EIBRS,
SPECTRE_V2_EIBRS_RETPOLINE,
SPECTRE_V2_EIBRS_LFENCE,
SPECTRE_V2_IBRS,
};
/* The indirect branch speculation control variants */
@ -280,26 +279,21 @@ extern char __indirect_thunk_end[];
* retpoline and IBRS mitigations for Spectre v2 need this; only on future
* CPUs with IBRS_ALL *might* it be avoided.
*/
-static inline void vmexit_fill_RSB(void)
+static __always_inline void vmexit_fill_RSB(void)
 {
-#ifdef CONFIG_RETPOLINE
 unsigned long loops;
 asm volatile (ANNOTATE_NOSPEC_ALTERNATIVE
-ALTERNATIVE("jmp 910f",
-__stringify(__FILL_RETURN_BUFFER(%0, RSB_CLEAR_LOOPS, %1)),
-X86_FEATURE_RETPOLINE)
+ALTERNATIVE_2("jmp 910f", "", X86_FEATURE_RSB_VMEXIT,
+"jmp 911f", X86_FEATURE_RSB_VMEXIT_LITE)
+__stringify(__FILL_RETURN_BUFFER(%0, RSB_CLEAR_LOOPS, %1))
+"911:"
+__stringify(ISSUE_UNBALANCED_RET_GUARD(%1))
 "910:"
 : "=r" (loops), ASM_CALL_CONSTRAINT
 : : "memory" );
-#endif
-asm volatile (ANNOTATE_NOSPEC_ALTERNATIVE
-ALTERNATIVE("jmp 920f",
-__stringify(__ISSUE_UNBALANCED_RET_GUARD(%0)),
-X86_FEATURE_RSB_VMEXIT_LITE)
-"920:"
-: ASM_CALL_CONSTRAINT
-: : "memory" );
 }
static __always_inline
@ -322,6 +316,9 @@ static inline void indirect_branch_prediction_barrier(void)
/* The Intel SPEC CTRL MSR base value cache */
extern u64 x86_spec_ctrl_base;
DECLARE_PER_CPU(u64, x86_spec_ctrl_current);
extern void update_spec_ctrl_cond(u64 val);
extern u64 spec_ctrl_current(void);
/*
* With retpoline, we must use IBRS to restrict branch prediction
@ -331,18 +328,16 @@ extern u64 x86_spec_ctrl_base;
*/
 #define firmware_restrict_branch_speculation_start() \
 do { \
-u64 val = x86_spec_ctrl_base | SPEC_CTRL_IBRS; \
-\
 preempt_disable(); \
-alternative_msr_write(MSR_IA32_SPEC_CTRL, val, \
+alternative_msr_write(MSR_IA32_SPEC_CTRL, \
+spec_ctrl_current() | SPEC_CTRL_IBRS, \
 X86_FEATURE_USE_IBRS_FW); \
 } while (0)
 #define firmware_restrict_branch_speculation_end() \
 do { \
-u64 val = x86_spec_ctrl_base; \
-\
-alternative_msr_write(MSR_IA32_SPEC_CTRL, val, \
+alternative_msr_write(MSR_IA32_SPEC_CTRL, \
+spec_ctrl_current(), \
 X86_FEATURE_USE_IBRS_FW); \
 preempt_enable(); \
 } while (0)
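
The expected call pattern for the pair above is to bracket firmware calls; a
sketch (the firmware call shown is hypothetical):

	firmware_restrict_branch_speculation_start();	/* IBRS on, preemption off */
	status = some_efi_runtime_call();		/* hypothetical firmware call */
	firmware_restrict_branch_speculation_end();	/* restore spec_ctrl_current() */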

arch/x86/kernel/cpu/amd.c

@ -789,8 +789,6 @@ static void init_amd_gh(struct cpuinfo_x86 *c)
set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH);
}
-#define MSR_AMD64_DE_CFG 0xC0011029
static void init_amd_ln(struct cpuinfo_x86 *c)
{
/*
@ -885,12 +883,21 @@ static void init_amd_zn(struct cpuinfo_x86 *c)
{
set_cpu_cap(c, X86_FEATURE_ZEN);
-/*
- * Fix erratum 1076: CPB feature bit not being set in CPUID.
- * Always set it, except when running under a hypervisor.
- */
-if (!cpu_has(c, X86_FEATURE_HYPERVISOR) && !cpu_has(c, X86_FEATURE_CPB))
-set_cpu_cap(c, X86_FEATURE_CPB);
+/* Fix up CPUID bits, but only if not virtualised. */
+if (!cpu_has(c, X86_FEATURE_HYPERVISOR)) {
+/* Erratum 1076: CPB feature bit not being set in CPUID. */
+if (!cpu_has(c, X86_FEATURE_CPB))
+set_cpu_cap(c, X86_FEATURE_CPB);
+/*
+ * Zen3 (Fam19 model < 0x10) parts are not susceptible to
+ * Branch Type Confusion, but predate the allocation of the
+ * BTC_NO bit.
+ */
+if (c->x86 == 0x19 && !cpu_has(c, X86_FEATURE_BTC_NO))
+set_cpu_cap(c, X86_FEATURE_BTC_NO);
+}
}
static void init_amd(struct cpuinfo_x86 *c)
@ -951,16 +958,16 @@ static void init_amd(struct cpuinfo_x86 *c)
* msr_set_bit() uses the safe accessors, too, even if the MSR
* is not present.
*/
-msr_set_bit(MSR_F10H_DECFG,
-MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT);
+msr_set_bit(MSR_AMD64_DE_CFG,
+MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT);
/*
* Verify that the MSR write was successful (could be running
* under a hypervisor) and only then assume that LFENCE is
* serializing.
*/
ret = rdmsrl_safe(MSR_F10H_DECFG, &val);
if (!ret && (val & MSR_F10H_DECFG_LFENCE_SERIALIZE)) {
ret = rdmsrl_safe(MSR_AMD64_DE_CFG, &val);
if (!ret && (val & MSR_AMD64_DE_CFG_LFENCE_SERIALIZE)) {
/* A serializing LFENCE stops RDTSC speculation */
set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
} else {


@ -37,6 +37,8 @@
static void __init spectre_v1_select_mitigation(void);
static void __init spectre_v2_select_mitigation(void);
static void __init retbleed_select_mitigation(void);
static void __init spectre_v2_user_select_mitigation(void);
static void __init ssb_select_mitigation(void);
static void __init l1tf_select_mitigation(void);
static void __init mds_select_mitigation(void);
@ -46,16 +48,47 @@ static void __init taa_select_mitigation(void);
static void __init mmio_select_mitigation(void);
static void __init srbds_select_mitigation(void);
/* The base value of the SPEC_CTRL MSR that always has to be preserved. */
/* The base value of the SPEC_CTRL MSR without task-specific bits set */
u64 x86_spec_ctrl_base;
EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);
/* The current value of the SPEC_CTRL MSR with task-specific bits set */
DEFINE_PER_CPU(u64, x86_spec_ctrl_current);
EXPORT_SYMBOL_GPL(x86_spec_ctrl_current);
static DEFINE_MUTEX(spec_ctrl_mutex);
/* Update SPEC_CTRL MSR and its cached copy unconditionally */
static void update_spec_ctrl(u64 val)
{
this_cpu_write(x86_spec_ctrl_current, val);
wrmsrl(MSR_IA32_SPEC_CTRL, val);
}
/*
* The vendor and possibly platform specific bits which can be modified in
* x86_spec_ctrl_base.
* Keep track of the SPEC_CTRL MSR value for the current task, which may differ
* from x86_spec_ctrl_base due to STIBP/SSB in __speculation_ctrl_update().
*/
static u64 __ro_after_init x86_spec_ctrl_mask = SPEC_CTRL_IBRS;
void update_spec_ctrl_cond(u64 val)
{
if (this_cpu_read(x86_spec_ctrl_current) == val)
return;
this_cpu_write(x86_spec_ctrl_current, val);
/*
* When KERNEL_IBRS this MSR is written on return-to-user, unless
* forced the update can be delayed until that time.
*/
if (!cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS))
wrmsrl(MSR_IA32_SPEC_CTRL, val);
}
u64 spec_ctrl_current(void)
{
return this_cpu_read(x86_spec_ctrl_current);
}
EXPORT_SYMBOL_GPL(spec_ctrl_current);
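
A self-contained userspace sketch of the write-elision pattern implemented by update_spec_ctrl_cond(): skip the MSR write when the cached per-CPU value already matches, and defer it entirely under kernel IBRS since the return-to-user path rewrites the MSR anyway. wrmsrl() and the cache below are stand-ins for the kernel primitives.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t spec_ctrl_cache;   /* stands in for x86_spec_ctrl_current */
static bool kernel_ibrs;           /* stands in for X86_FEATURE_KERNEL_IBRS */

static void wrmsrl(uint32_t msr, uint64_t val)
{
        printf("wrmsr 0x%x <- 0x%llx\n", msr, (unsigned long long)val);
}

static void update_spec_ctrl_cond_sketch(uint64_t val)
{
        if (spec_ctrl_cache == val)
                return;                 /* unchanged: elide the MSR write */
        spec_ctrl_cache = val;
        if (!kernel_ibrs)               /* else deferred to return-to-user */
                wrmsrl(0x48, val);      /* 0x48 == MSR_IA32_SPEC_CTRL */
}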
/*
* AMD specific MSR info for Speculative Store Bypass control.
@ -105,13 +138,21 @@ void __init check_bugs(void)
if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
/* Allow STIBP in MSR_SPEC_CTRL if supported */
if (boot_cpu_has(X86_FEATURE_STIBP))
x86_spec_ctrl_mask |= SPEC_CTRL_STIBP;
/* Select the proper CPU mitigations before patching alternatives: */
spectre_v1_select_mitigation();
spectre_v2_select_mitigation();
/*
* retbleed_select_mitigation() relies on the state set by
* spectre_v2_select_mitigation(); specifically it wants to know about
* spectre_v2=ibrs.
*/
retbleed_select_mitigation();
/*
* spectre_v2_user_select_mitigation() relies on the state set by
* retbleed_select_mitigation(); specifically the STIBP selection is
* forced for UNRET.
*/
spectre_v2_user_select_mitigation();
ssb_select_mitigation();
l1tf_select_mitigation();
md_clear_select_mitigation();
@ -151,31 +192,17 @@ void __init check_bugs(void)
#endif
}
/*
* NOTE: For VMX, this function is not called in the vmexit path.
* It uses vmx_spec_ctrl_restore_host() instead.
*/
void
x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
{
u64 msrval, guestval, hostval = x86_spec_ctrl_base;
u64 msrval, guestval = guest_spec_ctrl, hostval = spec_ctrl_current();
struct thread_info *ti = current_thread_info();
/* Is MSR_SPEC_CTRL implemented ? */
if (static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) {
/*
* Restrict guest_spec_ctrl to supported values. Clear the
* modifiable bits in the host base value and or the
* modifiable bits from the guest value.
*/
guestval = hostval & ~x86_spec_ctrl_mask;
guestval |= guest_spec_ctrl & x86_spec_ctrl_mask;
/* SSBD controlled in MSR_SPEC_CTRL */
if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
static_cpu_has(X86_FEATURE_AMD_SSBD))
hostval |= ssbd_tif_to_spec_ctrl(ti->flags);
/* Conditional STIBP enabled? */
if (static_branch_unlikely(&switch_to_cond_stibp))
hostval |= stibp_tif_to_spec_ctrl(ti->flags);
if (hostval != guestval) {
msrval = setguest ? guestval : hostval;
wrmsrl(MSR_IA32_SPEC_CTRL, msrval);
@ -705,12 +732,103 @@ static int __init nospectre_v1_cmdline(char *str)
}
early_param("nospectre_v1", nospectre_v1_cmdline);
#undef pr_fmt
#define pr_fmt(fmt) "Spectre V2 : " fmt
static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
SPECTRE_V2_NONE;
#undef pr_fmt
#define pr_fmt(fmt) "RETBleed: " fmt
enum retbleed_mitigation {
RETBLEED_MITIGATION_NONE,
RETBLEED_MITIGATION_IBRS,
RETBLEED_MITIGATION_EIBRS,
};
enum retbleed_mitigation_cmd {
RETBLEED_CMD_OFF,
RETBLEED_CMD_AUTO,
};
const char * const retbleed_strings[] = {
[RETBLEED_MITIGATION_NONE] = "Vulnerable",
[RETBLEED_MITIGATION_IBRS] = "Mitigation: IBRS",
[RETBLEED_MITIGATION_EIBRS] = "Mitigation: Enhanced IBRS",
};
static enum retbleed_mitigation retbleed_mitigation __ro_after_init =
RETBLEED_MITIGATION_NONE;
static enum retbleed_mitigation_cmd retbleed_cmd __ro_after_init =
RETBLEED_CMD_AUTO;
static int __init retbleed_parse_cmdline(char *str)
{
if (!str)
return -EINVAL;
if (!strcmp(str, "off"))
retbleed_cmd = RETBLEED_CMD_OFF;
else if (!strcmp(str, "auto"))
retbleed_cmd = RETBLEED_CMD_AUTO;
else
pr_err("Unknown retbleed option (%s). Defaulting to 'auto'\n", str);
return 0;
}
early_param("retbleed", retbleed_parse_cmdline);
#define RETBLEED_UNTRAIN_MSG "WARNING: BTB untrained return thunk mitigation is only effective on AMD/Hygon!\n"
#define RETBLEED_COMPILER_MSG "WARNING: kernel not compiled with RETPOLINE or -mfunction-return capable compiler!\n"
#define RETBLEED_INTEL_MSG "WARNING: Spectre v2 mitigation leaves CPU vulnerable to RETBleed attacks, data leaks possible!\n"
static void __init retbleed_select_mitigation(void)
{
if (!boot_cpu_has_bug(X86_BUG_RETBLEED) || cpu_mitigations_off())
return;
switch (retbleed_cmd) {
case RETBLEED_CMD_OFF:
return;
case RETBLEED_CMD_AUTO:
default:
/*
* The Intel mitigation (IBRS) was already selected in
* spectre_v2_select_mitigation().
*/
break;
}
switch (retbleed_mitigation) {
default:
break;
}
/*
* Let IBRS trump all on Intel without overriding the effect of the
* retbleed= cmdline option.
*/
if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
switch (spectre_v2_enabled) {
case SPECTRE_V2_IBRS:
retbleed_mitigation = RETBLEED_MITIGATION_IBRS;
break;
case SPECTRE_V2_EIBRS:
case SPECTRE_V2_EIBRS_RETPOLINE:
case SPECTRE_V2_EIBRS_LFENCE:
retbleed_mitigation = RETBLEED_MITIGATION_EIBRS;
break;
default:
pr_err(RETBLEED_INTEL_MSG);
}
}
pr_info("%s\n", retbleed_strings[retbleed_mitigation]);
}
#undef pr_fmt
#define pr_fmt(fmt) "Spectre V2 : " fmt
static enum spectre_v2_user_mitigation spectre_v2_user_stibp __ro_after_init =
SPECTRE_V2_USER_NONE;
static enum spectre_v2_user_mitigation spectre_v2_user_ibpb __ro_after_init =
@ -740,6 +858,7 @@ static inline const char *spectre_v2_module_string(void) { return ""; }
#define SPECTRE_V2_LFENCE_MSG "WARNING: LFENCE mitigation is not recommended for this CPU, data leaks possible!\n"
#define SPECTRE_V2_EIBRS_EBPF_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS on, data leaks possible via Spectre v2 BHB attacks!\n"
#define SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS+LFENCE mitigation and SMT, data leaks possible via Spectre v2 BHB attacks!\n"
#define SPECTRE_V2_IBRS_PERF_MSG "WARNING: IBRS mitigation selected on Enhanced IBRS CPU, this may cause unnecessary performance loss\n"
#ifdef CONFIG_BPF_SYSCALL
void unpriv_ebpf_notify(int new_state)
@ -781,6 +900,7 @@ enum spectre_v2_mitigation_cmd {
SPECTRE_V2_CMD_EIBRS,
SPECTRE_V2_CMD_EIBRS_RETPOLINE,
SPECTRE_V2_CMD_EIBRS_LFENCE,
SPECTRE_V2_CMD_IBRS,
};
enum spectre_v2_user_cmd {
@ -821,13 +941,15 @@ static void __init spec_v2_user_print_cond(const char *reason, bool secure)
pr_info("spectre_v2_user=%s forced on command line.\n", reason);
}
static __ro_after_init enum spectre_v2_mitigation_cmd spectre_v2_cmd;
static enum spectre_v2_user_cmd __init
spectre_v2_parse_user_cmdline(enum spectre_v2_mitigation_cmd v2_cmd)
spectre_v2_parse_user_cmdline(void)
{
char arg[20];
int ret, i;
switch (v2_cmd) {
switch (spectre_v2_cmd) {
case SPECTRE_V2_CMD_NONE:
return SPECTRE_V2_USER_CMD_NONE;
case SPECTRE_V2_CMD_FORCE:
@ -853,15 +975,16 @@ spectre_v2_parse_user_cmdline(enum spectre_v2_mitigation_cmd v2_cmd)
return SPECTRE_V2_USER_CMD_AUTO;
}
static inline bool spectre_v2_in_eibrs_mode(enum spectre_v2_mitigation mode)
static inline bool spectre_v2_in_ibrs_mode(enum spectre_v2_mitigation mode)
{
return (mode == SPECTRE_V2_EIBRS ||
mode == SPECTRE_V2_EIBRS_RETPOLINE ||
mode == SPECTRE_V2_EIBRS_LFENCE);
return mode == SPECTRE_V2_IBRS ||
mode == SPECTRE_V2_EIBRS ||
mode == SPECTRE_V2_EIBRS_RETPOLINE ||
mode == SPECTRE_V2_EIBRS_LFENCE;
}
static void __init
spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
spectre_v2_user_select_mitigation(void)
{
enum spectre_v2_user_mitigation mode = SPECTRE_V2_USER_NONE;
bool smt_possible = IS_ENABLED(CONFIG_SMP);
@ -874,7 +997,7 @@ spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
smt_possible = false;
cmd = spectre_v2_parse_user_cmdline(v2_cmd);
cmd = spectre_v2_parse_user_cmdline();
switch (cmd) {
case SPECTRE_V2_USER_CMD_NONE:
goto set_mode;
@ -922,12 +1045,12 @@ spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
}
/*
* If no STIBP, enhanced IBRS is enabled or SMT impossible, STIBP is not
* required.
* If no STIBP, IBRS or enhanced IBRS is enabled, or SMT impossible,
* STIBP is not required.
*/
if (!boot_cpu_has(X86_FEATURE_STIBP) ||
!smt_possible ||
spectre_v2_in_eibrs_mode(spectre_v2_enabled))
spectre_v2_in_ibrs_mode(spectre_v2_enabled))
return;
/*
@ -952,6 +1075,7 @@ static const char * const spectre_v2_strings[] = {
[SPECTRE_V2_EIBRS] = "Mitigation: Enhanced IBRS",
[SPECTRE_V2_EIBRS_LFENCE] = "Mitigation: Enhanced IBRS + LFENCE",
[SPECTRE_V2_EIBRS_RETPOLINE] = "Mitigation: Enhanced IBRS + Retpolines",
[SPECTRE_V2_IBRS] = "Mitigation: IBRS",
};
static const struct {
@ -969,6 +1093,7 @@ static const struct {
{ "eibrs,lfence", SPECTRE_V2_CMD_EIBRS_LFENCE, false },
{ "eibrs,retpoline", SPECTRE_V2_CMD_EIBRS_RETPOLINE, false },
{ "auto", SPECTRE_V2_CMD_AUTO, false },
{ "ibrs", SPECTRE_V2_CMD_IBRS, false },
};
static void __init spec_v2_print_cond(const char *reason, bool secure)
@ -1031,6 +1156,24 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
return SPECTRE_V2_CMD_AUTO;
}
if (cmd == SPECTRE_V2_CMD_IBRS && boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
pr_err("%s selected but not Intel CPU. Switching to AUTO select\n",
mitigation_options[i].option);
return SPECTRE_V2_CMD_AUTO;
}
if (cmd == SPECTRE_V2_CMD_IBRS && !boot_cpu_has(X86_FEATURE_IBRS)) {
pr_err("%s selected but CPU doesn't have IBRS. Switching to AUTO select\n",
mitigation_options[i].option);
return SPECTRE_V2_CMD_AUTO;
}
if (cmd == SPECTRE_V2_CMD_IBRS && boot_cpu_has(X86_FEATURE_XENPV)) {
pr_err("%s selected but running as XenPV guest. Switching to AUTO select\n",
mitigation_options[i].option);
return SPECTRE_V2_CMD_AUTO;
}
spec_v2_print_cond(mitigation_options[i].option,
mitigation_options[i].secure);
return cmd;
@ -1046,6 +1189,22 @@ static enum spectre_v2_mitigation __init spectre_v2_select_retpoline(void)
return SPECTRE_V2_RETPOLINE;
}
/* Disable in-kernel use of non-RSB RET predictors */
static void __init spec_ctrl_disable_kernel_rrsba(void)
{
u64 ia32_cap;
if (!boot_cpu_has(X86_FEATURE_RRSBA_CTRL))
return;
ia32_cap = x86_read_arch_cap_msr();
if (ia32_cap & ARCH_CAP_RRSBA) {
x86_spec_ctrl_base |= SPEC_CTRL_RRSBA_DIS_S;
update_spec_ctrl(x86_spec_ctrl_base);
}
}
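
The gate above reduces to two conditions: the RRSBA_DIS_S control must exist (CPUID enumerates RRSBA_CTRL) and IA32_ARCH_CAPABILITIES must report RRSBA behavior. A hedged helper-function model; the bit position is an assumption for illustration:

#include <stdbool.h>
#include <stdint.h>

#define ARCH_CAP_RRSBA_SKETCH (1ULL << 19)  /* assumed bit position */

static bool want_rrsba_dis_s(bool has_rrsba_ctrl, uint64_t ia32_cap)
{
        /* Only act when the control exists and the behavior is reported. */
        return has_rrsba_ctrl && (ia32_cap & ARCH_CAP_RRSBA_SKETCH);
}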
static void __init spectre_v2_determine_rsb_fill_type_at_vmexit(enum spectre_v2_mitigation mode)
{
/*
@ -1070,19 +1229,24 @@ static void __init spectre_v2_determine_rsb_fill_type_at_vmexit(enum spectre_v2_
*/
switch (mode) {
case SPECTRE_V2_NONE:
/* These modes already fill RSB at vmexit */
case SPECTRE_V2_LFENCE:
case SPECTRE_V2_RETPOLINE:
case SPECTRE_V2_EIBRS_RETPOLINE:
return;
case SPECTRE_V2_EIBRS_LFENCE:
case SPECTRE_V2_EIBRS:
if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) {
if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB) &&
(boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)) {
setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT_LITE);
pr_info("Spectre v2 / PBRSB-eIBRS: Retire a single CALL on VMEXIT\n");
}
return;
case SPECTRE_V2_EIBRS_RETPOLINE:
case SPECTRE_V2_RETPOLINE:
case SPECTRE_V2_LFENCE:
case SPECTRE_V2_IBRS:
setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT);
pr_info("Spectre v2 / SpectreRSB : Filling RSB on VMEXIT\n");
return;
}
pr_warn_once("Unknown Spectre v2 mode, disabling RSB mitigation at VM exit");
@ -1113,6 +1277,14 @@ static void __init spectre_v2_select_mitigation(void)
break;
}
if (boot_cpu_has_bug(X86_BUG_RETBLEED) &&
retbleed_cmd != RETBLEED_CMD_OFF &&
boot_cpu_has(X86_FEATURE_IBRS) &&
boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
mode = SPECTRE_V2_IBRS;
break;
}
mode = spectre_v2_select_retpoline();
break;
@ -1129,6 +1301,10 @@ static void __init spectre_v2_select_mitigation(void)
mode = spectre_v2_select_retpoline();
break;
case SPECTRE_V2_CMD_IBRS:
mode = SPECTRE_V2_IBRS;
break;
case SPECTRE_V2_CMD_EIBRS:
mode = SPECTRE_V2_EIBRS;
break;
@ -1145,10 +1321,9 @@ static void __init spectre_v2_select_mitigation(void)
if (mode == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled())
pr_err(SPECTRE_V2_EIBRS_EBPF_MSG);
if (spectre_v2_in_eibrs_mode(mode)) {
/* Force it so VMEXIT will restore correctly */
if (spectre_v2_in_ibrs_mode(mode)) {
x86_spec_ctrl_base |= SPEC_CTRL_IBRS;
wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
update_spec_ctrl(x86_spec_ctrl_base);
}
switch (mode) {
@ -1156,6 +1331,12 @@ static void __init spectre_v2_select_mitigation(void)
case SPECTRE_V2_EIBRS:
break;
case SPECTRE_V2_IBRS:
setup_force_cpu_cap(X86_FEATURE_KERNEL_IBRS);
if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED))
pr_warn(SPECTRE_V2_IBRS_PERF_MSG);
break;
case SPECTRE_V2_LFENCE:
case SPECTRE_V2_EIBRS_LFENCE:
setup_force_cpu_cap(X86_FEATURE_RETPOLINE_LFENCE);
@ -1167,16 +1348,56 @@ static void __init spectre_v2_select_mitigation(void)
break;
}
/*
* Disable alternate RSB predictions in kernel when indirect CALLs and
* JMPs gets protection against BHI and Intramode-BTI, but RET
* prediction from a non-RSB predictor is still a risk.
*/
if (mode == SPECTRE_V2_EIBRS_LFENCE ||
mode == SPECTRE_V2_EIBRS_RETPOLINE ||
mode == SPECTRE_V2_RETPOLINE)
spec_ctrl_disable_kernel_rrsba();
spectre_v2_enabled = mode;
pr_info("%s\n", spectre_v2_strings[mode]);
/*
* If spectre v2 protection has been enabled, unconditionally fill
* RSB during a context switch; this protects against two independent
* issues:
* If Spectre v2 protection has been enabled, fill the RSB during a
* context switch. In general there are two types of RSB attacks
* across context switches, for which the CALLs/RETs may be unbalanced.
*
* - RSB underflow (and switch to BTB) on Skylake+
* - SpectreRSB variant of spectre v2 on X86_BUG_SPECTRE_V2 CPUs
* 1) RSB underflow
*
* Some Intel parts have "bottomless RSB". When the RSB is empty,
* speculated return targets may come from the branch predictor,
* which could have a user-poisoned BTB or BHB entry.
*
* AMD has it even worse: *all* returns are speculated from the BTB,
* regardless of the state of the RSB.
*
* When IBRS or eIBRS is enabled, the "user -> kernel" attack
* scenario is mitigated by the IBRS branch prediction isolation
* properties, so the RSB buffer filling wouldn't be necessary to
* protect against this type of attack.
*
* The "user -> user" attack scenario is mitigated by RSB filling.
*
* 2) Poisoned RSB entry
*
* If the 'next' in-kernel return stack is shorter than 'prev',
* 'next' could be tricked into speculating with a user-poisoned RSB
* entry.
*
* The "user -> kernel" attack scenario is mitigated by SMEP and
* eIBRS.
*
* The "user -> user" scenario, also known as SpectreBHB, requires
* RSB clearing.
*
* So to mitigate all cases, unconditionally fill RSB on context
* switches.
*
* FIXME: Is this pointless for retbleed-affected AMD?
*/
setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n");
@ -1184,28 +1405,29 @@ static void __init spectre_v2_select_mitigation(void)
spectre_v2_determine_rsb_fill_type_at_vmexit(mode);
/*
* Retpoline means the kernel is safe because it has no indirect
* branches. Enhanced IBRS protects firmware too, so, enable restricted
* speculation around firmware calls only when Enhanced IBRS isn't
* supported.
* Retpoline protects the kernel, but doesn't protect firmware. IBRS
* and Enhanced IBRS protect firmware too, so enable IBRS around
* firmware calls only when IBRS / Enhanced IBRS aren't otherwise
* enabled.
*
* Use "mode" to check Enhanced IBRS instead of boot_cpu_has(), because
* the user might select retpoline on the kernel command line and if
the CPU supports Enhanced IBRS, the kernel might unintentionally not
* enable IBRS around firmware calls.
*/
if (boot_cpu_has(X86_FEATURE_IBRS) && !spectre_v2_in_eibrs_mode(mode)) {
if (boot_cpu_has(X86_FEATURE_IBRS) && !spectre_v2_in_ibrs_mode(mode)) {
setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
pr_info("Enabling Restricted Speculation for firmware calls\n");
}
/* Set up IBPB and STIBP depending on the general spectre V2 command */
spectre_v2_user_select_mitigation(cmd);
spectre_v2_cmd = cmd;
}
static void update_stibp_msr(void * __unused)
{
wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
u64 val = spec_ctrl_current() | (x86_spec_ctrl_base & SPEC_CTRL_STIBP);
update_spec_ctrl(val);
}
/* Update x86_spec_ctrl_base in case SMT state changed. */
@ -1421,16 +1643,6 @@ static enum ssb_mitigation __init __ssb_select_mitigation(void)
break;
}
/*
* If SSBD is controlled by the SPEC_CTRL MSR, then set the proper
* bit in the mask to allow guests to use the mitigation even in the
* case where the host does not enable it.
*/
if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
static_cpu_has(X86_FEATURE_AMD_SSBD)) {
x86_spec_ctrl_mask |= SPEC_CTRL_SSBD;
}
/*
* We have three CPU feature flags that are in play here:
* - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.
@ -1448,7 +1660,7 @@ static enum ssb_mitigation __init __ssb_select_mitigation(void)
x86_amd_ssb_disable();
} else {
x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
update_spec_ctrl(x86_spec_ctrl_base);
}
}
@ -1653,7 +1865,7 @@ int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
void x86_spec_ctrl_setup_ap(void)
{
if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
update_spec_ctrl(x86_spec_ctrl_base);
if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
x86_amd_ssb_disable();
@ -1888,7 +2100,7 @@ static ssize_t mmio_stale_data_show_state(char *buf)
static char *stibp_state(void)
{
if (spectre_v2_in_eibrs_mode(spectre_v2_enabled))
if (spectre_v2_in_ibrs_mode(spectre_v2_enabled))
return "";
switch (spectre_v2_user_stibp) {
@ -1922,7 +2134,7 @@ static char *pbrsb_eibrs_state(void)
{
if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) {
if (boot_cpu_has(X86_FEATURE_RSB_VMEXIT_LITE) ||
boot_cpu_has(X86_FEATURE_RETPOLINE))
boot_cpu_has(X86_FEATURE_RSB_VMEXIT))
return ", PBRSB-eIBRS: SW sequence";
else
return ", PBRSB-eIBRS: Vulnerable";
@ -1958,6 +2170,11 @@ static ssize_t srbds_show_state(char *buf)
return sprintf(buf, "%s\n", srbds_strings[srbds_mitigation]);
}
static ssize_t retbleed_show_state(char *buf)
{
return sprintf(buf, "%s\n", retbleed_strings[retbleed_mitigation]);
}
static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
char *buf, unsigned int bug)
{
@ -2004,6 +2221,9 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
case X86_BUG_MMIO_UNKNOWN:
return mmio_stale_data_show_state(buf);
case X86_BUG_RETBLEED:
return retbleed_show_state(buf);
default:
break;
}
@ -2063,4 +2283,9 @@ ssize_t cpu_show_mmio_stale_data(struct device *dev, struct device_attribute *at
else
return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_STALE_DATA);
}
ssize_t cpu_show_retbleed(struct device *dev, struct device_attribute *attr, char *buf)
{
return cpu_show_common(dev, attr, buf, X86_BUG_RETBLEED);
}
#endif


@ -954,8 +954,8 @@ static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
#define MSBDS_ONLY BIT(5)
#define NO_SWAPGS BIT(6)
#define NO_ITLB_MULTIHIT BIT(7)
#define NO_EIBRS_PBRSB BIT(8)
#define NO_MMIO BIT(9)
#define NO_MMIO BIT(8)
#define NO_EIBRS_PBRSB BIT(9)
#define VULNWL(_vendor, _family, _model, _whitelist) \
{ X86_VENDOR_##_vendor, _family, _model, X86_FEATURE_ANY, _whitelist }
@ -1022,48 +1022,55 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
{}
};
#define VULNBL(vendor, family, model, blacklist) \
X86_MATCH_VENDOR_FAM_MODEL(vendor, family, model, blacklist)
#define VULNBL_INTEL_STEPPINGS(model, steppings, issues) \
X86_MATCH_VENDOR_FAM_MODEL_STEPPINGS_FEATURE(INTEL, 6, \
INTEL_FAM6_##model, steppings, \
X86_FEATURE_ANY, issues)
#define VULNBL_AMD(family, blacklist) \
VULNBL(AMD, family, X86_MODEL_ANY, blacklist)
#define SRBDS BIT(0)
/* CPU is affected by X86_BUG_MMIO_STALE_DATA */
#define MMIO BIT(1)
/* CPU is affected by Shared Buffers Data Sampling (SBDS), a variant of X86_BUG_MMIO_STALE_DATA */
#define MMIO_SBDS BIT(2)
/* CPU is affected by RETbleed, speculating where you would not expect it */
#define RETBLEED BIT(3)
static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
VULNBL_INTEL_STEPPINGS(IVYBRIDGE, X86_STEPPING_ANY, SRBDS),
VULNBL_INTEL_STEPPINGS(HASWELL_CORE, X86_STEPPING_ANY, SRBDS),
VULNBL_INTEL_STEPPINGS(HASWELL_ULT, X86_STEPPING_ANY, SRBDS),
VULNBL_INTEL_STEPPINGS(HASWELL_GT3E, X86_STEPPING_ANY, SRBDS),
VULNBL_INTEL_STEPPINGS(HASWELL_X, BIT(2) | BIT(4), MMIO),
VULNBL_INTEL_STEPPINGS(BROADWELL_XEON_D,X86_STEPPINGS(0x3, 0x5), MMIO),
VULNBL_INTEL_STEPPINGS(HASWELL_X, X86_STEPPING_ANY, MMIO),
VULNBL_INTEL_STEPPINGS(BROADWELL_XEON_D,X86_STEPPING_ANY, MMIO),
VULNBL_INTEL_STEPPINGS(BROADWELL_GT3E, X86_STEPPING_ANY, SRBDS),
VULNBL_INTEL_STEPPINGS(BROADWELL_X, X86_STEPPING_ANY, MMIO),
VULNBL_INTEL_STEPPINGS(BROADWELL_CORE, X86_STEPPING_ANY, SRBDS),
VULNBL_INTEL_STEPPINGS(SKYLAKE_MOBILE, X86_STEPPINGS(0x3, 0x3), SRBDS | MMIO),
VULNBL_INTEL_STEPPINGS(SKYLAKE_MOBILE, X86_STEPPING_ANY, SRBDS),
VULNBL_INTEL_STEPPINGS(SKYLAKE_X, BIT(3) | BIT(4) | BIT(6) |
BIT(7) | BIT(0xB), MMIO),
VULNBL_INTEL_STEPPINGS(SKYLAKE_DESKTOP, X86_STEPPINGS(0x3, 0x3), SRBDS | MMIO),
VULNBL_INTEL_STEPPINGS(SKYLAKE_DESKTOP, X86_STEPPING_ANY, SRBDS),
VULNBL_INTEL_STEPPINGS(KABYLAKE_MOBILE, X86_STEPPINGS(0x9, 0xC), SRBDS | MMIO),
VULNBL_INTEL_STEPPINGS(KABYLAKE_MOBILE, X86_STEPPINGS(0x0, 0x8), SRBDS),
VULNBL_INTEL_STEPPINGS(KABYLAKE_DESKTOP,X86_STEPPINGS(0x9, 0xD), SRBDS | MMIO),
VULNBL_INTEL_STEPPINGS(KABYLAKE_DESKTOP,X86_STEPPINGS(0x0, 0x8), SRBDS),
VULNBL_INTEL_STEPPINGS(ICELAKE_MOBILE, X86_STEPPINGS(0x5, 0x5), MMIO | MMIO_SBDS),
VULNBL_INTEL_STEPPINGS(ICELAKE_XEON_D, X86_STEPPINGS(0x1, 0x1), MMIO),
VULNBL_INTEL_STEPPINGS(ICELAKE_X, X86_STEPPINGS(0x4, 0x6), MMIO),
VULNBL_INTEL_STEPPINGS(COMETLAKE, BIT(2) | BIT(3) | BIT(5), MMIO | MMIO_SBDS),
VULNBL_INTEL_STEPPINGS(COMETLAKE_L, X86_STEPPINGS(0x1, 0x1), MMIO | MMIO_SBDS),
VULNBL_INTEL_STEPPINGS(COMETLAKE_L, X86_STEPPINGS(0x0, 0x0), MMIO),
VULNBL_INTEL_STEPPINGS(LAKEFIELD, X86_STEPPINGS(0x1, 0x1), MMIO | MMIO_SBDS),
VULNBL_INTEL_STEPPINGS(ROCKETLAKE, X86_STEPPINGS(0x1, 0x1), MMIO),
VULNBL_INTEL_STEPPINGS(ATOM_TREMONT, X86_STEPPINGS(0x1, 0x1), MMIO | MMIO_SBDS),
VULNBL_INTEL_STEPPINGS(SKYLAKE_MOBILE, X86_STEPPING_ANY, SRBDS | MMIO | RETBLEED),
VULNBL_INTEL_STEPPINGS(SKYLAKE_X, X86_STEPPING_ANY, MMIO | RETBLEED),
VULNBL_INTEL_STEPPINGS(SKYLAKE_DESKTOP, X86_STEPPING_ANY, SRBDS | MMIO | RETBLEED),
VULNBL_INTEL_STEPPINGS(KABYLAKE_MOBILE, X86_STEPPING_ANY, SRBDS | MMIO | RETBLEED),
VULNBL_INTEL_STEPPINGS(KABYLAKE_DESKTOP,X86_STEPPING_ANY, SRBDS | MMIO | RETBLEED),
VULNBL_INTEL_STEPPINGS(CANNONLAKE_MOBILE,X86_STEPPING_ANY, RETBLEED),
VULNBL_INTEL_STEPPINGS(ICELAKE_MOBILE, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED),
VULNBL_INTEL_STEPPINGS(ICELAKE_XEON_D, X86_STEPPING_ANY, MMIO),
VULNBL_INTEL_STEPPINGS(ICELAKE_X, X86_STEPPING_ANY, MMIO),
VULNBL_INTEL_STEPPINGS(COMETLAKE, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED),
VULNBL_INTEL_STEPPINGS(COMETLAKE_L, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED),
VULNBL_INTEL_STEPPINGS(LAKEFIELD, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED),
VULNBL_INTEL_STEPPINGS(ROCKETLAKE, X86_STEPPING_ANY, MMIO | RETBLEED),
VULNBL_INTEL_STEPPINGS(ATOM_TREMONT, X86_STEPPING_ANY, MMIO | MMIO_SBDS),
VULNBL_INTEL_STEPPINGS(ATOM_TREMONT_X, X86_STEPPING_ANY, MMIO),
VULNBL_INTEL_STEPPINGS(ATOM_TREMONT_L, X86_STEPPINGS(0x0, 0x0), MMIO | MMIO_SBDS),
VULNBL_INTEL_STEPPINGS(ATOM_TREMONT_L, X86_STEPPING_ANY, MMIO | MMIO_SBDS),
VULNBL_AMD(0x15, RETBLEED),
VULNBL_AMD(0x16, RETBLEED),
VULNBL_AMD(0x17, RETBLEED),
{}
};
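
A self-contained sketch of how an entry's flag word in the blacklist above is consumed: cpu_matches() is modeled as a linear table walk that returns the matched entry's flags (0 when nothing matches), and the caller tests a single bit such as RETBLEED. The table row is hypothetical.

#include <stdbool.h>
#include <stdint.h>

#define SRBDS_F     (1u << 0)
#define MMIO_F      (1u << 1)
#define MMIO_SBDS_F (1u << 2)
#define RETBLEED_F  (1u << 3)

struct vuln_entry { uint8_t family, model; uint32_t flags; };

static const struct vuln_entry blacklist_sketch[] = {
        { 6, 0x8e, SRBDS_F | MMIO_F | RETBLEED_F }, /* hypothetical row */
        { 0, 0, 0 }
};

static uint32_t cpu_matches_sketch(const struct vuln_entry *t,
                                   uint8_t family, uint8_t model)
{
        for (; t->family; t++)
                if (t->family == family && t->model == model)
                        return t->flags;
        return 0;
}

static bool affected_by_retbleed(uint8_t family, uint8_t model)
{
        return cpu_matches_sketch(blacklist_sketch, family, model) & RETBLEED_F;
}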
@ -1169,6 +1176,11 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
setup_force_cpu_bug(X86_BUG_MMIO_UNKNOWN);
}
if (!cpu_has(c, X86_FEATURE_BTC_NO)) {
if (cpu_matches(cpu_vuln_blacklist, RETBLEED) || (ia32_cap & ARCH_CAP_RSBA))
setup_force_cpu_bug(X86_BUG_RETBLEED);
}
if (cpu_has(c, X86_FEATURE_IBRS_ENHANCED) &&
!cpu_matches(cpu_vuln_whitelist, NO_EIBRS_PBRSB) &&
!(ia32_cap & ARCH_CAP_PBRSB_NO))


@ -16,12 +16,17 @@
* respective wildcard entries.
*
* A typical table entry would be to match a specific CPU
* { X86_VENDOR_INTEL, 6, 0x12 }
* or to match a specific CPU feature
* { X86_FEATURE_MATCH(X86_FEATURE_FOOBAR) }
*
* X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6, INTEL_FAM6_BROADWELL,
* X86_FEATURE_ANY, NULL);
*
* Fields can be wildcarded with %X86_VENDOR_ANY, %X86_FAMILY_ANY,
* %X86_MODEL_ANY, %X86_FEATURE_ANY or 0 (except for vendor)
* %X86_MODEL_ANY, %X86_FEATURE_ANY (except for vendor)
*
* asm/cpu_device_id.h contains a set of useful macros which are shortcuts
* for various common selections. The above can be shortened to:
*
* X86_MATCH_INTEL_FAM6_MODEL(BROADWELL, NULL);
*
* Arrays used to match for this should also be declared using
* MODULE_DEVICE_TABLE(x86cpu, ...)
@ -53,3 +58,34 @@ const struct x86_cpu_id *x86_match_cpu(const struct x86_cpu_id *match)
return NULL;
}
EXPORT_SYMBOL(x86_match_cpu);
static const struct x86_cpu_desc *
x86_match_cpu_with_stepping(const struct x86_cpu_desc *match)
{
struct cpuinfo_x86 *c = &boot_cpu_data;
const struct x86_cpu_desc *m;
for (m = match; m->x86_family | m->x86_model; m++) {
if (c->x86_vendor != m->x86_vendor)
continue;
if (c->x86 != m->x86_family)
continue;
if (c->x86_model != m->x86_model)
continue;
if (c->x86_stepping != m->x86_stepping)
continue;
return m;
}
return NULL;
}
bool x86_cpu_has_min_microcode_rev(const struct x86_cpu_desc *table)
{
const struct x86_cpu_desc *res = x86_match_cpu_with_stepping(table);
if (!res || res->x86_microcode_rev > boot_cpu_data.microcode)
return false;
return true;
}
EXPORT_SYMBOL_GPL(x86_cpu_has_min_microcode_rev);
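
Hedged usage sketch for the new helper: a caller declares the minimum microcode revision it needs per model/stepping and bails out when the running CPU is older. INTEL_CPU_DESC() is assumed to be the upstream convenience initializer for struct x86_cpu_desc; the model, stepping, and revision below are made up for illustration.

static const struct x86_cpu_desc example_ucodes[] = {
        INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 4, 0x02000014), /* hypothetical */
        {}
};

static int __init example_init(void)
{
        if (!x86_cpu_has_min_microcode_rev(example_ucodes))
                return -ENODEV; /* microcode too old for this feature */
        return 0;
}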


@ -21,6 +21,7 @@ struct cpuid_bit {
static const struct cpuid_bit cpuid_bits[] = {
{ X86_FEATURE_APERFMPERF, CPUID_ECX, 0, 0x00000006, 0 },
{ X86_FEATURE_EPB, CPUID_ECX, 3, 0x00000006, 0 },
{ X86_FEATURE_RRSBA_CTRL, CPUID_EDX, 2, 0x00000007, 2 },
{ X86_FEATURE_CQM_LLC, CPUID_EDX, 1, 0x0000000f, 0 },
{ X86_FEATURE_CQM_OCCUP_LLC, CPUID_EDX, 0, 0x0000000f, 1 },
{ X86_FEATURE_CQM_MBM_TOTAL, CPUID_EDX, 1, 0x0000000f, 1 },


@ -55,24 +55,6 @@ void tsx_enable(void)
wrmsrl(MSR_IA32_TSX_CTRL, tsx);
}
static bool __init tsx_ctrl_is_supported(void)
{
u64 ia32_cap = x86_read_arch_cap_msr();
/*
* TSX is controlled via MSR_IA32_TSX_CTRL. However, support for this
* MSR is enumerated by ARCH_CAP_TSX_MSR bit in MSR_IA32_ARCH_CAPABILITIES.
*
* TSX control (aka MSR_IA32_TSX_CTRL) is only available after a
* microcode update on CPUs that have their MSR_IA32_ARCH_CAPABILITIES
* bit MDS_NO=1. CPUs with MDS_NO=0 are not planned to get
* MSR_IA32_TSX_CTRL support even after a microcode update. Thus,
* tsx= cmdline requests will do nothing on CPUs without
* MSR_IA32_TSX_CTRL support.
*/
return !!(ia32_cap & ARCH_CAP_TSX_CTRL_MSR);
}
static enum tsx_ctrl_states x86_get_tsx_auto_mode(void)
{
if (boot_cpu_has_bug(X86_BUG_TAA))
@ -86,9 +68,22 @@ void __init tsx_init(void)
char arg[5] = {};
int ret;
if (!tsx_ctrl_is_supported())
/*
* TSX is controlled via MSR_IA32_TSX_CTRL. However, support for this
* MSR is enumerated by ARCH_CAP_TSX_MSR bit in MSR_IA32_ARCH_CAPABILITIES.
*
* TSX control (aka MSR_IA32_TSX_CTRL) is only available after a
* microcode update on CPUs that have their MSR_IA32_ARCH_CAPABILITIES
* bit MDS_NO=1. CPUs with MDS_NO=0 are not planned to get
* MSR_IA32_TSX_CTRL support even after a microcode update. Thus,
* tsx= cmdline requests will do nothing on CPUs without
* MSR_IA32_TSX_CTRL support.
*/
if (!(x86_read_arch_cap_msr() & ARCH_CAP_TSX_CTRL_MSR))
return;
setup_force_cpu_cap(X86_FEATURE_MSR_TSX_CTRL);
ret = cmdline_find_option(boot_command_line, "tsx", arg, sizeof(arg));
if (ret >= 0) {
if (!strcmp(arg, "on")) {

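For reference, the enumeration check folded into tsx_init() above reduces to a single bit test; a standalone sketch, assuming ARCH_CAP_TSX_CTRL_MSR is bit 7 of IA32_ARCH_CAPABILITIES:

#include <stdbool.h>
#include <stdint.h>

#define ARCH_CAP_TSX_CTRL_MSR_SKETCH (1ULL << 7)  /* assumed bit position */

static bool tsx_ctrl_supported(uint64_t arch_cap)
{
        return arch_cap & ARCH_CAP_TSX_CTRL_MSR_SKETCH;
}
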

@ -434,7 +434,7 @@ static __always_inline void __speculation_ctrl_update(unsigned long tifp,
}
if (updmsr)
wrmsrl(MSR_IA32_SPEC_CTRL, msr);
update_spec_ctrl_cond(msr);
}
static unsigned long speculation_ctrl_update_tif(struct task_struct *tsk)


@ -680,6 +680,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
g_phys_as = phys_as;
entry->eax = g_phys_as | (virt_as << 8);
entry->ecx &= ~(GENMASK(31, 16) | GENMASK(11, 8));
entry->edx = 0;
/*
* IBRS, IBPB and VIRT_SSBD aren't necessarily present in


@ -759,8 +759,7 @@ static int linearize(struct x86_emulate_ctxt *ctxt,
ctxt->mode, linear);
}
static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst,
enum x86emul_mode mode)
static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst)
{
ulong linear;
int rc;
@ -770,41 +769,71 @@ static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst,
if (ctxt->op_bytes != sizeof(unsigned long))
addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
rc = __linearize(ctxt, addr, &max_size, 1, false, true, mode, &linear);
rc = __linearize(ctxt, addr, &max_size, 1, false, true, ctxt->mode, &linear);
if (rc == X86EMUL_CONTINUE)
ctxt->_eip = addr.ea;
return rc;
}
static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
static inline int emulator_recalc_and_set_mode(struct x86_emulate_ctxt *ctxt)
{
return assign_eip(ctxt, dst, ctxt->mode);
u64 efer;
struct desc_struct cs;
u16 selector;
u32 base3;
ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
if (!(ctxt->ops->get_cr(ctxt, 0) & X86_CR0_PE)) {
/* Real mode. cpu must not have long mode active */
if (efer & EFER_LMA)
return X86EMUL_UNHANDLEABLE;
ctxt->mode = X86EMUL_MODE_REAL;
return X86EMUL_CONTINUE;
}
if (ctxt->eflags & X86_EFLAGS_VM) {
/* Protected/VM86 mode. cpu must not have long mode active */
if (efer & EFER_LMA)
return X86EMUL_UNHANDLEABLE;
ctxt->mode = X86EMUL_MODE_VM86;
return X86EMUL_CONTINUE;
}
if (!ctxt->ops->get_segment(ctxt, &selector, &cs, &base3, VCPU_SREG_CS))
return X86EMUL_UNHANDLEABLE;
if (efer & EFER_LMA) {
if (cs.l) {
/* Proper long mode */
ctxt->mode = X86EMUL_MODE_PROT64;
} else if (cs.d) {
/* 32 bit compatibility mode */
ctxt->mode = X86EMUL_MODE_PROT32;
} else {
ctxt->mode = X86EMUL_MODE_PROT16;
}
} else {
/* Legacy 32 bit / 16 bit mode */
ctxt->mode = cs.d ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
}
return X86EMUL_CONTINUE;
}
static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
const struct desc_struct *cs_desc)
static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
{
enum x86emul_mode mode = ctxt->mode;
int rc;
return assign_eip(ctxt, dst);
}
#ifdef CONFIG_X86_64
if (ctxt->mode >= X86EMUL_MODE_PROT16) {
if (cs_desc->l) {
u64 efer = 0;
static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst)
{
int rc = emulator_recalc_and_set_mode(ctxt);
ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
if (efer & EFER_LMA)
mode = X86EMUL_MODE_PROT64;
} else
mode = X86EMUL_MODE_PROT32; /* temporary value */
}
#endif
if (mode == X86EMUL_MODE_PROT16 || mode == X86EMUL_MODE_PROT32)
mode = cs_desc->d ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
rc = assign_eip(ctxt, dst, mode);
if (rc == X86EMUL_CONTINUE)
ctxt->mode = mode;
return rc;
if (rc != X86EMUL_CONTINUE)
return rc;
return assign_eip(ctxt, dst);
}
static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
@ -2192,7 +2221,7 @@ static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
if (rc != X86EMUL_CONTINUE)
return rc;
rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
rc = assign_eip_far(ctxt, ctxt->src.val);
/* Error handling is not implemented. */
if (rc != X86EMUL_CONTINUE)
return X86EMUL_UNHANDLEABLE;
@ -2273,7 +2302,7 @@ static int em_ret_far(struct x86_emulate_ctxt *ctxt)
&new_desc);
if (rc != X86EMUL_CONTINUE)
return rc;
rc = assign_eip_far(ctxt, eip, &new_desc);
rc = assign_eip_far(ctxt, eip);
/* Error handling is not implemented. */
if (rc != X86EMUL_CONTINUE)
return X86EMUL_UNHANDLEABLE;
@ -2895,6 +2924,7 @@ static int em_sysexit(struct x86_emulate_ctxt *ctxt)
ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
ctxt->_eip = rdx;
ctxt->mode = usermode;
*reg_write(ctxt, VCPU_REGS_RSP) = rcx;
return X86EMUL_CONTINUE;
@ -3491,7 +3521,7 @@ static int em_call_far(struct x86_emulate_ctxt *ctxt)
if (rc != X86EMUL_CONTINUE)
return rc;
rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
rc = assign_eip_far(ctxt, ctxt->src.val);
if (rc != X86EMUL_CONTINUE)
goto fail;
@ -3638,11 +3668,25 @@ static int em_movbe(struct x86_emulate_ctxt *ctxt)
static int em_cr_write(struct x86_emulate_ctxt *ctxt)
{
if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
int cr_num = ctxt->modrm_reg;
int r;
if (ctxt->ops->set_cr(ctxt, cr_num, ctxt->src.val))
return emulate_gp(ctxt, 0);
/* Disable writeback. */
ctxt->dst.type = OP_NONE;
if (cr_num == 0) {
/*
* CR0 write might have updated CR0.PE and/or CR0.PG
* which can affect the cpu's execution mode.
*/
r = emulator_recalc_and_set_mode(ctxt);
if (r != X86EMUL_CONTINUE)
return r;
}
return X86EMUL_CONTINUE;
}


@ -50,6 +50,7 @@
#include <asm/kvm_para.h>
#include <asm/irq_remapping.h>
#include <asm/spec-ctrl.h>
#include <asm/cpu_device_id.h>
#include <asm/virtext.h>
#include "trace.h"
@ -4154,9 +4155,9 @@ static int svm_get_msr_feature(struct kvm_msr_entry *msr)
msr->data = 0;
switch (msr->index) {
case MSR_F10H_DECFG:
if (boot_cpu_has(X86_FEATURE_LFENCE_RDTSC))
msr->data |= MSR_F10H_DECFG_LFENCE_SERIALIZE;
case MSR_AMD64_DE_CFG:
if (cpu_feature_enabled(X86_FEATURE_LFENCE_RDTSC))
msr->data |= MSR_AMD64_DE_CFG_LFENCE_SERIALIZE;
break;
default:
return 1;
@ -4258,7 +4259,7 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
msr_info->data = 0x1E;
}
break;
case MSR_F10H_DECFG:
case MSR_AMD64_DE_CFG:
msr_info->data = svm->msr_decfg;
break;
default:
@ -4445,7 +4446,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
case MSR_VM_IGNNE:
vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
break;
case MSR_F10H_DECFG: {
case MSR_AMD64_DE_CFG: {
struct kvm_msr_entry msr_entry;
msr_entry.index = msr->index;


@ -41,6 +41,7 @@
#include <asm/asm.h>
#include <asm/cpu.h>
#include <asm/cpu_device_id.h>
#include <asm/io.h>
#include <asm/desc.h>
#include <asm/vmx.h>
@ -2132,9 +2133,9 @@ static __always_inline void vmx_disable_fb_clear(struct vcpu_vmx *vmx)
if (!vmx->disable_fb_clear)
return;
rdmsrl(MSR_IA32_MCU_OPT_CTRL, msr);
msr = __rdmsr(MSR_IA32_MCU_OPT_CTRL);
msr |= FB_CLEAR_DIS;
wrmsrl(MSR_IA32_MCU_OPT_CTRL, msr);
native_wrmsrl(MSR_IA32_MCU_OPT_CTRL, msr);
/* Cache the MSR value to avoid reading it later */
vmx->msr_ia32_mcu_opt_ctrl = msr;
}
@ -2145,7 +2146,7 @@ static __always_inline void vmx_enable_fb_clear(struct vcpu_vmx *vmx)
return;
vmx->msr_ia32_mcu_opt_ctrl &= ~FB_CLEAR_DIS;
wrmsrl(MSR_IA32_MCU_OPT_CTRL, vmx->msr_ia32_mcu_opt_ctrl);
native_wrmsrl(MSR_IA32_MCU_OPT_CTRL, vmx->msr_ia32_mcu_opt_ctrl);
}
static void vmx_update_fb_clear_dis(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx)
@ -10759,10 +10760,35 @@ static void vmx_update_hv_timer(struct kvm_vcpu *vcpu)
vmx->loaded_vmcs->hv_timer_armed = false;
}
u64 __always_inline vmx_spec_ctrl_restore_host(struct vcpu_vmx *vmx)
{
u64 guestval, hostval = this_cpu_read(x86_spec_ctrl_current);
if (!cpu_feature_enabled(X86_FEATURE_MSR_SPEC_CTRL))
return 0;
guestval = __rdmsr(MSR_IA32_SPEC_CTRL);
/*
* If the guest/host SPEC_CTRL values differ, restore the host value.
*
* For legacy IBRS, the IBRS bit always needs to be written after
* transitioning from a less privileged predictor mode, regardless of
* whether the guest/host values differ.
*/
if (cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS) ||
guestval != hostval)
native_wrmsrl(MSR_IA32_SPEC_CTRL, hostval);
barrier_nospec();
return guestval;
}
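
The restore decision above collapses to one predicate; a hedged plain-C model:

#include <stdbool.h>
#include <stdint.h>

/* Under legacy kernel IBRS the MSR write is also the barrier that ends
 * the less privileged prediction mode, so it must happen even when the
 * guest and host values are equal. */
static bool must_write_spec_ctrl(bool kernel_ibrs,
                                 uint64_t guestval, uint64_t hostval)
{
        return kernel_ibrs || guestval != hostval;
}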
static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
unsigned long cr3, cr4, evmcs_rsp;
u64 spec_ctrl;
/* Record the guest's net vcpu time for enforced NMI injections. */
if (unlikely(!enable_vnmi &&
@ -10988,9 +11014,24 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
#endif
);
/* Eliminate branch target predictions from guest mode */
/*
* IMPORTANT: RSB filling and SPEC_CTRL handling must be done before
* the first unbalanced RET after vmexit!
*
* For retpoline or IBRS, RSB filling is needed to prevent poisoned RSB
* entries and (in some cases) RSB underflow.
*
* eIBRS has its own protection against poisoned RSB, so it doesn't
* need the RSB filling sequence. But it does need to be enabled, and a
* single call to retire, before the first unbalanced RET.
*
* So no RETs before vmx_spec_ctrl_restore_host() below.
*/
vmexit_fill_RSB();
/* Save this for below */
spec_ctrl = vmx_spec_ctrl_restore_host(vmx);
vmx_enable_fb_clear(vmx);
/*
@ -11009,9 +11050,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
* save it.
*/
if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
vmx->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
x86_spec_ctrl_restore_host(vmx->spec_ctrl, 0);
vmx->spec_ctrl = spec_ctrl;
/* All fields are clean at this point */
if (static_branch_unlikely(&enable_evmcs))


@ -1156,7 +1156,7 @@ static u32 msr_based_features[] = {
MSR_IA32_VMX_EPT_VPID_CAP,
MSR_IA32_VMX_VMFUNC,
MSR_F10H_DECFG,
MSR_AMD64_DE_CFG,
MSR_IA32_UCODE_REV,
MSR_IA32_ARCH_CAPABILITIES,
};
@ -9736,9 +9736,9 @@ void kvm_arch_end_assignment(struct kvm *kvm)
}
EXPORT_SYMBOL_GPL(kvm_arch_end_assignment);
bool kvm_arch_has_assigned_device(struct kvm *kvm)
bool noinstr kvm_arch_has_assigned_device(struct kvm *kvm)
{
return atomic_read(&kvm->arch.assigned_device_count);
return arch_atomic_read(&kvm->arch.assigned_device_count);
}
EXPORT_SYMBOL_GPL(kvm_arch_has_assigned_device);


@ -171,9 +171,15 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
* Mappings have to be page-aligned
*/
offset = phys_addr & ~PAGE_MASK;
phys_addr &= PHYSICAL_PAGE_MASK;
phys_addr &= PAGE_MASK;
size = PAGE_ALIGN(last_addr+1) - phys_addr;
/*
* Mask out any bits not part of the actual physical
* address, like memory encryption bits.
*/
phys_addr &= PHYSICAL_PAGE_MASK;
retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
pcm, &new_pcm);
if (retval) {


@ -527,15 +527,23 @@ static int pm_cpu_check(const struct x86_cpu_id *c)
static void pm_save_spec_msr(void)
{
u32 spec_msr_id[] = {
MSR_IA32_SPEC_CTRL,
MSR_IA32_TSX_CTRL,
MSR_TSX_FORCE_ABORT,
MSR_IA32_MCU_OPT_CTRL,
MSR_AMD64_LS_CFG,
struct msr_enumeration {
u32 msr_no;
u32 feature;
} msr_enum[] = {
{ MSR_IA32_SPEC_CTRL, X86_FEATURE_MSR_SPEC_CTRL },
{ MSR_IA32_TSX_CTRL, X86_FEATURE_MSR_TSX_CTRL },
{ MSR_TSX_FORCE_ABORT, X86_FEATURE_TSX_FORCE_ABORT },
{ MSR_IA32_MCU_OPT_CTRL, X86_FEATURE_SRBDS_CTRL },
{ MSR_AMD64_LS_CFG, X86_FEATURE_LS_CFG_SSBD },
{ MSR_AMD64_DE_CFG, X86_FEATURE_LFENCE_RDTSC },
};
int i;
msr_build_context(spec_msr_id, ARRAY_SIZE(spec_msr_id));
for (i = 0; i < ARRAY_SIZE(msr_enum); i++) {
if (boot_cpu_has(msr_enum[i].feature))
msr_build_context(&msr_enum[i].msr_no, 1);
}
}
static int pm_check_save_msr(void)


@ -416,6 +416,8 @@ static struct bfq_io_cq *bfq_bic_lookup(struct bfq_data *bfqd,
*/
void bfq_schedule_dispatch(struct bfq_data *bfqd)
{
lockdep_assert_held(&bfqd->lock);
if (bfqd->queued != 0) {
bfq_log(bfqd, "schedule dispatch");
blk_mq_run_hw_queues(bfqd->queue, true);
@ -5278,8 +5280,8 @@ bfq_idle_slice_timer_body(struct bfq_data *bfqd, struct bfq_queue *bfqq)
bfq_bfqq_expire(bfqd, bfqq, true, reason);
schedule_dispatch:
spin_unlock_irqrestore(&bfqd->lock, flags);
bfq_schedule_dispatch(bfqd);
spin_unlock_irqrestore(&bfqd->lock, flags);
}
/*


@ -94,8 +94,8 @@ struct opal_dev {
u64 lowest_lba;
size_t pos;
u8 cmd[IO_BUFFER_LENGTH];
u8 resp[IO_BUFFER_LENGTH];
u8 *cmd;
u8 *resp;
struct parsed_resp parsed;
size_t prev_d_len;
@ -2028,6 +2028,8 @@ void free_opal_dev(struct opal_dev *dev)
if (!dev)
return;
clean_opal_dev(dev);
kfree(dev->resp);
kfree(dev->cmd);
kfree(dev);
}
EXPORT_SYMBOL(free_opal_dev);
@ -2040,16 +2042,38 @@ struct opal_dev *init_opal_dev(void *data, sec_send_recv *send_recv)
if (!dev)
return NULL;
/*
* Presumably DMA-able buffers must be cache-aligned. Kmalloc makes
* sure the allocated buffer is DMA-safe in that regard.
*/
dev->cmd = kmalloc(IO_BUFFER_LENGTH, GFP_KERNEL);
if (!dev->cmd)
goto err_free_dev;
dev->resp = kmalloc(IO_BUFFER_LENGTH, GFP_KERNEL);
if (!dev->resp)
goto err_free_cmd;
INIT_LIST_HEAD(&dev->unlk_lst);
mutex_init(&dev->dev_lock);
dev->data = data;
dev->send_recv = send_recv;
if (check_opal_support(dev) != 0) {
pr_debug("Opal is not supported on this device\n");
kfree(dev);
return NULL;
goto err_free_resp;
}
return dev;
err_free_resp:
kfree(dev->resp);
err_free_cmd:
kfree(dev->cmd);
err_free_dev:
kfree(dev);
return NULL;
}
EXPORT_SYMBOL(init_opal_dev);
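
A generic, self-contained sketch of the unwind idiom used above: allocate in order, release in reverse order on the first failure, so every exit path frees exactly what was acquired. Names are stand-ins, not the opal API.

#include <stdlib.h>

struct two_bufs { void *cmd, *resp; };

static struct two_bufs *two_bufs_create(size_t len)
{
        struct two_bufs *d = calloc(1, sizeof(*d));

        if (!d)
                return NULL;
        d->cmd = malloc(len);
        if (!d->cmd)
                goto err_free_dev;
        d->resp = malloc(len);
        if (!d->resp)
                goto err_free_cmd;
        return d;

err_free_cmd:
        free(d->cmd);
err_free_dev:
        free(d);
        return NULL;
}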


@ -317,7 +317,6 @@ int ata_tport_add(struct device *parent,
tport_err:
transport_destroy_device(dev);
put_device(dev);
ata_host_put(ap->host);
return error;
}


@ -292,9 +292,10 @@ static void pdc20230_set_piomode(struct ata_port *ap, struct ata_device *adev)
outb(inb(0x1F4) & 0x07, 0x1F4);
rt = inb(0x1F3);
rt &= 0x07 << (3 * adev->devno);
rt &= ~(0x07 << (3 * !adev->devno));
if (pio)
rt |= (1 + 3 * pio) << (3 * adev->devno);
rt |= (1 + 3 * pio) << (3 * !adev->devno);
outb(rt, 0x1F3);
udelay(100);
outb(inb(0x1F2) | 0x01, 0x1F2);

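A worked, runnable check of the timing-nibble arithmetic in the fix above, assuming devno is 0 or 1 and each drive owns a 3-bit field: the corrected code clears and programs the field belonging to the other drive (!devno), leaving this drive's field untouched.

#include <assert.h>
#include <stdint.h>

int main(void)
{
        uint8_t rt = 0x3f;      /* both 3-bit timing fields set */
        int devno = 0, pio = 2;

        rt &= ~(0x07 << (3 * !devno));       /* clear drive 1's field */
        rt |= (1 + 3 * pio) << (3 * !devno); /* program 7 into bits 3..5 */

        assert(rt == ((7 << 3) | 0x07));     /* drive 0's field untouched */
        return 0;
}
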

@ -669,6 +669,12 @@ ssize_t __weak cpu_show_mmio_stale_data(struct device *dev,
return sysfs_emit(buf, "Not affected\n");
}
ssize_t __weak cpu_show_retbleed(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sysfs_emit(buf, "Not affected\n");
}
static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL);
@ -679,6 +685,7 @@ static DEVICE_ATTR(tsx_async_abort, 0444, cpu_show_tsx_async_abort, NULL);
static DEVICE_ATTR(itlb_multihit, 0444, cpu_show_itlb_multihit, NULL);
static DEVICE_ATTR(srbds, 0444, cpu_show_srbds, NULL);
static DEVICE_ATTR(mmio_stale_data, 0444, cpu_show_mmio_stale_data, NULL);
static DEVICE_ATTR(retbleed, 0444, cpu_show_retbleed, NULL);
static struct attribute *cpu_root_vulnerabilities_attrs[] = {
&dev_attr_meltdown.attr,
@ -691,6 +698,7 @@ static struct attribute *cpu_root_vulnerabilities_attrs[] = {
&dev_attr_itlb_multihit.attr,
&dev_attr_srbds.attr,
&dev_attr_mmio_stale_data.attr,
&dev_attr_retbleed.attr,
NULL
};


@ -2770,7 +2770,7 @@ static int init_submitter(struct drbd_device *device)
enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsigned int minor)
{
struct drbd_resource *resource = adm_ctx->resource;
struct drbd_connection *connection;
struct drbd_connection *connection, *n;
struct drbd_device *device;
struct drbd_peer_device *peer_device, *tmp_peer_device;
struct gendisk *disk;
@ -2898,7 +2898,7 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig
out_idr_remove_vol:
idr_remove(&connection->peer_devices, vnr);
out_idr_remove_from_resource:
for_each_connection(connection, resource) {
for_each_connection_safe(connection, n, resource) {
peer_device = idr_remove(&connection->peer_devices, vnr);
if (peer_device)
kref_put(&connection->kref, drbd_destroy_connection);


@ -268,6 +268,9 @@ EXPORT_SYMBOL_GPL(sunxi_rsb_driver_register);
/* common code that starts a transfer */
static int _sunxi_rsb_run_xfer(struct sunxi_rsb *rsb)
{
u32 int_mask, status;
bool timeout;
if (readl(rsb->regs + RSB_CTRL) & RSB_CTRL_START_TRANS) {
dev_dbg(rsb->dev, "RSB transfer still in progress\n");
return -EBUSY;
@ -275,13 +278,23 @@ static int _sunxi_rsb_run_xfer(struct sunxi_rsb *rsb)
reinit_completion(&rsb->complete);
writel(RSB_INTS_LOAD_BSY | RSB_INTS_TRANS_ERR | RSB_INTS_TRANS_OVER,
rsb->regs + RSB_INTE);
int_mask = RSB_INTS_LOAD_BSY | RSB_INTS_TRANS_ERR | RSB_INTS_TRANS_OVER;
writel(int_mask, rsb->regs + RSB_INTE);
writel(RSB_CTRL_START_TRANS | RSB_CTRL_GLOBAL_INT_ENB,
rsb->regs + RSB_CTRL);
if (!wait_for_completion_io_timeout(&rsb->complete,
msecs_to_jiffies(100))) {
if (irqs_disabled()) {
timeout = readl_poll_timeout_atomic(rsb->regs + RSB_INTS,
status, (status & int_mask),
10, 100000);
writel(status, rsb->regs + RSB_INTS);
} else {
timeout = !wait_for_completion_io_timeout(&rsb->complete,
msecs_to_jiffies(100));
status = rsb->status;
}
if (timeout) {
dev_dbg(rsb->dev, "RSB timeout\n");
/* abort the transfer */
@ -293,18 +306,18 @@ static int _sunxi_rsb_run_xfer(struct sunxi_rsb *rsb)
return -ETIMEDOUT;
}
if (rsb->status & RSB_INTS_LOAD_BSY) {
if (status & RSB_INTS_LOAD_BSY) {
dev_dbg(rsb->dev, "RSB busy\n");
return -EBUSY;
}
if (rsb->status & RSB_INTS_TRANS_ERR) {
if (rsb->status & RSB_INTS_TRANS_ERR_ACK) {
if (status & RSB_INTS_TRANS_ERR) {
if (status & RSB_INTS_TRANS_ERR_ACK) {
dev_dbg(rsb->dev, "RSB slave nack\n");
return -EINVAL;
}
if (rsb->status & RSB_INTS_TRANS_ERR_DATA) {
if (status & RSB_INTS_TRANS_ERR_DATA) {
dev_dbg(rsb->dev, "RSB transfer data error\n");
return -EIO;
}


@ -47,6 +47,7 @@
#include <asm/msr.h>
#include <asm/processor.h>
#include <asm/cpufeature.h>
#include <asm/cpu_device_id.h>
MODULE_AUTHOR("Paul Diefenbaugh, Dominik Brodowski");
MODULE_DESCRIPTION("ACPI Processor P-States Driver");


@ -21,6 +21,7 @@
#include <asm/msr.h>
#include <asm/cpufeature.h>
#include <asm/cpu_device_id.h>
#include "cpufreq_ondemand.h"


@ -252,6 +252,8 @@ static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first)
ATC_SPIP_BOUNDARY(first->boundary));
channel_writel(atchan, DPIP, ATC_DPIP_HOLE(first->dst_hole) |
ATC_DPIP_BOUNDARY(first->boundary));
/* Don't allow CPU to reorder channel enable. */
wmb();
dma_writel(atdma, CHER, atchan->mask);
vdbg_dump_regs(atchan);
@ -312,7 +314,8 @@ static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
struct at_desc *desc_first = atc_first_active(atchan);
struct at_desc *desc;
int ret;
u32 ctrla, dscr, trials;
u32 ctrla, dscr;
unsigned int i;
/*
* If the cookie doesn't match to the currently running transfer then
@ -382,7 +385,7 @@ static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
dscr = channel_readl(atchan, DSCR);
rmb(); /* ensure DSCR is read before CTRLA */
ctrla = channel_readl(atchan, CTRLA);
for (trials = 0; trials < ATC_MAX_DSCR_TRIALS; ++trials) {
for (i = 0; i < ATC_MAX_DSCR_TRIALS; ++i) {
u32 new_dscr;
rmb(); /* ensure DSCR is read after CTRLA */
@ -408,7 +411,7 @@ static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
rmb(); /* ensure DSCR is read before CTRLA */
ctrla = channel_readl(atchan, CTRLA);
}
if (unlikely(trials >= ATC_MAX_DSCR_TRIALS))
if (unlikely(i == ATC_MAX_DSCR_TRIALS))
return -ETIMEDOUT;
/* for the first descriptor we can be more accurate */
@ -556,10 +559,6 @@ static void atc_handle_error(struct at_dma_chan *atchan)
bad_desc = atc_first_active(atchan);
list_del_init(&bad_desc->desc_node);
/* As we are stopped, take advantage to push queued descriptors
* in active_list */
list_splice_init(&atchan->queue, atchan->active_list.prev);
/* Try to restart the controller */
if (!list_empty(&atchan->active_list))
atc_dostart(atchan, atc_first_active(atchan));
@ -680,19 +679,11 @@ static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx)
spin_lock_irqsave(&atchan->lock, flags);
cookie = dma_cookie_assign(tx);
if (list_empty(&atchan->active_list)) {
dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
desc->txd.cookie);
atc_dostart(atchan, desc);
list_add_tail(&desc->desc_node, &atchan->active_list);
} else {
dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
desc->txd.cookie);
list_add_tail(&desc->desc_node, &atchan->queue);
}
list_add_tail(&desc->desc_node, &atchan->queue);
spin_unlock_irqrestore(&atchan->lock, flags);
dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
desc->txd.cookie);
return cookie;
}
@ -1967,7 +1958,11 @@ static int __init at_dma_probe(struct platform_device *pdev)
dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) ? "slave " : "",
plat_dat->nr_channels);
dma_async_device_register(&atdma->dma_common);
err = dma_async_device_register(&atdma->dma_common);
if (err) {
dev_err(&pdev->dev, "Unable to register: %d.\n", err);
goto err_dma_async_device_register;
}
/*
* Do not return an error if the dmac node is not present in order to
@ -1987,6 +1982,7 @@ static int __init at_dma_probe(struct platform_device *pdev)
err_of_dma_controller_register:
dma_async_device_unregister(&atdma->dma_common);
err_dma_async_device_register:
dma_pool_destroy(atdma->memset_pool);
err_memset_pool_create:
dma_pool_destroy(atdma->dma_desc_pool);


@ -168,13 +168,13 @@
/* LLI == Linked List Item; aka DMA buffer descriptor */
struct at_lli {
/* values that are not changed by hardware */
dma_addr_t saddr;
dma_addr_t daddr;
u32 saddr;
u32 daddr;
/* value that may get written back: */
u32 ctrla;
u32 ctrla;
/* more values that are not changed by hardware */
u32 ctrlb;
dma_addr_t dscr; /* chain to next lli */
u32 ctrlb;
u32 dscr; /* chain to next lli */
};
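
With every field fixed at u32, the in-memory layout matches the controller's five-word descriptor on both 32- and 64-bit builds; a compile-time check can pin that down. A hedged sketch using standard C types:

#include <stdint.h>

struct at_lli_sketch {
        uint32_t saddr;
        uint32_t daddr;
        uint32_t ctrla;
        uint32_t ctrlb;
        uint32_t dscr;
};

_Static_assert(sizeof(struct at_lli_sketch) == 5 * sizeof(uint32_t),
               "hardware LLI must be exactly five 32-bit words");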
/**


@ -906,6 +906,7 @@ static int mv_xor_v2_remove(struct platform_device *pdev)
tasklet_kill(&xor_dev->irq_tasklet);
clk_disable_unprepare(xor_dev->clk);
clk_disable_unprepare(xor_dev->reg_clk);
return 0;
}


@ -556,7 +556,7 @@ int __init efi_config_parse_tables(void *config_tables, int count, int sz,
seed = early_memremap(efi.rng_seed, sizeof(*seed));
if (seed != NULL) {
size = seed->size;
size = min(seed->size, EFI_RANDOM_SEED_SIZE);
early_memunmap(seed, sizeof(*seed));
} else {
pr_err("Could not map UEFI random seed!\n");


@ -330,11 +330,9 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
if (r)
goto release_object;
if (args->flags & AMDGPU_GEM_USERPTR_REGISTER) {
r = amdgpu_mn_register(bo, args->addr);
if (r)
goto release_object;
}
r = amdgpu_mn_register(bo, args->addr);
if (r)
goto release_object;
if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
r = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm,


@ -324,7 +324,8 @@ static const struct dce_audio_registers audio_regs[] = {
audio_regs(2),
audio_regs(3),
audio_regs(4),
audio_regs(5)
audio_regs(5),
audio_regs(6),
};
#define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\


@ -95,6 +95,12 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "One S1003"),
},
.driver_data = (void *)&lcd800x1280_rightside_up,
}, { /* Acer Switch V 10 (SW5-017) */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Acer"),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "SW5-017"),
},
.driver_data = (void *)&lcd800x1280_rightside_up,
}, { /* Anbernic Win600 */
.matches = {
DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Anbernic"),


@ -55,13 +55,13 @@ static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachme
goto err_unpin_pages;
}
ret = sg_alloc_table(st, obj->mm.pages->nents, GFP_KERNEL);
ret = sg_alloc_table(st, obj->mm.pages->orig_nents, GFP_KERNEL);
if (ret)
goto err_free;
src = obj->mm.pages->sgl;
dst = st->sgl;
for (i = 0; i < obj->mm.pages->nents; i++) {
for (i = 0; i < obj->mm.pages->orig_nents; i++) {
sg_set_page(dst, sg_page(src), src->length, 0);
dst = sg_next(dst);
src = sg_next(src);


@ -243,8 +243,9 @@ static int imx_tve_connector_get_modes(struct drm_connector *connector)
return ret;
}
static int imx_tve_connector_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
static enum drm_mode_status
imx_tve_connector_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct imx_tve *tve = con_to_tve(connector);
unsigned long rate;


@ -394,7 +394,12 @@ static int __init vc4_drm_register(void)
if (ret)
return ret;
return platform_driver_register(&vc4_platform_driver);
ret = platform_driver_register(&vc4_platform_driver);
if (ret)
platform_unregister_drivers(component_drivers,
ARRAY_SIZE(component_drivers));
return ret;
}
static void __exit vc4_drm_unregister(void)


@ -500,7 +500,7 @@ static int mousevsc_probe(struct hv_device *device,
ret = hid_add_device(hid_dev);
if (ret)
goto probe_err1;
goto probe_err2;
ret = hid_parse(hid_dev);


@ -793,6 +793,7 @@
#define USB_DEVICE_ID_MADCATZ_BEATPAD 0x4540
#define USB_DEVICE_ID_MADCATZ_RAT5 0x1705
#define USB_DEVICE_ID_MADCATZ_RAT9 0x1709
#define USB_DEVICE_ID_MADCATZ_MMO7 0x1713
#define USB_VENDOR_ID_MCC 0x09db
#define USB_DEVICE_ID_MCC_PMD1024LS 0x0076


@ -623,6 +623,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_MMO7) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT5) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT9) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_MMO7) },
#endif
#if IS_ENABLED(CONFIG_HID_SAMSUNG)
{ HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_IR_REMOTE) },


@ -191,6 +191,8 @@ static const struct hid_device_id saitek_devices[] = {
.driver_data = SAITEK_RELEASE_MODE_RAT7 },
{ HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_MMO7),
.driver_data = SAITEK_RELEASE_MODE_MMO7 },
{ HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_MMO7),
.driver_data = SAITEK_RELEASE_MODE_MMO7 },
{ }
};


@ -508,13 +508,17 @@ static void vmbus_add_channel_work(struct work_struct *work)
* Add the new device to the bus. This will kick off device-driver
* binding which eventually invokes the device driver's AddDevice()
* method.
*
* If vmbus_device_register() fails, the 'device_obj' is freed in
* vmbus_device_release() as called by device_unregister() in the
* error path of vmbus_device_register(). In the outside error
* path, there's no need to free it.
*/
ret = vmbus_device_register(newchannel->device_obj);
if (ret != 0) {
pr_err("unable to add child device object (relid %d)\n",
newchannel->offermsg.child_relid);
kfree(newchannel->device_obj);
goto err_deq_chan;
}


@ -1634,6 +1634,7 @@ int vmbus_device_register(struct hv_device *child_device_obj)
ret = device_register(&child_device_obj->device);
if (ret) {
pr_err("Unable to register child device\n");
put_device(&child_device_obj->device);
return ret;
}


@ -255,10 +255,13 @@ static int adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
*/
if (host_bridge && host_bridge->vendor == PCI_VENDOR_ID_INTEL) {
for (i = 0; i < ARRAY_SIZE(tjmax_pci_table); i++) {
if (host_bridge->device == tjmax_pci_table[i].device)
if (host_bridge->device == tjmax_pci_table[i].device) {
pci_dev_put(host_bridge);
return tjmax_pci_table[i].tjmax;
}
}
}
pci_dev_put(host_bridge);
for (i = 0; i < ARRAY_SIZE(tjmax_table); i++) {
if (strstr(c->x86_model_id, tjmax_table[i].id))
@ -546,6 +549,10 @@ static void coretemp_remove_core(struct platform_data *pdata, int indx)
{
struct temp_data *tdata = pdata->core_data[indx];
/* if we errored on add then this is already gone */
if (!tdata)
return;
/* Remove the sysfs attributes */
sysfs_remove_group(&pdata->hwmon_dev->kobj, &tdata->attr_group);


@ -117,7 +117,7 @@ static int i5500_temp_probe(struct pci_dev *pdev,
u32 tstimer;
s8 tsfsc;
err = pci_enable_device(pdev);
err = pcim_enable_device(pdev);
if (err) {
dev_err(&pdev->dev, "Failed to enable device\n");
return err;


@ -517,6 +517,7 @@ static void ibmpex_register_bmc(int iface, struct device *dev)
return;
out_register:
list_del(&data->list);
hwmon_device_unregister(data->hwmon_dev);
out_user:
ipmi_destroy_user(data->user);

@@ -893,6 +893,7 @@ static struct platform_driver xiic_i2c_driver = {
 module_platform_driver(xiic_i2c_driver);
 
+MODULE_ALIAS("platform:" DRIVER_NAME);
 MODULE_AUTHOR("info@mocean-labs.com");
 MODULE_DESCRIPTION("Xilinx I2C bus driver");
 MODULE_LICENSE("GPL v2");

@@ -58,11 +58,13 @@
 #include <linux/tick.h>
 #include <trace/events/power.h>
 #include <linux/sched.h>
+#include <linux/sched/smt.h>
 #include <linux/notifier.h>
 #include <linux/cpu.h>
 #include <linux/moduleparam.h>
 #include <asm/cpu_device_id.h>
 #include <asm/intel-family.h>
+#include <asm/nospec-branch.h>
 #include <asm/mwait.h>
 #include <asm/msr.h>
@@ -109,6 +111,12 @@ static struct cpuidle_state *cpuidle_state_table;
  */
 #define CPUIDLE_FLAG_TLB_FLUSHED 0x10000
 
+/*
+ * Disable IBRS across idle (when KERNEL_IBRS), is exclusive vs IRQ_ENABLE
+ * above.
+ */
+#define CPUIDLE_FLAG_IBRS BIT(16)
+
 /*
  * MWAIT takes an 8-bit "hint" in EAX "suggesting"
  * the C-state (top nibble) and sub-state (bottom nibble)
@@ -119,6 +127,24 @@ static struct cpuidle_state *cpuidle_state_table;
 #define flg2MWAIT(flags) (((flags) >> 24) & 0xFF)
 #define MWAIT2flg(eax) ((eax & 0xFF) << 24)
 
+static __cpuidle int intel_idle_ibrs(struct cpuidle_device *dev,
+				     struct cpuidle_driver *drv, int index)
+{
+	bool smt_active = sched_smt_active();
+	u64 spec_ctrl = spec_ctrl_current();
+	int ret;
+
+	if (smt_active)
+		wrmsrl(MSR_IA32_SPEC_CTRL, 0);
+
+	ret = intel_idle(dev, drv, index);
+
+	if (smt_active)
+		wrmsrl(MSR_IA32_SPEC_CTRL, spec_ctrl);
+
+	return ret;
+}
+
 /*
  * States are indexed by the cstate number,
  * which is also the index into the MWAIT hint array.
@@ -617,7 +643,7 @@ static struct cpuidle_state skl_cstates[] = {
 	{
 		.name = "C6",
 		.desc = "MWAIT 0x20",
-		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
+		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS,
 		.exit_latency = 85,
 		.target_residency = 200,
 		.enter = &intel_idle,
@@ -625,7 +651,7 @@ static struct cpuidle_state skl_cstates[] = {
 	{
 		.name = "C7s",
 		.desc = "MWAIT 0x33",
-		.flags = MWAIT2flg(0x33) | CPUIDLE_FLAG_TLB_FLUSHED,
+		.flags = MWAIT2flg(0x33) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS,
 		.exit_latency = 124,
 		.target_residency = 800,
 		.enter = &intel_idle,
@@ -633,7 +659,7 @@ static struct cpuidle_state skl_cstates[] = {
 	{
 		.name = "C8",
 		.desc = "MWAIT 0x40",
-		.flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED,
+		.flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS,
 		.exit_latency = 200,
 		.target_residency = 800,
 		.enter = &intel_idle,
@@ -641,7 +667,7 @@ static struct cpuidle_state skl_cstates[] = {
 	{
 		.name = "C9",
 		.desc = "MWAIT 0x50",
-		.flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED,
+		.flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS,
 		.exit_latency = 480,
 		.target_residency = 5000,
 		.enter = &intel_idle,
@@ -649,7 +675,7 @@ static struct cpuidle_state skl_cstates[] = {
 	{
 		.name = "C10",
 		.desc = "MWAIT 0x60",
-		.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
+		.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS,
 		.exit_latency = 890,
 		.target_residency = 5000,
 		.enter = &intel_idle,
@@ -678,7 +704,7 @@ static struct cpuidle_state skx_cstates[] = {
 	{
 		.name = "C6",
 		.desc = "MWAIT 0x20",
-		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
+		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS,
 		.exit_latency = 133,
 		.target_residency = 600,
 		.enter = &intel_idle,
@@ -1384,6 +1410,11 @@ static void __init intel_idle_cpuidle_driver_init(void)
 		drv->states[drv->state_count] = /* structure copy */
 			cpuidle_state_table[cstate];
 
+		if (cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS) &&
+		    cpuidle_state_table[cstate].flags & CPUIDLE_FLAG_IBRS) {
+			drv->states[drv->state_count].enter = intel_idle_ibrs;
+		}
+
 		drv->state_count += 1;
 	}

@@ -617,8 +617,10 @@ static struct iio_trigger *at91_adc_allocate_trigger(struct iio_dev *idev,
 	trig->ops = &at91_adc_trigger_ops;
 
 	ret = iio_trigger_register(trig);
-	if (ret)
+	if (ret) {
+		iio_trigger_free(trig);
 		return NULL;
+	}
 
 	return trig;
 }

@@ -253,14 +253,14 @@ static int afe4403_read_raw(struct iio_dev *indio_dev,
 			    int *val, int *val2, long mask)
 {
 	struct afe4403_data *afe = iio_priv(indio_dev);
-	unsigned int reg = afe4403_channel_values[chan->address];
-	unsigned int field = afe4403_channel_leds[chan->address];
+	unsigned int reg, field;
 	int ret;
 
 	switch (chan->type) {
 	case IIO_INTENSITY:
 		switch (mask) {
 		case IIO_CHAN_INFO_RAW:
+			reg = afe4403_channel_values[chan->address];
 			ret = afe4403_read(afe, reg, val);
 			if (ret)
 				return ret;
@@ -270,6 +270,7 @@ static int afe4403_read_raw(struct iio_dev *indio_dev,
 	case IIO_CURRENT:
 		switch (mask) {
 		case IIO_CHAN_INFO_RAW:
+			field = afe4403_channel_leds[chan->address];
 			ret = regmap_field_read(afe->fields[field], val);
 			if (ret)
 				return ret;

@@ -258,20 +258,20 @@ static int afe4404_read_raw(struct iio_dev *indio_dev,
 			    int *val, int *val2, long mask)
 {
 	struct afe4404_data *afe = iio_priv(indio_dev);
-	unsigned int value_reg = afe4404_channel_values[chan->address];
-	unsigned int led_field = afe4404_channel_leds[chan->address];
-	unsigned int offdac_field = afe4404_channel_offdacs[chan->address];
+	unsigned int value_reg, led_field, offdac_field;
 	int ret;
 
 	switch (chan->type) {
 	case IIO_INTENSITY:
 		switch (mask) {
 		case IIO_CHAN_INFO_RAW:
+			value_reg = afe4404_channel_values[chan->address];
 			ret = regmap_read(afe->regmap, value_reg, val);
 			if (ret)
 				return ret;
 			return IIO_VAL_INT;
 		case IIO_CHAN_INFO_OFFSET:
+			offdac_field = afe4404_channel_offdacs[chan->address];
 			ret = regmap_field_read(afe->fields[offdac_field], val);
 			if (ret)
 				return ret;
@@ -281,6 +281,7 @@ static int afe4404_read_raw(struct iio_dev *indio_dev,
 	case IIO_CURRENT:
 		switch (mask) {
 		case IIO_CHAN_INFO_RAW:
+			led_field = afe4404_channel_leds[chan->address];
 			ret = regmap_field_read(afe->fields[led_field], val);
 			if (ret)
 				return ret;
@@ -303,19 +304,20 @@ static int afe4404_write_raw(struct iio_dev *indio_dev,
 			     int val, int val2, long mask)
 {
 	struct afe4404_data *afe = iio_priv(indio_dev);
-	unsigned int led_field = afe4404_channel_leds[chan->address];
-	unsigned int offdac_field = afe4404_channel_offdacs[chan->address];
+	unsigned int led_field, offdac_field;
 
 	switch (chan->type) {
 	case IIO_INTENSITY:
 		switch (mask) {
 		case IIO_CHAN_INFO_OFFSET:
+			offdac_field = afe4404_channel_offdacs[chan->address];
 			return regmap_field_write(afe->fields[offdac_field], val);
 		}
 		break;
 	case IIO_CURRENT:
 		switch (mask) {
 		case IIO_CHAN_INFO_RAW:
+			led_field = afe4404_channel_leds[chan->address];
 			return regmap_field_write(afe->fields[led_field], val);
 		}
 		break;
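
Both AFE440x fixes are about ordering: chan->address is only a valid index into a given lookup table for the channel types that table covers, so the table reads now happen inside the matching switch case instead of up front. A sketch of the hazard with a hypothetical four-entry table:

    static const unsigned int value_reg[4] = { 0x2a, 0x2b, 0x2c, 0x2d };

    static int example_get_reg(const struct iio_chan_spec *chan,
                               unsigned int *reg)
    {
        switch (chan->type) {
        case IIO_INTENSITY:
            /* safe: only intensity channels carry address < 4 */
            *reg = value_reg[chan->address];
            return 0;
        default:
            /* indexing here with another type could run off the table */
            return -EINVAL;
        }
    }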

@@ -61,8 +61,12 @@ int iio_register_sw_trigger_type(struct iio_sw_trigger_type *t)
 	t->group = configfs_register_default_group(iio_triggers_group, t->name,
 						   &iio_trigger_type_group_type);
-	if (IS_ERR(t->group))
+	if (IS_ERR(t->group)) {
+		mutex_lock(&iio_trigger_types_lock);
+		list_del(&t->list);
+		mutex_unlock(&iio_trigger_types_lock);
 		ret = PTR_ERR(t->group);
+	}
 
 	return ret;
 }

@@ -238,6 +238,8 @@ config RPR0521
 	tristate "ROHM RPR0521 ALS and proximity sensor driver"
 	depends on I2C
 	select REGMAP_I2C
+	select IIO_BUFFER
+	select IIO_TRIGGERED_BUFFER
 	help
 	  Say Y here if you want to build support for ROHM's RPR0521
 	  ambient light and proximity sensor device.

@@ -55,9 +55,6 @@
 #define APDS9960_REG_CONTROL_PGAIN_MASK_SHIFT 2
 
 #define APDS9960_REG_CONFIG_2 0x90
-#define APDS9960_REG_CONFIG_2_GGAIN_MASK 0x60
-#define APDS9960_REG_CONFIG_2_GGAIN_MASK_SHIFT 5
-
 #define APDS9960_REG_ID 0x92
 
 #define APDS9960_REG_STATUS 0x93
@@ -78,6 +75,9 @@
 #define APDS9960_REG_GCONF_1_GFIFO_THRES_MASK_SHIFT 6
 
 #define APDS9960_REG_GCONF_2 0xa3
+#define APDS9960_REG_GCONF_2_GGAIN_MASK 0x60
+#define APDS9960_REG_GCONF_2_GGAIN_MASK_SHIFT 5
+
 #define APDS9960_REG_GOFFSET_U 0xa4
 #define APDS9960_REG_GOFFSET_D 0xa5
 #define APDS9960_REG_GPULSE 0xa6
@@ -396,9 +396,9 @@ static int apds9960_set_pxs_gain(struct apds9960_data *data, int val)
 	}
 
 	ret = regmap_update_bits(data->regmap,
-				 APDS9960_REG_CONFIG_2,
-				 APDS9960_REG_CONFIG_2_GGAIN_MASK,
-				 idx << APDS9960_REG_CONFIG_2_GGAIN_MASK_SHIFT);
+				 APDS9960_REG_GCONF_2,
+				 APDS9960_REG_GCONF_2_GGAIN_MASK,
+				 idx << APDS9960_REG_GCONF_2_GGAIN_MASK_SHIFT);
 	if (!ret)
 		data->pxs_gain = idx;
 
 	mutex_unlock(&data->lock);

@@ -28,13 +28,6 @@ enum {
 	MS5607,
 };
 
-struct ms5611_chip_info {
-	u16 prom[MS5611_PROM_WORDS_NB];
-
-	int (*temp_and_pressure_compensate)(struct ms5611_chip_info *chip_info,
-					    s32 *temp, s32 *pressure);
-};
-
 /*
  * OverSampling Rate descriptor.
  * Warning: cmd MUST be kept aligned on a word boundary (see
@@ -53,12 +46,15 @@ struct ms5611_state {
 	const struct ms5611_osr *pressure_osr;
 	const struct ms5611_osr *temp_osr;
 
-	int (*reset)(struct device *dev);
-	int (*read_prom_word)(struct device *dev, int index, u16 *word);
-	int (*read_adc_temp_and_pressure)(struct device *dev,
+	u16 prom[MS5611_PROM_WORDS_NB];
+
+	int (*reset)(struct ms5611_state *st);
+	int (*read_prom_word)(struct ms5611_state *st, int index, u16 *word);
+	int (*read_adc_temp_and_pressure)(struct ms5611_state *st,
 					  s32 *temp, s32 *pressure);
 
-	struct ms5611_chip_info *chip_info;
+	int (*compensate_temp_and_pressure)(struct ms5611_state *st, s32 *temp,
+					    s32 *pressure);
 	struct regulator *vdd;
 };

@@ -88,8 +88,7 @@ static int ms5611_read_prom(struct iio_dev *indio_dev)
 	struct ms5611_state *st = iio_priv(indio_dev);
 
 	for (i = 0; i < MS5611_PROM_WORDS_NB; i++) {
-		ret = st->read_prom_word(&indio_dev->dev,
-					 i, &st->chip_info->prom[i]);
+		ret = st->read_prom_word(st, i, &st->prom[i]);
 		if (ret < 0) {
 			dev_err(&indio_dev->dev,
 				"failed to read prom at %d\n", i);
@@ -97,7 +96,7 @@ static int ms5611_read_prom(struct iio_dev *indio_dev)
 		}
 	}
 
-	if (!ms5611_prom_is_valid(st->chip_info->prom, MS5611_PROM_WORDS_NB)) {
+	if (!ms5611_prom_is_valid(st->prom, MS5611_PROM_WORDS_NB)) {
 		dev_err(&indio_dev->dev, "PROM integrity check failed\n");
 		return -ENODEV;
 	}
@@ -111,28 +110,27 @@ static int ms5611_read_temp_and_pressure(struct iio_dev *indio_dev,
 	int ret;
 	struct ms5611_state *st = iio_priv(indio_dev);
 
-	ret = st->read_adc_temp_and_pressure(&indio_dev->dev, temp, pressure);
+	ret = st->read_adc_temp_and_pressure(st, temp, pressure);
 	if (ret < 0) {
 		dev_err(&indio_dev->dev,
 			"failed to read temperature and pressure\n");
 		return ret;
 	}
 
-	return st->chip_info->temp_and_pressure_compensate(st->chip_info,
-							   temp, pressure);
+	return st->compensate_temp_and_pressure(st, temp, pressure);
 }
 
-static int ms5611_temp_and_pressure_compensate(struct ms5611_chip_info *chip_info,
+static int ms5611_temp_and_pressure_compensate(struct ms5611_state *st,
 					       s32 *temp, s32 *pressure)
 {
 	s32 t = *temp, p = *pressure;
 	s64 off, sens, dt;
 
-	dt = t - (chip_info->prom[5] << 8);
-	off = ((s64)chip_info->prom[2] << 16) + ((chip_info->prom[4] * dt) >> 7);
-	sens = ((s64)chip_info->prom[1] << 15) + ((chip_info->prom[3] * dt) >> 8);
+	dt = t - (st->prom[5] << 8);
+	off = ((s64)st->prom[2] << 16) + ((st->prom[4] * dt) >> 7);
+	sens = ((s64)st->prom[1] << 15) + ((st->prom[3] * dt) >> 8);
 
-	t = 2000 + ((chip_info->prom[6] * dt) >> 23);
+	t = 2000 + ((st->prom[6] * dt) >> 23);
 	if (t < 2000) {
 		s64 off2, sens2, t2;
@@ -158,17 +156,17 @@ static int ms5611_temp_and_pressure_compensate(struct ms5611_chip_info *chip_info,
 	return 0;
 }
 
-static int ms5607_temp_and_pressure_compensate(struct ms5611_chip_info *chip_info,
+static int ms5607_temp_and_pressure_compensate(struct ms5611_state *st,
 					       s32 *temp, s32 *pressure)
 {
 	s32 t = *temp, p = *pressure;
 	s64 off, sens, dt;
 
-	dt = t - (chip_info->prom[5] << 8);
-	off = ((s64)chip_info->prom[2] << 17) + ((chip_info->prom[4] * dt) >> 6);
-	sens = ((s64)chip_info->prom[1] << 16) + ((chip_info->prom[3] * dt) >> 7);
+	dt = t - (st->prom[5] << 8);
+	off = ((s64)st->prom[2] << 17) + ((st->prom[4] * dt) >> 6);
+	sens = ((s64)st->prom[1] << 16) + ((st->prom[3] * dt) >> 7);
 
-	t = 2000 + ((chip_info->prom[6] * dt) >> 23);
+	t = 2000 + ((st->prom[6] * dt) >> 23);
 	if (t < 2000) {
 		s64 off2, sens2, t2, tmp;
@@ -199,7 +197,7 @@ static int ms5611_reset(struct iio_dev *indio_dev)
 	int ret;
 	struct ms5611_state *st = iio_priv(indio_dev);
 
-	ret = st->reset(&indio_dev->dev);
+	ret = st->reset(st);
 	if (ret < 0) {
 		dev_err(&indio_dev->dev, "failed to reset device\n");
 		return ret;
@@ -346,15 +344,6 @@ static int ms5611_write_raw(struct iio_dev *indio_dev,
 
 static const unsigned long ms5611_scan_masks[] = {0x3, 0};
 
-static struct ms5611_chip_info chip_info_tbl[] = {
-	[MS5611] = {
-		.temp_and_pressure_compensate = ms5611_temp_and_pressure_compensate,
-	},
-	[MS5607] = {
-		.temp_and_pressure_compensate = ms5607_temp_and_pressure_compensate,
-	}
-};
-
 static const struct iio_chan_spec ms5611_channels[] = {
 	{
 		.type = IIO_PRESSURE,
@@ -437,7 +426,20 @@ int ms5611_probe(struct iio_dev *indio_dev, struct device *dev,
 	struct ms5611_state *st = iio_priv(indio_dev);
 
 	mutex_init(&st->lock);
-	st->chip_info = &chip_info_tbl[type];
+
+	switch (type) {
+	case MS5611:
+		st->compensate_temp_and_pressure =
+			ms5611_temp_and_pressure_compensate;
+		break;
+	case MS5607:
+		st->compensate_temp_and_pressure =
+			ms5607_temp_and_pressure_compensate;
+		break;
+	default:
+		return -EINVAL;
+	}
+
 	st->temp_osr =
 		&ms5611_avail_temp_osr[ARRAY_SIZE(ms5611_avail_temp_osr) - 1];
 	st->pressure_osr =
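
Beyond the callback-signature cleanup, note where the PROM words moved: the old code kept prom[] inside chip_info_tbl[], a static table shared by every probed sensor, so a second MS5611/MS5607 instance would overwrite the first one's calibration data. Holding prom[] (and the compensation hook) in the per-device ms5611_state avoids that. Reduced to its essence, with hypothetical names:

    /* before: one static copy shared by ALL instances of the chip */
    struct bad_chip_info {
        u16 prom[8];
    };

    /* after: one copy per device, e.g. reached via iio_priv() */
    struct good_state {
        u16 prom[8];
    };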

@@ -21,17 +21,15 @@
 
 #include "ms5611.h"
 
-static int ms5611_i2c_reset(struct device *dev)
+static int ms5611_i2c_reset(struct ms5611_state *st)
 {
-	struct ms5611_state *st = iio_priv(dev_to_iio_dev(dev));
-
 	return i2c_smbus_write_byte(st->client, MS5611_RESET);
 }
 
-static int ms5611_i2c_read_prom_word(struct device *dev, int index, u16 *word)
+static int ms5611_i2c_read_prom_word(struct ms5611_state *st, int index,
+				     u16 *word)
 {
 	int ret;
-	struct ms5611_state *st = iio_priv(dev_to_iio_dev(dev));
 
 	ret = i2c_smbus_read_word_swapped(st->client,
 					  MS5611_READ_PROM_WORD + (index << 1));
@@ -58,11 +56,10 @@ static int ms5611_i2c_read_adc(struct ms5611_state *st, s32 *val)
 	return 0;
 }
 
-static int ms5611_i2c_read_adc_temp_and_pressure(struct device *dev,
+static int ms5611_i2c_read_adc_temp_and_pressure(struct ms5611_state *st,
 						 s32 *temp, s32 *pressure)
 {
 	int ret;
-	struct ms5611_state *st = iio_priv(dev_to_iio_dev(dev));
 	const struct ms5611_osr *osr = st->temp_osr;
 
 	ret = i2c_smbus_write_byte(st->client, osr->cmd);

@@ -16,18 +16,17 @@
 
 #include "ms5611.h"
 
-static int ms5611_spi_reset(struct device *dev)
+static int ms5611_spi_reset(struct ms5611_state *st)
 {
 	u8 cmd = MS5611_RESET;
-	struct ms5611_state *st = iio_priv(dev_to_iio_dev(dev));
 
 	return spi_write_then_read(st->client, &cmd, 1, NULL, 0);
 }
 
-static int ms5611_spi_read_prom_word(struct device *dev, int index, u16 *word)
+static int ms5611_spi_read_prom_word(struct ms5611_state *st, int index,
+				     u16 *word)
 {
 	int ret;
-	struct ms5611_state *st = iio_priv(dev_to_iio_dev(dev));
 
 	ret = spi_w8r16be(st->client, MS5611_READ_PROM_WORD + (index << 1));
 	if (ret < 0)
@@ -38,11 +37,10 @@ static int ms5611_spi_read_prom_word(struct device *dev, int index, u16 *word)
 	return 0;
 }
 
-static int ms5611_spi_read_adc(struct device *dev, s32 *val)
+static int ms5611_spi_read_adc(struct ms5611_state *st, s32 *val)
 {
 	int ret;
 	u8 buf[3] = { MS5611_READ_ADC };
-	struct ms5611_state *st = iio_priv(dev_to_iio_dev(dev));
 
 	ret = spi_write_then_read(st->client, buf, 1, buf, 3);
 	if (ret < 0)
@@ -53,11 +51,10 @@ static int ms5611_spi_read_adc(struct device *dev, s32 *val)
 	return 0;
 }
 
-static int ms5611_spi_read_adc_temp_and_pressure(struct device *dev,
+static int ms5611_spi_read_adc_temp_and_pressure(struct ms5611_state *st,
 						 s32 *temp, s32 *pressure)
 {
 	int ret;
-	struct ms5611_state *st = iio_priv(dev_to_iio_dev(dev));
 	const struct ms5611_osr *osr = st->temp_osr;
 
 	/*
@@ -69,7 +66,7 @@ static int ms5611_spi_read_adc_temp_and_pressure(struct device *dev,
 		return ret;
 
 	usleep_range(osr->conv_usec, osr->conv_usec + (osr->conv_usec / 10UL));
-	ret = ms5611_spi_read_adc(dev, temp);
+	ret = ms5611_spi_read_adc(st, temp);
 	if (ret < 0)
 		return ret;
 
@@ -79,7 +76,7 @@ static int ms5611_spi_read_adc_temp_and_pressure(struct device *dev,
 		return ret;
 
 	usleep_range(osr->conv_usec, osr->conv_usec + (osr->conv_usec / 10UL));
-	return ms5611_spi_read_adc(dev, pressure);
+	return ms5611_spi_read_adc(st, pressure);
 }
 
 static int ms5611_spi_probe(struct spi_device *spi)
@@ -95,7 +92,7 @@ static int ms5611_spi_probe(struct spi_device *spi)
 	spi_set_drvdata(spi, indio_dev);
 
 	spi->mode = SPI_MODE_0;
-	spi->max_speed_hz = 20000000;
+	spi->max_speed_hz = min(spi->max_speed_hz, 20000000U);
 	spi->bits_per_word = 8;
 	ret = spi_setup(spi);
 	if (ret < 0)

@@ -211,9 +211,13 @@ static int iio_sysfs_trigger_remove(int id)
 
 static int __init iio_sysfs_trig_init(void)
 {
+	int ret;
 	device_initialize(&iio_sysfs_trig_dev);
 	dev_set_name(&iio_sysfs_trig_dev, "iio_sysfs_trigger");
-	return device_add(&iio_sysfs_trig_dev);
+	ret = device_add(&iio_sysfs_trig_dev);
+	if (ret)
+		put_device(&iio_sysfs_trig_dev);
+	return ret;
 }
 module_init(iio_sysfs_trig_init);

@@ -338,6 +338,10 @@ static int qedr_alloc_resources(struct qedr_dev *dev)
 		spin_lock_init(&dev->qpidr.idr_lock);
 		idr_init(&dev->qpidr.idr);
 		dev->iwarp_wq = create_singlethread_workqueue("qedr_iwarpq");
+		if (!dev->iwarp_wq) {
+			rc = -ENOMEM;
+			goto err1;
+		}
 	}
 
 	/* Allocate Status blocks for CNQ */
@@ -345,7 +349,7 @@ static int qedr_alloc_resources(struct qedr_dev *dev)
 			       GFP_KERNEL);
 	if (!dev->sb_array) {
 		rc = -ENOMEM;
-		goto err1;
+		goto err_destroy_wq;
 	}
 
 	dev->cnq_array = kcalloc(dev->num_cnq,
@@ -399,6 +403,9 @@ err3:
 	kfree(dev->cnq_array);
 err2:
 	kfree(dev->sb_array);
+err_destroy_wq:
+	if (IS_IWARP(dev))
+		destroy_workqueue(dev->iwarp_wq);
 err1:
 	kfree(dev->sgid_tbl);
 	return rc;
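
The new err_destroy_wq label keeps the unwind ladder in strict reverse order of allocation: a failure after the workqueue exists tears it down, while the workqueue-allocation failure itself jumps past it. The same ladder in miniature, with hypothetical resources:

    struct foo {
        struct workqueue_struct *wq;
        u32 *buf;
    };

    static int foo_alloc_all(struct foo *f)
    {
        int rc;

        f->wq = create_singlethread_workqueue("foo_wq");
        if (!f->wq)
            return -ENOMEM; /* nothing to unwind yet */

        f->buf = kcalloc(16, sizeof(*f->buf), GFP_KERNEL);
        if (!f->buf) {
            rc = -ENOMEM;
            goto err_destroy_wq; /* undo only what already exists */
        }
        return 0;

    err_destroy_wq:
        destroy_workqueue(f->wq);
        return rc;
    }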

@@ -192,6 +192,7 @@ static const char * const smbus_pnp_ids[] = {
 	"SYN3221", /* HP 15-ay000 */
 	"SYN323d", /* HP Spectre X360 13-w013dx */
 	"SYN3257", /* HP Envy 13-ad105ng */
+	"SYN3286", /* HP Laptop 15-da3001TU */
 	NULL
 };

@@ -1544,8 +1544,6 @@ static int i8042_probe(struct platform_device *dev)
 {
 	int error;
 
-	i8042_platform_device = dev;
-
 	if (i8042_reset == I8042_RESET_ALWAYS) {
 		error = i8042_controller_selftest();
 		if (error)
@@ -1583,7 +1581,6 @@ static int i8042_probe(struct platform_device *dev)
 	i8042_free_aux_ports(); /* in case KBD failed but AUX not */
 	i8042_free_irqs();
 	i8042_controller_reset(false);
-	i8042_platform_device = NULL;
 
 	return error;
 }
@@ -1593,7 +1590,6 @@ static int i8042_remove(struct platform_device *dev)
 	i8042_unregister_ports();
 	i8042_free_irqs();
 	i8042_controller_reset(false);
-	i8042_platform_device = NULL;
 
 	return 0;
 }

@@ -804,6 +804,7 @@ int __init dmar_dev_scope_init(void)
 		info = dmar_alloc_pci_notify_info(dev,
 				BUS_NOTIFY_ADD_DEVICE);
 		if (!info) {
+			pci_dev_put(dev);
 			return dmar_dev_scope_status;
 		} else {
 			dmar_pci_bus_add_dev(info);

@@ -970,7 +970,7 @@ nj_release(struct tiger_hw *card)
 	}
 	if (card->irq > 0)
 		free_irq(card->irq, card);
-	if (card->isac.dch.dev.dev.class)
+	if (device_is_registered(&card->isac.dch.dev.dev))
 		mISDN_unregister_device(&card->isac.dch.dev);
 
 	for (i = 0; i < 2; i++) {

@@ -231,7 +231,7 @@ mISDN_register_device(struct mISDNdevice *dev,
 	err = get_free_devid();
 	if (err < 0)
-		goto error1;
+		return err;
 	dev->id = err;
 
 	device_initialize(&dev->dev);
@@ -242,11 +242,12 @@ mISDN_register_device(struct mISDNdevice *dev,
 	if (debug & DEBUG_CORE)
 		printk(KERN_DEBUG "mISDN_register %s %d\n",
 		       dev_name(&dev->dev), dev->id);
+	dev->dev.class = &mISDN_class;
+
 	err = create_stack(dev);
 	if (err)
 		goto error1;
 
-	dev->dev.class = &mISDN_class;
 	dev->dev.platform_data = dev;
 	dev->dev.parent = parent;
 	dev_set_drvdata(&dev->dev, dev);
@@ -258,8 +259,8 @@ mISDN_register_device(struct mISDNdevice *dev,
 error3:
 	delete_stack(dev);
-	return err;
 error1:
+	put_device(&dev->dev);
 	return err;
 }

@@ -97,6 +97,7 @@ int mISDN_dsp_element_register(struct mISDN_dsp_element *elem)
 	if (!entry)
 		return -ENOMEM;
 
+	INIT_LIST_HEAD(&entry->list);
 	entry->elem = elem;
 
 	entry->dev.class = elements_class;
@@ -131,7 +132,7 @@ int mISDN_dsp_element_register(struct mISDN_dsp_element *elem)
 	device_unregister(&entry->dev);
 	return ret;
 err1:
-	kfree(entry);
+	put_device(&entry->dev);
 	return ret;
 }
 EXPORT_SYMBOL(mISDN_dsp_element_register);

@@ -2127,10 +2127,6 @@ static void integrity_writer(struct work_struct *w)
 	unsigned prev_free_sectors;
 
-	/* the following test is not needed, but it tests the replay code */
-	if (unlikely(dm_post_suspending(ic->ti)) && !ic->meta_dev)
-		return;
-
 	spin_lock_irq(&ic->endio_wait.lock);
 	write_start = ic->committed_section;
 	write_sections = ic->n_committed_sections;
@@ -2466,8 +2462,7 @@ static void dm_integrity_postsuspend(struct dm_target *ti)
 	drain_workqueue(ic->commit_wq);
 
 	if (ic->mode == 'J') {
-		if (ic->meta_dev)
-			queue_work(ic->writer_wq, &ic->writer_work);
+		queue_work(ic->writer_wq, &ic->writer_work);
 		drain_workqueue(ic->writer_wq);
 		dm_integrity_flush_buffers(ic, true);
 	}

@@ -573,7 +573,7 @@ static void list_version_get_needed(struct target_type *tt, void *needed_param)
 	size_t *needed = needed_param;
 
 	*needed += sizeof(struct dm_target_versions);
-	*needed += strlen(tt->name);
+	*needed += strlen(tt->name) + 1;
 	*needed += ALIGN_MASK;
 }
@@ -628,7 +628,7 @@ static int list_versions(struct file *filp, struct dm_ioctl *param, size_t param
 	iter_info.old_vers = NULL;
 	iter_info.vers = vers;
 	iter_info.flags = 0;
-	iter_info.end = (char *)vers+len;
+	iter_info.end = (char *)vers + needed;
 
 	/*
 	 * Now loop through filling out the names & versions.
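
The `+ 1` in the first hunk pays for the NUL terminator written when each target name is packed into the output buffer; sizing with bare strlen() undercounts every entry by one byte, and the second hunk then bounds the iterator by that computed size rather than the caller's raw buffer length. Quick arithmetic, assuming a NUL-terminating copy routine such as strcpy():

    size_t needed = 0;

    needed += strlen("linear") + 1; /* 6 chars + '\0' -> 7 bytes */
    needed += strlen("crypt") + 1;  /* 5 chars + '\0' -> 6 bytes */
    /* strcpy(dst, name) writes strlen(name) + 1 bytes, so omitting
       the "+ 1" overruns the buffer at the last entry. */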

@@ -6694,7 +6694,7 @@ static int drxk_read_snr(struct dvb_frontend *fe, u16 *snr)
 static int drxk_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
 {
 	struct drxk_state *state = fe->demodulator_priv;
-	u16 err;
+	u16 err = 0;
 
 	dprintk(1, "\n");

@@ -44,6 +44,8 @@ static void handle_cec_message(struct cros_ec_cec *cros_ec_cec)
 	uint8_t *cec_message = cros_ec->event_data.data.cec_message;
 	unsigned int len = cros_ec->event_size;
 
+	if (len > CEC_MAX_MSG_SIZE)
+		len = CEC_MAX_MSG_SIZE;
 	cros_ec_cec->rx_msg.len = len;
 	memcpy(cros_ec_cec->rx_msg.msg, cec_message, len);

@@ -116,6 +116,8 @@ static irqreturn_t s5p_cec_irq_handler(int irq, void *priv)
 			dev_dbg(cec->dev, "Buffer overrun (worker did not process previous message)\n");
 		cec->rx = STATE_BUSY;
 		cec->msg.len = status >> 24;
+		if (cec->msg.len > CEC_MAX_MSG_SIZE)
+			cec->msg.len = CEC_MAX_MSG_SIZE;
 		cec->msg.rx_status = CEC_RX_STATUS_OK;
 		s5p_cec_get_rx_buf(cec, cec->msg.len,
 				   cec->msg.msg);
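
Both CEC hunks apply the same defensive rule: a message length reported by firmware or decoded from a hardware status register is untrusted and has to be clamped to the destination buffer before the memcpy(). The generic shape of the fix, as a hypothetical helper:

    static void copy_rx_message(u8 *dst, size_t dst_size,
                                const u8 *src, size_t hw_len)
    {
        size_t len = min(hw_len, dst_size); /* clamp untrusted length */

        memcpy(dst, src, len);
    }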

@@ -865,6 +865,7 @@ static int qp_notify_peer_local(bool attach, struct vmci_handle handle)
 	u32 context_id = vmci_get_context_id();
 	struct vmci_event_qp ev;
 
+	memset(&ev, 0, sizeof(ev));
 	ev.msg.hdr.dst = vmci_make_handle(context_id, VMCI_EVENT_HANDLER);
 	ev.msg.hdr.src = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
 					  VMCI_CONTEXT_RESOURCE_ID);
@@ -1476,6 +1477,7 @@ static int qp_notify_peer(bool attach,
 	 * kernel.
 	 */
 
+	memset(&ev, 0, sizeof(ev));
 	ev.msg.hdr.dst = vmci_make_handle(peer_id, VMCI_EVENT_HANDLER);
 	ev.msg.hdr.src = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
 					  VMCI_CONTEXT_RESOURCE_ID);

@@ -2383,7 +2383,13 @@ u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
 		mmc_power_cycle(host, ocr);
 	} else {
 		bit = fls(ocr) - 1;
-		ocr &= 3 << bit;
+		/*
+		 * The bit variable represents the highest voltage bit set in
+		 * the OCR register.
+		 * To keep a range of 2 values (e.g. 3.2V/3.3V and 3.3V/3.4V),
+		 * we must shift the mask '3' with (bit - 1).
+		 */
+		ocr &= 3 << (bit - 1);
 		if (bit != host->ios.vdd)
 			dev_warn(mmc_dev(host), "exceeding card's volts\n");
 	}
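
A worked example of the comment above, using the common OCR value 0x00FF8000 (voltage windows at bits 15-23, bit 23 being 3.5-3.6 V):

    u32 ocr = 0x00FF8000;   /* card advertises bits 15..23 */
    int bit = fls(ocr) - 1; /* fls() returns 24, so bit = 23 */

    /* old: ocr & (3 << bit)       = 0x00800000, a single window (bit 24
     *      lies outside the voltage field entirely)
     * new: ocr & (3 << (bit - 1)) = 0x00C00000, bits 22 and 23, i.e.
     *      two adjacent windows the card actually supports */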

Some files were not shown because too many files have changed in this diff.