Merge tag 'ASB-2023-04-05_4.19-stable' of https://android.googlesource.com/kernel/common into android13-4.19-kona

https://source.android.com/docs/security/bulletin/2023-04-01
CVE-2022-4696
CVE-2023-20941

* tag 'ASB-2023-04-05_4.19-stable' of https://android.googlesource.com/kernel/common:
  UPSTREAM: ext4: fix kernel BUG in 'ext4_write_inline_data_end()'
  UPSTREAM: fsverity: don't drop pagecache at end of FS_IOC_ENABLE_VERITY
  UPSTREAM: fsverity: Remove WQ_UNBOUND from fsverity read workqueue
  BACKPORT: blk-mq: clear stale request in tags->rq[] before freeing one request pool
  Linux 4.19.279
  HID: uhid: Over-ride the default maximum data buffer value with our own
  HID: core: Provide new max_buffer_size attribute to over-ride the default
  serial: 8250_em: Fix UART port type
  drm/i915: Don't use stolen memory for ring buffers with LLC
  x86/mm: Fix use of uninitialized buffer in sme_enable()
  fbdev: stifb: Provide valid pixelclock and add fb_check_var() checks
  ftrace: Fix invalid address access in lookup_rec() when index is 0
  tracing: Make tracepoint lockdep check actually test something
  tracing: Check field value in hist_field_name()
  sh: intc: Avoid spurious sizeof-pointer-div warning
  drm/amdkfd: Fix an illegal memory access
  ext4: fix task hung in ext4_xattr_delete_inode
  ext4: fail ext4_iget if special inode unallocated
  jffs2: correct logic when creating a hole in jffs2_write_begin
  mmc: atmel-mci: fix race between stop command and start of next command
  media: m5mols: fix off-by-one loop termination error
  hwmon: (xgene) Fix use after free bug in xgene_hwmon_remove due to race condition
  hwmon: (adt7475) Fix masking of hysteresis registers
  hwmon: (adt7475) Display smoothing attributes in correct order
  ethernet: sun: add check for the mdesc_grab()
  net/iucv: Fix size of interrupt data
  net: usb: smsc75xx: Move packet length check to prevent kernel panic in skb_pull
  ipv4: Fix incorrect table ID in IOCTL path
  block: sunvdc: add check for mdesc_grab() returning NULL
  nvmet: avoid potential UAF in nvmet_req_complete()
  net: usb: smsc75xx: Limit packet length to skb->len
  nfc: st-nci: Fix use after free bug in ndlc_remove due to race condition
  net: phy: smsc: bail out in lan87xx_read_status if genphy_read_status fails
  net: tunnels: annotate lockless accesses to dev->needed_headroom
  qed/qed_dev: guard against a possible division by zero
  nfc: pn533: initialize struct pn533_out_arg properly
  tcp: tcp_make_synack() can be called from process context
  clk: HI655X: select REGMAP instead of depending on it
  fs: sysfs_emit_at: Remove PAGE_SIZE alignment check
  ext4: fix cgroup writeback accounting with fs-layer encryption
  UPSTREAM: ext4: fix another off-by-one fsmap error on 1k block filesystems
  Linux 4.19.278
  ila: do not generate empty messages in ila_xlat_nl_cmd_get_mapping()
  nfc: fdp: add null check of devm_kmalloc_array in fdp_nci_i2c_read_device_properties
  net: caif: Fix use-after-free in cfusbl_device_notify()
  drm/i915: Don't use BAR mappings for ring buffers with LLC
  tipc: improve function tipc_wait_for_cond()
  media: ov5640: Fix analogue gain control
  PCI: Add SolidRun vendor ID
  macintosh: windfarm: Use unsigned type for 1-bit bitfields
  alpha: fix R_ALPHA_LITERAL reloc for large modules
  MIPS: Fix a compilation issue
  Revert "spi: mt7621: Fix an error message in mt7621_spi_probe()"
  scsi: core: Remove the /proc/scsi/${proc_name} directory earlier
  kbuild: generate modules.order only in directories visited by obj-y/m
  kbuild: fix false-positive need-builtin calculation
  udf: Detect system inodes linked into directory hierarchy
  udf: Preserve link count of system files
  udf: Remove pointless union in udf_inode_info
  udf: reduce leakage of blocks related to named streams
  udf: Explain handling of load_nls() failure
  nfc: change order inside nfc_se_io error path
  ext4: zero i_disksize when initializing the bootloader inode
  ext4: fix WARNING in ext4_update_inline_data
  ext4: move where set the MAY_INLINE_DATA flag is set
  ext4: fix another off-by-one fsmap error on 1k block filesystems
  ext4: fix RENAME_WHITEOUT handling for inline directories
  x86/CPU/AMD: Disable XSAVES on AMD family 0x17
  fs: prevent out-of-bounds array speculation when closing a file descriptor
  Linux 4.19.277
  staging: rtl8192e: Remove call_usermodehelper starting RadioPower.sh
  staging: rtl8192e: Remove function ..dm_check_ac_dc_power calling a script
  wifi: cfg80211: Partial revert "wifi: cfg80211: Fix use after free for wext"
  Linux 4.19.276
  thermal: intel: powerclamp: Fix cur_state for multi package system
  f2fs: fix cgroup writeback accounting with fs-layer encryption
  media: uvcvideo: Fix race condition with usb_kill_urb
  media: uvcvideo: Provide sync and async uvc_ctrl_status_event
  tcp: Fix listen() regression in 4.19.270
  s390/setup: init jump labels before command line parsing
  s390/maccess: add no DAT mode to kernel_write
  Bluetooth: hci_sock: purge socket queues in the destruct() callback
  phy: rockchip-typec: Fix unsigned comparison with less than zero
  usb: uvc: Enumerate valid values for color matching
  USB: ene_usb6250: Allocate enough memory for full object
  usb: host: xhci: mvebu: Iterate over array indexes instead of using pointer math
  iio: accel: mma9551_core: Prevent uninitialized variable in mma9551_read_config_word()
  iio: accel: mma9551_core: Prevent uninitialized variable in mma9551_read_status_word()
  tools/iio/iio_utils:fix memory leak
  mei: bus-fixup:upon error print return values of send and receive
  tty: serial: fsl_lpuart: disable the CTS when send break signal
  tty: fix out-of-bounds access in tty_driver_lookup_tty()
  media: uvcvideo: Silence memcpy() run-time false positive warnings
  media: uvcvideo: Handle errors from calls to usb_string
  media: uvcvideo: Handle cameras with invalid descriptors
  firmware/efi sysfb_efi: Add quirk for Lenovo IdeaPad Duet 3
  tracing: Add NULL checks for buffer in ring_buffer_free_read_page()
  thermal: intel: quark_dts: fix error pointer dereference
  scsi: ipr: Work around fortify-string warning
  vc_screen: modify vcs_size() handling in vcs_read()
  tcp: tcp_check_req() can be called from process context
  ARM: dts: spear320-hmi: correct STMPE GPIO compatible
  nfc: fix memory leak of se_io context in nfc_genl_se_io
  9p/rdma: unmap receive dma buffer in rdma_request()/post_recv()
  9p/xen: fix connection sequence
  9p/xen: fix version parsing
  net: fix __dev_kfree_skb_any() vs drop monitor
  netfilter: ctnetlink: fix possible refcount leak in ctnetlink_create_conntrack()
  watchdog: pcwd_usb: Fix attempting to access uninitialized memory
  watchdog: Fix kmemleak in watchdog_cdev_register
  watchdog: at91sam9_wdt: use devm_request_irq to avoid missing free_irq() in error path
  x86: um: vdso: Add '%rcx' and '%r11' to the syscall clobber list
  ubi: ubi_wl_put_peb: Fix infinite loop when wear-leveling work failed
  ubi: Fix UAF wear-leveling entry in eraseblk_count_seq_show()
  ubifs: ubifs_writepage: Mark page dirty after writing inode failed
  ubifs: dirty_cow_znode: Fix memleak in error handling path
  ubifs: Re-statistic cleaned znode count if commit failed
  ubi: Fix possible null-ptr-deref in ubi_free_volume()
  ubi: Fix unreferenced object reported by kmemleak in ubi_resize_volume()
  ubi: Fix use-after-free when volume resizing failed
  ubifs: Reserve one leb for each journal head while doing budget
  ubifs: do_rename: Fix wrong space budget when target inode's nlink > 1
  ubifs: Fix wrong dirty space budget for dirty inode
  ubifs: Rectify space budget for ubifs_xrename()
  ubifs: Rectify space budget for ubifs_symlink() if symlink is encrypted
  ubi: ensure that VID header offset + VID header size <= alloc, size
  um: vector: Fix memory leak in vector_config
  pwm: stm32-lp: fix the check on arr and cmp registers update
  fs/jfs: fix shift exponent db_agl2size negative
  net/sched: Retire tcindex classifier
  kbuild: Port silent mode detection to future gnu make.
  wifi: ath9k: use proper statements in conditionals
  drm/radeon: Fix eDP for single-display iMac11,2
  PCI: Avoid FLR for AMD FCH AHCI adapters
  scsi: ses: Fix slab-out-of-bounds in ses_intf_remove()
  scsi: ses: Fix possible desc_ptr out-of-bounds accesses
  scsi: ses: Fix possible addl_desc_ptr out-of-bounds accesses
  scsi: ses: Fix slab-out-of-bounds in ses_enclosure_data_process()
  scsi: ses: Don't attach if enclosure has no components
  scsi: qla2xxx: Fix erroneous link down
  scsi: qla2xxx: Fix link failure in NPIV environment
  ktest.pl: Add RUN_TIMEOUT option with default unlimited
  ktest.pl: Fix missing "end_monitor" when machine check fails
  ktest.pl: Give back console on Ctrt^C on monitor
  media: ipu3-cio2: Fix PM runtime usage_count in driver unbind
  mips: fix syscall_get_nr
  alpha: fix FEN fault handling
  rbd: avoid use-after-free in do_rbd_add() when rbd_dev_create() fails
  ARM: dts: exynos: correct TMU phandle in Odroid XU
  ARM: dts: exynos: correct TMU phandle in Exynos4
  dm flakey: don't corrupt the zero page
  dm flakey: fix logic when corrupting a bio
  wifi: cfg80211: Fix use after free for wext
  wifi: rtl8xxxu: Use a longer retry limit of 48
  ext4: refuse to create ea block when umounted
  ext4: optimize ea_inode block expansion
  ALSA: ice1712: Do not left ice->gpio_mutex locked in aureon_add_controls()
  irqdomain: Drop bogus fwspec-mapping error handling
  irqdomain: Fix disassociation race
  irqdomain: Fix association race
  ima: Align ima_file_mmap() parameters with mmap_file LSM hook
  Documentation/hw-vuln: Document the interaction between IBRS and STIBP
  x86/speculation: Allow enabling STIBP with legacy IBRS
  x86/microcode/AMD: Fix mixed steppings support
  x86/microcode/AMD: Add a @cpu parameter to the reloading functions
  x86/microcode/amd: Remove load_microcode_amd()'s bsp parameter
  x86/kprobes: Fix arch_check_optimized_kprobe check within optimized_kprobe range
  x86/kprobes: Fix __recover_optprobed_insn check optimizing logic
  x86/reboot: Disable SVM, not just VMX, when stopping CPUs
  x86/reboot: Disable virtualization in an emergency if SVM is supported
  x86/crash: Disable virt in core NMI crash handler to avoid double shootdown
  x86/virt: Force GIF=1 prior to disabling SVM (for reboot flows)
  udf: Fix file corruption when appending just after end of preallocated extent
  udf: Do not update file length for failed writes to inline files
  udf: Do not bother merging very long extents
  udf: Truncate added extents on failed expansion
  ocfs2: fix non-auto defrag path not working issue
  ocfs2: fix defrag path triggering jbd2 ASSERT
  f2fs: fix information leak in f2fs_move_inline_dirents()
  fs: hfsplus: fix UAF issue in hfsplus_put_super
  hfs: fix missing hfs_bnode_get() in __hfs_bnode_create
  ARM: dts: exynos: correct HDMI phy compatible in Exynos4
  s390/kprobes: fix current_kprobe never cleared after kprobes reenter
  s390/kprobes: fix irq mask clobbering on kprobe reenter from post_handler
  s390: discard .interp section
  rtc: pm8xxx: fix set-alarm race
  firmware: coreboot: framebuffer: Ignore reserved pixel color bits
  wifi: rtl8xxxu: fixing transmisison failure for rtl8192eu
  dm cache: add cond_resched() to various workqueue loops
  dm thin: add cond_resched() to various workqueue loops
  pinctrl: at91: use devm_kasprintf() to avoid potential leaks
  regulator: s5m8767: Bounds check id indexing into arrays
  regulator: max77802: Bounds check regulator id against opmode
  ASoC: kirkwood: Iterate over array indexes instead of using pointer math
  docs/scripts/gdb: add necessary make scripts_gdb step
  drm/msm/dsi: Add missing check for alloc_ordered_workqueue
  drm/radeon: free iio for atombios when driver shutdown
  drm/amd/display: Fix potential null-deref in dm_resume
  net/mlx5: fw_tracer: Fix debug print
  ACPI: video: Fix Lenovo Ideapad Z570 DMI match
  m68k: Check syscall_trace_enter() return code
  net: bcmgenet: Add a check for oversized packets
  ACPI: Don't build ACPICA with '-Os'
  inet: fix fast path in __inet_hash_connect()
  wifi: brcmfmac: ensure CLM version is null-terminated to prevent stack-out-of-bounds
  x86/bugs: Reset speculation control settings on init
  timers: Prevent union confusion from unexpected restart_syscall()
  thermal: intel: Fix unsigned comparison with less than zero
  rcu: Suppress smp_processor_id() complaint in synchronize_rcu_expedited_wait()
  wifi: brcmfmac: Fix potential stack-out-of-bounds in brcmf_c_preinit_dcmds()
  ARM: dts: exynos: Use Exynos5420 compatible for the MIPI video phy
  udf: Define EFSCORRUPTED error code
  rpmsg: glink: Avoid infinite loop on intent for missing channel
  media: usb: siano: Fix use after free bugs caused by do_submit_urb
  media: i2c: ov7670: 0 instead of -EINVAL was returned
  media: rc: Fix use-after-free bugs caused by ene_tx_irqsim()
  media: i2c: ov772x: Fix memleak in ov772x_probe()
  powerpc: Remove linker flag from KBUILD_AFLAGS
  media: platform: ti: Add missing check for devm_regulator_get
  MIPS: vpe-mt: drop physical_memsize
  powerpc/rtas: ensure 4KB alignment for rtas_data_buf
  powerpc/rtas: make all exports GPL
  powerpc/pseries/lparcfg: add missing RTAS retry status handling
  clk: Honor CLK_OPS_PARENT_ENABLE in clk_core_is_enabled()
  powerpc/powernv/ioda: Skip unallocated resources when mapping to PE
  Input: ads7846 - don't check penirq immediately for 7845
  Input: ads7846 - don't report pressure for ads7845
  mtd: rawnand: sunxi: Fix the size of the last OOB region
  mfd: pcf50633-adc: Fix potential memleak in pcf50633_adc_async_read()
  selftests/ftrace: Fix bash specific "==" operator
  sparc: allow PM configs for sparc32 COMPILE_TEST
  perf tools: Fix auto-complete on aarch64
  perf llvm: Fix inadvertent file creation
  gfs2: jdata writepage fix
  cifs: Fix warning and UAF when destroy the MR list
  cifs: Fix lost destroy smbd connection when MR allocate failed
  nfsd: fix race to check ls_layouts
  dm: remove flush_scheduled_work() during local_exit()
  hwmon: (mlxreg-fan) Return zero speed for broken fan
  spi: bcm63xx-hsspi: Fix multi-bit mode setting
  spi: bcm63xx-hsspi: fix pm_runtime
  scsi: aic94xx: Add missing check for dma_map_single()
  hwmon: (ltc2945) Handle error case in ltc2945_value_store
  gpio: vf610: connect GPIO label to dev name
  ASoC: soc-compress.c: fixup private_data on snd_soc_new_compress()
  drm/mediatek: Clean dangling pointer on bind error path
  drm/mediatek: Drop unbalanced obj unref
  gpu: host1x: Don't skip assigning syncpoints to channels
  drm/msm/dpu: Add check for pstates
  drm/msm: use strscpy instead of strncpy
  drm/mipi-dsi: Fix byte order of 16-bit DCS set/get brightness
  ALSA: hda/ca0132: minor fix for allocation size
  pinctrl: rockchip: Fix refcount leak in rockchip_pinctrl_parse_groups
  pinctrl: pinctrl-rockchip: Fix a bunch of kerneldoc misdemeanours
  drm/msm/hdmi: Add missing check for alloc_ordered_workqueue
  gpu: ipu-v3: common: Add of_node_put() for reference returned by of_graph_get_port_by_id()
  drm/vc4: dpi: Fix format mapping for RGB565
  drm/vc4: dpi: Add option for inverting pixel clock and output enable
  drm: Clarify definition of the DRM_BUS_FLAG_(PIXDATA|SYNC)_* macros
  drm/bridge: megachips: Fix error handling in i2c_register_driver()
  drm: mxsfb: DRM_MXSFB should depend on ARCH_MXS || ARCH_MXC
  selftest: fib_tests: Always cleanup before exit
  irqchip/irq-bcm7120-l2: Set IRQ_LEVEL for level triggered interrupts
  irqchip/irq-brcmstb-l2: Set IRQ_LEVEL for level triggered interrupts
  can: esd_usb: Move mislocated storage of SJA1000_ECC_SEG bits in case of a bus error
  wifi: mac80211: make rate u32 in sta_set_rate_info_rx()
  crypto: crypto4xx - Call dma_unmap_page when done
  wifi: mwifiex: fix loop iterator in mwifiex_update_ampdu_txwinsize()
  wifi: iwl4965: Add missing check for create_singlethread_workqueue()
  wifi: iwl3945: Add missing check for create_singlethread_workqueue
  RISC-V: time: initialize hrtimer based broadcast clock event device
  m68k: /proc/hardware should depend on PROC_FS
  crypto: rsa-pkcs1pad - Use akcipher_request_complete
  rds: rds_rm_zerocopy_callback() correct order for list_add_tail()
  libbpf: Fix alen calculation in libbpf_nla_dump_errormsg()
  Bluetooth: L2CAP: Fix potential user-after-free
  irqchip/irq-mvebu-gicp: Fix refcount leak in mvebu_gicp_probe
  irqchip/alpine-msi: Fix refcount leak in alpine_msix_init_domains
  net/mlx5: Enhance debug print in page allocation failure
  powercap: fix possible name leak in powercap_register_zone()
  crypto: seqiv - Handle EBUSY correctly
  ACPI: battery: Fix missing NUL-termination with large strings
  wifi: ath9k: Fix potential stack-out-of-bounds write in ath9k_wmi_rsp_callback()
  wifi: ath9k: hif_usb: clean up skbs if ath9k_hif_usb_rx_stream() fails
  ath9k: htc: clean up statistics macros
  ath9k: hif_usb: simplify if-if to if-else
  wifi: ath9k: htc_hst: free skb in ath9k_htc_rx_msg() if there is no callback function
  wifi: orinoco: check return value of hermes_write_wordrec()
  ACPICA: nsrepair: handle cases without a return value correctly
  lib/mpi: Fix buffer overrun when SG is too long
  genirq: Fix the return type of kstat_cpu_irqs_sum()
  ACPICA: Drop port I/O validation for some regions
  wifi: wl3501_cs: don't call kfree_skb() under spin_lock_irqsave()
  wifi: libertas: cmdresp: don't call kfree_skb() under spin_lock_irqsave()
  wifi: libertas: main: don't call kfree_skb() under spin_lock_irqsave()
  wifi: libertas: if_usb: don't call kfree_skb() under spin_lock_irqsave()
  wifi: libertas_tf: don't call kfree_skb() under spin_lock_irqsave()
  wifi: brcmfmac: unmap dma buffer in brcmf_msgbuf_alloc_pktid()
  wifi: brcmfmac: fix potential memory leak in brcmf_netdev_start_xmit()
  wifi: ipw2200: fix memory leak in ipw_wdev_init()
  wifi: ipw2x00: don't call dev_kfree_skb() under spin_lock_irqsave()
  ipw2x00: switch from 'pci_' to 'dma_' API
  wifi: rtlwifi: Fix global-out-of-bounds bug in _rtl8812ae_phy_set_txpower_limit()
  rtlwifi: fix -Wpointer-sign warning
  wifi: rtl8xxxu: don't call dev_kfree_skb() under spin_lock_irqsave()
  wifi: libertas: fix memory leak in lbs_init_adapter()
  wifi: rsi: Fix memory leak in rsi_coex_attach()
  block: bio-integrity: Copy flags when bio_integrity_payload is cloned
  blk-mq: remove stale comment for blk_mq_sched_mark_restart_hctx
  arm64: dts: mediatek: mt7622: Add missing pwm-cells to pwm node
  arm64: dts: amlogic: meson-gxl: add missing unit address to eth-phy-mux node name
  arm64: dts: amlogic: meson-gx: add missing unit address to rng node name
  arm64: dts: amlogic: meson-gx: add missing SCPI sensors compatible
  arm64: dts: amlogic: meson-axg: fix SCPI clock dvfs node name
  arm64: dts: meson-axg: enable SCPI
  arm64: dts: amlogic: meson-gx: fix SCPI clock dvfs node name
  ARM: imx: Call ida_simple_remove() for ida_simple_get
  ARM: dts: exynos: correct wr-active property in Exynos3250 Rinato
  ARM: OMAP1: call platform_device_put() in error case in omap1_dm_timer_init()
  arm64: dts: meson-gx: Fix the SCPI DVFS node name and unit address
  arm64: dts: meson-gx: Fix Ethernet MAC address unit name
  ARM: zynq: Fix refcount leak in zynq_early_slcr_init
  ARM: OMAP2+: Fix memory leak in realtime_counter_init()
  HID: asus: use spinlock to safely schedule workers
  HID: asus: use spinlock to protect concurrent accesses
  HID: asus: Remove check for same LED brightness on set

Change-Id: Ie09175b59aef5de140e476316d94097cac7a3031
Committed by Michael Bestas on 2023-04-06 14:00:28 +03:00
commit e4b3323f61 (GPG key ID: CC95044519BE6669)
300 changed files with 2054 additions and 1879 deletions

@@ -479,8 +479,16 @@ Spectre variant 2
 On Intel Skylake-era systems the mitigation covers most, but not all,
 cases. See :ref:`[3] <spec_ref3>` for more details.
-On CPUs with hardware mitigation for Spectre variant 2 (e.g. Enhanced
-IBRS on x86), retpoline is automatically disabled at run time.
+On CPUs with hardware mitigation for Spectre variant 2 (e.g. IBRS
+or enhanced IBRS on x86), retpoline is automatically disabled at run time.
+Systems which support enhanced IBRS (eIBRS) enable IBRS protection once at
+boot, by setting the IBRS bit, and they're automatically protected against
+Spectre v2 variant attacks, including cross-thread branch target injections
+on SMT systems (STIBP). In other words, eIBRS enables STIBP too.
+Legacy IBRS systems clear the IBRS bit on exit to userspace and
+therefore explicitly enable STIBP for that
 The retpoline mitigation is turned on by default on vulnerable
 CPUs. It can be forced on or off by the administrator

@@ -504,9 +512,12 @@ Spectre variant 2
 For Spectre variant 2 mitigation, individual user programs
 can be compiled with return trampolines for indirect branches.
 This protects them from consuming poisoned entries in the branch
-target buffer left by malicious software. Alternatively, the
-programs can disable their indirect branch speculation via prctl()
-(See :ref:`Documentation/userspace-api/spec_ctrl.rst <set_spec_ctrl>`).
+target buffer left by malicious software.
+On legacy IBRS systems, at return to userspace, implicit STIBP is disabled
+because the kernel clears the IBRS bit. In this case, the userspace programs
+can disable indirect branch speculation via prctl() (See
+:ref:`Documentation/userspace-api/spec_ctrl.rst <set_spec_ctrl>`).
 On x86, this will turn on STIBP to guard against attacks from the
 sibling thread when the user program is running, and use IBPB to
 flush the branch target buffer when switching to/from the program.
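For reference, the prctl() interface mentioned in the documentation hunk above is the standard Linux speculation-control API. The following is a minimal userspace sketch (not taken from this merge) of a task disabling its own indirect branch speculation; it assumes kernel headers new enough to define PR_SET_SPECULATION_CTRL and PR_SPEC_INDIRECT_BRANCH.

    /* spec_ctrl_demo.c - illustrative only */
    #include <stdio.h>
    #include <errno.h>
    #include <string.h>
    #include <sys/prctl.h>
    #include <linux/prctl.h>

    int main(void)
    {
        /* Read the current indirect-branch speculation state for this task. */
        int state = prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH, 0, 0, 0);
        if (state < 0) {
            fprintf(stderr, "PR_GET_SPECULATION_CTRL: %s\n", strerror(errno));
            return 1;
        }

        /*
         * Disable indirect branch speculation for this task. On x86 the
         * kernel then uses STIBP while the task runs and IBPB on context
         * switches, as described in Documentation/userspace-api/spec_ctrl.rst.
         */
        if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH,
                  PR_SPEC_DISABLE, 0, 0)) {
            fprintf(stderr, "PR_SET_SPECULATION_CTRL: %s\n", strerror(errno));
            return 1;
        }

        printf("indirect branch speculation disabled (previous state 0x%x)\n", state);
        return 0;
    }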

@@ -39,6 +39,10 @@ Setup
 this mode. In this case, you should build the kernel with
 CONFIG_RANDOMIZE_BASE disabled if the architecture supports KASLR.
+- Build the gdb scripts (required on kernels v5.1 and above)::
+      make scripts_gdb
 - Enable the gdb stub of QEMU/KVM, either
 - at VM startup time by appending "-s" to the QEMU command line

@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 4
 PATCHLEVEL = 19
-SUBLEVEL = 275
+SUBLEVEL = 279
 EXTRAVERSION =
 NAME = "People's Front"

@@ -88,10 +88,17 @@ endif
 # If the user is running make -s (silent mode), suppress echoing of
 # commands
-ifneq ($(findstring s,$(filter-out --%,$(MAKEFLAGS))),)
-quiet=silent_
-tools_silent=s
+# make-4.0 (and later) keep single letter options in the 1st word of MAKEFLAGS.
+ifeq ($(filter 3.%,$(MAKE_VERSION)),)
+silence:=$(findstring s,$(firstword -$(MAKEFLAGS)))
+else
+silence:=$(findstring s,$(filter-out --%,$(MAKEFLAGS)))
+endif
+ifeq ($(silence),s)
+quiet=silent_
+tools_silent=s
 endif
 export quiet Q KBUILD_VERBOSE

@@ -158,10 +158,8 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
 base = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr;
 symtab = (Elf64_Sym *)sechdrs[symindex].sh_addr;
-/* The small sections were sorted to the end of the segment.
-The following should definitely cover them. */
-gp = (u64)me->core_layout.base + me->core_layout.size - 0x8000;
 got = sechdrs[me->arch.gotsecindex].sh_addr;
+gp = got + 0x8000;
 for (i = 0; i < n; i++) {
 unsigned long r_sym = ELF64_R_SYM (rela[i].r_info);

@@ -235,7 +235,21 @@ do_entIF(unsigned long type, struct pt_regs *regs)
 {
 int signo, code;
-if ((regs->ps & ~IPL_MAX) == 0) {
+if (type == 3) { /* FEN fault */
+/* Irritating users can call PAL_clrfen to disable the
+FPU for the process. The kernel will then trap in
+do_switch_stack and undo_switch_stack when we try
+to save and restore the FP registers.
+Given that GCC by default generates code that uses the
+FP registers, PAL_clrfen is not useful except for DoS
+attacks. So turn the bleeding FPU back on and be done
+with it. */
+current_thread_info()->pcb.flags |= 1;
+__reload_thread(&current_thread_info()->pcb);
+return;
+}
+if (!user_mode(regs)) {
 if (type == 1) {
 const unsigned int *data
 = (const unsigned int *) regs->pc;

@@ -368,20 +382,6 @@ do_entIF(unsigned long type, struct pt_regs *regs)
 }
 break;
-case 3: /* FEN fault */
-/* Irritating users can call PAL_clrfen to disable the
-FPU for the process. The kernel will then trap in
-do_switch_stack and undo_switch_stack when we try
-to save and restore the FP registers.
-Given that GCC by default generates code that uses the
-FP registers, PAL_clrfen is not useful except for DoS
-attacks. So turn the bleeding FPU back on and be done
-with it. */
-current_thread_info()->pcb.flags |= 1;
-__reload_thread(&current_thread_info()->pcb);
-return;
 case 5: /* illoc */
 default: /* unexpected instruction-fault type */
 ;

@@ -237,7 +237,7 @@
 i80-if-timings {
 cs-setup = <0>;
 wr-setup = <0>;
-wr-act = <1>;
+wr-active = <1>;
 wr-hold = <0>;
 };
 };

@@ -10,7 +10,7 @@
 / {
 thermal-zones {
 cpu_thermal: cpu-thermal {
-thermal-sensors = <&tmu 0>;
+thermal-sensors = <&tmu>;
 polling-delay-passive = <0>;
 polling-delay = <0>;
 trips {

@@ -611,7 +611,7 @@
 status = "disabled";
 hdmi_i2c_phy: hdmiphy@38 {
-compatible = "exynos4210-hdmiphy";
+compatible = "samsung,exynos4210-hdmiphy";
 reg = <0x38>;
 };
 };

@@ -113,7 +113,6 @@
 };
 &cpu0_thermal {
-thermal-sensors = <&tmu_cpu0 0>;
 polling-delay-passive = <0>;
 polling-delay = <0>;

@@ -530,7 +530,7 @@
 };
 mipi_phy: mipi-video-phy {
-compatible = "samsung,s5pv210-mipi-video-phy";
+compatible = "samsung,exynos5420-mipi-video-phy";
 syscon = <&pmu_system_controller>;
 #phy-cells = <1>;
 };

@@ -248,7 +248,7 @@
 irq-trigger = <0x1>;
 stmpegpio: stmpe-gpio {
-compatible = "stmpe,gpio";
+compatible = "st,stmpe-gpio";
 reg = <0>;
 gpio-controller;
 #gpio-cells = <2>;

@@ -105,6 +105,7 @@ struct mmdc_pmu {
 cpumask_t cpu;
 struct hrtimer hrtimer;
 unsigned int active_events;
+int id;
 struct device *dev;
 struct perf_event *mmdc_events[MMDC_NUM_COUNTERS];
 struct hlist_node node;

@@ -445,8 +446,6 @@ static enum hrtimer_restart mmdc_pmu_timer_handler(struct hrtimer *hrtimer)
 static int mmdc_pmu_init(struct mmdc_pmu *pmu_mmdc,
 void __iomem *mmdc_base, struct device *dev)
 {
-int mmdc_num;
 *pmu_mmdc = (struct mmdc_pmu) {
 .pmu = (struct pmu) {
 .task_ctx_nr = perf_invalid_context,

@@ -463,15 +462,16 @@ static int mmdc_pmu_init(struct mmdc_pmu *pmu_mmdc,
 .active_events = 0,
 };
-mmdc_num = ida_simple_get(&mmdc_ida, 0, 0, GFP_KERNEL);
-return mmdc_num;
+pmu_mmdc->id = ida_simple_get(&mmdc_ida, 0, 0, GFP_KERNEL);
+return pmu_mmdc->id;
 }
 static int imx_mmdc_remove(struct platform_device *pdev)
 {
 struct mmdc_pmu *pmu_mmdc = platform_get_drvdata(pdev);
+ida_simple_remove(&mmdc_ida, pmu_mmdc->id);
 cpuhp_state_remove_instance_nocalls(cpuhp_mmdc_state, &pmu_mmdc->node);
 perf_pmu_unregister(&pmu_mmdc->pmu);
 iounmap(pmu_mmdc->mmdc_base);

@@ -485,7 +485,6 @@ static int imx_mmdc_perf_init(struct platform_device *pdev, void __iomem *mmdc_b
 {
 struct mmdc_pmu *pmu_mmdc;
 char *name;
-int mmdc_num;
 int ret;
 const struct of_device_id *of_id =
 of_match_device(imx_mmdc_dt_ids, &pdev->dev);

@@ -508,14 +507,14 @@ static int imx_mmdc_perf_init(struct platform_device *pdev, void __iomem *mmdc_b
 cpuhp_mmdc_state = ret;
 }
-mmdc_num = mmdc_pmu_init(pmu_mmdc, mmdc_base, &pdev->dev);
-pmu_mmdc->mmdc_ipg_clk = mmdc_ipg_clk;
-if (mmdc_num == 0)
-name = "mmdc";
-else
-name = devm_kasprintf(&pdev->dev,
-GFP_KERNEL, "mmdc%d", mmdc_num);
+ret = mmdc_pmu_init(pmu_mmdc, mmdc_base, &pdev->dev);
+if (ret < 0)
+goto pmu_free;
+name = devm_kasprintf(&pdev->dev,
+GFP_KERNEL, "mmdc%d", ret);
+pmu_mmdc->mmdc_ipg_clk = mmdc_ipg_clk;
 pmu_mmdc->devtype_data = (struct fsl_mmdc_devtype_data *)of_id->data;
 hrtimer_init(&pmu_mmdc->hrtimer, CLOCK_MONOTONIC,

@@ -536,6 +535,7 @@ static int imx_mmdc_perf_init(struct platform_device *pdev, void __iomem *mmdc_b
 pmu_register_err:
 pr_warn("MMDC Perf PMU failed (%d), disabled\n", ret);
+ida_simple_remove(&mmdc_ida, pmu_mmdc->id);
 cpuhp_state_remove_instance_nocalls(cpuhp_mmdc_state, &pmu_mmdc->node);
 hrtimer_cancel(&pmu_mmdc->hrtimer);
 pmu_free:

@@ -165,7 +165,7 @@ err_free_pdata:
 kfree(pdata);
 err_free_pdev:
-platform_device_unregister(pdev);
+platform_device_put(pdev);
 return ret;
 }

@@ -650,6 +650,7 @@ static void __init realtime_counter_init(void)
 }
 rate = clk_get_rate(sys_clk);
+clk_put(sys_clk);
 if (soc_is_dra7xx()) {
 /*

@@ -222,6 +222,7 @@ int __init zynq_early_slcr_init(void)
 zynq_slcr_regmap = syscon_regmap_lookup_by_compatible("xlnx,zynq-slcr");
 if (IS_ERR(zynq_slcr_regmap)) {
 pr_err("%s: failed to find zynq-slcr\n", __func__);
+of_node_put(np);
 return -ENODEV;
 }

@@ -47,6 +47,7 @@
 reg = <0x0 0x0>;
 enable-method = "psci";
 next-level-cache = <&l2>;
+clocks = <&scpi_dvfs 0>;
 };
 cpu1: cpu@1 {

@@ -55,6 +56,7 @@
 reg = <0x0 0x1>;
 enable-method = "psci";
 next-level-cache = <&l2>;
+clocks = <&scpi_dvfs 0>;
 };
 cpu2: cpu@2 {

@@ -63,6 +65,7 @@
 reg = <0x0 0x2>;
 enable-method = "psci";
 next-level-cache = <&l2>;
+clocks = <&scpi_dvfs 0>;
 };
 cpu3: cpu@3 {

@@ -71,6 +74,7 @@
 reg = <0x0 0x3>;
 enable-method = "psci";
 next-level-cache = <&l2>;
+clocks = <&scpi_dvfs 0>;
 };
 l2: l2-cache0 {

@@ -151,6 +155,28 @@
 #clock-cells = <0>;
 };
+scpi {
+compatible = "arm,scpi-pre-1.0";
+mboxes = <&mailbox 1 &mailbox 2>;
+shmem = <&cpu_scp_lpri &cpu_scp_hpri>;
+scpi_clocks: clocks {
+compatible = "arm,scpi-clocks";
+scpi_dvfs: clocks-0 {
+compatible = "arm,scpi-dvfs-clocks";
+#clock-cells = <1>;
+clock-indices = <0>;
+clock-output-names = "vcpu";
+};
+};
+scpi_sensors: sensors {
+compatible = "amlogic,meson-gxbb-scpi-sensors", "arm,scpi-sensors";
+#thermal-sensor-cells = <1>;
+};
+};
 soc {
 compatible = "simple-bus";
 #address-cells = <2>;

@@ -150,7 +150,7 @@
 reg = <0x14 0x10>;
 };
-eth_mac: eth_mac@34 {
+eth_mac: eth-mac@34 {
 reg = <0x34 0x10>;
 };

@@ -167,7 +167,7 @@
 scpi_clocks: clocks {
 compatible = "arm,scpi-clocks";
-scpi_dvfs: scpi_clocks@0 {
+scpi_dvfs: clocks-0 {
 compatible = "arm,scpi-dvfs-clocks";
 #clock-cells = <1>;
 clock-indices = <0>;

@@ -423,7 +423,7 @@
 #size-cells = <2>;
 ranges = <0x0 0x0 0x0 0xc8834000 0x0 0x2000>;
-hwrng: rng {
+hwrng: rng@0 {
 compatible = "amlogic,meson-rng";
 reg = <0x0 0x0 0x0 0x4>;
 };

@@ -636,7 +636,7 @@
 };
 };
-eth-phy-mux {
+eth-phy-mux@55c {
 compatible = "mdio-mux-mmioreg", "mdio-mux";
 #address-cells = <1>;
 #size-cells = <0>;

@@ -380,6 +380,7 @@
 pwm: pwm@11006000 {
 compatible = "mediatek,mt7622-pwm";
 reg = <0 0x11006000 0 0x1000>;
+#pwm-cells = <2>;
 interrupts = <GIC_SPI 77 IRQ_TYPE_LEVEL_LOW>;
 clocks = <&topckgen CLK_TOP_PWM_SEL>,
 <&pericfg CLK_PERI_PWM_PD>,

@@ -47,6 +47,8 @@ do_trace:
 jbsr syscall_trace_enter
 RESTORE_SWITCH_STACK
 addql #4,%sp
+addql #1,%d0
+jeq ret_from_exception
 movel %sp@(PT_OFF_ORIG_D0),%d1
 movel #-ENOSYS,%d0
 cmpl #NR_syscalls,%d1

@@ -19,6 +19,7 @@ config HEARTBEAT
 # We have a dedicated heartbeat LED. :-)
 config PROC_HARDWARE
 bool "/proc/hardware support"
+depends on PROC_FS
 help
 Say Y here to support the /proc/hardware file, which gives you
 access to information about the machine you're running on,

@@ -92,6 +92,8 @@ ENTRY(system_call)
 jbsr syscall_trace_enter
 RESTORE_SWITCH_STACK
 addql #4,%sp
+addql #1,%d0
+jeq ret_from_exception
 movel %d3,%a0
 jbsr %a0@
 movel %d0,%sp@(PT_OFF_D0) /* save the return value */

@@ -160,9 +160,12 @@ do_trace_entry:
 jbsr syscall_trace
 RESTORE_SWITCH_STACK
 addql #4,%sp
+addql #1,%d0 | optimization for cmpil #-1,%d0
+jeq ret_from_syscall
 movel %sp@(PT_OFF_ORIG_D0),%d0
 cmpl #NR_syscalls,%d0
 jcs syscall
+jra ret_from_syscall
 badsys:
 movel #-ENOSYS,%sp@(PT_OFF_D0)
 jra ret_from_syscall

@@ -377,7 +377,7 @@ struct pci_msu {
 PCI_CFG04_STAT_SSE | \
 PCI_CFG04_STAT_PE)
-#define KORINA_CNFG1 ((KORINA_STAT<<16)|KORINA_CMD)
+#define KORINA_CNFG1 (KORINA_STAT | KORINA_CMD)
 #define KORINA_REVID 0
 #define KORINA_CLASS_CODE 0

@@ -38,7 +38,7 @@ static inline bool mips_syscall_is_indirect(struct task_struct *task,
 static inline long syscall_get_nr(struct task_struct *task,
 struct pt_regs *regs)
 {
-return current_thread_info()->syscall;
+return task_thread_info(task)->syscall;
 }
 static inline void mips_syscall_update_nr(struct task_struct *task,

@@ -104,7 +104,6 @@ struct vpe_control {
 struct list_head tc_list; /* Thread contexts */
 };
-extern unsigned long physical_memsize;
 extern struct vpe_control vpecontrol;
 extern const struct file_operations vpe_fops;

@@ -92,12 +92,11 @@ int vpe_run(struct vpe *v)
 write_tc_c0_tchalt(read_tc_c0_tchalt() & ~TCHALT_H);
 /*
-* The sde-kit passes 'memsize' to __start in $a3, so set something
-* here... Or set $a3 to zero and define DFLT_STACK_SIZE and
-* DFLT_HEAP_SIZE when you compile your program
+* We don't pass the memsize here, so VPE programs need to be
+* compiled with DFLT_STACK_SIZE and DFLT_HEAP_SIZE defined.
 */
+mttgpr(7, 0);
 mttgpr(6, v->ntcs);
-mttgpr(7, physical_memsize);
 /* set up VPE1 */
 /*

@@ -24,12 +24,6 @@
 DEFINE_SPINLOCK(ebu_lock);
 EXPORT_SYMBOL_GPL(ebu_lock);
-/*
-* This is needed by the VPE loader code, just set it to 0 and assume
-* that the firmware hardcodes this value to something useful.
-*/
-unsigned long physical_memsize = 0L;
 /*
 * this struct is filled by the soc specific detection code and holds
 * information about the specific soc type, revision and name

@@ -109,7 +109,7 @@ aflags-$(CONFIG_CPU_LITTLE_ENDIAN) += -mlittle-endian
 ifeq ($(HAS_BIARCH),y)
 KBUILD_CFLAGS += -m$(BITS)
-KBUILD_AFLAGS += -m$(BITS) -Wl,-a$(BITS)
+KBUILD_AFLAGS += -m$(BITS)
 KBUILD_LDFLAGS += -m elf$(BITS)$(LDEMULATION)
 KBUILD_ARFLAGS += --target=elf$(BITS)-$(GNUTARGET)
 endif

@@ -54,10 +54,10 @@ struct rtas_t rtas = {
 EXPORT_SYMBOL(rtas);
 DEFINE_SPINLOCK(rtas_data_buf_lock);
-EXPORT_SYMBOL(rtas_data_buf_lock);
-char rtas_data_buf[RTAS_DATA_BUF_SIZE] __cacheline_aligned;
-EXPORT_SYMBOL(rtas_data_buf);
+EXPORT_SYMBOL_GPL(rtas_data_buf_lock);
+char rtas_data_buf[RTAS_DATA_BUF_SIZE] __aligned(SZ_4K);
+EXPORT_SYMBOL_GPL(rtas_data_buf);
 unsigned long rtas_rmo_buf;

@@ -66,7 +66,7 @@ unsigned long rtas_rmo_buf;
 * This is done like this so rtas_flash can be a module.
 */
 void (*rtas_flash_term_hook)(int);
-EXPORT_SYMBOL(rtas_flash_term_hook);
+EXPORT_SYMBOL_GPL(rtas_flash_term_hook);
 /* RTAS use home made raw locking instead of spin_lock_irqsave
 * because those can be called from within really nasty contexts

@@ -314,7 +314,7 @@ void rtas_progress(char *s, unsigned short hex)
 spin_unlock(&progress_lock);
 }
-EXPORT_SYMBOL(rtas_progress); /* needed by rtas_flash module */
+EXPORT_SYMBOL_GPL(rtas_progress); /* needed by rtas_flash module */
 int rtas_token(const char *service)
 {

@@ -324,7 +324,7 @@ int rtas_token(const char *service)
 tokp = of_get_property(rtas.dev, service, NULL);
 return tokp ? be32_to_cpu(*tokp) : RTAS_UNKNOWN_SERVICE;
 }
-EXPORT_SYMBOL(rtas_token);
+EXPORT_SYMBOL_GPL(rtas_token);
 int rtas_service_present(const char *service)
 {

@@ -484,7 +484,7 @@ int rtas_call(int token, int nargs, int nret, int *outputs, ...)
 }
 return ret;
 }
-EXPORT_SYMBOL(rtas_call);
+EXPORT_SYMBOL_GPL(rtas_call);
 /* For RTAS_BUSY (-2), delay for 1 millisecond. For an extended busy status
 * code of 990n, perform the hinted delay of 10^n (last digit) milliseconds.

@@ -519,7 +519,7 @@ unsigned int rtas_busy_delay(int status)
 return ms;
 }
-EXPORT_SYMBOL(rtas_busy_delay);
+EXPORT_SYMBOL_GPL(rtas_busy_delay);
 static int rtas_error_rc(int rtas_rc)
 {

@@ -565,7 +565,7 @@ int rtas_get_power_level(int powerdomain, int *level)
 return rtas_error_rc(rc);
 return rc;
 }
-EXPORT_SYMBOL(rtas_get_power_level);
+EXPORT_SYMBOL_GPL(rtas_get_power_level);
 int rtas_set_power_level(int powerdomain, int level, int *setlevel)
 {

@@ -583,7 +583,7 @@ int rtas_set_power_level(int powerdomain, int level, int *setlevel)
 return rtas_error_rc(rc);
 return rc;
 }
-EXPORT_SYMBOL(rtas_set_power_level);
+EXPORT_SYMBOL_GPL(rtas_set_power_level);
 int rtas_get_sensor(int sensor, int index, int *state)
 {

@@ -601,7 +601,7 @@ int rtas_get_sensor(int sensor, int index, int *state)
 return rtas_error_rc(rc);
 return rc;
 }
-EXPORT_SYMBOL(rtas_get_sensor);
+EXPORT_SYMBOL_GPL(rtas_get_sensor);
 int rtas_get_sensor_fast(int sensor, int index, int *state)
 {

@@ -662,7 +662,7 @@ int rtas_set_indicator(int indicator, int index, int new_value)
 return rtas_error_rc(rc);
 return rc;
 }
-EXPORT_SYMBOL(rtas_set_indicator);
+EXPORT_SYMBOL_GPL(rtas_set_indicator);
 /*
 * Ignoring RTAS extended delay

@@ -3123,7 +3123,8 @@ static void pnv_ioda_setup_pe_res(struct pnv_ioda_pe *pe,
 int index;
 int64_t rc;
-if (!res || !res->flags || res->start > res->end)
+if (!res || !res->flags || res->start > res->end ||
+res->flags & IORESOURCE_UNSET)
 return;
 if (res->flags & IORESOURCE_IO) {

@@ -291,6 +291,7 @@ static void parse_mpp_x_data(struct seq_file *m)
 */
 static void parse_system_parameter_string(struct seq_file *m)
 {
+const s32 token = rtas_token("ibm,get-system-parameter");
 int call_status;
 unsigned char *local_buffer = kmalloc(SPLPAR_MAXLENGTH, GFP_KERNEL);

@@ -300,16 +301,15 @@
 return;
 }
+do {
 spin_lock(&rtas_data_buf_lock);
 memset(rtas_data_buf, 0, SPLPAR_MAXLENGTH);
-call_status = rtas_call(rtas_token("ibm,get-system-parameter"), 3, 1,
-NULL,
-SPLPAR_CHARACTERISTICS_TOKEN,
-__pa(rtas_data_buf),
-RTAS_DATA_BUF_SIZE);
+call_status = rtas_call(token, 3, 1, NULL, SPLPAR_CHARACTERISTICS_TOKEN,
+__pa(rtas_data_buf), RTAS_DATA_BUF_SIZE);
 memcpy(local_buffer, rtas_data_buf, SPLPAR_MAXLENGTH);
 local_buffer[SPLPAR_MAXLENGTH - 1] = '\0';
 spin_unlock(&rtas_data_buf_lock);
+} while (rtas_busy_delay(call_status));
 if (call_status != 0) {
 printk(KERN_INFO

@@ -13,6 +13,7 @@
 */
 #include <linux/of_clk.h>
+#include <linux/clockchips.h>
 #include <linux/clocksource.h>
 #include <linux/delay.h>
 #include <asm/sbi.h>

@@ -33,4 +34,6 @@
 of_clk_init(NULL);
 timer_probe();
+tick_setup_hrtimer_broadcast();
 }

@@ -254,6 +254,7 @@ static void pop_kprobe(struct kprobe_ctlblk *kcb)
 {
 __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
 kcb->kprobe_status = kcb->prev_kprobe.status;
+kcb->prev_kprobe.kp = NULL;
 }
 NOKPROBE_SYMBOL(pop_kprobe);

@@ -508,12 +509,11 @@
 if (!p)
 return 0;
-resume_execution(p, regs);
 if (kcb->kprobe_status != KPROBE_REENTER && p->post_handler) {
 kcb->kprobe_status = KPROBE_HIT_SSDONE;
 p->post_handler(p, regs, 0);
 }
+resume_execution(p, regs);
 pop_kprobe(kcb);
 preempt_enable_no_resched();

@@ -153,5 +153,6 @@ SECTIONS
 DISCARDS
 /DISCARD/ : {
 *(.eh_frame)
+*(.interp)
 }
 }

@@ -58,14 +58,20 @@ static notrace long s390_kernel_write_odd(void *dst, const void *src, size_t siz
 */
 void notrace s390_kernel_write(void *dst, const void *src, size_t size)
 {
+unsigned long flags;
 long copied;
+flags = arch_local_save_flags();
+if (!(flags & PSW_MASK_DAT)) {
+memcpy(dst, src, size);
+} else {
 while (size) {
 copied = s390_kernel_write_odd(dst, src, size);
 dst += copied;
 src += copied;
 size -= copied;
 }
+}
 }
 static int __memcpy_real(void *dest, void *src, size_t count)

@@ -329,7 +329,7 @@ config FORCE_MAX_ZONEORDER
 This config option is actually maximum order plus one. For example,
 a value of 13 means that the largest free memory block is 2^12 pages.
-if SPARC64
+if SPARC64 || COMPILE_TEST
 source "kernel/power/Kconfig"
 endif

@@ -741,6 +741,7 @@ static int vector_config(char *str, char **error_out)
 if (parsed == NULL) {
 *error_out = "vector_config failed to parse parameters";
+kfree(params);
 return -EINVAL;
 }

@@ -130,7 +130,7 @@ static inline unsigned int x86_cpuid_family(void)
 int __init microcode_init(void);
 extern void __init load_ucode_bsp(void);
 extern void load_ucode_ap(void);
-void reload_early_microcode(void);
+void reload_early_microcode(unsigned int cpu);
 extern bool get_builtin_firmware(struct cpio_data *cd, const char *name);
 extern bool initrd_gone;
 void microcode_bsp_resume(void);

@@ -138,7 +138,7 @@ void microcode_bsp_resume(void);
 static inline int __init microcode_init(void) { return 0; };
 static inline void __init load_ucode_bsp(void) { }
 static inline void load_ucode_ap(void) { }
-static inline void reload_early_microcode(void) { }
+static inline void reload_early_microcode(unsigned int cpu) { }
 static inline void microcode_bsp_resume(void) { }
 static inline bool
 get_builtin_firmware(struct cpio_data *cd, const char *name) { return false; }

@@ -47,12 +47,12 @@ struct microcode_amd {
 extern void __init load_ucode_amd_bsp(unsigned int family);
 extern void load_ucode_amd_ap(unsigned int family);
 extern int __init save_microcode_in_initrd_amd(unsigned int family);
-void reload_ucode_amd(void);
+void reload_ucode_amd(unsigned int cpu);
 #else
 static inline void __init load_ucode_amd_bsp(unsigned int family) {}
 static inline void load_ucode_amd_ap(unsigned int family) {}
 static inline int __init
 save_microcode_in_initrd_amd(unsigned int family) { return -EINVAL; }
-void reload_ucode_amd(void) {}
+static inline void reload_ucode_amd(unsigned int cpu) {}
 #endif
 #endif /* _ASM_X86_MICROCODE_AMD_H */

@@ -50,6 +50,10 @@
 #define SPEC_CTRL_RRSBA_DIS_S_SHIFT 6 /* Disable RRSBA behavior */
 #define SPEC_CTRL_RRSBA_DIS_S BIT(SPEC_CTRL_RRSBA_DIS_S_SHIFT)
+/* A mask for bits which the kernel toggles when controlling mitigations */
+#define SPEC_CTRL_MITIGATIONS_MASK (SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_SSBD \
+| SPEC_CTRL_RRSBA_DIS_S)
 #define MSR_IA32_PRED_CMD 0x00000049 /* Prediction Command */
 #define PRED_CMD_IBPB BIT(0) /* Indirect Branch Prediction Barrier */

@@ -25,6 +25,8 @@ void __noreturn machine_real_restart(unsigned int type);
 #define MRR_BIOS 0
 #define MRR_APM 1
+void cpu_emergency_disable_virtualization(void);
 typedef void (*nmi_shootdown_cb)(int, struct pt_regs*);
 void nmi_shootdown_cpus(nmi_shootdown_cb callback);
 void run_crash_ipi_callback(struct pt_regs *regs);

@@ -114,7 +114,21 @@ static inline void cpu_svm_disable(void)
 wrmsrl(MSR_VM_HSAVE_PA, 0);
 rdmsrl(MSR_EFER, efer);
-wrmsrl(MSR_EFER, efer & ~EFER_SVME);
+if (efer & EFER_SVME) {
+/*
+* Force GIF=1 prior to disabling SVM to ensure INIT and NMI
+* aren't blocked, e.g. if a fatal error occurred between CLGI
+* and STGI. Note, STGI may #UD if SVM is disabled from NMI
+* context between reading EFER and executing STGI. In that
+* case, GIF must already be set, otherwise the NMI would have
+* been blocked, so just eat the fault.
+*/
+asm_volatile_goto("1: stgi\n\t"
+_ASM_EXTABLE(1b, %l[fault])
+::: "memory" : fault);
+fault:
+wrmsrl(MSR_EFER, efer & ~EFER_SVME);
+}
 }
 /** Makes sure SVM is disabled, if it is supported on the CPU

@@ -199,6 +199,15 @@ static void init_amd_k6(struct cpuinfo_x86 *c)
 return;
 }
 #endif
+/*
+* Work around Erratum 1386. The XSAVES instruction malfunctions in
+* certain circumstances on Zen1/2 uarch, and not all parts have had
+* updated microcode at the time of writing (March 2023).
+*
+* Affected parts all have no supervisor XSAVE states, meaning that
+* the XSAVEC instruction (which works fine) is equivalent.
+*/
+clear_cpu_cap(c, X86_FEATURE_XSAVES);
 }
 static void init_amd_k7(struct cpuinfo_x86 *c)

@@ -135,9 +135,17 @@ void __init check_bugs(void)
 * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD
 * init code as it is not enumerated and depends on the family.
 */
-if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
+if (cpu_feature_enabled(X86_FEATURE_MSR_SPEC_CTRL)) {
 rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+/*
+* Previously running kernel (kexec), may have some controls
+* turned ON. Clear them and let the mitigations setup below
+* rediscover them based on configuration.
+*/
+x86_spec_ctrl_base &= ~SPEC_CTRL_MITIGATIONS_MASK;
+}
 /* Select the proper CPU mitigations before patching alternatives: */
 spectre_v1_select_mitigation();
 spectre_v2_select_mitigation();

@@ -975,14 +983,18 @@ spectre_v2_parse_user_cmdline(void)
 return SPECTRE_V2_USER_CMD_AUTO;
 }
-static inline bool spectre_v2_in_ibrs_mode(enum spectre_v2_mitigation mode)
+static inline bool spectre_v2_in_eibrs_mode(enum spectre_v2_mitigation mode)
 {
-return mode == SPECTRE_V2_IBRS ||
-mode == SPECTRE_V2_EIBRS ||
+return mode == SPECTRE_V2_EIBRS ||
 mode == SPECTRE_V2_EIBRS_RETPOLINE ||
 mode == SPECTRE_V2_EIBRS_LFENCE;
 }
+static inline bool spectre_v2_in_ibrs_mode(enum spectre_v2_mitigation mode)
+{
+return spectre_v2_in_eibrs_mode(mode) || mode == SPECTRE_V2_IBRS;
+}
 static void __init
 spectre_v2_user_select_mitigation(void)
 {

@@ -1045,12 +1057,19 @@ spectre_v2_user_select_mitigation(void)
 }
 /*
-* If no STIBP, IBRS or enhanced IBRS is enabled, or SMT impossible,
-* STIBP is not required.
+* If no STIBP, enhanced IBRS is enabled, or SMT impossible, STIBP
+* is not required.
+*
+* Enhanced IBRS also protects against cross-thread branch target
+* injection in user-mode as the IBRS bit remains always set which
+* implicitly enables cross-thread protections. However, in legacy IBRS
+* mode, the IBRS bit is set only on kernel entry and cleared on return
+* to userspace. This disables the implicit cross-thread protection,
+* so allow for STIBP to be selected in that case.
 */
 if (!boot_cpu_has(X86_FEATURE_STIBP) ||
 !smt_possible ||
-spectre_v2_in_ibrs_mode(spectre_v2_enabled))
+spectre_v2_in_eibrs_mode(spectre_v2_enabled))
 return;
 /*

@@ -2102,7 +2121,7 @@ static ssize_t mmio_stale_data_show_state(char *buf)
 static char *stibp_state(void)
 {
-if (spectre_v2_in_ibrs_mode(spectre_v2_enabled))
+if (spectre_v2_in_eibrs_mode(spectre_v2_enabled))
 return "";
 switch (spectre_v2_user_stibp) {

@@ -54,7 +54,9 @@ struct cont_desc {
 };
 static u32 ucode_new_rev;
-static u8 amd_ucode_patch[PATCH_MAX_SIZE];
+/* One blob per node. */
+static u8 amd_ucode_patch[MAX_NUMNODES][PATCH_MAX_SIZE];
 /*
 * Microcode patch container file is prepended to the initrd in cpio

@@ -210,7 +212,7 @@ apply_microcode_early_amd(u32 cpuid_1_eax, void *ucode, size_t size, bool save_p
 patch = (u8 (*)[PATCH_MAX_SIZE])__pa_nodebug(&amd_ucode_patch);
 #else
 new_rev = &ucode_new_rev;
-patch = &amd_ucode_patch;
+patch = &amd_ucode_patch[0];
 #endif
 desc.cpuid_1_eax = cpuid_1_eax;

@@ -329,8 +331,7 @@ void load_ucode_amd_ap(unsigned int cpuid_1_eax)
 apply_microcode_early_amd(cpuid_1_eax, cp.data, cp.size, false);
 }
-static enum ucode_state
-load_microcode_amd(bool save, u8 family, const u8 *data, size_t size);
+static enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size);
 int __init save_microcode_in_initrd_amd(unsigned int cpuid_1_eax)
 {

@@ -348,19 +349,19 @@
 if (!desc.mc)
 return -EINVAL;
-ret = load_microcode_amd(true, x86_family(cpuid_1_eax), desc.data, desc.size);
+ret = load_microcode_amd(x86_family(cpuid_1_eax), desc.data, desc.size);
 if (ret > UCODE_UPDATED)
 return -EINVAL;
 return 0;
 }
-void reload_ucode_amd(void)
+void reload_ucode_amd(unsigned int cpu)
 {
-struct microcode_amd *mc;
 u32 rev, dummy;
+struct microcode_amd *mc;
-mc = (struct microcode_amd *)amd_ucode_patch;
+mc = (struct microcode_amd *)amd_ucode_patch[cpu_to_node(cpu)];
 rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);

@@ -698,9 +699,10 @@ static enum ucode_state __load_microcode_amd(u8 family, const u8 *data,
 return UCODE_OK;
 }
-static enum ucode_state
-load_microcode_amd(bool save, u8 family, const u8 *data, size_t size)
+static enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size)
 {
+struct cpuinfo_x86 *c;
+unsigned int nid, cpu;
 struct ucode_patch *p;
 enum ucode_state ret;

@@ -713,23 +715,23 @@ load_microcode_amd(bool save, u8 family, const u8 *data, size_t size)
 return ret;
 }
-p = find_patch(0);
-if (!p) {
-return ret;
-} else {
-if (boot_cpu_data.microcode >= p->patch_id)
-return ret;
+for_each_node(nid) {
+cpu = cpumask_first(cpumask_of_node(nid));
+c = &cpu_data(cpu);
+p = find_patch(cpu);
+if (!p)
+continue;
+if (c->microcode >= p->patch_id)
+continue;
 ret = UCODE_NEW;
+memset(&amd_ucode_patch[nid], 0, PATCH_MAX_SIZE);
+memcpy(&amd_ucode_patch[nid], p->data, min_t(u32, ksize(p->data), PATCH_MAX_SIZE));
 }
-/* save BSP's matching patch for early load */
-if (!save)
-return ret;
-memset(amd_ucode_patch, 0, PATCH_MAX_SIZE);
-memcpy(amd_ucode_patch, p->data, min_t(u32, ksize(p->data), PATCH_MAX_SIZE));
 return ret;
 }

@@ -754,12 +756,11 @@ static enum ucode_state request_microcode_amd(int cpu, struct device *device,
 {
 char fw_name[36] = "amd-ucode/microcode_amd.bin";
 struct cpuinfo_x86 *c = &cpu_data(cpu);
-bool bsp = c->cpu_index == boot_cpu_data.cpu_index;
 enum ucode_state ret = UCODE_NFOUND;
 const struct firmware *fw;
 /* reload ucode container only on the boot cpu */
-if (!refresh_fw || !bsp)
+if (!refresh_fw)
 return UCODE_OK;
 if (c->x86 >= 0x15)

@@ -776,7 +777,7 @@ static enum ucode_state request_microcode_amd(int cpu, struct device *device,
 goto fw_release;
 }
-ret = load_microcode_amd(bsp, c->x86, fw->data, fw->size);
+ret = load_microcode_amd(c->x86, fw->data, fw->size);
 fw_release:
 release_firmware(fw);


@@ -326,7 +326,7 @@ struct cpio_data find_microcode_in_initrd(const char *path, bool use_pa)
 #endif
 }
 
-void reload_early_microcode(void)
+void reload_early_microcode(unsigned int cpu)
 {
 	int vendor, family;
@@ -340,7 +340,7 @@ void reload_early_microcode(void)
 		break;
 	case X86_VENDOR_AMD:
 		if (family >= 0x10)
-			reload_ucode_amd();
+			reload_ucode_amd(cpu);
 		break;
 	default:
 		break;
@@ -783,7 +783,7 @@ void microcode_bsp_resume(void)
 	if (uci->valid && uci->mc)
 		microcode_ops->apply_microcode(cpu);
 	else if (!uci->mc)
-		reload_early_microcode();
+		reload_early_microcode(cpu);
 }
 
 static struct syscore_ops mc_syscore_ops = {


@@ -35,7 +35,6 @@
 #include <linux/kdebug.h>
 #include <asm/cpu.h>
 #include <asm/reboot.h>
-#include <asm/virtext.h>
 #include <asm/intel_pt.h>
 
 /* Used while preparing memory map entries for second kernel */
@@ -86,15 +85,6 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
 	 */
 	cpu_crash_vmclear_loaded_vmcss();
 
-	/* Disable VMX or SVM if needed.
-	 *
-	 * We need to disable virtualization on all CPUs.
-	 * Having VMX or SVM enabled on any CPU may break rebooting
-	 * after the kdump kernel has finished its task.
-	 */
-	cpu_emergency_vmxoff();
-	cpu_emergency_svm_disable();
-
 	/*
 	 * Disable Intel PT to stop its logging
 	 */
@@ -153,12 +143,7 @@ void native_machine_crash_shutdown(struct pt_regs *regs)
 	 */
 	cpu_crash_vmclear_loaded_vmcss();
 
-	/* Booting kdump kernel with VMX or SVM enabled won't work,
-	 * because (among other limitations) we can't disable paging
-	 * with the virt flags.
-	 */
-	cpu_emergency_vmxoff();
-	cpu_emergency_svm_disable();
+	cpu_emergency_disable_virtualization();
 
 	/*
 	 * Disable Intel PT to stop its logging


@@ -56,8 +56,8 @@ unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsigned long addr)
 		/* This function only handles jump-optimized kprobe */
 		if (kp && kprobe_optimized(kp)) {
 			op = container_of(kp, struct optimized_kprobe, kp);
-			/* If op->list is not empty, op is under optimizing */
-			if (list_empty(&op->list))
+			/* If op is optimized or under unoptimizing */
+			if (list_empty(&op->list) || optprobe_queued_unopt(op))
 				goto found;
 		}
 	}
@@ -328,7 +328,7 @@ int arch_check_optimized_kprobe(struct optimized_kprobe *op)
 	for (i = 1; i < op->optinsn.size; i++) {
 		p = get_kprobe(op->kp.addr + i);
-		if (p && !kprobe_disabled(p))
+		if (p && !kprobe_disarmed(p))
 			return -EEXIST;
 	}


@ -536,33 +536,29 @@ static inline void kb_wait(void)
} }
} }
static void vmxoff_nmi(int cpu, struct pt_regs *regs) static inline void nmi_shootdown_cpus_on_restart(void);
{
cpu_emergency_vmxoff();
}
/* Use NMIs as IPIs to tell all CPUs to disable virtualization */ static void emergency_reboot_disable_virtualization(void)
static void emergency_vmx_disable_all(void)
{ {
/* Just make sure we won't change CPUs while doing this */ /* Just make sure we won't change CPUs while doing this */
local_irq_disable(); local_irq_disable();
/* /*
* Disable VMX on all CPUs before rebooting, otherwise we risk hanging * Disable virtualization on all CPUs before rebooting to avoid hanging
* the machine, because the CPU blocks INIT when it's in VMX root. * the system, as VMX and SVM block INIT when running in the host.
* *
* We can't take any locks and we may be on an inconsistent state, so * We can't take any locks and we may be on an inconsistent state, so
* use NMIs as IPIs to tell the other CPUs to exit VMX root and halt. * use NMIs as IPIs to tell the other CPUs to disable VMX/SVM and halt.
* *
* Do the NMI shootdown even if VMX if off on _this_ CPU, as that * Do the NMI shootdown even if virtualization is off on _this_ CPU, as
* doesn't prevent a different CPU from being in VMX root operation. * other CPUs may have virtualization enabled.
*/ */
if (cpu_has_vmx()) { if (cpu_has_vmx() || cpu_has_svm(NULL)) {
/* Safely force _this_ CPU out of VMX root operation. */ /* Safely force _this_ CPU out of VMX/SVM operation. */
__cpu_emergency_vmxoff(); cpu_emergency_disable_virtualization();
/* Halt and exit VMX root operation on the other CPUs. */ /* Disable VMX/SVM and halt on other CPUs. */
nmi_shootdown_cpus(vmxoff_nmi); nmi_shootdown_cpus_on_restart();
} }
} }
@ -599,7 +595,7 @@ static void native_machine_emergency_restart(void)
unsigned short mode; unsigned short mode;
if (reboot_emergency) if (reboot_emergency)
emergency_vmx_disable_all(); emergency_reboot_disable_virtualization();
tboot_shutdown(TB_SHUTDOWN_REBOOT); tboot_shutdown(TB_SHUTDOWN_REBOOT);
@ -804,6 +800,17 @@ void machine_crash_shutdown(struct pt_regs *regs)
/* This is the CPU performing the emergency shutdown work. */ /* This is the CPU performing the emergency shutdown work. */
int crashing_cpu = -1; int crashing_cpu = -1;
/*
* Disable virtualization, i.e. VMX or SVM, to ensure INIT is recognized during
* reboot. VMX blocks INIT if the CPU is post-VMXON, and SVM blocks INIT if
* GIF=0, i.e. if the crash occurred between CLGI and STGI.
*/
void cpu_emergency_disable_virtualization(void)
{
cpu_emergency_vmxoff();
cpu_emergency_svm_disable();
}
#if defined(CONFIG_SMP) #if defined(CONFIG_SMP)
static nmi_shootdown_cb shootdown_callback; static nmi_shootdown_cb shootdown_callback;
@ -826,8 +833,15 @@ static int crash_nmi_callback(unsigned int val, struct pt_regs *regs)
return NMI_HANDLED; return NMI_HANDLED;
local_irq_disable(); local_irq_disable();
if (shootdown_callback)
shootdown_callback(cpu, regs); shootdown_callback(cpu, regs);
/*
* Prepare the CPU for reboot _after_ invoking the callback so that the
* callback can safely use virtualization instructions, e.g. VMCLEAR.
*/
cpu_emergency_disable_virtualization();
atomic_dec(&waiting_for_crash_ipi); atomic_dec(&waiting_for_crash_ipi);
/* Assume hlt works */ /* Assume hlt works */
halt(); halt();
@ -842,18 +856,32 @@ static void smp_send_nmi_allbutself(void)
apic->send_IPI_allbutself(NMI_VECTOR); apic->send_IPI_allbutself(NMI_VECTOR);
} }
/* /**
* Halt all other CPUs, calling the specified function on each of them * nmi_shootdown_cpus - Stop other CPUs via NMI
* @callback: Optional callback to be invoked from the NMI handler
* *
* This function can be used to halt all other CPUs on crash * The NMI handler on the remote CPUs invokes @callback, if not
* or emergency reboot time. The function passed as parameter * NULL, first and then disables virtualization to ensure that
* will be called inside a NMI handler on all CPUs. * INIT is recognized during reboot.
*
* nmi_shootdown_cpus() can only be invoked once. After the first
* invocation all other CPUs are stuck in crash_nmi_callback() and
* cannot respond to a second NMI.
*/ */
void nmi_shootdown_cpus(nmi_shootdown_cb callback) void nmi_shootdown_cpus(nmi_shootdown_cb callback)
{ {
unsigned long msecs; unsigned long msecs;
local_irq_disable(); local_irq_disable();
/*
* Avoid certain doom if a shootdown already occurred; re-registering
* the NMI handler will cause list corruption, modifying the callback
* will do who knows what, etc...
*/
if (WARN_ON_ONCE(crash_ipi_issued))
return;
/* Make a note of crashing cpu. Will be used in NMI callback. */ /* Make a note of crashing cpu. Will be used in NMI callback. */
crashing_cpu = safe_smp_processor_id(); crashing_cpu = safe_smp_processor_id();
@ -881,7 +909,17 @@ void nmi_shootdown_cpus(nmi_shootdown_cb callback)
msecs--; msecs--;
} }
/* Leave the nmi callback set */ /*
* Leave the nmi callback set, shootdown is a one-time thing. Clearing
* the callback could result in a NULL pointer dereference if a CPU
* (finally) responds after the timeout expires.
*/
}
static inline void nmi_shootdown_cpus_on_restart(void)
{
if (!crash_ipi_issued)
nmi_shootdown_cpus(NULL);
} }
/* /*
@ -911,6 +949,8 @@ void nmi_shootdown_cpus(nmi_shootdown_cb callback)
/* No other CPUs to shoot down */ /* No other CPUs to shoot down */
} }
static inline void nmi_shootdown_cpus_on_restart(void) { }
void run_crash_ipi_callback(struct pt_regs *regs) void run_crash_ipi_callback(struct pt_regs *regs)
{ {
} }


@@ -33,7 +33,7 @@
 #include <asm/mce.h>
 #include <asm/trace/irq_vectors.h>
 #include <asm/kexec.h>
-#include <asm/virtext.h>
+#include <asm/reboot.h>
 
 /*
  * Some notes on x86 processor bugs affecting SMP operation:
@@ -163,7 +163,7 @@ static int smp_stop_nmi_callback(unsigned int val, struct pt_regs *regs)
 	if (raw_smp_processor_id() == atomic_read(&stopping_cpu))
 		return NMI_HANDLED;
 
-	cpu_emergency_vmxoff();
+	cpu_emergency_disable_virtualization();
 	stop_this_cpu(NULL);
 
 	return NMI_HANDLED;
@@ -176,7 +176,7 @@ static int smp_stop_nmi_callback(unsigned int val, struct pt_regs *regs)
 asmlinkage __visible void smp_reboot_interrupt(void)
 {
 	ipi_entering_ack_irq();
-	cpu_emergency_vmxoff();
+	cpu_emergency_disable_virtualization();
 	stop_this_cpu(NULL);
 	irq_exit();
 }


@@ -265,6 +265,14 @@ static const struct dmi_system_id efifb_dmi_swap_width_height[] __initconst = {
 				      "Lenovo ideapad D330-10IGM"),
 		},
 	},
+	{
+		/* Lenovo IdeaPad Duet 3 10IGL5 with 1200x1920 portrait screen */
+		.matches = {
+			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+			DMI_EXACT_MATCH(DMI_PRODUCT_VERSION,
+					"IdeaPad Duet 3 10IGL5"),
+		},
+	},
 	{},
 };


@@ -563,7 +563,8 @@ void __init sme_enable(struct boot_params *bp)
 	cmdline_ptr = (const char *)((u64)bp->hdr.cmd_line_ptr |
 				     ((u64)bp->ext_cmd_line_ptr << 32));
 
-	cmdline_find_option(cmdline_ptr, cmdline_arg, buffer, sizeof(buffer));
+	if (cmdline_find_option(cmdline_ptr, cmdline_arg, buffer, sizeof(buffer)) < 0)
+		return;
 
 	if (!strncmp(buffer, cmdline_on, sizeof(buffer)))
 		sme_me_mask = me_mask;


@@ -20,8 +20,10 @@ int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
 {
 	long ret;
 
-	asm("syscall" : "=a" (ret) :
-	    "0" (__NR_clock_gettime), "D" (clock), "S" (ts) : "memory");
+	asm("syscall"
+	    : "=a" (ret)
+	    : "0" (__NR_clock_gettime), "D" (clock), "S" (ts)
+	    : "rcx", "r11", "memory");
 	return ret;
 }
@@ -32,8 +34,10 @@ int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
 {
 	long ret;
 
-	asm("syscall" : "=a" (ret) :
-	    "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
+	asm("syscall"
+	    : "=a" (ret)
+	    : "0" (__NR_gettimeofday), "D" (tv), "S" (tz)
+	    : "rcx", "r11", "memory");
 	return ret;
 }


@@ -444,6 +444,7 @@ int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
 	bip->bip_vcnt = bip_src->bip_vcnt;
 	bip->bip_iter = bip_src->bip_iter;
+	bip->bip_flags = bip_src->bip_flags & ~BIP_BLOCK_INTEGRITY;
 
 	return 0;
 }


@@ -51,8 +51,7 @@ void blk_mq_sched_assign_ioc(struct request *rq, struct bio *bio)
 }
 
 /*
- * Mark a hardware queue as needing a restart. For shared queues, maintain
- * a count of how many hardware queues are marked for restart.
+ * Mark a hardware queue as needing a restart.
  */
 void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
 {


@@ -377,18 +377,21 @@ struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
 				     int node, int alloc_policy)
 {
 	struct blk_mq_tags *tags;
+	struct ext_blk_mq_tags *etags;
 
 	if (total_tags > BLK_MQ_TAG_MAX) {
 		pr_err("blk-mq: tag depth too large\n");
 		return NULL;
 	}
 
-	tags = kzalloc_node(sizeof(*tags), GFP_KERNEL, node);
-	if (!tags)
+	etags = kzalloc_node(sizeof(*etags), GFP_KERNEL, node);
+	if (!etags)
 		return NULL;
 
+	tags = &etags->tags;
 	tags->nr_tags = total_tags;
 	tags->nr_reserved_tags = reserved_tags;
+	spin_lock_init(&etags->lock);
 
 	return blk_mq_init_bitmap_tags(tags, node, alloc_policy);
 }


@@ -21,6 +21,21 @@ struct blk_mq_tags {
 	struct list_head page_list;
 };
 
+/*
+ * Extended tag address space map. This was needed
+ * to add a spinlock to blk_mq_tags in a KMI compliant
+ * way (no changes could be made to struct blk_mq_tags).
+ */
+struct ext_blk_mq_tags {
+	struct blk_mq_tags tags;
+
+	/*
+	 * used to clear request reference in rqs[] before freeing one
+	 * request pool
+	 */
+	spinlock_t lock;
+};
+
 extern struct blk_mq_tags *blk_mq_init_tags(unsigned int nr_tags, unsigned int reserved_tags, int node, int alloc_policy);
 extern void blk_mq_free_tags(struct blk_mq_tags *tags);
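Editorial note: the ext_blk_mq_tags wrapper above is the usual embed-and-container_of() extension idiom. The sketch below is a minimal user-space illustration of that idiom only; the struct names, field names and values are made up for the example and are not part of the kernel change.

/*
 * Illustrative sketch, not kernel code: the base struct stays layout-stable,
 * new state lives in a wrapper that embeds it, and container_of() recovers
 * the wrapper from a pointer to the embedded base.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct base_tags {		/* stands in for struct blk_mq_tags */
	unsigned int nr_tags;
};

struct ext_tags {		/* stands in for struct ext_blk_mq_tags */
	struct base_tags tags;	/* embedded base, layout unchanged */
	int extra_state;	/* new field added only in the wrapper */
};

int main(void)
{
	struct ext_tags e = { .tags = { .nr_tags = 32 }, .extra_state = 7 };
	struct base_tags *t = &e.tags;	/* callers only ever see this pointer */

	/* recover the wrapper, exactly like the drv_etags lookup in blk-mq */
	struct ext_tags *back = container_of(t, struct ext_tags, tags);

	printf("nr_tags=%u extra=%d\n", back->tags.nr_tags, back->extra_state);
	return 0;
}

Because callers keep passing around the embedded base pointer, the layout of the exported struct, and therefore the KMI, is left untouched.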


@@ -1834,6 +1834,47 @@ void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
 	}
 }
 
+static size_t order_to_size(unsigned int order)
+{
+	return (size_t)PAGE_SIZE << order;
+}
+
+/* called before freeing request pool in @tags */
+static void blk_mq_clear_rq_mapping(struct blk_mq_tag_set *set,
+		struct blk_mq_tags *tags, unsigned int hctx_idx)
+{
+	struct blk_mq_tags *drv_tags = set->tags[hctx_idx];
+	struct ext_blk_mq_tags *drv_etags;
+	struct page *page;
+	unsigned long flags;
+
+	list_for_each_entry(page, &tags->page_list, lru) {
+		unsigned long start = (unsigned long)page_address(page);
+		unsigned long end = start + order_to_size(page->private);
+		int i;
+
+		for (i = 0; i < set->queue_depth; i++) {
+			struct request *rq = drv_tags->rqs[i];
+			unsigned long rq_addr = (unsigned long)rq;
+
+			if (rq_addr >= start && rq_addr < end) {
+				WARN_ON_ONCE(refcount_read(&rq->ref) != 0);
+				cmpxchg(&drv_tags->rqs[i], rq, NULL);
+			}
+		}
+	}
+
+	/*
+	 * Wait until all pending iteration is done.
+	 *
+	 * Request reference is cleared and it is guaranteed to be observed
+	 * after the ->lock is released.
+	 */
+	drv_etags = container_of(drv_tags, struct ext_blk_mq_tags, tags);
+	spin_lock_irqsave(&drv_etags->lock, flags);
+	spin_unlock_irqrestore(&drv_etags->lock, flags);
+}
+
 static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 {
 	const int is_sync = op_is_sync(bio->bi_opf);
@@ -1966,6 +2007,8 @@ void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
 		}
 	}
 
+	blk_mq_clear_rq_mapping(set, tags, hctx_idx);
+
 	while (!list_empty(&tags->page_list)) {
 		page = list_first_entry(&tags->page_list, struct page, lru);
 		list_del_init(&page->lru);
@@ -2025,11 +2068,6 @@ struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
 	return tags;
 }
 
-static size_t order_to_size(unsigned int order)
-{
-	return (size_t)PAGE_SIZE << order;
-}
-
 static int blk_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
 		unsigned int hctx_idx, int node)
 {
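Editorial note: blk_mq_clear_rq_mapping() above clears the stale rqs[] references with cmpxchg() and then takes and immediately releases the new ->lock, so that any tag iteration still in flight finishes before the request pages are freed. A rough user-space analogue of that drain idiom, under the assumption that a pthread mutex and C11 atomics stand in for the kernel primitives, is:

/*
 * Illustrative sketch only: readers hold the lock while they walk the table,
 * so clearing the entries and then doing an empty lock/unlock pass guarantees
 * no reader still sees the old pointers once the pass completes.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static pthread_mutex_t walk_lock = PTHREAD_MUTEX_INITIALIZER;
static _Atomic(int *) table[4];
static int values[4] = { 1, 2, 3, 4 };

static void *walker(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&walk_lock);	/* reader holds the lock for the whole walk */
	for (int i = 0; i < 4; i++) {
		int *p = table[i];
		if (p)
			printf("walker sees %d\n", *p);
	}
	pthread_mutex_unlock(&walk_lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	for (int i = 0; i < 4; i++)
		table[i] = &values[i];
	pthread_create(&t, NULL, walker, NULL);

	/* clear the stale references (the kernel patch does this with cmpxchg) */
	for (int i = 0; i < 4; i++)
		table[i] = NULL;

	/* empty lock/unlock: wait out any walker that started before the clear */
	pthread_mutex_lock(&walk_lock);
	pthread_mutex_unlock(&walk_lock);

	/* only now would it be safe to free what the table used to point at */
	pthread_join(t, NULL);
	return 0;
}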


@ -216,16 +216,14 @@ static void pkcs1pad_encrypt_sign_complete_cb(
struct crypto_async_request *child_async_req, int err) struct crypto_async_request *child_async_req, int err)
{ {
struct akcipher_request *req = child_async_req->data; struct akcipher_request *req = child_async_req->data;
struct crypto_async_request async_req;
if (err == -EINPROGRESS) if (err == -EINPROGRESS)
return; goto out;
async_req.data = req->base.data; err = pkcs1pad_encrypt_sign_complete(req, err);
async_req.tfm = crypto_akcipher_tfm(crypto_akcipher_reqtfm(req));
async_req.flags = child_async_req->flags; out:
req->base.complete(&async_req, akcipher_request_complete(req, err);
pkcs1pad_encrypt_sign_complete(req, err));
} }
static int pkcs1pad_encrypt(struct akcipher_request *req) static int pkcs1pad_encrypt(struct akcipher_request *req)
@ -334,15 +332,14 @@ static void pkcs1pad_decrypt_complete_cb(
struct crypto_async_request *child_async_req, int err) struct crypto_async_request *child_async_req, int err)
{ {
struct akcipher_request *req = child_async_req->data; struct akcipher_request *req = child_async_req->data;
struct crypto_async_request async_req;
if (err == -EINPROGRESS) if (err == -EINPROGRESS)
return; goto out;
async_req.data = req->base.data; err = pkcs1pad_decrypt_complete(req, err);
async_req.tfm = crypto_akcipher_tfm(crypto_akcipher_reqtfm(req));
async_req.flags = child_async_req->flags; out:
req->base.complete(&async_req, pkcs1pad_decrypt_complete(req, err)); akcipher_request_complete(req, err);
} }
static int pkcs1pad_decrypt(struct akcipher_request *req) static int pkcs1pad_decrypt(struct akcipher_request *req)
@ -500,15 +497,14 @@ static void pkcs1pad_verify_complete_cb(
struct crypto_async_request *child_async_req, int err) struct crypto_async_request *child_async_req, int err)
{ {
struct akcipher_request *req = child_async_req->data; struct akcipher_request *req = child_async_req->data;
struct crypto_async_request async_req;
if (err == -EINPROGRESS) if (err == -EINPROGRESS)
return; goto out;
async_req.data = req->base.data; err = pkcs1pad_verify_complete(req, err);
async_req.tfm = crypto_akcipher_tfm(crypto_akcipher_reqtfm(req));
async_req.flags = child_async_req->flags; out:
req->base.complete(&async_req, pkcs1pad_verify_complete(req, err)); akcipher_request_complete(req, err);
} }
/* /*


@@ -30,7 +30,7 @@ static void seqiv_aead_encrypt_complete2(struct aead_request *req, int err)
 	struct aead_request *subreq = aead_request_ctx(req);
 	struct crypto_aead *geniv;
 
-	if (err == -EINPROGRESS)
+	if (err == -EINPROGRESS || err == -EBUSY)
 		return;
 
 	if (err)


@@ -3,7 +3,7 @@
 # Makefile for ACPICA Core interpreter
 #
 
-ccflags-y			:= -Os -D_LINUX -DBUILDING_ACPICA
+ccflags-y			:= -D_LINUX -DBUILDING_ACPICA
 ccflags-$(CONFIG_ACPI_DEBUG)	+= -DACPI_DEBUG_OUTPUT
 
 # use acpi.o to put all files here into acpi.o modparam namespace


@ -23,8 +23,8 @@ acpi_hw_validate_io_request(acpi_io_address address, u32 bit_width);
* *
* The table is used to implement the Microsoft port access rules that * The table is used to implement the Microsoft port access rules that
* first appeared in Windows XP. Some ports are always illegal, and some * first appeared in Windows XP. Some ports are always illegal, and some
* ports are only illegal if the BIOS calls _OSI with a win_XP string or * ports are only illegal if the BIOS calls _OSI with nothing newer than
* later (meaning that the BIOS itelf is post-XP.) * the specific _OSI strings.
* *
* This provides ACPICA with the desired port protections and * This provides ACPICA with the desired port protections and
* Microsoft compatibility. * Microsoft compatibility.
@ -145,7 +145,8 @@ acpi_hw_validate_io_request(acpi_io_address address, u32 bit_width)
/* Port illegality may depend on the _OSI calls made by the BIOS */ /* Port illegality may depend on the _OSI calls made by the BIOS */
if (acpi_gbl_osi_data >= port_info->osi_dependency) { if (port_info->osi_dependency == ACPI_ALWAYS_ILLEGAL ||
acpi_gbl_osi_data == port_info->osi_dependency) {
ACPI_DEBUG_PRINT((ACPI_DB_VALUES, ACPI_DEBUG_PRINT((ACPI_DB_VALUES,
"Denied AML access to port 0x%8.8X%8.8X/%X (%s 0x%.4X-0x%.4X)\n", "Denied AML access to port 0x%8.8X%8.8X/%X (%s 0x%.4X-0x%.4X)\n",
ACPI_FORMAT_UINT64(address), ACPI_FORMAT_UINT64(address),


@ -181,8 +181,9 @@ acpi_ns_simple_repair(struct acpi_evaluate_info *info,
* Try to fix if there was no return object. Warning if failed to fix. * Try to fix if there was no return object. Warning if failed to fix.
*/ */
if (!return_object) { if (!return_object) {
if (expected_btypes && (!(expected_btypes & ACPI_RTYPE_NONE))) { if (expected_btypes) {
if (package_index != ACPI_NOT_PACKAGE_ELEMENT) { if (!(expected_btypes & ACPI_RTYPE_NONE) &&
package_index != ACPI_NOT_PACKAGE_ELEMENT) {
ACPI_WARN_PREDEFINED((AE_INFO, ACPI_WARN_PREDEFINED((AE_INFO,
info->full_pathname, info->full_pathname,
ACPI_WARN_ALWAYS, ACPI_WARN_ALWAYS,
@ -196,16 +197,17 @@ acpi_ns_simple_repair(struct acpi_evaluate_info *info,
if (ACPI_SUCCESS(status)) { if (ACPI_SUCCESS(status)) {
return (AE_OK); /* Repair was successful */ return (AE_OK); /* Repair was successful */
} }
} else { }
if (expected_btypes != ACPI_RTYPE_NONE) {
ACPI_WARN_PREDEFINED((AE_INFO, ACPI_WARN_PREDEFINED((AE_INFO,
info->full_pathname, info->full_pathname,
ACPI_WARN_ALWAYS, ACPI_WARN_ALWAYS,
"Missing expected return value")); "Missing expected return value"));
}
return (AE_AML_NO_RETURN_VALUE); return (AE_AML_NO_RETURN_VALUE);
} }
} }
}
if (expected_btypes & ACPI_RTYPE_INTEGER) { if (expected_btypes & ACPI_RTYPE_INTEGER) {
status = acpi_ns_convert_to_integer(return_object, &new_object); status = acpi_ns_convert_to_integer(return_object, &new_object);


@@ -478,7 +478,7 @@ static int extract_package(struct acpi_battery *battery,
 		u8 *ptr = (u8 *)battery + offsets[i].offset;
 		if (element->type == ACPI_TYPE_STRING ||
 		    element->type == ACPI_TYPE_BUFFER)
-			strncpy(ptr, element->string.pointer, 32);
+			strscpy(ptr, element->string.pointer, 32);
 		else if (element->type == ACPI_TYPE_INTEGER) {
 			strncpy(ptr, (u8 *)&element->integer.value,
 				sizeof(u64));


@@ -300,7 +300,7 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
 	 .ident = "Lenovo Ideapad Z570",
 	 .matches = {
 		DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
-		DMI_MATCH(DMI_PRODUCT_NAME, "102434U"),
+		DMI_MATCH(DMI_PRODUCT_VERSION, "Ideapad Z570"),
 		},
 	},
 	{


@ -4381,8 +4381,7 @@ static void rbd_dev_release(struct device *dev)
module_put(THIS_MODULE); module_put(THIS_MODULE);
} }
static struct rbd_device *__rbd_dev_create(struct rbd_client *rbdc, static struct rbd_device *__rbd_dev_create(struct rbd_spec *spec)
struct rbd_spec *spec)
{ {
struct rbd_device *rbd_dev; struct rbd_device *rbd_dev;
@ -4421,9 +4420,6 @@ static struct rbd_device *__rbd_dev_create(struct rbd_client *rbdc,
rbd_dev->dev.parent = &rbd_root_dev; rbd_dev->dev.parent = &rbd_root_dev;
device_initialize(&rbd_dev->dev); device_initialize(&rbd_dev->dev);
rbd_dev->rbd_client = rbdc;
rbd_dev->spec = spec;
return rbd_dev; return rbd_dev;
} }
@ -4436,12 +4432,10 @@ static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
{ {
struct rbd_device *rbd_dev; struct rbd_device *rbd_dev;
rbd_dev = __rbd_dev_create(rbdc, spec); rbd_dev = __rbd_dev_create(spec);
if (!rbd_dev) if (!rbd_dev)
return NULL; return NULL;
rbd_dev->opts = opts;
/* get an id and fill in device name */ /* get an id and fill in device name */
rbd_dev->dev_id = ida_simple_get(&rbd_dev_id_ida, 0, rbd_dev->dev_id = ida_simple_get(&rbd_dev_id_ida, 0,
minor_to_rbd_dev_id(1 << MINORBITS), minor_to_rbd_dev_id(1 << MINORBITS),
@ -4458,6 +4452,10 @@ static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
/* we have a ref from do_rbd_add() */ /* we have a ref from do_rbd_add() */
__module_get(THIS_MODULE); __module_get(THIS_MODULE);
rbd_dev->rbd_client = rbdc;
rbd_dev->spec = spec;
rbd_dev->opts = opts;
dout("%s rbd_dev %p dev_id %d\n", __func__, rbd_dev, rbd_dev->dev_id); dout("%s rbd_dev %p dev_id %d\n", __func__, rbd_dev, rbd_dev->dev_id);
return rbd_dev; return rbd_dev;
@ -5618,7 +5616,7 @@ static int rbd_dev_probe_parent(struct rbd_device *rbd_dev, int depth)
goto out_err; goto out_err;
} }
parent = __rbd_dev_create(rbd_dev->rbd_client, rbd_dev->parent_spec); parent = __rbd_dev_create(rbd_dev->parent_spec);
if (!parent) { if (!parent) {
ret = -ENOMEM; ret = -ENOMEM;
goto out_err; goto out_err;
@ -5628,8 +5626,8 @@ static int rbd_dev_probe_parent(struct rbd_device *rbd_dev, int depth)
* Images related by parent/child relationships always share * Images related by parent/child relationships always share
* rbd_client and spec/parent_spec, so bump their refcounts. * rbd_client and spec/parent_spec, so bump their refcounts.
*/ */
__rbd_get_client(rbd_dev->rbd_client); parent->rbd_client = __rbd_get_client(rbd_dev->rbd_client);
rbd_spec_get(rbd_dev->parent_spec); parent->spec = rbd_spec_get(rbd_dev->parent_spec);
ret = rbd_dev_image_probe(parent, depth); ret = rbd_dev_image_probe(parent, depth);
if (ret < 0) if (ret < 0)


@@ -947,6 +947,8 @@ static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
 	print_version();
 
 	hp = mdesc_grab();
+	if (!hp)
+		return -ENODEV;
 
 	err = -ENODEV;
 	if ((vdev->dev_no << PARTITION_SHIFT) & ~(u64)MINORMASK) {


@@ -63,7 +63,7 @@ config COMMON_CLK_RK808
 config COMMON_CLK_HI655X
 	tristate "Clock driver for Hi655x" if EXPERT
 	depends on (MFD_HI655X_PMIC || COMPILE_TEST)
-	depends on REGMAP
+	select REGMAP
 	default MFD_HI655X_PMIC
 	---help---
 	  This driver supports the hi655x PMIC clock. This


@@ -272,6 +272,17 @@ static bool clk_core_is_enabled(struct clk_core *core)
 		}
 	}
 
+	/*
+	 * This could be called with the enable lock held, or from atomic
+	 * context. If the parent isn't enabled already, we can't do
+	 * anything here. We can also assume this clock isn't enabled.
+	 */
+	if ((core->flags & CLK_OPS_PARENT_ENABLE) && core->parent)
+		if (!clk_core_is_enabled(core->parent)) {
+			ret = false;
+			goto done;
+		}
+
 	ret = core->ops->is_enabled(core->hw);
 done:
 	if (core->rpm_enabled)


@ -529,7 +529,6 @@ static void crypto4xx_cipher_done(struct crypto4xx_device *dev,
{ {
struct skcipher_request *req; struct skcipher_request *req;
struct scatterlist *dst; struct scatterlist *dst;
dma_addr_t addr;
req = skcipher_request_cast(pd_uinfo->async_req); req = skcipher_request_cast(pd_uinfo->async_req);
@ -538,8 +537,8 @@ static void crypto4xx_cipher_done(struct crypto4xx_device *dev,
req->cryptlen, req->dst); req->cryptlen, req->dst);
} else { } else {
dst = pd_uinfo->dest_va; dst = pd_uinfo->dest_va;
addr = dma_map_page(dev->core_dev->device, sg_page(dst), dma_unmap_page(dev->core_dev->device, pd->dest, dst->length,
dst->offset, dst->length, DMA_FROM_DEVICE); DMA_FROM_DEVICE);
} }
if (pd_uinfo->sa_va->sa_command_0.bf.save_iv == SA_SAVE_IV) { if (pd_uinfo->sa_va->sa_command_0.bf.save_iv == SA_SAVE_IV) {
@ -564,10 +563,9 @@ static void crypto4xx_ahash_done(struct crypto4xx_device *dev,
struct ahash_request *ahash_req; struct ahash_request *ahash_req;
ahash_req = ahash_request_cast(pd_uinfo->async_req); ahash_req = ahash_request_cast(pd_uinfo->async_req);
ctx = crypto_tfm_ctx(ahash_req->base.tfm); ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(ahash_req));
crypto4xx_copy_digest_to_dst(ahash_req->result, pd_uinfo, crypto4xx_copy_digest_to_dst(ahash_req->result, pd_uinfo, ctx);
crypto_tfm_ctx(ahash_req->base.tfm));
crypto4xx_ret_sg_desc(dev, pd_uinfo); crypto4xx_ret_sg_desc(dev, pd_uinfo);
if (pd_uinfo->state & PD_ENTRY_BUSY) if (pd_uinfo->state & PD_ENTRY_BUSY)


@@ -51,9 +51,7 @@ static int framebuffer_probe(struct coreboot_device *dev)
 		    fb->green_mask_pos == formats[i].green.offset &&
 		    fb->green_mask_size == formats[i].green.length &&
 		    fb->blue_mask_pos == formats[i].blue.offset &&
-		    fb->blue_mask_size == formats[i].blue.length &&
-		    fb->reserved_mask_pos == formats[i].transp.offset &&
-		    fb->reserved_mask_size == formats[i].transp.length)
+		    fb->blue_mask_size == formats[i].blue.length)
 			pdata.format = formats[i].name;
 	}
 	if (!pdata.format)


@@ -279,7 +279,7 @@ static int vf610_gpio_probe(struct platform_device *pdev)
 	gc = &port->gc;
 	gc->of_node = np;
 	gc->parent = dev;
-	gc->label = "vf610-gpio";
+	gc->label = dev_name(dev);
 	gc->ngpio = VF610_GPIO_PER_PORT;
 	gc->base = of_alias_get_id(np, "gpio") * VF610_GPIO_PER_PORT;


@@ -529,16 +529,13 @@ static struct kfd_event_waiter *alloc_event_waiters(uint32_t num_events)
 	struct kfd_event_waiter *event_waiters;
 	uint32_t i;
 
-	event_waiters = kmalloc_array(num_events,
-					sizeof(struct kfd_event_waiter),
+	event_waiters = kcalloc(num_events, sizeof(struct kfd_event_waiter),
 					GFP_KERNEL);
 	if (!event_waiters)
 		return NULL;
 
-	for (i = 0; (event_waiters) && (i < num_events) ; i++) {
+	for (i = 0; i < num_events; i++)
 		init_wait(&event_waiters[i].wait);
-		event_waiters[i].activated = false;
-	}
 
 	return event_waiters;
 }


@@ -773,12 +773,14 @@ static int dm_resume(void *handle)
 	list_for_each_entry(connector, &ddev->mode_config.connector_list, head) {
 		aconnector = to_amdgpu_dm_connector(connector);
 
+		if (!aconnector->dc_link)
+			continue;
+
 		/*
 		 * this is the case when traversing through already created
 		 * MST connectors, should be skipped
 		 */
-		if (aconnector->dc_link &&
-		    aconnector->dc_link->type == dc_connection_mst_branch)
+		if (aconnector->dc_link->type == dc_connection_mst_branch)
 			continue;
 
 		mutex_lock(&aconnector->hpd_lock);


@@ -437,7 +437,11 @@ static int __init stdpxxxx_ge_b850v3_init(void)
 	if (ret)
 		return ret;
 
-	return i2c_add_driver(&stdp2690_ge_b850v3_fw_driver);
+	ret = i2c_add_driver(&stdp2690_ge_b850v3_fw_driver);
+	if (ret)
+		i2c_del_driver(&stdp4028_ge_b850v3_fw_driver);
+
+	return ret;
 }
 
 module_init(stdpxxxx_ge_b850v3_init);


@@ -1097,6 +1097,58 @@ int mipi_dsi_dcs_get_display_brightness(struct mipi_dsi_device *dsi,
 }
 EXPORT_SYMBOL(mipi_dsi_dcs_get_display_brightness);
 
+/**
+ * mipi_dsi_dcs_set_display_brightness_large() - sets the 16-bit brightness value
+ *    of the display
+ * @dsi: DSI peripheral device
+ * @brightness: brightness value
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int mipi_dsi_dcs_set_display_brightness_large(struct mipi_dsi_device *dsi,
+					      u16 brightness)
+{
+	u8 payload[2] = { brightness >> 8, brightness & 0xff };
+	ssize_t err;
+
+	err = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_DISPLAY_BRIGHTNESS,
+				 payload, sizeof(payload));
+	if (err < 0)
+		return err;
+
+	return 0;
+}
+EXPORT_SYMBOL(mipi_dsi_dcs_set_display_brightness_large);
+
+/**
+ * mipi_dsi_dcs_get_display_brightness_large() - gets the current 16-bit
+ *    brightness value of the display
+ * @dsi: DSI peripheral device
+ * @brightness: brightness value
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int mipi_dsi_dcs_get_display_brightness_large(struct mipi_dsi_device *dsi,
+					      u16 *brightness)
+{
+	u8 brightness_be[2];
+	ssize_t err;
+
+	err = mipi_dsi_dcs_read(dsi, MIPI_DCS_GET_DISPLAY_BRIGHTNESS,
+				brightness_be, sizeof(brightness_be));
+	if (err <= 0) {
+		if (err == 0)
+			err = -ENODATA;
+
+		return err;
+	}
+
+	*brightness = (brightness_be[0] << 8) | brightness_be[1];
+
+	return 0;
+}
+EXPORT_SYMBOL(mipi_dsi_dcs_get_display_brightness_large);
+
 static int mipi_dsi_drv_probe(struct device *dev)
 {
 	struct mipi_dsi_driver *drv = to_mipi_dsi_driver(dev->driver);
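Editorial note: both helpers added above move the 16-bit brightness value over the bus most-significant byte first. A small stand-alone illustration of that packing and unpacking, in plain C with made-up values (not the kernel API itself):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t brightness = 0x1234;	/* arbitrary example value */

	/* pack: high byte first, mirroring payload[] in the set helper */
	uint8_t payload[2] = { brightness >> 8, brightness & 0xff };

	/* unpack: mirror of the brightness_be[] handling in the get helper */
	uint16_t readback = (uint16_t)((payload[0] << 8) | payload[1]);

	printf("sent 0x%04x, read back 0x%04x\n", brightness, readback);
	return 0;
}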


@@ -1083,7 +1083,7 @@ int intel_ring_pin(struct intel_ring *ring,
 	if (unlikely(ret))
 		return ret;
 
-	if (i915_vma_is_map_and_fenceable(vma))
+	if (i915_vma_is_map_and_fenceable(vma) && !HAS_LLC(vma->vm->i915))
 		addr = (void __force *)i915_vma_pin_iomap(vma);
 	else
 		addr = i915_gem_object_pin_map(vma->obj, map);
@@ -1118,7 +1118,7 @@ void intel_ring_unpin(struct intel_ring *ring)
 	/* Discard any unused bytes beyond that submitted to hw. */
 	intel_ring_reset(ring, ring->tail);
 
-	if (i915_vma_is_map_and_fenceable(ring->vma))
+	if (i915_vma_is_map_and_fenceable(ring->vma) && !HAS_LLC(ring->vma->vm->i915))
 		i915_vma_unpin_iomap(ring->vma);
 	else
 		i915_gem_object_unpin_map(ring->vma->obj);
@@ -1132,9 +1132,10 @@ static struct i915_vma *
 intel_ring_create_vma(struct drm_i915_private *dev_priv, int size)
 {
 	struct i915_address_space *vm = &dev_priv->ggtt.vm;
-	struct drm_i915_gem_object *obj;
+	struct drm_i915_gem_object *obj = NULL;
 	struct i915_vma *vma;
 
-	obj = i915_gem_object_create_stolen(dev_priv, size);
+	if (!HAS_LLC(dev_priv))
+		obj = i915_gem_object_create_stolen(dev_priv, size);
 	if (!obj)
 		obj = i915_gem_object_create_internal(dev_priv, size);


@@ -425,6 +425,7 @@ static int mtk_drm_bind(struct device *dev)
 err_deinit:
 	mtk_drm_kms_deinit(drm);
 err_free:
+	private->drm = NULL;
 	drm_dev_put(drm);
 	return ret;
 }


@@ -148,8 +148,6 @@ static int mtk_drm_gem_object_mmap(struct drm_gem_object *obj,
 	ret = dma_mmap_attrs(priv->dma_dev, vma, mtk_gem->cookie,
 			     mtk_gem->dma_addr, obj->size, mtk_gem->dma_attrs);
-	if (ret)
-		drm_gem_vm_close(vma);
 
 	return ret;
 }


@@ -1477,6 +1477,8 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
 	}
 
 	pstates = kzalloc(sizeof(*pstates) * DPU_STAGE_MAX * 4, GFP_KERNEL);
+	if (!pstates)
+		return -ENOMEM;
 
 	dpu_crtc = to_dpu_crtc(crtc);
 	cstate = to_dpu_crtc_state(state);


@@ -1883,6 +1883,9 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi)
 	/* setup workqueue */
 	msm_host->workqueue = alloc_ordered_workqueue("dsi_drm_work", 0);
+	if (!msm_host->workqueue)
+		return -ENOMEM;
+
 	INIT_WORK(&msm_host->err_work, dsi_err_worker);
 	INIT_WORK(&msm_host->hpd_work, dsi_hpd_worker);


@@ -254,6 +254,10 @@ static struct hdmi *msm_hdmi_init(struct platform_device *pdev)
 	pm_runtime_enable(&pdev->dev);
 
 	hdmi->workq = alloc_ordered_workqueue("msm_hdmi", 0);
+	if (!hdmi->workq) {
+		ret = -ENOMEM;
+		goto fail;
+	}
 
 	hdmi->i2c = msm_hdmi_i2c_init(hdmi);
 	if (IS_ERR(hdmi->i2c)) {


@@ -31,7 +31,7 @@ msm_fence_context_alloc(struct drm_device *dev, const char *name)
 		return ERR_PTR(-ENOMEM);
 
 	fctx->dev = dev;
-	strncpy(fctx->name, name, sizeof(fctx->name));
+	strscpy(fctx->name, name, sizeof(fctx->name));
 	fctx->context = dma_fence_context_alloc(1);
 	init_waitqueue_head(&fctx->event);
 	spin_lock_init(&fctx->spinlock);


@@ -7,6 +7,7 @@ config DRM_MXSFB
 	tristate "i.MX23/i.MX28/i.MX6SX MXSFB LCD controller"
 	depends on DRM && OF
 	depends on COMMON_CLK
+	depends on ARCH_MXS || ARCH_MXC || COMPILE_TEST
 	select DRM_MXS
 	select DRM_KMS_HELPER
 	select DRM_KMS_CMA_HELPER


@@ -2188,11 +2188,12 @@ int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder, int fe_idx)
 	/*
 	 * On DCE32 any encoder can drive any block so usually just use crtc id,
-	 * but Apple thinks different at least on iMac10,1, so there use linkb,
+	 * but Apple thinks different at least on iMac10,1 and iMac11,2, so there use linkb,
 	 * otherwise the internal eDP panel will stay dark.
 	 */
 	if (ASIC_IS_DCE32(rdev)) {
-		if (dmi_match(DMI_PRODUCT_NAME, "iMac10,1"))
+		if (dmi_match(DMI_PRODUCT_NAME, "iMac10,1") ||
+		    dmi_match(DMI_PRODUCT_NAME, "iMac11,2"))
 			enc_idx = (dig->linkb) ? 1 : 0;
 		else
 			enc_idx = radeon_crtc->crtc_id;


@@ -1015,6 +1015,7 @@ void radeon_atombios_fini(struct radeon_device *rdev)
 {
 	if (rdev->mode_info.atom_context) {
 		kfree(rdev->mode_info.atom_context->scratch);
+		kfree(rdev->mode_info.atom_context->iio);
 	}
 	kfree(rdev->mode_info.atom_context);
 	rdev->mode_info.atom_context = NULL;


@ -186,7 +186,8 @@ static void vc4_dpi_encoder_enable(struct drm_encoder *encoder)
} }
drm_connector_list_iter_end(&conn_iter); drm_connector_list_iter_end(&conn_iter);
if (connector && connector->display_info.num_bus_formats) { if (connector) {
if (connector->display_info.num_bus_formats) {
u32 bus_format = connector->display_info.bus_formats[0]; u32 bus_format = connector->display_info.bus_formats[0];
switch (bus_format) { switch (bus_format) {
@ -197,7 +198,8 @@ static void vc4_dpi_encoder_enable(struct drm_encoder *encoder)
case MEDIA_BUS_FMT_BGR888_1X24: case MEDIA_BUS_FMT_BGR888_1X24:
dpi_c |= VC4_SET_FIELD(DPI_FORMAT_24BIT_888_RGB, dpi_c |= VC4_SET_FIELD(DPI_FORMAT_24BIT_888_RGB,
DPI_FORMAT); DPI_FORMAT);
dpi_c |= VC4_SET_FIELD(DPI_ORDER_BGR, DPI_ORDER); dpi_c |= VC4_SET_FIELD(DPI_ORDER_BGR,
DPI_ORDER);
break; break;
case MEDIA_BUS_FMT_RGB666_1X24_CPADHI: case MEDIA_BUS_FMT_RGB666_1X24_CPADHI:
dpi_c |= VC4_SET_FIELD(DPI_FORMAT_18BIT_666_RGB_2, dpi_c |= VC4_SET_FIELD(DPI_FORMAT_18BIT_666_RGB_2,
@ -208,13 +210,21 @@ static void vc4_dpi_encoder_enable(struct drm_encoder *encoder)
DPI_FORMAT); DPI_FORMAT);
break; break;
case MEDIA_BUS_FMT_RGB565_1X16: case MEDIA_BUS_FMT_RGB565_1X16:
dpi_c |= VC4_SET_FIELD(DPI_FORMAT_16BIT_565_RGB_3, dpi_c |= VC4_SET_FIELD(DPI_FORMAT_16BIT_565_RGB_1,
DPI_FORMAT); DPI_FORMAT);
break; break;
default: default:
DRM_ERROR("Unknown media bus format %d\n", bus_format); DRM_ERROR("Unknown media bus format %d\n",
bus_format);
break; break;
} }
}
if (connector->display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE)
dpi_c |= DPI_PIXEL_CLK_INVERT;
if (connector->display_info.bus_flags & DRM_BUS_FLAG_DE_LOW)
dpi_c |= DPI_OUTPUT_ENABLE_INVERT;
} else { } else {
/* Default to 24bit if no connector found. */ /* Default to 24bit if no connector found. */
dpi_c |= VC4_SET_FIELD(DPI_FORMAT_24BIT_888_RGB, DPI_FORMAT); dpi_c |= VC4_SET_FIELD(DPI_FORMAT_24BIT_888_RGB, DPI_FORMAT);


@@ -113,9 +113,6 @@ static void syncpt_assign_to_channel(struct host1x_syncpt *sp,
 #if HOST1X_HW >= 6
 	struct host1x *host = sp->host;
 
-	if (!host->hv_regs)
-		return;
-
 	host1x_sync_writel(host,
 			   HOST1X_SYNC_SYNCPT_CH_APP_CH(ch ? ch->id : 0xff),
 			   HOST1X_SYNC_SYNCPT_CH_APP(sp->id));


@@ -1238,6 +1238,7 @@ static int ipu_add_client_devices(struct ipu_soc *ipu, unsigned long ipu_base)
 		pdev = platform_device_alloc(reg->name, id++);
 		if (!pdev) {
 			ret = -ENOMEM;
+			of_node_put(of_node);
 			goto err_register;
 		}


@ -84,6 +84,7 @@ struct asus_kbd_leds {
struct hid_device *hdev; struct hid_device *hdev;
struct work_struct work; struct work_struct work;
unsigned int brightness; unsigned int brightness;
spinlock_t lock;
bool removed; bool removed;
}; };
@ -313,24 +314,42 @@ static int asus_kbd_get_functions(struct hid_device *hdev,
return ret; return ret;
} }
static void asus_schedule_work(struct asus_kbd_leds *led)
{
unsigned long flags;
spin_lock_irqsave(&led->lock, flags);
if (!led->removed)
schedule_work(&led->work);
spin_unlock_irqrestore(&led->lock, flags);
}
static void asus_kbd_backlight_set(struct led_classdev *led_cdev, static void asus_kbd_backlight_set(struct led_classdev *led_cdev,
enum led_brightness brightness) enum led_brightness brightness)
{ {
struct asus_kbd_leds *led = container_of(led_cdev, struct asus_kbd_leds, struct asus_kbd_leds *led = container_of(led_cdev, struct asus_kbd_leds,
cdev); cdev);
if (led->brightness == brightness) unsigned long flags;
return;
spin_lock_irqsave(&led->lock, flags);
led->brightness = brightness; led->brightness = brightness;
schedule_work(&led->work); spin_unlock_irqrestore(&led->lock, flags);
asus_schedule_work(led);
} }
static enum led_brightness asus_kbd_backlight_get(struct led_classdev *led_cdev) static enum led_brightness asus_kbd_backlight_get(struct led_classdev *led_cdev)
{ {
struct asus_kbd_leds *led = container_of(led_cdev, struct asus_kbd_leds, struct asus_kbd_leds *led = container_of(led_cdev, struct asus_kbd_leds,
cdev); cdev);
enum led_brightness brightness;
unsigned long flags;
return led->brightness; spin_lock_irqsave(&led->lock, flags);
brightness = led->brightness;
spin_unlock_irqrestore(&led->lock, flags);
return brightness;
} }
static void asus_kbd_backlight_work(struct work_struct *work) static void asus_kbd_backlight_work(struct work_struct *work)
@ -338,11 +357,11 @@ static void asus_kbd_backlight_work(struct work_struct *work)
struct asus_kbd_leds *led = container_of(work, struct asus_kbd_leds, work); struct asus_kbd_leds *led = container_of(work, struct asus_kbd_leds, work);
u8 buf[] = { FEATURE_KBD_REPORT_ID, 0xba, 0xc5, 0xc4, 0x00 }; u8 buf[] = { FEATURE_KBD_REPORT_ID, 0xba, 0xc5, 0xc4, 0x00 };
int ret; int ret;
unsigned long flags;
if (led->removed) spin_lock_irqsave(&led->lock, flags);
return;
buf[4] = led->brightness; buf[4] = led->brightness;
spin_unlock_irqrestore(&led->lock, flags);
ret = asus_kbd_set_report(led->hdev, buf, sizeof(buf)); ret = asus_kbd_set_report(led->hdev, buf, sizeof(buf));
if (ret < 0) if (ret < 0)
@ -383,6 +402,7 @@ static int asus_kbd_register_leds(struct hid_device *hdev)
drvdata->kbd_backlight->cdev.brightness_set = asus_kbd_backlight_set; drvdata->kbd_backlight->cdev.brightness_set = asus_kbd_backlight_set;
drvdata->kbd_backlight->cdev.brightness_get = asus_kbd_backlight_get; drvdata->kbd_backlight->cdev.brightness_get = asus_kbd_backlight_get;
INIT_WORK(&drvdata->kbd_backlight->work, asus_kbd_backlight_work); INIT_WORK(&drvdata->kbd_backlight->work, asus_kbd_backlight_work);
spin_lock_init(&drvdata->kbd_backlight->lock);
ret = devm_led_classdev_register(&hdev->dev, &drvdata->kbd_backlight->cdev); ret = devm_led_classdev_register(&hdev->dev, &drvdata->kbd_backlight->cdev);
if (ret < 0) { if (ret < 0) {
@ -692,9 +712,13 @@ err_stop_hw:
static void asus_remove(struct hid_device *hdev) static void asus_remove(struct hid_device *hdev)
{ {
struct asus_drvdata *drvdata = hid_get_drvdata(hdev); struct asus_drvdata *drvdata = hid_get_drvdata(hdev);
unsigned long flags;
if (drvdata->kbd_backlight) { if (drvdata->kbd_backlight) {
spin_lock_irqsave(&drvdata->kbd_backlight->lock, flags);
drvdata->kbd_backlight->removed = true; drvdata->kbd_backlight->removed = true;
spin_unlock_irqrestore(&drvdata->kbd_backlight->lock, flags);
cancel_work_sync(&drvdata->kbd_backlight->work); cancel_work_sync(&drvdata->kbd_backlight->work);
} }


@@ -258,6 +258,7 @@ static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsign
 {
 	struct hid_report *report;
 	struct hid_field *field;
+	unsigned int max_buffer_size = HID_MAX_BUFFER_SIZE;
 	unsigned int usages;
 	unsigned int offset;
 	unsigned int i;
@@ -288,8 +289,11 @@
 	offset = report->size;
 	report->size += parser->global.report_size * parser->global.report_count;
 
+	if (parser->device->ll_driver->max_buffer_size)
+		max_buffer_size = parser->device->ll_driver->max_buffer_size;
+
 	/* Total size check: Allow for possible report index byte */
-	if (report->size > (HID_MAX_BUFFER_SIZE - 1) << 3) {
+	if (report->size > (max_buffer_size - 1) << 3) {
 		hid_err(parser->device, "report is too long\n");
 		return -1;
 	}
@@ -1567,6 +1571,7 @@ int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size,
 	struct hid_report_enum *report_enum = hid->report_enum + type;
 	struct hid_report *report;
 	struct hid_driver *hdrv;
+	int max_buffer_size = HID_MAX_BUFFER_SIZE;
 	unsigned int a;
 	u32 rsize, csize = size;
 	u8 *cdata = data;
@@ -1583,10 +1588,13 @@ int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size,
 	rsize = hid_compute_report_size(report);
 
-	if (report_enum->numbered && rsize >= HID_MAX_BUFFER_SIZE)
-		rsize = HID_MAX_BUFFER_SIZE - 1;
-	else if (rsize > HID_MAX_BUFFER_SIZE)
-		rsize = HID_MAX_BUFFER_SIZE;
+	if (hid->ll_driver->max_buffer_size)
+		max_buffer_size = hid->ll_driver->max_buffer_size;
+
+	if (report_enum->numbered && rsize >= max_buffer_size)
+		rsize = max_buffer_size - 1;
+	else if (rsize > max_buffer_size)
+		rsize = max_buffer_size;
 
 	if (csize < rsize) {
 		dbg_hid("report %d is too short, (%d < %d)\n", report->id,
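Editorial note: the effect of the new max_buffer_size hook is that a low-level transport can lower the clamp that was previously fixed at HID_MAX_BUFFER_SIZE. The sketch below restates that clamping rule outside the kernel; the constant values and the helper name are illustrative assumptions, not the kernel API:

#include <stdio.h>

#define DEFAULT_HID_MAX 16384	/* stands in for HID_MAX_BUFFER_SIZE, value assumed for illustration */

static unsigned int clamp_report_size(unsigned int rsize, int numbered,
				      unsigned int ll_max_buffer_size)
{
	unsigned int max = ll_max_buffer_size ? ll_max_buffer_size : DEFAULT_HID_MAX;

	if (numbered && rsize >= max)
		return max - 1;		/* leave room for the report ID byte */
	else if (rsize > max)
		return max;
	return rsize;
}

int main(void)
{
	/* a transport advertising a 4096-byte limit shrinks an oversized numbered report */
	printf("%u\n", clamp_report_size(8000, 1, 4096));	/* prints 4095 */
	/* with no transport limit, the default cap applies and 8000 passes through */
	printf("%u\n", clamp_report_size(8000, 0, 0));		/* prints 8000 */
	return 0;
}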


@@ -398,6 +398,7 @@ struct hid_ll_driver uhid_hid_driver = {
 	.parse = uhid_hid_parse,
 	.raw_request = uhid_hid_raw_request,
 	.output_report = uhid_hid_output_report,
+	.max_buffer_size = UHID_DATA_MAX,
 };
 EXPORT_SYMBOL_GPL(uhid_hid_driver);


@@ -485,10 +485,10 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *attr,
 	val = (temp - val) / 1000;
 
 	if (sattr->index != 1) {
-		data->temp[HYSTERSIS][sattr->index] &= 0xF0;
+		data->temp[HYSTERSIS][sattr->index] &= 0x0F;
 		data->temp[HYSTERSIS][sattr->index] |= (val & 0xF) << 4;
 	} else {
-		data->temp[HYSTERSIS][sattr->index] &= 0x0F;
+		data->temp[HYSTERSIS][sattr->index] &= 0xF0;
 		data->temp[HYSTERSIS][sattr->index] |= (val & 0xF);
 	}
@@ -554,11 +554,11 @@ static ssize_t show_temp_st(struct device *dev, struct device_attribute *attr,
 		val = data->enh_acoustics[0] & 0xf;
 		break;
 	case 1:
-		val = (data->enh_acoustics[1] >> 4) & 0xf;
+		val = data->enh_acoustics[1] & 0xf;
 		break;
 	case 2:
 	default:
-		val = data->enh_acoustics[1] & 0xf;
+		val = (data->enh_acoustics[1] >> 4) & 0xf;
 		break;
 	}
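Editorial note: the two hunks above fix swapped nibble masks, so the high-nibble update now preserves the low nibble and vice versa. A tiny stand-alone check of that masking arithmetic (values chosen arbitrarily for the example):

#include <stdio.h>

int main(void)
{
	unsigned char reg = 0xA5;	/* high nibble 0xA, low nibble 0x5 */
	unsigned char val = 0x3;

	/* update the high nibble: keep the low one (mask 0x0F), then OR in val << 4 */
	unsigned char hi = (reg & 0x0F) | ((val & 0xF) << 4);	/* 0x35 */

	/* update the low nibble: keep the high one (mask 0xF0), then OR in val */
	unsigned char lo = (reg & 0xF0) | (val & 0xF);		/* 0xA3 */

	printf("%02X %02X\n", hi, lo);
	return 0;
}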


@@ -257,6 +257,8 @@ static ssize_t ltc2945_set_value(struct device *dev,
 	/* convert to register value, then clamp and write result */
 	regval = ltc2945_val_to_reg(dev, reg, val);
+	if (regval < 0)
+		return regval;
 	if (is_power_reg(reg)) {
 		regval = clamp_val(regval, 0, 0xffffff);
 		regbuf[0] = regval >> 16;


@@ -125,6 +125,12 @@ mlxreg_fan_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
 			if (err)
 				return err;
 
+			if (MLXREG_FAN_GET_FAULT(regval, tacho->mask)) {
+				/* FAN is broken - return zero for FAN speed. */
+				*val = 0;
+				return 0;
+			}
+
 			*val = MLXREG_FAN_GET_RPM(regval, fan->divider,
 						  fan->samples);
 			break;


@@ -780,6 +780,7 @@ static int xgene_hwmon_remove(struct platform_device *pdev)
 {
 	struct xgene_hwmon_dev *ctx = platform_get_drvdata(pdev);
 
+	cancel_work_sync(&ctx->workq);
 	hwmon_device_unregister(ctx->hwmon_dev);
 	kfifo_free(&ctx->async_msg_fifo);
 	if (acpi_disabled)


@@ -304,9 +304,12 @@ int mma9551_read_config_word(struct i2c_client *client, u8 app_id,
 	ret = mma9551_transfer(client, app_id, MMA9551_CMD_READ_CONFIG,
 			       reg, NULL, 0, (u8 *)&v, 2);
+	if (ret < 0)
+		return ret;
+
 	*val = be16_to_cpu(v);
 
-	return ret;
+	return 0;
 }
 EXPORT_SYMBOL(mma9551_read_config_word);
@@ -362,9 +365,12 @@ int mma9551_read_status_word(struct i2c_client *client, u8 app_id,
 	ret = mma9551_transfer(client, app_id, MMA9551_CMD_READ_STATUS,
 			       reg, NULL, 0, (u8 *)&v, 2);
+	if (ret < 0)
+		return ret;
+
 	*val = be16_to_cpu(v);
 
-	return ret;
+	return 0;
 }
 EXPORT_SYMBOL(mma9551_read_status_word);


@ -790,14 +790,8 @@ static void ads7846_report_state(struct ads7846 *ts)
if (x == MAX_12BIT) if (x == MAX_12BIT)
x = 0; x = 0;
if (ts->model == 7843) { if (ts->model == 7843 || ts->model == 7845) {
Rt = ts->pressure_max / 2; Rt = ts->pressure_max / 2;
} else if (ts->model == 7845) {
if (get_pendown_state(ts))
Rt = ts->pressure_max / 2;
else
Rt = 0;
dev_vdbg(&ts->spi->dev, "x/y: %d/%d, PD %d\n", x, y, Rt);
} else if (likely(x && z1)) { } else if (likely(x && z1)) {
/* compute touch pressure resistance using equation #2 */ /* compute touch pressure resistance using equation #2 */
Rt = z2; Rt = z2;
@ -1374,6 +1368,7 @@ static int ads7846_probe(struct spi_device *spi)
pdata->y_min ? : 0, pdata->y_min ? : 0,
pdata->y_max ? : MAX_12BIT, pdata->y_max ? : MAX_12BIT,
0, 0); 0, 0);
if (ts->model != 7845)
input_set_abs_params(input_dev, ABS_PRESSURE, input_set_abs_params(input_dev, ABS_PRESSURE,
pdata->pressure_min, pdata->pressure_max, 0, 0); pdata->pressure_min, pdata->pressure_max, 0, 0);


@@ -199,6 +199,7 @@ static int alpine_msix_init_domains(struct alpine_msix_data *priv,
 	}
 
 	gic_domain = irq_find_host(gic_node);
+	of_node_put(gic_node);
 	if (!gic_domain) {
 		pr_err("Failed to find the GIC domain\n");
 		return -ENXIO;
