Merge "Merge android-4.19.55 (65f49f0) into msm-4.19"

qctecmdr 2019-07-30 19:02:41 -07:00 committed by Gerrit - the friendly Code Review server
commit 7ece8a38da
433 changed files with 3464 additions and 1504 deletions

View file

@ -37,7 +37,7 @@ needs_sphinx = '1.3'
extensions = ['kerneldoc', 'rstFlatTable', 'kernel_include', 'cdomain', 'kfigure', 'sphinx.ext.ifconfig']
# The name of the math extension changed on Sphinx 1.4
if major == 1 and minor > 3:
if (major == 1 and minor > 3) or (major > 1):
extensions.append("sphinx.ext.imgmath")
else:
extensions.append("sphinx.ext.pngmath")

View file

@ -250,6 +250,14 @@ tcp_base_mss - INTEGER
Path MTU discovery (MTU probing). If MTU probing is enabled,
this is the initial MSS used by the connection.
tcp_min_snd_mss - INTEGER
TCP SYN and SYNACK messages usually advertise an ADVMSS option,
as described in RFC 1122 and RFC 6691.
If this ADVMSS option is smaller than tcp_min_snd_mss,
it is silently capped to tcp_min_snd_mss.
Default : 48 (at least 8 bytes of payload per segment)
tcp_congestion_control - STRING
Set the congestion control algorithm to be used for new
connections. The algorithm "reno" is always available, but

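The capping described above is just a lower bound applied to the peer-advertised MSS. A minimal sketch of the clamp in plain C (illustrative only; these names are hypothetical, not the kernel's actual helpers):

/* Illustrative: enforce the tcp_min_snd_mss floor on an advertised MSS.
 * With the default floor of 48, every segment carries at least 8 bytes
 * of payload even if a malicious peer advertises a tiny MSS. */
static unsigned int clamp_snd_mss(unsigned int advertised_mss,
                                  unsigned int min_snd_mss)
{
        return advertised_mss < min_snd_mss ? min_snd_mss : advertised_mss;
}
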
View file

@ -37,7 +37,19 @@ import glob
from docutils import nodes, statemachine
from docutils.statemachine import ViewList
from docutils.parsers.rst import directives, Directive
from sphinx.ext.autodoc import AutodocReporter
#
# AutodocReporter is only good up to Sphinx 1.7
#
import sphinx
Use_SSI = sphinx.__version__[:3] >= '1.7'
if Use_SSI:
from sphinx.util.docutils import switch_source_input
else:
from sphinx.ext.autodoc import AutodocReporter
import kernellog
__version__ = '1.0'
@ -90,7 +102,8 @@ class KernelDocDirective(Directive):
cmd += [filename]
try:
env.app.verbose('calling kernel-doc \'%s\'' % (" ".join(cmd)))
kernellog.verbose(env.app,
'calling kernel-doc \'%s\'' % (" ".join(cmd)))
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
@ -100,7 +113,8 @@ class KernelDocDirective(Directive):
if p.returncode != 0:
sys.stderr.write(err)
env.app.warn('kernel-doc \'%s\' failed with return code %d' % (" ".join(cmd), p.returncode))
kernellog.warn(env.app,
'kernel-doc \'%s\' failed with return code %d' % (" ".join(cmd), p.returncode))
return [nodes.error(None, nodes.paragraph(text = "kernel-doc missing"))]
elif env.config.kerneldoc_verbosity > 0:
sys.stderr.write(err)
@ -121,20 +135,28 @@ class KernelDocDirective(Directive):
lineoffset += 1
node = nodes.section()
buf = self.state.memo.title_styles, self.state.memo.section_level, self.state.memo.reporter
self.do_parse(result, node)
return node.children
except Exception as e: # pylint: disable=W0703
kernellog.warn(env.app, 'kernel-doc \'%s\' processing failed with: %s' %
(" ".join(cmd), str(e)))
return [nodes.error(None, nodes.paragraph(text = "kernel-doc missing"))]
def do_parse(self, result, node):
if Use_SSI:
with switch_source_input(self.state, result):
self.state.nested_parse(result, 0, node, match_titles=1)
else:
save = self.state.memo.title_styles, self.state.memo.section_level, self.state.memo.reporter
self.state.memo.reporter = AutodocReporter(result, self.state.memo.reporter)
self.state.memo.title_styles, self.state.memo.section_level = [], 0
try:
self.state.nested_parse(result, 0, node, match_titles=1)
finally:
self.state.memo.title_styles, self.state.memo.section_level, self.state.memo.reporter = buf
self.state.memo.title_styles, self.state.memo.section_level, self.state.memo.reporter = save
return node.children
except Exception as e: # pylint: disable=W0703
env.app.warn('kernel-doc \'%s\' processing failed with: %s' %
(" ".join(cmd), str(e)))
return [nodes.error(None, nodes.paragraph(text = "kernel-doc missing"))]
def setup(app):
app.add_config_value('kerneldoc_bin', None, 'env')

View file

@ -0,0 +1,28 @@
# SPDX-License-Identifier: GPL-2.0
#
# Sphinx has deprecated its older logging interface, but the replacement
# only goes back to 1.6. So here's a wrapper layer to keep around for
# as long as we support 1.4.
#
import sphinx
if sphinx.__version__[:3] >= '1.6':
UseLogging = True
from sphinx.util import logging
logger = logging.getLogger('kerneldoc')
else:
UseLogging = False
def warn(app, message):
if UseLogging:
logger.warning(message)
else:
app.warn(message)
def verbose(app, message):
if UseLogging:
logger.verbose(message)
else:
app.verbose(message)

View file

@ -60,6 +60,8 @@ import sphinx
from sphinx.util.nodes import clean_astext
from six import iteritems
import kernellog
PY3 = sys.version_info[0] == 3
if PY3:
@ -171,20 +173,20 @@ def setupTools(app):
This function is called once, when the builder is initiated.
"""
global dot_cmd, convert_cmd # pylint: disable=W0603
app.verbose("kfigure: check installed tools ...")
kernellog.verbose(app, "kfigure: check installed tools ...")
dot_cmd = which('dot')
convert_cmd = which('convert')
if dot_cmd:
app.verbose("use dot(1) from: " + dot_cmd)
kernellog.verbose(app, "use dot(1) from: " + dot_cmd)
else:
app.warn("dot(1) not found, for better output quality install "
"graphviz from http://www.graphviz.org")
kernellog.warn(app, "dot(1) not found, for better output quality install "
"graphviz from http://www.graphviz.org")
if convert_cmd:
app.verbose("use convert(1) from: " + convert_cmd)
kernellog.verbose(app, "use convert(1) from: " + convert_cmd)
else:
app.warn(
kernellog.warn(app,
"convert(1) not found, for SVG to PDF conversion install "
"ImageMagick (https://www.imagemagick.org)")
@ -220,12 +222,13 @@ def convert_image(img_node, translator, src_fname=None):
# in kernel builds, use 'make SPHINXOPTS=-v' to see verbose messages
app.verbose('assert best format for: ' + img_node['uri'])
kernellog.verbose(app, 'assert best format for: ' + img_node['uri'])
if in_ext == '.dot':
if not dot_cmd:
app.verbose("dot from graphviz not available / include DOT raw.")
kernellog.verbose(app,
"dot from graphviz not available / include DOT raw.")
img_node.replace_self(file2literal(src_fname))
elif translator.builder.format == 'latex':
@ -252,7 +255,8 @@ def convert_image(img_node, translator, src_fname=None):
if translator.builder.format == 'latex':
if convert_cmd is None:
app.verbose("no SVG to PDF conversion available / include SVG raw.")
kernellog.verbose(app,
"no SVG to PDF conversion available / include SVG raw.")
img_node.replace_self(file2literal(src_fname))
else:
dst_fname = path.join(translator.builder.outdir, fname + '.pdf')
@ -265,18 +269,19 @@ def convert_image(img_node, translator, src_fname=None):
_name = dst_fname[len(translator.builder.outdir) + 1:]
if isNewer(dst_fname, src_fname):
app.verbose("convert: {out}/%s already exists and is newer" % _name)
kernellog.verbose(app,
"convert: {out}/%s already exists and is newer" % _name)
else:
ok = False
mkdir(path.dirname(dst_fname))
if in_ext == '.dot':
app.verbose('convert DOT to: {out}/' + _name)
kernellog.verbose(app, 'convert DOT to: {out}/' + _name)
ok = dot2format(app, src_fname, dst_fname)
elif in_ext == '.svg':
app.verbose('convert SVG to: {out}/' + _name)
kernellog.verbose(app, 'convert SVG to: {out}/' + _name)
ok = svg2pdf(app, src_fname, dst_fname)
if not ok:
@ -305,7 +310,8 @@ def dot2format(app, dot_fname, out_fname):
with open(out_fname, "w") as out:
exit_code = subprocess.call(cmd, stdout = out)
if exit_code != 0:
app.warn("Error #%d when calling: %s" % (exit_code, " ".join(cmd)))
kernellog.warn(app,
"Error #%d when calling: %s" % (exit_code, " ".join(cmd)))
return bool(exit_code == 0)
def svg2pdf(app, svg_fname, pdf_fname):
@ -322,7 +328,7 @@ def svg2pdf(app, svg_fname, pdf_fname):
# use stdout and stderr from parent
exit_code = subprocess.call(cmd)
if exit_code != 0:
app.warn("Error #%d when calling: %s" % (exit_code, " ".join(cmd)))
kernellog.warn(app, "Error #%d when calling: %s" % (exit_code, " ".join(cmd)))
return bool(exit_code == 0)
@ -415,15 +421,15 @@ def visit_kernel_render(self, node):
app = self.builder.app
srclang = node.get('srclang')
app.verbose('visit kernel-render node lang: "%s"' % (srclang))
kernellog.verbose(app, 'visit kernel-render node lang: "%s"' % (srclang))
tmp_ext = RENDER_MARKUP_EXT.get(srclang, None)
if tmp_ext is None:
app.warn('kernel-render: "%s" unknown / include raw.' % (srclang))
kernellog.warn(app, 'kernel-render: "%s" unknown / include raw.' % (srclang))
return
if not dot_cmd and tmp_ext == '.dot':
app.verbose("dot from graphviz not available / include raw.")
kernellog.verbose(app, "dot from graphviz not available / include raw.")
return
literal_block = node[0]

View file

@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 4
PATCHLEVEL = 19
SUBLEVEL = 48
SUBLEVEL = 55
EXTRAVERSION =
NAME = "People's Front"
@ -767,6 +767,11 @@ KBUILD_CFLAGS += -fomit-frame-pointer
endif
endif
# Initialize all stack variables with a pattern, if desired.
ifdef CONFIG_INIT_STACK_ALL
KBUILD_CFLAGS += -ftrivial-auto-var-init=pattern
endif
KBUILD_CFLAGS += $(call cc-option, -fno-var-tracking-assignments)
KBUILD_CFLAGS += $(call cc-option, -Wvla)

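For context, -ftrivial-auto-var-init=pattern is a Clang option that pre-fills otherwise-uninitialized automatic variables with a byte pattern. A hedged illustration of the behaviour it changes (hypothetical function, not from this tree):

/* Illustrative: without CONFIG_INIT_STACK_ALL, 'buf' starts out holding
 * whatever was left on the stack; with -ftrivial-auto-var-init=pattern
 * the compiler fills it with a fixed pattern, so a bug that copies it
 * out before writing it leaks a constant, not stale kernel data. */
void report_status(void)
{
        char buf[64];           /* deliberately left uninitialized */

        /* ... if buf escapes before being written, its contents are the
         * compiler's fill pattern rather than leftover stack memory ... */
}
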
View file

@ -106,6 +106,7 @@
regulator-name = "PVDD_APIO_1V8";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
regulator-always-on;
};
ldo3_reg: LDO3 {
@ -144,6 +145,7 @@
regulator-name = "PVDD_ABB_1V8";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
regulator-always-on;
};
ldo9_reg: LDO9 {

View file

@ -420,7 +420,7 @@
reg = <0x63fb0000 0x4000>;
interrupts = <6>;
clocks = <&clks IMX5_CLK_SDMA_GATE>,
<&clks IMX5_CLK_SDMA_GATE>;
<&clks IMX5_CLK_AHB>;
clock-names = "ipg", "ahb";
#dma-cells = <3>;
fsl,sdma-ram-script-name = "imx/sdma/sdma-imx50.bin";

View file

@ -481,7 +481,7 @@
reg = <0x83fb0000 0x4000>;
interrupts = <6>;
clocks = <&clks IMX5_CLK_SDMA_GATE>,
<&clks IMX5_CLK_SDMA_GATE>;
<&clks IMX5_CLK_AHB>;
clock-names = "ipg", "ahb";
#dma-cells = <3>;
fsl,sdma-ram-script-name = "imx/sdma/sdma-imx51.bin";

View file

@ -701,7 +701,7 @@
reg = <0x63fb0000 0x4000>;
interrupts = <6>;
clocks = <&clks IMX5_CLK_SDMA_GATE>,
<&clks IMX5_CLK_SDMA_GATE>;
<&clks IMX5_CLK_AHB>;
clock-names = "ipg", "ahb";
#dma-cells = <3>;
fsl,sdma-ram-script-name = "imx/sdma/sdma-imx53.bin";

View file

@ -905,7 +905,7 @@
compatible = "fsl,imx6q-sdma", "fsl,imx35-sdma";
reg = <0x020ec000 0x4000>;
interrupts = <0 2 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks IMX6QDL_CLK_SDMA>,
clocks = <&clks IMX6QDL_CLK_IPG>,
<&clks IMX6QDL_CLK_SDMA>;
clock-names = "ipg", "ahb";
#dma-cells = <3>;

View file

@ -739,7 +739,7 @@
reg = <0x020ec000 0x4000>;
interrupts = <0 2 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks IMX6SL_CLK_SDMA>,
<&clks IMX6SL_CLK_SDMA>;
<&clks IMX6SL_CLK_AHB>;
clock-names = "ipg", "ahb";
#dma-cells = <3>;
/* imx6sl reuses imx6q sdma firmware */

View file

@ -591,7 +591,7 @@
compatible = "fsl,imx6sll-sdma", "fsl,imx35-sdma";
reg = <0x020ec000 0x4000>;
interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks IMX6SLL_CLK_SDMA>,
clocks = <&clks IMX6SLL_CLK_IPG>,
<&clks IMX6SLL_CLK_SDMA>;
clock-names = "ipg", "ahb";
#dma-cells = <3>;

View file

@ -803,7 +803,7 @@
compatible = "fsl,imx6sx-sdma", "fsl,imx6q-sdma";
reg = <0x020ec000 0x4000>;
interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks IMX6SX_CLK_SDMA>,
clocks = <&clks IMX6SX_CLK_IPG>,
<&clks IMX6SX_CLK_SDMA>;
clock-names = "ipg", "ahb";
#dma-cells = <3>;

View file

@ -707,7 +707,7 @@
"fsl,imx35-sdma";
reg = <0x020ec000 0x4000>;
interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks IMX6UL_CLK_SDMA>,
clocks = <&clks IMX6UL_CLK_IPG>,
<&clks IMX6UL_CLK_SDMA>;
clock-names = "ipg", "ahb";
#dma-cells = <3>;

View file

@ -1050,8 +1050,8 @@
compatible = "fsl,imx7d-sdma", "fsl,imx35-sdma";
reg = <0x30bd0000 0x10000>;
interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks IMX7D_SDMA_CORE_CLK>,
<&clks IMX7D_AHB_CHANNEL_ROOT_CLK>;
clocks = <&clks IMX7D_IPG_ROOT_CLK>,
<&clks IMX7D_SDMA_CORE_CLK>;
clock-names = "ipg", "ahb";
#dma-cells = <3>;
fsl,sdma-ram-script-name = "imx/sdma/sdma-imx7d.bin";

View file

@ -6,6 +6,7 @@
#include <linux/threads.h>
#include <asm/irq.h>
/* number of IPIs _not_ including IPI_CPU_BACKTRACE */
#define NR_IPI 7
typedef struct {

View file

@ -76,6 +76,10 @@ enum ipi_msg_type {
IPI_CPU_STOP,
IPI_IRQ_WORK,
IPI_COMPLETION,
/*
* CPU_BACKTRACE is special and not included in NR_IPI
* or traceable with trace_ipi_*
*/
IPI_CPU_BACKTRACE,
/*
* SGI8-15 can be reserved by secure firmware, and thus may
@ -818,16 +822,7 @@ core_initcall(register_cpufreq_notifier);
static void raise_nmi(cpumask_t *mask)
{
/*
* Generate the backtrace directly if we are running in a calling
* context that is not preemptible by the backtrace IPI. Note
* that nmi_cpu_backtrace() automatically removes the current cpu
* from mask.
*/
if (cpumask_test_cpu(smp_processor_id(), mask) && irqs_disabled())
nmi_cpu_backtrace(NULL);
smp_cross_call_common(mask, IPI_CPU_BACKTRACE);
__smp_cross_call(mask, IPI_CPU_BACKTRACE);
}
void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)

View file

@ -11,6 +11,7 @@ CFLAGS_ARMV7VE :=$(call cc-option, -march=armv7ve)
obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v3-sr.o
obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/timer-sr.o
obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/aarch32.o
obj-$(CONFIG_KVM_ARM_HOST) += tlb.o
obj-$(CONFIG_KVM_ARM_HOST) += cp15-sr.o

View file

@ -434,8 +434,27 @@ early_wakeup:
static void exynos5420_prepare_pm_resume(void)
{
unsigned int mpidr, cluster;
mpidr = read_cpuid_mpidr();
cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
if (IS_ENABLED(CONFIG_EXYNOS5420_MCPM))
WARN_ON(mcpm_cpu_powered_up());
if (IS_ENABLED(CONFIG_HW_PERF_EVENTS) && cluster != 0) {
/*
* When the system is resumed on the LITTLE/KFC core (cluster 1),
* the DSCR is not properly updated until power is also turned on
* for cluster 0. Enable it briefly to propagate the SPNIDEN and
* SPIDEN signals from the Secure JTAG block and avoid an undefined
* instruction issue on CP14 reset.
*/
pmu_raw_writel(S5P_CORE_LOCAL_PWR_EN,
EXYNOS_COMMON_CONFIGURATION(0));
pmu_raw_writel(0,
EXYNOS_COMMON_CONFIGURATION(0));
}
}
static void exynos5420_pm_resume(void)

View file

@ -51,10 +51,12 @@ static int amx3_common_init(void)
/* CEFUSE domain can be turned off post bootup */
cefuse_pwrdm = pwrdm_lookup("cefuse_pwrdm");
if (cefuse_pwrdm)
omap_set_pwrdm_state(cefuse_pwrdm, PWRDM_POWER_OFF);
else
if (!cefuse_pwrdm)
pr_err("PM: Failed to get cefuse_pwrdm\n");
else if (omap_type() != OMAP2_DEVICE_TYPE_GP)
pr_info("PM: Leaving EFUSE power domain active\n");
else
omap_set_pwrdm_state(cefuse_pwrdm, PWRDM_POWER_OFF);
return 0;
}

View file

@ -71,7 +71,6 @@ CONFIG_ARM_SCMI_PROTOCOL=y
CONFIG_ARM_SCPI_PROTOCOL=y
# CONFIG_ARM_SCPI_POWER_DOMAIN is not set
CONFIG_KPROBES=y
CONFIG_JUMP_LABEL=y
CONFIG_LTO_CLANG=y
CONFIG_CFI_CLANG=y
CONFIG_MODULES=y

View file

@ -49,7 +49,7 @@ SYSCALL_DEFINE1(arm64_personality, unsigned int, personality)
asmlinkage long sys_ni_syscall(void);
SYSCALL_DEFINE0(ni_syscall)
asmlinkage long __arm64_sys_ni_syscall(const struct pt_regs *__unused)
{
return sys_ni_syscall();
}
@ -57,7 +57,7 @@ SYSCALL_DEFINE0(ni_syscall)
/*
* Wrappers to pass the pt_regs argument.
*/
#define sys_personality sys_arm64_personality
#define __arm64_sys_personality __arm64_sys_arm64_personality
#undef __SYSCALL
#define __SYSCALL(nr, sym) asmlinkage long __arm64_##sym(const struct pt_regs *);

View file

@ -133,13 +133,6 @@ COMPAT_SYSCALL_DEFINE6(aarch32_fallocate, int, fd, int, mode,
return ksys_fallocate(fd, mode, arg_u64(offset), arg_u64(len));
}
asmlinkage long sys_ni_syscall(void);
COMPAT_SYSCALL_DEFINE0(ni_syscall)
{
return sys_ni_syscall();
}
#undef __SYSCALL
#define __SYSCALL(nr, sym) asmlinkage long __arm64_##sym(const struct pt_regs *);
#include <asm/unistd32.h>

View file

@ -15,6 +15,7 @@ KVM=../../../../virt/kvm
obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v3-sr.o
obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/timer-sr.o
obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/aarch32.o
obj-$(CONFIG_KVM_ARM_HOST) += vgic-v2-cpuif-proxy.o
obj-$(CONFIG_KVM_ARM_HOST) += sysreg-sr.o

View file

@ -1371,13 +1371,18 @@ void *__init fixmap_remap_fdt(phys_addr_t dt_phys)
int __init arch_ioremap_pud_supported(void)
{
/* only 4k granule supports level 1 block mappings */
return IS_ENABLED(CONFIG_ARM64_4K_PAGES);
/*
* Only 4k granule supports level 1 block mappings.
* SW table walks can't handle removal of intermediate entries.
*/
return IS_ENABLED(CONFIG_ARM64_4K_PAGES) &&
!IS_ENABLED(CONFIG_ARM64_PTDUMP_DEBUGFS);
}
int __init arch_ioremap_pmd_supported(void)
{
return 1;
/* See arch_ioremap_pud_supported() */
return !IS_ENABLED(CONFIG_ARM64_PTDUMP_DEBUGFS);
}
int pud_set_huge(pud_t *pudp, phys_addr_t phys, pgprot_t prot)

View file

@ -49,6 +49,7 @@ paddr_to_nid(unsigned long paddr)
return (i < num_node_memblks) ? node_memblk[i].nid : (num_node_memblks ? -1 : 0);
}
EXPORT_SYMBOL(paddr_to_nid);
#if defined(CONFIG_SPARSEMEM) && defined(CONFIG_NUMA)
/*

View file

@ -211,6 +211,12 @@ const char *get_system_type(void)
return ath79_sys_type;
}
int get_c0_perfcount_int(void)
{
return ATH79_MISC_IRQ(5);
}
EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
unsigned int get_c0_compare_int(void)
{
return CP0_LEGACY_COMPARE_IRQ;

View file

@ -41,7 +41,19 @@ char *mips_get_machine_name(void)
#ifdef CONFIG_USE_OF
void __init early_init_dt_add_memory_arch(u64 base, u64 size)
{
return add_memory_region(base, size, BOOT_MEM_RAM);
if (base >= PHYS_ADDR_MAX) {
pr_warn("Trying to add an invalid memory region, skipped\n");
return;
}
/* Truncate the passed memory region instead of type casting */
if (base + size - 1 >= PHYS_ADDR_MAX || base + size < base) {
pr_warn("Truncate memory region %llx @ %llx to size %llx\n",
size, base, PHYS_ADDR_MAX - base);
size = PHYS_ADDR_MAX - base;
}
add_memory_region(base, size, BOOT_MEM_RAM);
}
int __init early_init_dt_reserve_memory_arch(phys_addr_t base,

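A quick worked example of the truncation above, with hypothetical values and a 32-bit PHYS_ADDR_MAX of 0xffffffff:

        base = 0xf0000000, size = 0x20000000
        base + size - 1 = 0x10fffffff >= PHYS_ADDR_MAX   -> truncate
        size = PHYS_ADDR_MAX - base = 0x0fffffff

A region that would spill past the addressable range is clipped to end at PHYS_ADDR_MAX instead of being silently wrapped by a type cast.
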
View file

@ -1099,6 +1099,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_MAX_VCPUS:
r = KVM_MAX_VCPUS;
break;
case KVM_CAP_MAX_VCPU_ID:
r = KVM_MAX_VCPU_ID;
break;
case KVM_CAP_MIPS_FPU:
/* We don't handle systems with inconsistent cpu_has_fpu */
r = !!raw_cpu_has_fpu;

View file

@ -203,6 +203,11 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
int __virt_addr_valid(const volatile void *kaddr)
{
unsigned long vaddr = (unsigned long)kaddr;
if ((vaddr < PAGE_OFFSET) || (vaddr >= MAP_BASE))
return 0;
return pfn_valid(PFN_DOWN(virt_to_phys(kaddr)));
}
EXPORT_SYMBOL_GPL(__virt_addr_valid);

View file

@ -6,3 +6,4 @@ cflags-$(CONFIG_MACH_PISTACHIO) += \
-I$(srctree)/arch/mips/include/asm/mach-pistachio
load-$(CONFIG_MACH_PISTACHIO) += 0xffffffff80400000
zload-$(CONFIG_MACH_PISTACHIO) += 0xffffffff81000000
all-$(CONFIG_MACH_PISTACHIO) := uImage.gz

View file

@ -299,6 +299,7 @@ struct kvm_arch {
#ifdef CONFIG_PPC_BOOK3S_64
struct list_head spapr_tce_tables;
struct list_head rtas_tokens;
struct mutex rtas_token_lock;
DECLARE_BITMAP(enabled_hcalls, MAX_HCALL_OPCODE/4 + 1);
#endif
#ifdef CONFIG_KVM_MPIC

View file

@ -563,8 +563,6 @@ static int nvram_pstore_init(void)
nvram_pstore_info.buf = oops_data;
nvram_pstore_info.bufsize = oops_data_sz;
spin_lock_init(&nvram_pstore_info.buf_lock);
rc = pstore_register(&nvram_pstore_info);
if (rc && (rc != -EPERM))
/* Print error only when pstore.backend == nvram */

View file

@ -840,6 +840,7 @@ int kvmppc_core_init_vm(struct kvm *kvm)
#ifdef CONFIG_PPC64
INIT_LIST_HEAD_RCU(&kvm->arch.spapr_tce_tables);
INIT_LIST_HEAD(&kvm->arch.rtas_tokens);
mutex_init(&kvm->arch.rtas_token_lock);
#endif
return kvm->arch.kvm_ops->init_vm(kvm);

View file

@ -426,12 +426,7 @@ static void kvmppc_dump_regs(struct kvm_vcpu *vcpu)
static struct kvm_vcpu *kvmppc_find_vcpu(struct kvm *kvm, int id)
{
struct kvm_vcpu *ret;
mutex_lock(&kvm->lock);
ret = kvm_get_vcpu_by_id(kvm, id);
mutex_unlock(&kvm->lock);
return ret;
return kvm_get_vcpu_by_id(kvm, id);
}
static void init_vpa(struct kvm_vcpu *vcpu, struct lppaca *vpa)
@ -1309,7 +1304,6 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
struct kvmppc_vcore *vc = vcpu->arch.vcore;
u64 mask;
mutex_lock(&kvm->lock);
spin_lock(&vc->lock);
/*
* If ILE (interrupt little-endian) has changed, update the
@ -1349,7 +1343,6 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
mask &= 0xFFFFFFFF;
vc->lpcr = (vc->lpcr & ~mask) | (new_lpcr & mask);
spin_unlock(&vc->lock);
mutex_unlock(&kvm->lock);
}
static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,

View file

@ -146,7 +146,7 @@ static int rtas_token_undefine(struct kvm *kvm, char *name)
{
struct rtas_token_definition *d, *tmp;
lockdep_assert_held(&kvm->lock);
lockdep_assert_held(&kvm->arch.rtas_token_lock);
list_for_each_entry_safe(d, tmp, &kvm->arch.rtas_tokens, list) {
if (rtas_name_matches(d->handler->name, name)) {
@ -167,7 +167,7 @@ static int rtas_token_define(struct kvm *kvm, char *name, u64 token)
bool found;
int i;
lockdep_assert_held(&kvm->lock);
lockdep_assert_held(&kvm->arch.rtas_token_lock);
list_for_each_entry(d, &kvm->arch.rtas_tokens, list) {
if (d->token == token)
@ -206,14 +206,14 @@ int kvm_vm_ioctl_rtas_define_token(struct kvm *kvm, void __user *argp)
if (copy_from_user(&args, argp, sizeof(args)))
return -EFAULT;
mutex_lock(&kvm->lock);
mutex_lock(&kvm->arch.rtas_token_lock);
if (args.token)
rc = rtas_token_define(kvm, args.name, args.token);
else
rc = rtas_token_undefine(kvm, args.name);
mutex_unlock(&kvm->lock);
mutex_unlock(&kvm->arch.rtas_token_lock);
return rc;
}
@ -245,7 +245,7 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
orig_rets = args.rets;
args.rets = &args.args[be32_to_cpu(args.nargs)];
mutex_lock(&vcpu->kvm->lock);
mutex_lock(&vcpu->kvm->arch.rtas_token_lock);
rc = -ENOENT;
list_for_each_entry(d, &vcpu->kvm->arch.rtas_tokens, list) {
@ -256,7 +256,7 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
}
}
mutex_unlock(&vcpu->kvm->lock);
mutex_unlock(&vcpu->kvm->arch.rtas_token_lock);
if (rc == 0) {
args.rets = orig_rets;
@ -282,8 +282,6 @@ void kvmppc_rtas_tokens_free(struct kvm *kvm)
{
struct rtas_token_definition *d, *tmp;
lockdep_assert_held(&kvm->lock);
list_for_each_entry_safe(d, tmp, &kvm->arch.rtas_tokens, list) {
list_del(&d->list);
kfree(d);

View file

@ -1723,7 +1723,6 @@ static void kvmppc_xive_cleanup_irq(u32 hw_num, struct xive_irq_data *xd)
{
xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_01);
xive_native_configure_irq(hw_num, 0, MASKED, 0);
xive_cleanup_irq_data(xd);
}
static void kvmppc_xive_free_sources(struct kvmppc_xive_src_block *sb)
@ -1737,9 +1736,10 @@ static void kvmppc_xive_free_sources(struct kvmppc_xive_src_block *sb)
continue;
kvmppc_xive_cleanup_irq(state->ipi_number, &state->ipi_data);
xive_cleanup_irq_data(&state->ipi_data);
xive_native_free_irq(state->ipi_number);
/* Pass-through, cleanup too */
/* Pass-through, cleanup too but keep IRQ hw data */
if (state->pt_number)
kvmppc_xive_cleanup_irq(state->pt_number, state->pt_data);

View file

@ -632,6 +632,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_MAX_VCPUS:
r = KVM_MAX_VCPUS;
break;
case KVM_CAP_MAX_VCPU_ID:
r = KVM_MAX_VCPU_ID;
break;
#ifdef CONFIG_PPC_BOOK3S_64
case KVM_CAP_PPC_GET_SMMU_INFO:
r = 1;

View file

@ -1827,6 +1827,7 @@ static int power_pmu_event_init(struct perf_event *event)
int n;
int err;
struct cpu_hw_events *cpuhw;
u64 bhrb_filter;
if (!ppmu)
return -ENOENT;
@ -1932,13 +1933,14 @@ static int power_pmu_event_init(struct perf_event *event)
err = power_check_constraints(cpuhw, events, cflags, n + 1);
if (has_branch_stack(event)) {
cpuhw->bhrb_filter = ppmu->bhrb_filter_map(
bhrb_filter = ppmu->bhrb_filter_map(
event->attr.branch_sample_type);
if (cpuhw->bhrb_filter == -1) {
if (bhrb_filter == -1) {
put_cpu_var(cpu_hw_events);
return -EOPNOTSUPP;
}
cpuhw->bhrb_filter = bhrb_filter;
}
put_cpu_var(cpu_hw_events);

View file

@ -29,6 +29,7 @@ enum {
#define POWER8_MMCRA_IFM1 0x0000000040000000UL
#define POWER8_MMCRA_IFM2 0x0000000080000000UL
#define POWER8_MMCRA_IFM3 0x00000000C0000000UL
#define POWER8_MMCRA_BHRB_MASK 0x00000000C0000000UL
/*
* Raw event encoding for PowerISA v2.07 (Power8):
@ -243,6 +244,8 @@ static u64 power8_bhrb_filter_map(u64 branch_sample_type)
static void power8_config_bhrb(u64 pmu_bhrb_filter)
{
pmu_bhrb_filter &= POWER8_MMCRA_BHRB_MASK;
/* Enable BHRB filter in PMU */
mtspr(SPRN_MMCRA, (mfspr(SPRN_MMCRA) | pmu_bhrb_filter));
}

View file

@ -100,6 +100,7 @@ enum {
#define POWER9_MMCRA_IFM1 0x0000000040000000UL
#define POWER9_MMCRA_IFM2 0x0000000080000000UL
#define POWER9_MMCRA_IFM3 0x00000000C0000000UL
#define POWER9_MMCRA_BHRB_MASK 0x00000000C0000000UL
/* Nasty Power9 specific hack */
#define PVR_POWER9_CUMULUS 0x00002000
@ -308,6 +309,8 @@ static u64 power9_bhrb_filter_map(u64 branch_sample_type)
static void power9_config_bhrb(u64 pmu_bhrb_filter)
{
pmu_bhrb_filter &= POWER9_MMCRA_BHRB_MASK;
/* Enable BHRB filter in PMU */
mtspr(SPRN_MMCRA, (mfspr(SPRN_MMCRA) | pmu_bhrb_filter));
}

View file

@ -161,6 +161,10 @@ static int imc_pmu_create(struct device_node *parent, int pmu_index, int domain)
struct imc_pmu *pmu_ptr;
u32 offset;
/* Return for unknown domain */
if (domain < 0)
return -EINVAL;
/* memory for pmu */
pmu_ptr = kzalloc(sizeof(*pmu_ptr), GFP_KERNEL);
if (!pmu_ptr)

View file

@ -27,14 +27,14 @@
#include <linux/module.h>
#include <linux/cpufeature.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/fips.h>
#include <linux/string.h>
#include <crypto/xts.h>
#include <asm/cpacf.h>
static u8 *ctrblk;
static DEFINE_SPINLOCK(ctrblk_lock);
static DEFINE_MUTEX(ctrblk_lock);
static cpacf_mask_t km_functions, kmc_functions, kmctr_functions,
kma_functions;
@ -698,7 +698,7 @@ static int ctr_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
unsigned int n, nbytes;
int ret, locked;
locked = spin_trylock(&ctrblk_lock);
locked = mutex_trylock(&ctrblk_lock);
ret = blkcipher_walk_virt_block(desc, walk, AES_BLOCK_SIZE);
while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
@ -716,7 +716,7 @@ static int ctr_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
ret = blkcipher_walk_done(desc, walk, nbytes - n);
}
if (locked)
spin_unlock(&ctrblk_lock);
mutex_unlock(&ctrblk_lock);
/*
* final block may be < AES_BLOCK_SIZE, copy only nbytes
*/
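
The trylock is what keeps this path usable under contention: the shared ctrblk buffer is an optimization, not a requirement, and a caller that loses the race simply works block by block out of a private buffer. A rough userspace analogue of the pattern (pthreads; purely illustrative):

#include <pthread.h>
#include <string.h>

static pthread_mutex_t ctrblk_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned char ctrblk[4096];      /* shared: many blocks at once */

void ctr_crypt_sketch(const unsigned char iv[16])
{
        unsigned char local[16];        /* fallback: one block at a time */
        int locked = (pthread_mutex_trylock(&ctrblk_lock) == 0);
        unsigned char *buf = locked ? ctrblk : local;

        memcpy(buf, iv, 16);
        /* ... generate counter blocks into buf and encrypt with them ... */

        if (locked)
                pthread_mutex_unlock(&ctrblk_lock);
}

The spinlock-to-mutex switch above fits the same theme: code on this path can end up sleeping while walking scatterlists, which is not allowed with a spinlock held but is fine under a mutex.
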
@ -826,19 +826,45 @@ static int gcm_aes_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
return 0;
}
static void gcm_sg_walk_start(struct gcm_sg_walk *gw, struct scatterlist *sg,
unsigned int len)
static void gcm_walk_start(struct gcm_sg_walk *gw, struct scatterlist *sg,
unsigned int len)
{
memset(gw, 0, sizeof(*gw));
gw->walk_bytes_remain = len;
scatterwalk_start(&gw->walk, sg);
}
static int gcm_sg_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
static inline unsigned int _gcm_sg_clamp_and_map(struct gcm_sg_walk *gw)
{
struct scatterlist *nextsg;
gw->walk_bytes = scatterwalk_clamp(&gw->walk, gw->walk_bytes_remain);
while (!gw->walk_bytes) {
nextsg = sg_next(gw->walk.sg);
if (!nextsg)
return 0;
scatterwalk_start(&gw->walk, nextsg);
gw->walk_bytes = scatterwalk_clamp(&gw->walk,
gw->walk_bytes_remain);
}
gw->walk_ptr = scatterwalk_map(&gw->walk);
return gw->walk_bytes;
}
static inline void _gcm_sg_unmap_and_advance(struct gcm_sg_walk *gw,
unsigned int nbytes)
{
gw->walk_bytes_remain -= nbytes;
scatterwalk_unmap(&gw->walk);
scatterwalk_advance(&gw->walk, nbytes);
scatterwalk_done(&gw->walk, 0, gw->walk_bytes_remain);
gw->walk_ptr = NULL;
}
static int gcm_in_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
{
int n;
/* minbytesneeded <= AES_BLOCK_SIZE */
if (gw->buf_bytes && gw->buf_bytes >= minbytesneeded) {
gw->ptr = gw->buf;
gw->nbytes = gw->buf_bytes;
@ -851,13 +877,11 @@ static int gcm_sg_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
goto out;
}
gw->walk_bytes = scatterwalk_clamp(&gw->walk, gw->walk_bytes_remain);
if (!gw->walk_bytes) {
scatterwalk_start(&gw->walk, sg_next(gw->walk.sg));
gw->walk_bytes = scatterwalk_clamp(&gw->walk,
gw->walk_bytes_remain);
if (!_gcm_sg_clamp_and_map(gw)) {
gw->ptr = NULL;
gw->nbytes = 0;
goto out;
}
gw->walk_ptr = scatterwalk_map(&gw->walk);
if (!gw->buf_bytes && gw->walk_bytes >= minbytesneeded) {
gw->ptr = gw->walk_ptr;
@ -869,51 +893,90 @@ static int gcm_sg_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
n = min(gw->walk_bytes, AES_BLOCK_SIZE - gw->buf_bytes);
memcpy(gw->buf + gw->buf_bytes, gw->walk_ptr, n);
gw->buf_bytes += n;
gw->walk_bytes_remain -= n;
scatterwalk_unmap(&gw->walk);
scatterwalk_advance(&gw->walk, n);
scatterwalk_done(&gw->walk, 0, gw->walk_bytes_remain);
_gcm_sg_unmap_and_advance(gw, n);
if (gw->buf_bytes >= minbytesneeded) {
gw->ptr = gw->buf;
gw->nbytes = gw->buf_bytes;
goto out;
}
gw->walk_bytes = scatterwalk_clamp(&gw->walk,
gw->walk_bytes_remain);
if (!gw->walk_bytes) {
scatterwalk_start(&gw->walk, sg_next(gw->walk.sg));
gw->walk_bytes = scatterwalk_clamp(&gw->walk,
gw->walk_bytes_remain);
if (!_gcm_sg_clamp_and_map(gw)) {
gw->ptr = NULL;
gw->nbytes = 0;
goto out;
}
gw->walk_ptr = scatterwalk_map(&gw->walk);
}
out:
return gw->nbytes;
}
static void gcm_sg_walk_done(struct gcm_sg_walk *gw, unsigned int bytesdone)
static int gcm_out_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
{
int n;
if (gw->walk_bytes_remain == 0) {
gw->ptr = NULL;
gw->nbytes = 0;
goto out;
}
if (!_gcm_sg_clamp_and_map(gw)) {
gw->ptr = NULL;
gw->nbytes = 0;
goto out;
}
if (gw->walk_bytes >= minbytesneeded) {
gw->ptr = gw->walk_ptr;
gw->nbytes = gw->walk_bytes;
goto out;
}
scatterwalk_unmap(&gw->walk);
gw->walk_ptr = NULL;
gw->ptr = gw->buf;
gw->nbytes = sizeof(gw->buf);
out:
return gw->nbytes;
}
static int gcm_in_walk_done(struct gcm_sg_walk *gw, unsigned int bytesdone)
{
if (gw->ptr == NULL)
return;
return 0;
if (gw->ptr == gw->buf) {
n = gw->buf_bytes - bytesdone;
int n = gw->buf_bytes - bytesdone;
if (n > 0) {
memmove(gw->buf, gw->buf + bytesdone, n);
gw->buf_bytes -= n;
gw->buf_bytes = n;
} else
gw->buf_bytes = 0;
} else {
gw->walk_bytes_remain -= bytesdone;
scatterwalk_unmap(&gw->walk);
scatterwalk_advance(&gw->walk, bytesdone);
scatterwalk_done(&gw->walk, 0, gw->walk_bytes_remain);
}
} else
_gcm_sg_unmap_and_advance(gw, bytesdone);
return bytesdone;
}
static int gcm_out_walk_done(struct gcm_sg_walk *gw, unsigned int bytesdone)
{
int i, n;
if (gw->ptr == NULL)
return 0;
if (gw->ptr == gw->buf) {
for (i = 0; i < bytesdone; i += n) {
if (!_gcm_sg_clamp_and_map(gw))
return i;
n = min(gw->walk_bytes, bytesdone - i);
memcpy(gw->walk_ptr, gw->buf + i, n);
_gcm_sg_unmap_and_advance(gw, n);
}
} else
_gcm_sg_unmap_and_advance(gw, bytesdone);
return bytesdone;
}
static int gcm_aes_crypt(struct aead_request *req, unsigned int flags)
@ -926,7 +989,7 @@ static int gcm_aes_crypt(struct aead_request *req, unsigned int flags)
unsigned int pclen = req->cryptlen;
int ret = 0;
unsigned int len, in_bytes, out_bytes,
unsigned int n, len, in_bytes, out_bytes,
min_bytes, bytes, aad_bytes, pc_bytes;
struct gcm_sg_walk gw_in, gw_out;
u8 tag[GHASH_DIGEST_SIZE];
@ -963,14 +1026,14 @@ static int gcm_aes_crypt(struct aead_request *req, unsigned int flags)
*(u32 *)(param.j0 + ivsize) = 1;
memcpy(param.k, ctx->key, ctx->key_len);
gcm_sg_walk_start(&gw_in, req->src, len);
gcm_sg_walk_start(&gw_out, req->dst, len);
gcm_walk_start(&gw_in, req->src, len);
gcm_walk_start(&gw_out, req->dst, len);
do {
min_bytes = min_t(unsigned int,
aadlen > 0 ? aadlen : pclen, AES_BLOCK_SIZE);
in_bytes = gcm_sg_walk_go(&gw_in, min_bytes);
out_bytes = gcm_sg_walk_go(&gw_out, min_bytes);
in_bytes = gcm_in_walk_go(&gw_in, min_bytes);
out_bytes = gcm_out_walk_go(&gw_out, min_bytes);
bytes = min(in_bytes, out_bytes);
if (aadlen + pclen <= bytes) {
@ -997,8 +1060,11 @@ static int gcm_aes_crypt(struct aead_request *req, unsigned int flags)
gw_in.ptr + aad_bytes, pc_bytes,
gw_in.ptr, aad_bytes);
gcm_sg_walk_done(&gw_in, aad_bytes + pc_bytes);
gcm_sg_walk_done(&gw_out, aad_bytes + pc_bytes);
n = aad_bytes + pc_bytes;
if (gcm_in_walk_done(&gw_in, n) != n)
return -ENOMEM;
if (gcm_out_walk_done(&gw_out, n) != n)
return -ENOMEM;
aadlen -= aad_bytes;
pclen -= pc_bytes;
} while (aadlen + pclen > 0);

View file

@ -14,6 +14,7 @@
#include <linux/cpufeature.h>
#include <linux/crypto.h>
#include <linux/fips.h>
#include <linux/mutex.h>
#include <crypto/algapi.h>
#include <crypto/des.h>
#include <asm/cpacf.h>
@ -21,7 +22,7 @@
#define DES3_KEY_SIZE (3 * DES_KEY_SIZE)
static u8 *ctrblk;
static DEFINE_SPINLOCK(ctrblk_lock);
static DEFINE_MUTEX(ctrblk_lock);
static cpacf_mask_t km_functions, kmc_functions, kmctr_functions;
@ -387,7 +388,7 @@ static int ctr_desall_crypt(struct blkcipher_desc *desc, unsigned long fc,
unsigned int n, nbytes;
int ret, locked;
locked = spin_trylock(&ctrblk_lock);
locked = mutex_trylock(&ctrblk_lock);
ret = blkcipher_walk_virt_block(desc, walk, DES_BLOCK_SIZE);
while ((nbytes = walk->nbytes) >= DES_BLOCK_SIZE) {
@ -404,7 +405,7 @@ static int ctr_desall_crypt(struct blkcipher_desc *desc, unsigned long fc,
ret = blkcipher_walk_done(desc, walk, nbytes - n);
}
if (locked)
spin_unlock(&ctrblk_lock);
mutex_unlock(&ctrblk_lock);
/* final block may be < DES_BLOCK_SIZE, copy only nbytes */
if (nbytes) {
cpacf_kmctr(fc, ctx->key, buf, walk->src.virt.addr,

View file

@ -56,8 +56,10 @@ raw_copy_from_user(void *to, const void __user *from, unsigned long n);
unsigned long __must_check
raw_copy_to_user(void __user *to, const void *from, unsigned long n);
#ifndef CONFIG_KASAN
#define INLINE_COPY_FROM_USER
#define INLINE_COPY_TO_USER
#endif
#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES

View file

@ -489,6 +489,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
break;
case KVM_CAP_NR_VCPUS:
case KVM_CAP_MAX_VCPUS:
case KVM_CAP_MAX_VCPU_ID:
r = KVM_S390_BSCA_CPU_SLOTS;
if (!kvm_s390_use_sca_entries())
r = KVM_MAX_VCPUS;
@ -4155,21 +4156,28 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
const struct kvm_memory_slot *new,
enum kvm_mr_change change)
{
int rc;
int rc = 0;
/* If the basics of the memslot do not change, we do not want
* to update the gmap. Every update causes several unnecessary
* segment translation exceptions. This is usually handled just
* fine by the normal fault handler + gmap, but it will also
* cause faults on the prefix page of running guest CPUs.
*/
if (old->userspace_addr == mem->userspace_addr &&
old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
old->npages * PAGE_SIZE == mem->memory_size)
return;
rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
mem->guest_phys_addr, mem->memory_size);
switch (change) {
case KVM_MR_DELETE:
rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,
old->npages * PAGE_SIZE);
break;
case KVM_MR_MOVE:
rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,
old->npages * PAGE_SIZE);
if (rc)
break;
/* FALLTHROUGH */
case KVM_MR_CREATE:
rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
mem->guest_phys_addr, mem->memory_size);
break;
case KVM_MR_FLAGS_ONLY:
break;
default:
WARN(1, "Unknown KVM MR CHANGE: %d\n", change);
}
if (rc)
pr_warn("failed to commit memory region\n");
return;

View file

@ -107,7 +107,6 @@ void bust_spinlocks(int yes)
/*
* Find out which address space caused the exception.
* Access register mode is impossible, ignore space == 3.
*/
static inline enum fault_type get_fault_type(struct pt_regs *regs)
{
@ -132,6 +131,10 @@ static inline enum fault_type get_fault_type(struct pt_regs *regs)
}
return VDSO_FAULT;
}
if (trans_exc_code == 1) {
/* access register mode, not used in the kernel */
return USER_FAULT;
}
/* home space exception -> access via kernel ASCE */
return KERNEL_FAULT;
}

View file

@ -587,7 +587,7 @@ xcall_flush_tlb_kernel_range: /* 44 insns */
sub %g7, %g1, %g3
srlx %g3, 18, %g2
brnz,pn %g2, 2f
add %g2, 1, %g2
sethi %hi(PAGE_SIZE), %g2
sub %g3, %g2, %g3
or %g1, 0x20, %g1 ! Nucleus
1: stxa %g0, [%g1 + %g3] ASI_DMMU_DEMAP
@ -751,7 +751,7 @@ __cheetah_xcall_flush_tlb_kernel_range: /* 44 insns */
sub %g7, %g1, %g3
srlx %g3, 18, %g2
brnz,pn %g2, 2f
add %g2, 1, %g2
sethi %hi(PAGE_SIZE), %g2
sub %g3, %g2, %g3
or %g1, 0x20, %g1 ! Nucleus
1: stxa %g0, [%g1 + %g3] ASI_DMMU_DEMAP

View file

@ -56,7 +56,7 @@ static int itimer_one_shot(struct clock_event_device *evt)
static struct clock_event_device timer_clockevent = {
.name = "posix-timer",
.rating = 250,
.cpumask = cpu_all_mask,
.cpumask = cpu_possible_mask,
.features = CLOCK_EVT_FEAT_PERIODIC |
CLOCK_EVT_FEAT_ONESHOT,
.set_state_shutdown = itimer_shutdown,

View file

@ -308,6 +308,8 @@ PHONY += vdso_install
vdso_install:
$(Q)$(MAKE) $(build)=arch/x86/entry/vdso $@
archprepare: checkbin
checkbin:
ifdef CONFIG_RETPOLINE
ifeq ($(RETPOLINE_CFLAGS),)
@echo "You are building kernel with non-retpoline compiler." >&2

View file

@ -68,7 +68,6 @@ CONFIG_IA32_EMULATION=y
# CONFIG_FIRMWARE_MEMMAP is not set
CONFIG_OPROFILE=y
CONFIG_KPROBES=y
CONFIG_JUMP_LABEL=y
CONFIG_LTO_CLANG=y
CONFIG_CFI_CLANG=y
CONFIG_REFCOUNT_FULL=y

View file

@ -3072,7 +3072,7 @@ static int intel_pmu_hw_config(struct perf_event *event)
return ret;
if (event->attr.precise_ip) {
if (!(event->attr.freq || event->attr.wakeup_events)) {
if (!(event->attr.freq || (event->attr.wakeup_events && !event->attr.watermark))) {
event->hw.flags |= PERF_X86_EVENT_AUTO_RELOAD;
if (!(event->attr.sample_type &
~intel_pmu_large_pebs_flags(event)))

View file

@ -684,7 +684,7 @@ struct event_constraint intel_core2_pebs_event_constraints[] = {
INTEL_FLAGS_UEVENT_CONSTRAINT(0x1fc7, 0x1), /* SIMD_INST_RETURED.ANY */
INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED.* */
/* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x01),
INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x01),
EVENT_CONSTRAINT_END
};
@ -693,7 +693,7 @@ struct event_constraint intel_atom_pebs_event_constraints[] = {
INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c5, 0x1), /* MISPREDICTED_BRANCH_RETIRED */
INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED.* */
/* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x01),
INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x01),
/* Allow all events as PEBS with no flags */
INTEL_ALL_EVENT_CONSTRAINT(0, 0x1),
EVENT_CONSTRAINT_END
@ -701,7 +701,7 @@ struct event_constraint intel_atom_pebs_event_constraints[] = {
struct event_constraint intel_slm_pebs_event_constraints[] = {
/* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x1),
INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x1),
/* Allow all events as PEBS with no flags */
INTEL_ALL_EVENT_CONSTRAINT(0, 0x1),
EVENT_CONSTRAINT_END
@ -726,7 +726,7 @@ struct event_constraint intel_nehalem_pebs_event_constraints[] = {
INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0xf), /* MEM_LOAD_RETIRED.* */
INTEL_FLAGS_EVENT_CONSTRAINT(0xf7, 0xf), /* FP_ASSIST.* */
/* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x0f),
INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x0f),
EVENT_CONSTRAINT_END
};
@ -743,7 +743,7 @@ struct event_constraint intel_westmere_pebs_event_constraints[] = {
INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0xf), /* MEM_LOAD_RETIRED.* */
INTEL_FLAGS_EVENT_CONSTRAINT(0xf7, 0xf), /* FP_ASSIST.* */
/* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x0f),
INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x0f),
EVENT_CONSTRAINT_END
};
@ -752,7 +752,7 @@ struct event_constraint intel_snb_pebs_event_constraints[] = {
INTEL_PLD_CONSTRAINT(0x01cd, 0x8), /* MEM_TRANS_RETIRED.LAT_ABOVE_THR */
INTEL_PST_CONSTRAINT(0x02cd, 0x8), /* MEM_TRANS_RETIRED.PRECISE_STORES */
/* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c2, 0xf),
INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c2, 0xf),
INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOP_RETIRED.* */
INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
@ -767,9 +767,9 @@ struct event_constraint intel_ivb_pebs_event_constraints[] = {
INTEL_PLD_CONSTRAINT(0x01cd, 0x8), /* MEM_TRANS_RETIRED.LAT_ABOVE_THR */
INTEL_PST_CONSTRAINT(0x02cd, 0x8), /* MEM_TRANS_RETIRED.PRECISE_STORES */
/* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c2, 0xf),
INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c2, 0xf),
/* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */
INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c0, 0x2),
INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c0, 0x2),
INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOP_RETIRED.* */
INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
@ -783,9 +783,9 @@ struct event_constraint intel_hsw_pebs_event_constraints[] = {
INTEL_FLAGS_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
INTEL_PLD_CONSTRAINT(0x01cd, 0xf), /* MEM_TRANS_RETIRED.* */
/* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c2, 0xf),
INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c2, 0xf),
/* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */
INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c0, 0x2),
INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c0, 0x2),
INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(0x01c2, 0xf), /* UOPS_RETIRED.ALL */
INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(0x11d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_LOADS */
INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(0x21d0, 0xf), /* MEM_UOPS_RETIRED.LOCK_LOADS */
@ -806,9 +806,9 @@ struct event_constraint intel_bdw_pebs_event_constraints[] = {
INTEL_FLAGS_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
INTEL_PLD_CONSTRAINT(0x01cd, 0xf), /* MEM_TRANS_RETIRED.* */
/* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c2, 0xf),
INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c2, 0xf),
/* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */
INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c0, 0x2),
INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c0, 0x2),
INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(0x01c2, 0xf), /* UOPS_RETIRED.ALL */
INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x11d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_LOADS */
INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x21d0, 0xf), /* MEM_UOPS_RETIRED.LOCK_LOADS */
@ -829,9 +829,9 @@ struct event_constraint intel_bdw_pebs_event_constraints[] = {
struct event_constraint intel_skl_pebs_event_constraints[] = {
INTEL_FLAGS_UEVENT_CONSTRAINT(0x1c0, 0x2), /* INST_RETIRED.PREC_DIST */
/* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */
INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c0, 0x2),
INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c0, 0x2),
/* INST_RETIRED.TOTAL_CYCLES_PS (inv=1, cmask=16) (cycles:p). */
INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x0f),
INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x0f),
INTEL_PLD_CONSTRAINT(0x1cd, 0xf), /* MEM_TRANS_RETIRED.* */
INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x11d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_LOADS */
INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x12d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_STORES */

View file

@ -819,8 +819,11 @@ static void init_amd_zn(struct cpuinfo_x86 *c)
{
set_cpu_cap(c, X86_FEATURE_ZEN);
/* Fix erratum 1076: CPB feature bit not being set in CPUID. */
if (!cpu_has(c, X86_FEATURE_CPB))
/*
* Fix erratum 1076: CPB feature bit not being set in CPUID.
* Always set it, except when running under a hypervisor.
*/
if (!cpu_has(c, X86_FEATURE_HYPERVISOR) && !cpu_has(c, X86_FEATURE_CPB))
set_cpu_cap(c, X86_FEATURE_CPB);
}

View file

@ -371,6 +371,9 @@ static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_domain *dom_mbm)
struct list_head *head;
struct rdtgroup *entry;
if (!is_mbm_local_enabled())
return;
r_mba = &rdt_resources_all[RDT_RESOURCE_MBA];
closid = rgrp->closid;
rmid = rgrp->mon.rmid;

View file

@ -873,7 +873,7 @@ int __init microcode_init(void)
goto out_ucode_group;
register_syscore_ops(&mc_syscore_ops);
cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/microcode:online",
cpuhp_setup_state_nocalls(CPUHP_AP_MICROCODE_LOADER, "x86/microcode:online",
mc_cpu_online, mc_cpu_down_prep);
pr_info("Microcode Update Driver: v%s.", DRIVER_VERSION);

View file

@ -752,18 +752,21 @@ union ftrace_op_code_union {
} __attribute__((packed));
};
#define RET_SIZE 1
static unsigned long
create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
{
unsigned const char *jmp;
unsigned long start_offset;
unsigned long end_offset;
unsigned long op_offset;
unsigned long offset;
unsigned long npages;
unsigned long size;
unsigned long ip;
unsigned long retq;
unsigned long *ptr;
void *trampoline;
void *ip;
/* 48 8b 15 <offset> is movq <offset>(%rip), %rdx */
unsigned const char op_ref[] = { 0x48, 0x8b, 0x15 };
union ftrace_op_code_union op_ptr;
@ -783,27 +786,28 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
/*
* Allocate enough size to store the ftrace_caller code,
* the jmp to ftrace_epilogue, as well as the address of
* the ftrace_ops this trampoline is used for.
* the retq, as well as the address of the ftrace_ops this
* trampoline is used for.
*/
trampoline = alloc_tramp(size + MCOUNT_INSN_SIZE + sizeof(void *));
trampoline = alloc_tramp(size + RET_SIZE + sizeof(void *));
if (!trampoline)
return 0;
*tramp_size = size + MCOUNT_INSN_SIZE + sizeof(void *);
*tramp_size = size + RET_SIZE + sizeof(void *);
npages = DIV_ROUND_UP(*tramp_size, PAGE_SIZE);
/* Copy ftrace_caller onto the trampoline memory */
ret = probe_kernel_read(trampoline, (void *)start_offset, size);
if (WARN_ON(ret < 0)) {
tramp_free(trampoline, *tramp_size);
return 0;
}
if (WARN_ON(ret < 0))
goto fail;
ip = (unsigned long)trampoline + size;
ip = trampoline + size;
/* The trampoline ends with a jmp to ftrace_epilogue */
jmp = ftrace_jmp_replace(ip, (unsigned long)ftrace_epilogue);
memcpy(trampoline + size, jmp, MCOUNT_INSN_SIZE);
/* The trampoline ends with ret(q) */
retq = (unsigned long)ftrace_stub;
ret = probe_kernel_read(ip, (void *)retq, RET_SIZE);
if (WARN_ON(ret < 0))
goto fail;
/*
* The address of the ftrace_ops that is used for this trampoline
@ -813,17 +817,15 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
* the global function_trace_op variable.
*/
ptr = (unsigned long *)(trampoline + size + MCOUNT_INSN_SIZE);
ptr = (unsigned long *)(trampoline + size + RET_SIZE);
*ptr = (unsigned long)ops;
op_offset -= start_offset;
memcpy(&op_ptr, trampoline + op_offset, OP_REF_SIZE);
/* Are we pointing to the reference? */
if (WARN_ON(memcmp(op_ptr.op, op_ref, 3) != 0)) {
tramp_free(trampoline, *tramp_size);
return 0;
}
if (WARN_ON(memcmp(op_ptr.op, op_ref, 3) != 0))
goto fail;
/* Load the contents of ptr into the callback parameter */
offset = (unsigned long)ptr;
@ -837,7 +839,16 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
/* ALLOC_TRAMP flags lets us know we created it */
ops->flags |= FTRACE_OPS_FL_ALLOC_TRAMP;
/*
* Module allocation needs to be completed by making the page
* executable. The page is still writable, which is a security hazard,
* but ftrace already breaks W^X completely anyway.
*/
set_memory_x((unsigned long)trampoline, npages);
return (unsigned long)trampoline;
fail:
tramp_free(trampoline, *tramp_size);
return 0;
}
static unsigned long calc_trampoline_call_offset(bool save_regs)

View file

@ -171,9 +171,6 @@ GLOBAL(ftrace_call)
restore_mcount_regs
/*
* The copied trampoline must call ftrace_epilogue as it
* still may need to call the function graph tracer.
*
* The code up to this label is copied into trampolines so
* think twice before adding any new code or changing the
* layout here.
@ -185,7 +182,10 @@ GLOBAL(ftrace_graph_call)
jmp ftrace_stub
#endif
/* This is weak to keep gas from relaxing the jumps */
/*
* This is weak to keep gas from relaxing the jumps.
* It is also used to copy the retq for trampolines.
*/
WEAK(ftrace_stub)
retq
ENDPROC(ftrace_caller)

View file

@ -431,8 +431,20 @@ void *alloc_insn_page(void)
void *page;
page = module_alloc(PAGE_SIZE);
if (page)
set_memory_ro((unsigned long)page & PAGE_MASK, 1);
if (!page)
return NULL;
/*
* First make the page read-only, and only then make it executable to
* prevent it from being W+X in between.
*/
set_memory_ro((unsigned long)page, 1);
/*
* TODO: Once additional kernel code protection mechanisms are set, ensure
* that the page was not maliciously altered and it is still zeroed.
*/
set_memory_x((unsigned long)page, 1);
return page;
}
@ -440,8 +452,12 @@ void *alloc_insn_page(void)
/* Recover page to RW mode before releasing it */
void free_insn_page(void *page)
{
set_memory_nx((unsigned long)page & PAGE_MASK, 1);
set_memory_rw((unsigned long)page & PAGE_MASK, 1);
/*
* First make the page non-executable, and only then make it writable to
* prevent it from being W+X in between.
*/
set_memory_nx((unsigned long)page, 1);
set_memory_rw((unsigned long)page, 1);
module_memfree(page);
}
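
The ordering matters beyond kprobes. A userspace analogue with mprotect() (illustrative sketch; assumes the payload fits in one page) makes the avoided W+X window explicit:

#include <stddef.h>
#include <string.h>
#include <sys/mman.h>

/* Illustrative: mirror alloc_insn_page()'s ordering in userspace.
 * Write while the page is RW, then drop write, then add execute, so
 * the mapping is never writable and executable at the same time. */
static void *alloc_exec_page(const void *code, size_t len)
{
        void *p;

        if (len > 4096)                 /* sketch: single-page payloads only */
                return NULL;
        p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
                return NULL;
        memcpy(p, code, len);
        mprotect(p, 4096, PROT_READ);                   /* drop write first */
        mprotect(p, 4096, PROT_READ | PROT_EXEC);       /* then add execute */
        return p;
}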

View file

@ -151,10 +151,10 @@ SECTIONS
*(.text.__x86.indirect_thunk)
__indirect_thunk_end = .;
#endif
} :text = 0x9090
/* End of text section */
_etext = .;
/* End of text section */
_etext = .;
} :text = 0x9090
NOTES :text :note

View file

@ -282,20 +282,16 @@ int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
{
bool fast_mode = idx & (1u << 31);
struct kvm_pmc *pmc;
u64 ctr_val;
u64 mask = fast_mode ? ~0u : ~0ull;
if (is_vmware_backdoor_pmc(idx))
return kvm_pmu_rdpmc_vmware(vcpu, idx, data);
pmc = kvm_x86_ops->pmu_ops->msr_idx_to_pmc(vcpu, idx);
pmc = kvm_x86_ops->pmu_ops->msr_idx_to_pmc(vcpu, idx, &mask);
if (!pmc)
return 1;
ctr_val = pmc_read_counter(pmc);
if (fast_mode)
ctr_val = (u32)ctr_val;
*data = ctr_val;
*data = pmc_read_counter(pmc) & mask;
return 0;
}

View file

@ -25,7 +25,8 @@ struct kvm_pmu_ops {
unsigned (*find_fixed_event)(int idx);
bool (*pmc_is_enabled)(struct kvm_pmc *pmc);
struct kvm_pmc *(*pmc_idx_to_pmc)(struct kvm_pmu *pmu, int pmc_idx);
struct kvm_pmc *(*msr_idx_to_pmc)(struct kvm_vcpu *vcpu, unsigned idx);
struct kvm_pmc *(*msr_idx_to_pmc)(struct kvm_vcpu *vcpu, unsigned idx,
u64 *mask);
int (*is_valid_msr_idx)(struct kvm_vcpu *vcpu, unsigned idx);
bool (*is_valid_msr)(struct kvm_vcpu *vcpu, u32 msr);
int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr, u64 *data);

View file

@ -186,7 +186,7 @@ static int amd_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned idx)
}
/* idx is the ECX register of RDPMC instruction */
static struct kvm_pmc *amd_msr_idx_to_pmc(struct kvm_vcpu *vcpu, unsigned idx)
static struct kvm_pmc *amd_msr_idx_to_pmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *mask)
{
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
struct kvm_pmc *counters;

View file

@ -126,7 +126,7 @@ static int intel_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned idx)
}
static struct kvm_pmc *intel_msr_idx_to_pmc(struct kvm_vcpu *vcpu,
unsigned idx)
unsigned idx, u64 *mask)
{
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
bool fixed = idx & (1u << 30);
@ -138,6 +138,7 @@ static struct kvm_pmc *intel_msr_idx_to_pmc(struct kvm_vcpu *vcpu,
if (fixed && idx >= pmu->nr_arch_fixed_counters)
return NULL;
counters = fixed ? pmu->fixed_counters : pmu->gp_counters;
*mask &= pmu->counter_bitmask[fixed ? KVM_PMC_FIXED : KVM_PMC_GP];
return &counters[idx];
}
@ -183,9 +184,13 @@ static int intel_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
*data = pmu->global_ovf_ctrl;
return 0;
default:
if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
(pmc = get_fixed_pmc(pmu, msr))) {
*data = pmc_read_counter(pmc);
if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0))) {
u64 val = pmc_read_counter(pmc);
*data = val & pmu->counter_bitmask[KVM_PMC_GP];
return 0;
} else if ((pmc = get_fixed_pmc(pmu, msr))) {
u64 val = pmc_read_counter(pmc);
*data = val & pmu->counter_bitmask[KVM_PMC_FIXED];
return 0;
} else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
*data = pmc->eventsel;
@ -235,11 +240,14 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
}
break;
default:
if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
(pmc = get_fixed_pmc(pmu, msr))) {
if (!msr_info->host_initiated)
data = (s64)(s32)data;
pmc->counter += data - pmc_read_counter(pmc);
if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0))) {
if (msr_info->host_initiated)
pmc->counter = data;
else
pmc->counter = (s32)data;
return 0;
} else if ((pmc = get_fixed_pmc(pmu, msr))) {
pmc->counter = data;
return 0;
} else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
if (data == pmc->eventsel)

View file

@ -2987,6 +2987,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_MAX_VCPUS:
r = KVM_MAX_VCPUS;
break;
case KVM_CAP_MAX_VCPU_ID:
r = KVM_MAX_VCPU_ID;
break;
case KVM_CAP_NR_MEMSLOTS:
r = KVM_USER_MEM_SLOTS;
break;

View file

@ -555,7 +555,8 @@ static int get_reg_offset_16(struct insn *insn, struct pt_regs *regs,
}
/**
* get_desc() - Obtain pointer to a segment descriptor
* get_desc() - Obtain contents of a segment descriptor
* @out: Segment descriptor contents on success
* @sel: Segment selector
*
* Given a segment selector, obtain a pointer to the segment descriptor.
@ -563,18 +564,18 @@ static int get_reg_offset_16(struct insn *insn, struct pt_regs *regs,
*
* Returns:
*
* Pointer to segment descriptor on success.
* True on success, false on failure.
*
* NULL on error.
*/
static struct desc_struct *get_desc(unsigned short sel)
static bool get_desc(struct desc_struct *out, unsigned short sel)
{
struct desc_ptr gdt_desc = {0, 0};
unsigned long desc_base;
#ifdef CONFIG_MODIFY_LDT_SYSCALL
if ((sel & SEGMENT_TI_MASK) == SEGMENT_LDT) {
struct desc_struct *desc = NULL;
bool success = false;
struct ldt_struct *ldt;
/* Bits [15:3] contain the index of the desired entry. */
@ -582,12 +583,14 @@ static struct desc_struct *get_desc(unsigned short sel)
mutex_lock(&current->active_mm->context.lock);
ldt = current->active_mm->context.ldt;
if (ldt && sel < ldt->nr_entries)
desc = &ldt->entries[sel];
if (ldt && sel < ldt->nr_entries) {
*out = ldt->entries[sel];
success = true;
}
mutex_unlock(&current->active_mm->context.lock);
return desc;
return success;
}
#endif
native_store_gdt(&gdt_desc);
@ -602,9 +605,10 @@ static struct desc_struct *get_desc(unsigned short sel)
desc_base = sel & ~(SEGMENT_RPL_MASK | SEGMENT_TI_MASK);
if (desc_base > gdt_desc.size)
return NULL;
return false;
return (struct desc_struct *)(gdt_desc.address + desc_base);
*out = *(struct desc_struct *)(gdt_desc.address + desc_base);
return true;
}
/**
@ -626,7 +630,7 @@ static struct desc_struct *get_desc(unsigned short sel)
*/
unsigned long insn_get_seg_base(struct pt_regs *regs, int seg_reg_idx)
{
struct desc_struct *desc;
struct desc_struct desc;
short sel;
sel = get_segment_selector(regs, seg_reg_idx);
@ -664,11 +668,10 @@ unsigned long insn_get_seg_base(struct pt_regs *regs, int seg_reg_idx)
if (!sel)
return -1L;
desc = get_desc(sel);
if (!desc)
if (!get_desc(&desc, sel))
return -1L;
return get_desc_base(desc);
return get_desc_base(&desc);
}
/**
@ -690,7 +693,7 @@ unsigned long insn_get_seg_base(struct pt_regs *regs, int seg_reg_idx)
*/
static unsigned long get_seg_limit(struct pt_regs *regs, int seg_reg_idx)
{
struct desc_struct *desc;
struct desc_struct desc;
unsigned long limit;
short sel;
@ -704,8 +707,7 @@ static unsigned long get_seg_limit(struct pt_regs *regs, int seg_reg_idx)
if (!sel)
return 0;
desc = get_desc(sel);
if (!desc)
if (!get_desc(&desc, sel))
return 0;
/*
@ -714,8 +716,8 @@ static unsigned long get_seg_limit(struct pt_regs *regs, int seg_reg_idx)
* not tested when checking the segment limits. In practice,
* this means that the segment ends in (limit << 12) + 0xfff.
*/
limit = get_desc_limit(desc);
if (desc->g)
limit = get_desc_limit(&desc);
if (desc.g)
limit = (limit << 12) + 0xfff;
return limit;
@ -739,7 +741,7 @@ static unsigned long get_seg_limit(struct pt_regs *regs, int seg_reg_idx)
*/
int insn_get_code_seg_params(struct pt_regs *regs)
{
struct desc_struct *desc;
struct desc_struct desc;
short sel;
if (v8086_mode(regs))
@ -750,8 +752,7 @@ int insn_get_code_seg_params(struct pt_regs *regs)
if (sel < 0)
return sel;
desc = get_desc(sel);
if (!desc)
if (!get_desc(&desc, sel))
return -EINVAL;
/*
@ -759,10 +760,10 @@ int insn_get_code_seg_params(struct pt_regs *regs)
* determines whether a segment contains data or code. If this is a data
* segment, return error.
*/
if (!(desc->type & BIT(3)))
if (!(desc.type & BIT(3)))
return -EINVAL;
switch ((desc->l << 1) | desc->d) {
switch ((desc.l << 1) | desc.d) {
case 0: /*
* Legacy mode. CS.L=0, CS.D=0. Address and operand size are
* both 16-bit.

View file

@ -198,7 +198,7 @@ static inline p4d_t *early_p4d_offset(pgd_t *pgd, unsigned long addr)
if (!pgtable_l5_enabled())
return (p4d_t *)pgd;
p4d = __pa_nodebug(pgd_val(*pgd)) & PTE_PFN_MASK;
p4d = pgd_val(*pgd) & PTE_PFN_MASK;
p4d += __START_KERNEL_map - phys_base;
return (p4d_t *)p4d + p4d_index(addr);
}

View file

@ -51,7 +51,7 @@ static __initdata struct kaslr_memory_region {
} kaslr_regions[] = {
{ &page_offset_base, 0 },
{ &vmalloc_base, 0 },
{ &vmemmap_base, 1 },
{ &vmemmap_base, 0 },
};
/* Get size in bytes used by the memory region */
@@ -77,6 +77,7 @@ void __init kernel_randomize_memory(void)
unsigned long rand, memory_tb;
struct rnd_state rand_state;
unsigned long remain_entropy;
unsigned long vmemmap_size;
vaddr_start = pgtable_l5_enabled() ? __PAGE_OFFSET_BASE_L5 : __PAGE_OFFSET_BASE_L4;
vaddr = vaddr_start;
@@ -108,6 +109,14 @@ void __init kernel_randomize_memory(void)
if (memory_tb < kaslr_regions[0].size_tb)
kaslr_regions[0].size_tb = memory_tb;
/*
* Calculate the vmemmap region size in TBs, aligned to a TB
* boundary.
*/
vmemmap_size = (kaslr_regions[0].size_tb << (TB_SHIFT - PAGE_SHIFT)) *
sizeof(struct page);
kaslr_regions[2].size_tb = DIV_ROUND_UP(vmemmap_size, 1UL << TB_SHIFT);
/* Calculate entropy available between regions */
remain_entropy = vaddr_end - vaddr_start;
for (i = 0; i < ARRAY_SIZE(kaslr_regions); i++)
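For reference, the vmemmap sizing above works out as follows: TB_SHIFT - PAGE_SHIFT is 40 - 12 = 28, so size_tb << 28 is the number of 4 KiB pages in size_tb terabytes, and each page costs one struct page (64 bytes on common x86-64 configs, an assumption in this sketch). A standalone check of the arithmetic:

#include <stdio.h>

#define TB_SHIFT   40
#define PAGE_SHIFT 12
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long long size_tb = 1;		/* 1 TB of physical memory */
	unsigned long long page_desc = 64;	/* assumed sizeof(struct page) */
	unsigned long long vmemmap_size =
		(size_tb << (TB_SHIFT - PAGE_SHIFT)) * page_desc;

	/* 2^28 pages x 64 bytes = 16 GiB of vmemmap per TB of RAM */
	printf("vmemmap bytes: %llu\n", vmemmap_size);
	/* DIV_ROUND_UP pads that to whole terabytes of virtual space */
	printf("region TBs: %llu\n",
	       DIV_ROUND_UP(vmemmap_size, 1ULL << TB_SHIFT));
	return 0;
}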
View file
@@ -1119,6 +1119,8 @@ static const struct dmi_system_id pciirq_dmi_table[] __initconst = {
void __init pcibios_irq_init(void)
{
struct irq_routing_table *rtable = NULL;
DBG(KERN_DEBUG "PCI: IRQ init\n");
if (raw_pci_ops == NULL)
@@ -1129,8 +1131,10 @@ void __init pcibios_irq_init(void)
pirq_table = pirq_find_routing_table();
#ifdef CONFIG_PCI_BIOS
if (!pirq_table && (pci_probe & PCI_BIOS_IRQ_SCAN))
if (!pirq_table && (pci_probe & PCI_BIOS_IRQ_SCAN)) {
pirq_table = pcibios_get_irq_routing_table();
rtable = pirq_table;
}
#endif
if (pirq_table) {
pirq_peer_trick();
@@ -1145,8 +1149,10 @@ void __init pcibios_irq_init(void)
* If we're using the I/O APIC, avoid using the PCI IRQ
* routing table
*/
if (io_apic_assign_pci_irqs)
if (io_apic_assign_pci_irqs) {
kfree(rtable);
pirq_table = NULL;
}
}
x86_init.pci.fixup_irqs();
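The new rtable variable above exists purely to track ownership: pirq_table may point either at a routing table found in BIOS memory (not ours to free) or at one allocated by pcibios_get_irq_routing_table(), and only the latter may be passed to kfree() when the I/O APIC path discards the table. A userspace sketch of the same track-what-you-allocated pattern (names hypothetical):

#include <stdlib.h>

static int bios_table[4];	/* stands in for a table in BIOS memory */

/* May hand back borrowed static storage or a fresh allocation. */
static int *find_table(int from_bios, int **owned)
{
	if (from_bios)
		return bios_table;
	*owned = malloc(sizeof(bios_table));
	return *owned;
}

int main(void)
{
	int *owned = NULL;	/* records only what we allocated ourselves */
	int *table = find_table(0, &owned);

	/* Later we decide not to use the table (the I/O APIC case): */
	free(owned);	/* frees our allocation; harmless NULL if borrowed */
	table = NULL;
	(void)table;
	return 0;
}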
View file
@@ -299,7 +299,17 @@ int hibernate_resume_nonboot_cpu_disable(void)
* address in its instruction pointer may not be possible to resolve
* any more at that point (the page tables used by it previously may
* have been overwritten by hibernate image data).
*
* First, make sure that we wake up all the potentially disabled SMT
* threads which have been initially brought up and then put into
* mwait/cpuidle sleep.
* Those will be put to proper (not interfering with hibernation
* resume) sleep afterwards, and the resumed kernel will decide for itself
* what to do with them.
*/
ret = cpuhp_smt_enable();
if (ret)
return ret;
smp_ops.play_dead = resume_play_dead;
ret = disable_nonboot_cpus();
smp_ops.play_dead = play_dead;
View file
@@ -13,6 +13,7 @@
#include <linux/suspend.h>
#include <linux/scatterlist.h>
#include <linux/kdebug.h>
#include <linux/cpu.h>
#include <crypto/hash.h>
@@ -363,3 +364,35 @@ int arch_hibernation_header_restore(void *addr)
return 0;
}
int arch_resume_nosmt(void)
{
int ret = 0;
/*
* We reached this while coming out of hibernation. This means
* that SMT siblings are sleeping in hlt, as mwait is not safe
* against control transition during resume (see comment in
* hibernate_resume_nonboot_cpu_disable()).
*
* If the resumed kernel has SMT disabled, we have to take all the
* SMT siblings out of hlt, and offline them again so that they
* end up in mwait proper.
*
* Called with hotplug disabled.
*/
cpu_hotplug_enable();
if (cpu_smt_control == CPU_SMT_DISABLED ||
cpu_smt_control == CPU_SMT_FORCE_DISABLED) {
enum cpuhp_smt_control old = cpu_smt_control;
ret = cpuhp_smt_enable();
if (ret)
goto out;
ret = cpuhp_smt_disable(old);
if (ret)
goto out;
}
out:
cpu_hotplug_disable();
return ret;
}
View file
@@ -2509,6 +2509,8 @@ static void bfq_arm_slice_timer(struct bfq_data *bfqd)
if (BFQQ_SEEKY(bfqq) && bfqq->wr_coeff == 1 &&
bfq_symmetric_scenario(bfqd))
sl = min_t(u64, sl, BFQ_MIN_TT);
else if (bfqq->wr_coeff > 1)
sl = max_t(u32, sl, 20ULL * NSEC_PER_MSEC);
bfqd->last_idling_start = ktime_get();
hrtimer_start(&bfqd->idle_slice_timer, ns_to_ktime(sl),
View file
@@ -411,7 +411,6 @@ void blk_sync_queue(struct request_queue *q)
struct blk_mq_hw_ctx *hctx;
int i;
cancel_delayed_work_sync(&q->requeue_work);
queue_for_each_hw_ctx(q, hctx, i)
cancel_delayed_work_sync(&hctx->run_work);
} else {
View file
@@ -2465,6 +2465,8 @@ void blk_mq_release(struct request_queue *q)
struct blk_mq_hw_ctx *hctx;
unsigned int i;
cancel_delayed_work_sync(&q->requeue_work);
/* hctx kobj stays in hctx */
queue_for_each_hw_ctx(q, hctx, i) {
if (!hctx)
View file
@@ -1176,7 +1176,6 @@ static int __init erst_init(void)
"Error Record Serialization Table (ERST) support is initialized.\n");
buf = kmalloc(erst_erange.size, GFP_KERNEL);
spin_lock_init(&erst_info.buf_lock);
if (buf) {
erst_info.buf = buf + sizeof(struct cper_pstore_record);
erst_info.bufsize = erst_erange.size -
View file
@@ -948,8 +948,8 @@ static bool acpi_dev_needs_resume(struct device *dev, struct acpi_device *adev)
u32 sys_target = acpi_target_system_state();
int ret, state;
if (!pm_runtime_suspended(dev) || !adev ||
device_may_wakeup(dev) != !!adev->wakeup.prepare_count)
if (!pm_runtime_suspended(dev) || !adev || (adev->wakeup.flags.valid &&
device_may_wakeup(dev) != !!adev->wakeup.prepare_count))
return true;
if (sys_target == ACPI_STATE_S0)
View file
@@ -3231,6 +3231,7 @@ static void binder_transaction(struct binder_proc *proc,
if (target_node && target_node->txn_security_ctx) {
u32 secid;
size_t added_size;
security_task_getsecid(proc->tsk, &secid);
ret = security_secid_to_secctx(secid, &secctx, &secctx_sz);
@@ -3240,7 +3241,15 @@ static void binder_transaction(struct binder_proc *proc,
return_error_line = __LINE__;
goto err_get_secctx_failed;
}
extra_buffers_size += ALIGN(secctx_sz, sizeof(u64));
added_size = ALIGN(secctx_sz, sizeof(u64));
extra_buffers_size += added_size;
if (extra_buffers_size < added_size) {
/* integer overflow of extra_buffers_size */
return_error = BR_FAILED_REPLY;
return_error_param = EINVAL;
return_error_line = __LINE__;
goto err_bad_extra_size;
}
}
trace_binder_transaction(reply, t, target_node);
@@ -3589,6 +3598,7 @@ err_copy_data_failed:
t->buffer->transaction = NULL;
binder_alloc_free_buf(&target_proc->alloc, t->buffer);
err_binder_alloc_buf_failed:
err_bad_extra_size:
if (secctx)
security_release_secctx(secctx, secctx_sz);
err_get_secctx_failed:
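The added binder check relies on defined unsigned wraparound: if extra_buffers_size + added_size overflows, the modular result is smaller than either operand, so comparing the sum against the addend detects it. A standalone illustration:

#include <stdint.h>
#include <stdio.h>

/* Returns 1 and stores the sum if a + b does not wrap, else 0. */
static int add_sizes(size_t a, size_t b, size_t *sum)
{
	*sum = a + b;		/* unsigned overflow wraps; defined in C */
	return *sum >= b;	/* a wrapped result is smaller than b */
}

int main(void)
{
	size_t sum;

	printf("ok=%d\n", add_sizes(16, 32, &sum));		/* ok=1 */
	printf("ok=%d\n", add_sizes(SIZE_MAX - 8, 32, &sum));	/* ok=0 */
	return 0;
}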
View file
@@ -4476,9 +4476,12 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
{ "ST3320[68]13AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
ATA_HORKAGE_FIRMWARE_WARN },
/* drives which fail FPDMA_AA activation (some may freeze afterwards) */
{ "ST1000LM024 HN-M101MBB", "2AR10001", ATA_HORKAGE_BROKEN_FPDMA_AA },
{ "ST1000LM024 HN-M101MBB", "2BA30001", ATA_HORKAGE_BROKEN_FPDMA_AA },
/* drives which fail FPDMA_AA activation (some may freeze afterwards)
the ST disks also have LPM issues */
{ "ST1000LM024 HN-M101MBB", "2AR10001", ATA_HORKAGE_BROKEN_FPDMA_AA |
ATA_HORKAGE_NOLPM, },
{ "ST1000LM024 HN-M101MBB", "2BA30001", ATA_HORKAGE_BROKEN_FPDMA_AA |
ATA_HORKAGE_NOLPM, },
{ "VB0250EAVER", "HPG7", ATA_HORKAGE_BROKEN_FPDMA_AA },
/* Blacklist entries taken from Silicon Image 3124/3132
View file
@@ -1310,11 +1310,11 @@ static void blkif_free_ring(struct blkfront_ring_info *rinfo)
}
free_shadow:
kfree(rinfo->shadow[i].grants_used);
kvfree(rinfo->shadow[i].grants_used);
rinfo->shadow[i].grants_used = NULL;
kfree(rinfo->shadow[i].indirect_grants);
kvfree(rinfo->shadow[i].indirect_grants);
rinfo->shadow[i].indirect_grants = NULL;
kfree(rinfo->shadow[i].sg);
kvfree(rinfo->shadow[i].sg);
rinfo->shadow[i].sg = NULL;
}
@@ -1353,7 +1353,7 @@ static void blkif_free(struct blkfront_info *info, int suspend)
for (i = 0; i < info->nr_rings; i++)
blkif_free_ring(&info->rinfo[i]);
kfree(info->rinfo);
kvfree(info->rinfo);
info->rinfo = NULL;
info->nr_rings = 0;
}
@@ -1914,9 +1914,9 @@ static int negotiate_mq(struct blkfront_info *info)
if (!info->nr_rings)
info->nr_rings = 1;
info->rinfo = kcalloc(info->nr_rings,
sizeof(struct blkfront_ring_info),
GFP_KERNEL);
info->rinfo = kvcalloc(info->nr_rings,
sizeof(struct blkfront_ring_info),
GFP_KERNEL);
if (!info->rinfo) {
xenbus_dev_fatal(info->xbdev, -ENOMEM, "allocating ring_info structure");
info->nr_rings = 0;
@@ -2232,17 +2232,17 @@ static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo)
for (i = 0; i < BLK_RING_SIZE(info); i++) {
rinfo->shadow[i].grants_used =
kcalloc(grants,
sizeof(rinfo->shadow[i].grants_used[0]),
GFP_NOIO);
rinfo->shadow[i].sg = kcalloc(psegs,
sizeof(rinfo->shadow[i].sg[0]),
GFP_NOIO);
kvcalloc(grants,
sizeof(rinfo->shadow[i].grants_used[0]),
GFP_NOIO);
rinfo->shadow[i].sg = kvcalloc(psegs,
sizeof(rinfo->shadow[i].sg[0]),
GFP_NOIO);
if (info->max_indirect_segments)
rinfo->shadow[i].indirect_grants =
kcalloc(INDIRECT_GREFS(grants),
sizeof(rinfo->shadow[i].indirect_grants[0]),
GFP_NOIO);
kvcalloc(INDIRECT_GREFS(grants),
sizeof(rinfo->shadow[i].indirect_grants[0]),
GFP_NOIO);
if ((rinfo->shadow[i].grants_used == NULL) ||
(rinfo->shadow[i].sg == NULL) ||
(info->max_indirect_segments &&
@@ -2256,11 +2256,11 @@ static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo)
out_of_memory:
for (i = 0; i < BLK_RING_SIZE(info); i++) {
kfree(rinfo->shadow[i].grants_used);
kvfree(rinfo->shadow[i].grants_used);
rinfo->shadow[i].grants_used = NULL;
kfree(rinfo->shadow[i].sg);
kvfree(rinfo->shadow[i].sg);
rinfo->shadow[i].sg = NULL;
kfree(rinfo->shadow[i].indirect_grants);
kvfree(rinfo->shadow[i].indirect_grants);
rinfo->shadow[i].indirect_grants = NULL;
}
if (!list_empty(&rinfo->indirect_pages)) {
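The kcalloc/kfree to kvcalloc/kvfree conversion above lets these per-ring arrays fall back to vmalloc when many rings with many indirect segments make the request too large for a contiguous slab allocation. A toy userspace model of the semantics, heavily simplified (both paths are calloc() here; only the calling convention is being illustrated, and the limit is hypothetical):

#include <stdlib.h>

#define CONTIG_LIMIT (128 * 1024)	/* hypothetical slab-size limit */

/* Small requests take the "contiguous" path, large ones fall back. */
static void *kv_calloc(size_t n, size_t size)
{
	if (size != 0 && n <= CONTIG_LIMIT / size)
		return calloc(n, size);	/* "kmalloc" path */
	/* too big for one contiguous chunk: "vmalloc" fallback */
	return calloc(n, size);
}

/* The caller cannot know which path was taken, so one free routine
 * must cover both - which is why every matching kfree() in the patch
 * becomes kvfree() as well. */
static void kv_free(void *p)
{
	free(p);
}

int main(void)
{
	void *rings = kv_calloc(4096, 512);	/* 2 MiB: fallback path */

	kv_free(rings);
	return 0;
}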
View file
@@ -835,6 +835,9 @@ static const int rk3288_saved_cru_reg_ids[] = {
RK3288_CLKSEL_CON(10),
RK3288_CLKSEL_CON(33),
RK3288_CLKSEL_CON(37),
/* We turn aclk_dmac1 on for suspend; this will restore it */
RK3288_CLKGATE_CON(10),
};
static u32 rk3288_saved_cru_regs[ARRAY_SIZE(rk3288_saved_cru_reg_ids)];
@@ -850,6 +853,14 @@ static int rk3288_clk_suspend(void)
readl_relaxed(rk3288_cru_base + reg_id);
}
/*
* Going into deep sleep (specifically setting PMU_CLR_DMA in
* RK3288_PMU_PWRMODE_CON1) appears to fail unless
* "aclk_dmac1" is on.
*/
writel_relaxed(1 << (12 + 16),
rk3288_cru_base + RK3288_CLKGATE_CON(10));
/*
* Switch PLLs other than DPLL (for SDRAM) to slow mode to
* avoid crashes on resume. The Mask ROM on the system will
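The 1 << (12 + 16) value above follows the Rockchip CRU "hiword mask" register layout: the low 16 bits hold the gate bits and the high 16 bits are per-bit write enables, so a single write can clear gate bit 12 (0 = clock running) without a read-modify-write. A small encoder for that layout (helper name made up):

#include <stdint.h>
#include <stdio.h>

/* For each bit set in mask, the hardware latches the matching
 * low-halfword bit from value; all other bits stay untouched. */
static uint32_t hiword_val(uint32_t mask, uint32_t value)
{
	return (mask << 16) | (value & mask);
}

int main(void)
{
	/* Ungate clock 12: write-enable bit 12, new value 0. */
	uint32_t w = hiword_val(1u << 12, 0);

	printf("write 0x%08x\n", w);	/* 0x10000000 == 1 << (12 + 16) */
	return 0;
}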
View file
@@ -137,9 +137,6 @@ static int _omap4_clkctrl_clk_enable(struct clk_hw *hw)
int ret;
union omap4_timeout timeout = { 0 };
if (!clk->enable_bit)
return 0;
if (clk->clkdm) {
ret = ti_clk_ll_ops->clkdm_clk_enable(clk->clkdm, hw->clk);
if (ret) {
@@ -151,6 +148,9 @@ }
}
}
if (!clk->enable_bit)
return 0;
val = ti_clk_ll_ops->clk_readl(&clk->enable_reg);
val &= ~OMAP4_MODULEMODE_MASK;
@@ -179,7 +179,7 @@ static void _omap4_clkctrl_clk_disable(struct clk_hw *hw)
union omap4_timeout timeout = { 0 };
if (!clk->enable_bit)
return;
goto exit;
val = ti_clk_ll_ops->clk_readl(&clk->enable_reg);
View file
@@ -597,7 +597,7 @@ static int idma64_probe(struct idma64_chip *chip)
idma64->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
idma64->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
idma64->dma.dev = chip->dev;
idma64->dma.dev = chip->sysdev;
dma_set_max_seg_size(idma64->dma.dev, IDMA64C_CTLH_BLOCK_TS_MASK);
@@ -637,6 +637,7 @@ static int idma64_platform_probe(struct platform_device *pdev)
{
struct idma64_chip *chip;
struct device *dev = &pdev->dev;
struct device *sysdev = dev->parent;
struct resource *mem;
int ret;
@@ -653,11 +654,12 @@ static int idma64_platform_probe(struct platform_device *pdev)
if (IS_ERR(chip->regs))
return PTR_ERR(chip->regs);
ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
ret = dma_coerce_mask_and_coherent(sysdev, DMA_BIT_MASK(64));
if (ret)
return ret;
chip->dev = dev;
chip->sysdev = sysdev;
ret = idma64_probe(chip);
if (ret)
View file
@@ -216,12 +216,14 @@ static inline void idma64_writel(struct idma64 *idma64, int offset, u32 value)
/**
* struct idma64_chip - representation of iDMA 64-bit controller hardware
* @dev: struct device of the DMA controller
* @sysdev: struct device of the physical device that does DMA
* @irq: irq line
* @regs: memory mapped I/O space
* @idma64: struct idma64 that is filled by idma64_probe()
*/
struct idma64_chip {
struct device *dev;
struct device *sysdev;
int irq;
void __iomem *regs;
struct idma64 *idma64;
View file
@@ -250,8 +250,8 @@ config EDAC_PND2
micro-server but may appear on others in the future.
config EDAC_MPC85XX
tristate "Freescale MPC83xx / MPC85xx"
depends on FSL_SOC
bool "Freescale MPC83xx / MPC85xx"
depends on FSL_SOC && EDAC=y
help
Support for error detection and correction on the Freescale
MPC8349, MPC8560, MPC8540, MPC8548, T4240
View file
@@ -259,8 +259,7 @@ static int efi_pstore_write(struct pstore_record *record)
efi_name[i] = name[i];
ret = efivar_entry_set_safe(efi_name, vendor, PSTORE_EFI_ATTRIBUTES,
!pstore_cannot_block_path(record->reason),
record->size, record->psi->buf);
preemptible(), record->size, record->psi->buf);
if (record->reason == KMSG_DUMP_OOPS)
efivar_run_worker();
@@ -369,7 +368,6 @@ static __init int efivars_pstore_init(void)
return -ENOMEM;
efi_pstore_info.bufsize = 1024;
spin_lock_init(&efi_pstore_info.buf_lock);
if (pstore_register(&efi_pstore_info)) {
kfree(efi_pstore_info.buf);
View file
@@ -784,6 +784,7 @@ config GPIO_ADP5588
config GPIO_ADP5588_IRQ
bool "Interrupt controller support for ADP5588"
depends on GPIO_ADP5588=y
select GPIOLIB_IRQCHIP
help
Say yes here to enable the adp5588 to be used as an interrupt
controller. It requires the driver to be built in the kernel.
View file
@@ -343,6 +343,22 @@ static void omap_clear_gpio_debounce(struct gpio_bank *bank, unsigned offset)
}
}
/*
* Off mode wake-up capable GPIOs in bank(s) that are in the wakeup domain.
* See the GPIO "Wake-Up Generation" section of the TRM for the list of GPIOs
* in the wakeup domain. If bank->non_wakeup_gpios is not configured, assume
* none are capable of waking up the system from off mode.
*/
static bool omap_gpio_is_off_wakeup_capable(struct gpio_bank *bank, u32 gpio_mask)
{
u32 no_wake = bank->non_wakeup_gpios;
if (no_wake)
return !!(~no_wake & gpio_mask);
return false;
}
static inline void omap_set_gpio_trigger(struct gpio_bank *bank, int gpio,
unsigned trigger)
{
@@ -374,13 +390,7 @@ static inline void omap_set_gpio_trigger(struct gpio_bank *bank, int gpio,
}
/* This part needs to be executed always for OMAP{34xx, 44xx} */
if (!bank->regs->irqctrl) {
/* On omap24xx proceed only when valid GPIO bit is set */
if (bank->non_wakeup_gpios) {
if (!(bank->non_wakeup_gpios & gpio_bit))
goto exit;
}
if (!bank->regs->irqctrl && !omap_gpio_is_off_wakeup_capable(bank, gpio)) {
/*
* Log the edge gpio and manually trigger the IRQ
* after resume if the input level changes
@@ -393,7 +403,6 @@ static inline void omap_set_gpio_trigger(struct gpio_bank *bank, int gpio,
bank->enabled_non_wakeup_gpios &= ~gpio_bit;
}
exit:
bank->level_mask =
readl_relaxed(bank->base + bank->regs->leveldetect0) |
readl_relaxed(bank->base + bank->regs->leveldetect1);
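The new helper above is pure mask arithmetic: ~no_wake is the set of wake-capable GPIOs in the bank, and ANDing with gpio_mask asks whether any requested line is in that set. Worked through with concrete masks in a standalone sketch:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Mirrors the bit logic of omap_gpio_is_off_wakeup_capable(). */
static bool off_wakeup_capable(uint32_t non_wakeup_gpios, uint32_t gpio_mask)
{
	if (non_wakeup_gpios)
		return !!(~non_wakeup_gpios & gpio_mask);
	return false;	/* unconfigured: assume nothing wakes from off */
}

int main(void)
{
	uint32_t no_wake = 0x0000000f;	/* GPIOs 0-3 cannot wake */

	printf("%d\n", off_wakeup_capable(no_wake, 1u << 2));	/* 0 */
	printf("%d\n", off_wakeup_capable(no_wake, 1u << 5));	/* 1 */
	printf("%d\n", off_wakeup_capable(0, 1u << 5));		/* 0 */
	return 0;
}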
View file
@@ -37,6 +37,7 @@ struct fsl_gpio_soc_data {
struct vf610_gpio_port {
struct gpio_chip gc;
struct irq_chip ic;
void __iomem *base;
void __iomem *gpio_base;
const struct fsl_gpio_soc_data *sdata;
@@ -66,8 +67,6 @@ struct vf610_gpio_port {
#define PORT_INT_EITHER_EDGE 0xb
#define PORT_INT_LOGIC_ONE 0xc
static struct irq_chip vf610_gpio_irq_chip;
static const struct fsl_gpio_soc_data imx_data = {
.have_paddr = true,
};
@@ -243,15 +242,6 @@ static int vf610_gpio_irq_set_wake(struct irq_data *d, u32 enable)
return 0;
}
static struct irq_chip vf610_gpio_irq_chip = {
.name = "gpio-vf610",
.irq_ack = vf610_gpio_irq_ack,
.irq_mask = vf610_gpio_irq_mask,
.irq_unmask = vf610_gpio_irq_unmask,
.irq_set_type = vf610_gpio_irq_set_type,
.irq_set_wake = vf610_gpio_irq_set_wake,
};
static int vf610_gpio_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -259,6 +249,7 @@ static int vf610_gpio_probe(struct platform_device *pdev)
struct vf610_gpio_port *port;
struct resource *iores;
struct gpio_chip *gc;
struct irq_chip *ic;
int i;
int ret;
@ -295,6 +286,14 @@ static int vf610_gpio_probe(struct platform_device *pdev)
gc->direction_output = vf610_gpio_direction_output;
gc->set = vf610_gpio_set;
ic = &port->ic;
ic->name = "gpio-vf610";
ic->irq_ack = vf610_gpio_irq_ack;
ic->irq_mask = vf610_gpio_irq_mask;
ic->irq_unmask = vf610_gpio_irq_unmask;
ic->irq_set_type = vf610_gpio_irq_set_type;
ic->irq_set_wake = vf610_gpio_irq_set_wake;
ret = gpiochip_add_data(gc, port);
if (ret < 0)
return ret;
@@ -306,14 +305,13 @@ static int vf610_gpio_probe(struct platform_device *pdev)
/* Clear the interrupt status register for all GPIO's */
vf610_gpio_writel(~0, port->base + PORT_ISFR);
ret = gpiochip_irqchip_add(gc, &vf610_gpio_irq_chip, 0,
handle_edge_irq, IRQ_TYPE_NONE);
ret = gpiochip_irqchip_add(gc, ic, 0, handle_edge_irq, IRQ_TYPE_NONE);
if (ret) {
dev_err(dev, "failed to add irqchip\n");
gpiochip_remove(gc);
return ret;
}
gpiochip_set_chained_irqchip(gc, &vf610_gpio_irq_chip, port->irq,
gpiochip_set_chained_irqchip(gc, ic, port->irq,
vf610_gpio_irq_handler);
return 0;
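Moving the irq_chip from a file-scope static into struct vf610_gpio_port gives every probed port its own chip instance, so whatever the irqchip framework later stores in the chip cannot leak between ports. The shared-static hazard in miniature (hypothetical names, userspace only):

#include <stdio.h>

struct chip { const char *label; };

static struct chip shared_chip;	/* old pattern: one static for all ports */

struct port {
	struct chip ic;		/* new pattern: per-instance chip */
};

/* Stands in for a framework call that mutates the chip it is given. */
static void register_port(struct chip *c, const char *label)
{
	c->label = label;
}

int main(void)
{
	struct port a, b;

	/* Shared static: the second probe clobbers the first. */
	register_port(&shared_chip, "port-a");
	register_port(&shared_chip, "port-b");
	printf("shared: %s\n", shared_chip.label);	/* port-b for both */

	/* Per-port chips stay independent. */
	register_port(&a.ic, "port-a");
	register_port(&b.ic, "port-b");
	printf("a: %s, b: %s\n", a.ic.label, b.ic.label);
	return 0;
}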
View file
@@ -416,8 +416,7 @@ static int amdgpu_atif_handler(struct amdgpu_device *adev,
}
}
if (req.pending & ATIF_DGPU_DISPLAY_EVENT) {
if ((adev->flags & AMD_IS_PX) &&
amdgpu_atpx_dgpu_req_power_for_displays()) {
if (adev->flags & AMD_IS_PX) {
pm_runtime_get_sync(adev->ddev->dev);
/* Just fire off a uevent and let userspace tell us what to do */
drm_helper_hpd_irq_event(adev->ddev);
View file
@@ -37,18 +37,10 @@ static void psp_set_funcs(struct amdgpu_device *adev);
static int psp_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct psp_context *psp = &adev->psp;
psp_set_funcs(adev);
return 0;
}
static int psp_sw_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct psp_context *psp = &adev->psp;
int ret;
switch (adev->asic_type) {
case CHIP_VEGA10:
case CHIP_VEGA12:
@@ -67,6 +59,15 @@ static int psp_sw_init(void *handle)
if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
return 0;
return 0;
}
static int psp_sw_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct psp_context *psp = &adev->psp;
int ret;
ret = psp_init_microcode(psp);
if (ret) {
DRM_ERROR("Failed to load psp firmware!\n");
View file
@@ -388,6 +388,10 @@ void dpp1_cnv_setup (
default:
break;
}
/* Set default color space based on format if none is given. */
color_space = input_color_space ? input_color_space : color_space;
REG_SET(CNVC_SURFACE_PIXEL_FORMAT, 0,
CNVC_SURFACE_PIXEL_FORMAT, pixel_format);
REG_UPDATE(FORMAT_CONTROL, FORMAT_CONTROL__ALPHA_EN, alpha_en);
@@ -399,7 +403,7 @@ void dpp1_cnv_setup (
for (i = 0; i < 12; i++)
tbl_entry.regval[i] = input_csc_color_matrix.matrix[i];
tbl_entry.color_space = input_color_space;
tbl_entry.color_space = color_space;
if (color_space >= COLOR_SPACE_YCBCR601)
select = INPUT_CSC_SELECT_ICSC;
View file
@@ -1890,7 +1890,7 @@ static void update_dpp(struct dpp *dpp, struct dc_plane_state *plane_state)
plane_state->format,
EXPANSION_MODE_ZERO,
plane_state->input_csc_color_matrix,
COLOR_SPACE_YCBCR601_LIMITED);
plane_state->color_space);
//set scale and bias registers
build_prescale_params(&bns_params, plane_state);
View file
@@ -747,11 +747,11 @@ static void adv7511_mode_set(struct adv7511 *adv7511,
vsync_polarity = 1;
}
if (mode->vrefresh <= 24000)
if (drm_mode_vrefresh(mode) <= 24)
low_refresh_rate = ADV7511_LOW_REFRESH_RATE_24HZ;
else if (mode->vrefresh <= 25000)
else if (drm_mode_vrefresh(mode) <= 25)
low_refresh_rate = ADV7511_LOW_REFRESH_RATE_25HZ;
else if (mode->vrefresh <= 30000)
else if (drm_mode_vrefresh(mode) <= 30)
low_refresh_rate = ADV7511_LOW_REFRESH_RATE_30HZ;
else
low_refresh_rate = ADV7511_LOW_REFRESH_RATE_NONE;
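The fix above stops reading mode->vrefresh directly, a cached value stored in Hz that may never have been filled in and that the old <= 24000 comparisons treated as millihertz, and instead derives the rate from the timings via drm_mode_vrefresh(). To a first approximation that derivation is pixel clock over total pixels per frame; a simplified standalone version, ignoring the interlace and doublescan adjustments the real helper makes:

#include <stdio.h>

/* Approximation of drm_mode_vrefresh(): clock is in kHz. */
static int mode_vrefresh(int clock_khz, int htotal, int vtotal)
{
	unsigned int num = clock_khz * 1000;
	unsigned int den = htotal * vtotal;

	return (num + den / 2) / den;	/* round to nearest */
}

int main(void)
{
	/* 1080p24: 74.25 MHz pixel clock, 2750 x 1125 total pixels */
	printf("%d Hz\n", mode_vrefresh(74250, 2750, 1125));	/* 24 */
	return 0;
}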
View file
@@ -1573,15 +1573,6 @@ int drm_atomic_helper_async_check(struct drm_device *dev,
if (old_plane_state->fb != new_plane_state->fb)
return -EINVAL;
/*
* FIXME: Since prepare_fb and cleanup_fb are always called on
* the new_plane_state for async updates we need to block framebuffer
* changes. This prevents use of a fb that's been cleaned up and
* double cleanups from occurring.
*/
if (old_plane_state->fb != new_plane_state->fb)
return -EINVAL;
funcs = plane->helper_private;
if (!funcs->atomic_async_update)
return -EINVAL;
@@ -1612,6 +1603,8 @@ EXPORT_SYMBOL(drm_atomic_helper_async_check);
* drm_atomic_async_check() succeeds. Async commits are not supposed to swap
* the states like normal sync commits, but just do in-place changes on the
* current state.
*
* TODO: Implement full swap instead of doing in-place changes.
*/
void drm_atomic_helper_async_commit(struct drm_device *dev,
struct drm_atomic_state *state)
@@ -1622,6 +1615,9 @@ void drm_atomic_helper_async_commit(struct drm_device *dev,
int i;
for_each_new_plane_in_state(state, plane, plane_state, i) {
struct drm_framebuffer *new_fb = plane_state->fb;
struct drm_framebuffer *old_fb = plane->state->fb;
funcs = plane->helper_private;
funcs->atomic_async_update(plane, plane_state);
@@ -1630,11 +1626,17 @@ void drm_atomic_helper_async_commit(struct drm_device *dev,
* plane->state in-place, make sure at least common
* properties have been properly updated.
*/
WARN_ON_ONCE(plane->state->fb != plane_state->fb);
WARN_ON_ONCE(plane->state->fb != new_fb);
WARN_ON_ONCE(plane->state->crtc_x != plane_state->crtc_x);
WARN_ON_ONCE(plane->state->crtc_y != plane_state->crtc_y);
WARN_ON_ONCE(plane->state->src_x != plane_state->src_x);
WARN_ON_ONCE(plane->state->src_y != plane_state->src_y);
/*
* Make sure the FBs have been swapped so that cleanups in the
* new_state performs a cleanup in the old FB.
*/
WARN_ON_ONCE(plane_state->fb != old_fb);
}
}
EXPORT_SYMBOL(drm_atomic_helper_async_commit);
View file
@@ -595,6 +595,10 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
plane = crtc->primary;
/* allow disabling with the primary plane leased */
if (crtc_req->mode_valid && !drm_lease_held(file_priv, plane->base.id))
return -EACCES;
mutex_lock(&crtc->dev->mode_config.mutex);
drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
retry:
View file
@@ -180,6 +180,25 @@ static const struct edid_quirk {
/* Rotel RSX-1058 forwards sink's EDID but only does HDMI 1.1*/
{ "ETR", 13896, EDID_QUIRK_FORCE_8BPC },
/* Valve Index Headset */
{ "VLV", 0x91a8, EDID_QUIRK_NON_DESKTOP },
{ "VLV", 0x91b0, EDID_QUIRK_NON_DESKTOP },
{ "VLV", 0x91b1, EDID_QUIRK_NON_DESKTOP },
{ "VLV", 0x91b2, EDID_QUIRK_NON_DESKTOP },
{ "VLV", 0x91b3, EDID_QUIRK_NON_DESKTOP },
{ "VLV", 0x91b4, EDID_QUIRK_NON_DESKTOP },
{ "VLV", 0x91b5, EDID_QUIRK_NON_DESKTOP },
{ "VLV", 0x91b6, EDID_QUIRK_NON_DESKTOP },
{ "VLV", 0x91b7, EDID_QUIRK_NON_DESKTOP },
{ "VLV", 0x91b8, EDID_QUIRK_NON_DESKTOP },
{ "VLV", 0x91b9, EDID_QUIRK_NON_DESKTOP },
{ "VLV", 0x91ba, EDID_QUIRK_NON_DESKTOP },
{ "VLV", 0x91bb, EDID_QUIRK_NON_DESKTOP },
{ "VLV", 0x91bc, EDID_QUIRK_NON_DESKTOP },
{ "VLV", 0x91bd, EDID_QUIRK_NON_DESKTOP },
{ "VLV", 0x91be, EDID_QUIRK_NON_DESKTOP },
{ "VLV", 0x91bf, EDID_QUIRK_NON_DESKTOP },
/* HTC Vive and Vive Pro VR Headsets */
{ "HVR", 0xaa01, EDID_QUIRK_NON_DESKTOP },
{ "HVR", 0xaa02, EDID_QUIRK_NON_DESKTOP },
@@ -201,6 +220,12 @@ static const struct edid_quirk {
/* Sony PlayStation VR Headset */
{ "SNY", 0x0704, EDID_QUIRK_NON_DESKTOP },
/* Sensics VR Headsets */
{ "SEN", 0x1019, EDID_QUIRK_NON_DESKTOP },
/* OSVR HDK and HDK2 VR Headsets */
{ "SVR", 0x1019, EDID_QUIRK_NON_DESKTOP },
};
/*
@@ -1565,6 +1590,50 @@ static void connector_bad_edid(struct drm_connector *connector,
}
}
/* Get override or firmware EDID */
static struct edid *drm_get_override_edid(struct drm_connector *connector)
{
struct edid *override = NULL;
if (connector->override_edid)
override = drm_edid_duplicate(connector->edid_blob_ptr->data);
if (!override)
override = drm_load_edid_firmware(connector);
return IS_ERR(override) ? NULL : override;
}
/**
* drm_add_override_edid_modes - add modes from override/firmware EDID
* @connector: connector we're probing
*
* Add modes from the override/firmware EDID, if available. Only to be used from
* drm_helper_probe_single_connector_modes() as a fallback for when DDC probe
* failed during drm_get_edid() and caused the override/firmware EDID to be
* skipped.
*
* Return: The number of modes added or 0 if we couldn't find any.
*/
int drm_add_override_edid_modes(struct drm_connector *connector)
{
struct edid *override;
int num_modes = 0;
override = drm_get_override_edid(connector);
if (override) {
drm_connector_update_edid_property(connector, override);
num_modes = drm_add_edid_modes(connector, override);
kfree(override);
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] adding %d modes via fallback override/firmware EDID\n",
connector->base.id, connector->name, num_modes);
}
return num_modes;
}
EXPORT_SYMBOL(drm_add_override_edid_modes);
/**
* drm_do_get_edid - get EDID data using a custom EDID block read function
* @connector: connector we're probing
@@ -1592,15 +1661,10 @@ struct edid *drm_do_get_edid(struct drm_connector *connector,
{
int i, j = 0, valid_extensions = 0;
u8 *edid, *new;
struct edid *override = NULL;
struct edid *override;
if (connector->override_edid)
override = drm_edid_duplicate(connector->edid_blob_ptr->data);
if (!override)
override = drm_load_edid_firmware(connector);
if (!IS_ERR_OR_NULL(override))
override = drm_get_override_edid(connector);
if (override)
return override;
if ((edid = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL)
View file
@@ -940,6 +940,11 @@ retry:
if (ret)
goto out;
if (!drm_lease_held(file_priv, crtc->cursor->base.id)) {
ret = -EACCES;
goto out;
}
ret = drm_mode_cursor_universal(crtc, req, file_priv, &ctx);
goto out;
}
@@ -1042,6 +1047,9 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
plane = crtc->primary;
if (!drm_lease_held(file_priv, plane->base.id))
return -EACCES;
if (crtc->funcs->page_flip_target) {
u32 current_vblank;
int r;
View file
@@ -479,6 +479,13 @@ retry:
count = (*connector_funcs->get_modes)(connector);
/*
* Fallback for when DDC probe failed in drm_get_edid() and thus skipped
* override/firmware EDID.
*/
if (count == 0 && connector->status == connector_status_connected)
count = drm_add_override_edid_modes(connector);
if (count == 0 && connector->status == connector_status_connected)
count = drm_add_modes_noedid(connector, 1024, 768);
count += drm_helper_probe_add_cmdline_mode(connector);
View file
@@ -124,6 +124,8 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu)
return;
etnaviv_dump_core = false;
mutex_lock(&gpu->mmu->lock);
mmu_size = etnaviv_iommu_dump_size(gpu->mmu);
/* We always dump registers, mmu, ring and end marker */
@@ -166,6 +168,7 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu)
iter.start = __vmalloc(file_size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY,
PAGE_KERNEL);
if (!iter.start) {
mutex_unlock(&gpu->mmu->lock);
dev_warn(gpu->dev, "failed to allocate devcoredump file\n");
return;
}
@@ -233,6 +236,8 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu)
obj->base.size);
}
mutex_unlock(&gpu->mmu->lock);
etnaviv_core_dump_header(&iter, ETDUMP_BUF_END, iter.data);
dev_coredumpv(gpu->dev, iter.start, iter.data - iter.start, GFP_KERNEL);
Some files were not shown because too many files have changed in this diff.