Merge "Merge android-4.19-q.78 (d9e388f) into msm-4.19"

qctecmdr 2019-10-24 19:01:02 -07:00 committed by Gerrit - the friendly Code Review server
commit 3225a4fef0
355 changed files with 2845 additions and 1308 deletions

View file

@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 4
PATCHLEVEL = 19
-SUBLEVEL = 76
+SUBLEVEL = 78
EXTRAVERSION =
NAME = "People's Front"

View file

@ -1586,8 +1586,9 @@ config ARM_PATCH_IDIV
code to do integer division.
config AEABI
bool "Use the ARM EABI to compile the kernel" if !CPU_V7 && !CPU_V7M && !CPU_V6 && !CPU_V6K
default CPU_V7 || CPU_V7M || CPU_V6 || CPU_V6K
bool "Use the ARM EABI to compile the kernel" if !CPU_V7 && \
!CPU_V7M && !CPU_V6 && !CPU_V6K && !CC_IS_CLANG
default CPU_V7 || CPU_V7M || CPU_V6 || CPU_V6K || CC_IS_CLANG
help
This option allows for the kernel to be compiled using the latest
ARM ABI (aka EABI). This is only useful if you are using a user

View file

@ -437,6 +437,7 @@
regulator-name = "vdd_ldo10";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
regulator-always-on;
regulator-state-mem {
regulator-off-in-suspend;
};

View file

@ -437,6 +437,7 @@
regulator-name = "vdd_ldo10";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
regulator-always-on;
regulator-state-mem {
regulator-off-in-suspend;
};

View file

@ -323,6 +323,7 @@
vmmc-supply = <&reg_module_3v3>;
vqmmc-supply = <&reg_DCDC3>;
non-removable;
sdhci-caps-mask = <0x80000000 0x0>;
};
&iomuxc {

View file

@ -43,7 +43,7 @@
<&clks IMX7D_ENET1_TIME_ROOT_CLK>;
assigned-clock-parents = <&clks IMX7D_PLL_ENET_MAIN_100M_CLK>;
assigned-clock-rates = <0>, <100000000>;
phy-mode = "rgmii";
phy-mode = "rgmii-id";
phy-handle = <&ethphy0>;
fsl,magic-packet;
status = "okay";
@ -69,7 +69,7 @@
<&clks IMX7D_ENET2_TIME_ROOT_CLK>;
assigned-clock-parents = <&clks IMX7D_PLL_ENET_MAIN_100M_CLK>;
assigned-clock-rates = <0>, <100000000>;
phy-mode = "rgmii";
phy-mode = "rgmii-id";
phy-handle = <&ethphy1>;
fsl,magic-packet;
status = "okay";

View file

@ -65,7 +65,7 @@ int zynq_cpun_start(u32 address, int cpu)
* 0x4: Jump by mov instruction
* 0x8: Jumping address
*/
-memcpy((__force void *)zero, &zynq_secondary_trampoline,
+memcpy_toio(zero, &zynq_secondary_trampoline,
trampoline_size);
writel(address, zero + trampoline_size);

View file

@ -216,7 +216,7 @@ static inline bool access_error(unsigned int fsr, struct vm_area_struct *vma)
{
unsigned int mask = VM_READ | VM_WRITE | VM_EXEC;
-if (fsr & FSR_WRITE)
+if ((fsr & FSR_WRITE) && !(fsr & FSR_CM))
mask = VM_WRITE;
if (fsr & FSR_LNX_PF)
mask = VM_EXEC;
@ -287,7 +287,7 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
if (user_mode(regs))
flags |= FAULT_FLAG_USER;
-if (fsr & FSR_WRITE)
+if ((fsr & FSR_WRITE) && !(fsr & FSR_CM))
flags |= FAULT_FLAG_WRITE;
/*

View file

@ -6,6 +6,7 @@
* Fault status register encodings. We steal bit 31 for our own purposes.
*/
#define FSR_LNX_PF (1 << 31)
#define FSR_CM (1 << 13)
#define FSR_WRITE (1 << 11)
#define FSR_FS4 (1 << 10)
#define FSR_FS3_0 (15)

View file

@ -18,8 +18,9 @@
(((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))
/* gap between mmap and stack */
-#define MIN_GAP (128*1024*1024UL)
-#define MAX_GAP ((TASK_SIZE)/6*5)
+#define MIN_GAP (128*1024*1024UL)
+#define MAX_GAP ((STACK_TOP)/6*5)
+#define STACK_RND_MASK (0x7ff >> (PAGE_SHIFT - 12))
static int mmap_is_legacy(struct rlimit *rlim_stack)
{
@ -35,13 +36,22 @@ static int mmap_is_legacy(struct rlimit *rlim_stack)
static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
{
unsigned long gap = rlim_stack->rlim_cur;
unsigned long pad = stack_guard_gap;
/* Account for stack randomization if necessary */
if (current->flags & PF_RANDOMIZE)
pad += (STACK_RND_MASK << PAGE_SHIFT);
/* Values close to RLIM_INFINITY can overflow. */
if (gap + pad > gap)
gap += pad;
if (gap < MIN_GAP)
gap = MIN_GAP;
else if (gap > MAX_GAP)
gap = MAX_GAP;
-return PAGE_ALIGN(TASK_SIZE - gap - rnd);
+return PAGE_ALIGN(STACK_TOP - gap - rnd);
}
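Reviewer note: a worked example of the two additions above, using assumed values. With 4 KiB pages (PAGE_SHIFT == 12), STACK_RND_MASK is 0x7ff, so PF_RANDOMIZE can lower the stack base by up to 0x7ff << 12 (just under 8 MiB). The unsigned `gap + pad > gap` test then applies the padding only when the sum does not wrap, which protects rlim_cur values near RLIM_INFINITY:

/* Illustrative only: the overflow guard from mmap_base() above. */
unsigned long gap = ~0UL;                      /* RLIM_INFINITY-like */
unsigned long pad = (0x7ffUL << 12) + stack_guard_gap;

if (gap + pad > gap)    /* false here: the sum wrapped past zero */
	gap += pad;     /* padding is skipped rather than corrupting gap */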
/*

View file

@ -1175,6 +1175,22 @@ void __init adjust_lowmem_bounds(void)
*/
vmalloc_limit = (u64)(uintptr_t)vmalloc_min - PAGE_OFFSET + PHYS_OFFSET;
/*
* The first usable region must be PMD aligned. Mark its start
* as MEMBLOCK_NOMAP if it isn't
*/
for_each_memblock(memory, reg) {
if (!memblock_is_nomap(reg)) {
if (!IS_ALIGNED(reg->base, PMD_SIZE)) {
phys_addr_t len;
len = round_up(reg->base, PMD_SIZE) - reg->base;
memblock_mark_nomap(reg->base, len);
}
break;
}
}
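Reviewer note: a worked example of the NOMAP marking above, with assumed addresses and PMD_SIZE == 2 MiB:

/* Illustrative only: a first bank at 0x80100000 is not PMD aligned:
 *   len = round_up(0x80100000, 0x200000) - 0x80100000 = 0x100000
 * so [0x80100000, 0x80200000) is marked MEMBLOCK_NOMAP and the first
 * mapped byte lands PMD aligned at 0x80200000.
 */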
for_each_memblock(memory, reg) {
phys_addr_t block_start = reg->base;
phys_addr_t block_end = reg->base + reg->size;

View file

@ -62,6 +62,7 @@ void samsung_wdt_reset(void)
#ifdef CONFIG_OF
static const struct of_device_id s3c2410_wdt_match[] = {
{ .compatible = "samsung,s3c2410-wdt" },
{ .compatible = "samsung,s3c6410-wdt" },
{},
};

View file

@ -708,6 +708,7 @@
<&cru SCLK_SDMMC_DRV>, <&cru SCLK_SDMMC_SAMPLE>;
clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
fifo-depth = <0x100>;
max-frequency = <150000000>;
status = "disabled";
};
@ -719,6 +720,7 @@
<&cru SCLK_SDIO_DRV>, <&cru SCLK_SDIO_SAMPLE>;
clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
fifo-depth = <0x100>;
max-frequency = <150000000>;
status = "disabled";
};
@ -730,6 +732,7 @@
<&cru SCLK_EMMC_DRV>, <&cru SCLK_EMMC_SAMPLE>;
clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
fifo-depth = <0x100>;
max-frequency = <150000000>;
status = "disabled";
};

View file

@ -74,7 +74,7 @@ __XCHG_CASE( , , mb_8, dmb ish, nop, , a, l, "memory")
#undef __XCHG_CASE
#define __XCHG_GEN(sfx) \
-static inline unsigned long __xchg##sfx(unsigned long x, \
+static __always_inline unsigned long __xchg##sfx(unsigned long x, \
volatile void *ptr, \
int size) \
{ \
@ -116,7 +116,7 @@ __XCHG_GEN(_mb)
#define xchg(...) __xchg_wrapper( _mb, __VA_ARGS__)
#define __CMPXCHG_GEN(sfx) \
-static inline unsigned long __cmpxchg##sfx(volatile void *ptr, \
+static __always_inline unsigned long __cmpxchg##sfx(volatile void *ptr, \
unsigned long old, \
unsigned long new, \
int size) \
@ -223,7 +223,7 @@ __CMPWAIT_CASE( , , 8);
#undef __CMPWAIT_CASE
#define __CMPWAIT_GEN(sfx) \
-static inline void __cmpwait##sfx(volatile void *ptr, \
+static __always_inline void __cmpwait##sfx(volatile void *ptr, \
unsigned long val, \
int size) \
{ \

View file

@ -62,14 +62,6 @@
#define MIDR_CPU_MODEL_MASK (MIDR_IMPLEMENTOR_MASK | MIDR_PARTNUM_MASK | \
MIDR_ARCHITECTURE_MASK)
#define MIDR_IS_CPU_MODEL_RANGE(midr, model, rv_min, rv_max) \
({ \
u32 _model = (midr) & MIDR_CPU_MODEL_MASK; \
u32 rv = (midr) & (MIDR_REVISION_MASK | MIDR_VARIANT_MASK); \
\
_model == (model) && rv >= (rv_min) && rv <= (rv_max); \
})
#define ARM_CPU_IMP_ARM 0x41
#define ARM_CPU_IMP_APM 0x50
#define ARM_CPU_IMP_CAVIUM 0x43
@ -163,10 +155,19 @@ struct midr_range {
#define MIDR_ALL_VERSIONS(m) MIDR_RANGE(m, 0, 0, 0xf, 0xf)
static inline bool midr_is_cpu_model_range(u32 midr, u32 model, u32 rv_min,
u32 rv_max)
{
u32 _model = midr & MIDR_CPU_MODEL_MASK;
u32 rv = midr & (MIDR_REVISION_MASK | MIDR_VARIANT_MASK);
return _model == model && rv >= rv_min && rv <= rv_max;
}
static inline bool is_midr_in_range(u32 midr, struct midr_range const *range)
{
-return MIDR_IS_CPU_MODEL_RANGE(midr, range->model,
-range->rv_min, range->rv_max);
+return midr_is_cpu_model_range(midr, range->model,
+range->rv_min, range->rv_max);
}
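Reviewer note: a hedged usage sketch of the new inline helper; the model/revision window below is assumed for illustration only (the has_no_hw_prefetch() hunk further down is the in-tree caller):

/* Illustrative only: does the booting CPU fall in an assumed
 * Cortex-A53 r0p0..r0p4 window? */
static inline bool boot_cpu_is_early_a53(void)
{
	return midr_is_cpu_model_range(read_cpuid_id(), MIDR_CORTEX_A53,
				       MIDR_CPU_VAR_REV(0, 0),
				       MIDR_CPU_VAR_REV(0, 4));
}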
static inline bool

View file

@ -253,8 +253,10 @@ pte_ok:
* Only if the new pte is valid and kernel, otherwise TLB maintenance
* or update_mmu_cache() have the necessary barriers.
*/
-if (pte_valid_not_user(pte))
+if (pte_valid_not_user(pte)) {
dsb(ishst);
+isb();
+}
}
extern void __sync_icache_dcache(pte_t pteval);
@ -461,6 +463,7 @@ static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
WRITE_ONCE(*pmdp, pmd);
dsb(ishst);
isb();
}
static inline void pmd_clear(pmd_t *pmdp)
@ -517,6 +520,7 @@ static inline void set_pud(pud_t *pudp, pud_t pud)
{
WRITE_ONCE(*pudp, pud);
dsb(ishst);
isb();
}
static inline void pud_clear(pud_t *pudp)

View file

@ -224,6 +224,7 @@ static inline void __flush_tlb_kernel_pgtable(unsigned long kaddr)
__tlbi(vaae1is, addr);
dsb(ish);
isb();
}
#endif

View file

@ -850,7 +850,7 @@ static bool has_no_hw_prefetch(const struct arm64_cpu_capabilities *entry, int _
u32 midr = read_cpuid_id();
/* Cavium ThunderX pass 1.x and 2.x */
-return MIDR_IS_CPU_MODEL_RANGE(midr, MIDR_THUNDERX,
+return midr_is_cpu_model_range(midr, MIDR_THUNDERX,
MIDR_CPU_VAR_REV(0, 0),
MIDR_CPU_VAR_REV(1, MIDR_REVISION_MASK));
}

View file

@ -65,7 +65,11 @@ unsigned long arch_mmap_rnd(void)
static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
{
unsigned long gap = rlim_stack->rlim_cur;
-unsigned long pad = (STACK_RND_MASK << PAGE_SHIFT) + stack_guard_gap;
+unsigned long pad = stack_guard_gap;
/* Account for stack randomization if necessary */
if (current->flags & PF_RANDOMIZE)
pad += (STACK_RND_MASK << PAGE_SHIFT);
/* Values close to RLIM_INFINITY can overflow. */
if (gap + pad > gap)

View file

@ -340,6 +340,15 @@ skip_pgd:
msr sctlr_el1, x18
isb
/*
* Invalidate the local I-cache so that any instructions fetched
* speculatively from the PoC are discarded, since they may have
* been dynamically patched at the PoU.
*/
ic iallu
dsb nsh
isb
/* Set the flag to zero to indicate that we're all done */
str wzr, [flag_ptr]
ret

View file

@ -914,10 +914,14 @@ module_finalize (const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, struct module *mo
void
module_arch_cleanup (struct module *mod)
{
-if (mod->arch.init_unw_table)
+if (mod->arch.init_unw_table) {
unw_remove_unwind_table(mod->arch.init_unw_table);
-if (mod->arch.core_unw_table)
+mod->arch.init_unw_table = NULL;
+}
+if (mod->arch.core_unw_table) {
unw_remove_unwind_table(mod->arch.core_unw_table);
+mod->arch.core_unw_table = NULL;
+}
}
void *dereference_module_function_descriptor(struct module *mod, void *ptr)

View file

@ -22,7 +22,6 @@
#include <linux/types.h>
#include <asm/bootinfo-atari.h>
#include <asm/raw_io.h>
#include <asm/kmap.h>
extern u_long atari_mch_cookie;
@ -126,14 +125,6 @@ extern struct atari_hw_present atari_hw_present;
*/
#define atari_readb raw_inb
#define atari_writeb raw_outb
#define atari_inb_p raw_inb
#define atari_outb_p raw_outb
#include <linux/mm.h>
#include <asm/cacheflush.h>

View file

@ -29,7 +29,11 @@
#include <asm-generic/iomap.h>
#ifdef CONFIG_ATARI
#include <asm/atarihw.h>
#define atari_readb raw_inb
#define atari_writeb raw_outb
#define atari_inb_p raw_inb
#define atari_outb_p raw_outb
#endif

View file

@ -4,6 +4,7 @@
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <asm/bootinfo-mac.h>

View file

@ -688,6 +688,9 @@
#define MIPS_CONF7_IAR (_ULCAST_(1) << 10)
#define MIPS_CONF7_AR (_ULCAST_(1) << 16)
/* Ingenic Config7 bits */
#define MIPS_CONF7_BTB_LOOP_EN (_ULCAST_(1) << 4)
/* Config7 Bits specific to MIPS Technologies. */
/* Performance counters implemented Per TC */
@ -2774,6 +2777,7 @@ __BUILD_SET_C0(status)
__BUILD_SET_C0(cause)
__BUILD_SET_C0(config)
__BUILD_SET_C0(config5)
__BUILD_SET_C0(config7)
__BUILD_SET_C0(intcontrol)
__BUILD_SET_C0(intctl)
__BUILD_SET_C0(srsmap)

View file

@ -1879,6 +1879,13 @@ static inline void cpu_probe_ingenic(struct cpuinfo_mips *c, unsigned int cpu)
c->cputype = CPU_JZRISC;
c->writecombine = _CACHE_UNCACHED_ACCELERATED;
__cpu_name[cpu] = "Ingenic JZRISC";
/*
* The XBurst core by default attempts to avoid branch target
* buffer lookups by detecting & special casing loops. This
* feature will cause BogoMIPS and lpj to be calculated incorrectly.
* Set cp0 config7 bit 4 to disable this feature.
*/
set_c0_config7(MIPS_CONF7_BTB_LOOP_EN);
break;
default:
panic("Unknown Ingenic Processor ID!");

View file

@ -21,8 +21,9 @@ unsigned long shm_align_mask = PAGE_SIZE - 1; /* Sane caches */
EXPORT_SYMBOL(shm_align_mask);
/* gap between mmap and stack */
-#define MIN_GAP (128*1024*1024UL)
-#define MAX_GAP ((TASK_SIZE)/6*5)
+#define MIN_GAP (128*1024*1024UL)
+#define MAX_GAP ((TASK_SIZE)/6*5)
+#define STACK_RND_MASK (0x7ff >> (PAGE_SHIFT - 12))
static int mmap_is_legacy(struct rlimit *rlim_stack)
{
@ -38,6 +39,15 @@ static int mmap_is_legacy(struct rlimit *rlim_stack)
static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
{
unsigned long gap = rlim_stack->rlim_cur;
unsigned long pad = stack_guard_gap;
/* Account for stack randomization if necessary */
if (current->flags & PF_RANDOMIZE)
pad += (STACK_RND_MASK << PAGE_SHIFT);
/* Values close to RLIM_INFINITY can overflow. */
if (gap + pad > gap)
gap += pad;
if (gap < MIN_GAP)
gap = MIN_GAP;

View file

@ -630,7 +630,7 @@ static __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
return;
}
-if (cpu_has_rixi && _PAGE_NO_EXEC) {
+if (cpu_has_rixi && !!_PAGE_NO_EXEC) {
if (fill_includes_sw_bits) {
UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL));
} else {

View file

@ -59,8 +59,7 @@ static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
pagefault_enable();
if (!ret)
*oval = oldval;
-*oval = oldval;
return ret;
}

View file

@ -811,6 +811,10 @@ void eeh_handle_normal_event(struct eeh_pe *pe)
pr_warn("EEH: This PCI device has failed %d times in the last hour and will be permanently disabled after %d failures.\n",
pe->freeze_count, eeh_max_freezes);
eeh_for_each_pe(pe, tmp_pe)
eeh_pe_for_each_dev(tmp_pe, edev, tmp)
edev->mode &= ~EEH_DEV_NO_HANDLER;
/* Walk the various device drivers attached to this slot through
* a reset sequence, giving each an opportunity to do what it needs
* to accomplish the reset. Each child gets a report of the
@ -1004,7 +1008,8 @@ final:
*/
void eeh_handle_special_event(void)
{
-struct eeh_pe *pe, *phb_pe;
+struct eeh_pe *pe, *phb_pe, *tmp_pe;
+struct eeh_dev *edev, *tmp_edev;
struct pci_bus *bus;
struct pci_controller *hose;
unsigned long flags;
@ -1075,6 +1080,10 @@ void eeh_handle_special_event(void)
(phb_pe->state & EEH_PE_RECOVERING))
continue;
eeh_for_each_pe(pe, tmp_pe)
eeh_pe_for_each_dev(tmp_pe, edev, tmp_edev)
edev->mode &= ~EEH_DEV_NO_HANDLER;
/* Notify all devices to be down */
eeh_pe_state_clear(pe, EEH_PE_PRI_BUS);
eeh_set_channel_state(pe, pci_channel_io_perm_failure);

View file

@ -520,6 +520,10 @@ EXC_COMMON_BEGIN(machine_check_handle_early)
RFI_TO_USER_OR_KERNEL
9:
/* Deliver the machine check to host kernel in V mode. */
BEGIN_FTR_SECTION
ld r10,ORIG_GPR3(r1)
mtspr SPRN_CFAR,r10
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
MACHINE_CHECK_HANDLER_WINDUP
b machine_check_pSeries

View file

@ -875,15 +875,17 @@ static int rtas_cpu_state_change_mask(enum rtas_cpu_state state,
return 0;
for_each_cpu(cpu, cpus) {
struct device *dev = get_cpu_device(cpu);
switch (state) {
case DOWN:
-cpuret = cpu_down(cpu);
+cpuret = device_offline(dev);
break;
case UP:
-cpuret = cpu_up(cpu);
+cpuret = device_online(dev);
break;
}
-if (cpuret) {
+if (cpuret < 0) {
pr_debug("%s: cpu_%s for cpu#%d returned %d.\n",
__func__,
((state == UP) ? "up" : "down"),
@ -972,6 +974,8 @@ int rtas_ibm_suspend_me(u64 handle)
data.token = rtas_token("ibm,suspend-me");
data.complete = &done;
lock_device_hotplug();
/* All present CPUs must be online */
cpumask_andnot(offline_mask, cpu_present_mask, cpu_online_mask);
cpuret = rtas_online_cpus_mask(offline_mask);
@ -1003,6 +1007,7 @@ int rtas_ibm_suspend_me(u64 handle)
__func__);
out:
unlock_device_hotplug();
free_cpumask_var(offline_mask);
return atomic_read(&data.error);
}

View file

@ -399,6 +399,7 @@ void system_reset_exception(struct pt_regs *regs)
if (debugger(regs))
goto out;
kmsg_dump(KMSG_DUMP_OOPS);
/*
* A system reset is a request to dump, so we always send
* it through the crashdump code (if fadump or kdump are

View file

@ -57,9 +57,9 @@ static void export_imc_mode_and_cmd(struct device_node *node,
struct imc_pmu *pmu_ptr)
{
static u64 loc, *imc_mode_addr, *imc_cmd_addr;
-int chip = 0, nid;
char mode[16], cmd[16];
u32 cb_offset;
+struct imc_mem_info *ptr = pmu_ptr->mem_info;
imc_debugfs_parent = debugfs_create_dir("imc", powerpc_debugfs_root);
@ -73,20 +73,20 @@ static void export_imc_mode_and_cmd(struct device_node *node,
if (of_property_read_u32(node, "cb_offset", &cb_offset))
cb_offset = IMC_CNTL_BLK_OFFSET;
-for_each_node(nid) {
-loc = (u64)(pmu_ptr->mem_info[chip].vbase) + cb_offset;
+while (ptr->vbase != NULL) {
+loc = (u64)(ptr->vbase) + cb_offset;
imc_mode_addr = (u64 *)(loc + IMC_CNTL_BLK_MODE_OFFSET);
sprintf(mode, "imc_mode_%d", nid);
sprintf(mode, "imc_mode_%d", (u32)(ptr->id));
if (!imc_debugfs_create_x64(mode, 0600, imc_debugfs_parent,
imc_mode_addr))
goto err;
imc_cmd_addr = (u64 *)(loc + IMC_CNTL_BLK_CMD_OFFSET);
sprintf(cmd, "imc_cmd_%d", nid);
sprintf(cmd, "imc_cmd_%d", (u32)(ptr->id));
if (!imc_debugfs_create_x64(cmd, 0600, imc_debugfs_parent,
imc_cmd_addr))
goto err;
-chip++;
+ptr++;
}
return;

View file

@ -36,7 +36,8 @@ static __be64 *pnv_alloc_tce_level(int nid, unsigned int shift)
struct page *tce_mem = NULL;
__be64 *addr;
-tce_mem = alloc_pages_node(nid, GFP_KERNEL, shift - PAGE_SHIFT);
+tce_mem = alloc_pages_node(nid, GFP_ATOMIC | __GFP_NOWARN,
+shift - PAGE_SHIFT);
if (!tce_mem) {
pr_err("Failed to allocate a TCE memory, level shift=%d\n",
shift);
@ -161,6 +162,9 @@ void pnv_tce_free(struct iommu_table *tbl, long index, long npages)
if (ptce)
*ptce = cpu_to_be64(0);
else
/* Skip the rest of the level */
i |= tbl->it_level_size - 1;
}
}
@ -260,7 +264,6 @@ long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
unsigned int table_shift = max_t(unsigned int, entries_shift + 3,
PAGE_SHIFT);
const unsigned long tce_table_size = 1UL << table_shift;
unsigned int tmplevels = levels;
if (!levels || (levels > POWERNV_IOMMU_MAX_LEVELS))
return -EINVAL;
@ -268,9 +271,6 @@ long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
if (!is_power_of_2(window_size))
return -EINVAL;
if (alloc_userspace_copy && (window_size > (1ULL << 32)))
tmplevels = 1;
/* Adjust direct table size from window_size and levels */
entries_shift = (entries_shift + levels - 1) / levels;
level_shift = entries_shift + 3;
@ -281,7 +281,7 @@ long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
/* Allocate TCE table */
addr = pnv_pci_ioda2_table_do_alloc_pages(nid, level_shift,
-tmplevels, tce_table_size, &offset, &total_allocated);
+1, tce_table_size, &offset, &total_allocated);
/* addr==NULL means that the first level allocation failed */
if (!addr)
@ -292,18 +292,18 @@ long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
* we did not allocate as much as we wanted,
* release partially allocated table.
*/
-if (tmplevels == levels && offset < tce_table_size)
+if (levels == 1 && offset < tce_table_size)
goto free_tces_exit;
/* Allocate userspace view of the TCE table */
if (alloc_userspace_copy) {
offset = 0;
uas = pnv_pci_ioda2_table_do_alloc_pages(nid, level_shift,
-tmplevels, tce_table_size, &offset,
+1, tce_table_size, &offset,
&total_allocated_uas);
if (!uas)
goto free_tces_exit;
-if (tmplevels == levels && (offset < tce_table_size ||
+if (levels == 1 && (offset < tce_table_size ||
total_allocated_uas != total_allocated))
goto free_uas_exit;
}
@ -318,7 +318,7 @@ long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
pr_debug("Created TCE table: ws=%08llx ts=%lx @%08llx base=%lx uas=%p levels=%d/%d\n",
window_size, tce_table_size, bus_offset, tbl->it_base,
-tbl->it_userspace, tmplevels, levels);
+tbl->it_userspace, 1, levels);
return 0;

View file

@ -243,7 +243,7 @@ extern void pnv_npu_release_ownership(struct pnv_ioda_pe *npe);
extern int pnv_npu2_init(struct pnv_phb *phb);
/* pci-ioda-tce.c */
-#define POWERNV_IOMMU_DEFAULT_LEVELS 1
+#define POWERNV_IOMMU_DEFAULT_LEVELS 2
#define POWERNV_IOMMU_MAX_LEVELS 5
extern int pnv_tce_build(struct iommu_table *tbl, long index, long npages,

View file

@ -12,6 +12,7 @@
#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/stat.h>
#include <linux/completion.h>
@ -209,7 +210,11 @@ static int update_dt_node(__be32 phandle, s32 scope)
prop_data += vd;
}
cond_resched();
}
cond_resched();
} while (rtas_rc == 1);
of_node_put(dn);
@ -318,8 +323,12 @@ int pseries_devicetree_update(s32 scope)
add_dt_node(phandle, drc_index);
break;
}
cond_resched();
}
}
cond_resched();
} while (rc == 1);
kfree(rtas_buf);

View file

@ -325,6 +325,9 @@ static void pseries_lpar_idle(void)
* low power mode by ceding processor to hypervisor
*/
if (!prep_irq_for_idle())
return;
/* Indicate to hypervisor that we are idle. */
get_lppaca()->idle = 1;

View file

@ -2497,13 +2497,16 @@ static void dump_pacas(void)
static void dump_one_xive(int cpu)
{
unsigned int hwid = get_hard_smp_processor_id(cpu);
+bool hv = cpu_has_feature(CPU_FTR_HVMODE);
-opal_xive_dump(XIVE_DUMP_TM_HYP, hwid);
-opal_xive_dump(XIVE_DUMP_TM_POOL, hwid);
-opal_xive_dump(XIVE_DUMP_TM_OS, hwid);
-opal_xive_dump(XIVE_DUMP_TM_USER, hwid);
-opal_xive_dump(XIVE_DUMP_VP, hwid);
-opal_xive_dump(XIVE_DUMP_EMU_STATE, hwid);
+if (hv) {
+opal_xive_dump(XIVE_DUMP_TM_HYP, hwid);
+opal_xive_dump(XIVE_DUMP_TM_POOL, hwid);
+opal_xive_dump(XIVE_DUMP_TM_OS, hwid);
+opal_xive_dump(XIVE_DUMP_TM_USER, hwid);
+opal_xive_dump(XIVE_DUMP_VP, hwid);
+opal_xive_dump(XIVE_DUMP_EMU_STATE, hwid);
+}
if (setjmp(bus_error_jmp) != 0) {
catch_memory_errors = 0;

View file

@ -585,6 +585,9 @@ static int xts_aes_encrypt(struct blkcipher_desc *desc,
struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
if (!nbytes)
return -EINVAL;
if (unlikely(!xts_ctx->fc))
return xts_fallback_encrypt(desc, dst, src, nbytes);
@ -599,6 +602,9 @@ static int xts_aes_decrypt(struct blkcipher_desc *desc,
struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
if (!nbytes)
return -EINVAL;
if (unlikely(!xts_ctx->fc))
return xts_fallback_decrypt(desc, dst, src, nbytes);

View file

@ -269,7 +269,7 @@ static int hypfs_show_options(struct seq_file *s, struct dentry *root)
static int hypfs_fill_super(struct super_block *sb, void *data, int silent)
{
struct inode *root_inode;
-struct dentry *root_dentry;
+struct dentry *root_dentry, *update_file;
int rc = 0;
struct hypfs_sb_info *sbi;
@ -300,9 +300,10 @@ static int hypfs_fill_super(struct super_block *sb, void *data, int silent)
rc = hypfs_diag_create_files(root_dentry);
if (rc)
return rc;
-sbi->update_file = hypfs_create_update_file(root_dentry);
-if (IS_ERR(sbi->update_file))
-return PTR_ERR(sbi->update_file);
+update_file = hypfs_create_update_file(root_dentry);
+if (IS_ERR(update_file))
+return PTR_ERR(update_file);
+sbi->update_file = update_file;
hypfs_update_update(sb);
pr_info("Hypervisor filesystem mounted\n");
return 0;

View file

@ -58,6 +58,9 @@
#define INTEL_FAM6_ICELAKE_MOBILE 0x7E
#define INTEL_FAM6_ICELAKE_NNPI 0x9D
#define INTEL_FAM6_TIGERLAKE_L 0x8C
#define INTEL_FAM6_TIGERLAKE 0x8D
/* "Small Core" Processors (Atom) */
#define INTEL_FAM6_ATOM_BONNELL 0x1C /* Diamondville, Pineview */

View file

@ -1450,54 +1450,72 @@ static void lapic_setup_esr(void)
oldvalue, value);
}
#define APIC_IR_REGS APIC_ISR_NR
#define APIC_IR_BITS (APIC_IR_REGS * 32)
#define APIC_IR_MAPSIZE (APIC_IR_BITS / BITS_PER_LONG)
union apic_ir {
unsigned long map[APIC_IR_MAPSIZE];
u32 regs[APIC_IR_REGS];
};
static bool apic_check_and_ack(union apic_ir *irr, union apic_ir *isr)
{
int i, bit;
/* Read the IRRs */
for (i = 0; i < APIC_IR_REGS; i++)
irr->regs[i] = apic_read(APIC_IRR + i * 0x10);
/* Read the ISRs */
for (i = 0; i < APIC_IR_REGS; i++)
isr->regs[i] = apic_read(APIC_ISR + i * 0x10);
/*
* If the ISR map is not empty, ACK the APIC and run another round
* to verify whether a pending IRR has been unblocked and turned
* into an ISR.
*/
if (!bitmap_empty(isr->map, APIC_IR_BITS)) {
/*
* There can be multiple ISR bits set when a high priority
* interrupt preempted a lower priority one. Issue an ACK
* per set bit.
*/
for_each_set_bit(bit, isr->map, APIC_IR_BITS)
ack_APIC_irq();
return true;
}
return !bitmap_empty(irr->map, APIC_IR_BITS);
}
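Reviewer note: the union is sound only because both views cover the same 256 bits (APIC_IR_REGS 32-bit registers on one side, APIC_IR_MAPSIZE longs on the other). A hypothetical compile-time check, not part of this patch, would be:

/* Hypothetical sanity check (not in the patch): both views of
 * union apic_ir must describe the same storage for the bitmap
 * walk in apic_check_and_ack() to be valid. */
static inline void apic_ir_check_layout(void)
{
	BUILD_BUG_ON(sizeof(((union apic_ir *)0)->map) !=
		     sizeof(((union apic_ir *)0)->regs));
}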
/*
* After a crash, we no longer service the interrupts and a pending
* interrupt from previous kernel might still have ISR bit set.
*
* Most probably by now the CPU has serviced that pending interrupt and it
* might not have done the ack_APIC_irq() because it thought the interrupt
* came from i8259 as ExtInt. LAPIC did not get EOI so it does not clear
* the ISR bit and the CPU thinks it has already serviced the interrupt. Hence
* a vector might get locked. It was noticed for timer irq (vector
* 0x31). Issue an extra EOI to clear ISR.
*
* If there are pending IRR bits they turn into ISR bits after a higher
* priority ISR bit has been acked.
*/
static void apic_pending_intr_clear(void)
{
-long long max_loops = cpu_khz ? cpu_khz : 1000000;
-unsigned long long tsc = 0, ntsc;
-unsigned int queued;
-unsigned long value;
-int i, j, acked = 0;
+union apic_ir irr, isr;
+unsigned int i;
-if (boot_cpu_has(X86_FEATURE_TSC))
-tsc = rdtsc();
-/*
-* After a crash, we no longer service the interrupts and a pending
-* interrupt from previous kernel might still have ISR bit set.
-*
-* Most probably by now CPU has serviced that pending interrupt and
-* it might not have done the ack_APIC_irq() because it thought,
-* interrupt came from i8259 as ExtInt. LAPIC did not get EOI so it
-* does not clear the ISR bit and cpu thinks it has already serivced
-* the interrupt. Hence a vector might get locked. It was noticed
-* for timer irq (vector 0x31). Issue an extra EOI to clear ISR.
-*/
-do {
-queued = 0;
-for (i = APIC_ISR_NR - 1; i >= 0; i--)
-queued |= apic_read(APIC_IRR + i*0x10);
-for (i = APIC_ISR_NR - 1; i >= 0; i--) {
-value = apic_read(APIC_ISR + i*0x10);
-for_each_set_bit(j, &value, 32) {
-ack_APIC_irq();
-acked++;
-}
-}
-if (acked > 256) {
-pr_err("LAPIC pending interrupts after %d EOI\n", acked);
-break;
-}
-if (queued) {
-if (boot_cpu_has(X86_FEATURE_TSC) && cpu_khz) {
-ntsc = rdtsc();
-max_loops = (long long)cpu_khz << 10;
-max_loops -= ntsc - tsc;
-} else {
-max_loops--;
-}
-}
-} while (queued && max_loops > 0);
-WARN_ON(max_loops <= 0);
+/* 512 loops are way oversized and give the APIC a chance to obey. */
+for (i = 0; i < 512; i++) {
+if (!apic_check_and_ack(&irr, &isr))
+return;
+}
+/* Dump the IRR/ISR content if that failed */
+pr_warn("APIC: Stale IRR: %256pb ISR: %256pb\n", irr.map, isr.map);
}
/**
@ -1520,6 +1538,14 @@ static void setup_local_APIC(void)
return;
}
/*
* If this comes from kexec/kcrash the APIC might be enabled in
* SPIV. Soft disable it before doing further initialization.
*/
value = apic_read(APIC_SPIV);
value &= ~APIC_SPIV_APIC_ENABLED;
apic_write(APIC_SPIV, value);
#ifdef CONFIG_X86_32
/* Pound the ESR really hard over the head with a big hammer - mbligh */
if (lapic_is_integrated() && apic->disable_esr) {
@ -1565,6 +1591,7 @@ static void setup_local_APIC(void)
value &= ~APIC_TPRI_MASK;
apic_write(APIC_TASKPRI, value);
/* Clear eventually stale ISR/IRR bits */
apic_pending_intr_clear();
/*

View file

@ -400,6 +400,17 @@ static int activate_reserved(struct irq_data *irqd)
if (!irqd_can_reserve(irqd))
apicd->can_reserve = false;
}
/*
* Check to ensure that the effective affinity mask is a subset of
* the user supplied affinity mask, and warn the user if it is not
*/
if (!cpumask_subset(irq_data_get_effective_affinity_mask(irqd),
irq_data_get_affinity_mask(irqd))) {
pr_warn("irq %u: Affinity broken due to vector space exhaustion.\n",
irqd->irq);
}
return ret;
}

View file

@ -181,6 +181,12 @@ asmlinkage __visible void smp_reboot_interrupt(void)
irq_exit();
}
static int register_stop_handler(void)
{
return register_nmi_handler(NMI_LOCAL, smp_stop_nmi_callback,
NMI_FLAG_FIRST, "smp_stop");
}
static void native_stop_other_cpus(int wait)
{
unsigned long flags;
@ -214,39 +220,41 @@ static void native_stop_other_cpus(int wait)
apic->send_IPI_allbutself(REBOOT_VECTOR);
/*
* Don't wait longer than a second if the caller
* didn't ask us to wait.
* Don't wait longer than a second for IPI completion. The
* wait request is not checked here because that would
* prevent an NMI shutdown attempt in case that not all
* CPUs reach shutdown state.
*/
timeout = USEC_PER_SEC;
while (num_online_cpus() > 1 && (wait || timeout--))
while (num_online_cpus() > 1 && timeout--)
udelay(1);
}
/* if the REBOOT_VECTOR didn't work, try with the NMI */
if ((num_online_cpus() > 1) && (!smp_no_nmi_ipi)) {
if (register_nmi_handler(NMI_LOCAL, smp_stop_nmi_callback,
NMI_FLAG_FIRST, "smp_stop"))
/* Note: we ignore failures here */
/* Hope the REBOOT_IRQ is good enough */
goto finish;
/* sync above data before sending IRQ */
wmb();
pr_emerg("Shutting down cpus with NMI\n");
apic->send_IPI_allbutself(NMI_VECTOR);
if (num_online_cpus() > 1) {
/*
* Don't wait longer than a 10 ms if the caller
* didn't ask us to wait.
* If NMI IPI is enabled, try to register the stop handler
* and send the IPI. In any case try to wait for the other
* CPUs to stop.
*/
if (!smp_no_nmi_ipi && !register_stop_handler()) {
/* Sync above data before sending IRQ */
wmb();
pr_emerg("Shutting down cpus with NMI\n");
apic->send_IPI_allbutself(NMI_VECTOR);
}
/*
* Don't wait longer than 10 ms if the caller didn't
* request it. If wait is true, the machine hangs here if
* one or more CPUs do not reach shutdown state.
*/
timeout = USEC_PER_MSEC * 10;
while (num_online_cpus() > 1 && (wait || timeout--))
udelay(1);
}
finish:
local_irq_save(flags);
disable_local_APIC();
mcheck_cpu_clear(this_cpu_ptr(&cpu_info));

View file

@ -5368,6 +5368,8 @@ done_prefixes:
ctxt->memopp->addr.mem.ea + ctxt->_eip);
done:
if (rc == X86EMUL_PROPAGATE_FAULT)
ctxt->have_exception = true;
return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
}

View file

@ -581,8 +581,14 @@ static int kvm_read_nested_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
data, offset, len, access);
}
static inline u64 pdptr_rsvd_bits(struct kvm_vcpu *vcpu)
{
return rsvd_bits(cpuid_maxphyaddr(vcpu), 63) | rsvd_bits(5, 8) |
rsvd_bits(1, 2);
}
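Reviewer note: a worked example of the mask this helper builds, assuming cpuid_maxphyaddr(vcpu) returns 36:

/* Illustrative only:
 *   rsvd_bits(36, 63) == 0xfffffff000000000
 *   rsvd_bits(5, 8)   == 0x00000000000001e0
 *   rsvd_bits(1, 2)   == 0x0000000000000006
 * so pdptr_rsvd_bits() returns 0xfffffff0000001e6; load_pdptrs()
 * below rejects any present PDPTE with one of those bits set.
 */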
/*
-* Load the pae pdptrs. Return true is they are all valid.
+* Load the pae pdptrs. Return 1 if they are all valid, 0 otherwise.
*/
int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3)
{
@ -601,8 +607,7 @@ int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3)
}
for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
if ((pdpte[i] & PT_PRESENT_MASK) &&
-(pdpte[i] &
-vcpu->arch.mmu.guest_rsvd_check.rsvd_bits_mask[0][2])) {
+(pdpte[i] & pdptr_rsvd_bits(vcpu))) {
ret = 0;
goto out;
}
@ -6244,8 +6249,16 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu,
if (reexecute_instruction(vcpu, cr2, write_fault_to_spt,
emulation_type))
return EMULATE_DONE;
-if (ctxt->have_exception && inject_emulated_exception(vcpu))
+if (ctxt->have_exception) {
/*
* #UD should result in just EMULATION_FAILED, and trap-like
* exception should not be encountered during decode.
*/
WARN_ON_ONCE(ctxt->exception.vector == UD_VECTOR ||
exception_type(ctxt->exception.vector) == EXCPT_TRAP);
inject_emulated_exception(vcpu);
return EMULATE_DONE;
}
if (emulation_type & EMULTYPE_SKIP)
return EMULATE_FAIL;
return handle_emulation_failure(vcpu, emulation_type);

View file

@ -338,13 +338,15 @@ pti_clone_pgtable(unsigned long start, unsigned long end,
pud = pud_offset(p4d, addr);
if (pud_none(*pud)) {
-addr += PUD_SIZE;
+WARN_ON_ONCE(addr & ~PUD_MASK);
+addr = round_up(addr + 1, PUD_SIZE);
continue;
}
pmd = pmd_offset(pud, addr);
if (pmd_none(*pmd)) {
-addr += PMD_SIZE;
+WARN_ON_ONCE(addr & ~PMD_MASK);
+addr = round_up(addr + 1, PMD_SIZE);
continue;
}
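Reviewer note: a worked example of why round_up() is the correct fix, with assumed addresses and PUD_SIZE == 1 GiB (the PMD hunk above is analogous):

/* Illustrative only (PUD_SIZE == 0x40000000): */
unsigned long addr = 0x40123000UL;           /* not PUD aligned */
addr += 0x40000000UL;                        /* old: 0x80123000, still unaligned */
addr = round_up(0x40123001UL, 0x40000000UL); /* new: 0x80000000, aligned */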
@ -643,6 +645,8 @@ void __init pti_init(void)
*/
void pti_finalize(void)
{
if (!boot_cpu_has(X86_FEATURE_PTI))
return;
/*
* We need to clone everything (again) that maps parts of the
* kernel image.

View file

@ -232,6 +232,16 @@ static void flush_end_io(struct request *flush_rq, blk_status_t error)
/* release the tag's ownership to the req cloned from */
spin_lock_irqsave(&fq->mq_flush_lock, flags);
if (!refcount_dec_and_test(&flush_rq->ref)) {
fq->rq_status = error;
spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
return;
}
if (fq->rq_status != BLK_STS_OK)
error = fq->rq_status;
hctx = blk_mq_map_queue(q, flush_rq->mq_ctx->cpu);
if (!q->elevator) {
blk_mq_tag_set_rq(hctx, flush_rq->tag, fq->orig_rq);

View file

@ -844,7 +844,10 @@ static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
*/
if (blk_mq_req_expired(rq, next))
blk_mq_rq_timed_out(rq, reserved);
-if (refcount_dec_and_test(&rq->ref))
+if (is_flush_rq(rq, hctx))
+rq->end_io(rq, 0);
+else if (refcount_dec_and_test(&rq->ref))
__blk_mq_free_request(rq);
}

View file

@ -23,6 +23,7 @@ struct blk_flush_queue {
unsigned int flush_queue_delayed:1;
unsigned int flush_pending_idx:1;
unsigned int flush_running_idx:1;
blk_status_t rq_status;
unsigned long flush_pending_since;
struct list_head flush_queue[2];
struct list_head flush_data_in_flight;
@ -105,6 +106,12 @@ static inline void __blk_get_queue(struct request_queue *q)
kobject_get(&q->kobj);
}
static inline bool
is_flush_rq(struct request *req, struct blk_mq_hw_ctx *hctx)
{
return hctx->fq->flush_rq == req;
}
struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
int node, int cmd_size, gfp_t flags);
void blk_free_flush_queue(struct blk_flush_queue *q);

View file

@ -376,13 +376,6 @@ done:
* hardware queue, but we may return a request that is for a
* different hardware queue. This is because mq-deadline has shared
* state for all hardware queues, in terms of sorting, FIFOs, etc.
*
* For a zoned block device, __dd_dispatch_request() may return NULL
* if all the queued write requests are directed at zones that are already
* locked due to on-going write requests. In this case, make sure to mark
* the queue as needing a restart to ensure that the queue is run again
* and the pending writes dispatched once the target zones for the ongoing
* write requests are unlocked in dd_finish_request().
*/
static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
{
@ -391,9 +384,6 @@ static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
spin_lock(&dd->lock);
rq = __dd_dispatch_request(dd);
if (!rq && blk_queue_is_zoned(hctx->queue) &&
!list_empty(&dd->fifo_list[WRITE]))
blk_mq_sched_mark_restart_hctx(hctx);
spin_unlock(&dd->lock);
return rq;
@ -559,6 +549,13 @@ static void dd_prepare_request(struct request *rq, struct bio *bio)
* spinlock so that the zone is never unlocked while deadline_fifo_request()
* or deadline_next_request() are executing. This function is called for
* all requests, whether or not these requests complete successfully.
*
* For a zoned block device, __dd_dispatch_request() may have stopped
* dispatching requests if all the queued requests are write requests directed
* at zones that are already locked due to on-going write requests. To ensure
* write request dispatch progress in this case, mark the queue as needing a
* restart to ensure that the queue is run again after completion of the
* request and zones being unlocked.
*/
static void dd_finish_request(struct request *rq)
{
@ -570,6 +567,12 @@ static void dd_finish_request(struct request *rq)
spin_lock_irqsave(&dd->zone_lock, flags);
blk_req_zone_write_unlock(rq);
if (!list_empty(&dd->fifo_list[WRITE])) {
struct blk_mq_hw_ctx *hctx;
hctx = blk_mq_map_queue(q, rq->mq_ctx->cpu);
blk_mq_sched_mark_restart_hctx(hctx);
}
spin_unlock_irqrestore(&dd->zone_lock, flags);
}
}

View file

@ -282,9 +282,13 @@ static int acpi_processor_get_info(struct acpi_device *device)
}
if (acpi_duplicate_processor_id(pr->acpi_id)) {
-dev_err(&device->dev,
-"Failed to get unique processor _UID (0x%x)\n",
-pr->acpi_id);
+if (pr->acpi_id == 0xff)
+dev_info_once(&device->dev,
+"Entry not well-defined, consider updating BIOS\n");
+else
+dev_err(&device->dev,
+"Failed to get unique processor _UID (0x%x)\n",
+pr->acpi_id);
return -ENODEV;
}

View file

@ -369,8 +369,10 @@ static int acpi_get_psd(struct cpc_desc *cpc_ptr, acpi_handle handle)
union acpi_object *psd = NULL;
struct acpi_psd_package *pdomain;
status = acpi_evaluate_object_typed(handle, "_PSD", NULL, &buffer,
ACPI_TYPE_PACKAGE);
status = acpi_evaluate_object_typed(handle, "_PSD", NULL,
&buffer, ACPI_TYPE_PACKAGE);
if (status == AE_NOT_FOUND) /* _PSD is optional */
return 0;
if (ACPI_FAILURE(status))
return -ENODEV;

View file

@ -48,8 +48,10 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
if ((*ppos > max_size) ||
(*ppos + count > max_size) ||
(*ppos + count < count) ||
-(count > uncopied_bytes))
+(count > uncopied_bytes)) {
+kfree(buf);
return -EINVAL;
+}
if (copy_from_user(buf + (*ppos), user_buf, count)) {
kfree(buf);
@ -69,6 +71,7 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
add_taint(TAINT_OVERRIDDEN_ACPI_TABLE, LOCKDEP_NOW_UNRELIABLE);
}
kfree(buf);
return count;
}

View file

@ -462,8 +462,10 @@ int acpi_pci_irq_enable(struct pci_dev *dev)
* No IRQ known to the ACPI subsystem - maybe the BIOS /
* driver reported one, then use it. Exit in any case.
*/
-if (!acpi_pci_irq_valid(dev, pin))
+if (!acpi_pci_irq_valid(dev, pin)) {
+kfree(entry);
return 0;
+}
if (acpi_isa_register_gsi(dev))
dev_warn(&dev->dev, "PCI INT %c: no GSI\n",

View file

@ -81,6 +81,12 @@ enum board_ids {
board_ahci_sb700, /* for SB700 and SB800 */
board_ahci_vt8251,
/*
* board IDs for Intel chipsets that support more than 6 ports
* *and* end up needing the PCS quirk.
*/
board_ahci_pcs7,
/* aliases */
board_ahci_mcp_linux = board_ahci_mcp65,
board_ahci_mcp67 = board_ahci_mcp65,
@ -236,6 +242,12 @@ static const struct ata_port_info ahci_port_info[] = {
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_vt8251_ops,
},
[board_ahci_pcs7] = {
.flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_ops,
},
};
static const struct pci_device_id ahci_pci_tbl[] = {
@ -280,26 +292,26 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */
{ PCI_VDEVICE(INTEL, 0x3b2c), board_ahci_mobile }, /* PCH M RAID */
{ PCI_VDEVICE(INTEL, 0x3b2f), board_ahci }, /* PCH AHCI */
-{ PCI_VDEVICE(INTEL, 0x19b0), board_ahci }, /* DNV AHCI */
-{ PCI_VDEVICE(INTEL, 0x19b1), board_ahci }, /* DNV AHCI */
-{ PCI_VDEVICE(INTEL, 0x19b2), board_ahci }, /* DNV AHCI */
-{ PCI_VDEVICE(INTEL, 0x19b3), board_ahci }, /* DNV AHCI */
-{ PCI_VDEVICE(INTEL, 0x19b4), board_ahci }, /* DNV AHCI */
-{ PCI_VDEVICE(INTEL, 0x19b5), board_ahci }, /* DNV AHCI */
-{ PCI_VDEVICE(INTEL, 0x19b6), board_ahci }, /* DNV AHCI */
-{ PCI_VDEVICE(INTEL, 0x19b7), board_ahci }, /* DNV AHCI */
-{ PCI_VDEVICE(INTEL, 0x19bE), board_ahci }, /* DNV AHCI */
-{ PCI_VDEVICE(INTEL, 0x19bF), board_ahci }, /* DNV AHCI */
-{ PCI_VDEVICE(INTEL, 0x19c0), board_ahci }, /* DNV AHCI */
-{ PCI_VDEVICE(INTEL, 0x19c1), board_ahci }, /* DNV AHCI */
-{ PCI_VDEVICE(INTEL, 0x19c2), board_ahci }, /* DNV AHCI */
-{ PCI_VDEVICE(INTEL, 0x19c3), board_ahci }, /* DNV AHCI */
-{ PCI_VDEVICE(INTEL, 0x19c4), board_ahci }, /* DNV AHCI */
-{ PCI_VDEVICE(INTEL, 0x19c5), board_ahci }, /* DNV AHCI */
-{ PCI_VDEVICE(INTEL, 0x19c6), board_ahci }, /* DNV AHCI */
-{ PCI_VDEVICE(INTEL, 0x19c7), board_ahci }, /* DNV AHCI */
-{ PCI_VDEVICE(INTEL, 0x19cE), board_ahci }, /* DNV AHCI */
-{ PCI_VDEVICE(INTEL, 0x19cF), board_ahci }, /* DNV AHCI */
+{ PCI_VDEVICE(INTEL, 0x19b0), board_ahci_pcs7 }, /* DNV AHCI */
+{ PCI_VDEVICE(INTEL, 0x19b1), board_ahci_pcs7 }, /* DNV AHCI */
+{ PCI_VDEVICE(INTEL, 0x19b2), board_ahci_pcs7 }, /* DNV AHCI */
+{ PCI_VDEVICE(INTEL, 0x19b3), board_ahci_pcs7 }, /* DNV AHCI */
+{ PCI_VDEVICE(INTEL, 0x19b4), board_ahci_pcs7 }, /* DNV AHCI */
+{ PCI_VDEVICE(INTEL, 0x19b5), board_ahci_pcs7 }, /* DNV AHCI */
+{ PCI_VDEVICE(INTEL, 0x19b6), board_ahci_pcs7 }, /* DNV AHCI */
+{ PCI_VDEVICE(INTEL, 0x19b7), board_ahci_pcs7 }, /* DNV AHCI */
+{ PCI_VDEVICE(INTEL, 0x19bE), board_ahci_pcs7 }, /* DNV AHCI */
+{ PCI_VDEVICE(INTEL, 0x19bF), board_ahci_pcs7 }, /* DNV AHCI */
+{ PCI_VDEVICE(INTEL, 0x19c0), board_ahci_pcs7 }, /* DNV AHCI */
+{ PCI_VDEVICE(INTEL, 0x19c1), board_ahci_pcs7 }, /* DNV AHCI */
+{ PCI_VDEVICE(INTEL, 0x19c2), board_ahci_pcs7 }, /* DNV AHCI */
+{ PCI_VDEVICE(INTEL, 0x19c3), board_ahci_pcs7 }, /* DNV AHCI */
+{ PCI_VDEVICE(INTEL, 0x19c4), board_ahci_pcs7 }, /* DNV AHCI */
+{ PCI_VDEVICE(INTEL, 0x19c5), board_ahci_pcs7 }, /* DNV AHCI */
+{ PCI_VDEVICE(INTEL, 0x19c6), board_ahci_pcs7 }, /* DNV AHCI */
+{ PCI_VDEVICE(INTEL, 0x19c7), board_ahci_pcs7 }, /* DNV AHCI */
+{ PCI_VDEVICE(INTEL, 0x19cE), board_ahci_pcs7 }, /* DNV AHCI */
+{ PCI_VDEVICE(INTEL, 0x19cF), board_ahci_pcs7 }, /* DNV AHCI */
{ PCI_VDEVICE(INTEL, 0x1c02), board_ahci }, /* CPT AHCI */
{ PCI_VDEVICE(INTEL, 0x1c03), board_ahci_mobile }, /* CPT M AHCI */
{ PCI_VDEVICE(INTEL, 0x1c04), board_ahci }, /* CPT RAID */
@ -639,30 +651,6 @@ static void ahci_pci_save_initial_config(struct pci_dev *pdev,
ahci_save_initial_config(&pdev->dev, hpriv);
}
static int ahci_pci_reset_controller(struct ata_host *host)
{
struct pci_dev *pdev = to_pci_dev(host->dev);
int rc;
rc = ahci_reset_controller(host);
if (rc)
return rc;
if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
struct ahci_host_priv *hpriv = host->private_data;
u16 tmp16;
/* configure PCS */
pci_read_config_word(pdev, 0x92, &tmp16);
if ((tmp16 & hpriv->port_map) != hpriv->port_map) {
tmp16 |= hpriv->port_map;
pci_write_config_word(pdev, 0x92, tmp16);
}
}
return 0;
}
static void ahci_pci_init_controller(struct ata_host *host)
{
struct ahci_host_priv *hpriv = host->private_data;
@ -865,7 +853,7 @@ static int ahci_pci_device_runtime_resume(struct device *dev)
struct ata_host *host = pci_get_drvdata(pdev);
int rc;
-rc = ahci_pci_reset_controller(host);
+rc = ahci_reset_controller(host);
if (rc)
return rc;
ahci_pci_init_controller(host);
@ -900,7 +888,7 @@ static int ahci_pci_device_resume(struct device *dev)
ahci_mcp89_apple_enable(pdev);
if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
-rc = ahci_pci_reset_controller(host);
+rc = ahci_reset_controller(host);
if (rc)
return rc;
@ -1635,6 +1623,34 @@ update_policy:
ap->target_lpm_policy = policy;
}
static void ahci_intel_pcs_quirk(struct pci_dev *pdev, struct ahci_host_priv *hpriv)
{
const struct pci_device_id *id = pci_match_id(ahci_pci_tbl, pdev);
u16 tmp16;
/*
* Only apply the 6-port PCS quirk for known legacy platforms.
*/
if (!id || id->vendor != PCI_VENDOR_ID_INTEL)
return;
if (((enum board_ids) id->driver_data) < board_ahci_pcs7)
return;
/*
* port_map is determined from PORTS_IMPL PCI register which is
* implemented as write or write-once register. If the register
* isn't programmed, ahci automatically generates it from number
* of ports, which is good enough for PCS programming. It is
* otherwise expected that platform firmware enables the ports
* before the OS boots.
*/
pci_read_config_word(pdev, PCS_6, &tmp16);
if ((tmp16 & hpriv->port_map) != hpriv->port_map) {
tmp16 |= hpriv->port_map;
pci_write_config_word(pdev, PCS_6, tmp16);
}
}
static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
unsigned int board_id = ent->driver_data;
@ -1747,6 +1763,12 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* save initial config */
ahci_pci_save_initial_config(pdev, hpriv);
/*
* If platform firmware failed to enable ports, try to enable
* them here.
*/
ahci_intel_pcs_quirk(pdev, hpriv);
/* prepare host */
if (hpriv->cap & HOST_CAP_NCQ) {
pi.flags |= ATA_FLAG_NCQ;
@ -1856,7 +1878,7 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
return rc;
-rc = ahci_pci_reset_controller(host);
+rc = ahci_reset_controller(host);
if (rc)
return rc;

View file

@ -261,6 +261,8 @@ enum {
ATA_FLAG_ACPI_SATA | ATA_FLAG_AN,
ICH_MAP = 0x90, /* ICH MAP register */
PCS_6 = 0x92, /* 6 port PCS */
PCS_7 = 0x94, /* 7+ port PCS (Denverton) */
/* em constants */
EM_MAX_SLOTS = 8,

View file

@ -44,7 +44,7 @@ config REGMAP_IRQ
config REGMAP_SOUNDWIRE
tristate
-depends on SOUNDWIRE_BUS
+depends on SOUNDWIRE
config REGMAP_SCCB
tristate

View file

@ -157,6 +157,7 @@ out2:
out1:
return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(soc_device_register);
/* Ensure soc_dev->attr is freed prior to calling soc_device_unregister. */
void soc_device_unregister(struct soc_device *soc_dev)
@ -166,6 +167,7 @@ void soc_device_unregister(struct soc_device *soc_dev)
device_unregister(&soc_dev->dev);
early_soc_dev_attr = NULL;
}
EXPORT_SYMBOL_GPL(soc_device_unregister);
static int __init soc_bus_register(void)
{

View file

@ -1719,6 +1719,7 @@ static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode,
case LOOP_SET_FD:
case LOOP_CHANGE_FD:
case LOOP_SET_BLOCK_SIZE:
case LOOP_SET_DIRECT_IO:
err = lo_ioctl(bdev, mode, cmd, arg);
break;
default:

View file

@ -353,8 +353,10 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
}
config = nbd->config;
-if (!mutex_trylock(&cmd->lock))
+if (!mutex_trylock(&cmd->lock)) {
+nbd_config_put(nbd);
return BLK_EH_RESET_TIMER;
+}
if (config->num_connections > 1) {
dev_err_ratelimited(nbd_to_dev(nbd),

View file

@ -2596,7 +2596,6 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
if (ret)
return ret;
if (!blk_queue_scsi_passthrough(bdev_get_queue(bdev))) {
WARN_ONCE(true, "Attempt to register a non-SCSI queue\n");
blkdev_put(bdev, FMODE_READ | FMODE_NDELAY);
return -EINVAL;
}

View file

@ -67,7 +67,7 @@ static void add_early_randomness(struct hwrng *rng)
size_t size = min_t(size_t, 16, rng_buffer_size());
mutex_lock(&reading_mutex);
-bytes_read = rng_get_data(rng, rng_buffer, size, 1);
+bytes_read = rng_get_data(rng, rng_buffer, size, 0);
mutex_unlock(&reading_mutex);
if (bytes_read > 0)
add_device_randomness(rng_buffer, bytes_read);

View file

@ -221,6 +221,9 @@ struct smi_info {
*/
bool irq_enable_broken;
/* Is the driver in maintenance mode? */
bool in_maintenance_mode;
/*
* Did we get an attention that we did not handle?
*/
@ -1013,11 +1016,20 @@ static int ipmi_thread(void *data)
spin_unlock_irqrestore(&(smi_info->si_lock), flags);
busy_wait = ipmi_thread_busy_wait(smi_result, smi_info,
&busy_until);
-if (smi_result == SI_SM_CALL_WITHOUT_DELAY)
+if (smi_result == SI_SM_CALL_WITHOUT_DELAY) {
; /* do nothing */
-else if (smi_result == SI_SM_CALL_WITH_DELAY && busy_wait)
-schedule();
-else if (smi_result == SI_SM_IDLE) {
+} else if (smi_result == SI_SM_CALL_WITH_DELAY && busy_wait) {
+/*
+* In maintenance mode we run as fast as
+* possible to allow firmware updates to
+* complete as fast as possible, but normally
+* don't bang on the scheduler.
+*/
+if (smi_info->in_maintenance_mode)
+schedule();
+else
+usleep_range(100, 200);
+} else if (smi_result == SI_SM_IDLE) {
if (atomic_read(&smi_info->need_watch)) {
schedule_timeout_interruptible(100);
} else {
@ -1025,8 +1037,9 @@ static int ipmi_thread(void *data)
__set_current_state(TASK_INTERRUPTIBLE);
schedule();
}
-} else
+} else {
schedule_timeout_interruptible(1);
+}
}
return 0;
}
@ -1201,6 +1214,7 @@ static void set_maintenance_mode(void *send_info, bool enable)
if (!enable)
atomic_set(&smi_info->req_events, 0);
smi_info->in_maintenance_mode = enable;
}
static void shutdown_smi(void *send_info);

View file

@ -97,6 +97,13 @@ void __weak unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
}
#endif
static inline bool should_stop_iteration(void)
{
if (need_resched())
cond_resched();
return fatal_signal_pending(current);
}
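Reviewer note: a sketch of the call pattern the hunks below adopt (loop body elided; names mirror read_mem()/write_mem()):

/* Sketch only: poll once per copied chunk so a fatal signal ends a
 * huge /dev/mem transfer instead of pinning the CPU. */
while (count > 0) {
	/* ... copy one sz-byte chunk, advance p/buf, shrink count ... */
	if (should_stop_iteration())
		break;
}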
/*
* This function reads the *physical* memory. The f_pos points directly to the
* memory location.
@ -175,6 +182,8 @@ static ssize_t read_mem(struct file *file, char __user *buf,
p += sz;
count -= sz;
read += sz;
if (should_stop_iteration())
break;
}
kfree(bounce);
@ -251,6 +260,8 @@ static ssize_t write_mem(struct file *file, const char __user *buf,
p += sz;
count -= sz;
written += sz;
if (should_stop_iteration())
break;
}
*ppos += written;
@ -468,6 +479,10 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
read += sz;
low_count -= sz;
count -= sz;
if (should_stop_iteration()) {
count = 0;
break;
}
}
}
@ -492,6 +507,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
buf += sz;
read += sz;
p += sz;
if (should_stop_iteration())
break;
}
free_page((unsigned long)kbuf);
}
@ -544,6 +561,8 @@ static ssize_t do_write_kmem(unsigned long p, const char __user *buf,
p += sz;
count -= sz;
written += sz;
if (should_stop_iteration())
break;
}
*ppos += written;
@ -595,6 +614,8 @@ static ssize_t write_kmem(struct file *file, const char __user *buf,
buf += sz;
virtr += sz;
p += sz;
if (should_stop_iteration())
break;
}
free_page((unsigned long)kbuf);
}

View file

@ -187,12 +187,13 @@ static int tpm_class_shutdown(struct device *dev)
{
struct tpm_chip *chip = container_of(dev, struct tpm_chip, dev);
down_write(&chip->ops_sem);
if (chip->flags & TPM_CHIP_FLAG_TPM2) {
down_write(&chip->ops_sem);
tpm2_shutdown(chip, TPM2_SU_CLEAR);
chip->ops = NULL;
up_write(&chip->ops_sem);
}
chip->ops = NULL;
up_write(&chip->ops_sem);
return 0;
}

View file

@ -39,7 +39,6 @@ static ssize_t pubek_show(struct device *dev, struct device_attribute *attr,
{
struct tpm_buf tpm_buf;
struct tpm_readpubek_out *out;
ssize_t rc;
int i;
char *str = buf;
struct tpm_chip *chip = to_tpm_chip(dev);
@ -47,19 +46,18 @@ static ssize_t pubek_show(struct device *dev, struct device_attribute *attr,
memset(&anti_replay, 0, sizeof(anti_replay));
rc = tpm_buf_init(&tpm_buf, TPM_TAG_RQU_COMMAND, TPM_ORD_READPUBEK);
if (rc)
return rc;
if (tpm_try_get_ops(chip))
return 0;
if (tpm_buf_init(&tpm_buf, TPM_TAG_RQU_COMMAND, TPM_ORD_READPUBEK))
goto out_ops;
tpm_buf_append(&tpm_buf, anti_replay, sizeof(anti_replay));
rc = tpm_transmit_cmd(chip, NULL, tpm_buf.data, PAGE_SIZE,
if (tpm_transmit_cmd(chip, NULL, tpm_buf.data, PAGE_SIZE,
READ_PUBEK_RESULT_MIN_BODY_SIZE, 0,
"attempting to read the PUBEK");
if (rc) {
tpm_buf_destroy(&tpm_buf);
return 0;
}
"attempting to read the PUBEK"))
goto out_buf;
out = (struct tpm_readpubek_out *)&tpm_buf.data[10];
str +=
@ -90,9 +88,11 @@ static ssize_t pubek_show(struct device *dev, struct device_attribute *attr,
str += sprintf(str, "\n");
}
rc = str - buf;
out_buf:
tpm_buf_destroy(&tpm_buf);
return rc;
out_ops:
tpm_put_ops(chip);
return str - buf;
}
static DEVICE_ATTR_RO(pubek);
@ -106,12 +106,16 @@ static ssize_t pcrs_show(struct device *dev, struct device_attribute *attr,
char *str = buf;
struct tpm_chip *chip = to_tpm_chip(dev);
rc = tpm_getcap(chip, TPM_CAP_PROP_PCR, &cap,
"attempting to determine the number of PCRS",
sizeof(cap.num_pcrs));
if (rc)
if (tpm_try_get_ops(chip))
return 0;
if (tpm_getcap(chip, TPM_CAP_PROP_PCR, &cap,
"attempting to determine the number of PCRS",
sizeof(cap.num_pcrs))) {
tpm_put_ops(chip);
return 0;
}
num_pcrs = be32_to_cpu(cap.num_pcrs);
for (i = 0; i < num_pcrs; i++) {
rc = tpm_pcr_read_dev(chip, i, digest);
@ -122,6 +126,7 @@ static ssize_t pcrs_show(struct device *dev, struct device_attribute *attr,
str += sprintf(str, "%02X ", digest[j]);
str += sprintf(str, "\n");
}
tpm_put_ops(chip);
return str - buf;
}
static DEVICE_ATTR_RO(pcrs);
@ -129,16 +134,21 @@ static DEVICE_ATTR_RO(pcrs);
static ssize_t enabled_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct tpm_chip *chip = to_tpm_chip(dev);
ssize_t rc = 0;
cap_t cap;
ssize_t rc;
rc = tpm_getcap(to_tpm_chip(dev), TPM_CAP_FLAG_PERM, &cap,
"attempting to determine the permanent enabled state",
sizeof(cap.perm_flags));
if (rc)
if (tpm_try_get_ops(chip))
return 0;
if (tpm_getcap(chip, TPM_CAP_FLAG_PERM, &cap,
"attempting to determine the permanent enabled state",
sizeof(cap.perm_flags)))
goto out_ops;
rc = sprintf(buf, "%d\n", !cap.perm_flags.disable);
out_ops:
tpm_put_ops(chip);
return rc;
}
static DEVICE_ATTR_RO(enabled);
@ -146,16 +156,21 @@ static DEVICE_ATTR_RO(enabled);
static ssize_t active_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct tpm_chip *chip = to_tpm_chip(dev);
ssize_t rc = 0;
cap_t cap;
ssize_t rc;
rc = tpm_getcap(to_tpm_chip(dev), TPM_CAP_FLAG_PERM, &cap,
"attempting to determine the permanent active state",
sizeof(cap.perm_flags));
if (rc)
if (tpm_try_get_ops(chip))
return 0;
if (tpm_getcap(chip, TPM_CAP_FLAG_PERM, &cap,
"attempting to determine the permanent active state",
sizeof(cap.perm_flags)))
goto out_ops;
rc = sprintf(buf, "%d\n", !cap.perm_flags.deactivated);
out_ops:
tpm_put_ops(chip);
return rc;
}
static DEVICE_ATTR_RO(active);
@ -163,16 +178,21 @@ static DEVICE_ATTR_RO(active);
static ssize_t owned_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct tpm_chip *chip = to_tpm_chip(dev);
ssize_t rc = 0;
cap_t cap;
ssize_t rc;
rc = tpm_getcap(to_tpm_chip(dev), TPM_CAP_PROP_OWNER, &cap,
"attempting to determine the owner state",
sizeof(cap.owned));
if (rc)
if (tpm_try_get_ops(chip))
return 0;
if (tpm_getcap(to_tpm_chip(dev), TPM_CAP_PROP_OWNER, &cap,
"attempting to determine the owner state",
sizeof(cap.owned)))
goto out_ops;
rc = sprintf(buf, "%d\n", cap.owned);
out_ops:
tpm_put_ops(chip);
return rc;
}
static DEVICE_ATTR_RO(owned);
@ -180,16 +200,21 @@ static DEVICE_ATTR_RO(owned);
static ssize_t temp_deactivated_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct tpm_chip *chip = to_tpm_chip(dev);
ssize_t rc = 0;
cap_t cap;
ssize_t rc;
rc = tpm_getcap(to_tpm_chip(dev), TPM_CAP_FLAG_VOL, &cap,
"attempting to determine the temporary state",
sizeof(cap.stclear_flags));
if (rc)
if (tpm_try_get_ops(chip))
return 0;
if (tpm_getcap(to_tpm_chip(dev), TPM_CAP_FLAG_VOL, &cap,
"attempting to determine the temporary state",
sizeof(cap.stclear_flags)))
goto out_ops;
rc = sprintf(buf, "%d\n", cap.stclear_flags.deactivated);
out_ops:
tpm_put_ops(chip);
return rc;
}
static DEVICE_ATTR_RO(temp_deactivated);
@ -198,15 +223,18 @@ static ssize_t caps_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct tpm_chip *chip = to_tpm_chip(dev);
cap_t cap;
ssize_t rc;
ssize_t rc = 0;
char *str = buf;
cap_t cap;
rc = tpm_getcap(chip, TPM_CAP_PROP_MANUFACTURER, &cap,
"attempting to determine the manufacturer",
sizeof(cap.manufacturer_id));
if (rc)
if (tpm_try_get_ops(chip))
return 0;
if (tpm_getcap(chip, TPM_CAP_PROP_MANUFACTURER, &cap,
"attempting to determine the manufacturer",
sizeof(cap.manufacturer_id)))
goto out_ops;
str += sprintf(str, "Manufacturer: 0x%x\n",
be32_to_cpu(cap.manufacturer_id));
@ -223,20 +251,22 @@ static ssize_t caps_show(struct device *dev, struct device_attribute *attr,
cap.tpm_version_1_2.revMinor);
} else {
/* Otherwise just use TPM_STRUCT_VER */
rc = tpm_getcap(chip, TPM_CAP_VERSION_1_1, &cap,
"attempting to determine the 1.1 version",
sizeof(cap.tpm_version));
if (rc)
return 0;
if (tpm_getcap(chip, TPM_CAP_VERSION_1_1, &cap,
"attempting to determine the 1.1 version",
sizeof(cap.tpm_version)))
goto out_ops;
str += sprintf(str,
"TCG version: %d.%d\nFirmware version: %d.%d\n",
cap.tpm_version.Major,
cap.tpm_version.Minor,
cap.tpm_version.revMajor,
cap.tpm_version.revMinor);
}
return str - buf;
}
rc = str - buf;
out_ops:
tpm_put_ops(chip);
return rc;
}
static DEVICE_ATTR_RO(caps);
@ -244,10 +274,12 @@ static ssize_t cancel_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct tpm_chip *chip = to_tpm_chip(dev);
if (chip == NULL)
if (tpm_try_get_ops(chip))
return 0;
chip->ops->cancel(chip);
tpm_put_ops(chip);
return count;
}
static DEVICE_ATTR_WO(cancel);
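Reviewer note: every TPM 1.x sysfs handler in this hunk is converted to the same guard, so the chip cannot be unregistered while a command is in flight. A minimal sketch of the pattern (handler name assumed):

/* Minimal sketch of the tpm_try_get_ops()/tpm_put_ops() pattern
 * applied above: hold a reference on chip->ops for the whole command
 * sequence and drop it on every exit path. */
static ssize_t example_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct tpm_chip *chip = to_tpm_chip(dev);
	ssize_t rc = 0;

	if (tpm_try_get_ops(chip))	/* chip may be going away */
		return 0;
	/* ... issue TPM commands and format buf here ... */
	tpm_put_ops(chip);
	return rc;
}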

View file

@ -67,16 +67,17 @@ int owl_clk_probe(struct device *dev, struct clk_hw_onecell_data *hw_clks)
struct clk_hw *hw;
for (i = 0; i < hw_clks->num; i++) {
const char *name;
hw = hw_clks->hws[i];
if (IS_ERR_OR_NULL(hw))
continue;
name = hw->init->name;
ret = devm_clk_hw_register(dev, hw);
if (ret) {
dev_err(dev, "Couldn't register clock %d - %s\n",
-i, hw->init->name);
+i, name);
return ret;
}
}

View file

@ -27,6 +27,10 @@
#define MOR_KEY_MASK (0xff << 16)
#define clk_main_parent_select(s) (((s) & \
(AT91_PMC_MOSCEN | \
AT91_PMC_OSCBYPASS)) ? 1 : 0)
struct clk_main_osc {
struct clk_hw hw;
struct regmap *regmap;
@ -119,7 +123,7 @@ static int clk_main_osc_is_prepared(struct clk_hw *hw)
regmap_read(regmap, AT91_PMC_SR, &status);
-return (status & AT91_PMC_MOSCS) && (tmp & AT91_PMC_MOSCEN);
+return (status & AT91_PMC_MOSCS) && clk_main_parent_select(tmp);
}
static const struct clk_ops main_osc_ops = {
@ -530,7 +534,7 @@ static u8 clk_sam9x5_main_get_parent(struct clk_hw *hw)
regmap_read(clkmain->regmap, AT91_CKGR_MOR, &status);
-return status & AT91_PMC_MOSCEN ? 1 : 0;
+return clk_main_parent_select(status);
}
static const struct clk_ops sam9x5_main_ops = {
@ -572,7 +576,7 @@ at91_clk_register_sam9x5_main(struct regmap *regmap,
clkmain->hw.init = &init;
clkmain->regmap = regmap;
regmap_read(clkmain->regmap, AT91_CKGR_MOR, &status);
-clkmain->parent = status & AT91_PMC_MOSCEN ? 1 : 0;
+clkmain->parent = clk_main_parent_select(status);
hw = &clkmain->hw;
ret = clk_hw_register(NULL, &clkmain->hw);

View file

@ -610,7 +610,7 @@ static const struct clockgen_chipinfo chipinfo[] = {
.guts_compat = "fsl,qoriq-device-config-1.0",
.init_periph = p5020_init_periph,
.cmux_groups = {
-&p2041_cmux_grp1, &p2041_cmux_grp2
+&p5020_cmux_grp1, &p5020_cmux_grp2
},
.cmux_to_group = {
0, 1, -1

View file

@ -647,7 +647,7 @@ static struct clk_rcg2 gcc_sdcc2_apps_clk_src = {
.name = "gcc_sdcc2_apps_clk_src",
.parent_names = gcc_parent_names_10,
.num_parents = 5,
.ops = &clk_rcg2_ops,
.ops = &clk_rcg2_floor_ops,
},
};
@@ -671,7 +671,7 @@ static struct clk_rcg2 gcc_sdcc4_apps_clk_src = {
.name = "gcc_sdcc4_apps_clk_src",
.parent_names = gcc_parent_names_0,
.num_parents = 4,
.ops = &clk_rcg2_ops,
.ops = &clk_rcg2_floor_ops,
},
};

View file

@@ -341,7 +341,8 @@ void __init cpg_mstp_add_clk_domain(struct device_node *np)
return;
pd->name = np->name;
pd->flags = GENPD_FLAG_PM_CLK | GENPD_FLAG_ACTIVE_WAKEUP;
pd->flags = GENPD_FLAG_PM_CLK | GENPD_FLAG_ALWAYS_ON |
GENPD_FLAG_ACTIVE_WAKEUP;
pd->attach_dev = cpg_mstp_attach_dev;
pd->detach_dev = cpg_mstp_detach_dev;
pm_genpd_init(pd, &pm_domain_always_on_gov, false);

View file

@@ -514,7 +514,8 @@ static int __init cpg_mssr_add_clk_domain(struct device *dev,
genpd = &pd->genpd;
genpd->name = np->name;
genpd->flags = GENPD_FLAG_PM_CLK | GENPD_FLAG_ACTIVE_WAKEUP;
genpd->flags = GENPD_FLAG_PM_CLK | GENPD_FLAG_ALWAYS_ON |
GENPD_FLAG_ACTIVE_WAKEUP;
genpd->attach_dev = cpg_mssr_attach_dev;
genpd->detach_dev = cpg_mssr_detach_dev;
pm_genpd_init(genpd, &pm_domain_always_on_gov, false);

View file

@@ -298,9 +298,10 @@ static u8 dmn_clk_get_parent(struct clk_hw *hw)
{
struct clk_dmn *clk = to_dmnclk(hw);
u32 cfg = clkc_readl(clk->regofs);
const char *name = clk_hw_get_name(hw);
/* parent of io domain can only be pll3 */
if (strcmp(hw->init->name, "io") == 0)
if (strcmp(name, "io") == 0)
return 4;
WARN_ON((cfg & (BIT(3) - 1)) > 4);
@@ -312,9 +313,10 @@ static int dmn_clk_set_parent(struct clk_hw *hw, u8 parent)
{
struct clk_dmn *clk = to_dmnclk(hw);
u32 cfg = clkc_readl(clk->regofs);
const char *name = clk_hw_get_name(hw);
/* parent of io domain can only be pll3 */
if (strcmp(hw->init->name, "io") == 0)
if (strcmp(name, "io") == 0)
return -EINVAL;
cfg &= ~(BIT(3) - 1);
@@ -354,7 +356,8 @@ static long dmn_clk_round_rate(struct clk_hw *hw, unsigned long rate,
{
unsigned long fin;
unsigned ratio, wait, hold;
unsigned bits = (strcmp(hw->init->name, "mem") == 0) ? 3 : 4;
const char *name = clk_hw_get_name(hw);
unsigned bits = (strcmp(name, "mem") == 0) ? 3 : 4;
fin = *parent_rate;
ratio = fin / rate;
@@ -376,7 +379,8 @@ static int dmn_clk_set_rate(struct clk_hw *hw, unsigned long rate,
struct clk_dmn *clk = to_dmnclk(hw);
unsigned long fin;
unsigned ratio, wait, hold, reg;
unsigned bits = (strcmp(hw->init->name, "mem") == 0) ? 3 : 4;
const char *name = clk_hw_get_name(hw);
unsigned bits = (strcmp(name, "mem") == 0) ? 3 : 4;
fin = parent_rate;
ratio = fin / rate;

View file

@@ -71,16 +71,17 @@ int sprd_clk_probe(struct device *dev, struct clk_hw_onecell_data *clkhw)
struct clk_hw *hw;
for (i = 0; i < clkhw->num; i++) {
const char *name;
hw = clkhw->hws[i];
if (!hw)
continue;
name = hw->init->name;
ret = devm_clk_hw_register(dev, hw);
if (ret) {
dev_err(dev, "Couldn't register clock %d - %s\n",
i, hw->init->name);
i, name);
return ret;
}
}

View file

@@ -136,6 +136,7 @@ static unsigned long _sprd_pll_recalc_rate(const struct sprd_pll *pll,
k2 + refin * nint * CLK_PLL_1M;
}
kfree(cfg);
return rate;
}
@@ -222,6 +223,7 @@ static int _sprd_pll_set_rate(const struct sprd_pll *pll,
if (!ret)
udelay(pll->udelay);
kfree(cfg);
return ret;
}

View file

@@ -499,6 +499,9 @@ static struct clk_hw_onecell_data sun8i_v3s_hw_clks = {
[CLK_MMC1] = &mmc1_clk.common.hw,
[CLK_MMC1_SAMPLE] = &mmc1_sample_clk.common.hw,
[CLK_MMC1_OUTPUT] = &mmc1_output_clk.common.hw,
[CLK_MMC2] = &mmc2_clk.common.hw,
[CLK_MMC2_SAMPLE] = &mmc2_sample_clk.common.hw,
[CLK_MMC2_OUTPUT] = &mmc2_output_clk.common.hw,
[CLK_CE] = &ce_clk.common.hw,
[CLK_SPI0] = &spi0_clk.common.hw,
[CLK_USB_PHY0] = &usb_phy0_clk.common.hw,

View file

@@ -567,6 +567,7 @@ static int __init top_clocks_init(struct device_node *np)
{
void __iomem *reg_base;
int i, ret;
const char *name;
reg_base = of_iomap(np, 0);
if (!reg_base) {
@@ -576,11 +577,10 @@ static int __init top_clocks_init(struct device_node *np)
for (i = 0; i < ARRAY_SIZE(zx296718_pll_clk); i++) {
zx296718_pll_clk[i].reg_base += (uintptr_t)reg_base;
name = zx296718_pll_clk[i].hw.init->name;
ret = clk_hw_register(NULL, &zx296718_pll_clk[i].hw);
if (ret) {
pr_warn("top clk %s init error!\n",
zx296718_pll_clk[i].hw.init->name);
}
if (ret)
pr_warn("top clk %s init error!\n", name);
}
for (i = 0; i < ARRAY_SIZE(top_ffactor_clk); i++) {
@@ -588,11 +588,10 @@ static int __init top_clocks_init(struct device_node *np)
top_hw_onecell_data.hws[top_ffactor_clk[i].id] =
&top_ffactor_clk[i].factor.hw;
name = top_ffactor_clk[i].factor.hw.init->name;
ret = clk_hw_register(NULL, &top_ffactor_clk[i].factor.hw);
if (ret) {
pr_warn("top clk %s init error!\n",
top_ffactor_clk[i].factor.hw.init->name);
}
if (ret)
pr_warn("top clk %s init error!\n", name);
}
for (i = 0; i < ARRAY_SIZE(top_mux_clk); i++) {
@@ -601,11 +600,10 @@ static int __init top_clocks_init(struct device_node *np)
&top_mux_clk[i].mux.hw;
top_mux_clk[i].mux.reg += (uintptr_t)reg_base;
name = top_mux_clk[i].mux.hw.init->name;
ret = clk_hw_register(NULL, &top_mux_clk[i].mux.hw);
if (ret) {
pr_warn("top clk %s init error!\n",
top_mux_clk[i].mux.hw.init->name);
}
if (ret)
pr_warn("top clk %s init error!\n", name);
}
for (i = 0; i < ARRAY_SIZE(top_gate_clk); i++) {
@@ -614,11 +612,10 @@ static int __init top_clocks_init(struct device_node *np)
&top_gate_clk[i].gate.hw;
top_gate_clk[i].gate.reg += (uintptr_t)reg_base;
name = top_gate_clk[i].gate.hw.init->name;
ret = clk_hw_register(NULL, &top_gate_clk[i].gate.hw);
if (ret) {
pr_warn("top clk %s init error!\n",
top_gate_clk[i].gate.hw.init->name);
}
if (ret)
pr_warn("top clk %s init error!\n", name);
}
for (i = 0; i < ARRAY_SIZE(top_div_clk); i++) {
@@ -627,11 +624,10 @@ static int __init top_clocks_init(struct device_node *np)
&top_div_clk[i].div.hw;
top_div_clk[i].div.reg += (uintptr_t)reg_base;
name = top_div_clk[i].div.hw.init->name;
ret = clk_hw_register(NULL, &top_div_clk[i].div.hw);
if (ret) {
pr_warn("top clk %s init error!\n",
top_div_clk[i].div.hw.init->name);
}
if (ret)
pr_warn("top clk %s init error!\n", name);
}
ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get,
@@ -757,6 +753,7 @@ static int __init lsp0_clocks_init(struct device_node *np)
{
void __iomem *reg_base;
int i, ret;
const char *name;
reg_base = of_iomap(np, 0);
if (!reg_base) {
@@ -770,11 +767,10 @@ static int __init lsp0_clocks_init(struct device_node *np)
&lsp0_mux_clk[i].mux.hw;
lsp0_mux_clk[i].mux.reg += (uintptr_t)reg_base;
name = lsp0_mux_clk[i].mux.hw.init->name;
ret = clk_hw_register(NULL, &lsp0_mux_clk[i].mux.hw);
if (ret) {
pr_warn("lsp0 clk %s init error!\n",
lsp0_mux_clk[i].mux.hw.init->name);
}
if (ret)
pr_warn("lsp0 clk %s init error!\n", name);
}
for (i = 0; i < ARRAY_SIZE(lsp0_gate_clk); i++) {
@@ -783,11 +779,10 @@ static int __init lsp0_clocks_init(struct device_node *np)
&lsp0_gate_clk[i].gate.hw;
lsp0_gate_clk[i].gate.reg += (uintptr_t)reg_base;
name = lsp0_gate_clk[i].gate.hw.init->name;
ret = clk_hw_register(NULL, &lsp0_gate_clk[i].gate.hw);
if (ret) {
pr_warn("lsp0 clk %s init error!\n",
lsp0_gate_clk[i].gate.hw.init->name);
}
if (ret)
pr_warn("lsp0 clk %s init error!\n", name);
}
for (i = 0; i < ARRAY_SIZE(lsp0_div_clk); i++) {
@@ -796,11 +791,10 @@ static int __init lsp0_clocks_init(struct device_node *np)
&lsp0_div_clk[i].div.hw;
lsp0_div_clk[i].div.reg += (uintptr_t)reg_base;
name = lsp0_div_clk[i].div.hw.init->name;
ret = clk_hw_register(NULL, &lsp0_div_clk[i].div.hw);
if (ret) {
pr_warn("lsp0 clk %s init error!\n",
lsp0_div_clk[i].div.hw.init->name);
}
if (ret)
pr_warn("lsp0 clk %s init error!\n", name);
}
ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get,
@@ -865,6 +859,7 @@ static int __init lsp1_clocks_init(struct device_node *np)
{
void __iomem *reg_base;
int i, ret;
const char *name;
reg_base = of_iomap(np, 0);
if (!reg_base) {
@@ -878,11 +873,10 @@ static int __init lsp1_clocks_init(struct device_node *np)
&lsp0_mux_clk[i].mux.hw;
lsp1_mux_clk[i].mux.reg += (uintptr_t)reg_base;
name = lsp1_mux_clk[i].mux.hw.init->name;
ret = clk_hw_register(NULL, &lsp1_mux_clk[i].mux.hw);
if (ret) {
pr_warn("lsp1 clk %s init error!\n",
lsp1_mux_clk[i].mux.hw.init->name);
}
if (ret)
pr_warn("lsp1 clk %s init error!\n", name);
}
for (i = 0; i < ARRAY_SIZE(lsp1_gate_clk); i++) {
@@ -891,11 +885,10 @@ static int __init lsp1_clocks_init(struct device_node *np)
&lsp1_gate_clk[i].gate.hw;
lsp1_gate_clk[i].gate.reg += (uintptr_t)reg_base;
name = lsp1_gate_clk[i].gate.hw.init->name;
ret = clk_hw_register(NULL, &lsp1_gate_clk[i].gate.hw);
if (ret) {
pr_warn("lsp1 clk %s init error!\n",
lsp1_gate_clk[i].gate.hw.init->name);
}
if (ret)
pr_warn("lsp1 clk %s init error!\n", name);
}
for (i = 0; i < ARRAY_SIZE(lsp1_div_clk); i++) {
@@ -904,11 +897,10 @@ static int __init lsp1_clocks_init(struct device_node *np)
&lsp1_div_clk[i].div.hw;
lsp1_div_clk[i].div.reg += (uintptr_t)reg_base;
name = lsp1_div_clk[i].div.hw.init->name;
ret = clk_hw_register(NULL, &lsp1_div_clk[i].div.hw);
if (ret) {
pr_warn("lsp1 clk %s init error!\n",
lsp1_div_clk[i].div.hw.init->name);
}
if (ret)
pr_warn("lsp1 clk %s init error!\n", name);
}
ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get,
@@ -982,6 +974,7 @@ static int __init audio_clocks_init(struct device_node *np)
{
void __iomem *reg_base;
int i, ret;
const char *name;
reg_base = of_iomap(np, 0);
if (!reg_base) {
@@ -995,11 +988,10 @@ static int __init audio_clocks_init(struct device_node *np)
&audio_mux_clk[i].mux.hw;
audio_mux_clk[i].mux.reg += (uintptr_t)reg_base;
name = audio_mux_clk[i].mux.hw.init->name;
ret = clk_hw_register(NULL, &audio_mux_clk[i].mux.hw);
if (ret) {
pr_warn("audio clk %s init error!\n",
audio_mux_clk[i].mux.hw.init->name);
}
if (ret)
pr_warn("audio clk %s init error!\n", name);
}
for (i = 0; i < ARRAY_SIZE(audio_adiv_clk); i++) {
@@ -1008,11 +1000,10 @@ static int __init audio_clocks_init(struct device_node *np)
&audio_adiv_clk[i].hw;
audio_adiv_clk[i].reg_base += (uintptr_t)reg_base;
name = audio_adiv_clk[i].hw.init->name;
ret = clk_hw_register(NULL, &audio_adiv_clk[i].hw);
if (ret) {
pr_warn("audio clk %s init error!\n",
audio_adiv_clk[i].hw.init->name);
}
if (ret)
pr_warn("audio clk %s init error!\n", name);
}
for (i = 0; i < ARRAY_SIZE(audio_div_clk); i++) {
@@ -1021,11 +1012,10 @@ static int __init audio_clocks_init(struct device_node *np)
&audio_div_clk[i].div.hw;
audio_div_clk[i].div.reg += (uintptr_t)reg_base;
name = audio_div_clk[i].div.hw.init->name;
ret = clk_hw_register(NULL, &audio_div_clk[i].div.hw);
if (ret) {
pr_warn("audio clk %s init error!\n",
audio_div_clk[i].div.hw.init->name);
}
if (ret)
pr_warn("audio clk %s init error!\n", name);
}
for (i = 0; i < ARRAY_SIZE(audio_gate_clk); i++) {
@@ -1034,11 +1024,10 @@ static int __init audio_clocks_init(struct device_node *np)
&audio_gate_clk[i].gate.hw;
audio_gate_clk[i].gate.reg += (uintptr_t)reg_base;
name = audio_gate_clk[i].gate.hw.init->name;
ret = clk_hw_register(NULL, &audio_gate_clk[i].gate.hw);
if (ret) {
pr_warn("audio clk %s init error!\n",
audio_gate_clk[i].gate.hw.init->name);
}
if (ret)
pr_warn("audio clk %s init error!\n", name);
}
ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get,

View file

@@ -215,17 +215,18 @@ static void sec_free_hw_sgl(struct sec_hw_sgl *hw_sgl,
dma_addr_t psec_sgl, struct sec_dev_info *info)
{
struct sec_hw_sgl *sgl_current, *sgl_next;
dma_addr_t sgl_next_dma;
if (!hw_sgl)
return;
sgl_current = hw_sgl;
while (sgl_current->next) {
while (sgl_current) {
sgl_next = sgl_current->next;
dma_pool_free(info->hw_sgl_pool, sgl_current,
sgl_current->next_sgl);
sgl_next_dma = sgl_current->next_sgl;
dma_pool_free(info->hw_sgl_pool, sgl_current, psec_sgl);
sgl_current = sgl_next;
psec_sgl = sgl_next_dma;
}
dma_pool_free(info->hw_sgl_pool, hw_sgl, psec_sgl);
}
static int sec_alg_skcipher_setkey(struct crypto_skcipher *tfm,
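
The fix above walks every element of the hardware scatter-gather list, saving the next element's DMA handle before freeing the current node, and frees each node with the handle that actually maps it. A minimal sketch of the save-next-before-free pattern, with plain malloc()/free() standing in for dma_pool_alloc()/dma_pool_free():

    #include <stdlib.h>

    struct node {
        struct node *next;
    };

    static void free_list(struct node *head)
    {
        struct node *cur = head, *next;

        while (cur) {
            next = cur->next;   /* read before free: cur is invalid after */
            free(cur);
            cur = next;
        }
    }

    int main(void)
    {
        struct node *a = malloc(sizeof(*a));
        struct node *b = malloc(sizeof(*b));

        a->next = b;
        b->next = NULL;
        free_list(a);
        return 0;
    }
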

View file

@@ -194,11 +194,10 @@ static void exynos_bus_exit(struct device *dev)
if (ret < 0)
dev_warn(dev, "failed to disable the devfreq-event devices\n");
if (bus->regulator)
regulator_disable(bus->regulator);
dev_pm_opp_of_remove_table(dev);
clk_disable_unprepare(bus->clk);
if (bus->regulator)
regulator_disable(bus->regulator);
}
/*
@@ -386,6 +385,7 @@ static int exynos_bus_probe(struct platform_device *pdev)
struct exynos_bus *bus;
int ret, max_state;
unsigned long min_freq, max_freq;
bool passive = false;
if (!np) {
dev_err(dev, "failed to find devicetree node\n");
@@ -399,27 +399,27 @@ static int exynos_bus_probe(struct platform_device *pdev)
bus->dev = &pdev->dev;
platform_set_drvdata(pdev, bus);
/* Parse the device-tree to get the resource information */
ret = exynos_bus_parse_of(np, bus);
if (ret < 0)
return ret;
profile = devm_kzalloc(dev, sizeof(*profile), GFP_KERNEL);
if (!profile) {
ret = -ENOMEM;
goto err;
}
if (!profile)
return -ENOMEM;
node = of_parse_phandle(dev->of_node, "devfreq", 0);
if (node) {
of_node_put(node);
goto passive;
passive = true;
} else {
ret = exynos_bus_parent_parse_of(np, bus);
if (ret < 0)
return ret;
}
/* Parse the device-tree to get the resource information */
ret = exynos_bus_parse_of(np, bus);
if (ret < 0)
goto err;
goto err_reg;
if (passive)
goto passive;
/* Initialize the struct profile and governor data for parent device */
profile->polling_ms = 50;
@@ -510,6 +510,9 @@ out:
err:
dev_pm_opp_of_remove_table(dev);
clk_disable_unprepare(bus->clk);
err_reg:
if (!passive)
regulator_disable(bus->regulator);
return ret;
}
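
The probe rework above also straightens out the error path so resources are released in the reverse order they were acquired, entering the cleanup ladder at the depth matching how far setup got. A minimal sketch of that goto-based unwinding, with hypothetical stand-ins for the clock and regulator setup calls:

    #include <stdio.h>
    #include <stdlib.h>

    static int probe_sketch(int fail_late)
    {
        char *clk, *reg;
        int ret = 0;

        clk = malloc(16);          /* stands in for clk_prepare_enable() */
        if (!clk)
            return -1;

        reg = malloc(16);          /* stands in for regulator_enable() */
        if (!reg) {
            ret = -1;
            goto err;
        }

        if (fail_late) {           /* a later setup step fails */
            ret = -1;
            goto err_reg;
        }
        free(reg);
        free(clk);
        return 0;

    err_reg:
        free(reg);                 /* undo the last-acquired resource first */
    err:
        free(clk);
        return ret;
    }

    int main(void)
    {
        printf("ret=%d\n", probe_sketch(1));
        return 0;
    }
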

View file

@@ -152,7 +152,6 @@ static int devfreq_passive_notifier_call(struct notifier_block *nb,
static int devfreq_passive_event_handler(struct devfreq *devfreq,
unsigned int event, void *data)
{
struct device *dev = devfreq->dev.parent;
struct devfreq_passive_data *p_data
= (struct devfreq_passive_data *)devfreq->data;
struct devfreq *parent = (struct devfreq *)p_data->parent;
@@ -168,12 +167,12 @@ static int devfreq_passive_event_handler(struct devfreq *devfreq,
p_data->this = devfreq;
nb->notifier_call = devfreq_passive_notifier_call;
ret = devm_devfreq_register_notifier(dev, parent, nb,
ret = devfreq_register_notifier(parent, nb,
DEVFREQ_TRANSITION_NOTIFIER);
break;
case DEVFREQ_GOV_STOP:
devm_devfreq_unregister_notifier(dev, parent, nb,
DEVFREQ_TRANSITION_NOTIFIER);
WARN_ON(devfreq_unregister_notifier(parent, nb,
DEVFREQ_TRANSITION_NOTIFIER));
break;
default:
break;

View file

@@ -141,17 +141,14 @@ static void timeline_fence_release(struct dma_fence *fence)
{
struct sync_pt *pt = dma_fence_to_sync_pt(fence);
struct sync_timeline *parent = dma_fence_parent(fence);
unsigned long flags;
spin_lock_irqsave(fence->lock, flags);
if (!list_empty(&pt->link)) {
unsigned long flags;
spin_lock_irqsave(fence->lock, flags);
if (!list_empty(&pt->link)) {
list_del(&pt->link);
rb_erase(&pt->node, &parent->pt_tree);
}
spin_unlock_irqrestore(fence->lock, flags);
list_del(&pt->link);
rb_erase(&pt->node, &parent->pt_tree);
}
spin_unlock_irqrestore(fence->lock, flags);
sync_timeline_put(parent);
dma_fence_free(fence);
@@ -274,7 +271,8 @@ static struct sync_pt *sync_pt_create(struct sync_timeline *obj,
p = &parent->rb_left;
} else {
if (dma_fence_get_rcu(&other->base)) {
dma_fence_put(&pt->base);
sync_timeline_put(obj);
kfree(pt);
pt = other;
goto unlock;
}
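
The first hunk above widens the locking scope in timeline_fence_release() so the list_empty() check and the removal run under the same spinlock; checking before taking the lock leaves a window for another CPU to unlink the node in between. A minimal pthread sketch of the check-then-remove-under-one-lock rule, with a mutex standing in for the fence spinlock:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static int on_list = 1;

    static void release(void)
    {
        pthread_mutex_lock(&lock);
        if (on_list)           /* the check ... */
            on_list = 0;       /* ... and the removal, atomic w.r.t. the lock */
        pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
        release();
        printf("on_list=%d\n", on_list);
        return 0;
    }
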

View file

@@ -898,8 +898,10 @@ static int bcm2835_dma_probe(struct platform_device *pdev)
pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (rc)
if (rc) {
dev_err(&pdev->dev, "Unable to set DMA mask\n");
return rc;
}
od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL);
if (!od)

View file

@@ -125,9 +125,9 @@ static void __iop_adma_slot_cleanup(struct iop_adma_chan *iop_chan)
list_for_each_entry_safe(iter, _iter, &iop_chan->chain,
chain_node) {
pr_debug("\tcookie: %d slot: %d busy: %d "
"this_desc: %#x next_desc: %#x ack: %d\n",
"this_desc: %#x next_desc: %#llx ack: %d\n",
iter->async_tx.cookie, iter->idx, busy,
iter->async_tx.phys, iop_desc_get_next_desc(iter),
iter->async_tx.phys, (u64)iop_desc_get_next_desc(iter),
async_tx_test_ack(&iter->async_tx));
prefetch(_iter);
prefetch(&_iter->async_tx);
@@ -315,9 +315,9 @@ retry:
int i;
dev_dbg(iop_chan->device->common.dev,
"allocated slot: %d "
"(desc %p phys: %#x) slots_per_op %d\n",
"(desc %p phys: %#llx) slots_per_op %d\n",
iter->idx, iter->hw_desc,
iter->async_tx.phys, slots_per_op);
(u64)iter->async_tx.phys, slots_per_op);
/* pre-ack all but the last descriptor */
if (num_slots != slots_per_op)
@@ -525,7 +525,7 @@ iop_adma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
return NULL;
BUG_ON(len > IOP_ADMA_MAX_BYTE_COUNT);
dev_dbg(iop_chan->device->common.dev, "%s len: %u\n",
dev_dbg(iop_chan->device->common.dev, "%s len: %zu\n",
__func__, len);
spin_lock_bh(&iop_chan->lock);
@@ -558,7 +558,7 @@ iop_adma_prep_dma_xor(struct dma_chan *chan, dma_addr_t dma_dest,
BUG_ON(len > IOP_ADMA_XOR_MAX_BYTE_COUNT);
dev_dbg(iop_chan->device->common.dev,
"%s src_cnt: %d len: %u flags: %lx\n",
"%s src_cnt: %d len: %zu flags: %lx\n",
__func__, src_cnt, len, flags);
spin_lock_bh(&iop_chan->lock);
@@ -591,7 +591,7 @@ iop_adma_prep_dma_xor_val(struct dma_chan *chan, dma_addr_t *dma_src,
if (unlikely(!len))
return NULL;
dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %u\n",
dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %zu\n",
__func__, src_cnt, len);
spin_lock_bh(&iop_chan->lock);
@@ -629,7 +629,7 @@ iop_adma_prep_dma_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
BUG_ON(len > IOP_ADMA_XOR_MAX_BYTE_COUNT);
dev_dbg(iop_chan->device->common.dev,
"%s src_cnt: %d len: %u flags: %lx\n",
"%s src_cnt: %d len: %zu flags: %lx\n",
__func__, src_cnt, len, flags);
if (dmaf_p_disabled_continue(flags))
@@ -692,7 +692,7 @@ iop_adma_prep_dma_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
return NULL;
BUG_ON(len > IOP_ADMA_XOR_MAX_BYTE_COUNT);
dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %u\n",
dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %zu\n",
__func__, src_cnt, len);
spin_lock_bh(&iop_chan->lock);
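
These hunks are pure format-string fixes: size_t arguments take %zu, and dma_addr_t, whose width depends on the configuration, is widened to a fixed 64-bit type before printing with %llx. A minimal user-space sketch, with a hypothetical fake_dma_addr_t standing in for dma_addr_t:

    #include <stdio.h>
    #include <stdint.h>
    #include <stddef.h>

    typedef uint64_t fake_dma_addr_t;   /* stand-in for dma_addr_t */

    int main(void)
    {
        size_t len = 4096;
        fake_dma_addr_t phys = 0x1000ULL;

        /* Passing a size_t to %u is undefined on LP64; %zu matches the type. */
        printf("len: %zu\n", len);
        /* Widen the variable-width handle before printing it. */
        printf("phys: %#llx\n", (unsigned long long)phys);
        return 0;
    }
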

View file

@@ -2273,9 +2273,6 @@ static int edma_probe(struct platform_device *pdev)
ecc->default_queue = info->default_queue;
for (i = 0; i < ecc->num_slots; i++)
edma_write_slot(ecc, i, &dummy_paramset);
if (info->rsv) {
/* Set the reserved slots in inuse list */
rsv_slots = info->rsv->rsv_slots;
@@ -2288,6 +2285,12 @@ static int edma_probe(struct platform_device *pdev)
}
}
for (i = 0; i < ecc->num_slots; i++) {
/* Reset only unused - not reserved - paRAM slots */
if (!test_bit(i, ecc->slot_inuse))
edma_write_slot(ecc, i, &dummy_paramset);
}
/* Clear the xbar mapped channels in unused list */
xbar_chans = info->xbar_chans;
if (xbar_chans) {

View file

@@ -1956,6 +1956,7 @@ static void altr_edac_a10_irq_handler(struct irq_desc *desc)
struct altr_arria10_edac *edac = irq_desc_get_handler_data(desc);
struct irq_chip *chip = irq_desc_get_chip(desc);
int irq = irq_desc_get_irq(desc);
unsigned long bits;
dberr = (irq == edac->db_irq) ? 1 : 0;
sm_offset = dberr ? A10_SYSMGR_ECC_INTSTAT_DERR_OFST :
@@ -1965,7 +1966,8 @@ static void altr_edac_a10_irq_handler(struct irq_desc *desc)
regmap_read(edac->ecc_mgr_map, sm_offset, &irq_status);
for_each_set_bit(bit, (unsigned long *)&irq_status, 32) {
bits = irq_status;
for_each_set_bit(bit, &bits, 32) {
irq = irq_linear_revmap(edac->domain, dberr * 32 + bit);
if (irq)
generic_handle_irq(irq);
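
The fix above copies the u32 register value into a real unsigned long before scanning it, because for_each_set_bit() operates on arrays of unsigned long, and a casted u32 pointer over-reads past the variable on 64-bit kernels. A minimal sketch of widening before the bit scan, with an open-coded loop in place of for_each_set_bit():

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t irq_status = 0x00000005;   /* bits 0 and 2 set */
        unsigned long bits = irq_status;    /* widen before scanning */

        for (unsigned int bit = 0; bit < 32; bit++)
            if (bits & (1UL << bit))
                printf("bit %u set\n", bit);
        return 0;
    }
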

View file

@@ -2501,13 +2501,6 @@ static void decode_umc_error(int node_id, struct mce *m)
goto log_error;
}
if (umc_normaddr_to_sysaddr(m->addr, pvt->mc_node_id, err.channel, &sys_addr)) {
err.err_code = ERR_NORM_ADDR;
goto log_error;
}
error_address_to_page_and_offset(sys_addr, &err);
if (!(m->status & MCI_STATUS_SYNDV)) {
err.err_code = ERR_SYND;
goto log_error;
@@ -2524,6 +2517,13 @@ static void decode_umc_error(int node_id, struct mce *m)
err.csrow = m->synd & 0x7;
if (umc_normaddr_to_sysaddr(m->addr, pvt->mc_node_id, err.channel, &sys_addr)) {
err.err_code = ERR_NORM_ADDR;
goto log_error;
}
error_address_to_page_and_offset(sys_addr, &err);
log_error:
__log_ecc_error(mci, &err, ecc_type);
}
@@ -3101,12 +3101,15 @@ static bool ecc_enabled(struct pci_dev *F3, u16 nid)
static inline void
f17h_determine_edac_ctl_cap(struct mem_ctl_info *mci, struct amd64_pvt *pvt)
{
u8 i, ecc_en = 1, cpk_en = 1;
u8 i, ecc_en = 1, cpk_en = 1, dev_x4 = 1, dev_x16 = 1;
for (i = 0; i < NUM_UMCS; i++) {
if (pvt->umc[i].sdp_ctrl & UMC_SDP_INIT) {
ecc_en &= !!(pvt->umc[i].umc_cap_hi & UMC_ECC_ENABLED);
cpk_en &= !!(pvt->umc[i].umc_cap_hi & UMC_ECC_CHIPKILL_CAP);
dev_x4 &= !!(pvt->umc[i].dimm_cfg & BIT(6));
dev_x16 &= !!(pvt->umc[i].dimm_cfg & BIT(7));
}
}
@@ -3114,8 +3117,15 @@ f17h_determine_edac_ctl_cap(struct mem_ctl_info *mci, struct amd64_pvt *pvt)
if (ecc_en) {
mci->edac_ctl_cap |= EDAC_FLAG_SECDED;
if (cpk_en)
if (!cpk_en)
return;
if (dev_x4)
mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
else if (dev_x16)
mci->edac_ctl_cap |= EDAC_FLAG_S16ECD16ED;
else
mci->edac_ctl_cap |= EDAC_FLAG_S8ECD8ED;
}
}

View file

@@ -1246,9 +1246,13 @@ void edac_mc_handle_error(const enum hw_event_mc_err_type type,
if (p > e->location)
*(p - 1) = '\0';
/* Report the error via the trace interface */
grain_bits = fls_long(e->grain) + 1;
/* Sanity-check driver-supplied grain value. */
if (WARN_ON_ONCE(!e->grain))
e->grain = 1;
grain_bits = fls_long(e->grain - 1);
/* Report the error via the trace interface */
if (IS_ENABLED(CONFIG_RAS))
trace_mc_event(type, e->msg, e->label, e->error_count,
mci->mc_idx, e->top_layer, e->mid_layer,
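
With a power-of-two grain, fls_long(grain - 1) is log2(grain): for grain = 8, fls_long(7) = 3 and 1 << 3 = 8, whereas the old fls_long(grain) + 1 would have reported 5 (i.e. a 32-byte grain). A minimal sketch with a user-space stand-in for the kernel's fls_long():

    #include <stdio.h>

    static int fls_long_sketch(unsigned long x)
    {
        int r = 0;

        while (x) {
            x >>= 1;
            r++;
        }
        return r;   /* 1-based position of highest set bit; 0 for x == 0 */
    }

    int main(void)
    {
        unsigned long grain = 8;   /* 8-byte reporting granularity */

        /* fls_long(7) == 3, so grain_bits == 3 and 1 << 3 == 8 as expected. */
        printf("grain_bits = %d\n", fls_long_sketch(grain - 1));
        return 0;
    }
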

View file

@@ -268,11 +268,14 @@ static u64 get_sideband_reg_base_addr(void)
}
}
#define DNV_MCHBAR_SIZE 0x8000
#define DNV_SB_PORT_SIZE 0x10000
static int dnv_rd_reg(int port, int off, int op, void *data, size_t sz, char *name)
{
struct pci_dev *pdev;
char *base;
u64 addr;
unsigned long size;
if (op == 4) {
pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x1980, NULL);
@@ -287,15 +290,17 @@ static int dnv_rd_reg(int port, int off, int op, void *data, size_t sz, char *na
addr = get_mem_ctrl_hub_base_addr();
if (!addr)
return -ENODEV;
size = DNV_MCHBAR_SIZE;
} else {
/* MMIO via sideband register base address */
addr = get_sideband_reg_base_addr();
if (!addr)
return -ENODEV;
addr += (port << 16);
size = DNV_SB_PORT_SIZE;
}
base = ioremap((resource_size_t)addr, 0x10000);
base = ioremap((resource_size_t)addr, size);
if (!base)
return -ENODEV;

View file

@@ -271,6 +271,14 @@ static void scmi_tx_prepare(struct mbox_client *cl, void *m)
struct scmi_chan_info *cinfo = client_to_scmi_chan_info(cl);
struct scmi_shared_mem __iomem *mem = cinfo->payload;
/*
 * Ideally the channel should be free by now, unless the OS timed out
 * the last request and the platform continued to process it; wait
 * until it releases the shared memory, otherwise we may end up
 * overwriting its response with a new message payload or vice versa.
 */
spin_until_cond(ioread32(&mem->channel_status) &
SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE);
/* Mark channel busy + clear error */
iowrite32(0x0, &mem->channel_status);
iowrite32(t->hdr.poll_completion ? 0 : SCMI_SHMEM_FLAG_INTR_ENABLED,
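
The comment and spin_until_cond() above make the driver poll the shared-memory channel status until the platform marks it free before claiming it for a new message. A minimal sketch of that claim-after-poll sequence, with a volatile flag standing in for the ioread32() of the shared channel status:

    #include <stdio.h>

    #define CHAN_STAT_FREE 0x1

    static volatile unsigned int channel_status = CHAN_STAT_FREE;

    static void tx_prepare(void)
    {
        while (!(channel_status & CHAN_STAT_FREE))
            ;                      /* busy-wait until the channel is free */
        channel_status = 0;        /* mark channel busy + clear error */
    }

    int main(void)
    {
        tx_prepare();
        printf("channel claimed, status=%u\n", channel_status);
        return 0;
    }
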

View file

@@ -402,6 +402,21 @@ static void cper_print_pcie(const char *pfx, const struct cper_sec_pcie *pcie,
printk(
"%s""bridge: secondary_status: 0x%04x, control: 0x%04x\n",
pfx, pcie->bridge.secondary_status, pcie->bridge.control);
/* Fatal errors call __ghes_panic() before AER handler prints this */
if ((pcie->validation_bits & CPER_PCIE_VALID_AER_INFO) &&
(gdata->error_severity & CPER_SEV_FATAL)) {
struct aer_capability_regs *aer;
aer = (struct aer_capability_regs *)pcie->aer_info;
printk("%saer_uncor_status: 0x%08x, aer_uncor_mask: 0x%08x\n",
pfx, aer->uncor_status, aer->uncor_mask);
printk("%saer_uncor_severity: 0x%08x\n",
pfx, aer->uncor_severity);
printk("%sTLP Header: %08x %08x %08x %08x\n", pfx,
aer->header_log.dw0, aer->header_log.dw1,
aer->header_log.dw2, aer->header_log.dw3);
}
}
static void cper_print_tstamp(const char *pfx,

View file

@@ -18,6 +18,7 @@
#include <linux/init.h>
#include <linux/cpumask.h>
#include <linux/export.h>
#include <linux/dma-direct.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/types.h>
@@ -449,6 +450,7 @@ int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
phys_addr_t mem_to_map_phys;
phys_addr_t dest_phys;
phys_addr_t ptr_phys;
dma_addr_t ptr_dma;
size_t mem_to_map_sz;
size_t dest_sz;
size_t src_sz;
@@ -466,9 +468,10 @@ int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
ptr_sz = ALIGN(src_sz, SZ_64) + ALIGN(mem_to_map_sz, SZ_64) +
ALIGN(dest_sz, SZ_64);
ptr = dma_alloc_coherent(__scm->dev, ptr_sz, &ptr_phys, GFP_KERNEL);
ptr = dma_alloc_coherent(__scm->dev, ptr_sz, &ptr_dma, GFP_KERNEL);
if (!ptr)
return -ENOMEM;
ptr_phys = dma_to_phys(__scm->dev, ptr_dma);
/* Fill source vmid detail */
src = ptr;
@@ -498,7 +501,7 @@ int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
ret = __qcom_scm_assign_mem(__scm->dev, mem_to_map_phys, mem_to_map_sz,
ptr_phys, src_sz, dest_phys, dest_sz);
dma_free_coherent(__scm->dev, ALIGN(ptr_sz, SZ_64), ptr, ptr_phys);
dma_free_coherent(__scm->dev, ptr_sz, ptr, ptr_dma);
if (ret) {
dev_err(__scm->dev,
"Assign memory protection call failed %d.\n", ret);

View file

@@ -1813,7 +1813,7 @@ static void si_program_aspm(struct amdgpu_device *adev)
if (orig != data)
si_pif_phy1_wreg(adev,PB1_PIF_PWRDOWN_1, data);
if ((adev->family != CHIP_OLAND) && (adev->family != CHIP_HAINAN)) {
if ((adev->asic_type != CHIP_OLAND) && (adev->asic_type != CHIP_HAINAN)) {
orig = data = si_pif_phy0_rreg(adev,PB0_PIF_PWRDOWN_0);
data &= ~PLL_RAMP_UP_TIME_0_MASK;
if (orig != data)
@@ -1862,14 +1862,14 @@ static void si_program_aspm(struct amdgpu_device *adev)
orig = data = si_pif_phy0_rreg(adev,PB0_PIF_CNTL);
data &= ~LS2_EXIT_TIME_MASK;
if ((adev->family == CHIP_OLAND) || (adev->family == CHIP_HAINAN))
if ((adev->asic_type == CHIP_OLAND) || (adev->asic_type == CHIP_HAINAN))
data |= LS2_EXIT_TIME(5);
if (orig != data)
si_pif_phy0_wreg(adev,PB0_PIF_CNTL, data);
orig = data = si_pif_phy1_rreg(adev,PB1_PIF_CNTL);
data &= ~LS2_EXIT_TIME_MASK;
if ((adev->family == CHIP_OLAND) || (adev->family == CHIP_HAINAN))
if ((adev->asic_type == CHIP_OLAND) || (adev->asic_type == CHIP_HAINAN))
data |= LS2_EXIT_TIME(5);
if (orig != data)
si_pif_phy1_wreg(adev,PB1_PIF_CNTL, data);

View file

@@ -1462,6 +1462,7 @@ static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
}
static const struct backlight_ops amdgpu_dm_backlight_ops = {
.options = BL_CORE_SUSPENDRESUME,
.get_brightness = amdgpu_dm_backlight_get_brightness,
.update_status = amdgpu_dm_backlight_update_status,
};

View file

@@ -1585,6 +1585,14 @@ void dc_set_power_state(
dc_resource_state_construct(dc, dc->current_state);
dc->hwss.init_hw(dc);
#ifdef CONFIG_DRM_AMD_DC_DCN2_0
if (dc->hwss.init_sys_ctx != NULL &&
dc->vm_pa_config.valid) {
dc->hwss.init_sys_ctx(dc->hwseq, dc, &dc->vm_pa_config);
}
#endif
break;
default:

View file

@@ -229,12 +229,10 @@ bool resource_construct(
DC_ERR("DC: failed to create audio!\n");
return false;
}
if (!aud->funcs->endpoint_valid(aud)) {
aud->funcs->destroy(&aud);
break;
}
pool->audios[i] = aud;
pool->audio_count++;
}
@@ -1703,24 +1701,25 @@ static struct audio *find_first_free_audio(
const struct resource_pool *pool,
enum engine_id id)
{
int i;
for (i = 0; i < pool->audio_count; i++) {
int i, available_audio_count;
available_audio_count = pool->audio_count;
for (i = 0; i < available_audio_count; i++) {
if ((res_ctx->is_audio_acquired[i] == false) && (res_ctx->is_stream_enc_acquired[i] == true)) {
/* we have enough audio endpoints, find the matching inst */
if (id != i)
continue;
return pool->audios[i];
}
}
/* use engine id to find free audio */
if ((id < pool->audio_count) && (res_ctx->is_audio_acquired[id] == false)) {
/* use engine id to find free audio */
if ((id < available_audio_count) && (res_ctx->is_audio_acquired[id] == false)) {
return pool->audios[id];
}
/* no matching one found, first come first served */
for (i = 0; i < pool->audio_count; i++) {
for (i = 0; i < available_audio_count; i++) {
if (res_ctx->is_audio_acquired[i] == false) {
return pool->audios[i];
}

View file

@@ -611,6 +611,8 @@ void dce_aud_az_configure(
AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1,
value);
DC_LOG_HW_AUDIO("\n\tAUDIO:az_configure: index: %u data, 0x%x, displayName %s: \n",
audio->inst, value, audio_info->display_name);
/*
*write the port ID:
@@ -922,7 +924,6 @@ static const struct audio_funcs funcs = {
.az_configure = dce_aud_az_configure,
.destroy = dce_aud_destroy,
};
void dce_aud_destroy(struct audio **audio)
{
struct dce_audio *aud = DCE_AUD(*audio);
@@ -953,7 +954,6 @@ struct audio *dce_audio_create(
audio->regs = reg;
audio->shifts = shifts;
audio->masks = masks;
return &audio->base;
}

View file

@@ -292,9 +292,10 @@ bool cm_helper_translate_curve_to_hw_format(
seg_distr[7] = 4;
seg_distr[8] = 4;
seg_distr[9] = 4;
seg_distr[10] = 1;
region_start = -10;
region_end = 0;
region_end = 1;
}
for (i = region_end - region_start; i < MAX_REGIONS_NUMBER ; i++)

View file

@@ -4052,6 +4052,11 @@ static int smu7_program_display_gap(struct pp_hwmgr *hwmgr)
data->frame_time_x2 = frame_time_in_us * 2 / 100;
if (data->frame_time_x2 < 280) {
pr_debug("%s: enforce minimal VBITimeout: %d -> 280\n", __func__, data->frame_time_x2);
data->frame_time_x2 = 280;
}
display_gap2 = pre_vbi_time_in_us * (ref_clock / 100);
cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL2, display_gap2);

Some files were not shown because too many files have changed in this diff.