This is the 4.19.249 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmK22iMACgkQONu9yGCS
 aT5HJQ/6ApRt37oalJ6HFGpWaGChmbs9ttGCNxXMyUSLgSzfVqXEZBT4S5Nyjhz0
 D6rxpFMHrQUWfoyEG7CEo53dBTeG6g3/NKah4godguxUqEmbKAy9rGYKLL9VTdo/
 nH5mBXJJaMKlGX105R94Aq2BCKVeycNpcqWWTZrZepqCL1mFqGh0VhgU8wCeTi5f
 wmRuMh58WiWgdBOHTMYseUB8YsLEeDC1qZsQ/aD4tg3FaTK6KVSuervz++M4WzeK
 QG2JnFLJ3Sl/lPDMNhHEYK7PmHhYwBDonT36QP6Lr7yuOSd37fBrEufWI+9T1ULA
 lHtsLMPBiIGumOKRqUIKAH2etqeGaWrd4I311XkWI42vEMqM8ZBlVBFnRbC8VoPF
 irrzmYJ1LS0Fp3+0cdZBKmBa2mztgh+aWpsVfQk4jvwxafit5LIntENOHbbVeFSm
 LIlts+uB6nY3sPr8GOeKFbFyxDEeMR06GS1emzDKkFRi83dOLYcVbnrn6k8lY5j8
 Utd/DONlhLfybvxw4bJi1ovc+kqUe66h9w7sxVQv0z5F5xiU6ur8VTJjTUr6NgXK
 MpkcvoaF8WXnoeHdFl7s+c1Q3O5q9HwTIpxa7HcT44euHj9ngq4QbXECF/xNJeok
 /iBtOjnpSjtobqet8QmZtOYdIqtQbTGpS2TjhqQSLzbLg7Q+sJA=
 =/rv5
 -----END PGP SIGNATURE-----

Merge 4.19.249 into android-4.19-stable

Changes in 4.19.249
	9p: missing chunk of "fs/9p: Don't update file type when updating file attributes"
	drivers/char/random.c: constify poolinfo_table
	drivers/char/random.c: remove unused stuct poolinfo::poolbits
	drivers/char/random.c: make primary_crng static
	random: only read from /dev/random after its pool has received 128 bits
	random: move rand_initialize() earlier
	random: document get_random_int() family
	latent_entropy: avoid build error when plugin cflags are not set
	random: fix soft lockup when trying to read from an uninitialized blocking pool
	random: Support freezable kthreads in add_hwgenerator_randomness()
	fdt: add support for rng-seed
	random: Use wait_event_freezable() in add_hwgenerator_randomness()
	char/random: Add a newline at the end of the file
	Revert "hwrng: core - Freeze khwrng thread during suspend"
	crypto: blake2s - generic C library implementation and selftest
	lib/crypto: blake2s: move hmac construction into wireguard
	lib/crypto: sha1: re-roll loops to reduce code size
	random: Don't wake crng_init_wait when crng_init == 1
	random: Add a urandom_read_nowait() for random APIs that don't warn
	random: add GRND_INSECURE to return best-effort non-cryptographic bytes
	random: ignore GRND_RANDOM in getentropy(2)
	random: make /dev/random be almost like /dev/urandom
	char/random: silence a lockdep splat with printk()
	random: fix crash on multiple early calls to add_bootloader_randomness()
	random: remove the blocking pool
	random: delete code to pull data into pools
	random: remove kernel.random.read_wakeup_threshold
	random: remove unnecessary unlikely()
	random: convert to ENTROPY_BITS for better code readability
	random: Add and use pr_fmt()
	random: fix typo in add_timer_randomness()
	random: remove some dead code of poolinfo
	random: split primary/secondary crng init paths
	random: avoid warnings for !CONFIG_NUMA builds
	x86: Remove arch_has_random, arch_has_random_seed
	powerpc: Remove arch_has_random, arch_has_random_seed
	s390: Remove arch_has_random, arch_has_random_seed
	linux/random.h: Remove arch_has_random, arch_has_random_seed
	linux/random.h: Use false with bool
	linux/random.h: Mark CONFIG_ARCH_RANDOM functions __must_check
	powerpc: Use bool in archrandom.h
	random: add arch_get_random_*long_early()
	random: avoid arch_get_random_seed_long() when collecting IRQ randomness
	random: remove dead code left over from blocking pool
	MAINTAINERS: co-maintain random.c
	crypto: blake2s - include <linux/bug.h> instead of <asm/bug.h>
	crypto: blake2s - adjust include guard naming
	random: document add_hwgenerator_randomness() with other input functions
	random: remove unused irq_flags argument from add_interrupt_randomness()
	random: use BLAKE2s instead of SHA1 in extraction
	random: do not sign extend bytes for rotation when mixing
	random: do not re-init if crng_reseed completes before primary init
	random: mix bootloader randomness into pool
	random: harmonize "crng init done" messages
	random: use IS_ENABLED(CONFIG_NUMA) instead of ifdefs
	random: initialize ChaCha20 constants with correct endianness
	random: early initialization of ChaCha constants
	random: avoid superfluous call to RDRAND in CRNG extraction
	random: don't reset crng_init_cnt on urandom_read()
	random: fix typo in comments
	random: cleanup poolinfo abstraction
	random: cleanup integer types
	random: remove incomplete last_data logic
	random: remove unused extract_entropy() reserved argument
	random: rather than entropy_store abstraction, use global
	random: remove unused OUTPUT_POOL constants
	random: de-duplicate INPUT_POOL constants
	random: prepend remaining pool constants with POOL_
	random: cleanup fractional entropy shift constants
	random: access input_pool_data directly rather than through pointer
	random: simplify arithmetic function flow in account()
	random: continually use hwgenerator randomness
	random: access primary_pool directly rather than through pointer
	random: only call crng_finalize_init() for primary_crng
	random: use computational hash for entropy extraction
	random: simplify entropy debiting
	random: use linear min-entropy accumulation crediting
	random: always wake up entropy writers after extraction
	random: make credit_entropy_bits() always safe
	random: remove use_input_pool parameter from crng_reseed()
	random: remove batched entropy locking
	random: fix locking in crng_fast_load()
	random: use RDSEED instead of RDRAND in entropy extraction
	random: inline leaves of rand_initialize()
	random: ensure early RDSEED goes through mixer on init
	random: do not xor RDRAND when writing into /dev/random
	random: absorb fast pool into input pool after fast load
	random: use hash function for crng_slow_load()
	random: remove outdated INT_MAX >> 6 check in urandom_read()
	random: zero buffer after reading entropy from userspace
	random: tie batched entropy generation to base_crng generation
	random: remove ifdef'd out interrupt bench
	random: remove unused tracepoints
	random: add proper SPDX header
	random: deobfuscate irq u32/u64 contributions
	random: introduce drain_entropy() helper to declutter crng_reseed()
	random: remove useless header comment
	random: remove whitespace and reorder includes
	random: group initialization wait functions
	random: group entropy extraction functions
	random: group entropy collection functions
	random: group userspace read/write functions
	random: group sysctl functions
	random: rewrite header introductory comment
	random: defer fast pool mixing to worker
	random: do not take pool spinlock at boot
	random: unify early init crng load accounting
	random: check for crng_init == 0 in add_device_randomness()
	random: pull add_hwgenerator_randomness() declaration into random.h
	random: clear fast pool, crng, and batches in cpuhp bring up
	random: round-robin registers as ulong, not u32
	random: only wake up writers after zap if threshold was passed
	random: cleanup UUID handling
	random: unify cycles_t and jiffies usage and types
	random: do crng pre-init loading in worker rather than irq
	random: give sysctl_random_min_urandom_seed a more sensible value
	random: don't let 644 read-only sysctls be written to
	random: replace custom notifier chain with standard one
	random: use SipHash as interrupt entropy accumulator
	random: make consistent usage of crng_ready()
	random: reseed more often immediately after booting
	random: check for signal and try earlier when generating entropy
	random: skip fast_init if hwrng provides large chunk of entropy
	random: treat bootloader trust toggle the same way as cpu trust toggle
	random: re-add removed comment about get_random_{u32,u64} reseeding
	random: mix build-time latent entropy into pool at init
	random: do not split fast init input in add_hwgenerator_randomness()
	random: do not allow user to keep crng key around on stack
	random: check for signal_pending() outside of need_resched() check
	random: check for signals every PAGE_SIZE chunk of /dev/[u]random
	random: make random_get_entropy() return an unsigned long
	random: document crng_fast_key_erasure() destination possibility
	random: fix sysctl documentation nits
	init: call time_init() before rand_initialize()
	ia64: define get_cycles macro for arch-override
	s390: define get_cycles macro for arch-override
	parisc: define get_cycles macro for arch-override
	alpha: define get_cycles macro for arch-override
	powerpc: define get_cycles macro for arch-override
	timekeeping: Add raw clock fallback for random_get_entropy()
	m68k: use fallback for random_get_entropy() instead of zero
	mips: use fallback for random_get_entropy() instead of just c0 random
	arm: use fallback for random_get_entropy() instead of zero
	nios2: use fallback for random_get_entropy() instead of zero
	x86/tsc: Use fallback for random_get_entropy() instead of zero
	um: use fallback for random_get_entropy() instead of zero
	sparc: use fallback for random_get_entropy() instead of zero
	xtensa: use fallback for random_get_entropy() instead of zero
	random: insist on random_get_entropy() existing in order to simplify
	random: do not use batches when !crng_ready()
	random: do not pretend to handle premature next security model
	random: order timer entropy functions below interrupt functions
	random: do not use input pool from hard IRQs
	random: help compiler out with fast_mix() by using simpler arguments
	siphash: use one source of truth for siphash permutations
	random: use symbolic constants for crng_init states
	random: avoid initializing twice in credit race
	random: remove ratelimiting for in-kernel unseeded randomness
	random: use proper jiffies comparison macro
	random: handle latent entropy and command line from random_init()
	random: credit architectural init the exact amount
	random: use static branch for crng_ready()
	random: remove extern from functions in header
	random: use proper return types on get_random_{int,long}_wait()
	random: move initialization functions out of hot pages
	random: move randomize_page() into mm where it belongs
	random: convert to using fops->write_iter()
	random: wire up fops->splice_{read,write}_iter()
	random: check for signals after page of pool writes
	Revert "random: use static branch for crng_ready()"
	crypto: drbg - add FIPS 140-2 CTRNG for noise source
	crypto: drbg - always seeded with SP800-90B compliant noise source
	crypto: drbg - prepare for more fine-grained tracking of seeding state
	crypto: drbg - track whether DRBG was seeded with !rng_is_initialized()
	crypto: drbg - move dynamic ->reseed_threshold adjustments to __drbg_seed()
	crypto: drbg - always try to free Jitter RNG instance
	crypto: drbg - make reseeding from get_random_bytes() synchronous
	random: avoid checking crng_ready() twice in random_init()
	random: mark bootloader randomness code as __init
	random: account for arch randomness in bits
	powerpc/kasan: Silence KASAN warnings in __get_wchan()
	ASoC: cs42l52: Fix TLV scales for mixer controls
	ASoC: cs53l30: Correct number of volume levels on SX controls
	ASoC: cs42l52: Correct TLV for Bypass Volume
	ASoC: cs42l56: Correct typo in minimum level for SX volume controls
	ata: libata-core: fix NULL pointer deref in ata_host_alloc_pinfo()
	ASoC: wm8962: Fix suspend while playing music
	ASoC: es8328: Fix event generation for deemphasis control
	ASoC: wm_adsp: Fix event generation for wm_adsp_fw_put()
	scsi: vmw_pvscsi: Expand vcpuHint to 16 bits
	scsi: lpfc: Fix port stuck in bypassed state after LIP in PT2PT topology
	scsi: ipr: Fix missing/incorrect resource cleanup in error case
	scsi: pmcraid: Fix missing resource cleanup in error case
	virtio-mmio: fix missing put_device() when vm_cmdline_parent registration failed
	nfc: nfcmrvl: Fix memory leak in nfcmrvl_play_deferred
	ipv6: Fix signed integer overflow in l2tp_ip6_sendmsg
	net: ethernet: mtk_eth_soc: fix misuse of mem alloc interface netdev[napi]_alloc_frag
	random: credit cpu and bootloader seeds by default
	pNFS: Don't keep retrying if the server replied NFS4ERR_LAYOUTUNAVAILABLE
	i40e: Fix adding ADQ filter to TC0
	i40e: Fix call trace in setup_tx_descriptors
	tty: goldfish: Fix free_irq() on remove
	misc: atmel-ssc: Fix IRQ check in ssc_probe
	mlxsw: spectrum_cnt: Reorder counter pools
	net: bgmac: Fix an erroneous kfree() in bgmac_remove()
	arm64: ftrace: fix branch range checks
	certs/blacklist_hashes.c: fix const confusion in certs blacklist
	faddr2line: Fix overlapping text section failures, the sequel
	irqchip/gic/realview: Fix refcount leak in realview_gic_of_init
	irqchip/gic-v3: Fix refcount leak in gic_populate_ppi_partitions
	comedi: vmk80xx: fix expression for tx buffer size
	USB: serial: option: add support for Cinterion MV31 with new baseline
	USB: serial: io_ti: add Agilent E5805A support
	usb: dwc2: Fix memory leak in dwc2_hcd_init
	usb: gadget: lpc32xx_udc: Fix refcount leak in lpc32xx_udc_probe
	serial: 8250: Store to lsr_save_flags after lsr read
	ext4: fix bug_on ext4_mb_use_inode_pa
	ext4: make variable "count" signed
	ext4: add reserved GDT blocks check
	virtio-pci: Remove wrong address verification in vp_del_vqs()
	net: openvswitch: fix misuse of the cached connection on tuple changes
	net: openvswitch: fix leak of nested actions
	RISC-V: fix barrier() use in <vdso/processor.h>
	powerpc/mm: Switch obsolete dssall to .long
	s390/mm: use non-quiescing sske for KVM switch to keyed guest
	usb: gadget: u_ether: fix regression in setting fixed MAC address
	xprtrdma: fix incorrect header size calculations
	tcp: add some entropy in __inet_hash_connect()
	tcp: use different parts of the port_offset for index and offset
	tcp: add small random increments to the source port
	tcp: dynamically allocate the perturb table used by source ports
	tcp: increase source port perturb table to 2^16
	tcp: drop the hash_32() part from the index calculation
	Revert "hwmon: Make chip parameter for with_info API mandatory"
	Linux 4.19.249

Merge resolution notes:
  - Dropped the changes that added an LTS-specific backport of the
    blake2s library, since this branch already has a newer version of
    the blake2s library.

  - Added CHACHA20_KEY_SIZE and CHACHA20_BLOCK_SIZE constants to
    chacha.h, to minimize changes from the 4.19 LTS version of random.c.

  - Retained a fix to the rng-seed support in drivers/of/fdt.c that this
    branch and 4.19.250 have, but 4.19.249 doesn't have.
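
A minimal sketch of the compatibility aliases mentioned above, mirroring the
values shown in the include/crypto/chacha.h hunk further down (the sketch
itself is illustrative, not taken from the patch):

	#include <assert.h>

	#define CHACHA_KEY_SIZE      32
	#define CHACHA20_KEY_SIZE    32   /* alias kept for 4.19 LTS random.c */
	#define CHACHA_BLOCK_SIZE    64
	#define CHACHA20_BLOCK_SIZE  64   /* alias kept for 4.19 LTS random.c */

	int main(void)
	{
		static_assert(CHACHA20_KEY_SIZE == CHACHA_KEY_SIZE, "key size alias");
		static_assert(CHACHA20_BLOCK_SIZE == CHACHA_BLOCK_SIZE, "block size alias");
		return 0;
	}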

Signed-off-by: Eric Biggers <ebiggers@google.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: If9d9e3168f0976f61ae1ab9b36c063558a7f6ebf

@ -3725,6 +3725,12 @@
fully seed the kernel's CRNG. Default is controlled
by CONFIG_RANDOM_TRUST_CPU.
random.trust_bootloader={on,off}
[KNL] Enable or disable trusting the use of a
seed passed by the bootloader (if available) to
fully seed the kernel's CRNG. Default is controlled
by CONFIG_RANDOM_TRUST_BOOTLOADER.
ras=option[,option,...] [KNL] RAS-specific options
cec_disable [X86]


@ -822,9 +822,40 @@ The kernel command line parameter printk.devkmsg= overrides this and is
a one-time setting until next reboot: once set, it cannot be changed by
this sysctl interface anymore.
==============================================================
pty
===
randomize_va_space:
See Documentation/filesystems/devpts.rst.
random
======
This is a directory, with the following entries:
* ``boot_id``: a UUID generated the first time this is retrieved, and
unvarying after that;
* ``uuid``: a UUID generated every time this is retrieved (this can
thus be used to generate UUIDs at will);
* ``entropy_avail``: the pool's entropy count, in bits;
* ``poolsize``: the entropy pool size, in bits;
* ``urandom_min_reseed_secs``: obsolete (used to determine the minimum
number of seconds between urandom pool reseeding). This file is
writable for compatibility purposes, but writing to it has no effect
on any RNG behavior;
* ``write_wakeup_threshold``: when the entropy count drops below this
(as a number of bits), processes waiting to write to ``/dev/random``
are woken up. This file is writable for compatibility purposes, but
writing to it has no effect on any RNG behavior.
randomize_va_space
==================
This option can be used to select the type of process address
space randomization that is used in the system, for architectures

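A small userspace sketch of reading the entries documented above; the
/proc/sys/kernel/random paths are as documented, the helper around them is
only illustrative:

	#include <stdio.h>

	static void show(const char *path)
	{
		char buf[128];
		FILE *f = fopen(path, "r");

		if (!f)
			return;
		if (fgets(buf, sizeof(buf), f))
			printf("%-45s %s", path, buf);
		fclose(f);
	}

	int main(void)
	{
		show("/proc/sys/kernel/random/entropy_avail");
		show("/proc/sys/kernel/random/poolsize");
		show("/proc/sys/kernel/random/boot_id");
		show("/proc/sys/kernel/random/uuid");
		return 0;
	}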

@ -12259,6 +12259,7 @@ F: arch/mips/configs/generic/board-ranchu.config
RANDOM NUMBER DRIVER
M: "Theodore Ts'o" <tytso@mit.edu>
M: Jason A. Donenfeld <Jason@zx2c4.com>
S: Maintained
F: drivers/char/random.c


@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 4
PATCHLEVEL = 19
SUBLEVEL = 248
SUBLEVEL = 249
EXTRAVERSION =
NAME = "People's Front"


@ -28,5 +28,6 @@ static inline cycles_t get_cycles (void)
__asm__ __volatile__ ("rpcc %0" : "=r"(ret));
return ret;
}
#define get_cycles get_cycles
#endif


@ -14,5 +14,6 @@
typedef unsigned long cycles_t;
#define get_cycles() ({ cycles_t c; read_current_timer(&c) ? 0 : c; })
#define random_get_entropy() (((unsigned long)get_cycles()) ?: random_get_entropy_fallback())
#endif


@ -72,7 +72,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
unsigned long pc = rec->ip;
u32 old, new;
long offset = (long)pc - (long)addr;
long offset = (long)addr - (long)pc;
if (offset < -SZ_128M || offset >= SZ_128M) {
#ifdef CONFIG_ARM64_MODULE_PLTS
@ -151,7 +151,7 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
unsigned long pc = rec->ip;
bool validate = true;
u32 old = 0, new;
long offset = (long)pc - (long)addr;
long offset = (long)addr - (long)pc;
if (offset < -SZ_128M || offset >= SZ_128M) {
#ifdef CONFIG_ARM64_MODULE_PLTS


@ -39,6 +39,7 @@ get_cycles (void)
ret = ia64_getreg(_IA64_REG_AR_ITC);
return ret;
}
#define get_cycles get_cycles
extern void ia64_cpu_local_tick (void);
extern unsigned long long ia64_native_sched_clock (void);


@ -35,7 +35,7 @@ static inline unsigned long random_get_entropy(void)
{
if (mach_random_get_entropy)
return mach_random_get_entropy();
return 0;
return random_get_entropy_fallback();
}
#define random_get_entropy random_get_entropy


@ -76,25 +76,24 @@ static inline cycles_t get_cycles(void)
else
return 0; /* no usable counter */
}
#define get_cycles get_cycles
/*
* Like get_cycles - but where c0_count is not available we desperately
* use c0_random in an attempt to get at least a little bit of entropy.
*
* R6000 and R6000A neither have a count register nor a random register.
* That leaves no entropy source in the CPU itself.
*/
static inline unsigned long random_get_entropy(void)
{
unsigned int prid = read_c0_prid();
unsigned int imp = prid & PRID_IMP_MASK;
unsigned int c0_random;
if (can_use_mips_counter(prid))
if (can_use_mips_counter(read_c0_prid()))
return read_c0_count();
else if (likely(imp != PRID_IMP_R6000 && imp != PRID_IMP_R6000A))
return read_c0_random();
if (cpu_has_3kex)
c0_random = (read_c0_random() >> 8) & 0x3f;
else
return 0; /* no usable register */
c0_random = read_c0_random() & 0x3f;
return (random_get_entropy_fallback() << 6) | (0x3f - c0_random);
}
#define random_get_entropy random_get_entropy


@ -20,5 +20,8 @@
typedef unsigned long cycles_t;
extern cycles_t get_cycles(void);
#define get_cycles get_cycles
#define random_get_entropy() (((unsigned long)get_cycles()) ?: random_get_entropy_fallback())
#endif


@ -12,9 +12,10 @@
typedef unsigned long cycles_t;
static inline cycles_t get_cycles (void)
static inline cycles_t get_cycles(void)
{
return mfctl(16);
}
#define get_cycles get_cycles
#endif


@ -6,27 +6,28 @@
#include <asm/machdep.h>
static inline int arch_get_random_long(unsigned long *v)
static inline bool arch_get_random_long(unsigned long *v)
{
return 0;
return false;
}
static inline int arch_get_random_int(unsigned int *v)
static inline bool arch_get_random_int(unsigned int *v)
{
return 0;
return false;
}
static inline int arch_get_random_seed_long(unsigned long *v)
static inline bool arch_get_random_seed_long(unsigned long *v)
{
if (ppc_md.get_random_seed)
return ppc_md.get_random_seed(v);
return 0;
return false;
}
static inline int arch_get_random_seed_int(unsigned int *v)
static inline bool arch_get_random_seed_int(unsigned int *v)
{
unsigned long val;
int rc;
bool rc;
rc = arch_get_random_seed_long(&val);
if (rc)
@ -34,16 +35,6 @@ static inline int arch_get_random_seed_int(unsigned int *v)
return rc;
}
static inline int arch_has_random(void)
{
return 0;
}
static inline int arch_has_random_seed(void)
{
return !!ppc_md.get_random_seed;
}
#endif /* CONFIG_ARCH_RANDOM */
#ifdef CONFIG_PPC_POWERNV


@ -207,6 +207,7 @@
#define PPC_INST_ICBT 0x7c00002c
#define PPC_INST_ICSWX 0x7c00032d
#define PPC_INST_ICSWEPX 0x7c00076d
#define PPC_INST_DSSALL 0x7e00066c
#define PPC_INST_ISEL 0x7c00001e
#define PPC_INST_ISEL_MASK 0xfc00003e
#define PPC_INST_LDARX 0x7c0000a8
@ -424,6 +425,7 @@
__PPC_RA(a) | __PPC_RB(b))
#define PPC_DCBZL(a, b) stringify_in_c(.long PPC_INST_DCBZL | \
__PPC_RA(a) | __PPC_RB(b))
#define PPC_DSSALL stringify_in_c(.long PPC_INST_DSSALL)
#define PPC_LQARX(t, a, b, eh) stringify_in_c(.long PPC_INST_LQARX | \
___PPC_RT(t) | ___PPC_RA(a) | \
___PPC_RB(b) | __PPC_EH(eh))


@ -50,6 +50,7 @@ static inline cycles_t get_cycles(void)
return ret;
#endif
}
#define get_cycles get_cycles
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_TIMEX_H */


@ -133,7 +133,7 @@ BEGIN_FTR_SECTION
END_FTR_SECTION_IFCLR(CPU_FTR_NO_DPM)
mtspr SPRN_HID0,r4
BEGIN_FTR_SECTION
DSSALL
PPC_DSSALL
sync
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
CURRENT_THREAD_INFO(r9, r1)


@ -108,7 +108,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_L2CR)
/* Stop DST streams */
BEGIN_FTR_SECTION
DSSALL
PPC_DSSALL
sync
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
@ -305,7 +305,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_L3CR)
isync
/* Stop DST streams */
DSSALL
PPC_DSSALL
sync
/* Get the current enable bit of the L3CR into r4 */
@ -414,7 +414,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_L3CR)
_GLOBAL(__flush_disable_L1)
/* Stop pending alitvec streams and memory accesses */
BEGIN_FTR_SECTION
DSSALL
PPC_DSSALL
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
sync


@ -2017,12 +2017,12 @@ unsigned long get_wchan(struct task_struct *p)
return 0;
do {
sp = *(unsigned long *)sp;
sp = READ_ONCE_NOCHECK(*(unsigned long *)sp);
if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD) ||
p->state == TASK_RUNNING)
return 0;
if (count > 0) {
ip = ((unsigned long *)sp)[STACK_FRAME_LR_SAVE];
ip = READ_ONCE_NOCHECK(((unsigned long *)sp)[STACK_FRAME_LR_SAVE]);
if (!in_sched_functions(ip))
return ip;
}


@ -181,7 +181,7 @@ _GLOBAL(swsusp_arch_resume)
#ifdef CONFIG_ALTIVEC
/* Stop pending alitvec streams and memory accesses */
BEGIN_FTR_SECTION
DSSALL
PPC_DSSALL
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
sync


@ -143,7 +143,7 @@ END_FW_FTR_SECTION_IFCLR(FW_FEATURE_LPAR)
_GLOBAL(swsusp_arch_resume)
/* Stop pending alitvec streams and memory accesses */
BEGIN_FTR_SECTION
DSSALL
PPC_DSSALL
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
sync


@ -83,7 +83,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
* context
*/
if (cpu_has_feature(CPU_FTR_ALTIVEC))
asm volatile ("dssall");
asm volatile (PPC_DSSALL);
if (new_on_cpu)
radix_kvm_prefetch_workaround(next);


@ -53,7 +53,7 @@ flush_disable_75x:
/* Stop DST streams */
BEGIN_FTR_SECTION
DSSALL
PPC_DSSALL
sync
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
@ -201,7 +201,7 @@ flush_disable_745x:
isync
/* Stop prefetch streams */
DSSALL
PPC_DSSALL
sync
/* Disable L2 prefetching */


@ -30,6 +30,8 @@
#ifndef __ASSEMBLY__
#include <asm/barrier.h>
struct task_struct;
struct pt_regs;


@ -21,18 +21,6 @@ extern atomic64_t s390_arch_random_counter;
bool s390_arch_random_generate(u8 *buf, unsigned int nbytes);
static inline bool arch_has_random(void)
{
return false;
}
static inline bool arch_has_random_seed(void)
{
if (static_branch_likely(&s390_arch_random_available))
return true;
return false;
}
static inline bool arch_get_random_long(unsigned long *v)
{
return false;


@ -177,6 +177,7 @@ static inline cycles_t get_cycles(void)
{
return (cycles_t) get_tod_clock() >> 2;
}
#define get_cycles get_cycles
int get_phys_clock(unsigned long *clock);
void init_cpu_timer(void);


@ -716,7 +716,7 @@ void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
pgste_val(pgste) |= PGSTE_GR_BIT | PGSTE_GC_BIT;
ptev = pte_val(*ptep);
if (!(ptev & _PAGE_INVALID) && (ptev & _PAGE_WRITE))
page_set_storage_key(ptev & PAGE_MASK, PAGE_DEFAULT_KEY, 1);
page_set_storage_key(ptev & PAGE_MASK, PAGE_DEFAULT_KEY, 0);
pgste_set_unlock(ptep, pgste);
preempt_enable();
}


@ -9,8 +9,6 @@
#define CLOCK_TICK_RATE 1193180 /* Underlying HZ */
/* XXX Maybe do something better at some point... -DaveM */
typedef unsigned long cycles_t;
#define get_cycles() (0)
#include <asm-generic/timex.h>
#endif


@ -2,13 +2,8 @@
#ifndef __UM_TIMEX_H
#define __UM_TIMEX_H
typedef unsigned long cycles_t;
static inline cycles_t get_cycles (void)
{
return 0;
}
#define CLOCK_TICK_RATE (HZ)
#include <asm-generic/timex.h>
#endif


@ -86,10 +86,6 @@ static inline bool rdseed_int(unsigned int *v)
return ok;
}
/* Conditional execution based on CPU type */
#define arch_has_random() static_cpu_has(X86_FEATURE_RDRAND)
#define arch_has_random_seed() static_cpu_has(X86_FEATURE_RDSEED)
/*
* These are the generic interfaces; they must not be declared if the
* stubs in <linux/random.h> are to be invoked,
@ -99,22 +95,22 @@ static inline bool rdseed_int(unsigned int *v)
static inline bool arch_get_random_long(unsigned long *v)
{
return arch_has_random() ? rdrand_long(v) : false;
return static_cpu_has(X86_FEATURE_RDRAND) ? rdrand_long(v) : false;
}
static inline bool arch_get_random_int(unsigned int *v)
{
return arch_has_random() ? rdrand_int(v) : false;
return static_cpu_has(X86_FEATURE_RDRAND) ? rdrand_int(v) : false;
}
static inline bool arch_get_random_seed_long(unsigned long *v)
{
return arch_has_random_seed() ? rdseed_long(v) : false;
return static_cpu_has(X86_FEATURE_RDSEED) ? rdseed_long(v) : false;
}
static inline bool arch_get_random_seed_int(unsigned int *v)
{
return arch_has_random_seed() ? rdseed_int(v) : false;
return static_cpu_has(X86_FEATURE_RDSEED) ? rdseed_int(v) : false;
}
extern void x86_init_rdrand(struct cpuinfo_x86 *c);


@ -5,6 +5,15 @@
#include <asm/processor.h>
#include <asm/tsc.h>
static inline unsigned long random_get_entropy(void)
{
if (!IS_ENABLED(CONFIG_X86_TSC) &&
!cpu_feature_enabled(X86_FEATURE_TSC))
return random_get_entropy_fallback();
return rdtsc();
}
#define random_get_entropy random_get_entropy
/* Assume we use the PIT time source for the clock tick */
#define CLOCK_TICK_RATE PIT_TICK_RATE


@ -22,13 +22,12 @@ extern void disable_TSC(void);
static inline cycles_t get_cycles(void)
{
#ifndef CONFIG_X86_TSC
if (!boot_cpu_has(X86_FEATURE_TSC))
if (!IS_ENABLED(CONFIG_X86_TSC) &&
!cpu_feature_enabled(X86_FEATURE_TSC))
return 0;
#endif
return rdtsc();
}
#define get_cycles get_cycles
extern struct system_counterval_t convert_art_to_tsc(u64 art);
extern struct system_counterval_t convert_art_ns_to_tsc(u64 art_ns);


@ -30,10 +30,6 @@
extern unsigned long ccount_freq;
typedef unsigned long long cycles_t;
#define get_cycles() (0)
void local_timer_setup(unsigned cpu);
/*
@ -69,4 +65,6 @@ static inline void set_linux_timer (unsigned long ccompare)
WSR_CCOMPARE(LINUX_TIMER, ccompare);
}
#include <asm-generic/timex.h>
#endif /* _XTENSA_TIMEX_H */


@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include "blacklist.h"
const char __initdata *const blacklist_hashes[] = {
const char __initconst *const blacklist_hashes[] = {
#include CONFIG_SYSTEM_BLACKLIST_HASH_LIST
, NULL
};


@ -219,6 +219,57 @@ static inline unsigned short drbg_sec_strength(drbg_flag_t flags)
}
}
/*
* FIPS 140-2 continuous self test for the noise source
* The test is performed on the noise source input data. Thus, the function
* implicitly knows the size of the buffer to be equal to the security
* strength.
*
* Note, this function disregards the nonce trailing the entropy data during
* initial seeding.
*
* drbg->drbg_mutex must have been taken.
*
* @drbg DRBG handle
* @entropy buffer of seed data to be checked
*
* return:
* 0 on success
* -EAGAIN on when the CTRNG is not yet primed
* < 0 on error
*/
static int drbg_fips_continuous_test(struct drbg_state *drbg,
const unsigned char *entropy)
{
unsigned short entropylen = drbg_sec_strength(drbg->core->flags);
int ret = 0;
if (!IS_ENABLED(CONFIG_CRYPTO_FIPS))
return 0;
/* skip test if we test the overall system */
if (list_empty(&drbg->test_data.list))
return 0;
/* only perform test in FIPS mode */
if (!fips_enabled)
return 0;
if (!drbg->fips_primed) {
/* Priming of FIPS test */
memcpy(drbg->prev, entropy, entropylen);
drbg->fips_primed = true;
/* priming: another round is needed */
return -EAGAIN;
}
ret = memcmp(drbg->prev, entropy, entropylen);
if (!ret)
panic("DRBG continuous self test failed\n");
memcpy(drbg->prev, entropy, entropylen);
/* the test shall pass when the two values are not equal */
return 0;
}
/*
* Convert an integer into a byte representation of this integer.
* The byte representation is big-endian
@ -984,55 +1035,79 @@ static const struct drbg_state_ops drbg_hash_ops = {
******************************************************************/
static inline int __drbg_seed(struct drbg_state *drbg, struct list_head *seed,
int reseed)
int reseed, enum drbg_seed_state new_seed_state)
{
int ret = drbg->d_ops->update(drbg, seed, reseed);
if (ret)
return ret;
drbg->seeded = true;
drbg->seeded = new_seed_state;
/* 10.1.1.2 / 10.1.1.3 step 5 */
drbg->reseed_ctr = 1;
switch (drbg->seeded) {
case DRBG_SEED_STATE_UNSEEDED:
/* Impossible, but handle it to silence compiler warnings. */
case DRBG_SEED_STATE_PARTIAL:
/*
* Require frequent reseeds until the seed source is
* fully initialized.
*/
drbg->reseed_threshold = 50;
break;
case DRBG_SEED_STATE_FULL:
/*
* Seed source has become fully initialized, frequent
* reseeds no longer required.
*/
drbg->reseed_threshold = drbg_max_requests(drbg);
break;
}
return ret;
}
static void drbg_async_seed(struct work_struct *work)
static inline int drbg_get_random_bytes(struct drbg_state *drbg,
unsigned char *entropy,
unsigned int entropylen)
{
int ret;
do {
get_random_bytes(entropy, entropylen);
ret = drbg_fips_continuous_test(drbg, entropy);
if (ret && ret != -EAGAIN)
return ret;
} while (ret);
return 0;
}
static int drbg_seed_from_random(struct drbg_state *drbg)
{
struct drbg_string data;
LIST_HEAD(seedlist);
struct drbg_state *drbg = container_of(work, struct drbg_state,
seed_work);
unsigned int entropylen = drbg_sec_strength(drbg->core->flags);
unsigned char entropy[32];
int ret;
BUG_ON(!entropylen);
BUG_ON(entropylen > sizeof(entropy));
get_random_bytes(entropy, entropylen);
drbg_string_fill(&data, entropy, entropylen);
list_add_tail(&data.list, &seedlist);
mutex_lock(&drbg->drbg_mutex);
ret = drbg_get_random_bytes(drbg, entropy, entropylen);
if (ret)
goto out;
/* If nonblocking pool is initialized, deactivate Jitter RNG */
crypto_free_rng(drbg->jent);
drbg->jent = NULL;
/* Set seeded to false so that if __drbg_seed fails the
* next generate call will trigger a reseed.
*/
drbg->seeded = false;
__drbg_seed(drbg, &seedlist, true);
if (drbg->seeded)
drbg->reseed_threshold = drbg_max_requests(drbg);
mutex_unlock(&drbg->drbg_mutex);
ret = __drbg_seed(drbg, &seedlist, true, DRBG_SEED_STATE_FULL);
out:
memzero_explicit(entropy, entropylen);
return ret;
}
/*
@ -1054,6 +1129,7 @@ static int drbg_seed(struct drbg_state *drbg, struct drbg_string *pers,
unsigned int entropylen = drbg_sec_strength(drbg->core->flags);
struct drbg_string data1;
LIST_HEAD(seedlist);
enum drbg_seed_state new_seed_state = DRBG_SEED_STATE_FULL;
/* 9.1 / 9.2 / 9.3.1 step 3 */
if (pers && pers->len > (drbg_max_addtl(drbg))) {
@ -1081,7 +1157,12 @@ static int drbg_seed(struct drbg_state *drbg, struct drbg_string *pers,
BUG_ON((entropylen * 2) > sizeof(entropy));
/* Get seed from in-kernel /dev/urandom */
get_random_bytes(entropy, entropylen);
if (!rng_is_initialized())
new_seed_state = DRBG_SEED_STATE_PARTIAL;
ret = drbg_get_random_bytes(drbg, entropy, entropylen);
if (ret)
goto out;
if (!drbg->jent) {
drbg_string_fill(&data1, entropy, entropylen);
@ -1094,7 +1175,23 @@ static int drbg_seed(struct drbg_state *drbg, struct drbg_string *pers,
entropylen);
if (ret) {
pr_devel("DRBG: jent failed with %d\n", ret);
return ret;
/*
* Do not treat the transient failure of the
* Jitter RNG as an error that needs to be
* reported. The combined number of the
* maximum reseed threshold times the maximum
* number of Jitter RNG transient errors is
* less than the reseed threshold required by
* SP800-90A allowing us to treat the
* transient errors as such.
*
* However, we mandate that at least the first
* seeding operation must succeed with the
* Jitter RNG.
*/
if (!reseed || ret != -EAGAIN)
goto out;
}
drbg_string_fill(&data1, entropy, entropylen * 2);
@ -1119,8 +1216,9 @@ static int drbg_seed(struct drbg_state *drbg, struct drbg_string *pers,
memset(drbg->C, 0, drbg_statelen(drbg));
}
ret = __drbg_seed(drbg, &seedlist, reseed);
ret = __drbg_seed(drbg, &seedlist, reseed, new_seed_state);
out:
memzero_explicit(entropy, entropylen * 2);
return ret;
@ -1142,6 +1240,11 @@ static inline void drbg_dealloc_state(struct drbg_state *drbg)
drbg->reseed_ctr = 0;
drbg->d_ops = NULL;
drbg->core = NULL;
if (IS_ENABLED(CONFIG_CRYPTO_FIPS)) {
kzfree(drbg->prev);
drbg->prev = NULL;
drbg->fips_primed = false;
}
}
/*
@ -1211,6 +1314,14 @@ static inline int drbg_alloc_state(struct drbg_state *drbg)
drbg->scratchpad = PTR_ALIGN(drbg->scratchpadbuf, ret + 1);
}
if (IS_ENABLED(CONFIG_CRYPTO_FIPS)) {
drbg->prev = kzalloc(drbg_sec_strength(drbg->core->flags),
GFP_KERNEL);
if (!drbg->prev)
goto fini;
drbg->fips_primed = false;
}
return 0;
fini:
@ -1283,19 +1394,25 @@ static int drbg_generate(struct drbg_state *drbg,
* here. The spec is a bit convoluted here, we make it simpler.
*/
if (drbg->reseed_threshold < drbg->reseed_ctr)
drbg->seeded = false;
drbg->seeded = DRBG_SEED_STATE_UNSEEDED;
if (drbg->pr || !drbg->seeded) {
if (drbg->pr || drbg->seeded == DRBG_SEED_STATE_UNSEEDED) {
pr_devel("DRBG: reseeding before generation (prediction "
"resistance: %s, state %s)\n",
drbg->pr ? "true" : "false",
drbg->seeded ? "seeded" : "unseeded");
(drbg->seeded == DRBG_SEED_STATE_FULL ?
"seeded" : "unseeded"));
/* 9.3.1 steps 7.1 through 7.3 */
len = drbg_seed(drbg, addtl, true);
if (len)
goto err;
/* 9.3.1 step 7.4 */
addtl = NULL;
} else if (rng_is_initialized() &&
drbg->seeded == DRBG_SEED_STATE_PARTIAL) {
len = drbg_seed_from_random(drbg);
if (len)
goto err;
}
if (addtl && 0 < addtl->len)
@ -1388,51 +1505,15 @@ static int drbg_generate_long(struct drbg_state *drbg,
return 0;
}
static void drbg_schedule_async_seed(struct random_ready_callback *rdy)
{
struct drbg_state *drbg = container_of(rdy, struct drbg_state,
random_ready);
schedule_work(&drbg->seed_work);
}
static int drbg_prepare_hrng(struct drbg_state *drbg)
{
int err;
/* We do not need an HRNG in test mode. */
if (list_empty(&drbg->test_data.list))
return 0;
INIT_WORK(&drbg->seed_work, drbg_async_seed);
drbg->random_ready.owner = THIS_MODULE;
drbg->random_ready.func = drbg_schedule_async_seed;
err = add_random_ready_callback(&drbg->random_ready);
switch (err) {
case 0:
break;
case -EALREADY:
err = 0;
/* fall through */
default:
drbg->random_ready.func = NULL;
return err;
}
drbg->jent = crypto_alloc_rng("jitterentropy_rng", 0, 0);
/*
* Require frequent reseeds until the seed source is fully
* initialized.
*/
drbg->reseed_threshold = 50;
return err;
return 0;
}
/*
@ -1475,7 +1556,7 @@ static int drbg_instantiate(struct drbg_state *drbg, struct drbg_string *pers,
if (!drbg->core) {
drbg->core = &drbg_cores[coreref];
drbg->pr = pr;
drbg->seeded = false;
drbg->seeded = DRBG_SEED_STATE_UNSEEDED;
drbg->reseed_threshold = drbg_max_requests(drbg);
ret = drbg_alloc_state(drbg);
@ -1526,12 +1607,9 @@ free_everything:
*/
static int drbg_uninstantiate(struct drbg_state *drbg)
{
if (drbg->random_ready.func) {
del_random_ready_callback(&drbg->random_ready);
cancel_work_sync(&drbg->seed_work);
if (!IS_ERR_OR_NULL(drbg->jent))
crypto_free_rng(drbg->jent);
drbg->jent = NULL;
}
drbg->jent = NULL;
if (drbg->d_ops)
drbg->d_ops->crypto_fini(drbg);


@ -6253,7 +6253,7 @@ struct ata_host *ata_host_alloc_pinfo(struct device *dev,
const struct ata_port_info * const * ppi,
int n_ports)
{
const struct ata_port_info *pi;
const struct ata_port_info *pi = &ata_dummy_port_info;
struct ata_host *host;
int i, j;
@ -6261,7 +6261,7 @@ struct ata_host *ata_host_alloc_pinfo(struct device *dev,
if (!host)
return NULL;
for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
for (i = 0, j = 0; i < host->n_ports; i++) {
struct ata_port *ap = host->ports[i];
if (ppi[j])


@ -552,28 +552,41 @@ config ADI
and SSM (Silicon Secured Memory). Intended consumers of this
driver include crash and makedumpfile.
endmenu
config RANDOM_TRUST_CPU
bool "Trust the CPU manufacturer to initialize Linux's CRNG"
depends on X86 || S390 || PPC
default n
bool "Initialize RNG using CPU RNG instructions"
default y
depends on ARCH_RANDOM
help
Assume that CPU manufacturer (e.g., Intel or AMD for RDSEED or
RDRAND, IBM for the S390 and Power PC architectures) is trustworthy
for the purposes of initializing Linux's CRNG. Since this is not
something that can be independently audited, this amounts to trusting
that CPU manufacturer (perhaps with the insistence or mandate
of a Nation State's intelligence or law enforcement agencies)
has not installed a hidden back door to compromise the CPU's
random number generation facilities. This can also be configured
at boot with "random.trust_cpu=on/off".
Initialize the RNG using random numbers supplied by the CPU's
RNG instructions (e.g. RDRAND), if supported and available. These
random numbers are never used directly, but are rather hashed into
the main input pool, and this happens regardless of whether or not
this option is enabled. Instead, this option controls whether the
they are credited and hence can initialize the RNG. Additionally,
other sources of randomness are always used, regardless of this
setting. Enabling this implies trusting that the CPU can supply high
quality and non-backdoored random numbers.
Say Y here unless you have reason to mistrust your CPU or believe
its RNG facilities may be faulty. This may also be configured at
boot time with "random.trust_cpu=on/off".
config RANDOM_TRUST_BOOTLOADER
bool "Trust the bootloader to initialize Linux's CRNG"
bool "Initialize RNG using bootloader-supplied seed"
default y
help
Some bootloaders can provide entropy to increase the kernel's initial
device randomness. Say Y here to assume the entropy provided by the
booloader is trustworthy so it will be added to the kernel's entropy
pool. Otherwise, say N here so it will be regarded as device input that
only mixes the entropy pool.
Initialize the RNG using a seed supplied by the bootloader or boot
environment (e.g. EFI or a bootloader-generated device tree). This
seed is not used directly, but is rather hashed into the main input
pool, and this happens regardless of whether or not this option is
enabled. Instead, this option controls whether the seed is credited
and hence can initialize the RNG. Additionally, other sources of
randomness are always used, regardless of this setting. Enabling
this implies trusting that the bootloader can supply high quality and
non-backdoored seeds.
Say Y here unless you have reason to mistrust your bootloader or
believe its RNG facilities may be faulty. This may also be configured
at boot time with "random.trust_bootloader=on/off".
endmenu

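A rough sketch of the behaviour the two help texts above describe: seed
material is always mixed in, and only the entropy credit depends on the trust
setting (or the corresponding random.trust_cpu= / random.trust_bootloader=
boot parameters). The helper names here are hypothetical, not the kernel's
internal API:

	#include <stdbool.h>
	#include <stddef.h>
	#include <stdio.h>

	static bool trust_bootloader = true;	/* e.g. random.trust_bootloader=on */
	static unsigned int entropy_bits;	/* stand-in for the pool's credit */

	static void mix_into_pool(const void *seed, size_t len)
	{
		/* in the kernel the seed is hashed into the input pool, trusted or not */
		printf("mixed %zu bytes of seed material\n", len);
	}

	static void add_bootloader_seed(const void *seed, size_t len)
	{
		mix_into_pool(seed, len);		/* always happens */
		if (trust_bootloader)
			entropy_bits += 8 * len;	/* credited only if trusted */
	}

	int main(void)
	{
		unsigned char seed[32] = { 0 };

		add_bootloader_seed(seed, sizeof(seed));
		printf("credited entropy: %u bits\n", entropy_bits);
		return 0;
	}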

@ -15,6 +15,7 @@
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hw_random.h>
#include <linux/random.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/sched/signal.h>

File diff suppressed because it is too large.


@ -115,7 +115,7 @@ static void hv_stimer0_isr(void)
hv_cpu = this_cpu_ptr(hv_context.cpu_context);
hv_cpu->clk_evt->event_handler(hv_cpu->clk_evt);
add_interrupt_randomness(stimer0_vector, 0);
add_interrupt_randomness(stimer0_vector);
}
static int hv_ce_set_next_event(unsigned long delta,


@ -1146,7 +1146,7 @@ static void vmbus_isr(void)
tasklet_schedule(&hv_cpu->msg_dpc);
}
add_interrupt_randomness(HYPERVISOR_CALLBACK_VECTOR, 0);
add_interrupt_randomness(HYPERVISOR_CALLBACK_VECTOR);
}
/*


@ -57,6 +57,7 @@ realview_gic_of_init(struct device_node *node, struct device_node *parent)
/* The PB11MPCore GIC needs to be configured in the syscon */
map = syscon_node_to_regmap(np);
of_node_put(np);
if (!IS_ERR(map)) {
/* new irq mode with no DCC */
regmap_write(map, REALVIEW_SYS_LOCK_OFFSET,


@ -1209,12 +1209,15 @@ static void __init gic_populate_ppi_partitions(struct device_node *gic_node)
continue;
cpu = of_cpu_node_to_id(cpu_node);
if (WARN_ON(cpu < 0))
if (WARN_ON(cpu < 0)) {
of_node_put(cpu_node);
continue;
}
pr_cont("%pOF[%d] ", cpu_node, cpu);
cpumask_set_cpu(cpu, &part->mask);
of_node_put(cpu_node);
}
pr_cont("}\n");


@ -235,9 +235,9 @@ static int ssc_probe(struct platform_device *pdev)
clk_disable_unprepare(ssc->clk);
ssc->irq = platform_get_irq(pdev, 0);
if (!ssc->irq) {
if (ssc->irq < 0) {
dev_dbg(&pdev->dev, "could not get irq\n");
return -ENXIO;
return ssc->irq;
}
mutex_lock(&user_lock);


@ -323,7 +323,6 @@ static void bgmac_remove(struct bcma_device *core)
bcma_mdio_mii_unregister(bgmac->mii_bus);
bgmac_enet_remove(bgmac);
bcma_set_drvdata(core, NULL);
kfree(bgmac);
}
static struct bcma_driver bgmac_bcma_driver = {


@ -2195,15 +2195,16 @@ static void i40e_diag_test(struct net_device *netdev,
set_bit(__I40E_TESTING, pf->state);
if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) {
dev_warn(&pf->pdev->dev,
"Cannot start offline testing when PF is in reset state.\n");
goto skip_ol_tests;
}
if (i40e_active_vfs(pf) || i40e_active_vmdqs(pf)) {
dev_warn(&pf->pdev->dev,
"Please take active VFs and Netqueues offline and restart the adapter before running NIC diagnostics\n");
data[I40E_ETH_TEST_REG] = 1;
data[I40E_ETH_TEST_EEPROM] = 1;
data[I40E_ETH_TEST_INTR] = 1;
data[I40E_ETH_TEST_LINK] = 1;
eth_test->flags |= ETH_TEST_FL_FAILED;
clear_bit(__I40E_TESTING, pf->state);
goto skip_ol_tests;
}
@ -2250,9 +2251,17 @@ static void i40e_diag_test(struct net_device *netdev,
data[I40E_ETH_TEST_INTR] = 0;
}
skip_ol_tests:
netif_info(pf, drv, netdev, "testing finished\n");
return;
skip_ol_tests:
data[I40E_ETH_TEST_REG] = 1;
data[I40E_ETH_TEST_EEPROM] = 1;
data[I40E_ETH_TEST_INTR] = 1;
data[I40E_ETH_TEST_LINK] = 1;
eth_test->flags |= ETH_TEST_FL_FAILED;
clear_bit(__I40E_TESTING, pf->state);
netif_info(pf, drv, netdev, "testing failed\n");
}
static void i40e_get_wol(struct net_device *netdev,


@ -7508,6 +7508,11 @@ static int i40e_configure_clsflower(struct i40e_vsi *vsi,
return -EOPNOTSUPP;
}
if (!tc) {
dev_err(&pf->pdev->dev, "Unable to add filter because of invalid destination");
return -EINVAL;
}
if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
test_bit(__I40E_RESET_INTR_RECEIVED, pf->state))
return -EBUSY;


@ -597,6 +597,17 @@ static inline void mtk_rx_get_desc(struct mtk_rx_dma *rxd,
rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
}
static void *mtk_max_lro_buf_alloc(gfp_t gfp_mask)
{
unsigned int size = mtk_max_frag_size(MTK_MAX_LRO_RX_LENGTH);
unsigned long data;
data = __get_free_pages(gfp_mask | __GFP_COMP | __GFP_NOWARN,
get_order(size));
return (void *)data;
}
/* the qdma core needs scratch memory to be setup */
static int mtk_init_fq_dma(struct mtk_eth *eth)
{
@ -1005,7 +1016,10 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
goto release_desc;
/* alloc new buffer */
new_data = napi_alloc_frag(ring->frag_size);
if (ring->frag_size <= PAGE_SIZE)
new_data = napi_alloc_frag(ring->frag_size);
else
new_data = mtk_max_lro_buf_alloc(GFP_ATOMIC);
if (unlikely(!new_data)) {
netdev->stats.rx_dropped++;
goto release_desc;
@ -1312,7 +1326,10 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
return -ENOMEM;
for (i = 0; i < rx_dma_size; i++) {
ring->data[i] = netdev_alloc_frag(ring->frag_size);
if (ring->frag_size <= PAGE_SIZE)
ring->data[i] = netdev_alloc_frag(ring->frag_size);
else
ring->data[i] = mtk_max_lro_buf_alloc(GFP_KERNEL);
if (!ring->data[i])
return -ENOMEM;
}


@ -7,8 +7,8 @@
#include "spectrum.h"
enum mlxsw_sp_counter_sub_pool_id {
MLXSW_SP_COUNTER_SUB_POOL_FLOW,
MLXSW_SP_COUNTER_SUB_POOL_RIF,
MLXSW_SP_COUNTER_SUB_POOL_FLOW,
};
int mlxsw_sp_counter_alloc(struct mlxsw_sp *mlxsw_sp,


@ -401,13 +401,25 @@ static void nfcmrvl_play_deferred(struct nfcmrvl_usb_drv_data *drv_data)
int err;
while ((urb = usb_get_from_anchor(&drv_data->deferred))) {
usb_anchor_urb(urb, &drv_data->tx_anchor);
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err)
if (err) {
kfree(urb->setup_packet);
usb_unanchor_urb(urb);
usb_free_urb(urb);
break;
}
drv_data->tx_in_flight++;
usb_free_urb(urb);
}
/* Cleanup the rest deferred urbs. */
while ((urb = usb_get_from_anchor(&drv_data->deferred))) {
kfree(urb->setup_packet);
usb_free_urb(urb);
}
usb_scuttle_anchored_urbs(&drv_data->deferred);
}
static int nfcmrvl_resume(struct usb_interface *intf)


@ -9783,7 +9783,7 @@ static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
GFP_KERNEL);
if (!ioa_cfg->hrrq[i].host_rrq) {
while (--i > 0)
while (--i >= 0)
dma_free_coherent(&pdev->dev,
sizeof(u32) * ioa_cfg->hrrq[i].size,
ioa_cfg->hrrq[i].host_rrq,
@ -10056,7 +10056,7 @@ static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg,
ioa_cfg->vectors_info[i].desc,
&ioa_cfg->hrrq[i]);
if (rc) {
while (--i >= 0)
while (--i > 0)
free_irq(pci_irq_vector(pdev, i),
&ioa_cfg->hrrq[i]);
return rc;


@ -662,7 +662,8 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
else
lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
if (ndlp->nlp_DID == Fabric_DID) {
if (vport->port_state <= LPFC_FDISC)
if (vport->port_state <= LPFC_FDISC ||
vport->fc_flag & FC_PT2PT)
goto out;
lpfc_linkdown_port(vport);
spin_lock_irq(shost->host_lock);


@ -4559,7 +4559,7 @@ pmcraid_register_interrupt_handler(struct pmcraid_instance *pinstance)
return 0;
out_unwind:
while (--i > 0)
while (--i >= 0)
free_irq(pci_irq_vector(pdev, i), &pinstance->hrrq_vector[i]);
pci_free_irq_vectors(pdev);
return rc;


@ -333,8 +333,8 @@ struct PVSCSIRingReqDesc {
u8 tag;
u8 bus;
u8 target;
u8 vcpuHint;
u8 unused[59];
u16 vcpuHint;
u8 unused[58];
} __packed;
/*


@ -685,7 +685,7 @@ static int vmk80xx_alloc_usb_buffers(struct comedi_device *dev)
if (!devpriv->usb_rx_buf)
return -ENOMEM;
size = max(usb_endpoint_maxp(devpriv->ep_rx), MIN_BUF_SIZE);
size = max(usb_endpoint_maxp(devpriv->ep_tx), MIN_BUF_SIZE);
devpriv->usb_tx_buf = kzalloc(size, GFP_KERNEL);
if (!devpriv->usb_tx_buf)
return -ENOMEM;


@ -428,7 +428,7 @@ static int goldfish_tty_remove(struct platform_device *pdev)
tty_unregister_device(goldfish_tty_driver, qtty->console.index);
iounmap(qtty->base);
qtty->base = NULL;
free_irq(qtty->irq, pdev);
free_irq(qtty->irq, qtty);
tty_port_destroy(&qtty->port);
goldfish_tty_current_line_count--;
if (goldfish_tty_current_line_count == 0)


@ -1522,6 +1522,8 @@ static inline void __stop_tx(struct uart_8250_port *p)
if (em485) {
unsigned char lsr = serial_in(p, UART_LSR);
p->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS;
/*
* To provide required timeing and allow FIFO transfer,
* __stop_tx_rs485() must be called only when both FIFO and


@ -5236,7 +5236,7 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
retval = -EINVAL;
goto error1;
goto error2;
}
hcd->rsrc_start = res->start;
hcd->rsrc_len = resource_size(res);


@ -772,9 +772,13 @@ struct eth_dev *gether_setup_name(struct usb_gadget *g,
dev->qmult = qmult;
snprintf(net->name, sizeof(net->name), "%s%%d", netname);
if (get_ether_addr(dev_addr, net->dev_addr))
if (get_ether_addr(dev_addr, net->dev_addr)) {
net->addr_assign_type = NET_ADDR_RANDOM;
dev_warn(&g->dev,
"using random %s ethernet address\n", "self");
} else {
net->addr_assign_type = NET_ADDR_SET;
}
if (get_ether_addr(host_addr, dev->host_mac))
dev_warn(&g->dev,
"using random %s ethernet address\n", "host");
@ -831,6 +835,9 @@ struct net_device *gether_setup_name_default(const char *netname)
INIT_LIST_HEAD(&dev->tx_reqs);
INIT_LIST_HEAD(&dev->rx_reqs);
/* by default we always have a random MAC address */
net->addr_assign_type = NET_ADDR_RANDOM;
skb_queue_head_init(&dev->rx_frames);
/* network device setup */
@ -868,7 +875,6 @@ int gether_register_netdev(struct net_device *net)
g = dev->gadget;
memcpy(net->dev_addr, dev->dev_mac, ETH_ALEN);
net->addr_assign_type = NET_ADDR_RANDOM;
status = register_netdev(net);
if (status < 0) {
@ -908,6 +914,7 @@ int gether_set_dev_addr(struct net_device *net, const char *dev_addr)
if (get_ether_addr(dev_addr, new_addr))
return -EINVAL;
memcpy(dev->dev_mac, new_addr, ETH_ALEN);
net->addr_assign_type = NET_ADDR_SET;
return 0;
}
EXPORT_SYMBOL_GPL(gether_set_dev_addr);


@ -3021,6 +3021,7 @@ static int lpc32xx_udc_probe(struct platform_device *pdev)
}
udc->isp1301_i2c_client = isp1301_get_client(isp1301_node);
of_node_put(isp1301_node);
if (!udc->isp1301_i2c_client) {
retval = -EPROBE_DEFER;
goto phy_fail;


@ -168,6 +168,7 @@ static const struct usb_device_id edgeport_2port_id_table[] = {
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_8S) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_416) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_416B) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_E5805A) },
{ }
};
@ -206,6 +207,7 @@ static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_8S) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_416) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_416B) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_E5805A) },
{ }
};


@ -212,6 +212,7 @@
//
// Definitions for other product IDs
#define ION_DEVICE_ID_MT4X56USB 0x1403 // OEM device
#define ION_DEVICE_ID_E5805A 0x1A01 // OEM device (rebranded Edgeport/4)
#define GENERATION_ID_FROM_USB_PRODUCT_ID(ProductId) \


@ -432,6 +432,8 @@ static void option_instat_callback(struct urb *urb);
#define CINTERION_PRODUCT_CLS8 0x00b0
#define CINTERION_PRODUCT_MV31_MBIM 0x00b3
#define CINTERION_PRODUCT_MV31_RMNET 0x00b7
#define CINTERION_PRODUCT_MV31_2_MBIM 0x00b8
#define CINTERION_PRODUCT_MV31_2_RMNET 0x00b9
#define CINTERION_PRODUCT_MV32_WA 0x00f1
#define CINTERION_PRODUCT_MV32_WB 0x00f2
@ -1979,6 +1981,10 @@ static const struct usb_device_id option_ids[] = {
.driver_info = RSVD(3)},
{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV31_RMNET, 0xff),
.driver_info = RSVD(0)},
{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV31_2_MBIM, 0xff),
.driver_info = RSVD(3)},
{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV31_2_RMNET, 0xff),
.driver_info = RSVD(0)},
{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV32_WA, 0xff),
.driver_info = RSVD(3)},
{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV32_WB, 0xff),


@ -663,6 +663,7 @@ static int vm_cmdline_set(const char *device,
if (!vm_cmdline_parent_registered) {
err = device_register(&vm_cmdline_parent);
if (err) {
put_device(&vm_cmdline_parent);
pr_err("Failed to register parent device!\n");
return err;
}


@ -257,8 +257,7 @@ void vp_del_vqs(struct virtio_device *vdev)
if (vp_dev->msix_affinity_masks) {
for (i = 0; i < vp_dev->msix_vectors; i++)
if (vp_dev->msix_affinity_masks[i])
free_cpumask_var(vp_dev->msix_affinity_masks[i]);
free_cpumask_var(vp_dev->msix_affinity_masks[i]);
}
if (vp_dev->msix_enabled) {


@ -656,14 +656,10 @@ v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode,
if (stat->st_result_mask & P9_STATS_NLINK)
set_nlink(inode, stat->st_nlink);
if (stat->st_result_mask & P9_STATS_MODE) {
inode->i_mode = stat->st_mode;
if ((S_ISBLK(inode->i_mode)) ||
(S_ISCHR(inode->i_mode)))
init_special_inode(inode, inode->i_mode,
inode->i_rdev);
mode = stat->st_mode & S_IALLUGO;
mode |= inode->i_mode & ~S_IALLUGO;
inode->i_mode = mode;
}
if (stat->st_result_mask & P9_STATS_RDEV)
inode->i_rdev = new_decode_dev(stat->st_rdev);
if (!(flags & V9FS_STAT2INODE_KEEP_ISIZE) &&
stat->st_result_mask & P9_STATS_SIZE)
v9fs_i_size_write(inode, stat->st_size);


@ -3170,6 +3170,15 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac,
size = size >> bsbits;
start = start_off >> bsbits;
/*
* For tiny groups (smaller than 8MB) the chosen allocation
* alignment may be larger than group size. Make sure the
* alignment does not move allocation to a different group which
* makes mballoc fail assertions later.
*/
start = max(start, rounddown(ac->ac_o_ex.fe_logical,
(ext4_lblk_t)EXT4_BLOCKS_PER_GROUP(ac->ac_sb)));
/* don't cover already allocated blocks in selected range */
if (ar->pleft && start <= ar->lleft) {
size -= ar->lleft + 1 - start;


@ -1917,7 +1917,8 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
struct dx_hash_info *hinfo)
{
unsigned blocksize = dir->i_sb->s_blocksize;
unsigned count, continued;
unsigned continued;
int count;
struct buffer_head *bh2;
ext4_lblk_t newblock;
u32 hash2;


@ -52,6 +52,16 @@ int ext4_resize_begin(struct super_block *sb)
if (!capable(CAP_SYS_RESOURCE))
return -EPERM;
/*
* If the reserved GDT blocks is non-zero, the resize_inode feature
* should always be set.
*/
if (EXT4_SB(sb)->s_es->s_reserved_gdt_blocks &&
!ext4_has_feature_resize_inode(sb)) {
ext4_error(sb, "resize_inode disabled but reserved GDT blocks non-zero");
return -EFSCORRUPTED;
}
/*
* If we are not using the primary superblock/GDT copy don't resize,
* because the user tools have no way of handling this. Probably a


@ -2045,6 +2045,12 @@ lookup_again:
case -ERECALLCONFLICT:
case -EAGAIN:
break;
case -ENODATA:
/* The server returned NFS4ERR_LAYOUTUNAVAILABLE */
pnfs_layout_set_fail_bit(
lo, pnfs_iomode_to_fail_bit(iomode));
lseg = NULL;
goto out_put_layout_hdr;
default:
if (!nfs_error_is_fatal(PTR_ERR(lseg))) {
pnfs_layout_clear_fail_bit(lo, pnfs_iomode_to_fail_bit(iomode));


@ -22,7 +22,9 @@
#define CHACHA_IV_SIZE 16
#define CHACHA_KEY_SIZE 32
#define CHACHA20_KEY_SIZE 32
#define CHACHA_BLOCK_SIZE 64
#define CHACHA20_BLOCK_SIZE 64
#define CHACHA_STATE_WORDS (CHACHA_BLOCK_SIZE / sizeof(u32))
@ -46,13 +48,25 @@ static inline void hchacha_block(const u32 *state, u32 *out, int nrounds)
hchacha_block_generic(state, out, nrounds);
}
enum chacha_constants { /* expand 32-byte k */
CHACHA_CONSTANT_EXPA = 0x61707865U,
CHACHA_CONSTANT_ND_3 = 0x3320646eU,
CHACHA_CONSTANT_2_BY = 0x79622d32U,
CHACHA_CONSTANT_TE_K = 0x6b206574U
};
static inline void chacha_init_consts(u32 *state)
{
state[0] = CHACHA_CONSTANT_EXPA;
state[1] = CHACHA_CONSTANT_ND_3;
state[2] = CHACHA_CONSTANT_2_BY;
state[3] = CHACHA_CONSTANT_TE_K;
}
void chacha_init_arch(u32 *state, const u32 *key, const u8 *iv);
static inline void chacha_init_generic(u32 *state, const u32 *key, const u8 *iv)
{
state[0] = 0x61707865; /* "expa" */
state[1] = 0x3320646e; /* "nd 3" */
state[2] = 0x79622d32; /* "2-by" */
state[3] = 0x6b206574; /* "te k" */
chacha_init_consts(state);
state[4] = key[0];
state[5] = key[1];
state[6] = key[2];

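The four constants above are the ASCII string "expand 32-byte k" read as
little-endian 32-bit words; a quick check, assuming a little-endian host:

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		const uint32_t c[4] = { 0x61707865, 0x3320646e, 0x79622d32, 0x6b206574 };
		char s[17] = { 0 };

		memcpy(s, c, sizeof(c));	/* 16 bytes, little-endian host assumed */
		printf("%s\n", s);		/* prints: expand 32-byte k */
		return 0;
	}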

@ -105,6 +105,12 @@ struct drbg_test_data {
struct drbg_string *testentropy; /* TEST PARAMETER: test entropy */
};
enum drbg_seed_state {
DRBG_SEED_STATE_UNSEEDED,
DRBG_SEED_STATE_PARTIAL, /* Seeded with !rng_is_initialized() */
DRBG_SEED_STATE_FULL,
};
struct drbg_state {
struct mutex drbg_mutex; /* lock around DRBG */
unsigned char *V; /* internal state 10.1.1.1 1a) */
@ -127,14 +133,14 @@ struct drbg_state {
struct crypto_wait ctr_wait; /* CTR mode async wait obj */
struct scatterlist sg_in, sg_out; /* CTR mode SGLs */
bool seeded; /* DRBG fully seeded? */
enum drbg_seed_state seeded; /* DRBG fully seeded? */
bool pr; /* Prediction resistance enabled? */
struct work_struct seed_work; /* asynchronous seeding support */
bool fips_primed; /* Continuous test primed? */
unsigned char *prev; /* FIPS 140-2 continuous test value */
struct crypto_rng *jent;
const struct drbg_state_ops *d_ops;
const struct drbg_core *core;
struct drbg_string test_data;
struct random_ready_callback random_ready;
};
static inline __u8 drbg_statelen(struct drbg_state *drbg)
@ -182,11 +188,7 @@ static inline size_t drbg_max_addtl(struct drbg_state *drbg)
static inline size_t drbg_max_requests(struct drbg_state *drbg)
{
/* SP800-90A requires 2**48 maximum requests before reseeding */
#if (__BITS_PER_LONG == 32)
return SIZE_MAX;
#else
return (1UL<<48);
#endif
return (1<<20);
}
/*


@ -59,6 +59,7 @@ enum cpuhp_state {
CPUHP_IOMMU_INTEL_DEAD,
CPUHP_LUSTRE_CFS_DEAD,
CPUHP_AP_ARM_CACHE_B15_RAC_DEAD,
CPUHP_RANDOM_PREPARE,
CPUHP_WORKQUEUE_PREP,
CPUHP_POWER_NUMA_PREPARE,
CPUHP_HRTIMERS_PREPARE,
@ -177,6 +178,7 @@ enum cpuhp_state {
CPUHP_AP_PERF_POWERPC_THREAD_IMC_ONLINE,
CPUHP_AP_WATCHDOG_ONLINE,
CPUHP_AP_WORKQUEUE_ONLINE,
CPUHP_AP_RANDOM_ONLINE,
CPUHP_AP_RCUTREE_ONLINE,
CPUHP_AP_NOTIFY_PERF_ONLINE,
CPUHP_AP_BASE_CACHEINFO_ONLINE,


@ -59,7 +59,5 @@ extern int devm_hwrng_register(struct device *dev, struct hwrng *rng);
/** Unregister a Hardware Random Number Generator driver. */
extern void hwrng_unregister(struct hwrng *rng);
extern void devm_hwrng_unregister(struct device *dve, struct hwrng *rng);
/** Feed random bits into the pool. */
extern void add_hwgenerator_randomness(const char *buffer, size_t count, size_t entropy);
#endif /* LINUX_HWRANDOM_H_ */


@ -2363,6 +2363,8 @@ extern int install_special_mapping(struct mm_struct *mm,
unsigned long addr, unsigned long len,
unsigned long flags, struct page **pages);
unsigned long randomize_page(unsigned long start, unsigned long range);
extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
extern unsigned long mmap_region(struct file *file, unsigned long addr,


@ -10,6 +10,7 @@
#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/siphash.h>
u32 prandom_u32(void);
void prandom_bytes(void *buf, size_t nbytes);
@ -21,15 +22,10 @@ void prandom_reseed_late(void);
* The core SipHash round function. Each line can be executed in
* parallel given enough CPU resources.
*/
#define PRND_SIPROUND(v0, v1, v2, v3) ( \
v0 += v1, v1 = rol64(v1, 13), v2 += v3, v3 = rol64(v3, 16), \
v1 ^= v0, v0 = rol64(v0, 32), v3 ^= v2, \
v0 += v3, v3 = rol64(v3, 21), v2 += v1, v1 = rol64(v1, 17), \
v3 ^= v0, v1 ^= v2, v2 = rol64(v2, 32) \
)
#define PRND_SIPROUND(v0, v1, v2, v3) SIPHASH_PERMUTATION(v0, v1, v2, v3)
#define PRND_K0 (0x736f6d6570736575 ^ 0x6c7967656e657261)
#define PRND_K1 (0x646f72616e646f6d ^ 0x7465646279746573)
#define PRND_K0 (SIPHASH_CONST_0 ^ SIPHASH_CONST_2)
#define PRND_K1 (SIPHASH_CONST_1 ^ SIPHASH_CONST_3)
#elif BITS_PER_LONG == 32
/*
@ -37,14 +33,9 @@ void prandom_reseed_late(void);
* This is weaker, but 32-bit machines are not used for high-traffic
* applications, so there is less output for an attacker to analyze.
*/
#define PRND_SIPROUND(v0, v1, v2, v3) ( \
v0 += v1, v1 = rol32(v1, 5), v2 += v3, v3 = rol32(v3, 8), \
v1 ^= v0, v0 = rol32(v0, 16), v3 ^= v2, \
v0 += v3, v3 = rol32(v3, 7), v2 += v1, v1 = rol32(v1, 13), \
v3 ^= v0, v1 ^= v2, v2 = rol32(v2, 16) \
)
#define PRND_K0 0x6c796765
#define PRND_K1 0x74656462
#define PRND_SIPROUND(v0, v1, v2, v3) HSIPHASH_PERMUTATION(v0, v1, v2, v3)
#define PRND_K0 (HSIPHASH_CONST_0 ^ HSIPHASH_CONST_2)
#define PRND_K1 (HSIPHASH_CONST_1 ^ HSIPHASH_CONST_3)
#else
#error Unsupported BITS_PER_LONG

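The rewritten PRND_K0/PRND_K1 definitions above are intended to be bit-for-bit identical to the old literal keys, just expressed through the shared SipHash/HalfSipHash constants. A standalone compile-time sketch of that equivalence (C11, constants copied from the siphash.h hunk further below; not kernel code):

#define SIPHASH_CONST_0  0x736f6d6570736575ULL
#define SIPHASH_CONST_2  0x6c7967656e657261ULL
#define HSIPHASH_CONST_0 0U
#define HSIPHASH_CONST_1 0U
#define HSIPHASH_CONST_2 0x6c796765U
#define HSIPHASH_CONST_3 0x74656462U

/* 64-bit case: the old PRND_K0 was already the XOR of these two literals. */
_Static_assert((SIPHASH_CONST_0 ^ SIPHASH_CONST_2) ==
	       (0x736f6d6570736575ULL ^ 0x6c7967656e657261ULL),
	       "PRND_K0 unchanged");

/* 32-bit case: the old literals were HalfSipHash constants 2 and 3, and
 * constants 0 and 1 are zero, so the XOR form yields the same keys. */
_Static_assert((HSIPHASH_CONST_0 ^ HSIPHASH_CONST_2) == 0x6c796765U,
	       "32-bit PRND_K0 unchanged");
_Static_assert((HSIPHASH_CONST_1 ^ HSIPHASH_CONST_3) == 0x74656462U,
	       "32-bit PRND_K1 unchanged");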

@ -1,52 +1,35 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* include/linux/random.h
*
* Include file for the random number generator.
*/
#ifndef _LINUX_RANDOM_H
#define _LINUX_RANDOM_H
#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/once.h>
#include <uapi/linux/random.h>
struct random_ready_callback {
struct list_head list;
void (*func)(struct random_ready_callback *rdy);
struct module *owner;
};
struct notifier_block;
extern void add_device_randomness(const void *, unsigned int);
extern void add_bootloader_randomness(const void *, unsigned int);
void add_device_randomness(const void *buf, size_t len);
void __init add_bootloader_randomness(const void *buf, size_t len);
void add_input_randomness(unsigned int type, unsigned int code,
unsigned int value) __latent_entropy;
void add_interrupt_randomness(int irq) __latent_entropy;
void add_hwgenerator_randomness(const void *buf, size_t len, size_t entropy);
#if defined(CONFIG_GCC_PLUGIN_LATENT_ENTROPY) && !defined(__CHECKER__)
#if defined(LATENT_ENTROPY_PLUGIN) && !defined(__CHECKER__)
static inline void add_latent_entropy(void)
{
add_device_randomness((const void *)&latent_entropy,
sizeof(latent_entropy));
add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
}
#else
static inline void add_latent_entropy(void) {}
#endif
extern void add_input_randomness(unsigned int type, unsigned int code,
unsigned int value) __latent_entropy;
extern void add_interrupt_randomness(int irq, int irq_flags) __latent_entropy;
extern void get_random_bytes(void *buf, int nbytes);
extern int wait_for_random_bytes(void);
extern int __init rand_initialize(void);
extern bool rng_is_initialized(void);
extern int add_random_ready_callback(struct random_ready_callback *rdy);
extern void del_random_ready_callback(struct random_ready_callback *rdy);
extern int __must_check get_random_bytes_arch(void *buf, int nbytes);
#ifndef MODULE
extern const struct file_operations random_fops, urandom_fops;
static inline void add_latent_entropy(void) { }
#endif
void get_random_bytes(void *buf, size_t len);
size_t __must_check get_random_bytes_arch(void *buf, size_t len);
u32 get_random_u32(void);
u64 get_random_u64(void);
static inline unsigned int get_random_int(void)
@ -78,36 +61,38 @@ static inline unsigned long get_random_long(void)
static inline unsigned long get_random_canary(void)
{
unsigned long val = get_random_long();
return val & CANARY_MASK;
return get_random_long() & CANARY_MASK;
}
int __init random_init(const char *command_line);
bool rng_is_initialized(void);
int wait_for_random_bytes(void);
int register_random_ready_notifier(struct notifier_block *nb);
int unregister_random_ready_notifier(struct notifier_block *nb);
/* Calls wait_for_random_bytes() and then calls get_random_bytes(buf, nbytes).
* Returns the result of the call to wait_for_random_bytes. */
static inline int get_random_bytes_wait(void *buf, int nbytes)
static inline int get_random_bytes_wait(void *buf, size_t nbytes)
{
int ret = wait_for_random_bytes();
get_random_bytes(buf, nbytes);
return ret;
}
#define declare_get_random_var_wait(var) \
static inline int get_random_ ## var ## _wait(var *out) { \
#define declare_get_random_var_wait(name, ret_type) \
static inline int get_random_ ## name ## _wait(ret_type *out) { \
int ret = wait_for_random_bytes(); \
if (unlikely(ret)) \
return ret; \
*out = get_random_ ## var(); \
*out = get_random_ ## name(); \
return 0; \
}
declare_get_random_var_wait(u32)
declare_get_random_var_wait(u64)
declare_get_random_var_wait(int)
declare_get_random_var_wait(long)
declare_get_random_var_wait(u32, u32)
declare_get_random_var_wait(u64, u32)
declare_get_random_var_wait(int, unsigned int)
declare_get_random_var_wait(long, unsigned long)
#undef declare_get_random_var
unsigned long randomize_page(unsigned long start, unsigned long range);
/*
* This is designed to be standalone for just prandom
* users, but for now we include it from <linux/random.h>
@ -118,22 +103,39 @@ unsigned long randomize_page(unsigned long start, unsigned long range);
#ifdef CONFIG_ARCH_RANDOM
# include <asm/archrandom.h>
#else
static inline bool __must_check arch_get_random_long(unsigned long *v)
static inline bool __must_check arch_get_random_long(unsigned long *v) { return false; }
static inline bool __must_check arch_get_random_int(unsigned int *v) { return false; }
static inline bool __must_check arch_get_random_seed_long(unsigned long *v) { return false; }
static inline bool __must_check arch_get_random_seed_int(unsigned int *v) { return false; }
#endif
/*
* Called from the boot CPU during startup; not valid to call once
* secondary CPUs are up and preemption is possible.
*/
#ifndef arch_get_random_seed_long_early
static inline bool __init arch_get_random_seed_long_early(unsigned long *v)
{
return false;
}
static inline bool __must_check arch_get_random_int(unsigned int *v)
{
return false;
}
static inline bool __must_check arch_get_random_seed_long(unsigned long *v)
{
return false;
}
static inline bool __must_check arch_get_random_seed_int(unsigned int *v)
{
return false;
WARN_ON(system_state != SYSTEM_BOOTING);
return arch_get_random_seed_long(v);
}
#endif
#ifndef arch_get_random_long_early
static inline bool __init arch_get_random_long_early(unsigned long *v)
{
WARN_ON(system_state != SYSTEM_BOOTING);
return arch_get_random_long(v);
}
#endif
#ifdef CONFIG_SMP
int random_prepare_cpu(unsigned int cpu);
int random_online_cpu(unsigned int cpu);
#endif
#ifndef MODULE
extern const struct file_operations random_fops, urandom_fops;
#endif
#endif /* _LINUX_RANDOM_H */

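For reference, declare_get_random_var_wait(u32, u32) in the hunk above expands to roughly the helper sketched below; the second function is a hypothetical sleepable caller, not something taken from this diff:

static inline int get_random_u32_wait(u32 *out)
{
	int ret = wait_for_random_bytes();	/* may sleep until the RNG is seeded */

	if (unlikely(ret))
		return ret;			/* e.g. interrupted by a signal */
	*out = get_random_u32();
	return 0;
}

/* Hypothetical caller that must not proceed with an unseeded RNG. */
static int generate_session_cookie(u32 *cookie)
{
	return get_random_u32_wait(cookie);
}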

@ -136,4 +136,32 @@ static inline u32 hsiphash(const void *data, size_t len,
return ___hsiphash_aligned(data, len, key);
}
/*
* These macros expose the raw SipHash and HalfSipHash permutations.
* Do not use them directly! If you think you have a use for them,
* be sure to CC the maintainer of this file explaining why.
*/
#define SIPHASH_PERMUTATION(a, b, c, d) ( \
(a) += (b), (b) = rol64((b), 13), (b) ^= (a), (a) = rol64((a), 32), \
(c) += (d), (d) = rol64((d), 16), (d) ^= (c), \
(a) += (d), (d) = rol64((d), 21), (d) ^= (a), \
(c) += (b), (b) = rol64((b), 17), (b) ^= (c), (c) = rol64((c), 32))
#define SIPHASH_CONST_0 0x736f6d6570736575ULL
#define SIPHASH_CONST_1 0x646f72616e646f6dULL
#define SIPHASH_CONST_2 0x6c7967656e657261ULL
#define SIPHASH_CONST_3 0x7465646279746573ULL
#define HSIPHASH_PERMUTATION(a, b, c, d) ( \
(a) += (b), (b) = rol32((b), 5), (b) ^= (a), (a) = rol32((a), 16), \
(c) += (d), (d) = rol32((d), 8), (d) ^= (c), \
(a) += (d), (d) = rol32((d), 7), (d) ^= (a), \
(c) += (b), (b) = rol32((b), 13), (b) ^= (c), (c) = rol32((c), 16))
#define HSIPHASH_CONST_0 0U
#define HSIPHASH_CONST_1 0U
#define HSIPHASH_CONST_2 0x6c796765U
#define HSIPHASH_CONST_3 0x74656462U
#endif /* _LINUX_SIPHASH_H */

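The SIPHASH_CONST_* values added above are the standard SipHash initialization constants: the ASCII string "somepseudorandomlygeneratedbytes" split into four big-endian 64-bit words. A small standalone sketch (userspace C, not kernel code) that reconstructs the string from the constants:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint64_t c[4] = {
		0x736f6d6570736575ULL, 0x646f72616e646f6dULL,
		0x6c7967656e657261ULL, 0x7465646279746573ULL,
	};
	char s[33] = { 0 };

	for (int i = 0; i < 4; i++)
		for (int j = 0; j < 8; j++)
			s[i * 8 + j] = (char)((c[i] >> (56 - 8 * j)) & 0xff);
	printf("%s\n", s);	/* prints: somepseudorandomlygeneratedbytes */
	return 0;
}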

@ -62,6 +62,8 @@
#include <linux/types.h>
#include <linux/param.h>
unsigned long random_get_entropy_fallback(void);
#include <asm/timex.h>
#ifndef random_get_entropy
@ -74,8 +76,14 @@
*
* By default we use get_cycles() for this purpose, but individual
* architectures may override this in their asm/timex.h header file.
* If a given arch does not have get_cycles(), then we fallback to
* using random_get_entropy_fallback().
*/
#define random_get_entropy() get_cycles()
#ifdef get_cycles
#define random_get_entropy() ((unsigned long)get_cycles())
#else
#define random_get_entropy() random_get_entropy_fallback()
#endif
#endif
/*


@ -1,313 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM random
#if !defined(_TRACE_RANDOM_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_RANDOM_H
#include <linux/writeback.h>
#include <linux/tracepoint.h>
TRACE_EVENT(add_device_randomness,
TP_PROTO(int bytes, unsigned long IP),
TP_ARGS(bytes, IP),
TP_STRUCT__entry(
__field( int, bytes )
__field(unsigned long, IP )
),
TP_fast_assign(
__entry->bytes = bytes;
__entry->IP = IP;
),
TP_printk("bytes %d caller %pS",
__entry->bytes, (void *)__entry->IP)
);
DECLARE_EVENT_CLASS(random__mix_pool_bytes,
TP_PROTO(const char *pool_name, int bytes, unsigned long IP),
TP_ARGS(pool_name, bytes, IP),
TP_STRUCT__entry(
__field( const char *, pool_name )
__field( int, bytes )
__field(unsigned long, IP )
),
TP_fast_assign(
__entry->pool_name = pool_name;
__entry->bytes = bytes;
__entry->IP = IP;
),
TP_printk("%s pool: bytes %d caller %pS",
__entry->pool_name, __entry->bytes, (void *)__entry->IP)
);
DEFINE_EVENT(random__mix_pool_bytes, mix_pool_bytes,
TP_PROTO(const char *pool_name, int bytes, unsigned long IP),
TP_ARGS(pool_name, bytes, IP)
);
DEFINE_EVENT(random__mix_pool_bytes, mix_pool_bytes_nolock,
TP_PROTO(const char *pool_name, int bytes, unsigned long IP),
TP_ARGS(pool_name, bytes, IP)
);
TRACE_EVENT(credit_entropy_bits,
TP_PROTO(const char *pool_name, int bits, int entropy_count,
unsigned long IP),
TP_ARGS(pool_name, bits, entropy_count, IP),
TP_STRUCT__entry(
__field( const char *, pool_name )
__field( int, bits )
__field( int, entropy_count )
__field(unsigned long, IP )
),
TP_fast_assign(
__entry->pool_name = pool_name;
__entry->bits = bits;
__entry->entropy_count = entropy_count;
__entry->IP = IP;
),
TP_printk("%s pool: bits %d entropy_count %d caller %pS",
__entry->pool_name, __entry->bits,
__entry->entropy_count, (void *)__entry->IP)
);
TRACE_EVENT(push_to_pool,
TP_PROTO(const char *pool_name, int pool_bits, int input_bits),
TP_ARGS(pool_name, pool_bits, input_bits),
TP_STRUCT__entry(
__field( const char *, pool_name )
__field( int, pool_bits )
__field( int, input_bits )
),
TP_fast_assign(
__entry->pool_name = pool_name;
__entry->pool_bits = pool_bits;
__entry->input_bits = input_bits;
),
TP_printk("%s: pool_bits %d input_pool_bits %d",
__entry->pool_name, __entry->pool_bits,
__entry->input_bits)
);
TRACE_EVENT(debit_entropy,
TP_PROTO(const char *pool_name, int debit_bits),
TP_ARGS(pool_name, debit_bits),
TP_STRUCT__entry(
__field( const char *, pool_name )
__field( int, debit_bits )
),
TP_fast_assign(
__entry->pool_name = pool_name;
__entry->debit_bits = debit_bits;
),
TP_printk("%s: debit_bits %d", __entry->pool_name,
__entry->debit_bits)
);
TRACE_EVENT(add_input_randomness,
TP_PROTO(int input_bits),
TP_ARGS(input_bits),
TP_STRUCT__entry(
__field( int, input_bits )
),
TP_fast_assign(
__entry->input_bits = input_bits;
),
TP_printk("input_pool_bits %d", __entry->input_bits)
);
TRACE_EVENT(add_disk_randomness,
TP_PROTO(dev_t dev, int input_bits),
TP_ARGS(dev, input_bits),
TP_STRUCT__entry(
__field( dev_t, dev )
__field( int, input_bits )
),
TP_fast_assign(
__entry->dev = dev;
__entry->input_bits = input_bits;
),
TP_printk("dev %d,%d input_pool_bits %d", MAJOR(__entry->dev),
MINOR(__entry->dev), __entry->input_bits)
);
TRACE_EVENT(xfer_secondary_pool,
TP_PROTO(const char *pool_name, int xfer_bits, int request_bits,
int pool_entropy, int input_entropy),
TP_ARGS(pool_name, xfer_bits, request_bits, pool_entropy,
input_entropy),
TP_STRUCT__entry(
__field( const char *, pool_name )
__field( int, xfer_bits )
__field( int, request_bits )
__field( int, pool_entropy )
__field( int, input_entropy )
),
TP_fast_assign(
__entry->pool_name = pool_name;
__entry->xfer_bits = xfer_bits;
__entry->request_bits = request_bits;
__entry->pool_entropy = pool_entropy;
__entry->input_entropy = input_entropy;
),
TP_printk("pool %s xfer_bits %d request_bits %d pool_entropy %d "
"input_entropy %d", __entry->pool_name, __entry->xfer_bits,
__entry->request_bits, __entry->pool_entropy,
__entry->input_entropy)
);
DECLARE_EVENT_CLASS(random__get_random_bytes,
TP_PROTO(int nbytes, unsigned long IP),
TP_ARGS(nbytes, IP),
TP_STRUCT__entry(
__field( int, nbytes )
__field(unsigned long, IP )
),
TP_fast_assign(
__entry->nbytes = nbytes;
__entry->IP = IP;
),
TP_printk("nbytes %d caller %pS", __entry->nbytes, (void *)__entry->IP)
);
DEFINE_EVENT(random__get_random_bytes, get_random_bytes,
TP_PROTO(int nbytes, unsigned long IP),
TP_ARGS(nbytes, IP)
);
DEFINE_EVENT(random__get_random_bytes, get_random_bytes_arch,
TP_PROTO(int nbytes, unsigned long IP),
TP_ARGS(nbytes, IP)
);
DECLARE_EVENT_CLASS(random__extract_entropy,
TP_PROTO(const char *pool_name, int nbytes, int entropy_count,
unsigned long IP),
TP_ARGS(pool_name, nbytes, entropy_count, IP),
TP_STRUCT__entry(
__field( const char *, pool_name )
__field( int, nbytes )
__field( int, entropy_count )
__field(unsigned long, IP )
),
TP_fast_assign(
__entry->pool_name = pool_name;
__entry->nbytes = nbytes;
__entry->entropy_count = entropy_count;
__entry->IP = IP;
),
TP_printk("%s pool: nbytes %d entropy_count %d caller %pS",
__entry->pool_name, __entry->nbytes, __entry->entropy_count,
(void *)__entry->IP)
);
DEFINE_EVENT(random__extract_entropy, extract_entropy,
TP_PROTO(const char *pool_name, int nbytes, int entropy_count,
unsigned long IP),
TP_ARGS(pool_name, nbytes, entropy_count, IP)
);
DEFINE_EVENT(random__extract_entropy, extract_entropy_user,
TP_PROTO(const char *pool_name, int nbytes, int entropy_count,
unsigned long IP),
TP_ARGS(pool_name, nbytes, entropy_count, IP)
);
TRACE_EVENT(random_read,
TP_PROTO(int got_bits, int need_bits, int pool_left, int input_left),
TP_ARGS(got_bits, need_bits, pool_left, input_left),
TP_STRUCT__entry(
__field( int, got_bits )
__field( int, need_bits )
__field( int, pool_left )
__field( int, input_left )
),
TP_fast_assign(
__entry->got_bits = got_bits;
__entry->need_bits = need_bits;
__entry->pool_left = pool_left;
__entry->input_left = input_left;
),
TP_printk("got_bits %d still_needed_bits %d "
"blocking_pool_entropy_left %d input_entropy_left %d",
__entry->got_bits, __entry->got_bits, __entry->pool_left,
__entry->input_left)
);
TRACE_EVENT(urandom_read,
TP_PROTO(int got_bits, int pool_left, int input_left),
TP_ARGS(got_bits, pool_left, input_left),
TP_STRUCT__entry(
__field( int, got_bits )
__field( int, pool_left )
__field( int, input_left )
),
TP_fast_assign(
__entry->got_bits = got_bits;
__entry->pool_left = pool_left;
__entry->input_left = input_left;
),
TP_printk("got_bits %d nonblocking_pool_entropy_left %d "
"input_entropy_left %d", __entry->got_bits,
__entry->pool_left, __entry->input_left)
);
#endif /* _TRACE_RANDOM_H */
/* This part must be outside protection */
#include <trace/define_trace.h>


@ -660,21 +660,18 @@ asmlinkage __visible void __init start_kernel(void)
hrtimers_init();
softirq_init();
timekeeping_init();
time_init();
/*
* For best initial stack canary entropy, prepare it after:
* - setup_arch() for any UEFI RNG entropy and boot cmdline access
* - timekeeping_init() for ktime entropy used in rand_initialize()
* - rand_initialize() to get any arch-specific entropy like RDRAND
* - add_latent_entropy() to get any latent entropy
* - adding command line entropy
* - timekeeping_init() for ktime entropy used in random_init()
* - time_init() for making random_get_entropy() work on some platforms
* - random_init() to initialize the RNG from early entropy sources
*/
rand_initialize();
add_latent_entropy();
add_device_randomness(command_line, strlen(command_line));
random_init(command_line);
boot_init_stack_canary();
time_init();
perf_event_init();
profile_init();
call_function_init();


@ -32,6 +32,7 @@
#include <linux/slab.h>
#include <linux/percpu-rwsem.h>
#include <linux/cpuset.h>
#include <linux/random.h>
#include <trace/events/power.h>
#define CREATE_TRACE_POINTS
@ -1411,6 +1412,11 @@ static struct cpuhp_step cpuhp_hp_states[] = {
.startup.single = perf_event_init_cpu,
.teardown.single = perf_event_exit_cpu,
},
[CPUHP_RANDOM_PREPARE] = {
.name = "random:prepare",
.startup.single = random_prepare_cpu,
.teardown.single = NULL,
},
[CPUHP_WORKQUEUE_PREP] = {
.name = "workqueue:prepare",
.startup.single = workqueue_prepare_cpu,
@ -1527,6 +1533,11 @@ static struct cpuhp_step cpuhp_hp_states[] = {
.startup.single = workqueue_online_cpu,
.teardown.single = workqueue_offline_cpu,
},
[CPUHP_AP_RANDOM_ONLINE] = {
.name = "random:online",
.startup.single = random_online_cpu,
.teardown.single = NULL,
},
[CPUHP_AP_RCUTREE_ONLINE] = {
.name = "RCU/tree:online",
.startup.single = rcutree_online_cpu,


@ -188,7 +188,7 @@ irqreturn_t handle_irq_event_percpu(struct irq_desc *desc)
retval = __handle_irq_event_percpu(desc, &flags);
add_interrupt_randomness(desc->irq_data.irq, flags);
add_interrupt_randomness(desc->irq_data.irq);
if (!noirqdebug)
note_interrupt(desc, retval);


@ -22,6 +22,7 @@
#include <linux/clocksource.h>
#include <linux/jiffies.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/tick.h>
#include <linux/stop_machine.h>
#include <linux/pvclock_gtod.h>
@ -2313,6 +2314,20 @@ static int timekeeping_validate_timex(const struct timex *txc)
return 0;
}
/**
* random_get_entropy_fallback - Returns the raw clock source value,
* used by random.c for platforms with no valid random_get_entropy().
*/
unsigned long random_get_entropy_fallback(void)
{
struct tk_read_base *tkr = &tk_core.timekeeper.tkr_mono;
struct clocksource *clock = READ_ONCE(tkr->clock);
if (unlikely(timekeeping_suspended || !clock))
return 0;
return clock->read(clock);
}
EXPORT_SYMBOL_GPL(random_get_entropy_fallback);
/**
* do_adjtimex() - Accessor function to NTP __do_adjtimex function


@ -1277,8 +1277,7 @@ config WARN_ALL_UNSEEDED_RANDOM
so architecture maintainers really need to do what they can
to get the CRNG seeded sooner after the system is booted.
However, since users cannot do anything actionable to
address this, by default the kernel will issue only a single
warning for the first use of unseeded randomness.
address this, by default this option is disabled.
Say Y here if you want to receive warnings for all uses of
unseeded randomness. This will be of use primarily for


@ -38,6 +38,9 @@
#include <linux/jiffies.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/notifier.h>
#include <asm/unaligned.h>
/**
@ -544,9 +547,11 @@ static void prandom_reseed(struct timer_list *unused)
* To avoid worrying about whether it's safe to delay that interrupt
* long enough to seed all CPUs, just schedule an immediate timer event.
*/
static void prandom_timer_start(struct random_ready_callback *unused)
static int prandom_timer_start(struct notifier_block *nb,
unsigned long action, void *data)
{
mod_timer(&seed_timer, jiffies);
return 0;
}
/*
@ -555,13 +560,13 @@ static void prandom_timer_start(struct random_ready_callback *unused)
*/
static int __init prandom_init_late(void)
{
static struct random_ready_callback random_ready = {
.func = prandom_timer_start
static struct notifier_block random_ready = {
.notifier_call = prandom_timer_start
};
int ret = add_random_ready_callback(&random_ready);
int ret = register_random_ready_notifier(&random_ready);
if (ret == -EALREADY) {
prandom_timer_start(&random_ready);
prandom_timer_start(&random_ready, 0, NULL);
ret = 0;
}
return ret;


@ -10,6 +10,7 @@
#include <linux/export.h>
#include <linux/bitops.h>
#include <linux/cryptohash.h>
#include <linux/string.h>
#include <asm/unaligned.h>
/*
@ -55,7 +56,8 @@
#define SHA_ROUND(t, input, fn, constant, A, B, C, D, E) do { \
__u32 TEMP = input(t); setW(t, TEMP); \
E += TEMP + rol32(A,5) + (fn) + (constant); \
B = ror32(B, 2); } while (0)
B = ror32(B, 2); \
TEMP = E; E = D; D = C; C = B; B = A; A = TEMP; } while (0)
#define T_0_15(t, A, B, C, D, E) SHA_ROUND(t, SHA_SRC, (((C^D)&B)^D) , 0x5a827999, A, B, C, D, E )
#define T_16_19(t, A, B, C, D, E) SHA_ROUND(t, SHA_MIX, (((C^D)&B)^D) , 0x5a827999, A, B, C, D, E )
@ -82,6 +84,7 @@
void sha_transform(__u32 *digest, const char *data, __u32 *array)
{
__u32 A, B, C, D, E;
unsigned int i = 0;
A = digest[0];
B = digest[1];
@ -90,94 +93,24 @@ void sha_transform(__u32 *digest, const char *data, __u32 *array)
E = digest[4];
/* Round 1 - iterations 0-16 take their input from 'data' */
T_0_15( 0, A, B, C, D, E);
T_0_15( 1, E, A, B, C, D);
T_0_15( 2, D, E, A, B, C);
T_0_15( 3, C, D, E, A, B);
T_0_15( 4, B, C, D, E, A);
T_0_15( 5, A, B, C, D, E);
T_0_15( 6, E, A, B, C, D);
T_0_15( 7, D, E, A, B, C);
T_0_15( 8, C, D, E, A, B);
T_0_15( 9, B, C, D, E, A);
T_0_15(10, A, B, C, D, E);
T_0_15(11, E, A, B, C, D);
T_0_15(12, D, E, A, B, C);
T_0_15(13, C, D, E, A, B);
T_0_15(14, B, C, D, E, A);
T_0_15(15, A, B, C, D, E);
for (; i < 16; ++i)
T_0_15(i, A, B, C, D, E);
/* Round 1 - tail. Input from 512-bit mixing array */
T_16_19(16, E, A, B, C, D);
T_16_19(17, D, E, A, B, C);
T_16_19(18, C, D, E, A, B);
T_16_19(19, B, C, D, E, A);
for (; i < 20; ++i)
T_16_19(i, A, B, C, D, E);
/* Round 2 */
T_20_39(20, A, B, C, D, E);
T_20_39(21, E, A, B, C, D);
T_20_39(22, D, E, A, B, C);
T_20_39(23, C, D, E, A, B);
T_20_39(24, B, C, D, E, A);
T_20_39(25, A, B, C, D, E);
T_20_39(26, E, A, B, C, D);
T_20_39(27, D, E, A, B, C);
T_20_39(28, C, D, E, A, B);
T_20_39(29, B, C, D, E, A);
T_20_39(30, A, B, C, D, E);
T_20_39(31, E, A, B, C, D);
T_20_39(32, D, E, A, B, C);
T_20_39(33, C, D, E, A, B);
T_20_39(34, B, C, D, E, A);
T_20_39(35, A, B, C, D, E);
T_20_39(36, E, A, B, C, D);
T_20_39(37, D, E, A, B, C);
T_20_39(38, C, D, E, A, B);
T_20_39(39, B, C, D, E, A);
for (; i < 40; ++i)
T_20_39(i, A, B, C, D, E);
/* Round 3 */
T_40_59(40, A, B, C, D, E);
T_40_59(41, E, A, B, C, D);
T_40_59(42, D, E, A, B, C);
T_40_59(43, C, D, E, A, B);
T_40_59(44, B, C, D, E, A);
T_40_59(45, A, B, C, D, E);
T_40_59(46, E, A, B, C, D);
T_40_59(47, D, E, A, B, C);
T_40_59(48, C, D, E, A, B);
T_40_59(49, B, C, D, E, A);
T_40_59(50, A, B, C, D, E);
T_40_59(51, E, A, B, C, D);
T_40_59(52, D, E, A, B, C);
T_40_59(53, C, D, E, A, B);
T_40_59(54, B, C, D, E, A);
T_40_59(55, A, B, C, D, E);
T_40_59(56, E, A, B, C, D);
T_40_59(57, D, E, A, B, C);
T_40_59(58, C, D, E, A, B);
T_40_59(59, B, C, D, E, A);
for (; i < 60; ++i)
T_40_59(i, A, B, C, D, E);
/* Round 4 */
T_60_79(60, A, B, C, D, E);
T_60_79(61, E, A, B, C, D);
T_60_79(62, D, E, A, B, C);
T_60_79(63, C, D, E, A, B);
T_60_79(64, B, C, D, E, A);
T_60_79(65, A, B, C, D, E);
T_60_79(66, E, A, B, C, D);
T_60_79(67, D, E, A, B, C);
T_60_79(68, C, D, E, A, B);
T_60_79(69, B, C, D, E, A);
T_60_79(70, A, B, C, D, E);
T_60_79(71, E, A, B, C, D);
T_60_79(72, D, E, A, B, C);
T_60_79(73, C, D, E, A, B);
T_60_79(74, B, C, D, E, A);
T_60_79(75, A, B, C, D, E);
T_60_79(76, E, A, B, C, D);
T_60_79(77, D, E, A, B, C);
T_60_79(78, C, D, E, A, B);
T_60_79(79, B, C, D, E, A);
for (; i < 80; ++i)
T_60_79(i, A, B, C, D, E);
digest[0] += A;
digest[1] += B;

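The re-rolled loops above depend on the extra rotation line added to SHA_ROUND: since the macro now rotates the five working variables itself, every iteration can pass A, B, C, D, E in a fixed order, which is exactly what the old unrolled calls achieved by permuting the names. A toy standalone demonstration of that equivalence (simplified mixing step, not real SHA-1; all macro and variable names here are hypothetical):

#include <assert.h>
#include <stdint.h>

/* Toy mixing step standing in for the real SHA-1 round body. */
#define ROUND_BODY(t, A, B, C, D, E)	((E) += (A) + (B) + (t))

/* "New style" round: mix, then rotate the working variables in place. */
#define ROUND_ROLLED(t, A, B, C, D, E) do {				\
	uint32_t TEMP;							\
	ROUND_BODY(t, A, B, C, D, E);					\
	TEMP = (E); (E) = (D); (D) = (C); (C) = (B); (B) = (A); (A) = TEMP; \
} while (0)

int main(void)
{
	uint32_t a = 1, b = 2, c = 3, d = 4, e = 5;	/* old, unrolled */
	uint32_t A = 1, B = 2, C = 3, D = 4, E = 5;	/* new, re-rolled */

	/* Old style: one call per round, argument names permuted by hand. */
	ROUND_BODY(0, a, b, c, d, e);
	ROUND_BODY(1, e, a, b, c, d);
	ROUND_BODY(2, d, e, a, b, c);
	ROUND_BODY(3, c, d, e, a, b);
	ROUND_BODY(4, b, c, d, e, a);

	/* New style: fixed argument order inside a loop. */
	for (uint32_t i = 0; i < 5; i++)
		ROUND_ROLLED(i, A, B, C, D, E);

	/* After a multiple of five rounds both forms agree. */
	assert(a == A && b == B && c == C && d == D && e == E);
	return 0;
}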

@ -18,19 +18,13 @@
#include <asm/word-at-a-time.h>
#endif
#define SIPROUND \
do { \
v0 += v1; v1 = rol64(v1, 13); v1 ^= v0; v0 = rol64(v0, 32); \
v2 += v3; v3 = rol64(v3, 16); v3 ^= v2; \
v0 += v3; v3 = rol64(v3, 21); v3 ^= v0; \
v2 += v1; v1 = rol64(v1, 17); v1 ^= v2; v2 = rol64(v2, 32); \
} while (0)
#define SIPROUND SIPHASH_PERMUTATION(v0, v1, v2, v3)
#define PREAMBLE(len) \
u64 v0 = 0x736f6d6570736575ULL; \
u64 v1 = 0x646f72616e646f6dULL; \
u64 v2 = 0x6c7967656e657261ULL; \
u64 v3 = 0x7465646279746573ULL; \
u64 v0 = SIPHASH_CONST_0; \
u64 v1 = SIPHASH_CONST_1; \
u64 v2 = SIPHASH_CONST_2; \
u64 v3 = SIPHASH_CONST_3; \
u64 b = ((u64)(len)) << 56; \
v3 ^= key->key[1]; \
v2 ^= key->key[0]; \
@ -389,19 +383,13 @@ u32 hsiphash_4u32(const u32 first, const u32 second, const u32 third,
}
EXPORT_SYMBOL(hsiphash_4u32);
#else
#define HSIPROUND \
do { \
v0 += v1; v1 = rol32(v1, 5); v1 ^= v0; v0 = rol32(v0, 16); \
v2 += v3; v3 = rol32(v3, 8); v3 ^= v2; \
v0 += v3; v3 = rol32(v3, 7); v3 ^= v0; \
v2 += v1; v1 = rol32(v1, 13); v1 ^= v2; v2 = rol32(v2, 16); \
} while (0)
#define HSIPROUND HSIPHASH_PERMUTATION(v0, v1, v2, v3)
#define HPREAMBLE(len) \
u32 v0 = 0; \
u32 v1 = 0; \
u32 v2 = 0x6c796765U; \
u32 v3 = 0x74656462U; \
u32 v0 = HSIPHASH_CONST_0; \
u32 v1 = HSIPHASH_CONST_1; \
u32 v2 = HSIPHASH_CONST_2; \
u32 v3 = HSIPHASH_CONST_3; \
u32 b = ((u32)(len)) << 24; \
v3 ^= key->key[1]; \
v2 ^= key->key[0]; \


@ -1700,14 +1700,16 @@ static void enable_ptr_key_workfn(struct work_struct *work)
static DECLARE_WORK(enable_ptr_key_work, enable_ptr_key_workfn);
static void fill_random_ptr_key(struct random_ready_callback *unused)
static int fill_random_ptr_key(struct notifier_block *nb,
unsigned long action, void *data)
{
/* This may be in an interrupt handler. */
queue_work(system_unbound_wq, &enable_ptr_key_work);
return 0;
}
static struct random_ready_callback random_ready = {
.func = fill_random_ptr_key
static struct notifier_block random_ready = {
.notifier_call = fill_random_ptr_key
};
static int __init initialize_ptr_random(void)
@ -1721,7 +1723,7 @@ static int __init initialize_ptr_random(void)
return 0;
}
ret = add_random_ready_callback(&random_ready);
ret = register_random_ready_notifier(&random_ready);
if (!ret) {
return 0;
} else if (ret == -EALREADY) {


@ -14,6 +14,7 @@
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>
#include <linux/userfaultfd_k.h>
#include <linux/random.h>
#include <asm/sections.h>
#include <linux/uaccess.h>
@ -286,6 +287,38 @@ int vma_is_stack_for_current(struct vm_area_struct *vma)
return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
}
/**
* randomize_page - Generate a random, page aligned address
* @start: The smallest acceptable address the caller will take.
* @range: The size of the area, starting at @start, within which the
* random address must fall.
*
* If @start + @range would overflow, @range is capped.
*
* NOTE: Historical use of randomize_range, which this replaces, presumed that
* @start was already page aligned. We now align it regardless.
*
* Return: A page aligned address within [start, start + range). On error,
* @start is returned.
*/
unsigned long randomize_page(unsigned long start, unsigned long range)
{
if (!PAGE_ALIGNED(start)) {
range -= PAGE_ALIGN(start) - start;
start = PAGE_ALIGN(start);
}
if (start > ULONG_MAX - range)
range = ULONG_MAX - start;
range >>= PAGE_SHIFT;
if (range == 0)
return start;
return start + (get_random_long() % range << PAGE_SHIFT);
}
#if defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{

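randomize_page() as relocated and documented above returns a page-aligned address within [start, start + range), aligning start if needed and returning start unchanged when the usable range collapses to zero. A hypothetical caller, sketched after the way architecture code commonly randomizes the heap base (the function name and the 32 MiB window are illustrative, not taken from this diff):

/* Hypothetical example: choose a page-aligned brk base somewhere in a
 * 32 MiB window above the current break. */
static unsigned long example_randomize_brk(unsigned long brk)
{
	return randomize_page(brk, 32UL << 20);
}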

@ -718,12 +718,14 @@ EXPORT_SYMBOL_GPL(inet_unhash);
* Note that we use 32bit integers (vs RFC 'short integers')
* because 2^16 is not a multiple of num_ephemeral and this
* property might be used by clever attacker.
* RFC claims using TABLE_LENGTH=10 buckets gives an improvement,
* we use 256 instead to really give more isolation and
* privacy, this only consumes 1 KB of kernel memory.
* RFC claims using TABLE_LENGTH=10 buckets gives an improvement, though
* attacks were since demonstrated, thus we use 65536 instead to really
* give more isolation and privacy, at the expense of 256kB of kernel
* memory.
*/
#define INET_TABLE_PERTURB_SHIFT 8
static u32 table_perturb[1 << INET_TABLE_PERTURB_SHIFT];
#define INET_TABLE_PERTURB_SHIFT 16
#define INET_TABLE_PERTURB_SIZE (1 << INET_TABLE_PERTURB_SHIFT)
static u32 *table_perturb;
int __inet_hash_connect(struct inet_timewait_death_row *death_row,
struct sock *sk, u64 port_offset,
@ -763,10 +765,11 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
if (likely(remaining > 1))
remaining &= ~1U;
net_get_random_once(table_perturb, sizeof(table_perturb));
index = hash_32(port_offset, INET_TABLE_PERTURB_SHIFT);
net_get_random_once(table_perturb,
INET_TABLE_PERTURB_SIZE * sizeof(*table_perturb));
index = port_offset & (INET_TABLE_PERTURB_SIZE - 1);
offset = READ_ONCE(table_perturb[index]) + port_offset;
offset = READ_ONCE(table_perturb[index]) + (port_offset >> 32);
offset %= remaining;
/* In first pass we try ports of @low parity.
@ -821,6 +824,12 @@ next_port:
return -EADDRNOTAVAIL;
ok:
/* Here we want to add a little bit of randomness to the next source
* port that will be chosen. We use a max() with a random here so that
* on low contention the randomness is maximal and on high contention
* it may be inexistent.
*/
i = max_t(int, i, (prandom_u32() & 7) * 2);
WRITE_ONCE(table_perturb[index], READ_ONCE(table_perturb[index]) + i + 2);
/* Head lock still held and bh's disabled */
@ -890,6 +899,12 @@ void __init inet_hashinfo2_init(struct inet_hashinfo *h, const char *name,
INIT_HLIST_HEAD(&h->lhash2[i].head);
h->lhash2[i].count = 0;
}
/* this one is used for source ports of outgoing connections */
table_perturb = kmalloc_array(INET_TABLE_PERTURB_SIZE,
sizeof(*table_perturb), GFP_KERNEL);
if (!table_perturb)
panic("TCP: failed to alloc table_perturb");
}
int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo)

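The memory figure in the rewritten comment follows directly from the new table size of 2^16 u32 slots. A standalone compile-time sketch of the arithmetic (C11, constants copied from the hunk above; not kernel code):

#include <stdint.h>

#define INET_TABLE_PERTURB_SHIFT 16
#define INET_TABLE_PERTURB_SIZE  (1 << INET_TABLE_PERTURB_SHIFT)

/* 65536 slots * 4 bytes = 262144 bytes = 256 KiB
 * (the old static array was 2^8 * 4 = 1 KiB). */
_Static_assert(INET_TABLE_PERTURB_SIZE * sizeof(uint32_t) == 256 * 1024,
	       "table_perturb now costs 256 KiB");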

@ -519,14 +519,15 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
struct ipcm6_cookie ipc6;
int addr_len = msg->msg_namelen;
int transhdrlen = 4; /* zero session-id */
int ulen = len + transhdrlen;
int ulen;
int err;
/* Rough check on arithmetic overflow,
better check is made in ip6_append_data().
*/
if (len > INT_MAX)
if (len > INT_MAX - transhdrlen)
return -EMSGSIZE;
ulen = len + transhdrlen;
/* Mirror BSD error message compatibility */
if (msg->msg_flags & MSG_OOB)


@ -443,6 +443,7 @@ static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
update_ip_l4_checksum(skb, nh, *addr, new_addr);
csum_replace4(&nh->check, *addr, new_addr);
skb_clear_hash(skb);
ovs_ct_clear(skb, NULL);
*addr = new_addr;
}
@ -490,6 +491,7 @@ static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto,
update_ipv6_checksum(skb, l4_proto, addr, new_addr);
skb_clear_hash(skb);
ovs_ct_clear(skb, NULL);
memcpy(addr, new_addr, sizeof(__be32[4]));
}
@ -730,6 +732,7 @@ static int set_nsh(struct sk_buff *skb, struct sw_flow_key *flow_key,
static void set_tp_port(struct sk_buff *skb, __be16 *port,
__be16 new_port, __sum16 *check)
{
ovs_ct_clear(skb, NULL);
inet_proto_csum_replace2(check, skb, *port, new_port, false);
*port = new_port;
}
@ -769,6 +772,7 @@ static int set_udp(struct sk_buff *skb, struct sw_flow_key *flow_key,
uh->dest = dst;
flow_key->tp.src = src;
flow_key->tp.dst = dst;
ovs_ct_clear(skb, NULL);
}
skb_clear_hash(skb);
@ -831,6 +835,8 @@ static int set_sctp(struct sk_buff *skb, struct sw_flow_key *flow_key,
sh->checksum = old_csum ^ old_correct_csum ^ new_csum;
skb_clear_hash(skb);
ovs_ct_clear(skb, NULL);
flow_key->tp.src = sh->source;
flow_key->tp.dst = sh->dest;


@ -1303,7 +1303,8 @@ int ovs_ct_clear(struct sk_buff *skb, struct sw_flow_key *key)
if (skb_nfct(skb)) {
nf_conntrack_put(skb_nfct(skb));
nf_ct_set(skb, NULL, IP_CT_UNTRACKED);
ovs_ct_fill_key(skb, key);
if (key)
ovs_ct_fill_key(skb, key);
}
return 0;


@ -2253,6 +2253,36 @@ static struct sw_flow_actions *nla_alloc_flow_actions(int size)
return sfa;
}
static void ovs_nla_free_nested_actions(const struct nlattr *actions, int len);
static void ovs_nla_free_clone_action(const struct nlattr *action)
{
const struct nlattr *a = nla_data(action);
int rem = nla_len(action);
switch (nla_type(a)) {
case OVS_CLONE_ATTR_EXEC:
/* The real list of actions follows this attribute. */
a = nla_next(a, &rem);
ovs_nla_free_nested_actions(a, rem);
break;
}
}
static void ovs_nla_free_sample_action(const struct nlattr *action)
{
const struct nlattr *a = nla_data(action);
int rem = nla_len(action);
switch (nla_type(a)) {
case OVS_SAMPLE_ATTR_ARG:
/* The real list of actions follows this attribute. */
a = nla_next(a, &rem);
ovs_nla_free_nested_actions(a, rem);
break;
}
}
static void ovs_nla_free_set_action(const struct nlattr *a)
{
const struct nlattr *ovs_key = nla_data(a);
@ -2266,25 +2296,46 @@ static void ovs_nla_free_set_action(const struct nlattr *a)
}
}
void ovs_nla_free_flow_actions(struct sw_flow_actions *sf_acts)
static void ovs_nla_free_nested_actions(const struct nlattr *actions, int len)
{
const struct nlattr *a;
int rem;
if (!sf_acts)
/* Whenever new actions are added, the need to update this
* function should be considered.
*/
BUILD_BUG_ON(OVS_ACTION_ATTR_MAX != 20);
if (!actions)
return;
nla_for_each_attr(a, sf_acts->actions, sf_acts->actions_len, rem) {
nla_for_each_attr(a, actions, len, rem) {
switch (nla_type(a)) {
case OVS_ACTION_ATTR_SET:
ovs_nla_free_set_action(a);
case OVS_ACTION_ATTR_CLONE:
ovs_nla_free_clone_action(a);
break;
case OVS_ACTION_ATTR_CT:
ovs_ct_free_action(a);
break;
case OVS_ACTION_ATTR_SAMPLE:
ovs_nla_free_sample_action(a);
break;
case OVS_ACTION_ATTR_SET:
ovs_nla_free_set_action(a);
break;
}
}
}
void ovs_nla_free_flow_actions(struct sw_flow_actions *sf_acts)
{
if (!sf_acts)
return;
ovs_nla_free_nested_actions(sf_acts->actions, sf_acts->actions_len);
kfree(sf_acts);
}


@ -72,7 +72,7 @@ static unsigned int rpcrdma_max_call_header_size(unsigned int maxsegs)
/* Maximum Read list size */
maxsegs += 2; /* segment for head and tail buffers */
size = maxsegs * rpcrdma_readchunk_maxsz * sizeof(__be32);
size += maxsegs * rpcrdma_readchunk_maxsz * sizeof(__be32);
/* Minimal Read chunk size */
size += sizeof(__be32); /* segment count */
@ -98,7 +98,7 @@ static unsigned int rpcrdma_max_reply_header_size(unsigned int maxsegs)
/* Maximum Write list size */
maxsegs += 2; /* segment for head and tail buffers */
size = sizeof(__be32); /* segment count */
size += sizeof(__be32); /* segment count */
size += maxsegs * rpcrdma_segment_maxsz * sizeof(__be32);
size += sizeof(__be32); /* list discriminator */


@ -95,17 +95,25 @@ __faddr2line() {
local print_warnings=$4
local sym_name=${func_addr%+*}
local offset=${func_addr#*+}
offset=${offset%/*}
local func_offset=${func_addr#*+}
func_offset=${func_offset%/*}
local user_size=
local file_type
local is_vmlinux=0
[[ $func_addr =~ "/" ]] && user_size=${func_addr#*/}
if [[ -z $sym_name ]] || [[ -z $offset ]] || [[ $sym_name = $func_addr ]]; then
if [[ -z $sym_name ]] || [[ -z $func_offset ]] || [[ $sym_name = $func_addr ]]; then
warn "bad func+offset $func_addr"
DONE=1
return
fi
# vmlinux uses absolute addresses in the section table rather than
# section offsets.
local file_type=$(${READELF} --file-header $objfile |
${AWK} '$1 == "Type:" { print $2; exit }')
[[ $file_type = "EXEC" ]] && is_vmlinux=1
# Go through each of the object's symbols which match the func name.
# In rare cases there might be duplicates, in which case we print all
# matches.
@ -114,9 +122,11 @@ __faddr2line() {
local sym_addr=0x${fields[1]}
local sym_elf_size=${fields[2]}
local sym_sec=${fields[6]}
local sec_size
local sec_name
# Get the section size:
local sec_size=$(${READELF} --section-headers --wide $objfile |
sec_size=$(${READELF} --section-headers --wide $objfile |
sed 's/\[ /\[/' |
${AWK} -v sec=$sym_sec '$1 == "[" sec "]" { print "0x" $6; exit }')
@ -126,6 +136,17 @@ __faddr2line() {
return
fi
# Get the section name:
sec_name=$(${READELF} --section-headers --wide $objfile |
sed 's/\[ /\[/' |
${AWK} -v sec=$sym_sec '$1 == "[" sec "]" { print $2; exit }')
if [[ -z $sec_name ]]; then
warn "bad section name: section: $sym_sec"
DONE=1
return
fi
# Calculate the symbol size.
#
# Unfortunately we can't use the ELF size, because kallsyms
@ -174,10 +195,10 @@ __faddr2line() {
sym_size=0x$(printf %x $sym_size)
# Calculate the section address from user-supplied offset:
local addr=$(($sym_addr + $offset))
# Calculate the address from user-supplied offset:
local addr=$(($sym_addr + $func_offset))
if [[ -z $addr ]] || [[ $addr = 0 ]]; then
warn "bad address: $sym_addr + $offset"
warn "bad address: $sym_addr + $func_offset"
DONE=1
return
fi
@ -191,9 +212,9 @@ __faddr2line() {
fi
# Make sure the provided offset is within the symbol's range:
if [[ $offset -gt $sym_size ]]; then
if [[ $func_offset -gt $sym_size ]]; then
[[ $print_warnings = 1 ]] &&
echo "skipping $sym_name address at $addr due to size mismatch ($offset > $sym_size)"
echo "skipping $sym_name address at $addr due to size mismatch ($func_offset > $sym_size)"
continue
fi
@ -202,11 +223,13 @@ __faddr2line() {
[[ $FIRST = 0 ]] && echo
FIRST=0
echo "$sym_name+$offset/$sym_size:"
echo "$sym_name+$func_offset/$sym_size:"
# Pass section address to addr2line and strip absolute paths
# from the output:
local output=$(${ADDR2LINE} -fpie $objfile $addr | sed "s; $dir_prefix\(\./\)*; ;")
local args="--functions --pretty-print --inlines --exe=$objfile"
[[ $is_vmlinux = 0 ]] && args="$args --section=$sec_name"
local output=$(${ADDR2LINE} $args $addr | sed "s; $dir_prefix\(\./\)*; ;")
[[ -z $output ]] && continue
# Default output (non --list):


@ -141,7 +141,9 @@ static DECLARE_TLV_DB_SCALE(mic_tlv, 1600, 100, 0);
static DECLARE_TLV_DB_SCALE(pga_tlv, -600, 50, 0);
static DECLARE_TLV_DB_SCALE(mix_tlv, -50, 50, 0);
static DECLARE_TLV_DB_SCALE(pass_tlv, -6000, 50, 0);
static DECLARE_TLV_DB_SCALE(mix_tlv, -5150, 50, 0);
static DECLARE_TLV_DB_SCALE(beep_tlv, -56, 200, 0);
@ -355,7 +357,7 @@ static const struct snd_kcontrol_new cs42l52_snd_controls[] = {
CS42L52_SPKB_VOL, 0, 0x40, 0xC0, hl_tlv),
SOC_DOUBLE_R_SX_TLV("Bypass Volume", CS42L52_PASSTHRUA_VOL,
CS42L52_PASSTHRUB_VOL, 0, 0x88, 0x90, pga_tlv),
CS42L52_PASSTHRUB_VOL, 0, 0x88, 0x90, pass_tlv),
SOC_DOUBLE("Bypass Mute", CS42L52_MISC_CTL, 4, 5, 1, 0),
@ -368,7 +370,7 @@ static const struct snd_kcontrol_new cs42l52_snd_controls[] = {
CS42L52_ADCB_VOL, 0, 0xA0, 0x78, ipd_tlv),
SOC_DOUBLE_R_SX_TLV("ADC Mixer Volume",
CS42L52_ADCA_MIXER_VOL, CS42L52_ADCB_MIXER_VOL,
0, 0x19, 0x7F, ipd_tlv),
0, 0x19, 0x7F, mix_tlv),
SOC_DOUBLE("ADC Switch", CS42L52_ADC_MISC_CTL, 0, 1, 1, 0),


@ -403,9 +403,9 @@ static const struct snd_kcontrol_new cs42l56_snd_controls[] = {
SOC_DOUBLE("ADC Boost Switch", CS42L56_GAIN_BIAS_CTL, 3, 2, 1, 1),
SOC_DOUBLE_R_SX_TLV("Headphone Volume", CS42L56_HPA_VOLUME,
CS42L56_HPB_VOLUME, 0, 0x84, 0x48, hl_tlv),
CS42L56_HPB_VOLUME, 0, 0x44, 0x48, hl_tlv),
SOC_DOUBLE_R_SX_TLV("LineOut Volume", CS42L56_LOA_VOLUME,
CS42L56_LOB_VOLUME, 0, 0x84, 0x48, hl_tlv),
CS42L56_LOB_VOLUME, 0, 0x44, 0x48, hl_tlv),
SOC_SINGLE_TLV("Bass Shelving Volume", CS42L56_TONE_CTL,
0, 0x00, 1, tone_tlv),


@ -351,22 +351,22 @@ static const struct snd_kcontrol_new cs53l30_snd_controls[] = {
SOC_ENUM("ADC2 NG Delay", adc2_ng_delay_enum),
SOC_SINGLE_SX_TLV("ADC1A PGA Volume",
CS53L30_ADC1A_AFE_CTL, 0, 0x34, 0x18, pga_tlv),
CS53L30_ADC1A_AFE_CTL, 0, 0x34, 0x24, pga_tlv),
SOC_SINGLE_SX_TLV("ADC1B PGA Volume",
CS53L30_ADC1B_AFE_CTL, 0, 0x34, 0x18, pga_tlv),
CS53L30_ADC1B_AFE_CTL, 0, 0x34, 0x24, pga_tlv),
SOC_SINGLE_SX_TLV("ADC2A PGA Volume",
CS53L30_ADC2A_AFE_CTL, 0, 0x34, 0x18, pga_tlv),
CS53L30_ADC2A_AFE_CTL, 0, 0x34, 0x24, pga_tlv),
SOC_SINGLE_SX_TLV("ADC2B PGA Volume",
CS53L30_ADC2B_AFE_CTL, 0, 0x34, 0x18, pga_tlv),
CS53L30_ADC2B_AFE_CTL, 0, 0x34, 0x24, pga_tlv),
SOC_SINGLE_SX_TLV("ADC1A Digital Volume",
CS53L30_ADC1A_DIG_VOL, 0, 0xA0, 0x0C, dig_tlv),
CS53L30_ADC1A_DIG_VOL, 0, 0xA0, 0x6C, dig_tlv),
SOC_SINGLE_SX_TLV("ADC1B Digital Volume",
CS53L30_ADC1B_DIG_VOL, 0, 0xA0, 0x0C, dig_tlv),
CS53L30_ADC1B_DIG_VOL, 0, 0xA0, 0x6C, dig_tlv),
SOC_SINGLE_SX_TLV("ADC2A Digital Volume",
CS53L30_ADC2A_DIG_VOL, 0, 0xA0, 0x0C, dig_tlv),
CS53L30_ADC2A_DIG_VOL, 0, 0xA0, 0x6C, dig_tlv),
SOC_SINGLE_SX_TLV("ADC2B Digital Volume",
CS53L30_ADC2B_DIG_VOL, 0, 0xA0, 0x0C, dig_tlv),
CS53L30_ADC2B_DIG_VOL, 0, 0xA0, 0x6C, dig_tlv),
};
static const struct snd_soc_dapm_widget cs53l30_dapm_widgets[] = {


@ -165,13 +165,16 @@ static int es8328_put_deemph(struct snd_kcontrol *kcontrol,
if (deemph > 1)
return -EINVAL;
if (es8328->deemph == deemph)
return 0;
ret = es8328_set_deemph(component);
if (ret < 0)
return ret;
es8328->deemph = deemph;
return 0;
return 1;
}

Some files were not shown because too many files have changed in this diff.