Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

The nf_conntrack_core.c fix in 'net' is not relevant in 'net-next'
because we no longer have a per-netns conntrack hash.

The ip_gre.c conflict as well as the iwlwifi ones were cases of
overlapping changes.

Conflicts:
	drivers/net/wireless/intel/iwlwifi/mvm/tx.c
	net/ipv4/ip_gre.c
	net/netfilter/nf_conntrack_core.c

Signed-off-by: David S. Miller <davem@davemloft.net>
Committed by David S. Miller, 2016-05-15 13:32:12 -04:00
commit 909b27f706
144 changed files with 1365 additions and 616 deletions

View file

@@ -1,50 +1,29 @@
-Device-Tree binding for regmap
+Devicetree binding for regmap
 
-The endianness mode of CPU & Device scenarios:
-Index     Device     Endianness properties
----------------------------------------------------
-1         BE         'big-endian'
-2         LE         'little-endian'
-3         Native     'native-endian'
-
-For one device driver, which will run in different scenarios above
-on different SoCs using the devicetree, we need one way to simplify
-this.
-
 Optional properties:
-- {big,little,native}-endian: these are boolean properties, if absent
-  then the implementation will choose a default based on the device
-  being controlled.  These properties are for register values and all
-  the buffers only.  Native endian means that the CPU and device have
-  the same endianness.
+
+   little-endian,
+   big-endian,
+   native-endian:	See common-properties.txt for a definition
+
+Note:
+Regmap defaults to little-endian register access on MMIO based
+devices, this is by far the most common setting. On CPU
+architectures that typically run big-endian operating systems
+(e.g. PowerPC), registers can be defined as big-endian and must
+be marked that way in the devicetree.
+
+On SoCs that can be operated in both big-endian and little-endian
+modes, with a single hardware switch controlling both the endianess
+of the CPU and a byteswap for MMIO registers (e.g. many Broadcom MIPS
+chips), "native-endian" is used to allow using the same device tree
+blob in both cases.
 
 Examples:
-Scenario 1 : CPU in LE mode & device in LE mode.
+Scenario 1 : a register set in big-endian mode.
 dev: dev@40031000 {
-	      compatible = "name";
+	      compatible = "syscon";
 	      reg = <0x40031000 0x1000>;
-	      ...
-};
-
-Scenario 2 : CPU in LE mode & device in BE mode.
-dev: dev@40031000 {
-	      compatible = "name";
-	      reg = <0x40031000 0x1000>;
-	      ...
 	      big-endian;
-};
-
-Scenario 3 : CPU in BE mode & device in BE mode.
-dev: dev@40031000 {
-	      compatible = "name";
-	      reg = <0x40031000 0x1000>;
 	      ...
 };
-
-Scenario 4 : CPU in BE mode & device in LE mode.
-dev: dev@40031000 {
-	      compatible = "name";
-	      reg = <0x40031000 0x1000>;
-	      ...
-	      little-endian;
-};
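
For context on how a driver ends up honouring these properties: the regmap core reads them when the map is created, so an MMIO driver normally does not open-code any byteswapping. The sketch below is only an illustration of that flow, not code from this merge; the device name, register offsets and probe details are made up for the example, and the platform_driver boilerplate is omitted.

#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>

/* Hypothetical register layout, only for illustration. */
#define EXAMPLE_REG_ID		0x00
#define EXAMPLE_REG_CTRL	0x04

static const struct regmap_config example_regmap_cfg = {
	.reg_bits = 32,
	.val_bits = 32,
	.reg_stride = 4,
	/*
	 * No endianness is forced here: the regmap core picks it up from
	 * the DT properties documented above, falling back to the bus
	 * default (little-endian for MMIO).
	 */
};

static int example_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;
	struct regmap *map;
	unsigned int id;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	map = devm_regmap_init_mmio(&pdev->dev, base, &example_regmap_cfg);
	if (IS_ERR(map))
		return PTR_ERR(map);

	/* Reads and writes are byteswapped (or not) according to the DT. */
	return regmap_read(map, EXAMPLE_REG_ID, &id);
}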

View file

@@ -645,7 +645,7 @@ allowed to execute.
 perf_event_paranoid:
 
 Controls use of the performance events system by unprivileged
-users (without CAP_SYS_ADMIN).  The default value is 1.
+users (without CAP_SYS_ADMIN).  The default value is 2.
 
  -1: Allow use of (almost) all events by all users
 >=0: Disallow raw tracepoint access by users without CAP_IOC_LOCK
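
Raising the default to 2 means that, without CAP_SYS_ADMIN, perf is limited to user-space measurement of the user's own processes. A quick way to check what a given system allows is to read the sysctl; the small user-space sketch below does that and summarises the levels as described in this file. It is an illustration only, not part of the merge.

/* Read /proc/sys/kernel/perf_event_paranoid and report the effect. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/kernel/perf_event_paranoid", "r");
	long level;

	if (!f) {
		perror("perf_event_paranoid");
		return 1;
	}
	if (fscanf(f, "%ld", &level) != 1) {
		fclose(f);
		return 1;
	}
	fclose(f);

	printf("perf_event_paranoid = %ld\n", level);
	if (level >= 2)
		puts("unprivileged: user-space measurement of own processes only");
	else if (level >= 1)
		puts("unprivileged: kernel+user measurement of own processes");
	else if (level >= 0)
		puts("unprivileged: CPU-wide events allowed, raw tracepoints denied");
	else
		puts("unprivileged: (almost) everything allowed");
	return 0;
}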

View file

@@ -11315,6 +11315,20 @@ F:	include/trace/
 F:	kernel/trace/
 F:	tools/testing/selftests/ftrace/
 
+TRACING MMIO ACCESSES (MMIOTRACE)
+M:	Steven Rostedt <rostedt@goodmis.org>
+M:	Ingo Molnar <mingo@kernel.org>
+R:	Karol Herbst <karolherbst@gmail.com>
+R:	Pekka Paalanen <ppaalanen@gmail.com>
+S:	Maintained
+L:	linux-kernel@vger.kernel.org
+L:	nouveau@lists.freedesktop.org
+F:	kernel/trace/trace_mmiotrace.c
+F:	include/linux/mmiotrace.h
+F:	arch/x86/mm/kmmio.c
+F:	arch/x86/mm/mmio-mod.c
+F:	arch/x86/mm/testmmiotrace.c
+
 TRIVIAL PATCHES
 M:	Jiri Kosina <trivial@kernel.org>
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jikos/trivial.git

View file

@@ -106,7 +106,7 @@
 			pmc: pmc@fffffc00 {
 				compatible = "atmel,at91sam9x5-pmc", "syscon";
-				reg = <0xfffffc00 0x100>;
+				reg = <0xfffffc00 0x200>;
 				interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
 				interrupt-controller;
 				#address-cells = <1>;

View file

@@ -280,7 +280,7 @@
 			status = "disabled";
 
 			nfc@c0000000 {
-				compatible = "atmel,sama5d4-nfc";
+				compatible = "atmel,sama5d3-nfc";
 				#address-cells = <1>;
 				#size-cells = <1>;
 				reg = < /* NFC Command Registers */

View file

@@ -2,6 +2,7 @@ menu "Platform selection"
 
 config ARCH_SUNXI
 	bool "Allwinner sunxi 64-bit SoC Family"
+	select GENERIC_IRQ_CHIP
 	help
 	  This enables support for Allwinner sunxi based SoCs like the A64.

View file

@@ -476,6 +476,7 @@ emit_cond_jmp:
 		case BPF_JGE:
 			jmp_cond = A64_COND_CS;
 			break;
+		case BPF_JSET:
 		case BPF_JNE:
 			jmp_cond = A64_COND_NE;
 			break;

View file

@@ -26,3 +26,6 @@ CONFIG_VIRTIO_NET=y
 CONFIG_9P_FS=y
 CONFIG_NET_9P=y
 CONFIG_NET_9P_VIRTIO=y
+CONFIG_SCSI_LOWLEVEL=y
+CONFIG_SCSI_VIRTIO=y
+CONFIG_VIRTIO_INPUT=y

View file

@@ -3708,7 +3708,7 @@ __init int intel_pmu_init(void)
 				c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
 			}
 			c->idxmsk64 &=
-				~(~0UL << (INTEL_PMC_IDX_FIXED + x86_pmu.num_counters_fixed));
+				~(~0ULL << (INTEL_PMC_IDX_FIXED + x86_pmu.num_counters_fixed));
 			c->weight = hweight64(c->idxmsk64);
 		}
 	}
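
The one-character change matters because INTEL_PMC_IDX_FIXED is 32: on a 32-bit build `unsigned long` is only 32 bits wide, so shifting ~0UL by 32 or more is undefined and the fixed-counter bits never reach the 64-bit constraint mask, while ~0ULL always operates on 64 bits. The stand-alone snippet below illustrates the difference; it is not kernel code, and the shift value of 35 is just an example of INTEL_PMC_IDX_FIXED plus a typical fixed-counter count.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const int shift = 35;	/* e.g. INTEL_PMC_IDX_FIXED + num_counters_fixed */
	uint64_t mask_ull = ~(~0ULL << shift);

	printf("sizeof(unsigned long) = %zu\n", sizeof(unsigned long));
	printf("~(~0ULL << %d)        = 0x%016llx\n",
	       shift, (unsigned long long)mask_ull);

	/*
	 * The UL variant is deliberately not evaluated when long is 32 bits:
	 * a shift count >= the type width is undefined behaviour, which is
	 * exactly why the kernel switched the constant to ULL.
	 */
	if (sizeof(unsigned long) == 8)
		puts("~0UL happens to behave the same on this LP64 build");
	else
		puts("~0UL is only 32 bits here: the mask would lose the fixed-counter bits");
	return 0;
}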

View file

@@ -709,6 +709,7 @@ static int pt_buffer_reset_markers(struct pt_buffer *buf,
 
 	/* clear STOP and INT from current entry */
 	buf->topa_index[buf->stop_pos]->stop = 0;
+	buf->topa_index[buf->stop_pos]->intr = 0;
 	buf->topa_index[buf->intr_pos]->intr = 0;
 
 	/* how many pages till the STOP marker */
@@ -733,6 +734,7 @@ static int pt_buffer_reset_markers(struct pt_buffer *buf,
 	buf->intr_pos = idx;
 
 	buf->topa_index[buf->stop_pos]->stop = 1;
+	buf->topa_index[buf->stop_pos]->intr = 1;
 	buf->topa_index[buf->intr_pos]->intr = 1;
 
 	return 0;

View file

@@ -219,6 +219,9 @@
 #define KNL_CHA_MSR_PMON_BOX_FILTER_TID		0x1ff
 #define KNL_CHA_MSR_PMON_BOX_FILTER_STATE	(7 << 18)
 #define KNL_CHA_MSR_PMON_BOX_FILTER_OP		(0xfffffe2aULL << 32)
+#define KNL_CHA_MSR_PMON_BOX_FILTER_REMOTE_NODE	(0x1ULL << 32)
+#define KNL_CHA_MSR_PMON_BOX_FILTER_LOCAL_NODE	(0x1ULL << 33)
+#define KNL_CHA_MSR_PMON_BOX_FILTER_NNC		(0x1ULL << 37)
 
 /* KNL EDC/MC UCLK */
 #define KNL_UCLK_MSR_PMON_CTR0_LOW		0x400
@@ -1902,6 +1905,10 @@ static int knl_cha_hw_config(struct intel_uncore_box *box,
 		reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
 			    KNL_CHA_MSR_OFFSET * box->pmu->pmu_idx;
 		reg1->config = event->attr.config1 & knl_cha_filter_mask(idx);
+
+		reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_REMOTE_NODE;
+		reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_LOCAL_NODE;
+		reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_NNC;
 		reg1->idx = idx;
 	}
 	return 0;

View file

@@ -166,7 +166,7 @@ again:
 	if (unlikely(event->hw.event_base == MSR_SMI_COUNT))
 		delta = sign_extend64(delta, 31);
 
-	local64_add(now - prev, &event->count);
+	local64_add(delta, &event->count);
 }
 
 static void msr_event_start(struct perf_event *event, int flags)
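
The fix accumulates the already-computed delta, which for the 32-bit SMI counter has just been sign-extended at bit 31, instead of recomputing now - prev and losing that correction. The stand-alone sketch below is not kernel code; it uses a local helper modelled on the kernel's sign_extend64() to show why the correction matters when the 32-bit counter wraps.

#include <stdint.h>
#include <stdio.h>

/* Local reimplementation of the kernel's sign_extend64() helper. */
static int64_t sign_extend64(uint64_t value, int index)
{
	int shift = 63 - index;

	return (int64_t)(value << shift) >> shift;
}

int main(void)
{
	uint64_t prev = 0xfffffff0ULL;	/* 32-bit counter just before wrapping */
	uint64_t now  = 0x00000005ULL;	/* counter after 21 more events */
	uint64_t raw  = now - prev;	/* 0xffffffff00000015: nonsense as a count */
	int64_t delta = sign_extend64(raw, 31);

	printf("now - prev             = 0x%016llx\n", (unsigned long long)raw);
	printf("sign_extend64(raw, 31) = %lld (the real number of events)\n",
	       (long long)delta);
	return 0;
}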

View file

@@ -108,6 +108,14 @@ struct exception_table_entry {
 
 #define ARCH_HAS_RELATIVE_EXTABLE
 
+#define swap_ex_entry_fixup(a, b, tmp, delta)			\
+	do {							\
+		(a)->fixup = (b)->fixup + (delta);		\
+		(b)->fixup = (tmp).fixup - (delta);		\
+		(a)->handler = (b)->handler + (delta);		\
+		(b)->handler = (tmp).handler - (delta);		\
+	} while (0)
+
 extern int fixup_exception(struct pt_regs *regs, int trapnr);
 extern bool ex_has_fault_handler(unsigned long ip);
 extern int early_fixup_exception(unsigned long *ip);
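
The new macro keeps the self-relative fixup and handler offsets valid when the exception-table sort swaps two entries: an offset stored relative to its own field address must be adjusted by the distance the entry moved. The stand-alone model below only illustrates that arithmetic; it is not kernel code, and the struct and helpers are invented for the example.

#include <stdint.h>
#include <stdio.h>

struct rel_entry {
	int32_t fixup;		/* target = &entry->fixup + entry->fixup */
};

static void set_target(struct rel_entry *e, void *target)
{
	e->fixup = (int32_t)((intptr_t)target - (intptr_t)&e->fixup);
}

static void *get_target(const struct rel_entry *e)
{
	return (void *)((intptr_t)&e->fixup + e->fixup);
}

int main(void)
{
	static char handler_a, handler_b;
	struct rel_entry tbl[2];
	struct rel_entry tmp;
	intptr_t delta;

	set_target(&tbl[0], &handler_a);
	set_target(&tbl[1], &handler_b);

	/* Swap the two entries the way the extable sort would. */
	tmp = tbl[0];
	delta = (intptr_t)&tbl[1] - (intptr_t)&tbl[0];
	tbl[0].fixup = tbl[1].fixup + (int32_t)delta;	/* contents moved b -> a */
	tbl[1].fixup = tmp.fixup    - (int32_t)delta;	/* contents moved a -> b */

	printf("slot0 -> handler_b? %s\n",
	       get_target(&tbl[0]) == &handler_b ? "yes" : "no");
	printf("slot1 -> handler_a? %s\n",
	       get_target(&tbl[1]) == &handler_a ? "yes" : "no");
	return 0;
}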

View file

@@ -336,7 +336,7 @@ static int intel_num_cpu_cores(struct cpuinfo_x86 *c)
 {
 	unsigned int eax, ebx, ecx, edx;
 
-	if (c->cpuid_level < 4)
+	if (!IS_ENABLED(CONFIG_SMP) || c->cpuid_level < 4)
 		return 1;
 
 	/* Intel has a non-standard dependency on %ecx for this CPUID level. */
View file

@@ -332,6 +332,11 @@ static void __init smp_init_package_map(void)
 	 * primary cores.
 	 */
 	ncpus = boot_cpu_data.x86_max_cores;
+	if (!ncpus) {
+		pr_warn("x86_max_cores == zero !?!?");
+		ncpus = 1;
+	}
+
 	__max_logical_packages = DIV_ROUND_UP(total_cpus, ncpus);
 
 	/*

View file

@@ -5110,13 +5110,17 @@ static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt,
 
 static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *))
 {
+	register void *__sp asm(_ASM_SP);
 	ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;
+
 	if (!(ctxt->d & ByteOp))
 		fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
+
 	asm("push %[flags]; popf; call *%[fastop]; pushf; pop %[flags]\n"
 	    : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
-	      [fastop]"+S"(fop)
+	      [fastop]"+S"(fop), "+r"(__sp)
 	    : "c"(ctxt->src2.val));
+
 	ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
 	if (!fop) /* exception is returned in fop variable */
 		return emulate_de(ctxt);
View file

@ -9,24 +9,6 @@
#include "blk.h" #include "blk.h"
static bool iovec_gap_to_prv(struct request_queue *q,
struct iovec *prv, struct iovec *cur)
{
unsigned long prev_end;
if (!queue_virt_boundary(q))
return false;
if (prv->iov_base == NULL && prv->iov_len == 0)
/* prv is not set - don't check */
return false;
prev_end = (unsigned long)(prv->iov_base + prv->iov_len);
return (((unsigned long)cur->iov_base & queue_virt_boundary(q)) ||
prev_end & queue_virt_boundary(q));
}
int blk_rq_append_bio(struct request_queue *q, struct request *rq, int blk_rq_append_bio(struct request_queue *q, struct request *rq,
struct bio *bio) struct bio *bio)
{ {
@ -125,31 +107,18 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
struct rq_map_data *map_data, struct rq_map_data *map_data,
const struct iov_iter *iter, gfp_t gfp_mask) const struct iov_iter *iter, gfp_t gfp_mask)
{ {
struct iovec iov, prv = {.iov_base = NULL, .iov_len = 0}; bool copy = false;
bool copy = (q->dma_pad_mask & iter->count) || map_data; unsigned long align = q->dma_pad_mask | queue_dma_alignment(q);
struct bio *bio = NULL; struct bio *bio = NULL;
struct iov_iter i; struct iov_iter i;
int ret; int ret;
if (!iter || !iter->count) if (map_data)
return -EINVAL; copy = true;
else if (iov_iter_alignment(iter) & align)
iov_for_each(iov, i, *iter) { copy = true;
unsigned long uaddr = (unsigned long) iov.iov_base; else if (queue_virt_boundary(q))
copy = queue_virt_boundary(q) & iov_iter_gap_alignment(iter);
if (!iov.iov_len)
return -EINVAL;
/*
* Keep going so we check length of all segments
*/
if ((uaddr & queue_dma_alignment(q)) ||
iovec_gap_to_prv(q, &prv, &iov))
copy = true;
prv.iov_base = iov.iov_base;
prv.iov_len = iov.iov_len;
}
i = *iter; i = *iter;
do { do {

View file

@ -1776,6 +1776,7 @@ static int alg_test_drbg(const struct alg_test_desc *desc, const char *driver,
static int do_test_rsa(struct crypto_akcipher *tfm, static int do_test_rsa(struct crypto_akcipher *tfm,
struct akcipher_testvec *vecs) struct akcipher_testvec *vecs)
{ {
char *xbuf[XBUFSIZE];
struct akcipher_request *req; struct akcipher_request *req;
void *outbuf_enc = NULL; void *outbuf_enc = NULL;
void *outbuf_dec = NULL; void *outbuf_dec = NULL;
@ -1784,9 +1785,12 @@ static int do_test_rsa(struct crypto_akcipher *tfm,
int err = -ENOMEM; int err = -ENOMEM;
struct scatterlist src, dst, src_tab[2]; struct scatterlist src, dst, src_tab[2];
if (testmgr_alloc_buf(xbuf))
return err;
req = akcipher_request_alloc(tfm, GFP_KERNEL); req = akcipher_request_alloc(tfm, GFP_KERNEL);
if (!req) if (!req)
return err; goto free_xbuf;
init_completion(&result.completion); init_completion(&result.completion);
@ -1804,9 +1808,14 @@ static int do_test_rsa(struct crypto_akcipher *tfm,
if (!outbuf_enc) if (!outbuf_enc)
goto free_req; goto free_req;
if (WARN_ON(vecs->m_size > PAGE_SIZE))
goto free_all;
memcpy(xbuf[0], vecs->m, vecs->m_size);
sg_init_table(src_tab, 2); sg_init_table(src_tab, 2);
sg_set_buf(&src_tab[0], vecs->m, 8); sg_set_buf(&src_tab[0], xbuf[0], 8);
sg_set_buf(&src_tab[1], vecs->m + 8, vecs->m_size - 8); sg_set_buf(&src_tab[1], xbuf[0] + 8, vecs->m_size - 8);
sg_init_one(&dst, outbuf_enc, out_len_max); sg_init_one(&dst, outbuf_enc, out_len_max);
akcipher_request_set_crypt(req, src_tab, &dst, vecs->m_size, akcipher_request_set_crypt(req, src_tab, &dst, vecs->m_size,
out_len_max); out_len_max);
@ -1825,7 +1834,7 @@ static int do_test_rsa(struct crypto_akcipher *tfm,
goto free_all; goto free_all;
} }
/* verify that encrypted message is equal to expected */ /* verify that encrypted message is equal to expected */
if (memcmp(vecs->c, sg_virt(req->dst), vecs->c_size)) { if (memcmp(vecs->c, outbuf_enc, vecs->c_size)) {
pr_err("alg: rsa: encrypt test failed. Invalid output\n"); pr_err("alg: rsa: encrypt test failed. Invalid output\n");
err = -EINVAL; err = -EINVAL;
goto free_all; goto free_all;
@ -1840,7 +1849,13 @@ static int do_test_rsa(struct crypto_akcipher *tfm,
err = -ENOMEM; err = -ENOMEM;
goto free_all; goto free_all;
} }
sg_init_one(&src, vecs->c, vecs->c_size);
if (WARN_ON(vecs->c_size > PAGE_SIZE))
goto free_all;
memcpy(xbuf[0], vecs->c, vecs->c_size);
sg_init_one(&src, xbuf[0], vecs->c_size);
sg_init_one(&dst, outbuf_dec, out_len_max); sg_init_one(&dst, outbuf_dec, out_len_max);
init_completion(&result.completion); init_completion(&result.completion);
akcipher_request_set_crypt(req, &src, &dst, vecs->c_size, out_len_max); akcipher_request_set_crypt(req, &src, &dst, vecs->c_size, out_len_max);
@ -1867,6 +1882,8 @@ free_all:
kfree(outbuf_enc); kfree(outbuf_enc);
free_req: free_req:
akcipher_request_free(req); akcipher_request_free(req);
free_xbuf:
testmgr_free_buf(xbuf);
return err; return err;
} }

View file

@@ -13,6 +13,7 @@
 #ifndef _REGMAP_INTERNAL_H
 #define _REGMAP_INTERNAL_H
 
+#include <linux/device.h>
 #include <linux/regmap.h>
 #include <linux/fs.h>
 #include <linux/list.h>

View file

@@ -23,6 +23,8 @@
 #include <linux/regmap.h>
 #include <linux/slab.h>
 
+#include "internal.h"
+
 struct regmap_mmio_context {
 	void __iomem *regs;
 	unsigned val_bytes;
@@ -212,6 +214,7 @@ static const struct regmap_bus regmap_mmio = {
 	.reg_write = regmap_mmio_write,
 	.reg_read = regmap_mmio_read,
 	.free_context = regmap_mmio_free_context,
+	.val_format_endian_default = REGMAP_ENDIAN_LITTLE,
 };
 
 static struct regmap_mmio_context *regmap_mmio_gen_context(struct device *dev,
@@ -245,7 +248,7 @@ static struct regmap_mmio_context *regmap_mmio_gen_context(struct device *dev,
 	ctx->val_bytes = config->val_bits / 8;
 	ctx->clk = ERR_PTR(-ENODEV);
 
-	switch (config->reg_format_endian) {
+	switch (regmap_get_val_endian(dev, &regmap_mmio, config)) {
 	case REGMAP_ENDIAN_DEFAULT:
 	case REGMAP_ENDIAN_LITTLE:
 #ifdef __LITTLE_ENDIAN

View file

@@ -142,7 +142,7 @@ static int regmap_spmi_ext_read(void *context,
 	while (val_size) {
 		len = min_t(size_t, val_size, 8);
 
-		err = spmi_ext_register_readl(context, addr, val, val_size);
+		err = spmi_ext_register_readl(context, addr, val, len);
 		if (err)
 			goto err_out;
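
The bug pattern here is generic to chunked accessors: the loop computes a per-chunk len but then hands the low-level read the full remaining val_size, overflowing the destination on reads larger than one chunk. Below is a stand-alone sketch of the corrected pattern; read_chunk() is a hypothetical stand-in for a bus accessor such as spmi_ext_register_readl() and simply fills the buffer so the loop can run in user space.

#include <stddef.h>
#include <string.h>

#define CHUNK_MAX 8

/* Hypothetical bus accessor: pretend we read 'len' bytes at 'addr'. */
static int read_chunk(unsigned int addr, unsigned char *buf, size_t len)
{
	memset(buf, 0xab, len);
	return 0;
}

static int read_all(unsigned int addr, unsigned char *val, size_t val_size)
{
	while (val_size) {
		size_t len = val_size < CHUNK_MAX ? val_size : CHUNK_MAX;
		int err;

		/* The bug was passing 'val_size' here instead of 'len'. */
		err = read_chunk(addr, val, len);
		if (err)
			return err;

		addr += len;
		val += len;
		val_size -= len;
	}
	return 0;
}

int main(void)
{
	unsigned char buf[20];

	return read_all(0x100, buf, sizeof(buf));
}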

View file

@@ -276,8 +276,8 @@ static int amdgpu_atombios_dp_get_dp_link_config(struct drm_connector *connector
 			}
 		}
 	} else {
-		for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
-			for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) {
+		for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) {
+			for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
 				max_pix_clock = (lane_num * link_rates[i] * 8) / bpp;
 				if (max_pix_clock >= pix_clock) {
 					*dp_lanes = lane_num;

View file

@ -2872,20 +2872,6 @@ static void intel_dp_info(struct seq_file *m,
intel_panel_info(m, &intel_connector->panel); intel_panel_info(m, &intel_connector->panel);
} }
static void intel_dp_mst_info(struct seq_file *m,
struct intel_connector *intel_connector)
{
struct intel_encoder *intel_encoder = intel_connector->encoder;
struct intel_dp_mst_encoder *intel_mst =
enc_to_mst(&intel_encoder->base);
struct intel_digital_port *intel_dig_port = intel_mst->primary;
struct intel_dp *intel_dp = &intel_dig_port->dp;
bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr,
intel_connector->port);
seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
}
static void intel_hdmi_info(struct seq_file *m, static void intel_hdmi_info(struct seq_file *m,
struct intel_connector *intel_connector) struct intel_connector *intel_connector)
{ {
@ -2929,8 +2915,6 @@ static void intel_connector_info(struct seq_file *m,
intel_hdmi_info(m, intel_connector); intel_hdmi_info(m, intel_connector);
else if (intel_encoder->type == INTEL_OUTPUT_LVDS) else if (intel_encoder->type == INTEL_OUTPUT_LVDS)
intel_lvds_info(m, intel_connector); intel_lvds_info(m, intel_connector);
else if (intel_encoder->type == INTEL_OUTPUT_DP_MST)
intel_dp_mst_info(m, intel_connector);
} }
seq_printf(m, "\tmodes:\n"); seq_printf(m, "\tmodes:\n");

View file

@ -7444,6 +7444,8 @@ enum skl_disp_power_wells {
#define TRANS_CLK_SEL_DISABLED (0x0<<29) #define TRANS_CLK_SEL_DISABLED (0x0<<29)
#define TRANS_CLK_SEL_PORT(x) (((x)+1)<<29) #define TRANS_CLK_SEL_PORT(x) (((x)+1)<<29)
#define CDCLK_FREQ _MMIO(0x46200)
#define _TRANSA_MSA_MISC 0x60410 #define _TRANSA_MSA_MISC 0x60410
#define _TRANSB_MSA_MISC 0x61410 #define _TRANSB_MSA_MISC 0x61410
#define _TRANSC_MSA_MISC 0x62410 #define _TRANSC_MSA_MISC 0x62410

View file

@ -262,8 +262,7 @@ static void hsw_audio_codec_disable(struct intel_encoder *encoder)
tmp |= AUD_CONFIG_N_PROG_ENABLE; tmp |= AUD_CONFIG_N_PROG_ENABLE;
tmp &= ~AUD_CONFIG_UPPER_N_MASK; tmp &= ~AUD_CONFIG_UPPER_N_MASK;
tmp &= ~AUD_CONFIG_LOWER_N_MASK; tmp &= ~AUD_CONFIG_LOWER_N_MASK;
if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT) || if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT))
intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DP_MST))
tmp |= AUD_CONFIG_N_VALUE_INDEX; tmp |= AUD_CONFIG_N_VALUE_INDEX;
I915_WRITE(HSW_AUD_CFG(pipe), tmp); I915_WRITE(HSW_AUD_CFG(pipe), tmp);
@ -476,8 +475,7 @@ static void ilk_audio_codec_enable(struct drm_connector *connector,
tmp &= ~AUD_CONFIG_N_VALUE_INDEX; tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
tmp &= ~AUD_CONFIG_N_PROG_ENABLE; tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK; tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK;
if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT) || if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT))
intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DP_MST))
tmp |= AUD_CONFIG_N_VALUE_INDEX; tmp |= AUD_CONFIG_N_VALUE_INDEX;
else else
tmp |= audio_config_hdmi_pixel_clock(adjusted_mode); tmp |= audio_config_hdmi_pixel_clock(adjusted_mode);
@ -515,8 +513,7 @@ void intel_audio_codec_enable(struct intel_encoder *intel_encoder)
/* ELD Conn_Type */ /* ELD Conn_Type */
connector->eld[5] &= ~(3 << 2); connector->eld[5] &= ~(3 << 2);
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) || if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
intel_pipe_has_type(crtc, INTEL_OUTPUT_DP_MST))
connector->eld[5] |= (1 << 2); connector->eld[5] |= (1 << 2);
connector->eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2; connector->eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2;

View file

@ -257,8 +257,14 @@ static bool intel_crt_compute_config(struct intel_encoder *encoder,
pipe_config->has_pch_encoder = true; pipe_config->has_pch_encoder = true;
/* LPT FDI RX only supports 8bpc. */ /* LPT FDI RX only supports 8bpc. */
if (HAS_PCH_LPT(dev)) if (HAS_PCH_LPT(dev)) {
if (pipe_config->bw_constrained && pipe_config->pipe_bpp < 24) {
DRM_DEBUG_KMS("LPT only supports 24bpp\n");
return false;
}
pipe_config->pipe_bpp = 24; pipe_config->pipe_bpp = 24;
}
/* FDI must always be 2.7 GHz */ /* FDI must always be 2.7 GHz */
if (HAS_DDI(dev)) { if (HAS_DDI(dev)) {

View file

@ -3106,23 +3106,6 @@ void intel_ddi_fdi_disable(struct drm_crtc *crtc)
I915_WRITE(FDI_RX_CTL(PIPE_A), val); I915_WRITE(FDI_RX_CTL(PIPE_A), val);
} }
bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv,
struct intel_crtc *intel_crtc)
{
u32 temp;
if (intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_AUDIO)) {
temp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
if (temp & AUDIO_OUTPUT_ENABLE(intel_crtc->pipe))
return true;
}
return false;
}
void intel_ddi_get_config(struct intel_encoder *encoder, void intel_ddi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config) struct intel_crtc_state *pipe_config)
{ {
@ -3183,8 +3166,11 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
break; break;
} }
pipe_config->has_audio = if (intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_AUDIO)) {
intel_ddi_is_audio_enabled(dev_priv, intel_crtc); temp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
if (temp & AUDIO_OUTPUT_ENABLE(intel_crtc->pipe))
pipe_config->has_audio = true;
}
if (encoder->type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp_bpp && if (encoder->type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp_bpp &&
pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) { pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {

View file

@ -7988,9 +7988,6 @@ static void i9xx_get_pfit_config(struct intel_crtc *crtc,
pipe_config->gmch_pfit.control = tmp; pipe_config->gmch_pfit.control = tmp;
pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS); pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
if (INTEL_INFO(dev)->gen < 5)
pipe_config->gmch_pfit.lvds_border_bits =
I915_READ(LVDS) & LVDS_BORDER_ENABLE;
} }
static void vlv_crtc_clock_get(struct intel_crtc *crtc, static void vlv_crtc_clock_get(struct intel_crtc *crtc,
@ -9752,6 +9749,8 @@ static void broadwell_set_cdclk(struct drm_device *dev, int cdclk)
sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ, data); sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ, data);
mutex_unlock(&dev_priv->rps.hw_lock); mutex_unlock(&dev_priv->rps.hw_lock);
I915_WRITE(CDCLK_FREQ, DIV_ROUND_CLOSEST(cdclk, 1000) - 1);
intel_update_cdclk(dev); intel_update_cdclk(dev);
WARN(cdclk != dev_priv->cdclk_freq, WARN(cdclk != dev_priv->cdclk_freq,

View file

@ -78,8 +78,6 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
return false; return false;
} }
if (drm_dp_mst_port_has_audio(&intel_dp->mst_mgr, found->port))
pipe_config->has_audio = true;
mst_pbn = drm_dp_calc_pbn_mode(adjusted_mode->crtc_clock, bpp); mst_pbn = drm_dp_calc_pbn_mode(adjusted_mode->crtc_clock, bpp);
pipe_config->pbn = mst_pbn; pipe_config->pbn = mst_pbn;
@ -104,11 +102,6 @@ static void intel_mst_disable_dp(struct intel_encoder *encoder)
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base); struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
struct intel_digital_port *intel_dig_port = intel_mst->primary; struct intel_digital_port *intel_dig_port = intel_mst->primary;
struct intel_dp *intel_dp = &intel_dig_port->dp; struct intel_dp *intel_dp = &intel_dig_port->dp;
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = encoder->base.crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int ret; int ret;
DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links); DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links);
@ -119,10 +112,6 @@ static void intel_mst_disable_dp(struct intel_encoder *encoder)
if (ret) { if (ret) {
DRM_ERROR("failed to update payload %d\n", ret); DRM_ERROR("failed to update payload %d\n", ret);
} }
if (intel_crtc->config->has_audio) {
intel_audio_codec_disable(encoder);
intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
}
} }
static void intel_mst_post_disable_dp(struct intel_encoder *encoder) static void intel_mst_post_disable_dp(struct intel_encoder *encoder)
@ -221,7 +210,6 @@ static void intel_mst_enable_dp(struct intel_encoder *encoder)
struct intel_dp *intel_dp = &intel_dig_port->dp; struct intel_dp *intel_dp = &intel_dig_port->dp;
struct drm_device *dev = intel_dig_port->base.base.dev; struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
enum port port = intel_dig_port->port; enum port port = intel_dig_port->port;
int ret; int ret;
@ -234,13 +222,6 @@ static void intel_mst_enable_dp(struct intel_encoder *encoder)
ret = drm_dp_check_act_status(&intel_dp->mst_mgr); ret = drm_dp_check_act_status(&intel_dp->mst_mgr);
ret = drm_dp_update_payload_part2(&intel_dp->mst_mgr); ret = drm_dp_update_payload_part2(&intel_dp->mst_mgr);
if (crtc->config->has_audio) {
DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
pipe_name(crtc->pipe));
intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
intel_audio_codec_enable(encoder);
}
} }
static bool intel_dp_mst_enc_get_hw_state(struct intel_encoder *encoder, static bool intel_dp_mst_enc_get_hw_state(struct intel_encoder *encoder,
@ -266,9 +247,6 @@ static void intel_dp_mst_enc_get_config(struct intel_encoder *encoder,
pipe_config->has_dp_encoder = true; pipe_config->has_dp_encoder = true;
pipe_config->has_audio =
intel_ddi_is_audio_enabled(dev_priv, crtc);
temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder)); temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
if (temp & TRANS_DDI_PHSYNC) if (temp & TRANS_DDI_PHSYNC)
flags |= DRM_MODE_FLAG_PHSYNC; flags |= DRM_MODE_FLAG_PHSYNC;

View file

@ -1019,8 +1019,6 @@ void intel_ddi_set_pipe_settings(struct drm_crtc *crtc);
void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp); void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp);
bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector); bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
void intel_ddi_fdi_disable(struct drm_crtc *crtc); void intel_ddi_fdi_disable(struct drm_crtc *crtc);
bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv,
struct intel_crtc *intel_crtc);
void intel_ddi_get_config(struct intel_encoder *encoder, void intel_ddi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config); struct intel_crtc_state *pipe_config);
struct intel_encoder * struct intel_encoder *

View file

@ -123,6 +123,10 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
pipe_config->base.adjusted_mode.flags |= flags; pipe_config->base.adjusted_mode.flags |= flags;
if (INTEL_INFO(dev)->gen < 5)
pipe_config->gmch_pfit.lvds_border_bits =
tmp & LVDS_BORDER_ENABLE;
/* gen2/3 store dither state in pfit control, needs to match */ /* gen2/3 store dither state in pfit control, needs to match */
if (INTEL_INFO(dev)->gen < 4) { if (INTEL_INFO(dev)->gen < 4) {
tmp = I915_READ(PFIT_CONTROL); tmp = I915_READ(PFIT_CONTROL);

View file

@ -6646,6 +6646,12 @@ static void broadwell_init_clock_gating(struct drm_device *dev)
misccpctl = I915_READ(GEN7_MISCCPCTL); misccpctl = I915_READ(GEN7_MISCCPCTL);
I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE); I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
I915_WRITE(GEN8_L3SQCREG1, BDW_WA_L3SQCREG1_DEFAULT); I915_WRITE(GEN8_L3SQCREG1, BDW_WA_L3SQCREG1_DEFAULT);
/*
* Wait at least 100 clocks before re-enabling clock gating. See
* the definition of L3SQCREG1 in BSpec.
*/
POSTING_READ(GEN8_L3SQCREG1);
udelay(1);
I915_WRITE(GEN7_MISCCPCTL, misccpctl); I915_WRITE(GEN7_MISCCPCTL, misccpctl);
/* /*

View file

@ -1742,6 +1742,7 @@ static u32 radeon_get_pll_use_mask(struct drm_crtc *crtc)
static int radeon_get_shared_dp_ppll(struct drm_crtc *crtc) static int radeon_get_shared_dp_ppll(struct drm_crtc *crtc)
{ {
struct drm_device *dev = crtc->dev; struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
struct drm_crtc *test_crtc; struct drm_crtc *test_crtc;
struct radeon_crtc *test_radeon_crtc; struct radeon_crtc *test_radeon_crtc;
@ -1751,6 +1752,10 @@ static int radeon_get_shared_dp_ppll(struct drm_crtc *crtc)
test_radeon_crtc = to_radeon_crtc(test_crtc); test_radeon_crtc = to_radeon_crtc(test_crtc);
if (test_radeon_crtc->encoder && if (test_radeon_crtc->encoder &&
ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_radeon_crtc->encoder))) { ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_radeon_crtc->encoder))) {
/* PPLL2 is exclusive to UNIPHYA on DCE61 */
if (ASIC_IS_DCE61(rdev) && !ASIC_IS_DCE8(rdev) &&
test_radeon_crtc->pll_id == ATOM_PPLL2)
continue;
/* for DP use the same PLL for all */ /* for DP use the same PLL for all */
if (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID) if (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID)
return test_radeon_crtc->pll_id; return test_radeon_crtc->pll_id;
@ -1772,6 +1777,7 @@ static int radeon_get_shared_nondp_ppll(struct drm_crtc *crtc)
{ {
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev; struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
struct drm_crtc *test_crtc; struct drm_crtc *test_crtc;
struct radeon_crtc *test_radeon_crtc; struct radeon_crtc *test_radeon_crtc;
u32 adjusted_clock, test_adjusted_clock; u32 adjusted_clock, test_adjusted_clock;
@ -1787,6 +1793,10 @@ static int radeon_get_shared_nondp_ppll(struct drm_crtc *crtc)
test_radeon_crtc = to_radeon_crtc(test_crtc); test_radeon_crtc = to_radeon_crtc(test_crtc);
if (test_radeon_crtc->encoder && if (test_radeon_crtc->encoder &&
!ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_radeon_crtc->encoder))) { !ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_radeon_crtc->encoder))) {
/* PPLL2 is exclusive to UNIPHYA on DCE61 */
if (ASIC_IS_DCE61(rdev) && !ASIC_IS_DCE8(rdev) &&
test_radeon_crtc->pll_id == ATOM_PPLL2)
continue;
/* check if we are already driving this connector with another crtc */ /* check if we are already driving this connector with another crtc */
if (test_radeon_crtc->connector == radeon_crtc->connector) { if (test_radeon_crtc->connector == radeon_crtc->connector) {
/* if we are, return that pll */ /* if we are, return that pll */

View file

@@ -326,8 +326,8 @@ int radeon_dp_get_dp_link_config(struct drm_connector *connector,
 			}
 		}
 	} else {
-		for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
-			for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) {
+		for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) {
+			for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
 				max_pix_clock = (lane_num * link_rates[i] * 8) / bpp;
 				if (max_pix_clock >= pix_clock) {
 					*dp_lanes = lane_num;

View file

@@ -105,7 +105,7 @@ radeon_dp_aux_transfer_native(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg
 
 	tmp &= AUX_HPD_SEL(0x7);
 	tmp |= AUX_HPD_SEL(chan->rec.hpd);
-	tmp |= AUX_EN | AUX_LS_READ_EN;
+	tmp |= AUX_EN | AUX_LS_READ_EN | AUX_HPD_DISCON(0x1);
 
 	WREG32(AUX_CONTROL + aux_offset[instance], tmp);

View file

@@ -255,12 +255,14 @@ static int max8997_haptic_probe(struct platform_device *pdev)
 	struct max8997_dev *iodev = dev_get_drvdata(pdev->dev.parent);
 	const struct max8997_platform_data *pdata =
 					dev_get_platdata(iodev->dev);
-	const struct max8997_haptic_platform_data *haptic_pdata =
-					pdata->haptic_pdata;
+	const struct max8997_haptic_platform_data *haptic_pdata = NULL;
 	struct max8997_haptic *chip;
 	struct input_dev *input_dev;
 	int error;
 
+	if (pdata)
+		haptic_pdata = pdata->haptic_pdata;
+
 	if (!haptic_pdata) {
 		dev_err(&pdev->dev, "no haptic platform data\n");
 		return -EINVAL;

View file

@@ -257,6 +257,7 @@ static int twl6040_vibra_probe(struct platform_device *pdev)
 	int vddvibr_uV = 0;
 	int error;
 
+	of_node_get(twl6040_core_dev->of_node);
 	twl6040_core_node = of_find_node_by_name(twl6040_core_dev->of_node,
 						 "vibra");
 	if (!twl6040_core_node) {

View file

@@ -2,6 +2,10 @@
  * BYD TouchPad PS/2 mouse driver
  *
  * Copyright (C) 2015 Chris Diamand <chris@diamand.org>
+ * Copyright (C) 2015 Richard Pospesel
+ * Copyright (C) 2015 Tai Chi Minh Ralph Eastwood
+ * Copyright (C) 2015 Martin Wimpress
+ * Copyright (C) 2015 Jay Kuri
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published by

View file

@@ -74,11 +74,6 @@ static int __verify_planes_array(struct vb2_buffer *vb, const struct v4l2_buffer
 	return 0;
 }
 
-static int __verify_planes_array_core(struct vb2_buffer *vb, const void *pb)
-{
-	return __verify_planes_array(vb, pb);
-}
-
 /**
  * __verify_length() - Verify that the bytesused value for each plane fits in
  * the plane length and that the data offset doesn't exceed the bytesused value.
@@ -442,7 +437,6 @@ static int __fill_vb2_buffer(struct vb2_buffer *vb,
 }
 
 static const struct vb2_buf_ops v4l2_buf_ops = {
-	.verify_planes_array = __verify_planes_array_core,
 	.fill_user_buffer = __fill_v4l2_buffer,
 	.fill_vb2_buffer = __fill_vb2_buffer,
 	.copy_timestamp = __copy_timestamp,

View file

@ -43,6 +43,7 @@ static void xgene_cle_idt_to_hw(u32 dstqid, u32 fpsel,
static void xgene_cle_dbptr_to_hw(struct xgene_enet_pdata *pdata, static void xgene_cle_dbptr_to_hw(struct xgene_enet_pdata *pdata,
struct xgene_cle_dbptr *dbptr, u32 *buf) struct xgene_cle_dbptr *dbptr, u32 *buf)
{ {
buf[0] = SET_VAL(CLE_DROP, dbptr->drop);
buf[4] = SET_VAL(CLE_FPSEL, dbptr->fpsel) | buf[4] = SET_VAL(CLE_FPSEL, dbptr->fpsel) |
SET_VAL(CLE_DSTQIDL, dbptr->dstqid); SET_VAL(CLE_DSTQIDL, dbptr->dstqid);
@ -412,7 +413,7 @@ static int xgene_enet_cle_init(struct xgene_enet_pdata *pdata)
.branch = { .branch = {
{ {
/* IPV4 */ /* IPV4 */
.valid = 0, .valid = 1,
.next_packet_pointer = 22, .next_packet_pointer = 22,
.jump_bw = JMP_FW, .jump_bw = JMP_FW,
.jump_rel = JMP_ABS, .jump_rel = JMP_ABS,
@ -420,7 +421,7 @@ static int xgene_enet_cle_init(struct xgene_enet_pdata *pdata)
.next_node = PKT_PROT_NODE, .next_node = PKT_PROT_NODE,
.next_branch = 0, .next_branch = 0,
.data = 0x8, .data = 0x8,
.mask = 0xffff .mask = 0x0
}, },
{ {
.valid = 0, .valid = 0,
@ -456,7 +457,7 @@ static int xgene_enet_cle_init(struct xgene_enet_pdata *pdata)
.next_node = RSS_IPV4_TCP_NODE, .next_node = RSS_IPV4_TCP_NODE,
.next_branch = 0, .next_branch = 0,
.data = 0x0600, .data = 0x0600,
.mask = 0xffff .mask = 0x00ff
}, },
{ {
/* UDP */ /* UDP */
@ -468,7 +469,7 @@ static int xgene_enet_cle_init(struct xgene_enet_pdata *pdata)
.next_node = RSS_IPV4_UDP_NODE, .next_node = RSS_IPV4_UDP_NODE,
.next_branch = 0, .next_branch = 0,
.data = 0x1100, .data = 0x1100,
.mask = 0xffff .mask = 0x00ff
}, },
{ {
.valid = 0, .valid = 0,
@ -642,7 +643,7 @@ static int xgene_enet_cle_init(struct xgene_enet_pdata *pdata)
{ {
/* TCP DST Port */ /* TCP DST Port */
.valid = 0, .valid = 0,
.next_packet_pointer = 256, .next_packet_pointer = 258,
.jump_bw = JMP_FW, .jump_bw = JMP_FW,
.jump_rel = JMP_ABS, .jump_rel = JMP_ABS,
.operation = EQT, .operation = EQT,

View file

@ -83,6 +83,8 @@
#define CLE_TYPE_POS 0 #define CLE_TYPE_POS 0
#define CLE_TYPE_LEN 2 #define CLE_TYPE_LEN 2
#define CLE_DROP_POS 28
#define CLE_DROP_LEN 1
#define CLE_DSTQIDL_POS 25 #define CLE_DSTQIDL_POS 25
#define CLE_DSTQIDL_LEN 7 #define CLE_DSTQIDL_LEN 7
#define CLE_DSTQIDH_POS 0 #define CLE_DSTQIDH_POS 0

View file

@ -219,27 +219,30 @@ void xgene_enet_parse_error(struct xgene_enet_desc_ring *ring,
struct xgene_enet_pdata *pdata, struct xgene_enet_pdata *pdata,
enum xgene_enet_err_code status) enum xgene_enet_err_code status)
{ {
struct rtnl_link_stats64 *stats = &pdata->stats;
switch (status) { switch (status) {
case INGRESS_CRC: case INGRESS_CRC:
stats->rx_crc_errors++; ring->rx_crc_errors++;
ring->rx_dropped++;
break; break;
case INGRESS_CHECKSUM: case INGRESS_CHECKSUM:
case INGRESS_CHECKSUM_COMPUTE: case INGRESS_CHECKSUM_COMPUTE:
stats->rx_errors++; ring->rx_errors++;
ring->rx_dropped++;
break; break;
case INGRESS_TRUNC_FRAME: case INGRESS_TRUNC_FRAME:
stats->rx_frame_errors++; ring->rx_frame_errors++;
ring->rx_dropped++;
break; break;
case INGRESS_PKT_LEN: case INGRESS_PKT_LEN:
stats->rx_length_errors++; ring->rx_length_errors++;
ring->rx_dropped++;
break; break;
case INGRESS_PKT_UNDER: case INGRESS_PKT_UNDER:
stats->rx_frame_errors++; ring->rx_frame_errors++;
ring->rx_dropped++;
break; break;
case INGRESS_FIFO_OVERRUN: case INGRESS_FIFO_OVERRUN:
stats->rx_fifo_errors++; ring->rx_fifo_errors++;
break; break;
default: default:
break; break;

View file

@ -86,7 +86,7 @@ enum xgene_enet_rm {
#define RINGADDRL_POS 5 #define RINGADDRL_POS 5
#define RINGADDRL_LEN 27 #define RINGADDRL_LEN 27
#define RINGADDRH_POS 0 #define RINGADDRH_POS 0
#define RINGADDRH_LEN 6 #define RINGADDRH_LEN 7
#define RINGSIZE_POS 23 #define RINGSIZE_POS 23
#define RINGSIZE_LEN 3 #define RINGSIZE_LEN 3
#define RINGTYPE_POS 19 #define RINGTYPE_POS 19
@ -94,9 +94,9 @@ enum xgene_enet_rm {
#define RINGMODE_POS 20 #define RINGMODE_POS 20
#define RINGMODE_LEN 3 #define RINGMODE_LEN 3
#define RECOMTIMEOUTL_POS 28 #define RECOMTIMEOUTL_POS 28
#define RECOMTIMEOUTL_LEN 3 #define RECOMTIMEOUTL_LEN 4
#define RECOMTIMEOUTH_POS 0 #define RECOMTIMEOUTH_POS 0
#define RECOMTIMEOUTH_LEN 2 #define RECOMTIMEOUTH_LEN 3
#define NUMMSGSINQ_POS 1 #define NUMMSGSINQ_POS 1
#define NUMMSGSINQ_LEN 16 #define NUMMSGSINQ_LEN 16
#define ACCEPTLERR BIT(19) #define ACCEPTLERR BIT(19)
@ -201,6 +201,8 @@ enum xgene_enet_rm {
#define USERINFO_LEN 32 #define USERINFO_LEN 32
#define FPQNUM_POS 32 #define FPQNUM_POS 32
#define FPQNUM_LEN 12 #define FPQNUM_LEN 12
#define ELERR_POS 46
#define ELERR_LEN 2
#define NV_POS 50 #define NV_POS 50
#define NV_LEN 1 #define NV_LEN 1
#define LL_POS 51 #define LL_POS 51

View file

@ -443,8 +443,8 @@ static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb,
skb_tx_timestamp(skb); skb_tx_timestamp(skb);
pdata->stats.tx_packets++; tx_ring->tx_packets++;
pdata->stats.tx_bytes += skb->len; tx_ring->tx_bytes += skb->len;
pdata->ring_ops->wr_cmd(tx_ring, count); pdata->ring_ops->wr_cmd(tx_ring, count);
return NETDEV_TX_OK; return NETDEV_TX_OK;
@ -483,12 +483,12 @@ static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring,
skb = buf_pool->rx_skb[skb_index]; skb = buf_pool->rx_skb[skb_index];
/* checking for error */ /* checking for error */
status = GET_VAL(LERR, le64_to_cpu(raw_desc->m0)); status = (GET_VAL(ELERR, le64_to_cpu(raw_desc->m0)) << LERR_LEN) ||
GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
if (unlikely(status > 2)) { if (unlikely(status > 2)) {
dev_kfree_skb_any(skb); dev_kfree_skb_any(skb);
xgene_enet_parse_error(rx_ring, netdev_priv(rx_ring->ndev), xgene_enet_parse_error(rx_ring, netdev_priv(rx_ring->ndev),
status); status);
pdata->stats.rx_dropped++;
ret = -EIO; ret = -EIO;
goto out; goto out;
} }
@ -506,8 +506,8 @@ static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring,
xgene_enet_skip_csum(skb); xgene_enet_skip_csum(skb);
} }
pdata->stats.rx_packets++; rx_ring->rx_packets++;
pdata->stats.rx_bytes += datalen; rx_ring->rx_bytes += datalen;
napi_gro_receive(&rx_ring->napi, skb); napi_gro_receive(&rx_ring->napi, skb);
out: out:
if (--rx_ring->nbufpool == 0) { if (--rx_ring->nbufpool == 0) {
@ -630,7 +630,7 @@ static int xgene_enet_register_irq(struct net_device *ndev)
ring = pdata->rx_ring[i]; ring = pdata->rx_ring[i];
irq_set_status_flags(ring->irq, IRQ_DISABLE_UNLAZY); irq_set_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq, ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq,
IRQF_SHARED, ring->irq_name, ring); 0, ring->irq_name, ring);
if (ret) { if (ret) {
netdev_err(ndev, "Failed to request irq %s\n", netdev_err(ndev, "Failed to request irq %s\n",
ring->irq_name); ring->irq_name);
@ -641,7 +641,7 @@ static int xgene_enet_register_irq(struct net_device *ndev)
ring = pdata->tx_ring[i]->cp_ring; ring = pdata->tx_ring[i]->cp_ring;
irq_set_status_flags(ring->irq, IRQ_DISABLE_UNLAZY); irq_set_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq, ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq,
IRQF_SHARED, ring->irq_name, ring); 0, ring->irq_name, ring);
if (ret) { if (ret) {
netdev_err(ndev, "Failed to request irq %s\n", netdev_err(ndev, "Failed to request irq %s\n",
ring->irq_name); ring->irq_name);
@ -1127,12 +1127,31 @@ static struct rtnl_link_stats64 *xgene_enet_get_stats64(
{ {
struct xgene_enet_pdata *pdata = netdev_priv(ndev); struct xgene_enet_pdata *pdata = netdev_priv(ndev);
struct rtnl_link_stats64 *stats = &pdata->stats; struct rtnl_link_stats64 *stats = &pdata->stats;
struct xgene_enet_desc_ring *ring;
int i;
stats->rx_errors += stats->rx_length_errors + memset(stats, 0, sizeof(struct rtnl_link_stats64));
stats->rx_crc_errors + for (i = 0; i < pdata->txq_cnt; i++) {
stats->rx_frame_errors + ring = pdata->tx_ring[i];
stats->rx_fifo_errors; if (ring) {
memcpy(storage, &pdata->stats, sizeof(struct rtnl_link_stats64)); stats->tx_packets += ring->tx_packets;
stats->tx_bytes += ring->tx_bytes;
}
}
for (i = 0; i < pdata->rxq_cnt; i++) {
ring = pdata->rx_ring[i];
if (ring) {
stats->rx_packets += ring->rx_packets;
stats->rx_bytes += ring->rx_bytes;
stats->rx_errors += ring->rx_length_errors +
ring->rx_crc_errors +
ring->rx_frame_errors +
ring->rx_fifo_errors;
stats->rx_dropped += ring->rx_dropped;
}
}
memcpy(storage, stats, sizeof(struct rtnl_link_stats64));
return storage; return storage;
} }
@ -1247,6 +1266,13 @@ static int xgene_enet_get_irqs(struct xgene_enet_pdata *pdata)
for (i = 0; i < max_irqs; i++) { for (i = 0; i < max_irqs; i++) {
ret = platform_get_irq(pdev, i); ret = platform_get_irq(pdev, i);
if (ret <= 0) { if (ret <= 0) {
if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
max_irqs = i;
pdata->rxq_cnt = max_irqs / 2;
pdata->txq_cnt = max_irqs / 2;
pdata->cq_cnt = max_irqs / 2;
break;
}
dev_err(dev, "Unable to get ENET IRQ\n"); dev_err(dev, "Unable to get ENET IRQ\n");
ret = ret ? : -ENXIO; ret = ret ? : -ENXIO;
return ret; return ret;
@ -1450,19 +1476,28 @@ static void xgene_enet_setup_ops(struct xgene_enet_pdata *pdata)
pdata->port_ops = &xgene_xgport_ops; pdata->port_ops = &xgene_xgport_ops;
pdata->cle_ops = &xgene_cle3in_ops; pdata->cle_ops = &xgene_cle3in_ops;
pdata->rm = RM0; pdata->rm = RM0;
pdata->rxq_cnt = XGENE_NUM_RX_RING; if (!pdata->rxq_cnt) {
pdata->txq_cnt = XGENE_NUM_TX_RING; pdata->rxq_cnt = XGENE_NUM_RX_RING;
pdata->cq_cnt = XGENE_NUM_TXC_RING; pdata->txq_cnt = XGENE_NUM_TX_RING;
pdata->cq_cnt = XGENE_NUM_TXC_RING;
}
break; break;
} }
if (pdata->enet_id == XGENE_ENET1) { if (pdata->enet_id == XGENE_ENET1) {
switch (pdata->port_id) { switch (pdata->port_id) {
case 0: case 0:
pdata->cpu_bufnum = START_CPU_BUFNUM_0; if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
pdata->eth_bufnum = START_ETH_BUFNUM_0; pdata->cpu_bufnum = X2_START_CPU_BUFNUM_0;
pdata->bp_bufnum = START_BP_BUFNUM_0; pdata->eth_bufnum = X2_START_ETH_BUFNUM_0;
pdata->ring_num = START_RING_NUM_0; pdata->bp_bufnum = X2_START_BP_BUFNUM_0;
pdata->ring_num = START_RING_NUM_0;
} else {
pdata->cpu_bufnum = START_CPU_BUFNUM_0;
pdata->eth_bufnum = START_ETH_BUFNUM_0;
pdata->bp_bufnum = START_BP_BUFNUM_0;
pdata->ring_num = START_RING_NUM_0;
}
break; break;
case 1: case 1:
if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) { if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {

View file

@ -49,10 +49,10 @@
#define XGENE_ENET_MSS 1448 #define XGENE_ENET_MSS 1448
#define XGENE_MIN_ENET_FRAME_SIZE 60 #define XGENE_MIN_ENET_FRAME_SIZE 60
#define XGENE_MAX_ENET_IRQ 8 #define XGENE_MAX_ENET_IRQ 16
#define XGENE_NUM_RX_RING 4 #define XGENE_NUM_RX_RING 8
#define XGENE_NUM_TX_RING 4 #define XGENE_NUM_TX_RING 8
#define XGENE_NUM_TXC_RING 4 #define XGENE_NUM_TXC_RING 8
#define START_CPU_BUFNUM_0 0 #define START_CPU_BUFNUM_0 0
#define START_ETH_BUFNUM_0 2 #define START_ETH_BUFNUM_0 2
@ -121,6 +121,16 @@ struct xgene_enet_desc_ring {
struct xgene_enet_raw_desc16 *raw_desc16; struct xgene_enet_raw_desc16 *raw_desc16;
}; };
__le64 *exp_bufs; __le64 *exp_bufs;
u64 tx_packets;
u64 tx_bytes;
u64 rx_packets;
u64 rx_bytes;
u64 rx_dropped;
u64 rx_errors;
u64 rx_length_errors;
u64 rx_crc_errors;
u64 rx_frame_errors;
u64 rx_fifo_errors;
}; };
struct xgene_mac_ops { struct xgene_mac_ops {

View file

@@ -33,7 +33,7 @@
 #define LINK_STATUS			BIT(2)
 #define LINK_UP				BIT(15)
 #define MPA_IDLE_WITH_QMI_EMPTY		BIT(12)
-#define SG_RX_DV_GATE_REG_0_ADDR	0x0dfc
+#define SG_RX_DV_GATE_REG_0_ADDR	0x05fc
 
 extern const struct xgene_mac_ops xgene_sgmac_ops;
 extern const struct xgene_port_ops xgene_sgport_ops;

View file

@ -820,6 +820,46 @@ static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
return skb; return skb;
} }
static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_napi *bnapi,
u32 *raw_cons, void *cmp)
{
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
struct rx_cmp *rxcmp = cmp;
u32 tmp_raw_cons = *raw_cons;
u8 cmp_type, agg_bufs = 0;
cmp_type = RX_CMP_TYPE(rxcmp);
if (cmp_type == CMP_TYPE_RX_L2_CMP) {
agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) &
RX_CMP_AGG_BUFS) >>
RX_CMP_AGG_BUFS_SHIFT;
} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
struct rx_tpa_end_cmp *tpa_end = cmp;
agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
RX_TPA_END_CMP_AGG_BUFS) >>
RX_TPA_END_CMP_AGG_BUFS_SHIFT;
}
if (agg_bufs) {
if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
return -EBUSY;
}
*raw_cons = tmp_raw_cons;
return 0;
}
static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
{
if (!rxr->bnapi->in_reset) {
rxr->bnapi->in_reset = true;
set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
schedule_work(&bp->sp_task);
}
rxr->rx_next_cons = 0xffff;
}
static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
struct rx_tpa_start_cmp *tpa_start, struct rx_tpa_start_cmp *tpa_start,
struct rx_tpa_start_cmp_ext *tpa_start1) struct rx_tpa_start_cmp_ext *tpa_start1)
@ -837,6 +877,11 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
prod_rx_buf = &rxr->rx_buf_ring[prod]; prod_rx_buf = &rxr->rx_buf_ring[prod];
tpa_info = &rxr->rx_tpa[agg_id]; tpa_info = &rxr->rx_tpa[agg_id];
if (unlikely(cons != rxr->rx_next_cons)) {
bnxt_sched_reset(bp, rxr);
return;
}
prod_rx_buf->data = tpa_info->data; prod_rx_buf->data = tpa_info->data;
mapping = tpa_info->mapping; mapping = tpa_info->mapping;
@ -874,6 +919,7 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
rxr->rx_prod = NEXT_RX(prod); rxr->rx_prod = NEXT_RX(prod);
cons = NEXT_RX(cons); cons = NEXT_RX(cons);
rxr->rx_next_cons = NEXT_RX(cons);
cons_rx_buf = &rxr->rx_buf_ring[cons]; cons_rx_buf = &rxr->rx_buf_ring[cons];
bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data); bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
@ -987,6 +1033,14 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
dma_addr_t mapping; dma_addr_t mapping;
struct sk_buff *skb; struct sk_buff *skb;
if (unlikely(bnapi->in_reset)) {
int rc = bnxt_discard_rx(bp, bnapi, raw_cons, tpa_end);
if (rc < 0)
return ERR_PTR(-EBUSY);
return NULL;
}
tpa_info = &rxr->rx_tpa[agg_id]; tpa_info = &rxr->rx_tpa[agg_id];
data = tpa_info->data; data = tpa_info->data;
prefetch(data); prefetch(data);
@ -1153,6 +1207,12 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
cons = rxcmp->rx_cmp_opaque; cons = rxcmp->rx_cmp_opaque;
rx_buf = &rxr->rx_buf_ring[cons]; rx_buf = &rxr->rx_buf_ring[cons];
data = rx_buf->data; data = rx_buf->data;
if (unlikely(cons != rxr->rx_next_cons)) {
int rc1 = bnxt_discard_rx(bp, bnapi, raw_cons, rxcmp);
bnxt_sched_reset(bp, rxr);
return rc1;
}
prefetch(data); prefetch(data);
agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) & RX_CMP_AGG_BUFS) >> agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) & RX_CMP_AGG_BUFS) >>
@ -1252,6 +1312,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
next_rx: next_rx:
rxr->rx_prod = NEXT_RX(prod); rxr->rx_prod = NEXT_RX(prod);
rxr->rx_next_cons = NEXT_RX(cons);
next_rx_no_prod: next_rx_no_prod:
*raw_cons = tmp_raw_cons; *raw_cons = tmp_raw_cons;
@ -2537,6 +2598,7 @@ static void bnxt_clear_ring_indices(struct bnxt *bp)
rxr->rx_prod = 0; rxr->rx_prod = 0;
rxr->rx_agg_prod = 0; rxr->rx_agg_prod = 0;
rxr->rx_sw_agg_prod = 0; rxr->rx_sw_agg_prod = 0;
rxr->rx_next_cons = 0;
} }
} }
} }
@ -4520,6 +4582,7 @@ static void bnxt_enable_napi(struct bnxt *bp)
int i; int i;
for (i = 0; i < bp->cp_nr_rings; i++) { for (i = 0; i < bp->cp_nr_rings; i++) {
bp->bnapi[i]->in_reset = false;
bnxt_enable_poll(bp->bnapi[i]); bnxt_enable_poll(bp->bnapi[i]);
napi_enable(&bp->bnapi[i]->napi); napi_enable(&bp->bnapi[i]->napi);
} }

View file

@ -584,6 +584,7 @@ struct bnxt_rx_ring_info {
u16 rx_prod; u16 rx_prod;
u16 rx_agg_prod; u16 rx_agg_prod;
u16 rx_sw_agg_prod; u16 rx_sw_agg_prod;
u16 rx_next_cons;
void __iomem *rx_doorbell; void __iomem *rx_doorbell;
void __iomem *rx_agg_doorbell; void __iomem *rx_agg_doorbell;
@ -636,6 +637,7 @@ struct bnxt_napi {
#ifdef CONFIG_NET_RX_BUSY_POLL #ifdef CONFIG_NET_RX_BUSY_POLL
atomic_t poll_state; atomic_t poll_state;
#endif #endif
bool in_reset;
}; };
#ifdef CONFIG_NET_RX_BUSY_POLL #ifdef CONFIG_NET_RX_BUSY_POLL

View file

@ -533,6 +533,7 @@ static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
nicvf_config_vlan_stripping(nic, nic->netdev->features); nicvf_config_vlan_stripping(nic, nic->netdev->features);
/* Enable Receive queue */ /* Enable Receive queue */
memset(&rq_cfg, 0, sizeof(struct rq_cfg));
rq_cfg.ena = 1; rq_cfg.ena = 1;
rq_cfg.tcp_ena = 0; rq_cfg.tcp_ena = 0;
nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, *(u64 *)&rq_cfg); nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, *(u64 *)&rq_cfg);
@ -565,6 +566,7 @@ void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,
qidx, (u64)(cq->dmem.phys_base)); qidx, (u64)(cq->dmem.phys_base));
/* Enable Completion queue */ /* Enable Completion queue */
memset(&cq_cfg, 0, sizeof(struct cq_cfg));
cq_cfg.ena = 1; cq_cfg.ena = 1;
cq_cfg.reset = 0; cq_cfg.reset = 0;
cq_cfg.caching = 0; cq_cfg.caching = 0;
@ -613,6 +615,7 @@ static void nicvf_snd_queue_config(struct nicvf *nic, struct queue_set *qs,
qidx, (u64)(sq->dmem.phys_base)); qidx, (u64)(sq->dmem.phys_base));
/* Enable send queue & set queue size */ /* Enable send queue & set queue size */
memset(&sq_cfg, 0, sizeof(struct sq_cfg));
sq_cfg.ena = 1; sq_cfg.ena = 1;
sq_cfg.reset = 0; sq_cfg.reset = 0;
sq_cfg.ldwb = 0; sq_cfg.ldwb = 0;
@ -649,6 +652,7 @@ static void nicvf_rbdr_config(struct nicvf *nic, struct queue_set *qs,
/* Enable RBDR & set queue size */ /* Enable RBDR & set queue size */
/* Buffer size should be in multiples of 128 bytes */ /* Buffer size should be in multiples of 128 bytes */
memset(&rbdr_cfg, 0, sizeof(struct rbdr_cfg));
rbdr_cfg.ena = 1; rbdr_cfg.ena = 1;
rbdr_cfg.reset = 0; rbdr_cfg.reset = 0;
rbdr_cfg.ldwb = 0; rbdr_cfg.ldwb = 0;

View file

@ -145,7 +145,7 @@ static void nps_enet_tx_handler(struct net_device *ndev)
u32 tx_ctrl_nt = (tx_ctrl_value & TX_CTL_NT_MASK) >> TX_CTL_NT_SHIFT; u32 tx_ctrl_nt = (tx_ctrl_value & TX_CTL_NT_MASK) >> TX_CTL_NT_SHIFT;
/* Check if we got TX */ /* Check if we got TX */
if (!priv->tx_packet_sent || tx_ctrl_ct) if (!priv->tx_skb || tx_ctrl_ct)
return; return;
/* Ack Tx ctrl register */ /* Ack Tx ctrl register */
@ -160,7 +160,7 @@ static void nps_enet_tx_handler(struct net_device *ndev)
} }
dev_kfree_skb(priv->tx_skb); dev_kfree_skb(priv->tx_skb);
priv->tx_packet_sent = false; priv->tx_skb = NULL;
if (netif_queue_stopped(ndev)) if (netif_queue_stopped(ndev))
netif_wake_queue(ndev); netif_wake_queue(ndev);
@ -183,6 +183,9 @@ static int nps_enet_poll(struct napi_struct *napi, int budget)
work_done = nps_enet_rx_handler(ndev); work_done = nps_enet_rx_handler(ndev);
if (work_done < budget) { if (work_done < budget) {
u32 buf_int_enable_value = 0; u32 buf_int_enable_value = 0;
u32 tx_ctrl_value = nps_enet_reg_get(priv, NPS_ENET_REG_TX_CTL);
u32 tx_ctrl_ct =
(tx_ctrl_value & TX_CTL_CT_MASK) >> TX_CTL_CT_SHIFT;
napi_complete(napi); napi_complete(napi);
@ -192,6 +195,18 @@ static int nps_enet_poll(struct napi_struct *napi, int budget)
nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE, nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE,
buf_int_enable_value); buf_int_enable_value);
/* in case we will get a tx interrupt while interrupts
* are masked, we will lose it since the tx is edge interrupt.
* specifically, while executing the code section above,
* between nps_enet_tx_handler and the interrupts enable, all
* tx requests will be stuck until we will get an rx interrupt.
* the two code lines below will solve this situation by
* re-adding ourselves to the poll list.
*/
if (priv->tx_skb && !tx_ctrl_ct)
napi_reschedule(napi);
} }
return work_done; return work_done;
@ -217,7 +232,7 @@ static irqreturn_t nps_enet_irq_handler(s32 irq, void *dev_instance)
u32 tx_ctrl_ct = (tx_ctrl_value & TX_CTL_CT_MASK) >> TX_CTL_CT_SHIFT; u32 tx_ctrl_ct = (tx_ctrl_value & TX_CTL_CT_MASK) >> TX_CTL_CT_SHIFT;
u32 rx_ctrl_cr = (rx_ctrl_value & RX_CTL_CR_MASK) >> RX_CTL_CR_SHIFT; u32 rx_ctrl_cr = (rx_ctrl_value & RX_CTL_CR_MASK) >> RX_CTL_CR_SHIFT;
if ((!tx_ctrl_ct && priv->tx_packet_sent) || rx_ctrl_cr) if ((!tx_ctrl_ct && priv->tx_skb) || rx_ctrl_cr)
if (likely(napi_schedule_prep(&priv->napi))) { if (likely(napi_schedule_prep(&priv->napi))) {
nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE, 0); nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE, 0);
__napi_schedule(&priv->napi); __napi_schedule(&priv->napi);
@ -387,8 +402,6 @@ static void nps_enet_send_frame(struct net_device *ndev,
/* Write the length of the Frame */ /* Write the length of the Frame */
tx_ctrl_value |= length << TX_CTL_NT_SHIFT; tx_ctrl_value |= length << TX_CTL_NT_SHIFT;
/* Indicate SW is done */
priv->tx_packet_sent = true;
tx_ctrl_value |= NPS_ENET_ENABLE << TX_CTL_CT_SHIFT; tx_ctrl_value |= NPS_ENET_ENABLE << TX_CTL_CT_SHIFT;
/* Send Frame */ /* Send Frame */
nps_enet_reg_set(priv, NPS_ENET_REG_TX_CTL, tx_ctrl_value); nps_enet_reg_set(priv, NPS_ENET_REG_TX_CTL, tx_ctrl_value);
@ -465,7 +478,7 @@ static s32 nps_enet_open(struct net_device *ndev)
s32 err; s32 err;
/* Reset private variables */ /* Reset private variables */
priv->tx_packet_sent = false; priv->tx_skb = NULL;
priv->ge_mac_cfg_2_value = 0; priv->ge_mac_cfg_2_value = 0;
priv->ge_mac_cfg_3_value = 0; priv->ge_mac_cfg_3_value = 0;
@ -534,6 +547,11 @@ static netdev_tx_t nps_enet_start_xmit(struct sk_buff *skb,
priv->tx_skb = skb; priv->tx_skb = skb;
/* make sure tx_skb is actually written to the memory
* before the HW is informed and the IRQ is fired.
*/
wmb();
nps_enet_send_frame(ndev, skb); nps_enet_send_frame(ndev, skb);
return NETDEV_TX_OK; return NETDEV_TX_OK;


@@ -165,14 +165,12 @@
  * struct nps_enet_priv - Storage of ENET's private information.
  * @regs_base: Base address of ENET memory-mapped control registers.
  * @irq: For RX/TX IRQ number.
- * @tx_packet_sent: SW indication if frame is being sent.
  * @tx_skb: socket buffer of sent frame.
  * @napi: Structure for NAPI.
  */
  struct nps_enet_priv {
  void __iomem *regs_base;
  s32 irq;
- bool tx_packet_sent;
  struct sk_buff *tx_skb;
  struct napi_struct napi;
  u32 ge_mac_cfg_2_value;


@@ -68,7 +68,7 @@ config MVNETA
  config MVNETA_BM
  tristate
- default y if MVNETA=y && MVNETA_BM_ENABLE
+ default y if MVNETA=y && MVNETA_BM_ENABLE!=n
  default MVNETA_BM_ENABLE
  select HWBM
  help


@@ -1417,6 +1417,7 @@ void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *adapter)
  struct qlcnic_fw_dump *fw_dump = &ahw->fw_dump;
  struct pci_dev *pdev = adapter->pdev;
  bool extended = false;
+ int ret;
  prev_version = adapter->fw_version;
  current_version = qlcnic_83xx_get_fw_version(adapter);
@@ -1427,8 +1428,11 @@ void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *adapter)
  if (qlcnic_83xx_md_check_extended_dump_capability(adapter))
  extended = !qlcnic_83xx_extend_md_capab(adapter);
- if (!qlcnic_fw_cmd_get_minidump_temp(adapter))
-  dev_info(&pdev->dev, "Supports FW dump capability\n");
+ ret = qlcnic_fw_cmd_get_minidump_temp(adapter);
+ if (ret)
+  return;
+
+ dev_info(&pdev->dev, "Supports FW dump capability\n");
  /* Once we have minidump template with extended iSCSI dump
   * capability, update the minidump capture mask to 0x1f as


@@ -1667,6 +1667,8 @@ static int ravb_close(struct net_device *ndev)
  priv->phydev = NULL;
  }
+ if (priv->chip_id == RCAR_GEN3)
+  free_irq(priv->emac_irq, ndev);
  free_irq(ndev->irq, ndev);
  napi_disable(&priv->napi[RAVB_NC]);


@@ -871,9 +871,11 @@ void phy_start(struct phy_device *phydev)
  break;
  case PHY_HALTED:
  /* make sure interrupts are re-enabled for the PHY */
- err = phy_enable_interrupts(phydev);
- if (err < 0)
-  break;
+ if (phydev->irq != PHY_POLL) {
+  err = phy_enable_interrupts(phydev);
+  if (err < 0)
+   break;
+ }
  phydev->state = PHY_RESUMING;
  do_resume = true;


@ -442,10 +442,11 @@ static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
*/ */
static struct iwl_device_cmd * static struct iwl_device_cmd *
iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb, iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
int hdrlen, struct ieee80211_sta *sta, u8 sta_id) struct ieee80211_tx_info *info, int hdrlen,
struct ieee80211_sta *sta, u8 sta_id)
{ {
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb);
struct iwl_device_cmd *dev_cmd; struct iwl_device_cmd *dev_cmd;
struct iwl_tx_cmd *tx_cmd; struct iwl_tx_cmd *tx_cmd;
@ -465,10 +466,10 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
iwl_mvm_set_tx_cmd_rate(mvm, tx_cmd, info, sta, hdr->frame_control); iwl_mvm_set_tx_cmd_rate(mvm, tx_cmd, info, sta, hdr->frame_control);
memset(&info->status, 0, sizeof(info->status)); memset(&skb_info->status, 0, sizeof(skb_info->status));
memset(info->driver_data, 0, sizeof(info->driver_data)); memset(skb_info->driver_data, 0, sizeof(skb_info->driver_data));
info->driver_data[1] = dev_cmd; skb_info->driver_data[1] = dev_cmd;
return dev_cmd; return dev_cmd;
} }
@ -476,22 +477,25 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
{ {
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb);
struct ieee80211_tx_info info;
struct iwl_device_cmd *dev_cmd; struct iwl_device_cmd *dev_cmd;
struct iwl_tx_cmd *tx_cmd; struct iwl_tx_cmd *tx_cmd;
u8 sta_id; u8 sta_id;
int hdrlen = ieee80211_hdrlen(hdr->frame_control); int hdrlen = ieee80211_hdrlen(hdr->frame_control);
if (WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_AMPDU)) memcpy(&info, skb->cb, sizeof(info));
if (WARN_ON_ONCE(info.flags & IEEE80211_TX_CTL_AMPDU))
return -1; return -1;
if (WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM && if (WARN_ON_ONCE(info.flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM &&
(!info->control.vif || (!info.control.vif ||
info->hw_queue != info->control.vif->cab_queue))) info.hw_queue != info.control.vif->cab_queue)))
return -1; return -1;
/* This holds the amsdu headers length */ /* This holds the amsdu headers length */
info->driver_data[0] = (void *)(uintptr_t)0; skb_info->driver_data[0] = (void *)(uintptr_t)0;
/* /*
* IWL_MVM_OFFCHANNEL_QUEUE is used for ROC packets that can be used * IWL_MVM_OFFCHANNEL_QUEUE is used for ROC packets that can be used
@ -500,7 +504,7 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
* and hence needs to be sent on the aux queue * and hence needs to be sent on the aux queue
*/ */
if (IEEE80211_SKB_CB(skb)->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE && if (IEEE80211_SKB_CB(skb)->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE &&
info->control.vif->type == NL80211_IFTYPE_STATION) info.control.vif->type == NL80211_IFTYPE_STATION)
IEEE80211_SKB_CB(skb)->hw_queue = mvm->aux_queue; IEEE80211_SKB_CB(skb)->hw_queue = mvm->aux_queue;
/* /*
@ -513,14 +517,14 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
* AUX station. * AUX station.
*/ */
sta_id = mvm->aux_sta.sta_id; sta_id = mvm->aux_sta.sta_id;
if (info->control.vif) { if (info.control.vif) {
struct iwl_mvm_vif *mvmvif = struct iwl_mvm_vif *mvmvif =
iwl_mvm_vif_from_mac80211(info->control.vif); iwl_mvm_vif_from_mac80211(info.control.vif);
if (info->control.vif->type == NL80211_IFTYPE_P2P_DEVICE || if (info.control.vif->type == NL80211_IFTYPE_P2P_DEVICE ||
info->control.vif->type == NL80211_IFTYPE_AP) info.control.vif->type == NL80211_IFTYPE_AP)
sta_id = mvmvif->bcast_sta.sta_id; sta_id = mvmvif->bcast_sta.sta_id;
else if (info->control.vif->type == NL80211_IFTYPE_STATION && else if (info.control.vif->type == NL80211_IFTYPE_STATION &&
is_multicast_ether_addr(hdr->addr1)) { is_multicast_ether_addr(hdr->addr1)) {
u8 ap_sta_id = ACCESS_ONCE(mvmvif->ap_sta_id); u8 ap_sta_id = ACCESS_ONCE(mvmvif->ap_sta_id);
@ -529,19 +533,18 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
} }
} }
IWL_DEBUG_TX(mvm, "station Id %d, queue=%d\n", sta_id, info->hw_queue); IWL_DEBUG_TX(mvm, "station Id %d, queue=%d\n", sta_id, info.hw_queue);
dev_cmd = iwl_mvm_set_tx_params(mvm, skb, hdrlen, NULL, sta_id); dev_cmd = iwl_mvm_set_tx_params(mvm, skb, &info, hdrlen, NULL, sta_id);
if (!dev_cmd) if (!dev_cmd)
return -1; return -1;
/* From now on, we cannot access info->control */
tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload; tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
/* Copy MAC header from skb into command buffer */ /* Copy MAC header from skb into command buffer */
memcpy(tx_cmd->hdr, hdr, hdrlen); memcpy(tx_cmd->hdr, hdr, hdrlen);
if (iwl_trans_tx(mvm->trans, skb, dev_cmd, info->hw_queue)) { if (iwl_trans_tx(mvm->trans, skb, dev_cmd, info.hw_queue)) {
iwl_trans_free_tx_cmd(mvm->trans, dev_cmd); iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
return -1; return -1;
} }
@ -560,11 +563,11 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
#ifdef CONFIG_INET #ifdef CONFIG_INET
static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb, static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
struct ieee80211_tx_info *info,
struct ieee80211_sta *sta, struct ieee80211_sta *sta,
struct sk_buff_head *mpdus_skb) struct sk_buff_head *mpdus_skb)
{ {
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_hdr *hdr = (void *)skb->data; struct ieee80211_hdr *hdr = (void *)skb->data;
unsigned int mss = skb_shinfo(skb)->gso_size; unsigned int mss = skb_shinfo(skb)->gso_size;
struct sk_buff *tmp, *next; struct sk_buff *tmp, *next;
@ -673,6 +676,8 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
/* This skb fits in one single A-MSDU */ /* This skb fits in one single A-MSDU */
if (num_subframes * mss >= tcp_payload_len) { if (num_subframes * mss >= tcp_payload_len) {
struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb);
/* /*
* Compute the length of all the data added for the A-MSDU. * Compute the length of all the data added for the A-MSDU.
* This will be used to compute the length to write in the TX * This will be used to compute the length to write in the TX
@ -681,11 +686,10 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
* already had one set of SNAP / IP / TCP headers. * already had one set of SNAP / IP / TCP headers.
*/ */
num_subframes = DIV_ROUND_UP(tcp_payload_len, mss); num_subframes = DIV_ROUND_UP(tcp_payload_len, mss);
info = IEEE80211_SKB_CB(skb);
amsdu_add = num_subframes * sizeof(struct ethhdr) + amsdu_add = num_subframes * sizeof(struct ethhdr) +
(num_subframes - 1) * (snap_ip_tcp + pad); (num_subframes - 1) * (snap_ip_tcp + pad);
/* This holds the amsdu headers length */ /* This holds the amsdu headers length */
info->driver_data[0] = (void *)(uintptr_t)amsdu_add; skb_info->driver_data[0] = (void *)(uintptr_t)amsdu_add;
__skb_queue_tail(mpdus_skb, skb); __skb_queue_tail(mpdus_skb, skb);
return 0; return 0;
@ -725,11 +729,14 @@ segment:
ip_hdr(tmp)->id = htons(ip_base_id + i * num_subframes); ip_hdr(tmp)->id = htons(ip_base_id + i * num_subframes);
if (tcp_payload_len > mss) { if (tcp_payload_len > mss) {
struct ieee80211_tx_info *skb_info =
IEEE80211_SKB_CB(tmp);
num_subframes = DIV_ROUND_UP(tcp_payload_len, mss); num_subframes = DIV_ROUND_UP(tcp_payload_len, mss);
info = IEEE80211_SKB_CB(tmp);
amsdu_add = num_subframes * sizeof(struct ethhdr) + amsdu_add = num_subframes * sizeof(struct ethhdr) +
(num_subframes - 1) * (snap_ip_tcp + pad); (num_subframes - 1) * (snap_ip_tcp + pad);
info->driver_data[0] = (void *)(uintptr_t)amsdu_add; skb_info->driver_data[0] =
(void *)(uintptr_t)amsdu_add;
skb_shinfo(tmp)->gso_size = mss; skb_shinfo(tmp)->gso_size = mss;
} else { } else {
qc = ieee80211_get_qos_ctl((void *)tmp->data); qc = ieee80211_get_qos_ctl((void *)tmp->data);
@ -751,6 +758,7 @@ segment:
} }
#else /* CONFIG_INET */ #else /* CONFIG_INET */
static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb, static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
struct ieee80211_tx_info *info,
struct ieee80211_sta *sta, struct ieee80211_sta *sta,
struct sk_buff_head *mpdus_skb) struct sk_buff_head *mpdus_skb)
{ {
@ -794,10 +802,10 @@ static void iwl_mvm_tx_add_stream(struct iwl_mvm *mvm,
* Sets the fields in the Tx cmd that are crypto related * Sets the fields in the Tx cmd that are crypto related
*/ */
static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
struct ieee80211_tx_info *info,
struct ieee80211_sta *sta) struct ieee80211_sta *sta)
{ {
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct iwl_mvm_sta *mvmsta; struct iwl_mvm_sta *mvmsta;
struct iwl_device_cmd *dev_cmd; struct iwl_device_cmd *dev_cmd;
struct iwl_tx_cmd *tx_cmd; struct iwl_tx_cmd *tx_cmd;
@ -818,7 +826,8 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_STATION_COUNT)) if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_STATION_COUNT))
return -1; return -1;
dev_cmd = iwl_mvm_set_tx_params(mvm, skb, hdrlen, sta, mvmsta->sta_id); dev_cmd = iwl_mvm_set_tx_params(mvm, skb, info, hdrlen,
sta, mvmsta->sta_id);
if (!dev_cmd) if (!dev_cmd)
goto drop; goto drop;
@ -918,7 +927,8 @@ int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
struct ieee80211_sta *sta) struct ieee80211_sta *sta)
{ {
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb);
struct ieee80211_tx_info info;
struct sk_buff_head mpdus_skbs; struct sk_buff_head mpdus_skbs;
unsigned int payload_len; unsigned int payload_len;
int ret; int ret;
@ -929,21 +939,23 @@ int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_STATION_COUNT)) if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_STATION_COUNT))
return -1; return -1;
memcpy(&info, skb->cb, sizeof(info));
/* This holds the amsdu headers length */ /* This holds the amsdu headers length */
info->driver_data[0] = (void *)(uintptr_t)0; skb_info->driver_data[0] = (void *)(uintptr_t)0;
if (!skb_is_gso(skb)) if (!skb_is_gso(skb))
return iwl_mvm_tx_mpdu(mvm, skb, sta); return iwl_mvm_tx_mpdu(mvm, skb, &info, sta);
payload_len = skb_tail_pointer(skb) - skb_transport_header(skb) - payload_len = skb_tail_pointer(skb) - skb_transport_header(skb) -
tcp_hdrlen(skb) + skb->data_len; tcp_hdrlen(skb) + skb->data_len;
if (payload_len <= skb_shinfo(skb)->gso_size) if (payload_len <= skb_shinfo(skb)->gso_size)
return iwl_mvm_tx_mpdu(mvm, skb, sta); return iwl_mvm_tx_mpdu(mvm, skb, &info, sta);
__skb_queue_head_init(&mpdus_skbs); __skb_queue_head_init(&mpdus_skbs);
ret = iwl_mvm_tx_tso(mvm, skb, sta, &mpdus_skbs); ret = iwl_mvm_tx_tso(mvm, skb, &info, sta, &mpdus_skbs);
if (ret) if (ret)
return ret; return ret;
@ -953,7 +965,7 @@ int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
while (!skb_queue_empty(&mpdus_skbs)) { while (!skb_queue_empty(&mpdus_skbs)) {
skb = __skb_dequeue(&mpdus_skbs); skb = __skb_dequeue(&mpdus_skbs);
ret = iwl_mvm_tx_mpdu(mvm, skb, sta); ret = iwl_mvm_tx_mpdu(mvm, skb, &info, sta);
if (ret) { if (ret) {
__skb_queue_purge(&mpdus_skbs); __skb_queue_purge(&mpdus_skbs);
return ret; return ret;


@@ -711,6 +711,7 @@ static void xenvif_tx_err(struct xenvif_queue *queue,
  if (cons == end)
  break;
  RING_COPY_REQUEST(&queue->tx, cons++, txp);
+ extra_count = 0; /* only the first frag can have extras */
  } while (1);
  queue->tx.req_cons = cons;
  }


@@ -402,9 +402,9 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
  /*
  * vmemmap_populate_hugepages() allocates the memmap array in
- * HPAGE_SIZE chunks.
+ * PMD_SIZE chunks.
  */
- memmap_size = ALIGN(64 * npfns, HPAGE_SIZE);
+ memmap_size = ALIGN(64 * npfns, PMD_SIZE);
  offset = ALIGN(start + SZ_8K + memmap_size, nd_pfn->align)
  - start;
  } else if (nd_pfn->mode == PFN_MODE_RAM)


@@ -294,7 +294,7 @@ void pci_bus_add_device(struct pci_dev *dev)
  dev->match_driver = true;
  retval = device_attach(&dev->dev);
- if (retval < 0) {
+ if (retval < 0 && retval != -EPROBE_DEFER) {
  dev_warn(&dev->dev, "device attach failed (%d)\n", retval);
  pci_proc_detach_device(dev);
  pci_remove_sysfs_dev_files(dev);
@@ -324,7 +324,9 @@ void pci_bus_add_devices(const struct pci_bus *bus)
  }
  list_for_each_entry(dev, &bus->devices, bus_list) {
- BUG_ON(!dev->is_added);
+ /* Skip if device attach failed */
+ if (!dev->is_added)
+  continue;
  child = dev->subordinate;
  if (child)
  pci_bus_add_devices(child);


@@ -722,9 +722,11 @@ static int atmel_conf_pin_config_group_set(struct pinctrl_dev *pctldev,
  break;
  case PIN_CONFIG_BIAS_PULL_UP:
  conf |= ATMEL_PIO_PUEN_MASK;
+ conf &= (~ATMEL_PIO_PDEN_MASK);
  break;
  case PIN_CONFIG_BIAS_PULL_DOWN:
  conf |= ATMEL_PIO_PDEN_MASK;
+ conf &= (~ATMEL_PIO_PUEN_MASK);
  break;
  case PIN_CONFIG_DRIVE_OPEN_DRAIN:
  if (arg == 0)


@@ -157,7 +157,9 @@ static struct regulator_ops axp20x_ops_sw = {
  static const struct regulator_linear_range axp20x_ldo4_ranges[] = {
  REGULATOR_LINEAR_RANGE(1250000, 0x0, 0x0, 0),
  REGULATOR_LINEAR_RANGE(1300000, 0x1, 0x8, 100000),
- REGULATOR_LINEAR_RANGE(2500000, 0x9, 0xf, 100000),
+ REGULATOR_LINEAR_RANGE(2500000, 0x9, 0x9, 0),
+ REGULATOR_LINEAR_RANGE(2700000, 0xa, 0xb, 100000),
+ REGULATOR_LINEAR_RANGE(3000000, 0xc, 0xf, 100000),
  };
  static const struct regulator_desc axp20x_regulators[] = {
@@ -215,10 +217,14 @@ static const struct regulator_desc axp22x_regulators[] = {
  AXP22X_ELDO2_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(1)),
  AXP_DESC(AXP22X, ELDO3, "eldo3", "eldoin", 700, 3300, 100,
  AXP22X_ELDO3_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(2)),
- AXP_DESC_IO(AXP22X, LDO_IO0, "ldo_io0", "ips", 1800, 3300, 100,
+ /* Note the datasheet only guarantees reliable operation up to
+  * 3.3V, this needs to be enforced via dts provided constraints */
+ AXP_DESC_IO(AXP22X, LDO_IO0, "ldo_io0", "ips", 700, 3800, 100,
  AXP22X_LDO_IO0_V_OUT, 0x1f, AXP20X_GPIO0_CTRL, 0x07,
  AXP22X_IO_ENABLED, AXP22X_IO_DISABLED),
- AXP_DESC_IO(AXP22X, LDO_IO1, "ldo_io1", "ips", 1800, 3300, 100,
+ /* Note the datasheet only guarantees reliable operation up to
+  * 3.3V, this needs to be enforced via dts provided constraints */
+ AXP_DESC_IO(AXP22X, LDO_IO1, "ldo_io1", "ips", 700, 3800, 100,
  AXP22X_LDO_IO1_V_OUT, 0x1f, AXP20X_GPIO1_CTRL, 0x07,
  AXP22X_IO_ENABLED, AXP22X_IO_DISABLED),
  AXP_DESC_FIXED(AXP22X, RTC_LDO, "rtc_ldo", "ips", 3000),


@@ -900,4 +900,4 @@ module_exit(da9063_regulator_cleanup);
  MODULE_AUTHOR("Krystian Garbaciak <krystian.garbaciak@diasemi.com>");
  MODULE_DESCRIPTION("DA9063 regulators driver");
  MODULE_LICENSE("GPL");
- MODULE_ALIAS("paltform:" DA9063_DRVNAME_REGULATORS);
+ MODULE_ALIAS("platform:" DA9063_DRVNAME_REGULATORS);


@@ -162,6 +162,8 @@ of_get_gpio_regulator_config(struct device *dev, struct device_node *np,
  of_property_read_u32(np, "startup-delay-us", &config->startup_delay);
  config->enable_gpio = of_get_named_gpio(np, "enable-gpio", 0);
+ if (config->enable_gpio == -EPROBE_DEFER)
+  return ERR_PTR(-EPROBE_DEFER);
  /* Fetch GPIOs. - optional property*/
  ret = of_gpio_count(np);


@@ -308,7 +308,7 @@ static struct regulator_ops s2mps11_buck_ops = {
  .enable_mask = S2MPS11_ENABLE_MASK \
  }
- #define regulator_desc_s2mps11_buck6_10(num, min, step) { \
+ #define regulator_desc_s2mps11_buck67810(num, min, step) { \
  .name = "BUCK"#num, \
  .id = S2MPS11_BUCK##num, \
  .ops = &s2mps11_buck_ops, \
@@ -324,6 +324,22 @@ static struct regulator_ops s2mps11_buck_ops = {
  .enable_mask = S2MPS11_ENABLE_MASK \
  }
+ #define regulator_desc_s2mps11_buck9 { \
+  .name = "BUCK9", \
+  .id = S2MPS11_BUCK9, \
+  .ops = &s2mps11_buck_ops, \
+  .type = REGULATOR_VOLTAGE, \
+  .owner = THIS_MODULE, \
+  .min_uV = MIN_3000_MV, \
+  .uV_step = STEP_25_MV, \
+  .n_voltages = S2MPS11_BUCK9_N_VOLTAGES, \
+  .ramp_delay = S2MPS11_RAMP_DELAY, \
+  .vsel_reg = S2MPS11_REG_B9CTRL2, \
+  .vsel_mask = S2MPS11_BUCK9_VSEL_MASK, \
+  .enable_reg = S2MPS11_REG_B9CTRL1, \
+  .enable_mask = S2MPS11_ENABLE_MASK \
+ }
+
  static const struct regulator_desc s2mps11_regulators[] = {
  regulator_desc_s2mps11_ldo(1, STEP_25_MV),
  regulator_desc_s2mps11_ldo(2, STEP_50_MV),
@@ -368,11 +384,11 @@ static const struct regulator_desc s2mps11_regulators[] = {
  regulator_desc_s2mps11_buck1_4(3),
  regulator_desc_s2mps11_buck1_4(4),
  regulator_desc_s2mps11_buck5,
- regulator_desc_s2mps11_buck6_10(6, MIN_600_MV, STEP_6_25_MV),
- regulator_desc_s2mps11_buck6_10(7, MIN_600_MV, STEP_6_25_MV),
- regulator_desc_s2mps11_buck6_10(8, MIN_600_MV, STEP_6_25_MV),
- regulator_desc_s2mps11_buck6_10(9, MIN_3000_MV, STEP_25_MV),
- regulator_desc_s2mps11_buck6_10(10, MIN_750_MV, STEP_12_5_MV),
+ regulator_desc_s2mps11_buck67810(6, MIN_600_MV, STEP_6_25_MV),
+ regulator_desc_s2mps11_buck67810(7, MIN_600_MV, STEP_6_25_MV),
+ regulator_desc_s2mps11_buck67810(8, MIN_600_MV, STEP_6_25_MV),
+ regulator_desc_s2mps11_buck9,
+ regulator_desc_s2mps11_buck67810(10, MIN_750_MV, STEP_12_5_MV),
  };
  static struct regulator_ops s2mps14_reg_ops;


@@ -532,6 +532,7 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
  return SCSI_DH_DEV_TEMP_BUSY;
  retry:
+ err = 0;
  retval = submit_rtpg(sdev, buff, bufflen, &sense_hdr, pg->flags);
  if (retval) {


@@ -4214,7 +4214,7 @@ static struct scsi_host_template qla1280_driver_template = {
  .eh_bus_reset_handler = qla1280_eh_bus_reset,
  .eh_host_reset_handler = qla1280_eh_adapter_reset,
  .bios_param = qla1280_biosparam,
- .can_queue = 0xfffff,
+ .can_queue = MAX_OUTSTANDING_COMMANDS,
  .this_id = -1,
  .sg_tablesize = SG_ALL,
  .use_clustering = ENABLE_CLUSTERING,


@@ -385,8 +385,8 @@ static int dspi_transfer_one_message(struct spi_master *master,
  dspi->cur_chip = spi_get_ctldata(spi);
  dspi->cs = spi->chip_select;
  dspi->cs_change = 0;
- if (dspi->cur_transfer->transfer_list.next
-  == &dspi->cur_msg->transfers)
+ if (list_is_last(&dspi->cur_transfer->transfer_list,
+  &dspi->cur_msg->transfers) || transfer->cs_change)
  dspi->cs_change = 1;
  dspi->void_write_data = dspi->cur_chip->void_write_data;


@ -423,12 +423,16 @@ static void omap2_mcspi_tx_dma(struct spi_device *spi,
if (mcspi_dma->dma_tx) { if (mcspi_dma->dma_tx) {
struct dma_async_tx_descriptor *tx; struct dma_async_tx_descriptor *tx;
struct scatterlist sg;
dmaengine_slave_config(mcspi_dma->dma_tx, &cfg); dmaengine_slave_config(mcspi_dma->dma_tx, &cfg);
tx = dmaengine_prep_slave_sg(mcspi_dma->dma_tx, xfer->tx_sg.sgl, sg_init_table(&sg, 1);
xfer->tx_sg.nents, DMA_MEM_TO_DEV, sg_dma_address(&sg) = xfer->tx_dma;
DMA_PREP_INTERRUPT | DMA_CTRL_ACK); sg_dma_len(&sg) = xfer->len;
tx = dmaengine_prep_slave_sg(mcspi_dma->dma_tx, &sg, 1,
DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (tx) { if (tx) {
tx->callback = omap2_mcspi_tx_callback; tx->callback = omap2_mcspi_tx_callback;
tx->callback_param = spi; tx->callback_param = spi;
@ -474,15 +478,20 @@ omap2_mcspi_rx_dma(struct spi_device *spi, struct spi_transfer *xfer,
if (mcspi_dma->dma_rx) { if (mcspi_dma->dma_rx) {
struct dma_async_tx_descriptor *tx; struct dma_async_tx_descriptor *tx;
struct scatterlist sg;
dmaengine_slave_config(mcspi_dma->dma_rx, &cfg); dmaengine_slave_config(mcspi_dma->dma_rx, &cfg);
if ((l & OMAP2_MCSPI_CHCONF_TURBO) && mcspi->fifo_depth == 0) if ((l & OMAP2_MCSPI_CHCONF_TURBO) && mcspi->fifo_depth == 0)
dma_count -= es; dma_count -= es;
tx = dmaengine_prep_slave_sg(mcspi_dma->dma_rx, xfer->rx_sg.sgl, sg_init_table(&sg, 1);
xfer->rx_sg.nents, DMA_DEV_TO_MEM, sg_dma_address(&sg) = xfer->rx_dma;
DMA_PREP_INTERRUPT | DMA_CTRL_ACK); sg_dma_len(&sg) = dma_count;
tx = dmaengine_prep_slave_sg(mcspi_dma->dma_rx, &sg, 1,
DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT |
DMA_CTRL_ACK);
if (tx) { if (tx) {
tx->callback = omap2_mcspi_rx_callback; tx->callback = omap2_mcspi_rx_callback;
tx->callback_param = spi; tx->callback_param = spi;
@ -496,6 +505,8 @@ omap2_mcspi_rx_dma(struct spi_device *spi, struct spi_transfer *xfer,
omap2_mcspi_set_dma_req(spi, 1, 1); omap2_mcspi_set_dma_req(spi, 1, 1);
wait_for_completion(&mcspi_dma->dma_rx_completion); wait_for_completion(&mcspi_dma->dma_rx_completion);
dma_unmap_single(mcspi->dev, xfer->rx_dma, count,
DMA_FROM_DEVICE);
if (mcspi->fifo_depth > 0) if (mcspi->fifo_depth > 0)
return count; return count;
@ -608,6 +619,8 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
if (tx != NULL) { if (tx != NULL) {
wait_for_completion(&mcspi_dma->dma_tx_completion); wait_for_completion(&mcspi_dma->dma_tx_completion);
dma_unmap_single(mcspi->dev, xfer->tx_dma, xfer->len,
DMA_TO_DEVICE);
if (mcspi->fifo_depth > 0) { if (mcspi->fifo_depth > 0) {
irqstat_reg = mcspi->base + OMAP2_MCSPI_IRQSTATUS; irqstat_reg = mcspi->base + OMAP2_MCSPI_IRQSTATUS;
@ -1074,16 +1087,6 @@ static void omap2_mcspi_cleanup(struct spi_device *spi)
gpio_free(spi->cs_gpio); gpio_free(spi->cs_gpio);
} }
static bool omap2_mcspi_can_dma(struct spi_master *master,
struct spi_device *spi,
struct spi_transfer *xfer)
{
if (xfer->len < DMA_MIN_BYTES)
return false;
return true;
}
static int omap2_mcspi_work_one(struct omap2_mcspi *mcspi, static int omap2_mcspi_work_one(struct omap2_mcspi *mcspi,
struct spi_device *spi, struct spi_transfer *t) struct spi_device *spi, struct spi_transfer *t)
{ {
@ -1265,6 +1268,32 @@ static int omap2_mcspi_transfer_one(struct spi_master *master,
return -EINVAL; return -EINVAL;
} }
if (len < DMA_MIN_BYTES)
goto skip_dma_map;
if (mcspi_dma->dma_tx && tx_buf != NULL) {
t->tx_dma = dma_map_single(mcspi->dev, (void *) tx_buf,
len, DMA_TO_DEVICE);
if (dma_mapping_error(mcspi->dev, t->tx_dma)) {
dev_dbg(mcspi->dev, "dma %cX %d bytes error\n",
'T', len);
return -EINVAL;
}
}
if (mcspi_dma->dma_rx && rx_buf != NULL) {
t->rx_dma = dma_map_single(mcspi->dev, rx_buf, t->len,
DMA_FROM_DEVICE);
if (dma_mapping_error(mcspi->dev, t->rx_dma)) {
dev_dbg(mcspi->dev, "dma %cX %d bytes error\n",
'R', len);
if (tx_buf != NULL)
dma_unmap_single(mcspi->dev, t->tx_dma,
len, DMA_TO_DEVICE);
return -EINVAL;
}
}
skip_dma_map:
return omap2_mcspi_work_one(mcspi, spi, t); return omap2_mcspi_work_one(mcspi, spi, t);
} }
@ -1348,7 +1377,6 @@ static int omap2_mcspi_probe(struct platform_device *pdev)
master->transfer_one = omap2_mcspi_transfer_one; master->transfer_one = omap2_mcspi_transfer_one;
master->set_cs = omap2_mcspi_set_cs; master->set_cs = omap2_mcspi_set_cs;
master->cleanup = omap2_mcspi_cleanup; master->cleanup = omap2_mcspi_cleanup;
master->can_dma = omap2_mcspi_can_dma;
master->dev.of_node = node; master->dev.of_node = node;
master->max_speed_hz = OMAP2_MCSPI_MAX_FREQ; master->max_speed_hz = OMAP2_MCSPI_MAX_FREQ;
master->min_speed_hz = OMAP2_MCSPI_MAX_FREQ >> 15; master->min_speed_hz = OMAP2_MCSPI_MAX_FREQ >> 15;


@@ -126,7 +126,7 @@ static const struct lpss_config lpss_platforms[] = {
  .reg_general = -1,
  .reg_ssp = 0x20,
  .reg_cs_ctrl = 0x24,
- .reg_capabilities = 0xfc,
+ .reg_capabilities = -1,
  .rx_threshold = 1,
  .tx_threshold_lo = 32,
  .tx_threshold_hi = 56,


@ -94,6 +94,7 @@ struct ti_qspi {
#define QSPI_FLEN(n) ((n - 1) << 0) #define QSPI_FLEN(n) ((n - 1) << 0)
#define QSPI_WLEN_MAX_BITS 128 #define QSPI_WLEN_MAX_BITS 128
#define QSPI_WLEN_MAX_BYTES 16 #define QSPI_WLEN_MAX_BYTES 16
#define QSPI_WLEN_MASK QSPI_WLEN(QSPI_WLEN_MAX_BITS)
/* STATUS REGISTER */ /* STATUS REGISTER */
#define BUSY 0x01 #define BUSY 0x01
@ -235,16 +236,16 @@ static inline int ti_qspi_poll_wc(struct ti_qspi *qspi)
return -ETIMEDOUT; return -ETIMEDOUT;
} }
static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t) static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t,
int count)
{ {
int wlen, count, xfer_len; int wlen, xfer_len;
unsigned int cmd; unsigned int cmd;
const u8 *txbuf; const u8 *txbuf;
u32 data; u32 data;
txbuf = t->tx_buf; txbuf = t->tx_buf;
cmd = qspi->cmd | QSPI_WR_SNGL; cmd = qspi->cmd | QSPI_WR_SNGL;
count = t->len;
wlen = t->bits_per_word >> 3; /* in bytes */ wlen = t->bits_per_word >> 3; /* in bytes */
xfer_len = wlen; xfer_len = wlen;
@ -304,9 +305,10 @@ static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t)
return 0; return 0;
} }
static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t) static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t,
int count)
{ {
int wlen, count; int wlen;
unsigned int cmd; unsigned int cmd;
u8 *rxbuf; u8 *rxbuf;
@ -323,7 +325,6 @@ static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t)
cmd |= QSPI_RD_SNGL; cmd |= QSPI_RD_SNGL;
break; break;
} }
count = t->len;
wlen = t->bits_per_word >> 3; /* in bytes */ wlen = t->bits_per_word >> 3; /* in bytes */
while (count) { while (count) {
@ -354,12 +355,13 @@ static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t)
return 0; return 0;
} }
static int qspi_transfer_msg(struct ti_qspi *qspi, struct spi_transfer *t) static int qspi_transfer_msg(struct ti_qspi *qspi, struct spi_transfer *t,
int count)
{ {
int ret; int ret;
if (t->tx_buf) { if (t->tx_buf) {
ret = qspi_write_msg(qspi, t); ret = qspi_write_msg(qspi, t, count);
if (ret) { if (ret) {
dev_dbg(qspi->dev, "Error while writing\n"); dev_dbg(qspi->dev, "Error while writing\n");
return ret; return ret;
@ -367,7 +369,7 @@ static int qspi_transfer_msg(struct ti_qspi *qspi, struct spi_transfer *t)
} }
if (t->rx_buf) { if (t->rx_buf) {
ret = qspi_read_msg(qspi, t); ret = qspi_read_msg(qspi, t, count);
if (ret) { if (ret) {
dev_dbg(qspi->dev, "Error while reading\n"); dev_dbg(qspi->dev, "Error while reading\n");
return ret; return ret;
@ -450,7 +452,8 @@ static int ti_qspi_start_transfer_one(struct spi_master *master,
struct spi_device *spi = m->spi; struct spi_device *spi = m->spi;
struct spi_transfer *t; struct spi_transfer *t;
int status = 0, ret; int status = 0, ret;
int frame_length; unsigned int frame_len_words, transfer_len_words;
int wlen;
/* setup device control reg */ /* setup device control reg */
qspi->dc = 0; qspi->dc = 0;
@ -462,14 +465,15 @@ static int ti_qspi_start_transfer_one(struct spi_master *master,
if (spi->mode & SPI_CS_HIGH) if (spi->mode & SPI_CS_HIGH)
qspi->dc |= QSPI_CSPOL(spi->chip_select); qspi->dc |= QSPI_CSPOL(spi->chip_select);
frame_length = (m->frame_length << 3) / spi->bits_per_word; frame_len_words = 0;
list_for_each_entry(t, &m->transfers, transfer_list)
frame_length = clamp(frame_length, 0, QSPI_FRAME); frame_len_words += t->len / (t->bits_per_word >> 3);
frame_len_words = min_t(unsigned int, frame_len_words, QSPI_FRAME);
/* setup command reg */ /* setup command reg */
qspi->cmd = 0; qspi->cmd = 0;
qspi->cmd |= QSPI_EN_CS(spi->chip_select); qspi->cmd |= QSPI_EN_CS(spi->chip_select);
qspi->cmd |= QSPI_FLEN(frame_length); qspi->cmd |= QSPI_FLEN(frame_len_words);
ti_qspi_write(qspi, qspi->dc, QSPI_SPI_DC_REG); ti_qspi_write(qspi, qspi->dc, QSPI_SPI_DC_REG);
@ -479,16 +483,23 @@ static int ti_qspi_start_transfer_one(struct spi_master *master,
ti_qspi_disable_memory_map(spi); ti_qspi_disable_memory_map(spi);
list_for_each_entry(t, &m->transfers, transfer_list) { list_for_each_entry(t, &m->transfers, transfer_list) {
qspi->cmd |= QSPI_WLEN(t->bits_per_word); qspi->cmd = ((qspi->cmd & ~QSPI_WLEN_MASK) |
QSPI_WLEN(t->bits_per_word));
ret = qspi_transfer_msg(qspi, t); wlen = t->bits_per_word >> 3;
transfer_len_words = min(t->len / wlen, frame_len_words);
ret = qspi_transfer_msg(qspi, t, transfer_len_words * wlen);
if (ret) { if (ret) {
dev_dbg(qspi->dev, "transfer message failed\n"); dev_dbg(qspi->dev, "transfer message failed\n");
mutex_unlock(&qspi->list_lock); mutex_unlock(&qspi->list_lock);
return -EINVAL; return -EINVAL;
} }
m->actual_length += t->len; m->actual_length += transfer_len_words * wlen;
frame_len_words -= transfer_len_words;
if (frame_len_words == 0)
break;
} }
mutex_unlock(&qspi->list_lock); mutex_unlock(&qspi->list_lock);


@ -112,7 +112,6 @@ static int ecryptfs_readdir(struct file *file, struct dir_context *ctx)
.sb = inode->i_sb, .sb = inode->i_sb,
}; };
lower_file = ecryptfs_file_to_lower(file); lower_file = ecryptfs_file_to_lower(file);
lower_file->f_pos = ctx->pos;
rc = iterate_dir(lower_file, &buf.ctx); rc = iterate_dir(lower_file, &buf.ctx);
ctx->pos = buf.ctx.pos; ctx->pos = buf.ctx.pos;
if (rc < 0) if (rc < 0)
@ -223,14 +222,6 @@ static int ecryptfs_open(struct inode *inode, struct file *file)
} }
ecryptfs_set_file_lower( ecryptfs_set_file_lower(
file, ecryptfs_inode_to_private(inode)->lower_file); file, ecryptfs_inode_to_private(inode)->lower_file);
if (d_is_dir(ecryptfs_dentry)) {
ecryptfs_printk(KERN_DEBUG, "This is a directory\n");
mutex_lock(&crypt_stat->cs_mutex);
crypt_stat->flags &= ~(ECRYPTFS_ENCRYPTED);
mutex_unlock(&crypt_stat->cs_mutex);
rc = 0;
goto out;
}
rc = read_or_initialize_metadata(ecryptfs_dentry); rc = read_or_initialize_metadata(ecryptfs_dentry);
if (rc) if (rc)
goto out_put; goto out_put;
@ -247,6 +238,45 @@ out:
return rc; return rc;
} }
/**
* ecryptfs_dir_open
* @inode: inode specifying file to open
* @file: Structure to return filled in
*
* Opens the file specified by inode.
*
* Returns zero on success; non-zero otherwise
*/
static int ecryptfs_dir_open(struct inode *inode, struct file *file)
{
struct dentry *ecryptfs_dentry = file->f_path.dentry;
/* Private value of ecryptfs_dentry allocated in
* ecryptfs_lookup() */
struct ecryptfs_file_info *file_info;
struct file *lower_file;
/* Released in ecryptfs_release or end of function if failure */
file_info = kmem_cache_zalloc(ecryptfs_file_info_cache, GFP_KERNEL);
ecryptfs_set_file_private(file, file_info);
if (unlikely(!file_info)) {
ecryptfs_printk(KERN_ERR,
"Error attempting to allocate memory\n");
return -ENOMEM;
}
lower_file = dentry_open(ecryptfs_dentry_to_lower_path(ecryptfs_dentry),
file->f_flags, current_cred());
if (IS_ERR(lower_file)) {
printk(KERN_ERR "%s: Error attempting to initialize "
"the lower file for the dentry with name "
"[%pd]; rc = [%ld]\n", __func__,
ecryptfs_dentry, PTR_ERR(lower_file));
kmem_cache_free(ecryptfs_file_info_cache, file_info);
return PTR_ERR(lower_file);
}
ecryptfs_set_file_lower(file, lower_file);
return 0;
}
static int ecryptfs_flush(struct file *file, fl_owner_t td) static int ecryptfs_flush(struct file *file, fl_owner_t td)
{ {
struct file *lower_file = ecryptfs_file_to_lower(file); struct file *lower_file = ecryptfs_file_to_lower(file);
@ -267,6 +297,19 @@ static int ecryptfs_release(struct inode *inode, struct file *file)
return 0; return 0;
} }
static int ecryptfs_dir_release(struct inode *inode, struct file *file)
{
fput(ecryptfs_file_to_lower(file));
kmem_cache_free(ecryptfs_file_info_cache,
ecryptfs_file_to_private(file));
return 0;
}
static loff_t ecryptfs_dir_llseek(struct file *file, loff_t offset, int whence)
{
return vfs_llseek(ecryptfs_file_to_lower(file), offset, whence);
}
static int static int
ecryptfs_fsync(struct file *file, loff_t start, loff_t end, int datasync) ecryptfs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{ {
@ -346,20 +389,16 @@ const struct file_operations ecryptfs_dir_fops = {
#ifdef CONFIG_COMPAT #ifdef CONFIG_COMPAT
.compat_ioctl = ecryptfs_compat_ioctl, .compat_ioctl = ecryptfs_compat_ioctl,
#endif #endif
.open = ecryptfs_open, .open = ecryptfs_dir_open,
.flush = ecryptfs_flush, .release = ecryptfs_dir_release,
.release = ecryptfs_release,
.fsync = ecryptfs_fsync, .fsync = ecryptfs_fsync,
.fasync = ecryptfs_fasync, .llseek = ecryptfs_dir_llseek,
.splice_read = generic_file_splice_read,
.llseek = default_llseek,
}; };
const struct file_operations ecryptfs_main_fops = { const struct file_operations ecryptfs_main_fops = {
.llseek = generic_file_llseek, .llseek = generic_file_llseek,
.read_iter = ecryptfs_read_update_atime, .read_iter = ecryptfs_read_update_atime,
.write_iter = generic_file_write_iter, .write_iter = generic_file_write_iter,
.iterate = ecryptfs_readdir,
.unlocked_ioctl = ecryptfs_unlocked_ioctl, .unlocked_ioctl = ecryptfs_unlocked_ioctl,
#ifdef CONFIG_COMPAT #ifdef CONFIG_COMPAT
.compat_ioctl = ecryptfs_compat_ioctl, .compat_ioctl = ecryptfs_compat_ioctl,


@ -203,6 +203,8 @@ int get_rock_ridge_filename(struct iso_directory_record *de,
int retnamlen = 0; int retnamlen = 0;
int truncate = 0; int truncate = 0;
int ret = 0; int ret = 0;
char *p;
int len;
if (!ISOFS_SB(inode->i_sb)->s_rock) if (!ISOFS_SB(inode->i_sb)->s_rock)
return 0; return 0;
@ -267,12 +269,17 @@ repeat:
rr->u.NM.flags); rr->u.NM.flags);
break; break;
} }
if ((strlen(retname) + rr->len - 5) >= 254) { len = rr->len - 5;
if (retnamlen + len >= 254) {
truncate = 1; truncate = 1;
break; break;
} }
strncat(retname, rr->u.NM.name, rr->len - 5); p = memchr(rr->u.NM.name, '\0', len);
retnamlen += rr->len - 5; if (unlikely(p))
len = p - rr->u.NM.name;
memcpy(retname + retnamlen, rr->u.NM.name, len);
retnamlen += len;
retname[retnamlen] = '\0';
break; break;
case SIG('R', 'E'): case SIG('R', 'E'):
kfree(rs.buffer); kfree(rs.buffer);


@@ -153,9 +153,9 @@ static int kernfs_path_from_node_locked(struct kernfs_node *kn_to,
  p = buf + len + nlen;
  *p = '\0';
  for (kn = kn_to; kn != common; kn = kn->parent) {
- nlen = strlen(kn->name);
- p -= nlen;
- memcpy(p, kn->name, nlen);
+ size_t tmp = strlen(kn->name);
+ p -= tmp;
+ memcpy(p, kn->name, tmp);
  *(--p) = '/';
  }


@@ -15,6 +15,7 @@
  #include <linux/slab.h>
  #include <linux/pagemap.h>
  #include <linux/namei.h>
+ #include <linux/seq_file.h>
  #include "kernfs-internal.h"
@@ -40,6 +41,19 @@ static int kernfs_sop_show_options(struct seq_file *sf, struct dentry *dentry)
  return 0;
  }
+ static int kernfs_sop_show_path(struct seq_file *sf, struct dentry *dentry)
+ {
+  struct kernfs_node *node = dentry->d_fsdata;
+  struct kernfs_root *root = kernfs_root(node);
+  struct kernfs_syscall_ops *scops = root->syscall_ops;
+
+  if (scops && scops->show_path)
+   return scops->show_path(sf, node, root);
+
+  seq_dentry(sf, dentry, " \t\n\\");
+  return 0;
+ }
+
  const struct super_operations kernfs_sops = {
  .statfs = simple_statfs,
  .drop_inode = generic_delete_inode,
@@ -47,6 +61,7 @@ const struct super_operations kernfs_sops = {
  .remount_fs = kernfs_sop_remount_fs,
  .show_options = kernfs_sop_show_options,
+ .show_path = kernfs_sop_show_path,
  };
  /**


@ -2266,6 +2266,33 @@ int vfs_path_lookup(struct dentry *dentry, struct vfsmount *mnt,
} }
EXPORT_SYMBOL(vfs_path_lookup); EXPORT_SYMBOL(vfs_path_lookup);
/**
* lookup_hash - lookup single pathname component on already hashed name
* @name: name and hash to lookup
* @base: base directory to lookup from
*
* The name must have been verified and hashed (see lookup_one_len()). Using
* this after just full_name_hash() is unsafe.
*
* This function also doesn't check for search permission on base directory.
*
* Use lookup_one_len_unlocked() instead, unless you really know what you are
* doing.
*
* Do not hold i_mutex; this helper takes i_mutex if necessary.
*/
struct dentry *lookup_hash(const struct qstr *name, struct dentry *base)
{
struct dentry *ret;
ret = lookup_dcache(name, base, 0);
if (!ret)
ret = lookup_slow(name, base, 0);
return ret;
}
EXPORT_SYMBOL(lookup_hash);
/** /**
* lookup_one_len - filesystem helper to lookup single pathname component * lookup_one_len - filesystem helper to lookup single pathname component
* @name: pathname component to lookup * @name: pathname component to lookup
@ -2337,7 +2364,6 @@ struct dentry *lookup_one_len_unlocked(const char *name,
struct qstr this; struct qstr this;
unsigned int c; unsigned int c;
int err; int err;
struct dentry *ret;
this.name = name; this.name = name;
this.len = len; this.len = len;
@ -2369,10 +2395,7 @@ struct dentry *lookup_one_len_unlocked(const char *name,
if (err) if (err)
return ERR_PTR(err); return ERR_PTR(err);
ret = lookup_dcache(&this, base, 0); return lookup_hash(&this, base);
if (!ret)
ret = lookup_slow(&this, base, 0);
return ret;
} }
EXPORT_SYMBOL(lookup_one_len_unlocked); EXPORT_SYMBOL(lookup_one_len_unlocked);
@ -2942,22 +2965,10 @@ no_open:
dentry = lookup_real(dir, dentry, nd->flags); dentry = lookup_real(dir, dentry, nd->flags);
if (IS_ERR(dentry)) if (IS_ERR(dentry))
return PTR_ERR(dentry); return PTR_ERR(dentry);
}
if (create_error) { if (create_error && !dentry->d_inode) {
int open_flag = op->open_flag; error = create_error;
goto out;
error = create_error;
if ((open_flag & O_EXCL)) {
if (!dentry->d_inode)
goto out;
} else if (!dentry->d_inode) {
goto out;
} else if ((open_flag & O_TRUNC) &&
d_is_reg(dentry)) {
goto out;
}
/* will fail later, go on to get the right error */
}
} }
looked_up: looked_up:
path->dentry = dentry; path->dentry = dentry;
@ -4213,7 +4224,11 @@ int vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
bool new_is_dir = false; bool new_is_dir = false;
unsigned max_links = new_dir->i_sb->s_max_links; unsigned max_links = new_dir->i_sb->s_max_links;
if (source == target) /*
* Check source == target.
* On overlayfs need to look at underlying inodes.
*/
if (vfs_select_inode(old_dentry, 0) == vfs_select_inode(new_dentry, 0))
return 0; return 0;
error = may_delete(old_dir, old_dentry, is_dir); error = may_delete(old_dir, old_dentry, is_dir);


@ -322,3 +322,90 @@ struct posix_acl *ocfs2_iop_get_acl(struct inode *inode, int type)
brelse(di_bh); brelse(di_bh);
return acl; return acl;
} }
int ocfs2_acl_chmod(struct inode *inode, struct buffer_head *bh)
{
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
struct posix_acl *acl;
int ret;
if (S_ISLNK(inode->i_mode))
return -EOPNOTSUPP;
if (!(osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL))
return 0;
acl = ocfs2_get_acl_nolock(inode, ACL_TYPE_ACCESS, bh);
if (IS_ERR(acl) || !acl)
return PTR_ERR(acl);
ret = __posix_acl_chmod(&acl, GFP_KERNEL, inode->i_mode);
if (ret)
return ret;
ret = ocfs2_set_acl(NULL, inode, NULL, ACL_TYPE_ACCESS,
acl, NULL, NULL);
posix_acl_release(acl);
return ret;
}
/*
* Initialize the ACLs of a new inode. If parent directory has default ACL,
* then clone to new inode. Called from ocfs2_mknod.
*/
int ocfs2_init_acl(handle_t *handle,
struct inode *inode,
struct inode *dir,
struct buffer_head *di_bh,
struct buffer_head *dir_bh,
struct ocfs2_alloc_context *meta_ac,
struct ocfs2_alloc_context *data_ac)
{
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
struct posix_acl *acl = NULL;
int ret = 0, ret2;
umode_t mode;
if (!S_ISLNK(inode->i_mode)) {
if (osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) {
acl = ocfs2_get_acl_nolock(dir, ACL_TYPE_DEFAULT,
dir_bh);
if (IS_ERR(acl))
return PTR_ERR(acl);
}
if (!acl) {
mode = inode->i_mode & ~current_umask();
ret = ocfs2_acl_set_mode(inode, di_bh, handle, mode);
if (ret) {
mlog_errno(ret);
goto cleanup;
}
}
}
if ((osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) && acl) {
if (S_ISDIR(inode->i_mode)) {
ret = ocfs2_set_acl(handle, inode, di_bh,
ACL_TYPE_DEFAULT, acl,
meta_ac, data_ac);
if (ret)
goto cleanup;
}
mode = inode->i_mode;
ret = __posix_acl_create(&acl, GFP_NOFS, &mode);
if (ret < 0)
return ret;
ret2 = ocfs2_acl_set_mode(inode, di_bh, handle, mode);
if (ret2) {
mlog_errno(ret2);
ret = ret2;
goto cleanup;
}
if (ret > 0) {
ret = ocfs2_set_acl(handle, inode,
di_bh, ACL_TYPE_ACCESS,
acl, meta_ac, data_ac);
}
}
cleanup:
posix_acl_release(acl);
return ret;
}


@@ -35,5 +35,10 @@ int ocfs2_set_acl(handle_t *handle,
  struct posix_acl *acl,
  struct ocfs2_alloc_context *meta_ac,
  struct ocfs2_alloc_context *data_ac);
+ extern int ocfs2_acl_chmod(struct inode *, struct buffer_head *);
+ extern int ocfs2_init_acl(handle_t *, struct inode *, struct inode *,
+  struct buffer_head *, struct buffer_head *,
+  struct ocfs2_alloc_context *,
+  struct ocfs2_alloc_context *);
  #endif /* OCFS2_ACL_H */


@@ -1268,20 +1268,20 @@ bail_unlock_rw:
  if (size_change)
  ocfs2_rw_unlock(inode, 1);
  bail:
- brelse(bh);
  /* Release quota pointers in case we acquired them */
  for (qtype = 0; qtype < OCFS2_MAXQUOTAS; qtype++)
  dqput(transfer_to[qtype]);
  if (!status && attr->ia_valid & ATTR_MODE) {
- status = posix_acl_chmod(inode, inode->i_mode);
+ status = ocfs2_acl_chmod(inode, bh);
  if (status < 0)
  mlog_errno(status);
  }
  if (inode_locked)
  ocfs2_inode_unlock(inode, 1);
+ brelse(bh);
  return status;
  }


@ -259,7 +259,6 @@ static int ocfs2_mknod(struct inode *dir,
struct ocfs2_dir_lookup_result lookup = { NULL, }; struct ocfs2_dir_lookup_result lookup = { NULL, };
sigset_t oldset; sigset_t oldset;
int did_block_signals = 0; int did_block_signals = 0;
struct posix_acl *default_acl = NULL, *acl = NULL;
struct ocfs2_dentry_lock *dl = NULL; struct ocfs2_dentry_lock *dl = NULL;
trace_ocfs2_mknod(dir, dentry, dentry->d_name.len, dentry->d_name.name, trace_ocfs2_mknod(dir, dentry, dentry->d_name.len, dentry->d_name.name,
@ -367,12 +366,6 @@ static int ocfs2_mknod(struct inode *dir,
goto leave; goto leave;
} }
status = posix_acl_create(dir, &inode->i_mode, &default_acl, &acl);
if (status) {
mlog_errno(status);
goto leave;
}
handle = ocfs2_start_trans(osb, ocfs2_mknod_credits(osb->sb, handle = ocfs2_start_trans(osb, ocfs2_mknod_credits(osb->sb,
S_ISDIR(mode), S_ISDIR(mode),
xattr_credits)); xattr_credits));
@ -421,16 +414,8 @@ static int ocfs2_mknod(struct inode *dir,
inc_nlink(dir); inc_nlink(dir);
} }
if (default_acl) { status = ocfs2_init_acl(handle, inode, dir, new_fe_bh, parent_fe_bh,
status = ocfs2_set_acl(handle, inode, new_fe_bh, meta_ac, data_ac);
ACL_TYPE_DEFAULT, default_acl,
meta_ac, data_ac);
}
if (!status && acl) {
status = ocfs2_set_acl(handle, inode, new_fe_bh,
ACL_TYPE_ACCESS, acl,
meta_ac, data_ac);
}
if (status < 0) { if (status < 0) {
mlog_errno(status); mlog_errno(status);
@ -472,10 +457,6 @@ static int ocfs2_mknod(struct inode *dir,
d_instantiate(dentry, inode); d_instantiate(dentry, inode);
status = 0; status = 0;
leave: leave:
if (default_acl)
posix_acl_release(default_acl);
if (acl)
posix_acl_release(acl);
if (status < 0 && did_quota_inode) if (status < 0 && did_quota_inode)
dquot_free_inode(inode); dquot_free_inode(inode);
if (handle) if (handle)


@ -4248,20 +4248,12 @@ static int ocfs2_reflink(struct dentry *old_dentry, struct inode *dir,
struct inode *inode = d_inode(old_dentry); struct inode *inode = d_inode(old_dentry);
struct buffer_head *old_bh = NULL; struct buffer_head *old_bh = NULL;
struct inode *new_orphan_inode = NULL; struct inode *new_orphan_inode = NULL;
struct posix_acl *default_acl, *acl;
umode_t mode;
if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb))) if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb)))
return -EOPNOTSUPP; return -EOPNOTSUPP;
mode = inode->i_mode;
error = posix_acl_create(dir, &mode, &default_acl, &acl);
if (error) {
mlog_errno(error);
return error;
}
error = ocfs2_create_inode_in_orphan(dir, mode, error = ocfs2_create_inode_in_orphan(dir, inode->i_mode,
&new_orphan_inode); &new_orphan_inode);
if (error) { if (error) {
mlog_errno(error); mlog_errno(error);
@ -4300,16 +4292,11 @@ static int ocfs2_reflink(struct dentry *old_dentry, struct inode *dir,
/* If the security isn't preserved, we need to re-initialize them. */ /* If the security isn't preserved, we need to re-initialize them. */
if (!preserve) { if (!preserve) {
error = ocfs2_init_security_and_acl(dir, new_orphan_inode, error = ocfs2_init_security_and_acl(dir, new_orphan_inode,
&new_dentry->d_name, &new_dentry->d_name);
default_acl, acl);
if (error) if (error)
mlog_errno(error); mlog_errno(error);
} }
out: out:
if (default_acl)
posix_acl_release(default_acl);
if (acl)
posix_acl_release(acl);
if (!error) { if (!error) {
error = ocfs2_mv_orphaned_inode_to_new(dir, new_orphan_inode, error = ocfs2_mv_orphaned_inode_to_new(dir, new_orphan_inode,
new_dentry); new_dentry);


@@ -7216,12 +7216,10 @@ out:
  */
  int ocfs2_init_security_and_acl(struct inode *dir,
  struct inode *inode,
- const struct qstr *qstr,
- struct posix_acl *default_acl,
- struct posix_acl *acl)
+ const struct qstr *qstr)
  {
- struct buffer_head *dir_bh = NULL;
  int ret = 0;
+ struct buffer_head *dir_bh = NULL;
  ret = ocfs2_init_security_get(inode, dir, qstr, NULL);
  if (ret) {
@@ -7234,11 +7232,9 @@ int ocfs2_init_security_and_acl(struct inode *dir,
  mlog_errno(ret);
  goto leave;
  }
-
- if (!ret && default_acl)
-  ret = ocfs2_iop_set_acl(inode, default_acl, ACL_TYPE_DEFAULT);
- if (!ret && acl)
-  ret = ocfs2_iop_set_acl(inode, acl, ACL_TYPE_ACCESS);
+ ret = ocfs2_init_acl(NULL, inode, dir, NULL, dir_bh, NULL, NULL);
+ if (ret)
+  mlog_errno(ret);
  ocfs2_inode_unlock(dir, 0);
  brelse(dir_bh);


@@ -94,7 +94,5 @@ int ocfs2_reflink_xattrs(struct inode *old_inode,
  bool preserve_security);
  int ocfs2_init_security_and_acl(struct inode *dir,
  struct inode *inode,
- const struct qstr *qstr,
- struct posix_acl *default_acl,
- struct posix_acl *acl);
+ const struct qstr *qstr);
  #endif /* OCFS2_XATTR_H */


@@ -840,16 +840,12 @@ EXPORT_SYMBOL(file_path);
  int vfs_open(const struct path *path, struct file *file,
  const struct cred *cred)
  {
- struct dentry *dentry = path->dentry;
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = vfs_select_inode(path->dentry, file->f_flags);
+ if (IS_ERR(inode))
+  return PTR_ERR(inode);
  file->f_path = *path;
- if (dentry->d_flags & DCACHE_OP_SELECT_INODE) {
-  inode = dentry->d_op->d_select_inode(dentry, file->f_flags);
-  if (IS_ERR(inode))
-   return PTR_ERR(inode);
- }
  return do_dentry_open(file, inode, NULL, cred);
  }


@@ -411,9 +411,7 @@ static inline struct dentry *ovl_lookup_real(struct dentry *dir,
  {
  struct dentry *dentry;
- inode_lock(dir->d_inode);
- dentry = lookup_one_len(name->name, dir, name->len);
- inode_unlock(dir->d_inode);
+ dentry = lookup_hash(name, dir);
  if (IS_ERR(dentry)) {
  if (PTR_ERR(dentry) == -ENOENT)


@@ -434,7 +434,7 @@ static int proc_pid_wchan(struct seq_file *m, struct pid_namespace *ns,
  && !lookup_symbol_name(wchan, symname))
  seq_printf(m, "%s", symname);
  else
- seq_puts(m, "0\n");
+ seq_putc(m, '0');
  return 0;
  }


@@ -1143,6 +1143,9 @@ static long do_splice_to(struct file *in, loff_t *ppos,
  if (unlikely(ret < 0))
  return ret;
+ if (unlikely(len > MAX_RW_COUNT))
+  len = MAX_RW_COUNT;
+
  if (in->f_op->splice_read)
  splice_read = in->f_op->splice_read;
  else


@@ -565,4 +565,16 @@ static inline struct dentry *d_real(struct dentry *dentry)
  return dentry;
  }
+
+ static inline struct inode *vfs_select_inode(struct dentry *dentry,
+  unsigned open_flags)
+ {
+  struct inode *inode = d_inode(dentry);
+
+  if (inode && unlikely(dentry->d_flags & DCACHE_OP_SELECT_INODE))
+   inode = dentry->d_op->d_select_inode(dentry, open_flags);
+
+  return inode;
+ }
  #endif /* __LINUX_DCACHE_H */

View file

@@ -152,6 +152,8 @@ struct kernfs_syscall_ops {
  int (*rmdir)(struct kernfs_node *kn);
  int (*rename)(struct kernfs_node *kn, struct kernfs_node *new_parent,
  const char *new_name);
+ int (*show_path)(struct seq_file *sf, struct kernfs_node *kn,
+  struct kernfs_root *root);
  };
  struct kernfs_root {

@@ -173,10 +173,12 @@ enum s2mps11_regulators {
 
 #define S2MPS11_LDO_VSEL_MASK	0x3F
 #define S2MPS11_BUCK_VSEL_MASK	0xFF
+#define S2MPS11_BUCK9_VSEL_MASK	0x1F
 #define S2MPS11_ENABLE_MASK	(0x03 << S2MPS11_ENABLE_SHIFT)
 #define S2MPS11_ENABLE_SHIFT	0x06
 #define S2MPS11_LDO_N_VOLTAGES	(S2MPS11_LDO_VSEL_MASK + 1)
 #define S2MPS11_BUCK_N_VOLTAGES	(S2MPS11_BUCK_VSEL_MASK + 1)
+#define S2MPS11_BUCK9_N_VOLTAGES	(S2MPS11_BUCK9_VSEL_MASK + 1)
 #define S2MPS11_RAMP_DELAY	25000		/* uV/us */
 
 #define S2MPS11_CTRL1_PWRHOLD_MASK	BIT(4)

@@ -500,11 +500,20 @@ static inline int page_mapcount(struct page *page)
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 int total_mapcount(struct page *page);
+int page_trans_huge_mapcount(struct page *page, int *total_mapcount);
 #else
 static inline int total_mapcount(struct page *page)
 {
 	return page_mapcount(page);
 }
+static inline int page_trans_huge_mapcount(struct page *page,
+					   int *total_mapcount)
+{
+	int mapcount = page_mapcount(page);
+	if (total_mapcount)
+		*total_mapcount = mapcount;
+	return mapcount;
+}
 #endif
 
 static inline struct page *virt_to_head_page(const void *x)

@@ -79,6 +79,8 @@ extern int kern_path_mountpoint(int, const char *, struct path *, unsigned int);
 
 extern struct dentry *lookup_one_len(const char *, struct dentry *, int);
 extern struct dentry *lookup_one_len_unlocked(const char *, struct dentry *, int);
 
+struct qstr;
+extern struct dentry *lookup_hash(const struct qstr *, struct dentry *);
 extern int follow_down_one(struct path *);
 extern int follow_down(struct path *);

@@ -418,7 +418,7 @@ extern sector_t swapdev_block(int, pgoff_t);
 extern int page_swapcount(struct page *);
 extern int swp_swapcount(swp_entry_t entry);
 extern struct swap_info_struct *page_swap_info(struct page *);
-extern int reuse_swap_page(struct page *);
+extern bool reuse_swap_page(struct page *, int *);
 extern int try_to_free_swap(struct page *);
 
 struct backing_dev_info;
@@ -513,8 +513,8 @@ static inline int swp_swapcount(swp_entry_t entry)
 	return 0;
 }
 
-#define reuse_swap_page(page) \
-	(!PageTransCompound(page) && page_mapcount(page) == 1)
+#define reuse_swap_page(page, total_mapcount) \
+	(page_trans_huge_mapcount(page, total_mapcount) == 1)
 
 static inline int try_to_free_swap(struct page *page)
 {

@@ -87,6 +87,7 @@ size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i);
 size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i);
 size_t iov_iter_zero(size_t bytes, struct iov_iter *);
 unsigned long iov_iter_alignment(const struct iov_iter *i);
+unsigned long iov_iter_gap_alignment(const struct iov_iter *i);
 void iov_iter_init(struct iov_iter *i, int direction, const struct iovec *iov,
 			unsigned long nr_segs, size_t count);
 void iov_iter_kvec(struct iov_iter *i, int direction, const struct kvec *kvec,

@@ -19,14 +19,20 @@
 #ifndef _LINUX_IF_H
 #define _LINUX_IF_H
 
+#include <linux/libc-compat.h>	/* for compatibility with glibc */
 #include <linux/types.h>	/* for "__kernel_caddr_t" et al */
 #include <linux/socket.h>	/* for "struct sockaddr" et al */
 #include <linux/compiler.h>	/* for "__user" et al */
 
+#if __UAPI_DEF_IF_IFNAMSIZ
 #define	IFNAMSIZ	16
+#endif /* __UAPI_DEF_IF_IFNAMSIZ */
 #define	IFALIASZ	256
 #include <linux/hdlc/ioctl.h>
 
+/* For glibc compatibility. An empty enum does not compile. */
+#if __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO != 0 && \
+    __UAPI_DEF_IF_NET_DEVICE_FLAGS != 0
 /**
  * enum net_device_flags - &struct net_device flags
  *
@@ -68,6 +74,8 @@
  * @IFF_ECHO: echo sent packets. Volatile.
  */
 enum net_device_flags {
+/* for compatibility with glibc net/if.h */
+#if __UAPI_DEF_IF_NET_DEVICE_FLAGS
 	IFF_UP				= 1<<0,  /* sysfs */
 	IFF_BROADCAST			= 1<<1,  /* volatile */
 	IFF_DEBUG			= 1<<2,  /* sysfs */
@@ -84,11 +92,17 @@ enum net_device_flags {
 	IFF_PORTSEL			= 1<<13, /* sysfs */
 	IFF_AUTOMEDIA			= 1<<14, /* sysfs */
 	IFF_DYNAMIC			= 1<<15, /* sysfs */
+#endif /* __UAPI_DEF_IF_NET_DEVICE_FLAGS */
+#if __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO
 	IFF_LOWER_UP			= 1<<16, /* volatile */
 	IFF_DORMANT			= 1<<17, /* volatile */
 	IFF_ECHO			= 1<<18, /* volatile */
+#endif /* __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO */
 };
 
+#endif /* __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO != 0 && __UAPI_DEF_IF_NET_DEVICE_FLAGS != 0 */
+/* for compatibility with glibc net/if.h */
+#if __UAPI_DEF_IF_NET_DEVICE_FLAGS
 #define IFF_UP				IFF_UP
 #define IFF_BROADCAST			IFF_BROADCAST
 #define IFF_DEBUG			IFF_DEBUG
@@ -105,9 +119,13 @@ enum net_device_flags {
 #define IFF_PORTSEL			IFF_PORTSEL
 #define IFF_AUTOMEDIA			IFF_AUTOMEDIA
 #define IFF_DYNAMIC			IFF_DYNAMIC
+#endif /* __UAPI_DEF_IF_NET_DEVICE_FLAGS */
+#if __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO
 #define IFF_LOWER_UP			IFF_LOWER_UP
 #define IFF_DORMANT			IFF_DORMANT
 #define IFF_ECHO			IFF_ECHO
+#endif /* __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO */
 
 #define IFF_VOLATILE	(IFF_LOOPBACK|IFF_POINTOPOINT|IFF_BROADCAST|IFF_ECHO|\
 		IFF_MASTER|IFF_SLAVE|IFF_RUNNING|IFF_LOWER_UP|IFF_DORMANT)
@@ -166,6 +184,8 @@ enum {
  *	being very small might be worth keeping for clean configuration.
  */
 
+/* for compatibility with glibc net/if.h */
+#if __UAPI_DEF_IF_IFMAP
 struct ifmap {
 	unsigned long mem_start;
 	unsigned long mem_end;
@@ -175,6 +195,7 @@ struct ifmap {
 	unsigned char port;
 	/* 3 bytes spare */
 };
+#endif /* __UAPI_DEF_IF_IFMAP */
 
 struct if_settings {
 	unsigned int type;	/* Type of physical device or protocol */
@@ -200,6 +221,8 @@ struct if_settings {
  *	remainder may be interface specific.
  */
 
+/* for compatibility with glibc net/if.h */
+#if __UAPI_DEF_IF_IFREQ
 struct ifreq {
 #define IFHWADDRLEN	6
 	union
@@ -223,6 +246,7 @@ struct ifreq {
 		struct	if_settings ifru_settings;
 	} ifr_ifru;
 };
+#endif /* __UAPI_DEF_IF_IFREQ */
 
 #define ifr_name	ifr_ifrn.ifrn_name	/* interface name */
 #define ifr_hwaddr	ifr_ifru.ifru_hwaddr	/* MAC address */
@@ -249,6 +273,8 @@ struct ifreq {
  *	must know all networks accessible).
  */
 
+/* for compatibility with glibc net/if.h */
+#if __UAPI_DEF_IF_IFCONF
 struct ifconf  {
 	int	ifc_len;		/* size of buffer	*/
 	union {
@@ -256,6 +282,8 @@ struct ifconf {
 		struct	ifreq __user *ifcu_req;
 	} ifc_ifcu;
 };
+#endif /* __UAPI_DEF_IF_IFCONF */
+
 #define	ifc_buf	ifc_ifcu.ifcu_buf	/* buffer address	*/
 #define	ifc_req	ifc_ifcu.ifcu_req	/* array of structures	*/

@@ -51,6 +51,40 @@
 /* We have included glibc headers... */
 #if defined(__GLIBC__)
 
+/* Coordinate with glibc net/if.h header. */
+#if defined(_NET_IF_H)
+
+/* GLIBC headers included first so don't define anything
+ * that would already be defined. */
+
+#define __UAPI_DEF_IF_IFCONF 0
+#define __UAPI_DEF_IF_IFMAP 0
+#define __UAPI_DEF_IF_IFNAMSIZ 0
+#define __UAPI_DEF_IF_IFREQ 0
+/* Everything up to IFF_DYNAMIC, matches net/if.h until glibc 2.23 */
+#define __UAPI_DEF_IF_NET_DEVICE_FLAGS 0
+/* For the future if glibc adds IFF_LOWER_UP, IFF_DORMANT and IFF_ECHO */
+#ifndef __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO
+#define __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO 1
+#endif /* __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO */
+
+#else /* _NET_IF_H */
+
+/* Linux headers included first, and we must define everything
+ * we need. The expectation is that glibc will check the
+ * __UAPI_DEF_* defines and adjust appropriately. */
+
+#define __UAPI_DEF_IF_IFCONF 1
+#define __UAPI_DEF_IF_IFMAP 1
+#define __UAPI_DEF_IF_IFNAMSIZ 1
+#define __UAPI_DEF_IF_IFREQ 1
+/* Everything up to IFF_DYNAMIC, matches net/if.h until glibc 2.23 */
+#define __UAPI_DEF_IF_NET_DEVICE_FLAGS 1
+/* For the future if glibc adds IFF_LOWER_UP, IFF_DORMANT and IFF_ECHO */
+#define __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO 1
+
+#endif /* _NET_IF_H */
+
 /* Coordinate with glibc netinet/in.h header. */
 #if defined(_NETINET_IN_H)
@@ -117,6 +151,16 @@
  * that we need. */
 #else /* !defined(__GLIBC__) */
 
+/* Definitions for if.h */
+#define __UAPI_DEF_IF_IFCONF 1
+#define __UAPI_DEF_IF_IFMAP 1
+#define __UAPI_DEF_IF_IFNAMSIZ 1
+#define __UAPI_DEF_IF_IFREQ 1
+/* Everything up to IFF_DYNAMIC, matches net/if.h until glibc 2.23 */
+#define __UAPI_DEF_IF_NET_DEVICE_FLAGS 1
+/* For the future if glibc adds IFF_LOWER_UP, IFF_DORMANT and IFF_ECHO */
+#define __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO 1
+
 /* Definitions for in.h */
 #define __UAPI_DEF_IN_ADDR		1
 #define __UAPI_DEF_IN_IPPROTO		1

@@ -10,3 +10,4 @@ header-y += tc_skbedit.h
 header-y += tc_vlan.h
 header-y += tc_bpf.h
 header-y += tc_connmark.h
+header-y += tc_ife.h

@@ -1215,6 +1215,41 @@ static void cgroup_destroy_root(struct cgroup_root *root)
 	cgroup_free_root(root);
 }
 
+/*
+ * look up cgroup associated with current task's cgroup namespace on the
+ * specified hierarchy
+ */
+static struct cgroup *
+current_cgns_cgroup_from_root(struct cgroup_root *root)
+{
+	struct cgroup *res = NULL;
+	struct css_set *cset;
+
+	lockdep_assert_held(&css_set_lock);
+
+	rcu_read_lock();
+
+	cset = current->nsproxy->cgroup_ns->root_cset;
+	if (cset == &init_css_set) {
+		res = &root->cgrp;
+	} else {
+		struct cgrp_cset_link *link;
+
+		list_for_each_entry(link, &cset->cgrp_links, cgrp_link) {
+			struct cgroup *c = link->cgrp;
+
+			if (c->root == root) {
+				res = c;
+				break;
+			}
+		}
+	}
+	rcu_read_unlock();
+
+	BUG_ON(!res);
+	return res;
+}
+
 /* look up cgroup associated with given css_set on the specified hierarchy */
 static struct cgroup *cset_cgroup_from_root(struct css_set *cset,
 					    struct cgroup_root *root)
@@ -1593,6 +1628,33 @@ static int rebind_subsystems(struct cgroup_root *dst_root, u16 ss_mask)
 	return 0;
 }
 
+static int cgroup_show_path(struct seq_file *sf, struct kernfs_node *kf_node,
+			    struct kernfs_root *kf_root)
+{
+	int len = 0;
+	char *buf = NULL;
+	struct cgroup_root *kf_cgroot = cgroup_root_from_kf(kf_root);
+	struct cgroup *ns_cgroup;
+
+	buf = kmalloc(PATH_MAX, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	spin_lock_bh(&css_set_lock);
+	ns_cgroup = current_cgns_cgroup_from_root(kf_cgroot);
+	len = kernfs_path_from_node(kf_node, ns_cgroup->kn, buf, PATH_MAX);
+	spin_unlock_bh(&css_set_lock);
+
+	if (len >= PATH_MAX)
+		len = -ERANGE;
+	else if (len > 0) {
+		seq_escape(sf, buf, " \t\n\\");
+		len = 0;
+	}
+	kfree(buf);
+	return len;
+}
+
 static int cgroup_show_options(struct seq_file *seq,
 			       struct kernfs_root *kf_root)
 {
@@ -5433,6 +5495,7 @@ static struct kernfs_syscall_ops cgroup_kf_syscall_ops = {
 	.mkdir			= cgroup_mkdir,
 	.rmdir			= cgroup_rmdir,
 	.rename			= cgroup_rename,
+	.show_path		= cgroup_show_path,
 };
 
 static void __init cgroup_init_subsys(struct cgroup_subsys *ss, bool early)

@@ -351,7 +351,7 @@ static struct srcu_struct pmus_srcu;
  *   1 - disallow cpu events for unpriv
  *   2 - disallow kernel profiling for unpriv
  */
-int sysctl_perf_event_paranoid __read_mostly = 1;
+int sysctl_perf_event_paranoid __read_mostly = 2;
 
 /* Minimum for 512 kiB + 1 user control page */
 int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */

@@ -347,6 +347,7 @@ void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size,
 			 bool truncated)
 {
 	struct ring_buffer *rb = handle->rb;
+	bool wakeup = truncated;
 	unsigned long aux_head;
 	u64 flags = 0;
@@ -375,9 +376,16 @@ void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size,
 	aux_head = rb->user_page->aux_head = local_read(&rb->aux_head);
 
 	if (aux_head - local_read(&rb->aux_wakeup) >= rb->aux_watermark) {
-		perf_output_wakeup(handle);
+		wakeup = true;
 		local_add(rb->aux_watermark, &rb->aux_wakeup);
 	}
+
+	if (wakeup) {
+		if (truncated)
+			handle->event->pending_disable = 1;
+		perf_output_wakeup(handle);
+	}
+
 	handle->event = NULL;
 
 	local_set(&rb->aux_nest, 0);

@@ -1394,6 +1394,7 @@ static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
 				     !cpumask_test_cpu(later_rq->cpu,
 						       &task->cpus_allowed) ||
 				     task_running(rq, task) ||
+				     !dl_task(task) ||
 				     !task_on_rq_queued(task))) {
 			double_unlock_balance(rq, later_rq);
 			later_rq = NULL;

Some files were not shown because too many files have changed in this diff.