Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Conflicts:
	net/netfilter/nf_tables_core.c

The nf_tables_core.c conflict was resolved using a conflict resolution
from Stephen Rothwell as a guide.

Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
David S. Miller 2015-03-23 22:22:43 -04:00
commit d5c1d8c567
134 changed files with 1389 additions and 1634 deletions

View file

@ -10206,6 +10206,13 @@ S: Maintained
F: Documentation/usb/ohci.txt F: Documentation/usb/ohci.txt
F: drivers/usb/host/ohci* F: drivers/usb/host/ohci*
USB OTG FSM (Finite State Machine)
M: Peter Chen <Peter.Chen@freescale.com>
T: git git://github.com/hzpeterchen/linux-usb.git
L: linux-usb@vger.kernel.org
S: Maintained
F: drivers/usb/common/usb-otg-fsm.c
USB OVER IP DRIVER USB OVER IP DRIVER
M: Valentina Manea <valentina.manea.m@gmail.com> M: Valentina Manea <valentina.manea.m@gmail.com>
M: Shuah Khan <shuah.kh@samsung.com> M: Shuah Khan <shuah.kh@samsung.com>

View file

@ -1,7 +1,7 @@
VERSION = 4 VERSION = 4
PATCHLEVEL = 0 PATCHLEVEL = 0
SUBLEVEL = 0 SUBLEVEL = 0
EXTRAVERSION = -rc4 EXTRAVERSION = -rc5
NAME = Hurr durr I'ma sheep NAME = Hurr durr I'ma sheep
# *DOCUMENTATION* # *DOCUMENTATION*

View file

@ -246,12 +246,9 @@ static int __get_cpu_architecture(void)
if (cpu_arch) if (cpu_arch)
cpu_arch += CPU_ARCH_ARMv3; cpu_arch += CPU_ARCH_ARMv3;
} else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) { } else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
unsigned int mmfr0;
/* Revised CPUID format. Read the Memory Model Feature /* Revised CPUID format. Read the Memory Model Feature
* Register 0 and check for VMSAv7 or PMSAv7 */ * Register 0 and check for VMSAv7 or PMSAv7 */
asm("mrc p15, 0, %0, c0, c1, 4" unsigned int mmfr0 = read_cpuid_ext(CPUID_EXT_MMFR0);
: "=r" (mmfr0));
if ((mmfr0 & 0x0000000f) >= 0x00000003 || if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
(mmfr0 & 0x000000f0) >= 0x00000030) (mmfr0 & 0x000000f0) >= 0x00000030)
cpu_arch = CPU_ARCH_ARMv7; cpu_arch = CPU_ARCH_ARMv7;

View file

@ -1131,23 +1131,22 @@ static void __init l2c310_of_parse(const struct device_node *np,
} }
ret = l2x0_cache_size_of_parse(np, aux_val, aux_mask, &assoc, SZ_512K); ret = l2x0_cache_size_of_parse(np, aux_val, aux_mask, &assoc, SZ_512K);
if (ret) if (!ret) {
return; switch (assoc) {
case 16:
switch (assoc) { *aux_val &= ~L2X0_AUX_CTRL_ASSOC_MASK;
case 16: *aux_val |= L310_AUX_CTRL_ASSOCIATIVITY_16;
*aux_val &= ~L2X0_AUX_CTRL_ASSOC_MASK; *aux_mask &= ~L2X0_AUX_CTRL_ASSOC_MASK;
*aux_val |= L310_AUX_CTRL_ASSOCIATIVITY_16; break;
*aux_mask &= ~L2X0_AUX_CTRL_ASSOC_MASK; case 8:
break; *aux_val &= ~L2X0_AUX_CTRL_ASSOC_MASK;
case 8: *aux_mask &= ~L2X0_AUX_CTRL_ASSOC_MASK;
*aux_val &= ~L2X0_AUX_CTRL_ASSOC_MASK; break;
*aux_mask &= ~L2X0_AUX_CTRL_ASSOC_MASK; default:
break; pr_err("L2C-310 OF cache associativity %d invalid, only 8 or 16 permitted\n",
default: assoc);
pr_err("L2C-310 OF cache associativity %d invalid, only 8 or 16 permitted\n", break;
assoc); }
break;
} }
prefetch = l2x0_saved_regs.prefetch_ctrl; prefetch = l2x0_saved_regs.prefetch_ctrl;

View file

@ -171,7 +171,7 @@ static int __dma_supported(struct device *dev, u64 mask, bool warn)
*/ */
if (sizeof(mask) != sizeof(dma_addr_t) && if (sizeof(mask) != sizeof(dma_addr_t) &&
mask > (dma_addr_t)~0 && mask > (dma_addr_t)~0 &&
dma_to_pfn(dev, ~0) < max_pfn) { dma_to_pfn(dev, ~0) < max_pfn - 1) {
if (warn) { if (warn) {
dev_warn(dev, "Coherent DMA mask %#llx is larger than dma_addr_t allows\n", dev_warn(dev, "Coherent DMA mask %#llx is larger than dma_addr_t allows\n",
mask); mask);

View file

@ -552,6 +552,7 @@ do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
pr_alert("Unhandled fault: %s (0x%03x) at 0x%08lx\n", pr_alert("Unhandled fault: %s (0x%03x) at 0x%08lx\n",
inf->name, fsr, addr); inf->name, fsr, addr);
show_pte(current->mm, addr);
info.si_signo = inf->sig; info.si_signo = inf->sig;
info.si_errno = 0; info.si_errno = 0;

View file

@ -49,7 +49,10 @@ static int change_memory_common(unsigned long addr, int numpages,
WARN_ON_ONCE(1); WARN_ON_ONCE(1);
} }
if (!is_module_address(start) || !is_module_address(end - 1)) if (start < MODULES_VADDR || start >= MODULES_END)
return -EINVAL;
if (end < MODULES_VADDR || start >= MODULES_END)
return -EINVAL; return -EINVAL;
data.set_mask = set_mask; data.set_mask = set_mask;

View file

@ -39,7 +39,11 @@ extern u64 cpu_do_resume(phys_addr_t ptr, u64 idmap_ttbr);
#include <asm/memory.h> #include <asm/memory.h>
#define cpu_switch_mm(pgd,mm) cpu_do_switch_mm(virt_to_phys(pgd),mm) #define cpu_switch_mm(pgd,mm) \
do { \
BUG_ON(pgd == swapper_pg_dir); \
cpu_do_switch_mm(virt_to_phys(pgd),mm); \
} while (0)
#define cpu_get_pgd() \ #define cpu_get_pgd() \
({ \ ({ \

View file

@ -337,7 +337,11 @@ core_initcall(arm64_dmi_init);
static void efi_set_pgd(struct mm_struct *mm) static void efi_set_pgd(struct mm_struct *mm)
{ {
cpu_switch_mm(mm->pgd, mm); if (mm == &init_mm)
cpu_set_reserved_ttbr0();
else
cpu_switch_mm(mm->pgd, mm);
flush_tlb_all(); flush_tlb_all();
if (icache_is_aivivt()) if (icache_is_aivivt())
__flush_icache_all(); __flush_icache_all();

View file

@ -51,7 +51,7 @@ static int __init early_coherent_pool(char *p)
} }
early_param("coherent_pool", early_coherent_pool); early_param("coherent_pool", early_coherent_pool);
static void *__alloc_from_pool(size_t size, struct page **ret_page) static void *__alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags)
{ {
unsigned long val; unsigned long val;
void *ptr = NULL; void *ptr = NULL;
@ -67,6 +67,8 @@ static void *__alloc_from_pool(size_t size, struct page **ret_page)
*ret_page = phys_to_page(phys); *ret_page = phys_to_page(phys);
ptr = (void *)val; ptr = (void *)val;
if (flags & __GFP_ZERO)
memset(ptr, 0, size);
} }
return ptr; return ptr;
@ -101,6 +103,7 @@ static void *__dma_alloc_coherent(struct device *dev, size_t size,
flags |= GFP_DMA; flags |= GFP_DMA;
if (IS_ENABLED(CONFIG_DMA_CMA) && (flags & __GFP_WAIT)) { if (IS_ENABLED(CONFIG_DMA_CMA) && (flags & __GFP_WAIT)) {
struct page *page; struct page *page;
void *addr;
size = PAGE_ALIGN(size); size = PAGE_ALIGN(size);
page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT, page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
@ -109,7 +112,10 @@ static void *__dma_alloc_coherent(struct device *dev, size_t size,
return NULL; return NULL;
*dma_handle = phys_to_dma(dev, page_to_phys(page)); *dma_handle = phys_to_dma(dev, page_to_phys(page));
return page_address(page); addr = page_address(page);
if (flags & __GFP_ZERO)
memset(addr, 0, size);
return addr;
} else { } else {
return swiotlb_alloc_coherent(dev, size, dma_handle, flags); return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
} }
@ -146,7 +152,7 @@ static void *__dma_alloc(struct device *dev, size_t size,
if (!coherent && !(flags & __GFP_WAIT)) { if (!coherent && !(flags & __GFP_WAIT)) {
struct page *page = NULL; struct page *page = NULL;
void *addr = __alloc_from_pool(size, &page); void *addr = __alloc_from_pool(size, &page, flags);
if (addr) if (addr)
*dma_handle = phys_to_dma(dev, page_to_phys(page)); *dma_handle = phys_to_dma(dev, page_to_phys(page));

View file

@ -2957,6 +2957,17 @@ unsigned long sun4v_t5_set_perfreg(unsigned long reg_num,
unsigned long reg_val); unsigned long reg_val);
#endif #endif
#define HV_FAST_M7_GET_PERFREG 0x43
#define HV_FAST_M7_SET_PERFREG 0x44
#ifndef __ASSEMBLY__
unsigned long sun4v_m7_get_perfreg(unsigned long reg_num,
unsigned long *reg_val);
unsigned long sun4v_m7_set_perfreg(unsigned long reg_num,
unsigned long reg_val);
#endif
/* Function numbers for HV_CORE_TRAP. */ /* Function numbers for HV_CORE_TRAP. */
#define HV_CORE_SET_VER 0x00 #define HV_CORE_SET_VER 0x00
#define HV_CORE_PUTCHAR 0x01 #define HV_CORE_PUTCHAR 0x01
@ -2981,6 +2992,7 @@ unsigned long sun4v_t5_set_perfreg(unsigned long reg_num,
#define HV_GRP_SDIO 0x0108 #define HV_GRP_SDIO 0x0108
#define HV_GRP_SDIO_ERR 0x0109 #define HV_GRP_SDIO_ERR 0x0109
#define HV_GRP_REBOOT_DATA 0x0110 #define HV_GRP_REBOOT_DATA 0x0110
#define HV_GRP_M7_PERF 0x0114
#define HV_GRP_NIAG_PERF 0x0200 #define HV_GRP_NIAG_PERF 0x0200
#define HV_GRP_FIRE_PERF 0x0201 #define HV_GRP_FIRE_PERF 0x0201
#define HV_GRP_N2_CPU 0x0202 #define HV_GRP_N2_CPU 0x0202

View file

@ -48,6 +48,7 @@ static struct api_info api_table[] = {
{ .group = HV_GRP_VT_CPU, }, { .group = HV_GRP_VT_CPU, },
{ .group = HV_GRP_T5_CPU, }, { .group = HV_GRP_T5_CPU, },
{ .group = HV_GRP_DIAG, .flags = FLAG_PRE_API }, { .group = HV_GRP_DIAG, .flags = FLAG_PRE_API },
{ .group = HV_GRP_M7_PERF, },
}; };
static DEFINE_SPINLOCK(hvapi_lock); static DEFINE_SPINLOCK(hvapi_lock);

View file

@ -837,3 +837,19 @@ ENTRY(sun4v_t5_set_perfreg)
retl retl
nop nop
ENDPROC(sun4v_t5_set_perfreg) ENDPROC(sun4v_t5_set_perfreg)
ENTRY(sun4v_m7_get_perfreg)
mov %o1, %o4
mov HV_FAST_M7_GET_PERFREG, %o5
ta HV_FAST_TRAP
stx %o1, [%o4]
retl
nop
ENDPROC(sun4v_m7_get_perfreg)
ENTRY(sun4v_m7_set_perfreg)
mov HV_FAST_M7_SET_PERFREG, %o5
ta HV_FAST_TRAP
retl
nop
ENDPROC(sun4v_m7_set_perfreg)

View file

@ -217,6 +217,31 @@ static const struct pcr_ops n5_pcr_ops = {
.pcr_nmi_disable = PCR_N4_PICNPT, .pcr_nmi_disable = PCR_N4_PICNPT,
}; };
static u64 m7_pcr_read(unsigned long reg_num)
{
unsigned long val;
(void) sun4v_m7_get_perfreg(reg_num, &val);
return val;
}
static void m7_pcr_write(unsigned long reg_num, u64 val)
{
(void) sun4v_m7_set_perfreg(reg_num, val);
}
static const struct pcr_ops m7_pcr_ops = {
.read_pcr = m7_pcr_read,
.write_pcr = m7_pcr_write,
.read_pic = n4_pic_read,
.write_pic = n4_pic_write,
.nmi_picl_value = n4_picl_value,
.pcr_nmi_enable = (PCR_N4_PICNPT | PCR_N4_STRACE |
PCR_N4_UTRACE | PCR_N4_TOE |
(26 << PCR_N4_SL_SHIFT)),
.pcr_nmi_disable = PCR_N4_PICNPT,
};
static unsigned long perf_hsvc_group; static unsigned long perf_hsvc_group;
static unsigned long perf_hsvc_major; static unsigned long perf_hsvc_major;
@ -248,6 +273,10 @@ static int __init register_perf_hsvc(void)
perf_hsvc_group = HV_GRP_T5_CPU; perf_hsvc_group = HV_GRP_T5_CPU;
break; break;
case SUN4V_CHIP_SPARC_M7:
perf_hsvc_group = HV_GRP_M7_PERF;
break;
default: default:
return -ENODEV; return -ENODEV;
} }
@ -293,6 +322,10 @@ static int __init setup_sun4v_pcr_ops(void)
pcr_ops = &n5_pcr_ops; pcr_ops = &n5_pcr_ops;
break; break;
case SUN4V_CHIP_SPARC_M7:
pcr_ops = &m7_pcr_ops;
break;
default: default:
ret = -ENODEV; ret = -ENODEV;
break; break;

View file

@ -792,6 +792,42 @@ static const struct sparc_pmu niagara4_pmu = {
.num_pic_regs = 4, .num_pic_regs = 4,
}; };
static void sparc_m7_write_pmc(int idx, u64 val)
{
u64 pcr;
pcr = pcr_ops->read_pcr(idx);
/* ensure ov and ntc are reset */
pcr &= ~(PCR_N4_OV | PCR_N4_NTC);
pcr_ops->write_pic(idx, val & 0xffffffff);
pcr_ops->write_pcr(idx, pcr);
}
static const struct sparc_pmu sparc_m7_pmu = {
.event_map = niagara4_event_map,
.cache_map = &niagara4_cache_map,
.max_events = ARRAY_SIZE(niagara4_perfmon_event_map),
.read_pmc = sparc_vt_read_pmc,
.write_pmc = sparc_m7_write_pmc,
.upper_shift = 5,
.lower_shift = 5,
.event_mask = 0x7ff,
.user_bit = PCR_N4_UTRACE,
.priv_bit = PCR_N4_STRACE,
/* We explicitly don't support hypervisor tracing. */
.hv_bit = 0,
.irq_bit = PCR_N4_TOE,
.upper_nop = 0,
.lower_nop = 0,
.flags = 0,
.max_hw_events = 4,
.num_pcrs = 4,
.num_pic_regs = 4,
};
static const struct sparc_pmu *sparc_pmu __read_mostly; static const struct sparc_pmu *sparc_pmu __read_mostly;
static u64 event_encoding(u64 event_id, int idx) static u64 event_encoding(u64 event_id, int idx)
@ -960,6 +996,8 @@ out:
cpuc->pcr[0] |= cpuc->event[0]->hw.config_base; cpuc->pcr[0] |= cpuc->event[0]->hw.config_base;
} }
static void sparc_pmu_start(struct perf_event *event, int flags);
/* On this PMU each PIC has it's own PCR control register. */ /* On this PMU each PIC has it's own PCR control register. */
static void calculate_multiple_pcrs(struct cpu_hw_events *cpuc) static void calculate_multiple_pcrs(struct cpu_hw_events *cpuc)
{ {
@ -972,20 +1010,13 @@ static void calculate_multiple_pcrs(struct cpu_hw_events *cpuc)
struct perf_event *cp = cpuc->event[i]; struct perf_event *cp = cpuc->event[i];
struct hw_perf_event *hwc = &cp->hw; struct hw_perf_event *hwc = &cp->hw;
int idx = hwc->idx; int idx = hwc->idx;
u64 enc;
if (cpuc->current_idx[i] != PIC_NO_INDEX) if (cpuc->current_idx[i] != PIC_NO_INDEX)
continue; continue;
sparc_perf_event_set_period(cp, hwc, idx);
cpuc->current_idx[i] = idx; cpuc->current_idx[i] = idx;
enc = perf_event_get_enc(cpuc->events[i]); sparc_pmu_start(cp, PERF_EF_RELOAD);
cpuc->pcr[idx] &= ~mask_for_index(idx);
if (hwc->state & PERF_HES_STOPPED)
cpuc->pcr[idx] |= nop_for_index(idx);
else
cpuc->pcr[idx] |= event_encoding(enc, idx);
} }
out: out:
for (i = 0; i < cpuc->n_events; i++) { for (i = 0; i < cpuc->n_events; i++) {
@ -1101,7 +1132,6 @@ static void sparc_pmu_del(struct perf_event *event, int _flags)
int i; int i;
local_irq_save(flags); local_irq_save(flags);
perf_pmu_disable(event->pmu);
for (i = 0; i < cpuc->n_events; i++) { for (i = 0; i < cpuc->n_events; i++) {
if (event == cpuc->event[i]) { if (event == cpuc->event[i]) {
@ -1127,7 +1157,6 @@ static void sparc_pmu_del(struct perf_event *event, int _flags)
} }
} }
perf_pmu_enable(event->pmu);
local_irq_restore(flags); local_irq_restore(flags);
} }
@ -1361,7 +1390,6 @@ static int sparc_pmu_add(struct perf_event *event, int ef_flags)
unsigned long flags; unsigned long flags;
local_irq_save(flags); local_irq_save(flags);
perf_pmu_disable(event->pmu);
n0 = cpuc->n_events; n0 = cpuc->n_events;
if (n0 >= sparc_pmu->max_hw_events) if (n0 >= sparc_pmu->max_hw_events)
@ -1394,7 +1422,6 @@ nocheck:
ret = 0; ret = 0;
out: out:
perf_pmu_enable(event->pmu);
local_irq_restore(flags); local_irq_restore(flags);
return ret; return ret;
} }
@ -1667,6 +1694,10 @@ static bool __init supported_pmu(void)
sparc_pmu = &niagara4_pmu; sparc_pmu = &niagara4_pmu;
return true; return true;
} }
if (!strcmp(sparc_pmu_type, "sparc-m7")) {
sparc_pmu = &sparc_m7_pmu;
return true;
}
return false; return false;
} }

View file

@ -287,6 +287,8 @@ void arch_trigger_all_cpu_backtrace(bool include_self)
printk(" TPC[%lx] O7[%lx] I7[%lx] RPC[%lx]\n", printk(" TPC[%lx] O7[%lx] I7[%lx] RPC[%lx]\n",
gp->tpc, gp->o7, gp->i7, gp->rpc); gp->tpc, gp->o7, gp->i7, gp->rpc);
} }
touch_nmi_watchdog();
} }
memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot)); memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));
@ -362,6 +364,8 @@ static void pmu_snapshot_all_cpus(void)
(cpu == this_cpu ? '*' : ' '), cpu, (cpu == this_cpu ? '*' : ' '), cpu,
pp->pcr[0], pp->pcr[1], pp->pcr[2], pp->pcr[3], pp->pcr[0], pp->pcr[1], pp->pcr[2], pp->pcr[3],
pp->pic[0], pp->pic[1], pp->pic[2], pp->pic[3]); pp->pic[0], pp->pic[1], pp->pic[2], pp->pic[3]);
touch_nmi_watchdog();
} }
memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot)); memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));

View file

@ -8,9 +8,11 @@
.text .text
ENTRY(memmove) /* o0=dst o1=src o2=len */ ENTRY(memmove) /* o0=dst o1=src o2=len */
mov %o0, %g1 brz,pn %o2, 99f
mov %o0, %g1
cmp %o0, %o1 cmp %o0, %o1
bleu,pt %xcc, memcpy bleu,pt %xcc, 2f
add %o1, %o2, %g7 add %o1, %o2, %g7
cmp %g7, %o0 cmp %g7, %o0
bleu,pt %xcc, memcpy bleu,pt %xcc, memcpy
@ -24,7 +26,34 @@ ENTRY(memmove) /* o0=dst o1=src o2=len */
stb %g7, [%o0] stb %g7, [%o0]
bne,pt %icc, 1b bne,pt %icc, 1b
sub %o0, 1, %o0 sub %o0, 1, %o0
99:
retl retl
mov %g1, %o0 mov %g1, %o0
/* We can't just call memcpy for these memmove cases. On some
* chips the memcpy uses cache initializing stores and when dst
* and src are close enough, those can clobber the source data
* before we've loaded it in.
*/
2: or %o0, %o1, %g7
or %o2, %g7, %g7
andcc %g7, 0x7, %g0
bne,pn %xcc, 4f
nop
3: ldx [%o1], %g7
add %o1, 8, %o1
subcc %o2, 8, %o2
add %o0, 8, %o0
bne,pt %icc, 3b
stx %g7, [%o0 - 0x8]
ba,a,pt %xcc, 99b
4: ldub [%o1], %g7
add %o1, 1, %o1
subcc %o2, 1, %o2
add %o0, 1, %o0
bne,pt %icc, 4b
stb %g7, [%o0 - 0x1]
ba,a,pt %xcc, 99b
ENDPROC(memmove) ENDPROC(memmove)

View file

@ -93,6 +93,8 @@ extern raw_spinlock_t pci_config_lock;
extern int (*pcibios_enable_irq)(struct pci_dev *dev); extern int (*pcibios_enable_irq)(struct pci_dev *dev);
extern void (*pcibios_disable_irq)(struct pci_dev *dev); extern void (*pcibios_disable_irq)(struct pci_dev *dev);
extern bool mp_should_keep_irq(struct device *dev);
struct pci_raw_ops { struct pci_raw_ops {
int (*read)(unsigned int domain, unsigned int bus, unsigned int devfn, int (*read)(unsigned int domain, unsigned int bus, unsigned int devfn,
int reg, int len, u32 *val); int reg, int len, u32 *val);

View file

@ -513,31 +513,6 @@ void __init pcibios_set_cache_line_size(void)
} }
} }
/*
* Some device drivers assume dev->irq won't change after calling
* pci_disable_device(). So delay releasing of IRQ resource to driver
* unbinding time. Otherwise it will break PM subsystem and drivers
* like xen-pciback etc.
*/
static int pci_irq_notifier(struct notifier_block *nb, unsigned long action,
void *data)
{
struct pci_dev *dev = to_pci_dev(data);
if (action != BUS_NOTIFY_UNBOUND_DRIVER)
return NOTIFY_DONE;
if (pcibios_disable_irq)
pcibios_disable_irq(dev);
return NOTIFY_OK;
}
static struct notifier_block pci_irq_nb = {
.notifier_call = pci_irq_notifier,
.priority = INT_MIN,
};
int __init pcibios_init(void) int __init pcibios_init(void)
{ {
if (!raw_pci_ops) { if (!raw_pci_ops) {
@ -550,9 +525,6 @@ int __init pcibios_init(void)
if (pci_bf_sort >= pci_force_bf) if (pci_bf_sort >= pci_force_bf)
pci_sort_breadthfirst(); pci_sort_breadthfirst();
bus_register_notifier(&pci_bus_type, &pci_irq_nb);
return 0; return 0;
} }
@ -711,6 +683,12 @@ int pcibios_enable_device(struct pci_dev *dev, int mask)
return 0; return 0;
} }
void pcibios_disable_device (struct pci_dev *dev)
{
if (!pci_dev_msi_enabled(dev) && pcibios_disable_irq)
pcibios_disable_irq(dev);
}
int pci_ext_cfg_avail(void) int pci_ext_cfg_avail(void)
{ {
if (raw_pci_ext_ops) if (raw_pci_ext_ops)

View file

@ -234,10 +234,10 @@ static int intel_mid_pci_irq_enable(struct pci_dev *dev)
static void intel_mid_pci_irq_disable(struct pci_dev *dev) static void intel_mid_pci_irq_disable(struct pci_dev *dev)
{ {
if (dev->irq_managed && dev->irq > 0) { if (!mp_should_keep_irq(&dev->dev) && dev->irq_managed &&
dev->irq > 0) {
mp_unmap_irq(dev->irq); mp_unmap_irq(dev->irq);
dev->irq_managed = 0; dev->irq_managed = 0;
dev->irq = 0;
} }
} }

View file

@ -1256,9 +1256,22 @@ static int pirq_enable_irq(struct pci_dev *dev)
return 0; return 0;
} }
bool mp_should_keep_irq(struct device *dev)
{
if (dev->power.is_prepared)
return true;
#ifdef CONFIG_PM
if (dev->power.runtime_status == RPM_SUSPENDING)
return true;
#endif
return false;
}
static void pirq_disable_irq(struct pci_dev *dev) static void pirq_disable_irq(struct pci_dev *dev)
{ {
if (io_apic_assign_pci_irqs && dev->irq_managed && dev->irq) { if (io_apic_assign_pci_irqs && !mp_should_keep_irq(&dev->dev) &&
dev->irq_managed && dev->irq) {
mp_unmap_irq(dev->irq); mp_unmap_irq(dev->irq);
dev->irq = 0; dev->irq = 0;
dev->irq_managed = 0; dev->irq_managed = 0;

View file

@ -485,6 +485,14 @@ void acpi_pci_irq_disable(struct pci_dev *dev)
if (!pin || !dev->irq_managed || dev->irq <= 0) if (!pin || !dev->irq_managed || dev->irq <= 0)
return; return;
/* Keep IOAPIC pin configuration when suspending */
if (dev->dev.power.is_prepared)
return;
#ifdef CONFIG_PM
if (dev->dev.power.runtime_status == RPM_SUSPENDING)
return;
#endif
entry = acpi_pci_irq_lookup(dev, pin); entry = acpi_pci_irq_lookup(dev, pin);
if (!entry) if (!entry)
return; return;
@ -505,6 +513,5 @@ void acpi_pci_irq_disable(struct pci_dev *dev)
if (gsi >= 0) { if (gsi >= 0) {
acpi_unregister_gsi(gsi); acpi_unregister_gsi(gsi);
dev->irq_managed = 0; dev->irq_managed = 0;
dev->irq = 0;
} }
} }

View file

@ -37,11 +37,11 @@ static int mvebu_v7_enter_idle(struct cpuidle_device *dev,
deepidle = true; deepidle = true;
ret = mvebu_v7_cpu_suspend(deepidle); ret = mvebu_v7_cpu_suspend(deepidle);
cpu_pm_exit();
if (ret) if (ret)
return ret; return ret;
cpu_pm_exit();
return index; return index;
} }
@ -50,17 +50,17 @@ static struct cpuidle_driver armadaxp_idle_driver = {
.states[0] = ARM_CPUIDLE_WFI_STATE, .states[0] = ARM_CPUIDLE_WFI_STATE,
.states[1] = { .states[1] = {
.enter = mvebu_v7_enter_idle, .enter = mvebu_v7_enter_idle,
.exit_latency = 10, .exit_latency = 100,
.power_usage = 50, .power_usage = 50,
.target_residency = 100, .target_residency = 1000,
.name = "MV CPU IDLE", .name = "MV CPU IDLE",
.desc = "CPU power down", .desc = "CPU power down",
}, },
.states[2] = { .states[2] = {
.enter = mvebu_v7_enter_idle, .enter = mvebu_v7_enter_idle,
.exit_latency = 100, .exit_latency = 1000,
.power_usage = 5, .power_usage = 5,
.target_residency = 1000, .target_residency = 10000,
.flags = MVEBU_V7_FLAG_DEEP_IDLE, .flags = MVEBU_V7_FLAG_DEEP_IDLE,
.name = "MV CPU DEEP IDLE", .name = "MV CPU DEEP IDLE",
.desc = "CPU and L2 Fabric power down", .desc = "CPU and L2 Fabric power down",

View file

@ -97,6 +97,12 @@
#define DRIVER_NAME "pl08xdmac" #define DRIVER_NAME "pl08xdmac"
#define PL80X_DMA_BUSWIDTHS \
BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \
BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)
static struct amba_driver pl08x_amba_driver; static struct amba_driver pl08x_amba_driver;
struct pl08x_driver_data; struct pl08x_driver_data;
@ -2070,6 +2076,10 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
pl08x->memcpy.device_pause = pl08x_pause; pl08x->memcpy.device_pause = pl08x_pause;
pl08x->memcpy.device_resume = pl08x_resume; pl08x->memcpy.device_resume = pl08x_resume;
pl08x->memcpy.device_terminate_all = pl08x_terminate_all; pl08x->memcpy.device_terminate_all = pl08x_terminate_all;
pl08x->memcpy.src_addr_widths = PL80X_DMA_BUSWIDTHS;
pl08x->memcpy.dst_addr_widths = PL80X_DMA_BUSWIDTHS;
pl08x->memcpy.directions = BIT(DMA_MEM_TO_MEM);
pl08x->memcpy.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
/* Initialize slave engine */ /* Initialize slave engine */
dma_cap_set(DMA_SLAVE, pl08x->slave.cap_mask); dma_cap_set(DMA_SLAVE, pl08x->slave.cap_mask);
@ -2086,6 +2096,10 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
pl08x->slave.device_pause = pl08x_pause; pl08x->slave.device_pause = pl08x_pause;
pl08x->slave.device_resume = pl08x_resume; pl08x->slave.device_resume = pl08x_resume;
pl08x->slave.device_terminate_all = pl08x_terminate_all; pl08x->slave.device_terminate_all = pl08x_terminate_all;
pl08x->slave.src_addr_widths = PL80X_DMA_BUSWIDTHS;
pl08x->slave.dst_addr_widths = PL80X_DMA_BUSWIDTHS;
pl08x->slave.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
pl08x->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
/* Get the platform data */ /* Get the platform data */
pl08x->pd = dev_get_platdata(&adev->dev); pl08x->pd = dev_get_platdata(&adev->dev);

View file

@ -238,93 +238,126 @@ static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first)
} }
/* /*
* atc_get_current_descriptors - * atc_get_desc_by_cookie - get the descriptor of a cookie
* locate the descriptor which equal to physical address in DSCR * @atchan: the DMA channel
* @atchan: the channel we want to start * @cookie: the cookie to get the descriptor for
* @dscr_addr: physical descriptor address in DSCR
*/ */
static struct at_desc *atc_get_current_descriptors(struct at_dma_chan *atchan, static struct at_desc *atc_get_desc_by_cookie(struct at_dma_chan *atchan,
u32 dscr_addr) dma_cookie_t cookie)
{ {
struct at_desc *desc, *_desc, *child, *desc_cur = NULL; struct at_desc *desc, *_desc;
list_for_each_entry_safe(desc, _desc, &atchan->queue, desc_node) {
if (desc->txd.cookie == cookie)
return desc;
}
list_for_each_entry_safe(desc, _desc, &atchan->active_list, desc_node) { list_for_each_entry_safe(desc, _desc, &atchan->active_list, desc_node) {
if (desc->lli.dscr == dscr_addr) { if (desc->txd.cookie == cookie)
desc_cur = desc; return desc;
break;
}
list_for_each_entry(child, &desc->tx_list, desc_node) {
if (child->lli.dscr == dscr_addr) {
desc_cur = child;
break;
}
}
} }
return desc_cur; return NULL;
} }
/* /**
* atc_get_bytes_left - * atc_calc_bytes_left - calculates the number of bytes left according to the
* Get the number of bytes residue in dma buffer, * value read from CTRLA.
* @chan: the channel we want to start *
* @current_len: the number of bytes left before reading CTRLA
* @ctrla: the value of CTRLA
* @desc: the descriptor containing the transfer width
*/ */
static int atc_get_bytes_left(struct dma_chan *chan) static inline int atc_calc_bytes_left(int current_len, u32 ctrla,
struct at_desc *desc)
{
return current_len - ((ctrla & ATC_BTSIZE_MAX) << desc->tx_width);
}
/**
* atc_calc_bytes_left_from_reg - calculates the number of bytes left according
* to the current value of CTRLA.
*
* @current_len: the number of bytes left before reading CTRLA
* @atchan: the channel to read CTRLA for
* @desc: the descriptor containing the transfer width
*/
static inline int atc_calc_bytes_left_from_reg(int current_len,
struct at_dma_chan *atchan, struct at_desc *desc)
{
u32 ctrla = channel_readl(atchan, CTRLA);
return atc_calc_bytes_left(current_len, ctrla, desc);
}
/**
* atc_get_bytes_left - get the number of bytes residue for a cookie
* @chan: DMA channel
* @cookie: transaction identifier to check status of
*/
static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
{ {
struct at_dma_chan *atchan = to_at_dma_chan(chan); struct at_dma_chan *atchan = to_at_dma_chan(chan);
struct at_dma *atdma = to_at_dma(chan->device);
int chan_id = atchan->chan_common.chan_id;
struct at_desc *desc_first = atc_first_active(atchan); struct at_desc *desc_first = atc_first_active(atchan);
struct at_desc *desc_cur; struct at_desc *desc;
int ret = 0, count = 0; int ret;
u32 ctrla, dscr;
/* /*
* Initialize necessary values in the first time. * If the cookie doesn't match to the currently running transfer then
* remain_desc record remain desc length. * we can return the total length of the associated DMA transfer,
* because it is still queued.
*/ */
if (atchan->remain_desc == 0) desc = atc_get_desc_by_cookie(atchan, cookie);
/* First descriptor embedds the transaction length */ if (desc == NULL)
atchan->remain_desc = desc_first->len; return -EINVAL;
else if (desc != desc_first)
return desc->total_len;
/* /* cookie matches to the currently running transfer */
* This happens when current descriptor transfer complete. ret = desc_first->total_len;
* The residual buffer size should reduce current descriptor length.
*/
if (unlikely(test_bit(ATC_IS_BTC, &atchan->status))) {
clear_bit(ATC_IS_BTC, &atchan->status);
desc_cur = atc_get_current_descriptors(atchan,
channel_readl(atchan, DSCR));
if (!desc_cur) {
ret = -EINVAL;
goto out;
}
count = (desc_cur->lli.ctrla & ATC_BTSIZE_MAX) if (desc_first->lli.dscr) {
<< desc_first->tx_width; /* hardware linked list transfer */
if (atchan->remain_desc < count) {
ret = -EINVAL;
goto out;
}
atchan->remain_desc -= count;
ret = atchan->remain_desc;
} else {
/* /*
* Get residual bytes when current * Calculate the residue by removing the length of the child
* descriptor transfer in progress. * descriptors already transferred from the total length.
* To get the current child descriptor we can use the value of
* the channel's DSCR register and compare it against the value
* of the hardware linked list structure of each child
* descriptor.
*/ */
count = (channel_readl(atchan, CTRLA) & ATC_BTSIZE_MAX)
<< (desc_first->tx_width);
ret = atchan->remain_desc - count;
}
/*
* Check fifo empty.
*/
if (!(dma_readl(atdma, CHSR) & AT_DMA_EMPT(chan_id)))
atc_issue_pending(chan);
out: ctrla = channel_readl(atchan, CTRLA);
rmb(); /* ensure CTRLA is read before DSCR */
dscr = channel_readl(atchan, DSCR);
/* for the first descriptor we can be more accurate */
if (desc_first->lli.dscr == dscr)
return atc_calc_bytes_left(ret, ctrla, desc_first);
ret -= desc_first->len;
list_for_each_entry(desc, &desc_first->tx_list, desc_node) {
if (desc->lli.dscr == dscr)
break;
ret -= desc->len;
}
/*
* For the last descriptor in the chain we can calculate
* the remaining bytes using the channel's register.
* Note that the transfer width of the first and last
* descriptor may differ.
*/
if (!desc->lli.dscr)
ret = atc_calc_bytes_left_from_reg(ret, atchan, desc);
} else {
/* single transfer */
ret = atc_calc_bytes_left_from_reg(ret, atchan, desc_first);
}
return ret; return ret;
} }
@ -539,8 +572,6 @@ static irqreturn_t at_dma_interrupt(int irq, void *dev_id)
/* Give information to tasklet */ /* Give information to tasklet */
set_bit(ATC_IS_ERROR, &atchan->status); set_bit(ATC_IS_ERROR, &atchan->status);
} }
if (pending & AT_DMA_BTC(i))
set_bit(ATC_IS_BTC, &atchan->status);
tasklet_schedule(&atchan->tasklet); tasklet_schedule(&atchan->tasklet);
ret = IRQ_HANDLED; ret = IRQ_HANDLED;
} }
@ -653,14 +684,18 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
desc->lli.ctrlb = ctrlb; desc->lli.ctrlb = ctrlb;
desc->txd.cookie = 0; desc->txd.cookie = 0;
desc->len = xfer_count << src_width;
atc_desc_chain(&first, &prev, desc); atc_desc_chain(&first, &prev, desc);
} }
/* First descriptor of the chain embedds additional information */ /* First descriptor of the chain embedds additional information */
first->txd.cookie = -EBUSY; first->txd.cookie = -EBUSY;
first->len = len; first->total_len = len;
/* set transfer width for the calculation of the residue */
first->tx_width = src_width; first->tx_width = src_width;
prev->tx_width = src_width;
/* set end-of-link to the last link descriptor of list*/ /* set end-of-link to the last link descriptor of list*/
set_desc_eol(desc); set_desc_eol(desc);
@ -752,6 +787,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
| ATC_SRC_WIDTH(mem_width) | ATC_SRC_WIDTH(mem_width)
| len >> mem_width; | len >> mem_width;
desc->lli.ctrlb = ctrlb; desc->lli.ctrlb = ctrlb;
desc->len = len;
atc_desc_chain(&first, &prev, desc); atc_desc_chain(&first, &prev, desc);
total_len += len; total_len += len;
@ -792,6 +828,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
| ATC_DST_WIDTH(mem_width) | ATC_DST_WIDTH(mem_width)
| len >> reg_width; | len >> reg_width;
desc->lli.ctrlb = ctrlb; desc->lli.ctrlb = ctrlb;
desc->len = len;
atc_desc_chain(&first, &prev, desc); atc_desc_chain(&first, &prev, desc);
total_len += len; total_len += len;
@ -806,8 +843,11 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
/* First descriptor of the chain embedds additional information */ /* First descriptor of the chain embedds additional information */
first->txd.cookie = -EBUSY; first->txd.cookie = -EBUSY;
first->len = total_len; first->total_len = total_len;
/* set transfer width for the calculation of the residue */
first->tx_width = reg_width; first->tx_width = reg_width;
prev->tx_width = reg_width;
/* first link descriptor of list is responsible of flags */ /* first link descriptor of list is responsible of flags */
first->txd.flags = flags; /* client is in control of this ack */ first->txd.flags = flags; /* client is in control of this ack */
@ -872,6 +912,7 @@ atc_dma_cyclic_fill_desc(struct dma_chan *chan, struct at_desc *desc,
| ATC_FC_MEM2PER | ATC_FC_MEM2PER
| ATC_SIF(atchan->mem_if) | ATC_SIF(atchan->mem_if)
| ATC_DIF(atchan->per_if); | ATC_DIF(atchan->per_if);
desc->len = period_len;
break; break;
case DMA_DEV_TO_MEM: case DMA_DEV_TO_MEM:
@ -883,6 +924,7 @@ atc_dma_cyclic_fill_desc(struct dma_chan *chan, struct at_desc *desc,
| ATC_FC_PER2MEM | ATC_FC_PER2MEM
| ATC_SIF(atchan->per_if) | ATC_SIF(atchan->per_if)
| ATC_DIF(atchan->mem_if); | ATC_DIF(atchan->mem_if);
desc->len = period_len;
break; break;
default: default:
@ -964,7 +1006,7 @@ atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
/* First descriptor of the chain embedds additional information */ /* First descriptor of the chain embedds additional information */
first->txd.cookie = -EBUSY; first->txd.cookie = -EBUSY;
first->len = buf_len; first->total_len = buf_len;
first->tx_width = reg_width; first->tx_width = reg_width;
return &first->txd; return &first->txd;
@ -1118,7 +1160,7 @@ atc_tx_status(struct dma_chan *chan,
spin_lock_irqsave(&atchan->lock, flags); spin_lock_irqsave(&atchan->lock, flags);
/* Get number of bytes left in the active transactions */ /* Get number of bytes left in the active transactions */
bytes = atc_get_bytes_left(chan); bytes = atc_get_bytes_left(chan, cookie);
spin_unlock_irqrestore(&atchan->lock, flags); spin_unlock_irqrestore(&atchan->lock, flags);
@ -1214,7 +1256,6 @@ static int atc_alloc_chan_resources(struct dma_chan *chan)
spin_lock_irqsave(&atchan->lock, flags); spin_lock_irqsave(&atchan->lock, flags);
atchan->descs_allocated = i; atchan->descs_allocated = i;
atchan->remain_desc = 0;
list_splice(&tmp_list, &atchan->free_list); list_splice(&tmp_list, &atchan->free_list);
dma_cookie_init(chan); dma_cookie_init(chan);
spin_unlock_irqrestore(&atchan->lock, flags); spin_unlock_irqrestore(&atchan->lock, flags);
@ -1257,7 +1298,6 @@ static void atc_free_chan_resources(struct dma_chan *chan)
list_splice_init(&atchan->free_list, &list); list_splice_init(&atchan->free_list, &list);
atchan->descs_allocated = 0; atchan->descs_allocated = 0;
atchan->status = 0; atchan->status = 0;
atchan->remain_desc = 0;
dev_vdbg(chan2dev(chan), "free_chan_resources: done\n"); dev_vdbg(chan2dev(chan), "free_chan_resources: done\n");
} }

View file

@ -181,8 +181,9 @@ struct at_lli {
* @at_lli: hardware lli structure * @at_lli: hardware lli structure
* @txd: support for the async_tx api * @txd: support for the async_tx api
* @desc_node: node on the channed descriptors list * @desc_node: node on the channed descriptors list
* @len: total transaction bytecount * @len: descriptor byte count
* @tx_width: transfer width * @tx_width: transfer width
* @total_len: total transaction byte count
*/ */
struct at_desc { struct at_desc {
/* FIRST values the hardware uses */ /* FIRST values the hardware uses */
@ -194,6 +195,7 @@ struct at_desc {
struct list_head desc_node; struct list_head desc_node;
size_t len; size_t len;
u32 tx_width; u32 tx_width;
size_t total_len;
}; };
static inline struct at_desc * static inline struct at_desc *
@ -213,7 +215,6 @@ txd_to_at_desc(struct dma_async_tx_descriptor *txd)
enum atc_status { enum atc_status {
ATC_IS_ERROR = 0, ATC_IS_ERROR = 0,
ATC_IS_PAUSED = 1, ATC_IS_PAUSED = 1,
ATC_IS_BTC = 2,
ATC_IS_CYCLIC = 24, ATC_IS_CYCLIC = 24,
}; };
@ -231,7 +232,6 @@ enum atc_status {
* @save_cfg: configuration register that is saved on suspend/resume cycle * @save_cfg: configuration register that is saved on suspend/resume cycle
* @save_dscr: for cyclic operations, preserve next descriptor address in * @save_dscr: for cyclic operations, preserve next descriptor address in
* the cyclic list on suspend/resume cycle * the cyclic list on suspend/resume cycle
* @remain_desc: to save remain desc length
* @dma_sconfig: configuration for slave transfers, passed via * @dma_sconfig: configuration for slave transfers, passed via
* .device_config * .device_config
* @lock: serializes enqueue/dequeue operations to descriptors lists * @lock: serializes enqueue/dequeue operations to descriptors lists
@ -251,7 +251,6 @@ struct at_dma_chan {
struct tasklet_struct tasklet; struct tasklet_struct tasklet;
u32 save_cfg; u32 save_cfg;
u32 save_dscr; u32 save_dscr;
u32 remain_desc;
struct dma_slave_config dma_sconfig; struct dma_slave_config dma_sconfig;
spinlock_t lock; spinlock_t lock;

View file

@ -26,6 +26,8 @@
#include "internal.h" #include "internal.h"
#define DRV_NAME "dw_dmac"
static struct dma_chan *dw_dma_of_xlate(struct of_phandle_args *dma_spec, static struct dma_chan *dw_dma_of_xlate(struct of_phandle_args *dma_spec,
struct of_dma *ofdma) struct of_dma *ofdma)
{ {
@ -284,7 +286,7 @@ static struct platform_driver dw_driver = {
.remove = dw_remove, .remove = dw_remove,
.shutdown = dw_shutdown, .shutdown = dw_shutdown,
.driver = { .driver = {
.name = "dw_dmac", .name = DRV_NAME,
.pm = &dw_dev_pm_ops, .pm = &dw_dev_pm_ops,
.of_match_table = of_match_ptr(dw_dma_of_id_table), .of_match_table = of_match_ptr(dw_dma_of_id_table),
.acpi_match_table = ACPI_PTR(dw_dma_acpi_id_table), .acpi_match_table = ACPI_PTR(dw_dma_acpi_id_table),
@ -305,3 +307,4 @@ module_exit(dw_exit);
MODULE_LICENSE("GPL v2"); MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller platform driver"); MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller platform driver");
MODULE_ALIAS("platform:" DRV_NAME);

View file

@ -531,6 +531,10 @@ static int sdma_run_channel0(struct sdma_engine *sdma)
dev_err(sdma->dev, "Timeout waiting for CH0 ready\n"); dev_err(sdma->dev, "Timeout waiting for CH0 ready\n");
} }
/* Set bits of CONFIG register with dynamic context switching */
if (readl(sdma->regs + SDMA_H_CONFIG) == 0)
writel_relaxed(SDMA_H_CONFIG_CSM, sdma->regs + SDMA_H_CONFIG);
return ret ? 0 : -ETIMEDOUT; return ret ? 0 : -ETIMEDOUT;
} }
@ -1394,9 +1398,6 @@ static int sdma_init(struct sdma_engine *sdma)
writel_relaxed(ccb_phys, sdma->regs + SDMA_H_C0PTR); writel_relaxed(ccb_phys, sdma->regs + SDMA_H_C0PTR);
/* Set bits of CONFIG register with given context switching mode */
writel_relaxed(SDMA_H_CONFIG_CSM, sdma->regs + SDMA_H_CONFIG);
/* Initializes channel's priorities */ /* Initializes channel's priorities */
sdma_set_channel_priority(&sdma->channel[0], 7); sdma_set_channel_priority(&sdma->channel[0], 7);

View file

@ -645,6 +645,7 @@ static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
pr_debug(" sdma queue id: %d\n", q->properties.sdma_queue_id); pr_debug(" sdma queue id: %d\n", q->properties.sdma_queue_id);
pr_debug(" sdma engine id: %d\n", q->properties.sdma_engine_id); pr_debug(" sdma engine id: %d\n", q->properties.sdma_engine_id);
init_sdma_vm(dqm, q, qpd);
retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj, retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
&q->gart_mqd_addr, &q->properties); &q->gart_mqd_addr, &q->properties);
if (retval != 0) { if (retval != 0) {
@ -652,7 +653,14 @@ static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
return retval; return retval;
} }
init_sdma_vm(dqm, q, qpd); retval = mqd->load_mqd(mqd, q->mqd, 0,
0, NULL);
if (retval != 0) {
deallocate_sdma_queue(dqm, q->sdma_id);
mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
return retval;
}
return 0; return 0;
} }

View file

@ -44,7 +44,7 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
BUG_ON(!kq || !dev); BUG_ON(!kq || !dev);
BUG_ON(type != KFD_QUEUE_TYPE_DIQ && type != KFD_QUEUE_TYPE_HIQ); BUG_ON(type != KFD_QUEUE_TYPE_DIQ && type != KFD_QUEUE_TYPE_HIQ);
pr_debug("kfd: In func %s initializing queue type %d size %d\n", pr_debug("amdkfd: In func %s initializing queue type %d size %d\n",
__func__, KFD_QUEUE_TYPE_HIQ, queue_size); __func__, KFD_QUEUE_TYPE_HIQ, queue_size);
nop.opcode = IT_NOP; nop.opcode = IT_NOP;
@ -69,12 +69,16 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
prop.doorbell_ptr = kfd_get_kernel_doorbell(dev, &prop.doorbell_off); prop.doorbell_ptr = kfd_get_kernel_doorbell(dev, &prop.doorbell_off);
if (prop.doorbell_ptr == NULL) if (prop.doorbell_ptr == NULL) {
pr_err("amdkfd: error init doorbell");
goto err_get_kernel_doorbell; goto err_get_kernel_doorbell;
}
retval = kfd_gtt_sa_allocate(dev, queue_size, &kq->pq); retval = kfd_gtt_sa_allocate(dev, queue_size, &kq->pq);
if (retval != 0) if (retval != 0) {
pr_err("amdkfd: error init pq queues size (%d)\n", queue_size);
goto err_pq_allocate_vidmem; goto err_pq_allocate_vidmem;
}
kq->pq_kernel_addr = kq->pq->cpu_ptr; kq->pq_kernel_addr = kq->pq->cpu_ptr;
kq->pq_gpu_addr = kq->pq->gpu_addr; kq->pq_gpu_addr = kq->pq->gpu_addr;
@ -165,10 +169,8 @@ err_rptr_allocate_vidmem:
err_eop_allocate_vidmem: err_eop_allocate_vidmem:
kfd_gtt_sa_free(dev, kq->pq); kfd_gtt_sa_free(dev, kq->pq);
err_pq_allocate_vidmem: err_pq_allocate_vidmem:
pr_err("kfd: error init pq\n");
kfd_release_kernel_doorbell(dev, prop.doorbell_ptr); kfd_release_kernel_doorbell(dev, prop.doorbell_ptr);
err_get_kernel_doorbell: err_get_kernel_doorbell:
pr_err("kfd: error init doorbell");
return false; return false;
} }
@ -187,6 +189,8 @@ static void uninitialize(struct kernel_queue *kq)
else if (kq->queue->properties.type == KFD_QUEUE_TYPE_DIQ) else if (kq->queue->properties.type == KFD_QUEUE_TYPE_DIQ)
kfd_gtt_sa_free(kq->dev, kq->fence_mem_obj); kfd_gtt_sa_free(kq->dev, kq->fence_mem_obj);
kq->mqd->uninit_mqd(kq->mqd, kq->queue->mqd, kq->queue->mqd_mem_obj);
kfd_gtt_sa_free(kq->dev, kq->rptr_mem); kfd_gtt_sa_free(kq->dev, kq->rptr_mem);
kfd_gtt_sa_free(kq->dev, kq->wptr_mem); kfd_gtt_sa_free(kq->dev, kq->wptr_mem);
kq->ops_asic_specific.uninitialize(kq); kq->ops_asic_specific.uninitialize(kq);
@ -211,7 +215,7 @@ static int acquire_packet_buffer(struct kernel_queue *kq,
queue_address = (unsigned int *)kq->pq_kernel_addr; queue_address = (unsigned int *)kq->pq_kernel_addr;
queue_size_dwords = kq->queue->properties.queue_size / sizeof(uint32_t); queue_size_dwords = kq->queue->properties.queue_size / sizeof(uint32_t);
pr_debug("kfd: In func %s\nrptr: %d\nwptr: %d\nqueue_address 0x%p\n", pr_debug("amdkfd: In func %s\nrptr: %d\nwptr: %d\nqueue_address 0x%p\n",
__func__, rptr, wptr, queue_address); __func__, rptr, wptr, queue_address);
available_size = (rptr - 1 - wptr + queue_size_dwords) % available_size = (rptr - 1 - wptr + queue_size_dwords) %
@ -296,7 +300,7 @@ struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
} }
if (kq->ops.initialize(kq, dev, type, KFD_KERNEL_QUEUE_SIZE) == false) { if (kq->ops.initialize(kq, dev, type, KFD_KERNEL_QUEUE_SIZE) == false) {
pr_err("kfd: failed to init kernel queue\n"); pr_err("amdkfd: failed to init kernel queue\n");
kfree(kq); kfree(kq);
return NULL; return NULL;
} }
@ -319,7 +323,7 @@ static __attribute__((unused)) void test_kq(struct kfd_dev *dev)
BUG_ON(!dev); BUG_ON(!dev);
pr_err("kfd: starting kernel queue test\n"); pr_err("amdkfd: starting kernel queue test\n");
kq = kernel_queue_init(dev, KFD_QUEUE_TYPE_HIQ); kq = kernel_queue_init(dev, KFD_QUEUE_TYPE_HIQ);
BUG_ON(!kq); BUG_ON(!kq);
@ -330,7 +334,7 @@ static __attribute__((unused)) void test_kq(struct kfd_dev *dev)
buffer[i] = kq->nop_packet; buffer[i] = kq->nop_packet;
kq->ops.submit_packet(kq); kq->ops.submit_packet(kq);
pr_err("kfd: ending kernel queue test\n"); pr_err("amdkfd: ending kernel queue test\n");
} }

View file

@ -50,7 +50,7 @@ config DRM_EXYNOS_DSI
config DRM_EXYNOS_DP config DRM_EXYNOS_DP
bool "EXYNOS DRM DP driver support" bool "EXYNOS DRM DP driver support"
depends on (DRM_EXYNOS_FIMD || DRM_EXYNOS7DECON) && ARCH_EXYNOS && (DRM_PTN3460=n || DRM_PTN3460=y || DRM_PTN3460=DRM_EXYNOS) depends on (DRM_EXYNOS_FIMD || DRM_EXYNOS7_DECON) && ARCH_EXYNOS && (DRM_PTN3460=n || DRM_PTN3460=y || DRM_PTN3460=DRM_EXYNOS)
default DRM_EXYNOS default DRM_EXYNOS
select DRM_PANEL select DRM_PANEL
help help

View file

@ -888,8 +888,8 @@ static int decon_probe(struct platform_device *pdev)
of_node_put(i80_if_timings); of_node_put(i80_if_timings);
ctx->regs = of_iomap(dev->of_node, 0); ctx->regs = of_iomap(dev->of_node, 0);
if (IS_ERR(ctx->regs)) { if (!ctx->regs) {
ret = PTR_ERR(ctx->regs); ret = -ENOMEM;
goto err_del_component; goto err_del_component;
} }

View file

@ -1,245 +0,0 @@
/*
* Copyright (c) 2011 Samsung Electronics Co., Ltd.
* Authors:
* Inki Dae <inki.dae@samsung.com>
* Joonyoung Shim <jy0922.shim@samsung.com>
* Seung-Woo Kim <sw0312.kim@samsung.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/exynos_drm.h>
#include "exynos_drm_drv.h"
#include "exynos_drm_encoder.h"
#include "exynos_drm_connector.h"
#define to_exynos_connector(x) container_of(x, struct exynos_drm_connector,\
drm_connector)
struct exynos_drm_connector {
struct drm_connector drm_connector;
uint32_t encoder_id;
struct exynos_drm_display *display;
};
static int exynos_drm_connector_get_modes(struct drm_connector *connector)
{
struct exynos_drm_connector *exynos_connector =
to_exynos_connector(connector);
struct exynos_drm_display *display = exynos_connector->display;
struct edid *edid = NULL;
unsigned int count = 0;
int ret;
/*
* if get_edid() exists then get_edid() callback of hdmi side
* is called to get edid data through i2c interface else
* get timing from the FIMD driver(display controller).
*
* P.S. in case of lcd panel, count is always 1 if success
* because lcd panel has only one mode.
*/
if (display->ops->get_edid) {
edid = display->ops->get_edid(display, connector);
if (IS_ERR_OR_NULL(edid)) {
ret = PTR_ERR(edid);
edid = NULL;
DRM_ERROR("Panel operation get_edid failed %d\n", ret);
goto out;
}
count = drm_add_edid_modes(connector, edid);
if (!count) {
DRM_ERROR("Add edid modes failed %d\n", count);
goto out;
}
drm_mode_connector_update_edid_property(connector, edid);
} else {
struct exynos_drm_panel_info *panel;
struct drm_display_mode *mode = drm_mode_create(connector->dev);
if (!mode) {
DRM_ERROR("failed to create a new display mode.\n");
return 0;
}
if (display->ops->get_panel)
panel = display->ops->get_panel(display);
else {
drm_mode_destroy(connector->dev, mode);
return 0;
}
drm_display_mode_from_videomode(&panel->vm, mode);
mode->width_mm = panel->width_mm;
mode->height_mm = panel->height_mm;
connector->display_info.width_mm = mode->width_mm;
connector->display_info.height_mm = mode->height_mm;
mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
drm_mode_set_name(mode);
drm_mode_probed_add(connector, mode);
count = 1;
}
out:
kfree(edid);
return count;
}
static int exynos_drm_connector_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct exynos_drm_connector *exynos_connector =
to_exynos_connector(connector);
struct exynos_drm_display *display = exynos_connector->display;
int ret = MODE_BAD;
DRM_DEBUG_KMS("%s\n", __FILE__);
if (display->ops->check_mode)
if (!display->ops->check_mode(display, mode))
ret = MODE_OK;
return ret;
}
static struct drm_encoder *exynos_drm_best_encoder(
struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct exynos_drm_connector *exynos_connector =
to_exynos_connector(connector);
return drm_encoder_find(dev, exynos_connector->encoder_id);
}
static struct drm_connector_helper_funcs exynos_connector_helper_funcs = {
.get_modes = exynos_drm_connector_get_modes,
.mode_valid = exynos_drm_connector_mode_valid,
.best_encoder = exynos_drm_best_encoder,
};
static int exynos_drm_connector_fill_modes(struct drm_connector *connector,
unsigned int max_width, unsigned int max_height)
{
struct exynos_drm_connector *exynos_connector =
to_exynos_connector(connector);
struct exynos_drm_display *display = exynos_connector->display;
unsigned int width, height;
width = max_width;
height = max_height;
/*
* if specific driver want to find desired_mode using maxmum
* resolution then get max width and height from that driver.
*/
if (display->ops->get_max_resol)
display->ops->get_max_resol(display, &width, &height);
return drm_helper_probe_single_connector_modes(connector, width,
height);
}
/* get detection status of display device. */
static enum drm_connector_status
exynos_drm_connector_detect(struct drm_connector *connector, bool force)
{
struct exynos_drm_connector *exynos_connector =
to_exynos_connector(connector);
struct exynos_drm_display *display = exynos_connector->display;
enum drm_connector_status status = connector_status_disconnected;
if (display->ops->is_connected) {
if (display->ops->is_connected(display))
status = connector_status_connected;
else
status = connector_status_disconnected;
}
return status;
}
static void exynos_drm_connector_destroy(struct drm_connector *connector)
{
struct exynos_drm_connector *exynos_connector =
to_exynos_connector(connector);
drm_connector_unregister(connector);
drm_connector_cleanup(connector);
kfree(exynos_connector);
}
static struct drm_connector_funcs exynos_connector_funcs = {
.dpms = drm_helper_connector_dpms,
.fill_modes = exynos_drm_connector_fill_modes,
.detect = exynos_drm_connector_detect,
.destroy = exynos_drm_connector_destroy,
};
struct drm_connector *exynos_drm_connector_create(struct drm_device *dev,
struct drm_encoder *encoder)
{
struct exynos_drm_connector *exynos_connector;
struct exynos_drm_display *display = exynos_drm_get_display(encoder);
struct drm_connector *connector;
int type;
int err;
exynos_connector = kzalloc(sizeof(*exynos_connector), GFP_KERNEL);
if (!exynos_connector)
return NULL;
connector = &exynos_connector->drm_connector;
switch (display->type) {
case EXYNOS_DISPLAY_TYPE_HDMI:
type = DRM_MODE_CONNECTOR_HDMIA;
connector->interlace_allowed = true;
connector->polled = DRM_CONNECTOR_POLL_HPD;
break;
case EXYNOS_DISPLAY_TYPE_VIDI:
type = DRM_MODE_CONNECTOR_VIRTUAL;
connector->polled = DRM_CONNECTOR_POLL_HPD;
break;
default:
type = DRM_MODE_CONNECTOR_Unknown;
break;
}
drm_connector_init(dev, connector, &exynos_connector_funcs, type);
drm_connector_helper_add(connector, &exynos_connector_helper_funcs);
err = drm_connector_register(connector);
if (err)
goto err_connector;
exynos_connector->encoder_id = encoder->base.id;
exynos_connector->display = display;
connector->dpms = DRM_MODE_DPMS_OFF;
connector->encoder = encoder;
err = drm_mode_connector_attach_encoder(connector, encoder);
if (err) {
DRM_ERROR("failed to attach a connector to a encoder\n");
goto err_sysfs;
}
DRM_DEBUG_KMS("connector has been created\n");
return connector;
err_sysfs:
drm_connector_unregister(connector);
err_connector:
drm_connector_cleanup(connector);
kfree(exynos_connector);
return NULL;
}

View file

@ -1,20 +0,0 @@
/*
* Copyright (c) 2011 Samsung Electronics Co., Ltd.
* Authors:
* Inki Dae <inki.dae@samsung.com>
* Joonyoung Shim <jy0922.shim@samsung.com>
* Seung-Woo Kim <sw0312.kim@samsung.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#ifndef _EXYNOS_DRM_CONNECTOR_H_
#define _EXYNOS_DRM_CONNECTOR_H_
struct drm_connector *exynos_drm_connector_create(struct drm_device *dev,
struct drm_encoder *encoder);
#endif

View file

@ -284,14 +284,9 @@ static void fimd_clear_channel(struct fimd_context *ctx)
} }
} }
static int fimd_ctx_initialize(struct fimd_context *ctx, static int fimd_iommu_attach_devices(struct fimd_context *ctx,
struct drm_device *drm_dev) struct drm_device *drm_dev)
{ {
struct exynos_drm_private *priv;
priv = drm_dev->dev_private;
ctx->drm_dev = drm_dev;
ctx->pipe = priv->pipe++;
/* attach this sub driver to iommu mapping if supported. */ /* attach this sub driver to iommu mapping if supported. */
if (is_drm_iommu_supported(ctx->drm_dev)) { if (is_drm_iommu_supported(ctx->drm_dev)) {
@ -313,7 +308,7 @@ static int fimd_ctx_initialize(struct fimd_context *ctx,
return 0; return 0;
} }
static void fimd_ctx_remove(struct fimd_context *ctx) static void fimd_iommu_detach_devices(struct fimd_context *ctx)
{ {
/* detach this sub driver from iommu mapping if supported. */ /* detach this sub driver from iommu mapping if supported. */
if (is_drm_iommu_supported(ctx->drm_dev)) if (is_drm_iommu_supported(ctx->drm_dev))
@ -1056,25 +1051,23 @@ static int fimd_bind(struct device *dev, struct device *master, void *data)
{ {
struct fimd_context *ctx = dev_get_drvdata(dev); struct fimd_context *ctx = dev_get_drvdata(dev);
struct drm_device *drm_dev = data; struct drm_device *drm_dev = data;
struct exynos_drm_private *priv = drm_dev->dev_private;
int ret; int ret;
ret = fimd_ctx_initialize(ctx, drm_dev); ctx->drm_dev = drm_dev;
if (ret) { ctx->pipe = priv->pipe++;
DRM_ERROR("fimd_ctx_initialize failed.\n");
return ret;
}
ctx->crtc = exynos_drm_crtc_create(drm_dev, ctx->pipe, ctx->crtc = exynos_drm_crtc_create(drm_dev, ctx->pipe,
EXYNOS_DISPLAY_TYPE_LCD, EXYNOS_DISPLAY_TYPE_LCD,
&fimd_crtc_ops, ctx); &fimd_crtc_ops, ctx);
if (IS_ERR(ctx->crtc)) {
fimd_ctx_remove(ctx);
return PTR_ERR(ctx->crtc);
}
if (ctx->display) if (ctx->display)
exynos_drm_create_enc_conn(drm_dev, ctx->display); exynos_drm_create_enc_conn(drm_dev, ctx->display);
ret = fimd_iommu_attach_devices(ctx, drm_dev);
if (ret)
return ret;
return 0; return 0;
} }
@ -1086,10 +1079,10 @@ static void fimd_unbind(struct device *dev, struct device *master,
fimd_dpms(ctx->crtc, DRM_MODE_DPMS_OFF); fimd_dpms(ctx->crtc, DRM_MODE_DPMS_OFF);
fimd_iommu_detach_devices(ctx);
if (ctx->display) if (ctx->display)
exynos_dpi_remove(ctx->display); exynos_dpi_remove(ctx->display);
fimd_ctx_remove(ctx);
} }
static const struct component_ops fimd_component_ops = { static const struct component_ops fimd_component_ops = {

View file

@ -175,7 +175,7 @@ static int exynos_disable_plane(struct drm_plane *plane)
struct exynos_drm_plane *exynos_plane = to_exynos_plane(plane); struct exynos_drm_plane *exynos_plane = to_exynos_plane(plane);
struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(plane->crtc); struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(plane->crtc);
if (exynos_crtc->ops->win_disable) if (exynos_crtc && exynos_crtc->ops->win_disable)
exynos_crtc->ops->win_disable(exynos_crtc, exynos_crtc->ops->win_disable(exynos_crtc,
exynos_plane->zpos); exynos_plane->zpos);

View file

@ -37,6 +37,7 @@
#include <drm/i915_drm.h> #include <drm/i915_drm.h>
#include "i915_drv.h" #include "i915_drv.h"
#include "i915_trace.h" #include "i915_trace.h"
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h> #include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_helper.h> #include <drm/drm_dp_helper.h>
#include <drm/drm_crtc_helper.h> #include <drm/drm_crtc_helper.h>
@ -2416,6 +2417,14 @@ out_unref_obj:
return false; return false;
} }
/* Update plane->state->fb to match plane->fb after driver-internal updates */
static void
update_state_fb(struct drm_plane *plane)
{
if (plane->fb != plane->state->fb)
drm_atomic_set_fb_for_plane(plane->state, plane->fb);
}
static void static void
intel_find_plane_obj(struct intel_crtc *intel_crtc, intel_find_plane_obj(struct intel_crtc *intel_crtc,
struct intel_initial_plane_config *plane_config) struct intel_initial_plane_config *plane_config)
@ -2462,6 +2471,8 @@ intel_find_plane_obj(struct intel_crtc *intel_crtc,
break; break;
} }
} }
update_state_fb(intel_crtc->base.primary);
} }
static void i9xx_update_primary_plane(struct drm_crtc *crtc, static void i9xx_update_primary_plane(struct drm_crtc *crtc,
@ -6602,6 +6613,10 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
struct drm_framebuffer *fb; struct drm_framebuffer *fb;
struct intel_framebuffer *intel_fb; struct intel_framebuffer *intel_fb;
val = I915_READ(DSPCNTR(plane));
if (!(val & DISPLAY_PLANE_ENABLE))
return;
intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
if (!intel_fb) { if (!intel_fb) {
DRM_DEBUG_KMS("failed to alloc fb\n"); DRM_DEBUG_KMS("failed to alloc fb\n");
@ -6610,8 +6625,6 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
fb = &intel_fb->base; fb = &intel_fb->base;
val = I915_READ(DSPCNTR(plane));
if (INTEL_INFO(dev)->gen >= 4) if (INTEL_INFO(dev)->gen >= 4)
if (val & DISPPLANE_TILED) if (val & DISPPLANE_TILED)
plane_config->tiling = I915_TILING_X; plane_config->tiling = I915_TILING_X;
@ -6650,6 +6663,7 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
plane_config->size); plane_config->size);
crtc->base.primary->fb = fb; crtc->base.primary->fb = fb;
update_state_fb(crtc->base.primary);
} }
static void chv_crtc_clock_get(struct intel_crtc *crtc, static void chv_crtc_clock_get(struct intel_crtc *crtc,
@ -7643,6 +7657,9 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
fb = &intel_fb->base; fb = &intel_fb->base;
val = I915_READ(PLANE_CTL(pipe, 0)); val = I915_READ(PLANE_CTL(pipe, 0));
if (!(val & PLANE_CTL_ENABLE))
goto error;
if (val & PLANE_CTL_TILED_MASK) if (val & PLANE_CTL_TILED_MASK)
plane_config->tiling = I915_TILING_X; plane_config->tiling = I915_TILING_X;
@ -7687,6 +7704,7 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
plane_config->size); plane_config->size);
crtc->base.primary->fb = fb; crtc->base.primary->fb = fb;
update_state_fb(crtc->base.primary);
return; return;
error: error:
@ -7730,6 +7748,10 @@ ironlake_get_initial_plane_config(struct intel_crtc *crtc,
struct drm_framebuffer *fb; struct drm_framebuffer *fb;
struct intel_framebuffer *intel_fb; struct intel_framebuffer *intel_fb;
val = I915_READ(DSPCNTR(pipe));
if (!(val & DISPLAY_PLANE_ENABLE))
return;
intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
if (!intel_fb) { if (!intel_fb) {
DRM_DEBUG_KMS("failed to alloc fb\n"); DRM_DEBUG_KMS("failed to alloc fb\n");
@ -7738,8 +7760,6 @@ ironlake_get_initial_plane_config(struct intel_crtc *crtc,
fb = &intel_fb->base; fb = &intel_fb->base;
val = I915_READ(DSPCNTR(pipe));
if (INTEL_INFO(dev)->gen >= 4) if (INTEL_INFO(dev)->gen >= 4)
if (val & DISPPLANE_TILED) if (val & DISPPLANE_TILED)
plane_config->tiling = I915_TILING_X; plane_config->tiling = I915_TILING_X;
@ -7778,6 +7798,7 @@ ironlake_get_initial_plane_config(struct intel_crtc *crtc,
plane_config->size); plane_config->size);
crtc->base.primary->fb = fb; crtc->base.primary->fb = fb;
update_state_fb(crtc->base.primary);
} }
static bool ironlake_get_pipe_config(struct intel_crtc *crtc, static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
@ -9816,6 +9837,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
drm_gem_object_reference(&obj->base); drm_gem_object_reference(&obj->base);
crtc->primary->fb = fb; crtc->primary->fb = fb;
update_state_fb(crtc->primary);
work->pending_flip_obj = obj; work->pending_flip_obj = obj;
@ -9884,6 +9906,7 @@ cleanup_unpin:
cleanup_pending: cleanup_pending:
atomic_dec(&intel_crtc->unpin_work_count); atomic_dec(&intel_crtc->unpin_work_count);
crtc->primary->fb = old_fb; crtc->primary->fb = old_fb;
update_state_fb(crtc->primary);
drm_gem_object_unreference(&work->old_fb_obj->base); drm_gem_object_unreference(&work->old_fb_obj->base);
drm_gem_object_unreference(&obj->base); drm_gem_object_unreference(&obj->base);
mutex_unlock(&dev->struct_mutex); mutex_unlock(&dev->struct_mutex);
@ -13718,6 +13741,7 @@ void intel_modeset_gem_init(struct drm_device *dev)
to_intel_crtc(c)->pipe); to_intel_crtc(c)->pipe);
drm_framebuffer_unreference(c->primary->fb); drm_framebuffer_unreference(c->primary->fb);
c->primary->fb = NULL; c->primary->fb = NULL;
update_state_fb(c->primary);
} }
} }
mutex_unlock(&dev->struct_mutex); mutex_unlock(&dev->struct_mutex);

View file

@ -340,11 +340,13 @@ nvkm_devobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
/* switch mmio to cpu's native endianness */ /* switch mmio to cpu's native endianness */
#ifndef __BIG_ENDIAN #ifndef __BIG_ENDIAN
if (ioread32_native(map + 0x000004) != 0x00000000) if (ioread32_native(map + 0x000004) != 0x00000000) {
#else #else
if (ioread32_native(map + 0x000004) == 0x00000000) if (ioread32_native(map + 0x000004) == 0x00000000) {
#endif #endif
iowrite32_native(0x01000001, map + 0x000004); iowrite32_native(0x01000001, map + 0x000004);
ioread32_native(map);
}
/* read boot0 and strapping information */ /* read boot0 and strapping information */
boot0 = ioread32_native(map + 0x000000); boot0 = ioread32_native(map + 0x000000);

View file

@ -140,6 +140,49 @@ gm100_identify(struct nvkm_device *device)
device->oclass[NVDEV_ENGINE_MSVLD ] = &gk104_msvld_oclass; device->oclass[NVDEV_ENGINE_MSVLD ] = &gk104_msvld_oclass;
device->oclass[NVDEV_ENGINE_MSPDEC ] = &gk104_mspdec_oclass; device->oclass[NVDEV_ENGINE_MSPDEC ] = &gk104_mspdec_oclass;
device->oclass[NVDEV_ENGINE_MSPPP ] = &gf100_msppp_oclass; device->oclass[NVDEV_ENGINE_MSPPP ] = &gf100_msppp_oclass;
#endif
break;
case 0x126:
device->cname = "GM206";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = gk104_gpio_oclass;
device->oclass[NVDEV_SUBDEV_I2C ] = gm204_i2c_oclass;
device->oclass[NVDEV_SUBDEV_FUSE ] = &gm107_fuse_oclass;
#if 0
/* looks to be some non-trivial changes */
device->oclass[NVDEV_SUBDEV_CLK ] = &gk104_clk_oclass;
/* priv ring says no to 0x10eb14 writes */
device->oclass[NVDEV_SUBDEV_THERM ] = &gm107_therm_oclass;
#endif
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = gm204_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = gk20a_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = gf100_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &gk20a_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = gm107_fb_oclass;
device->oclass[NVDEV_SUBDEV_LTC ] = gm107_ltc_oclass;
device->oclass[NVDEV_SUBDEV_IBUS ] = &gk104_ibus_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_MMU ] = &gf100_mmu_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &gf100_bar_oclass;
device->oclass[NVDEV_SUBDEV_PMU ] = gk208_pmu_oclass;
#if 0
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
#endif
device->oclass[NVDEV_ENGINE_DMAOBJ ] = gf110_dmaeng_oclass;
#if 0
device->oclass[NVDEV_ENGINE_FIFO ] = gk208_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = gf100_sw_oclass;
device->oclass[NVDEV_ENGINE_GR ] = gm107_gr_oclass;
#endif
device->oclass[NVDEV_ENGINE_DISP ] = gm204_disp_oclass;
#if 0
device->oclass[NVDEV_ENGINE_CE0 ] = &gm204_ce0_oclass;
device->oclass[NVDEV_ENGINE_CE1 ] = &gm204_ce1_oclass;
device->oclass[NVDEV_ENGINE_CE2 ] = &gm204_ce2_oclass;
device->oclass[NVDEV_ENGINE_MSVLD ] = &gk104_msvld_oclass;
device->oclass[NVDEV_ENGINE_MSPDEC ] = &gk104_mspdec_oclass;
device->oclass[NVDEV_ENGINE_MSPPP ] = &gf100_msppp_oclass;
#endif #endif
break; break;
default: default:

View file

@ -502,72 +502,57 @@ nv04_fifo_intr(struct nvkm_subdev *subdev)
{ {
struct nvkm_device *device = nv_device(subdev); struct nvkm_device *device = nv_device(subdev);
struct nv04_fifo_priv *priv = (void *)subdev; struct nv04_fifo_priv *priv = (void *)subdev;
uint32_t status, reassign; u32 mask = nv_rd32(priv, NV03_PFIFO_INTR_EN_0);
int cnt = 0; u32 stat = nv_rd32(priv, NV03_PFIFO_INTR_0) & mask;
u32 reassign, chid, get, sem;
reassign = nv_rd32(priv, NV03_PFIFO_CACHES) & 1; reassign = nv_rd32(priv, NV03_PFIFO_CACHES) & 1;
while ((status = nv_rd32(priv, NV03_PFIFO_INTR_0)) && (cnt++ < 100)) { nv_wr32(priv, NV03_PFIFO_CACHES, 0);
uint32_t chid, get;
nv_wr32(priv, NV03_PFIFO_CACHES, 0); chid = nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH1) & priv->base.max;
get = nv_rd32(priv, NV03_PFIFO_CACHE1_GET);
chid = nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH1) & priv->base.max; if (stat & NV_PFIFO_INTR_CACHE_ERROR) {
get = nv_rd32(priv, NV03_PFIFO_CACHE1_GET); nv04_fifo_cache_error(device, priv, chid, get);
stat &= ~NV_PFIFO_INTR_CACHE_ERROR;
if (status & NV_PFIFO_INTR_CACHE_ERROR) {
nv04_fifo_cache_error(device, priv, chid, get);
status &= ~NV_PFIFO_INTR_CACHE_ERROR;
}
if (status & NV_PFIFO_INTR_DMA_PUSHER) {
nv04_fifo_dma_pusher(device, priv, chid);
status &= ~NV_PFIFO_INTR_DMA_PUSHER;
}
if (status & NV_PFIFO_INTR_SEMAPHORE) {
uint32_t sem;
status &= ~NV_PFIFO_INTR_SEMAPHORE;
nv_wr32(priv, NV03_PFIFO_INTR_0,
NV_PFIFO_INTR_SEMAPHORE);
sem = nv_rd32(priv, NV10_PFIFO_CACHE1_SEMAPHORE);
nv_wr32(priv, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1);
nv_wr32(priv, NV03_PFIFO_CACHE1_GET, get + 4);
nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);
}
if (device->card_type == NV_50) {
if (status & 0x00000010) {
status &= ~0x00000010;
nv_wr32(priv, 0x002100, 0x00000010);
}
if (status & 0x40000000) {
nv_wr32(priv, 0x002100, 0x40000000);
nvkm_fifo_uevent(&priv->base);
status &= ~0x40000000;
}
}
if (status) {
nv_warn(priv, "unknown intr 0x%08x, ch %d\n",
status, chid);
nv_wr32(priv, NV03_PFIFO_INTR_0, status);
status = 0;
}
nv_wr32(priv, NV03_PFIFO_CACHES, reassign);
} }
if (status) { if (stat & NV_PFIFO_INTR_DMA_PUSHER) {
nv_error(priv, "still angry after %d spins, halt\n", cnt); nv04_fifo_dma_pusher(device, priv, chid);
nv_wr32(priv, 0x002140, 0); stat &= ~NV_PFIFO_INTR_DMA_PUSHER;
nv_wr32(priv, 0x000140, 0);
} }
nv_wr32(priv, 0x000100, 0x00000100); if (stat & NV_PFIFO_INTR_SEMAPHORE) {
stat &= ~NV_PFIFO_INTR_SEMAPHORE;
nv_wr32(priv, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_SEMAPHORE);
sem = nv_rd32(priv, NV10_PFIFO_CACHE1_SEMAPHORE);
nv_wr32(priv, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1);
nv_wr32(priv, NV03_PFIFO_CACHE1_GET, get + 4);
nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);
}
if (device->card_type == NV_50) {
if (stat & 0x00000010) {
stat &= ~0x00000010;
nv_wr32(priv, 0x002100, 0x00000010);
}
if (stat & 0x40000000) {
nv_wr32(priv, 0x002100, 0x40000000);
nvkm_fifo_uevent(&priv->base);
stat &= ~0x40000000;
}
}
if (stat) {
nv_warn(priv, "unknown intr 0x%08x\n", stat);
nv_mask(priv, NV03_PFIFO_INTR_EN_0, stat, 0x00000000);
nv_wr32(priv, NV03_PFIFO_INTR_0, stat);
}
nv_wr32(priv, NV03_PFIFO_CACHES, reassign);
} }
static int static int

View file

@ -1032,9 +1032,9 @@ gf100_grctx_generate_bundle(struct gf100_grctx *info)
const int s = 8; const int s = 8;
const int b = mmio_vram(info, impl->bundle_size, (1 << s), access); const int b = mmio_vram(info, impl->bundle_size, (1 << s), access);
mmio_refn(info, 0x408004, 0x00000000, s, b); mmio_refn(info, 0x408004, 0x00000000, s, b);
mmio_refn(info, 0x408008, 0x80000000 | (impl->bundle_size >> s), 0, b); mmio_wr32(info, 0x408008, 0x80000000 | (impl->bundle_size >> s));
mmio_refn(info, 0x418808, 0x00000000, s, b); mmio_refn(info, 0x418808, 0x00000000, s, b);
mmio_refn(info, 0x41880c, 0x80000000 | (impl->bundle_size >> s), 0, b); mmio_wr32(info, 0x41880c, 0x80000000 | (impl->bundle_size >> s));
} }
void void

View file

@ -851,9 +851,9 @@ gk104_grctx_generate_bundle(struct gf100_grctx *info)
const int s = 8; const int s = 8;
const int b = mmio_vram(info, impl->bundle_size, (1 << s), access); const int b = mmio_vram(info, impl->bundle_size, (1 << s), access);
mmio_refn(info, 0x408004, 0x00000000, s, b); mmio_refn(info, 0x408004, 0x00000000, s, b);
mmio_refn(info, 0x408008, 0x80000000 | (impl->bundle_size >> s), 0, b); mmio_wr32(info, 0x408008, 0x80000000 | (impl->bundle_size >> s));
mmio_refn(info, 0x418808, 0x00000000, s, b); mmio_refn(info, 0x418808, 0x00000000, s, b);
mmio_refn(info, 0x41880c, 0x80000000 | (impl->bundle_size >> s), 0, b); mmio_wr32(info, 0x41880c, 0x80000000 | (impl->bundle_size >> s));
mmio_wr32(info, 0x4064c8, (state_limit << 16) | token_limit); mmio_wr32(info, 0x4064c8, (state_limit << 16) | token_limit);
} }

View file

@ -871,9 +871,9 @@ gm107_grctx_generate_bundle(struct gf100_grctx *info)
const int s = 8; const int s = 8;
const int b = mmio_vram(info, impl->bundle_size, (1 << s), access); const int b = mmio_vram(info, impl->bundle_size, (1 << s), access);
mmio_refn(info, 0x408004, 0x00000000, s, b); mmio_refn(info, 0x408004, 0x00000000, s, b);
mmio_refn(info, 0x408008, 0x80000000 | (impl->bundle_size >> s), 0, b); mmio_wr32(info, 0x408008, 0x80000000 | (impl->bundle_size >> s));
mmio_refn(info, 0x418e24, 0x00000000, s, b); mmio_refn(info, 0x418e24, 0x00000000, s, b);
mmio_refn(info, 0x418e28, 0x80000000 | (impl->bundle_size >> s), 0, b); mmio_wr32(info, 0x418e28, 0x80000000 | (impl->bundle_size >> s));
mmio_wr32(info, 0x4064c8, (state_limit << 16) | token_limit); mmio_wr32(info, 0x4064c8, (state_limit << 16) | token_limit);
} }

View file

@ -74,7 +74,11 @@ dcb_i2c_parse(struct nvkm_bios *bios, u8 idx, struct dcb_i2c_entry *info)
u16 ent = dcb_i2c_entry(bios, idx, &ver, &len); u16 ent = dcb_i2c_entry(bios, idx, &ver, &len);
if (ent) { if (ent) {
if (ver >= 0x41) { if (ver >= 0x41) {
if (!(nv_ro32(bios, ent) & 0x80000000)) u32 ent_value = nv_ro32(bios, ent);
u8 i2c_port = (ent_value >> 27) & 0x1f;
u8 dpaux_port = (ent_value >> 22) & 0x1f;
/* value 0x1f means unused according to DCB 4.x spec */
if (i2c_port == 0x1f && dpaux_port == 0x1f)
info->type = DCB_I2C_UNUSED; info->type = DCB_I2C_UNUSED;
else else
info->type = DCB_I2C_PMGR; info->type = DCB_I2C_PMGR;

View file

@ -153,7 +153,7 @@ void radeon_kfd_device_init(struct radeon_device *rdev)
.compute_vmid_bitmap = 0xFF00, .compute_vmid_bitmap = 0xFF00,
.first_compute_pipe = 1, .first_compute_pipe = 1,
.compute_pipe_count = 8 - 1, .compute_pipe_count = 4 - 1,
}; };
radeon_doorbell_get_kfd_info(rdev, radeon_doorbell_get_kfd_info(rdev,

View file

@ -173,17 +173,6 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
else else
rbo->placements[i].lpfn = 0; rbo->placements[i].lpfn = 0;
} }
/*
* Use two-ended allocation depending on the buffer size to
* improve fragmentation quality.
* 512kb was measured as the most optimal number.
*/
if (rbo->tbo.mem.size > 512 * 1024) {
for (i = 0; i < c; i++) {
rbo->placements[i].flags |= TTM_PL_FLAG_TOPDOWN;
}
}
} }
int radeon_bo_create(struct radeon_device *rdev, int radeon_bo_create(struct radeon_device *rdev,

View file

@ -289,9 +289,16 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
struct request_queue *q = bdev_get_queue(where->bdev); struct request_queue *q = bdev_get_queue(where->bdev);
unsigned short logical_block_size = queue_logical_block_size(q); unsigned short logical_block_size = queue_logical_block_size(q);
sector_t num_sectors; sector_t num_sectors;
unsigned int uninitialized_var(special_cmd_max_sectors);
/* Reject unsupported discard requests */ /*
if ((rw & REQ_DISCARD) && !blk_queue_discard(q)) { * Reject unsupported discard and write same requests.
*/
if (rw & REQ_DISCARD)
special_cmd_max_sectors = q->limits.max_discard_sectors;
else if (rw & REQ_WRITE_SAME)
special_cmd_max_sectors = q->limits.max_write_same_sectors;
if ((rw & (REQ_DISCARD | REQ_WRITE_SAME)) && special_cmd_max_sectors == 0) {
dec_count(io, region, -EOPNOTSUPP); dec_count(io, region, -EOPNOTSUPP);
return; return;
} }
@ -317,7 +324,7 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
store_io_and_region_in_bio(bio, io, region); store_io_and_region_in_bio(bio, io, region);
if (rw & REQ_DISCARD) { if (rw & REQ_DISCARD) {
num_sectors = min_t(sector_t, q->limits.max_discard_sectors, remaining); num_sectors = min_t(sector_t, special_cmd_max_sectors, remaining);
bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT; bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;
remaining -= num_sectors; remaining -= num_sectors;
} else if (rw & REQ_WRITE_SAME) { } else if (rw & REQ_WRITE_SAME) {
@ -326,7 +333,7 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
*/ */
dp->get_page(dp, &page, &len, &offset); dp->get_page(dp, &page, &len, &offset);
bio_add_page(bio, page, logical_block_size, offset); bio_add_page(bio, page, logical_block_size, offset);
num_sectors = min_t(sector_t, q->limits.max_write_same_sectors, remaining); num_sectors = min_t(sector_t, special_cmd_max_sectors, remaining);
bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT; bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;
offset = 0; offset = 0;

View file

@ -20,6 +20,8 @@
#include <linux/log2.h> #include <linux/log2.h>
#include <linux/dm-kcopyd.h> #include <linux/dm-kcopyd.h>
#include "dm.h"
#include "dm-exception-store.h" #include "dm-exception-store.h"
#define DM_MSG_PREFIX "snapshots" #define DM_MSG_PREFIX "snapshots"
@ -290,6 +292,16 @@ struct origin {
struct list_head snapshots; struct list_head snapshots;
}; };
/*
* This structure is allocated for each origin target
*/
struct dm_origin {
struct dm_dev *dev;
struct dm_target *ti;
unsigned split_boundary;
struct list_head hash_list;
};
/* /*
* Size of the hash table for origin volumes. If we make this * Size of the hash table for origin volumes. If we make this
* the size of the minors list then it should be nearly perfect * the size of the minors list then it should be nearly perfect
@ -297,6 +309,7 @@ struct origin {
#define ORIGIN_HASH_SIZE 256 #define ORIGIN_HASH_SIZE 256
#define ORIGIN_MASK 0xFF #define ORIGIN_MASK 0xFF
static struct list_head *_origins; static struct list_head *_origins;
static struct list_head *_dm_origins;
static struct rw_semaphore _origins_lock; static struct rw_semaphore _origins_lock;
static DECLARE_WAIT_QUEUE_HEAD(_pending_exceptions_done); static DECLARE_WAIT_QUEUE_HEAD(_pending_exceptions_done);
@ -310,12 +323,22 @@ static int init_origin_hash(void)
_origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head), _origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head),
GFP_KERNEL); GFP_KERNEL);
if (!_origins) { if (!_origins) {
DMERR("unable to allocate memory"); DMERR("unable to allocate memory for _origins");
return -ENOMEM; return -ENOMEM;
} }
for (i = 0; i < ORIGIN_HASH_SIZE; i++) for (i = 0; i < ORIGIN_HASH_SIZE; i++)
INIT_LIST_HEAD(_origins + i); INIT_LIST_HEAD(_origins + i);
_dm_origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head),
GFP_KERNEL);
if (!_dm_origins) {
DMERR("unable to allocate memory for _dm_origins");
kfree(_origins);
return -ENOMEM;
}
for (i = 0; i < ORIGIN_HASH_SIZE; i++)
INIT_LIST_HEAD(_dm_origins + i);
init_rwsem(&_origins_lock); init_rwsem(&_origins_lock);
return 0; return 0;
@ -324,6 +347,7 @@ static int init_origin_hash(void)
static void exit_origin_hash(void) static void exit_origin_hash(void)
{ {
kfree(_origins); kfree(_origins);
kfree(_dm_origins);
} }
static unsigned origin_hash(struct block_device *bdev) static unsigned origin_hash(struct block_device *bdev)
@ -350,6 +374,30 @@ static void __insert_origin(struct origin *o)
list_add_tail(&o->hash_list, sl); list_add_tail(&o->hash_list, sl);
} }
static struct dm_origin *__lookup_dm_origin(struct block_device *origin)
{
struct list_head *ol;
struct dm_origin *o;
ol = &_dm_origins[origin_hash(origin)];
list_for_each_entry (o, ol, hash_list)
if (bdev_equal(o->dev->bdev, origin))
return o;
return NULL;
}
static void __insert_dm_origin(struct dm_origin *o)
{
struct list_head *sl = &_dm_origins[origin_hash(o->dev->bdev)];
list_add_tail(&o->hash_list, sl);
}
static void __remove_dm_origin(struct dm_origin *o)
{
list_del(&o->hash_list);
}
/* /*
* _origins_lock must be held when calling this function. * _origins_lock must be held when calling this function.
* Returns number of snapshots registered using the supplied cow device, plus: * Returns number of snapshots registered using the supplied cow device, plus:
@ -1840,9 +1888,40 @@ static int snapshot_preresume(struct dm_target *ti)
static void snapshot_resume(struct dm_target *ti) static void snapshot_resume(struct dm_target *ti)
{ {
struct dm_snapshot *s = ti->private; struct dm_snapshot *s = ti->private;
struct dm_snapshot *snap_src = NULL, *snap_dest = NULL; struct dm_snapshot *snap_src = NULL, *snap_dest = NULL, *snap_merging = NULL;
struct dm_origin *o;
struct mapped_device *origin_md = NULL;
bool must_restart_merging = false;
down_read(&_origins_lock); down_read(&_origins_lock);
o = __lookup_dm_origin(s->origin->bdev);
if (o)
origin_md = dm_table_get_md(o->ti->table);
if (!origin_md) {
(void) __find_snapshots_sharing_cow(s, NULL, NULL, &snap_merging);
if (snap_merging)
origin_md = dm_table_get_md(snap_merging->ti->table);
}
if (origin_md == dm_table_get_md(ti->table))
origin_md = NULL;
if (origin_md) {
if (dm_hold(origin_md))
origin_md = NULL;
}
up_read(&_origins_lock);
if (origin_md) {
dm_internal_suspend_fast(origin_md);
if (snap_merging && test_bit(RUNNING_MERGE, &snap_merging->state_bits)) {
must_restart_merging = true;
stop_merge(snap_merging);
}
}
down_read(&_origins_lock);
(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL); (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
if (snap_src && snap_dest) { if (snap_src && snap_dest) {
down_write(&snap_src->lock); down_write(&snap_src->lock);
@ -1851,8 +1930,16 @@ static void snapshot_resume(struct dm_target *ti)
up_write(&snap_dest->lock); up_write(&snap_dest->lock);
up_write(&snap_src->lock); up_write(&snap_src->lock);
} }
up_read(&_origins_lock); up_read(&_origins_lock);
if (origin_md) {
if (must_restart_merging)
start_merge(snap_merging);
dm_internal_resume_fast(origin_md);
dm_put(origin_md);
}
/* Now we have correct chunk size, reregister */ /* Now we have correct chunk size, reregister */
reregister_snapshot(s); reregister_snapshot(s);
@ -2133,11 +2220,6 @@ static int origin_write_extent(struct dm_snapshot *merging_snap,
* Origin: maps a linear range of a device, with hooks for snapshotting. * Origin: maps a linear range of a device, with hooks for snapshotting.
*/ */
struct dm_origin {
struct dm_dev *dev;
unsigned split_boundary;
};
/* /*
* Construct an origin mapping: <dev_path> * Construct an origin mapping: <dev_path>
* The context for an origin is merely a 'struct dm_dev *' * The context for an origin is merely a 'struct dm_dev *'
@ -2166,6 +2248,7 @@ static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto bad_open; goto bad_open;
} }
o->ti = ti;
ti->private = o; ti->private = o;
ti->num_flush_bios = 1; ti->num_flush_bios = 1;
@ -2180,6 +2263,7 @@ bad_alloc:
static void origin_dtr(struct dm_target *ti) static void origin_dtr(struct dm_target *ti)
{ {
struct dm_origin *o = ti->private; struct dm_origin *o = ti->private;
dm_put_device(ti, o->dev); dm_put_device(ti, o->dev);
kfree(o); kfree(o);
} }
@ -2216,6 +2300,19 @@ static void origin_resume(struct dm_target *ti)
struct dm_origin *o = ti->private; struct dm_origin *o = ti->private;
o->split_boundary = get_origin_minimum_chunksize(o->dev->bdev); o->split_boundary = get_origin_minimum_chunksize(o->dev->bdev);
down_write(&_origins_lock);
__insert_dm_origin(o);
up_write(&_origins_lock);
}
static void origin_postsuspend(struct dm_target *ti)
{
struct dm_origin *o = ti->private;
down_write(&_origins_lock);
__remove_dm_origin(o);
up_write(&_origins_lock);
} }
static void origin_status(struct dm_target *ti, status_type_t type, static void origin_status(struct dm_target *ti, status_type_t type,
@ -2258,12 +2355,13 @@ static int origin_iterate_devices(struct dm_target *ti,
static struct target_type origin_target = { static struct target_type origin_target = {
.name = "snapshot-origin", .name = "snapshot-origin",
.version = {1, 8, 1}, .version = {1, 9, 0},
.module = THIS_MODULE, .module = THIS_MODULE,
.ctr = origin_ctr, .ctr = origin_ctr,
.dtr = origin_dtr, .dtr = origin_dtr,
.map = origin_map, .map = origin_map,
.resume = origin_resume, .resume = origin_resume,
.postsuspend = origin_postsuspend,
.status = origin_status, .status = origin_status,
.merge = origin_merge, .merge = origin_merge,
.iterate_devices = origin_iterate_devices, .iterate_devices = origin_iterate_devices,
@ -2271,7 +2369,7 @@ static struct target_type origin_target = {
static struct target_type snapshot_target = { static struct target_type snapshot_target = {
.name = "snapshot", .name = "snapshot",
.version = {1, 12, 0}, .version = {1, 13, 0},
.module = THIS_MODULE, .module = THIS_MODULE,
.ctr = snapshot_ctr, .ctr = snapshot_ctr,
.dtr = snapshot_dtr, .dtr = snapshot_dtr,
@ -2285,7 +2383,7 @@ static struct target_type snapshot_target = {
static struct target_type merge_target = { static struct target_type merge_target = {
.name = dm_snapshot_merge_target_name, .name = dm_snapshot_merge_target_name,
.version = {1, 2, 0}, .version = {1, 3, 0},
.module = THIS_MODULE, .module = THIS_MODULE,
.ctr = snapshot_ctr, .ctr = snapshot_ctr,
.dtr = snapshot_dtr, .dtr = snapshot_dtr,

View file

@ -2358,17 +2358,6 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
return DM_MAPIO_REMAPPED; return DM_MAPIO_REMAPPED;
case -ENODATA: case -ENODATA:
if (get_pool_mode(tc->pool) == PM_READ_ONLY) {
/*
* This block isn't provisioned, and we have no way
* of doing so.
*/
handle_unserviceable_bio(tc->pool, bio);
cell_defer_no_holder(tc, virt_cell);
return DM_MAPIO_SUBMITTED;
}
/* fall through */
case -EWOULDBLOCK: case -EWOULDBLOCK:
thin_defer_cell(tc, virt_cell); thin_defer_cell(tc, virt_cell);
return DM_MAPIO_SUBMITTED; return DM_MAPIO_SUBMITTED;

View file

@ -2616,6 +2616,19 @@ void dm_get(struct mapped_device *md)
BUG_ON(test_bit(DMF_FREEING, &md->flags)); BUG_ON(test_bit(DMF_FREEING, &md->flags));
} }
int dm_hold(struct mapped_device *md)
{
spin_lock(&_minor_lock);
if (test_bit(DMF_FREEING, &md->flags)) {
spin_unlock(&_minor_lock);
return -EBUSY;
}
dm_get(md);
spin_unlock(&_minor_lock);
return 0;
}
EXPORT_SYMBOL_GPL(dm_hold);
const char *dm_device_name(struct mapped_device *md) const char *dm_device_name(struct mapped_device *md)
{ {
return md->name; return md->name;
@ -2638,10 +2651,16 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
if (dm_request_based(md)) if (dm_request_based(md))
flush_kthread_worker(&md->kworker); flush_kthread_worker(&md->kworker);
/*
* Take suspend_lock so that presuspend and postsuspend methods
* do not race with internal suspend.
*/
mutex_lock(&md->suspend_lock);
if (!dm_suspended_md(md)) { if (!dm_suspended_md(md)) {
dm_table_presuspend_targets(map); dm_table_presuspend_targets(map);
dm_table_postsuspend_targets(map); dm_table_postsuspend_targets(map);
} }
mutex_unlock(&md->suspend_lock);
/* dm_put_live_table must be before msleep, otherwise deadlock is possible */ /* dm_put_live_table must be before msleep, otherwise deadlock is possible */
dm_put_live_table(md, srcu_idx); dm_put_live_table(md, srcu_idx);
@ -3115,6 +3134,7 @@ void dm_internal_suspend_fast(struct mapped_device *md)
flush_workqueue(md->wq); flush_workqueue(md->wq);
dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE); dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
} }
EXPORT_SYMBOL_GPL(dm_internal_suspend_fast);
void dm_internal_resume_fast(struct mapped_device *md) void dm_internal_resume_fast(struct mapped_device *md)
{ {
@ -3126,6 +3146,7 @@ void dm_internal_resume_fast(struct mapped_device *md)
done: done:
mutex_unlock(&md->suspend_lock); mutex_unlock(&md->suspend_lock);
} }
EXPORT_SYMBOL_GPL(dm_internal_resume_fast);
/*----------------------------------------------------------------- /*-----------------------------------------------------------------
* Event notification. * Event notification.

View file

@ -5080,7 +5080,8 @@ int md_run(struct mddev *mddev)
} }
if (err) { if (err) {
mddev_detach(mddev); mddev_detach(mddev);
pers->free(mddev, mddev->private); if (mddev->private)
pers->free(mddev, mddev->private);
module_put(pers->owner); module_put(pers->owner);
bitmap_destroy(mddev); bitmap_destroy(mddev);
return err; return err;

View file

@ -467,8 +467,6 @@ static int raid0_run(struct mddev *mddev)
dump_zones(mddev); dump_zones(mddev);
ret = md_integrity_register(mddev); ret = md_integrity_register(mddev);
if (ret)
raid0_free(mddev, conf);
return ret; return ret;
} }

View file

@ -425,9 +425,10 @@ retry:
ubi_warn(ubi, "corrupted VID header at PEB %d, LEB %d:%d", ubi_warn(ubi, "corrupted VID header at PEB %d, LEB %d:%d",
pnum, vol_id, lnum); pnum, vol_id, lnum);
err = -EBADMSG; err = -EBADMSG;
} else } else {
err = -EINVAL; err = -EINVAL;
ubi_ro_mode(ubi); ubi_ro_mode(ubi);
}
} }
goto out_free; goto out_free;
} else if (err == UBI_IO_BITFLIPS) } else if (err == UBI_IO_BITFLIPS)

View file

@ -46,8 +46,7 @@ enum cx82310_status {
}; };
#define CMD_PACKET_SIZE 64 #define CMD_PACKET_SIZE 64
/* first command after power on can take around 8 seconds */ #define CMD_TIMEOUT 100
#define CMD_TIMEOUT 15000
#define CMD_REPLY_RETRY 5 #define CMD_REPLY_RETRY 5
#define CX82310_MTU 1514 #define CX82310_MTU 1514
@ -78,8 +77,9 @@ static int cx82310_cmd(struct usbnet *dev, enum cx82310_cmd cmd, bool reply,
ret = usb_bulk_msg(udev, usb_sndbulkpipe(udev, CMD_EP), buf, ret = usb_bulk_msg(udev, usb_sndbulkpipe(udev, CMD_EP), buf,
CMD_PACKET_SIZE, &actual_len, CMD_TIMEOUT); CMD_PACKET_SIZE, &actual_len, CMD_TIMEOUT);
if (ret < 0) { if (ret < 0) {
dev_err(&dev->udev->dev, "send command %#x: error %d\n", if (cmd != CMD_GET_LINK_STATUS)
cmd, ret); dev_err(&dev->udev->dev, "send command %#x: error %d\n",
cmd, ret);
goto end; goto end;
} }
@ -90,8 +90,10 @@ static int cx82310_cmd(struct usbnet *dev, enum cx82310_cmd cmd, bool reply,
buf, CMD_PACKET_SIZE, &actual_len, buf, CMD_PACKET_SIZE, &actual_len,
CMD_TIMEOUT); CMD_TIMEOUT);
if (ret < 0) { if (ret < 0) {
dev_err(&dev->udev->dev, if (cmd != CMD_GET_LINK_STATUS)
"reply receive error %d\n", ret); dev_err(&dev->udev->dev,
"reply receive error %d\n",
ret);
goto end; goto end;
} }
if (actual_len > 0) if (actual_len > 0)
@ -134,6 +136,8 @@ static int cx82310_bind(struct usbnet *dev, struct usb_interface *intf)
int ret; int ret;
char buf[15]; char buf[15];
struct usb_device *udev = dev->udev; struct usb_device *udev = dev->udev;
u8 link[3];
int timeout = 50;
/* avoid ADSL modems - continue only if iProduct is "USB NET CARD" */ /* avoid ADSL modems - continue only if iProduct is "USB NET CARD" */
if (usb_string(udev, udev->descriptor.iProduct, buf, sizeof(buf)) > 0 if (usb_string(udev, udev->descriptor.iProduct, buf, sizeof(buf)) > 0
@ -160,6 +164,20 @@ static int cx82310_bind(struct usbnet *dev, struct usb_interface *intf)
if (!dev->partial_data) if (!dev->partial_data)
return -ENOMEM; return -ENOMEM;
/* wait for firmware to become ready (indicated by the link being up) */
while (--timeout) {
ret = cx82310_cmd(dev, CMD_GET_LINK_STATUS, true, NULL, 0,
link, sizeof(link));
/* the command can time out during boot - it's not an error */
if (!ret && link[0] == 1 && link[2] == 1)
break;
msleep(500);
};
if (!timeout) {
dev_err(&udev->dev, "firmware not ready in time\n");
return -ETIMEDOUT;
}
/* enable ethernet mode (?) */ /* enable ethernet mode (?) */
ret = cx82310_cmd(dev, CMD_ETHERNET_MODE, true, "\x01", 1, NULL, 0); ret = cx82310_cmd(dev, CMD_ETHERNET_MODE, true, "\x01", 1, NULL, 0);
if (ret) { if (ret) {

View file

@ -715,13 +715,8 @@ static struct device_node *__of_find_node_by_path(struct device_node *parent,
{ {
struct device_node *child; struct device_node *child;
int len; int len;
const char *end;
end = strchr(path, ':'); len = strcspn(path, "/:");
if (!end)
end = strchrnul(path, '/');
len = end - path;
if (!len) if (!len)
return NULL; return NULL;
@ -1893,10 +1888,8 @@ void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align))
name = of_get_property(of_chosen, "linux,stdout-path", NULL); name = of_get_property(of_chosen, "linux,stdout-path", NULL);
if (IS_ENABLED(CONFIG_PPC) && !name) if (IS_ENABLED(CONFIG_PPC) && !name)
name = of_get_property(of_aliases, "stdout", NULL); name = of_get_property(of_aliases, "stdout", NULL);
if (name) { if (name)
of_stdout = of_find_node_opts_by_path(name, &of_stdout_options); of_stdout = of_find_node_opts_by_path(name, &of_stdout_options);
add_preferred_console("stdout-path", 0, NULL);
}
} }
if (!of_aliases) if (!of_aliases)

View file

@ -290,7 +290,7 @@ int of_irq_parse_one(struct device_node *device, int index, struct of_phandle_ar
struct device_node *p; struct device_node *p;
const __be32 *intspec, *tmp, *addr; const __be32 *intspec, *tmp, *addr;
u32 intsize, intlen; u32 intsize, intlen;
int i, res = -EINVAL; int i, res;
pr_debug("of_irq_parse_one: dev=%s, index=%d\n", of_node_full_name(device), index); pr_debug("of_irq_parse_one: dev=%s, index=%d\n", of_node_full_name(device), index);
@ -323,15 +323,19 @@ int of_irq_parse_one(struct device_node *device, int index, struct of_phandle_ar
/* Get size of interrupt specifier */ /* Get size of interrupt specifier */
tmp = of_get_property(p, "#interrupt-cells", NULL); tmp = of_get_property(p, "#interrupt-cells", NULL);
if (tmp == NULL) if (tmp == NULL) {
res = -EINVAL;
goto out; goto out;
}
intsize = be32_to_cpu(*tmp); intsize = be32_to_cpu(*tmp);
pr_debug(" intsize=%d intlen=%d\n", intsize, intlen); pr_debug(" intsize=%d intlen=%d\n", intsize, intlen);
/* Check index */ /* Check index */
if ((index + 1) * intsize > intlen) if ((index + 1) * intsize > intlen) {
res = -EINVAL;
goto out; goto out;
}
/* Copy intspec into irq structure */ /* Copy intspec into irq structure */
intspec += index * intsize; intspec += index * intsize;

View file

@ -97,6 +97,11 @@ static void __init of_selftest_find_node_by_name(void)
"option path test, subcase #1 failed\n"); "option path test, subcase #1 failed\n");
of_node_put(np); of_node_put(np);
np = of_find_node_opts_by_path("/testcase-data/testcase-device1:test/option", &options);
selftest(np && !strcmp("test/option", options),
"option path test, subcase #2 failed\n");
of_node_put(np);
np = of_find_node_opts_by_path("/testcase-data:testoption", NULL); np = of_find_node_opts_by_path("/testcase-data:testoption", NULL);
selftest(np, "NULL option path test failed\n"); selftest(np, "NULL option path test failed\n");
of_node_put(np); of_node_put(np);

View file

@ -69,8 +69,7 @@ config YENTA
tristate "CardBus yenta-compatible bridge support" tristate "CardBus yenta-compatible bridge support"
depends on PCI depends on PCI
select CARDBUS if !EXPERT select CARDBUS if !EXPERT
select PCCARD_NONSTATIC if PCMCIA != n && ISA select PCCARD_NONSTATIC if PCMCIA != n
select PCCARD_PCI if PCMCIA !=n && !ISA
---help--- ---help---
This option enables support for CardBus host bridges. Virtually This option enables support for CardBus host bridges. Virtually
all modern PCMCIA bridges are CardBus compatible. A "bridge" is all modern PCMCIA bridges are CardBus compatible. A "bridge" is
@ -110,8 +109,7 @@ config YENTA_TOSHIBA
config PD6729 config PD6729
tristate "Cirrus PD6729 compatible bridge support" tristate "Cirrus PD6729 compatible bridge support"
depends on PCMCIA && PCI depends on PCMCIA && PCI
select PCCARD_NONSTATIC if PCMCIA != n && ISA select PCCARD_NONSTATIC
select PCCARD_PCI if PCMCIA !=n && !ISA
help help
This provides support for the Cirrus PD6729 PCI-to-PCMCIA bridge This provides support for the Cirrus PD6729 PCI-to-PCMCIA bridge
device, found in some older laptops and PCMCIA card readers. device, found in some older laptops and PCMCIA card readers.
@ -119,8 +117,7 @@ config PD6729
config I82092 config I82092
tristate "i82092 compatible bridge support" tristate "i82092 compatible bridge support"
depends on PCMCIA && PCI depends on PCMCIA && PCI
select PCCARD_NONSTATIC if PCMCIA != n && ISA select PCCARD_NONSTATIC
select PCCARD_PCI if PCMCIA !=n && !ISA
help help
This provides support for the Intel I82092AA PCI-to-PCMCIA bridge device, This provides support for the Intel I82092AA PCI-to-PCMCIA bridge device,
found in some older laptops and more commonly in evaluation boards for the found in some older laptops and more commonly in evaluation boards for the
@ -291,9 +288,6 @@ config ELECTRA_CF
Say Y here to support the CompactFlash controller on the Say Y here to support the CompactFlash controller on the
PA Semi Electra eval board. PA Semi Electra eval board.
config PCCARD_PCI
bool
config PCCARD_NONSTATIC config PCCARD_NONSTATIC
bool bool

View file

@ -12,7 +12,6 @@ obj-$(CONFIG_PCMCIA) += pcmcia.o
pcmcia_rsrc-y += rsrc_mgr.o pcmcia_rsrc-y += rsrc_mgr.o
pcmcia_rsrc-$(CONFIG_PCCARD_NONSTATIC) += rsrc_nonstatic.o pcmcia_rsrc-$(CONFIG_PCCARD_NONSTATIC) += rsrc_nonstatic.o
pcmcia_rsrc-$(CONFIG_PCCARD_IODYN) += rsrc_iodyn.o pcmcia_rsrc-$(CONFIG_PCCARD_IODYN) += rsrc_iodyn.o
pcmcia_rsrc-$(CONFIG_PCCARD_PCI) += rsrc_pci.o
obj-$(CONFIG_PCCARD) += pcmcia_rsrc.o obj-$(CONFIG_PCCARD) += pcmcia_rsrc.o

View file

@ -1,173 +0,0 @@
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <pcmcia/ss.h>
#include <pcmcia/cistpl.h>
#include "cs_internal.h"
struct pcmcia_align_data {
unsigned long mask;
unsigned long offset;
};
static resource_size_t pcmcia_align(void *align_data,
const struct resource *res,
resource_size_t size, resource_size_t align)
{
struct pcmcia_align_data *data = align_data;
resource_size_t start;
start = (res->start & ~data->mask) + data->offset;
if (start < res->start)
start += data->mask + 1;
return start;
}
static struct resource *find_io_region(struct pcmcia_socket *s,
unsigned long base, int num,
unsigned long align)
{
struct resource *res = pcmcia_make_resource(0, num, IORESOURCE_IO,
dev_name(&s->dev));
struct pcmcia_align_data data;
int ret;
data.mask = align - 1;
data.offset = base & data.mask;
ret = pci_bus_alloc_resource(s->cb_dev->bus, res, num, 1,
base, 0, pcmcia_align, &data);
if (ret != 0) {
kfree(res);
res = NULL;
}
return res;
}
static int res_pci_find_io(struct pcmcia_socket *s, unsigned int attr,
unsigned int *base, unsigned int num,
unsigned int align, struct resource **parent)
{
int i, ret = 0;
/* Check for an already-allocated window that must conflict with
* what was asked for. It is a hack because it does not catch all
* potential conflicts, just the most obvious ones.
*/
for (i = 0; i < MAX_IO_WIN; i++) {
if (!s->io[i].res)
continue;
if (!*base)
continue;
if ((s->io[i].res->start & (align-1)) == *base)
return -EBUSY;
}
for (i = 0; i < MAX_IO_WIN; i++) {
struct resource *res = s->io[i].res;
unsigned int try;
if (res && (res->flags & IORESOURCE_BITS) !=
(attr & IORESOURCE_BITS))
continue;
if (!res) {
if (align == 0)
align = 0x10000;
res = s->io[i].res = find_io_region(s, *base, num,
align);
if (!res)
return -EINVAL;
*base = res->start;
s->io[i].res->flags =
((res->flags & ~IORESOURCE_BITS) |
(attr & IORESOURCE_BITS));
s->io[i].InUse = num;
*parent = res;
return 0;
}
/* Try to extend top of window */
try = res->end + 1;
if ((*base == 0) || (*base == try)) {
ret = adjust_resource(s->io[i].res, res->start,
resource_size(res) + num);
if (ret)
continue;
*base = try;
s->io[i].InUse += num;
*parent = res;
return 0;
}
/* Try to extend bottom of window */
try = res->start - num;
if ((*base == 0) || (*base == try)) {
ret = adjust_resource(s->io[i].res,
res->start - num,
resource_size(res) + num);
if (ret)
continue;
*base = try;
s->io[i].InUse += num;
*parent = res;
return 0;
}
}
return -EINVAL;
}
static struct resource *res_pci_find_mem(u_long base, u_long num,
u_long align, int low, struct pcmcia_socket *s)
{
struct resource *res = pcmcia_make_resource(0, num, IORESOURCE_MEM,
dev_name(&s->dev));
struct pcmcia_align_data data;
unsigned long min;
int ret;
if (align < 0x20000)
align = 0x20000;
data.mask = align - 1;
data.offset = base & data.mask;
min = 0;
if (!low)
min = 0x100000UL;
ret = pci_bus_alloc_resource(s->cb_dev->bus,
res, num, 1, min, 0,
pcmcia_align, &data);
if (ret != 0) {
kfree(res);
res = NULL;
}
return res;
}
/*
 * res_pci_init() - check that this socket can use PCI-backed resources
 *
 * Requires a CardBus bridge device and page-register support; returns
 * 0 if usable, -EOPNOTSUPP otherwise.
 */
static int res_pci_init(struct pcmcia_socket *s)
{
	if (s->cb_dev && (s->features & SS_CAP_PAGE_REGS))
		return 0;

	dev_err(&s->dev, "not supported by res_pci\n");
	return -EOPNOTSUPP;
}
/*
 * Resource operations for sockets whose windows are allocated straight
 * from the PCI bus.  No memory validation or exit hook is needed, so
 * those callbacks are left NULL.
 */
struct pccard_resource_ops pccard_nonstatic_ops = {
	.validate_mem = NULL,
	.find_io = res_pci_find_io,
	.find_mem = res_pci_find_mem,
	.init = res_pci_init,
	.exit = NULL,
};
EXPORT_SYMBOL(pccard_nonstatic_ops);

View file

@ -37,7 +37,7 @@ static int armada375_usb_phy_init(struct phy *phy)
struct armada375_cluster_phy *cluster_phy; struct armada375_cluster_phy *cluster_phy;
u32 reg; u32 reg;
cluster_phy = dev_get_drvdata(phy->dev.parent); cluster_phy = phy_get_drvdata(phy);
if (!cluster_phy) if (!cluster_phy)
return -ENODEV; return -ENODEV;
@ -131,6 +131,7 @@ static int armada375_usb_phy_probe(struct platform_device *pdev)
cluster_phy->reg = usb_cluster_base; cluster_phy->reg = usb_cluster_base;
dev_set_drvdata(dev, cluster_phy); dev_set_drvdata(dev, cluster_phy);
phy_set_drvdata(phy, cluster_phy);
phy_provider = devm_of_phy_provider_register(&pdev->dev, phy_provider = devm_of_phy_provider_register(&pdev->dev,
armada375_usb_phy_xlate); armada375_usb_phy_xlate);

View file

@ -52,7 +52,9 @@ static void devm_phy_consume(struct device *dev, void *res)
static int devm_phy_match(struct device *dev, void *res, void *match_data) static int devm_phy_match(struct device *dev, void *res, void *match_data)
{ {
return res == match_data; struct phy **phy = res;
return *phy == match_data;
} }
/** /**
@ -223,6 +225,7 @@ int phy_init(struct phy *phy)
ret = phy_pm_runtime_get_sync(phy); ret = phy_pm_runtime_get_sync(phy);
if (ret < 0 && ret != -ENOTSUPP) if (ret < 0 && ret != -ENOTSUPP)
return ret; return ret;
ret = 0; /* Override possible ret == -ENOTSUPP */
mutex_lock(&phy->mutex); mutex_lock(&phy->mutex);
if (phy->init_count == 0 && phy->ops->init) { if (phy->init_count == 0 && phy->ops->init) {
@ -231,8 +234,6 @@ int phy_init(struct phy *phy)
dev_err(&phy->dev, "phy init failed --> %d\n", ret); dev_err(&phy->dev, "phy init failed --> %d\n", ret);
goto out; goto out;
} }
} else {
ret = 0; /* Override possible ret == -ENOTSUPP */
} }
++phy->init_count; ++phy->init_count;
@ -253,6 +254,7 @@ int phy_exit(struct phy *phy)
ret = phy_pm_runtime_get_sync(phy); ret = phy_pm_runtime_get_sync(phy);
if (ret < 0 && ret != -ENOTSUPP) if (ret < 0 && ret != -ENOTSUPP)
return ret; return ret;
ret = 0; /* Override possible ret == -ENOTSUPP */
mutex_lock(&phy->mutex); mutex_lock(&phy->mutex);
if (phy->init_count == 1 && phy->ops->exit) { if (phy->init_count == 1 && phy->ops->exit) {
@ -287,6 +289,7 @@ int phy_power_on(struct phy *phy)
ret = phy_pm_runtime_get_sync(phy); ret = phy_pm_runtime_get_sync(phy);
if (ret < 0 && ret != -ENOTSUPP) if (ret < 0 && ret != -ENOTSUPP)
return ret; return ret;
ret = 0; /* Override possible ret == -ENOTSUPP */
mutex_lock(&phy->mutex); mutex_lock(&phy->mutex);
if (phy->power_count == 0 && phy->ops->power_on) { if (phy->power_count == 0 && phy->ops->power_on) {
@ -295,8 +298,6 @@ int phy_power_on(struct phy *phy)
dev_err(&phy->dev, "phy poweron failed --> %d\n", ret); dev_err(&phy->dev, "phy poweron failed --> %d\n", ret);
goto out; goto out;
} }
} else {
ret = 0; /* Override possible ret == -ENOTSUPP */
} }
++phy->power_count; ++phy->power_count;
mutex_unlock(&phy->mutex); mutex_unlock(&phy->mutex);

View file

@ -30,28 +30,13 @@ struct exynos_dp_video_phy {
const struct exynos_dp_video_phy_drvdata *drvdata; const struct exynos_dp_video_phy_drvdata *drvdata;
}; };
static void exynos_dp_video_phy_pwr_isol(struct exynos_dp_video_phy *state,
unsigned int on)
{
unsigned int val;
if (IS_ERR(state->regs))
return;
val = on ? 0 : EXYNOS5_PHY_ENABLE;
regmap_update_bits(state->regs, state->drvdata->phy_ctrl_offset,
EXYNOS5_PHY_ENABLE, val);
}
static int exynos_dp_video_phy_power_on(struct phy *phy) static int exynos_dp_video_phy_power_on(struct phy *phy)
{ {
struct exynos_dp_video_phy *state = phy_get_drvdata(phy); struct exynos_dp_video_phy *state = phy_get_drvdata(phy);
/* Disable power isolation on DP-PHY */ /* Disable power isolation on DP-PHY */
exynos_dp_video_phy_pwr_isol(state, 0); return regmap_update_bits(state->regs, state->drvdata->phy_ctrl_offset,
EXYNOS5_PHY_ENABLE, EXYNOS5_PHY_ENABLE);
return 0;
} }
static int exynos_dp_video_phy_power_off(struct phy *phy) static int exynos_dp_video_phy_power_off(struct phy *phy)
@ -59,9 +44,8 @@ static int exynos_dp_video_phy_power_off(struct phy *phy)
struct exynos_dp_video_phy *state = phy_get_drvdata(phy); struct exynos_dp_video_phy *state = phy_get_drvdata(phy);
/* Enable power isolation on DP-PHY */ /* Enable power isolation on DP-PHY */
exynos_dp_video_phy_pwr_isol(state, 1); return regmap_update_bits(state->regs, state->drvdata->phy_ctrl_offset,
EXYNOS5_PHY_ENABLE, 0);
return 0;
} }
static struct phy_ops exynos_dp_video_phy_ops = { static struct phy_ops exynos_dp_video_phy_ops = {

View file

@ -43,7 +43,6 @@ struct exynos_mipi_video_phy {
} phys[EXYNOS_MIPI_PHYS_NUM]; } phys[EXYNOS_MIPI_PHYS_NUM];
spinlock_t slock; spinlock_t slock;
void __iomem *regs; void __iomem *regs;
struct mutex mutex;
struct regmap *regmap; struct regmap *regmap;
}; };
@ -59,8 +58,9 @@ static int __set_phy_state(struct exynos_mipi_video_phy *state,
else else
reset = EXYNOS4_MIPI_PHY_SRESETN; reset = EXYNOS4_MIPI_PHY_SRESETN;
if (state->regmap) { spin_lock(&state->slock);
mutex_lock(&state->mutex);
if (!IS_ERR(state->regmap)) {
regmap_read(state->regmap, offset, &val); regmap_read(state->regmap, offset, &val);
if (on) if (on)
val |= reset; val |= reset;
@ -72,11 +72,9 @@ static int __set_phy_state(struct exynos_mipi_video_phy *state,
else if (!(val & EXYNOS4_MIPI_PHY_RESET_MASK)) else if (!(val & EXYNOS4_MIPI_PHY_RESET_MASK))
val &= ~EXYNOS4_MIPI_PHY_ENABLE; val &= ~EXYNOS4_MIPI_PHY_ENABLE;
regmap_write(state->regmap, offset, val); regmap_write(state->regmap, offset, val);
mutex_unlock(&state->mutex);
} else { } else {
addr = state->regs + EXYNOS_MIPI_PHY_CONTROL(id / 2); addr = state->regs + EXYNOS_MIPI_PHY_CONTROL(id / 2);
spin_lock(&state->slock);
val = readl(addr); val = readl(addr);
if (on) if (on)
val |= reset; val |= reset;
@ -90,9 +88,9 @@ static int __set_phy_state(struct exynos_mipi_video_phy *state,
val &= ~EXYNOS4_MIPI_PHY_ENABLE; val &= ~EXYNOS4_MIPI_PHY_ENABLE;
writel(val, addr); writel(val, addr);
spin_unlock(&state->slock);
} }
spin_unlock(&state->slock);
return 0; return 0;
} }
@ -158,7 +156,6 @@ static int exynos_mipi_video_phy_probe(struct platform_device *pdev)
dev_set_drvdata(dev, state); dev_set_drvdata(dev, state);
spin_lock_init(&state->slock); spin_lock_init(&state->slock);
mutex_init(&state->mutex);
for (i = 0; i < EXYNOS_MIPI_PHYS_NUM; i++) { for (i = 0; i < EXYNOS_MIPI_PHYS_NUM; i++) {
struct phy *phy = devm_phy_create(dev, NULL, struct phy *phy = devm_phy_create(dev, NULL,

View file

@ -250,7 +250,6 @@ static const struct samsung_usb2_common_phy exynos4210_phys[] = {
.power_on = exynos4210_power_on, .power_on = exynos4210_power_on,
.power_off = exynos4210_power_off, .power_off = exynos4210_power_off,
}, },
{},
}; };
const struct samsung_usb2_phy_config exynos4210_usb2_phy_config = { const struct samsung_usb2_phy_config exynos4210_usb2_phy_config = {

View file

@ -361,7 +361,6 @@ static const struct samsung_usb2_common_phy exynos4x12_phys[] = {
.power_on = exynos4x12_power_on, .power_on = exynos4x12_power_on,
.power_off = exynos4x12_power_off, .power_off = exynos4x12_power_off,
}, },
{},
}; };
const struct samsung_usb2_phy_config exynos3250_usb2_phy_config = { const struct samsung_usb2_phy_config exynos3250_usb2_phy_config = {

View file

@ -531,7 +531,7 @@ static struct phy *exynos5_usbdrd_phy_xlate(struct device *dev,
{ {
struct exynos5_usbdrd_phy *phy_drd = dev_get_drvdata(dev); struct exynos5_usbdrd_phy *phy_drd = dev_get_drvdata(dev);
if (WARN_ON(args->args[0] > EXYNOS5_DRDPHYS_NUM)) if (WARN_ON(args->args[0] >= EXYNOS5_DRDPHYS_NUM))
return ERR_PTR(-ENODEV); return ERR_PTR(-ENODEV);
return phy_drd->phys[args->args[0]].phy; return phy_drd->phys[args->args[0]].phy;

View file

@ -391,7 +391,6 @@ static const struct samsung_usb2_common_phy exynos5250_phys[] = {
.power_on = exynos5250_power_on, .power_on = exynos5250_power_on,
.power_off = exynos5250_power_off, .power_off = exynos5250_power_off,
}, },
{},
}; };
const struct samsung_usb2_phy_config exynos5250_usb2_phy_config = { const struct samsung_usb2_phy_config exynos5250_usb2_phy_config = {

View file

@ -147,6 +147,9 @@ static int hix5hd2_sata_phy_probe(struct platform_device *pdev)
return -ENOMEM; return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0); res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -EINVAL;
priv->base = devm_ioremap(dev, res->start, resource_size(res)); priv->base = devm_ioremap(dev, res->start, resource_size(res));
if (!priv->base) if (!priv->base)
return -ENOMEM; return -ENOMEM;

View file

@ -228,6 +228,7 @@ struct miphy28lp_dev {
struct regmap *regmap; struct regmap *regmap;
struct mutex miphy_mutex; struct mutex miphy_mutex;
struct miphy28lp_phy **phys; struct miphy28lp_phy **phys;
int nphys;
}; };
struct miphy_initval { struct miphy_initval {
@ -1116,7 +1117,7 @@ static struct phy *miphy28lp_xlate(struct device *dev,
return ERR_PTR(-EINVAL); return ERR_PTR(-EINVAL);
} }
for (index = 0; index < of_get_child_count(dev->of_node); index++) for (index = 0; index < miphy_dev->nphys; index++)
if (phynode == miphy_dev->phys[index]->phy->dev.of_node) { if (phynode == miphy_dev->phys[index]->phy->dev.of_node) {
miphy_phy = miphy_dev->phys[index]; miphy_phy = miphy_dev->phys[index];
break; break;
@ -1138,6 +1139,7 @@ static struct phy *miphy28lp_xlate(struct device *dev,
static struct phy_ops miphy28lp_ops = { static struct phy_ops miphy28lp_ops = {
.init = miphy28lp_init, .init = miphy28lp_init,
.owner = THIS_MODULE,
}; };
static int miphy28lp_probe_resets(struct device_node *node, static int miphy28lp_probe_resets(struct device_node *node,
@ -1200,16 +1202,15 @@ static int miphy28lp_probe(struct platform_device *pdev)
struct miphy28lp_dev *miphy_dev; struct miphy28lp_dev *miphy_dev;
struct phy_provider *provider; struct phy_provider *provider;
struct phy *phy; struct phy *phy;
int chancount, port = 0; int ret, port = 0;
int ret;
miphy_dev = devm_kzalloc(&pdev->dev, sizeof(*miphy_dev), GFP_KERNEL); miphy_dev = devm_kzalloc(&pdev->dev, sizeof(*miphy_dev), GFP_KERNEL);
if (!miphy_dev) if (!miphy_dev)
return -ENOMEM; return -ENOMEM;
chancount = of_get_child_count(np); miphy_dev->nphys = of_get_child_count(np);
miphy_dev->phys = devm_kzalloc(&pdev->dev, sizeof(phy) * chancount, miphy_dev->phys = devm_kcalloc(&pdev->dev, miphy_dev->nphys,
GFP_KERNEL); sizeof(*miphy_dev->phys), GFP_KERNEL);
if (!miphy_dev->phys) if (!miphy_dev->phys)
return -ENOMEM; return -ENOMEM;

View file

@ -150,6 +150,7 @@ struct miphy365x_dev {
struct regmap *regmap; struct regmap *regmap;
struct mutex miphy_mutex; struct mutex miphy_mutex;
struct miphy365x_phy **phys; struct miphy365x_phy **phys;
int nphys;
}; };
/* /*
@ -485,7 +486,7 @@ static struct phy *miphy365x_xlate(struct device *dev,
return ERR_PTR(-EINVAL); return ERR_PTR(-EINVAL);
} }
for (index = 0; index < of_get_child_count(dev->of_node); index++) for (index = 0; index < miphy_dev->nphys; index++)
if (phynode == miphy_dev->phys[index]->phy->dev.of_node) { if (phynode == miphy_dev->phys[index]->phy->dev.of_node) {
miphy_phy = miphy_dev->phys[index]; miphy_phy = miphy_dev->phys[index];
break; break;
@ -541,16 +542,15 @@ static int miphy365x_probe(struct platform_device *pdev)
struct miphy365x_dev *miphy_dev; struct miphy365x_dev *miphy_dev;
struct phy_provider *provider; struct phy_provider *provider;
struct phy *phy; struct phy *phy;
int chancount, port = 0; int ret, port = 0;
int ret;
miphy_dev = devm_kzalloc(&pdev->dev, sizeof(*miphy_dev), GFP_KERNEL); miphy_dev = devm_kzalloc(&pdev->dev, sizeof(*miphy_dev), GFP_KERNEL);
if (!miphy_dev) if (!miphy_dev)
return -ENOMEM; return -ENOMEM;
chancount = of_get_child_count(np); miphy_dev->nphys = of_get_child_count(np);
miphy_dev->phys = devm_kzalloc(&pdev->dev, sizeof(phy) * chancount, miphy_dev->phys = devm_kcalloc(&pdev->dev, miphy_dev->nphys,
GFP_KERNEL); sizeof(*miphy_dev->phys), GFP_KERNEL);
if (!miphy_dev->phys) if (!miphy_dev->phys)
return -ENOMEM; return -ENOMEM;

View file

@ -360,7 +360,7 @@ static void __exit omap_control_phy_exit(void)
} }
module_exit(omap_control_phy_exit); module_exit(omap_control_phy_exit);
MODULE_ALIAS("platform: omap_control_phy"); MODULE_ALIAS("platform:omap_control_phy");
MODULE_AUTHOR("Texas Instruments Inc."); MODULE_AUTHOR("Texas Instruments Inc.");
MODULE_DESCRIPTION("OMAP Control Module PHY Driver"); MODULE_DESCRIPTION("OMAP Control Module PHY Driver");
MODULE_LICENSE("GPL v2"); MODULE_LICENSE("GPL v2");

View file

@ -296,10 +296,11 @@ static int omap_usb2_probe(struct platform_device *pdev)
dev_warn(&pdev->dev, dev_warn(&pdev->dev,
"found usb_otg_ss_refclk960m, please fix DTS\n"); "found usb_otg_ss_refclk960m, please fix DTS\n");
} }
} else {
clk_prepare(phy->optclk);
} }
if (!IS_ERR(phy->optclk))
clk_prepare(phy->optclk);
usb_add_phy_dev(&phy->phy); usb_add_phy_dev(&phy->phy);
return 0; return 0;
@ -383,7 +384,7 @@ static struct platform_driver omap_usb2_driver = {
module_platform_driver(omap_usb2_driver); module_platform_driver(omap_usb2_driver);
MODULE_ALIAS("platform: omap_usb2"); MODULE_ALIAS("platform:omap_usb2");
MODULE_AUTHOR("Texas Instruments Inc."); MODULE_AUTHOR("Texas Instruments Inc.");
MODULE_DESCRIPTION("OMAP USB2 phy driver"); MODULE_DESCRIPTION("OMAP USB2 phy driver");
MODULE_LICENSE("GPL v2"); MODULE_LICENSE("GPL v2");

View file

@ -61,8 +61,6 @@ static int rockchip_usb_phy_power_off(struct phy *_phy)
return ret; return ret;
clk_disable_unprepare(phy->clk); clk_disable_unprepare(phy->clk);
if (ret)
return ret;
return 0; return 0;
} }
@ -78,8 +76,10 @@ static int rockchip_usb_phy_power_on(struct phy *_phy)
/* Power up usb phy analog blocks by set siddq 0 */ /* Power up usb phy analog blocks by set siddq 0 */
ret = rockchip_usb_phy_power(phy, 0); ret = rockchip_usb_phy_power(phy, 0);
if (ret) if (ret) {
clk_disable_unprepare(phy->clk);
return ret; return ret;
}
return 0; return 0;
} }

View file

@ -165,15 +165,11 @@ static int ti_pipe3_dpll_wait_lock(struct ti_pipe3 *phy)
cpu_relax(); cpu_relax();
val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_STATUS); val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_STATUS);
if (val & PLL_LOCK) if (val & PLL_LOCK)
break; return 0;
} while (!time_after(jiffies, timeout)); } while (!time_after(jiffies, timeout));
if (!(val & PLL_LOCK)) { dev_err(phy->dev, "DPLL failed to lock\n");
dev_err(phy->dev, "DPLL failed to lock\n"); return -EBUSY;
return -EBUSY;
}
return 0;
} }
static int ti_pipe3_dpll_program(struct ti_pipe3 *phy) static int ti_pipe3_dpll_program(struct ti_pipe3 *phy)
@ -608,7 +604,7 @@ static struct platform_driver ti_pipe3_driver = {
module_platform_driver(ti_pipe3_driver); module_platform_driver(ti_pipe3_driver);
MODULE_ALIAS("platform: ti_pipe3"); MODULE_ALIAS("platform:ti_pipe3");
MODULE_AUTHOR("Texas Instruments Inc."); MODULE_AUTHOR("Texas Instruments Inc.");
MODULE_DESCRIPTION("TI PIPE3 phy driver"); MODULE_DESCRIPTION("TI PIPE3 phy driver");
MODULE_LICENSE("GPL v2"); MODULE_LICENSE("GPL v2");

View file

@ -666,7 +666,6 @@ static int twl4030_usb_probe(struct platform_device *pdev)
twl->dev = &pdev->dev; twl->dev = &pdev->dev;
twl->irq = platform_get_irq(pdev, 0); twl->irq = platform_get_irq(pdev, 0);
twl->vbus_supplied = false; twl->vbus_supplied = false;
twl->linkstat = -EINVAL;
twl->linkstat = OMAP_MUSB_UNKNOWN; twl->linkstat = OMAP_MUSB_UNKNOWN;
twl->phy.dev = twl->dev; twl->phy.dev = twl->dev;

View file

@ -1704,7 +1704,6 @@ static int xgene_phy_probe(struct platform_device *pdev)
for (i = 0; i < MAX_LANE; i++) for (i = 0; i < MAX_LANE; i++)
ctx->sata_param.speed[i] = 2; /* Default to Gen3 */ ctx->sata_param.speed[i] = 2; /* Default to Gen3 */
ctx->dev = &pdev->dev;
platform_set_drvdata(pdev, ctx); platform_set_drvdata(pdev, ctx);
ctx->phy = devm_phy_create(ctx->dev, NULL, &xgene_phy_ops); ctx->phy = devm_phy_create(ctx->dev, NULL, &xgene_phy_ops);

View file

@ -73,7 +73,7 @@
#define TIME_WINDOW_MAX_MSEC 40000 #define TIME_WINDOW_MAX_MSEC 40000
#define TIME_WINDOW_MIN_MSEC 250 #define TIME_WINDOW_MIN_MSEC 250
#define ENERGY_UNIT_SCALE 1000 /* scale from driver unit to powercap unit */
enum unit_type { enum unit_type {
ARBITRARY_UNIT, /* no translation */ ARBITRARY_UNIT, /* no translation */
POWER_UNIT, POWER_UNIT,
@ -158,6 +158,7 @@ struct rapl_domain {
struct rapl_power_limit rpl[NR_POWER_LIMITS]; struct rapl_power_limit rpl[NR_POWER_LIMITS];
u64 attr_map; /* track capabilities */ u64 attr_map; /* track capabilities */
unsigned int state; unsigned int state;
unsigned int domain_energy_unit;
int package_id; int package_id;
}; };
#define power_zone_to_rapl_domain(_zone) \ #define power_zone_to_rapl_domain(_zone) \
@ -190,6 +191,7 @@ struct rapl_defaults {
void (*set_floor_freq)(struct rapl_domain *rd, bool mode); void (*set_floor_freq)(struct rapl_domain *rd, bool mode);
u64 (*compute_time_window)(struct rapl_package *rp, u64 val, u64 (*compute_time_window)(struct rapl_package *rp, u64 val,
bool to_raw); bool to_raw);
unsigned int dram_domain_energy_unit;
}; };
static struct rapl_defaults *rapl_defaults; static struct rapl_defaults *rapl_defaults;
@ -227,7 +229,8 @@ static int rapl_read_data_raw(struct rapl_domain *rd,
static int rapl_write_data_raw(struct rapl_domain *rd, static int rapl_write_data_raw(struct rapl_domain *rd,
enum rapl_primitives prim, enum rapl_primitives prim,
unsigned long long value); unsigned long long value);
static u64 rapl_unit_xlate(int package, enum unit_type type, u64 value, static u64 rapl_unit_xlate(struct rapl_domain *rd, int package,
enum unit_type type, u64 value,
int to_raw); int to_raw);
static void package_power_limit_irq_save(int package_id); static void package_power_limit_irq_save(int package_id);
@ -305,7 +308,9 @@ static int get_energy_counter(struct powercap_zone *power_zone, u64 *energy_raw)
static int get_max_energy_counter(struct powercap_zone *pcd_dev, u64 *energy) static int get_max_energy_counter(struct powercap_zone *pcd_dev, u64 *energy)
{ {
*energy = rapl_unit_xlate(0, ENERGY_UNIT, ENERGY_STATUS_MASK, 0); struct rapl_domain *rd = power_zone_to_rapl_domain(pcd_dev);
*energy = rapl_unit_xlate(rd, 0, ENERGY_UNIT, ENERGY_STATUS_MASK, 0);
return 0; return 0;
} }
@ -639,6 +644,11 @@ static void rapl_init_domains(struct rapl_package *rp)
rd->msrs[4] = MSR_DRAM_POWER_INFO; rd->msrs[4] = MSR_DRAM_POWER_INFO;
rd->rpl[0].prim_id = PL1_ENABLE; rd->rpl[0].prim_id = PL1_ENABLE;
rd->rpl[0].name = pl1_name; rd->rpl[0].name = pl1_name;
rd->domain_energy_unit =
rapl_defaults->dram_domain_energy_unit;
if (rd->domain_energy_unit)
pr_info("DRAM domain energy unit %dpj\n",
rd->domain_energy_unit);
break; break;
} }
if (mask) { if (mask) {
@ -648,11 +658,13 @@ static void rapl_init_domains(struct rapl_package *rp)
} }
} }
static u64 rapl_unit_xlate(int package, enum unit_type type, u64 value, static u64 rapl_unit_xlate(struct rapl_domain *rd, int package,
enum unit_type type, u64 value,
int to_raw) int to_raw)
{ {
u64 units = 1; u64 units = 1;
struct rapl_package *rp; struct rapl_package *rp;
u64 scale = 1;
rp = find_package_by_id(package); rp = find_package_by_id(package);
if (!rp) if (!rp)
@ -663,7 +675,12 @@ static u64 rapl_unit_xlate(int package, enum unit_type type, u64 value,
units = rp->power_unit; units = rp->power_unit;
break; break;
case ENERGY_UNIT: case ENERGY_UNIT:
units = rp->energy_unit; scale = ENERGY_UNIT_SCALE;
/* per domain unit takes precedence */
if (rd && rd->domain_energy_unit)
units = rd->domain_energy_unit;
else
units = rp->energy_unit;
break; break;
case TIME_UNIT: case TIME_UNIT:
return rapl_defaults->compute_time_window(rp, value, to_raw); return rapl_defaults->compute_time_window(rp, value, to_raw);
@ -673,11 +690,11 @@ static u64 rapl_unit_xlate(int package, enum unit_type type, u64 value,
}; };
if (to_raw) if (to_raw)
return div64_u64(value, units); return div64_u64(value, units) * scale;
value *= units; value *= units;
return value; return div64_u64(value, scale);
} }
/* in the order of enum rapl_primitives */ /* in the order of enum rapl_primitives */
@ -773,7 +790,7 @@ static int rapl_read_data_raw(struct rapl_domain *rd,
final = value & rp->mask; final = value & rp->mask;
final = final >> rp->shift; final = final >> rp->shift;
if (xlate) if (xlate)
*data = rapl_unit_xlate(rd->package_id, rp->unit, final, 0); *data = rapl_unit_xlate(rd, rd->package_id, rp->unit, final, 0);
else else
*data = final; *data = final;
@ -799,7 +816,7 @@ static int rapl_write_data_raw(struct rapl_domain *rd,
"failed to read msr 0x%x on cpu %d\n", msr, cpu); "failed to read msr 0x%x on cpu %d\n", msr, cpu);
return -EIO; return -EIO;
} }
value = rapl_unit_xlate(rd->package_id, rp->unit, value, 1); value = rapl_unit_xlate(rd, rd->package_id, rp->unit, value, 1);
msr_val &= ~rp->mask; msr_val &= ~rp->mask;
msr_val |= value << rp->shift; msr_val |= value << rp->shift;
if (wrmsrl_safe_on_cpu(cpu, msr, msr_val)) { if (wrmsrl_safe_on_cpu(cpu, msr, msr_val)) {
@ -818,7 +835,7 @@ static int rapl_write_data_raw(struct rapl_domain *rd,
* calculate units differ on different CPUs. * calculate units differ on different CPUs.
* We convert the units to below format based on CPUs. * We convert the units to below format based on CPUs.
* i.e. * i.e.
* energy unit: microJoules : Represented in microJoules by default * energy unit: picoJoules : Represented in picoJoules by default
* power unit : microWatts : Represented in milliWatts by default * power unit : microWatts : Represented in milliWatts by default
* time unit : microseconds: Represented in seconds by default * time unit : microseconds: Represented in seconds by default
*/ */
@ -834,7 +851,7 @@ static int rapl_check_unit_core(struct rapl_package *rp, int cpu)
} }
value = (msr_val & ENERGY_UNIT_MASK) >> ENERGY_UNIT_OFFSET; value = (msr_val & ENERGY_UNIT_MASK) >> ENERGY_UNIT_OFFSET;
rp->energy_unit = 1000000 / (1 << value); rp->energy_unit = ENERGY_UNIT_SCALE * 1000000 / (1 << value);
value = (msr_val & POWER_UNIT_MASK) >> POWER_UNIT_OFFSET; value = (msr_val & POWER_UNIT_MASK) >> POWER_UNIT_OFFSET;
rp->power_unit = 1000000 / (1 << value); rp->power_unit = 1000000 / (1 << value);
@ -842,7 +859,7 @@ static int rapl_check_unit_core(struct rapl_package *rp, int cpu)
value = (msr_val & TIME_UNIT_MASK) >> TIME_UNIT_OFFSET; value = (msr_val & TIME_UNIT_MASK) >> TIME_UNIT_OFFSET;
rp->time_unit = 1000000 / (1 << value); rp->time_unit = 1000000 / (1 << value);
pr_debug("Core CPU package %d energy=%duJ, time=%dus, power=%duW\n", pr_debug("Core CPU package %d energy=%dpJ, time=%dus, power=%duW\n",
rp->id, rp->energy_unit, rp->time_unit, rp->power_unit); rp->id, rp->energy_unit, rp->time_unit, rp->power_unit);
return 0; return 0;
@ -859,7 +876,7 @@ static int rapl_check_unit_atom(struct rapl_package *rp, int cpu)
return -ENODEV; return -ENODEV;
} }
value = (msr_val & ENERGY_UNIT_MASK) >> ENERGY_UNIT_OFFSET; value = (msr_val & ENERGY_UNIT_MASK) >> ENERGY_UNIT_OFFSET;
rp->energy_unit = 1 << value; rp->energy_unit = ENERGY_UNIT_SCALE * 1 << value;
value = (msr_val & POWER_UNIT_MASK) >> POWER_UNIT_OFFSET; value = (msr_val & POWER_UNIT_MASK) >> POWER_UNIT_OFFSET;
rp->power_unit = (1 << value) * 1000; rp->power_unit = (1 << value) * 1000;
@ -867,7 +884,7 @@ static int rapl_check_unit_atom(struct rapl_package *rp, int cpu)
value = (msr_val & TIME_UNIT_MASK) >> TIME_UNIT_OFFSET; value = (msr_val & TIME_UNIT_MASK) >> TIME_UNIT_OFFSET;
rp->time_unit = 1000000 / (1 << value); rp->time_unit = 1000000 / (1 << value);
pr_debug("Atom package %d energy=%duJ, time=%dus, power=%duW\n", pr_debug("Atom package %d energy=%dpJ, time=%dus, power=%duW\n",
rp->id, rp->energy_unit, rp->time_unit, rp->power_unit); rp->id, rp->energy_unit, rp->time_unit, rp->power_unit);
return 0; return 0;
@ -1017,6 +1034,13 @@ static const struct rapl_defaults rapl_defaults_core = {
.compute_time_window = rapl_compute_time_window_core, .compute_time_window = rapl_compute_time_window_core,
}; };
static const struct rapl_defaults rapl_defaults_hsw_server = {
.check_unit = rapl_check_unit_core,
.set_floor_freq = set_floor_freq_default,
.compute_time_window = rapl_compute_time_window_core,
.dram_domain_energy_unit = 15300,
};
static const struct rapl_defaults rapl_defaults_atom = { static const struct rapl_defaults rapl_defaults_atom = {
.check_unit = rapl_check_unit_atom, .check_unit = rapl_check_unit_atom,
.set_floor_freq = set_floor_freq_atom, .set_floor_freq = set_floor_freq_atom,
@ -1037,7 +1061,7 @@ static const struct x86_cpu_id rapl_ids[] = {
RAPL_CPU(0x3a, rapl_defaults_core),/* Ivy Bridge */ RAPL_CPU(0x3a, rapl_defaults_core),/* Ivy Bridge */
RAPL_CPU(0x3c, rapl_defaults_core),/* Haswell */ RAPL_CPU(0x3c, rapl_defaults_core),/* Haswell */
RAPL_CPU(0x3d, rapl_defaults_core),/* Broadwell */ RAPL_CPU(0x3d, rapl_defaults_core),/* Broadwell */
RAPL_CPU(0x3f, rapl_defaults_core),/* Haswell */ RAPL_CPU(0x3f, rapl_defaults_hsw_server),/* Haswell servers */
RAPL_CPU(0x45, rapl_defaults_core),/* Haswell ULT */ RAPL_CPU(0x45, rapl_defaults_core),/* Haswell ULT */
RAPL_CPU(0x4C, rapl_defaults_atom),/* Braswell */ RAPL_CPU(0x4C, rapl_defaults_atom),/* Braswell */
RAPL_CPU(0x4A, rapl_defaults_atom),/* Tangier */ RAPL_CPU(0x4A, rapl_defaults_atom),/* Tangier */

View file

@ -324,7 +324,7 @@ static irqreturn_t at91_rtc_interrupt(int irq, void *dev_id)
ret = IRQ_HANDLED; ret = IRQ_HANDLED;
} }
spin_lock(&suspended_lock); spin_unlock(&suspended_lock);
return ret; return ret;
} }

View file

@ -1596,7 +1596,7 @@ static int tcm_qla2xxx_check_initiator_node_acl(
/* /*
* Finally register the new FC Nexus with TCM * Finally register the new FC Nexus with TCM
*/ */
__transport_register_session(se_nacl->se_tpg, se_nacl, se_sess, sess); transport_register_session(se_nacl->se_tpg, se_nacl, se_sess, sess);
return 0; return 0;
} }

View file

@ -330,16 +330,6 @@ static void device_init_registers(struct vnt_private *pDevice)
/* zonetype initial */ /* zonetype initial */
pDevice->byOriginalZonetype = pDevice->abyEEPROM[EEP_OFS_ZONETYPE]; pDevice->byOriginalZonetype = pDevice->abyEEPROM[EEP_OFS_ZONETYPE];
/* Get RFType */
pDevice->byRFType = SROMbyReadEmbedded(pDevice->PortOffset, EEP_OFS_RFTYPE);
/* force change RevID for VT3253 emu */
if ((pDevice->byRFType & RF_EMU) != 0)
pDevice->byRevId = 0x80;
pDevice->byRFType &= RF_MASK;
pr_debug("pDevice->byRFType = %x\n", pDevice->byRFType);
if (!pDevice->bZoneRegExist) if (!pDevice->bZoneRegExist)
pDevice->byZoneType = pDevice->abyEEPROM[EEP_OFS_ZONETYPE]; pDevice->byZoneType = pDevice->abyEEPROM[EEP_OFS_ZONETYPE];
@ -1187,12 +1177,14 @@ static int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb)
{ {
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
PSTxDesc head_td; PSTxDesc head_td;
u32 dma_idx = TYPE_AC0DMA; u32 dma_idx;
unsigned long flags; unsigned long flags;
spin_lock_irqsave(&priv->lock, flags); spin_lock_irqsave(&priv->lock, flags);
if (!ieee80211_is_data(hdr->frame_control)) if (ieee80211_is_data(hdr->frame_control))
dma_idx = TYPE_AC0DMA;
else
dma_idx = TYPE_TXDMA0; dma_idx = TYPE_TXDMA0;
if (AVAIL_TD(priv, dma_idx) < 1) { if (AVAIL_TD(priv, dma_idx) < 1) {
@ -1206,6 +1198,9 @@ static int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb)
head_td->pTDInfo->skb = skb; head_td->pTDInfo->skb = skb;
if (dma_idx == TYPE_AC0DMA)
head_td->pTDInfo->byFlags = TD_FLAGS_NETIF_SKB;
priv->iTDUsed[dma_idx]++; priv->iTDUsed[dma_idx]++;
/* Take ownership */ /* Take ownership */
@ -1234,13 +1229,10 @@ static int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb)
head_td->buff_addr = cpu_to_le32(head_td->pTDInfo->skb_dma); head_td->buff_addr = cpu_to_le32(head_td->pTDInfo->skb_dma);
if (dma_idx == TYPE_AC0DMA) { if (head_td->pTDInfo->byFlags & TD_FLAGS_NETIF_SKB)
head_td->pTDInfo->byFlags = TD_FLAGS_NETIF_SKB;
MACvTransmitAC0(priv->PortOffset); MACvTransmitAC0(priv->PortOffset);
} else { else
MACvTransmit0(priv->PortOffset); MACvTransmit0(priv->PortOffset);
}
spin_unlock_irqrestore(&priv->lock, flags); spin_unlock_irqrestore(&priv->lock, flags);
@ -1778,6 +1770,12 @@ vt6655_probe(struct pci_dev *pcid, const struct pci_device_id *ent)
MACvInitialize(priv->PortOffset); MACvInitialize(priv->PortOffset);
MACvReadEtherAddress(priv->PortOffset, priv->abyCurrentNetAddr); MACvReadEtherAddress(priv->PortOffset, priv->abyCurrentNetAddr);
/* Get RFType */
priv->byRFType = SROMbyReadEmbedded(priv->PortOffset, EEP_OFS_RFTYPE);
priv->byRFType &= RF_MASK;
dev_dbg(&pcid->dev, "RF Type = %x\n", priv->byRFType);
device_get_options(priv); device_get_options(priv);
device_set_options(priv); device_set_options(priv);
/* Mask out the options cannot be set to the chip */ /* Mask out the options cannot be set to the chip */

View file

@ -794,6 +794,7 @@ bool RFbSetPower(
break; break;
case RATE_6M: case RATE_6M:
case RATE_9M: case RATE_9M:
case RATE_12M:
case RATE_18M: case RATE_18M:
byPwr = priv->abyOFDMPwrTbl[uCH]; byPwr = priv->abyOFDMPwrTbl[uCH];
if (priv->byRFType == RF_UW2452) if (priv->byRFType == RF_UW2452)

View file

@ -640,6 +640,7 @@ int vnt_rf_setpower(struct vnt_private *priv, u32 rate, u32 channel)
break; break;
case RATE_6M: case RATE_6M:
case RATE_9M: case RATE_9M:
case RATE_12M:
case RATE_18M: case RATE_18M:
case RATE_24M: case RATE_24M:
case RATE_36M: case RATE_36M:

View file

@ -4256,11 +4256,17 @@ int iscsit_close_connection(
pr_debug("Closing iSCSI connection CID %hu on SID:" pr_debug("Closing iSCSI connection CID %hu on SID:"
" %u\n", conn->cid, sess->sid); " %u\n", conn->cid, sess->sid);
/* /*
* Always up conn_logout_comp just in case the RX Thread is sleeping * Always up conn_logout_comp for the traditional TCP case just in case
* and the logout response never got sent because the connection * the RX Thread in iscsi_target_rx_opcode() is sleeping and the logout
* failed. * response never got sent because the connection failed.
*
* However for iser-target, isert_wait4logout() is using conn_logout_comp
* to signal logout response TX interrupt completion. Go ahead and skip
* this for iser since isert_rx_opcode() does not wait on logout failure,
* and to avoid iscsi_conn pointer dereference in iser-target code.
*/ */
complete(&conn->conn_logout_comp); if (conn->conn_transport->transport_type == ISCSI_TCP)
complete(&conn->conn_logout_comp);
iscsi_release_thread_set(conn); iscsi_release_thread_set(conn);

View file

@ -22,7 +22,6 @@
#include <target/target_core_fabric.h> #include <target/target_core_fabric.h>
#include <target/iscsi/iscsi_target_core.h> #include <target/iscsi/iscsi_target_core.h>
#include <target/iscsi/iscsi_transport.h>
#include "iscsi_target_seq_pdu_list.h" #include "iscsi_target_seq_pdu_list.h"
#include "iscsi_target_tq.h" #include "iscsi_target_tq.h"
#include "iscsi_target_erl0.h" #include "iscsi_target_erl0.h"
@ -940,8 +939,7 @@ void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn)
if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT) { if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT) {
spin_unlock_bh(&conn->state_lock); spin_unlock_bh(&conn->state_lock);
if (conn->conn_transport->transport_type == ISCSI_TCP) iscsit_close_connection(conn);
iscsit_close_connection(conn);
return; return;
} }

View file

@ -953,11 +953,8 @@ static int tcm_loop_make_nexus(
transport_free_session(tl_nexus->se_sess); transport_free_session(tl_nexus->se_sess);
goto out; goto out;
} }
/* /* Now, register the SAS I_T Nexus as active. */
* Now, register the SAS I_T Nexus as active with the call to transport_register_session(se_tpg, tl_nexus->se_sess->se_node_acl,
* transport_register_session()
*/
__transport_register_session(se_tpg, tl_nexus->se_sess->se_node_acl,
tl_nexus->se_sess, tl_nexus); tl_nexus->se_sess, tl_nexus);
tl_tpg->tl_nexus = tl_nexus; tl_tpg->tl_nexus = tl_nexus;
pr_debug("TCM_Loop_ConfigFS: Established I_T Nexus to emulated" pr_debug("TCM_Loop_ConfigFS: Established I_T Nexus to emulated"

View file

@ -650,6 +650,18 @@ static u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
return aligned_max_sectors; return aligned_max_sectors;
} }
bool se_dev_check_wce(struct se_device *dev)
{
bool wce = false;
if (dev->transport->get_write_cache)
wce = dev->transport->get_write_cache(dev);
else if (dev->dev_attrib.emulate_write_cache > 0)
wce = true;
return wce;
}
int se_dev_set_max_unmap_lba_count( int se_dev_set_max_unmap_lba_count(
struct se_device *dev, struct se_device *dev,
u32 max_unmap_lba_count) u32 max_unmap_lba_count)
@ -767,6 +779,16 @@ int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
pr_err("Illegal value %d\n", flag); pr_err("Illegal value %d\n", flag);
return -EINVAL; return -EINVAL;
} }
if (flag &&
dev->transport->get_write_cache) {
pr_err("emulate_fua_write not supported for this device\n");
return -EINVAL;
}
if (dev->export_count) {
pr_err("emulate_fua_write cannot be changed with active"
" exports: %d\n", dev->export_count);
return -EINVAL;
}
dev->dev_attrib.emulate_fua_write = flag; dev->dev_attrib.emulate_fua_write = flag;
pr_debug("dev[%p]: SE Device Forced Unit Access WRITEs: %d\n", pr_debug("dev[%p]: SE Device Forced Unit Access WRITEs: %d\n",
dev, dev->dev_attrib.emulate_fua_write); dev, dev->dev_attrib.emulate_fua_write);
@ -801,7 +823,11 @@ int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
pr_err("emulate_write_cache not supported for this device\n"); pr_err("emulate_write_cache not supported for this device\n");
return -EINVAL; return -EINVAL;
} }
if (dev->export_count) {
pr_err("emulate_write_cache cannot be changed with active"
" exports: %d\n", dev->export_count);
return -EINVAL;
}
dev->dev_attrib.emulate_write_cache = flag; dev->dev_attrib.emulate_write_cache = flag;
pr_debug("dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n", pr_debug("dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n",
dev, dev->dev_attrib.emulate_write_cache); dev, dev->dev_attrib.emulate_write_cache);
@ -1534,8 +1560,6 @@ int target_configure_device(struct se_device *dev)
ret = dev->transport->configure_device(dev); ret = dev->transport->configure_device(dev);
if (ret) if (ret)
goto out; goto out;
dev->dev_flags |= DF_CONFIGURED;
/* /*
* XXX: there is not much point to have two different values here.. * XXX: there is not much point to have two different values here..
*/ */
@ -1597,6 +1621,8 @@ int target_configure_device(struct se_device *dev)
list_add_tail(&dev->g_dev_node, &g_device_list); list_add_tail(&dev->g_dev_node, &g_device_list);
mutex_unlock(&g_device_mutex); mutex_unlock(&g_device_mutex);
dev->dev_flags |= DF_CONFIGURED;
return 0; return 0;
out_free_alua: out_free_alua:

View file

@ -1121,7 +1121,7 @@ static u32 pscsi_get_device_type(struct se_device *dev)
struct pscsi_dev_virt *pdv = PSCSI_DEV(dev); struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
struct scsi_device *sd = pdv->pdv_sd; struct scsi_device *sd = pdv->pdv_sd;
return sd->type; return (sd) ? sd->type : TYPE_NO_LUN;
} }
static sector_t pscsi_get_blocks(struct se_device *dev) static sector_t pscsi_get_blocks(struct se_device *dev)

View file

@ -708,8 +708,7 @@ sbc_check_dpofua(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb)
} }
} }
if (cdb[1] & 0x8) { if (cdb[1] & 0x8) {
if (!dev->dev_attrib.emulate_fua_write || if (!dev->dev_attrib.emulate_fua_write || !se_dev_check_wce(dev)) {
!dev->dev_attrib.emulate_write_cache) {
pr_err("Got CDB: 0x%02x with FUA bit set, but device" pr_err("Got CDB: 0x%02x with FUA bit set, but device"
" does not advertise support for FUA write\n", " does not advertise support for FUA write\n",
cdb[0]); cdb[0]);

View file

@ -454,19 +454,6 @@ check_scsi_name:
} }
EXPORT_SYMBOL(spc_emulate_evpd_83); EXPORT_SYMBOL(spc_emulate_evpd_83);
static bool
spc_check_dev_wce(struct se_device *dev)
{
bool wce = false;
if (dev->transport->get_write_cache)
wce = dev->transport->get_write_cache(dev);
else if (dev->dev_attrib.emulate_write_cache > 0)
wce = true;
return wce;
}
/* Extended INQUIRY Data VPD Page */ /* Extended INQUIRY Data VPD Page */
static sense_reason_t static sense_reason_t
spc_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf) spc_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
@ -490,7 +477,7 @@ spc_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
buf[5] = 0x07; buf[5] = 0x07;
/* If WriteCache emulation is enabled, set V_SUP */ /* If WriteCache emulation is enabled, set V_SUP */
if (spc_check_dev_wce(dev)) if (se_dev_check_wce(dev))
buf[6] = 0x01; buf[6] = 0x01;
/* If an LBA map is present set R_SUP */ /* If an LBA map is present set R_SUP */
spin_lock(&cmd->se_dev->t10_alua.lba_map_lock); spin_lock(&cmd->se_dev->t10_alua.lba_map_lock);
@ -897,7 +884,7 @@ static int spc_modesense_caching(struct se_cmd *cmd, u8 pc, u8 *p)
if (pc == 1) if (pc == 1)
goto out; goto out;
if (spc_check_dev_wce(dev)) if (se_dev_check_wce(dev))
p[2] = 0x04; /* Write Cache Enable */ p[2] = 0x04; /* Write Cache Enable */
p[12] = 0x20; /* Disabled Read Ahead */ p[12] = 0x20; /* Disabled Read Ahead */
@ -1009,7 +996,7 @@ static sense_reason_t spc_emulate_modesense(struct se_cmd *cmd)
(cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY))) (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)))
spc_modesense_write_protect(&buf[length], type); spc_modesense_write_protect(&buf[length], type);
if ((spc_check_dev_wce(dev)) && if ((se_dev_check_wce(dev)) &&
(dev->dev_attrib.emulate_fua_write > 0)) (dev->dev_attrib.emulate_fua_write > 0))
spc_modesense_dpofua(&buf[length], type); spc_modesense_dpofua(&buf[length], type);

View file

@ -2389,6 +2389,10 @@ int target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd,
list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list); list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list);
out: out:
spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
if (ret && ack_kref)
target_put_sess_cmd(se_sess, se_cmd);
return ret; return ret;
} }
EXPORT_SYMBOL(target_get_sess_cmd); EXPORT_SYMBOL(target_get_sess_cmd);

View file

@ -359,7 +359,7 @@ void ft_invl_hw_context(struct ft_cmd *cmd)
ep = fc_seq_exch(seq); ep = fc_seq_exch(seq);
if (ep) { if (ep) {
lport = ep->lp; lport = ep->lp;
if (lport && (ep->xid <= lport->lro_xid)) if (lport && (ep->xid <= lport->lro_xid)) {
/* /*
* "ddp_done" trigger invalidation of HW * "ddp_done" trigger invalidation of HW
* specific DDP context * specific DDP context
@ -374,6 +374,7 @@ void ft_invl_hw_context(struct ft_cmd *cmd)
* identified using ep->xid) * identified using ep->xid)
*/ */
cmd->was_ddp_setup = 0; cmd->was_ddp_setup = 0;
}
} }
} }
} }

View file

@ -119,7 +119,10 @@ static void dw8250_serial_out(struct uart_port *p, int offset, int value)
dw8250_force_idle(p); dw8250_force_idle(p);
writeb(value, p->membase + (UART_LCR << p->regshift)); writeb(value, p->membase + (UART_LCR << p->regshift));
} }
dev_err(p->dev, "Couldn't set LCR to %d\n", value); /*
* FIXME: this deadlocks if port->lock is already held
* dev_err(p->dev, "Couldn't set LCR to %d\n", value);
*/
} }
} }
@ -163,7 +166,10 @@ static void dw8250_serial_outq(struct uart_port *p, int offset, int value)
__raw_writeq(value & 0xff, __raw_writeq(value & 0xff,
p->membase + (UART_LCR << p->regshift)); p->membase + (UART_LCR << p->regshift));
} }
dev_err(p->dev, "Couldn't set LCR to %d\n", value); /*
* FIXME: this deadlocks if port->lock is already held
* dev_err(p->dev, "Couldn't set LCR to %d\n", value);
*/
} }
} }
#endif /* CONFIG_64BIT */ #endif /* CONFIG_64BIT */
@ -187,7 +193,10 @@ static void dw8250_serial_out32(struct uart_port *p, int offset, int value)
dw8250_force_idle(p); dw8250_force_idle(p);
writel(value, p->membase + (UART_LCR << p->regshift)); writel(value, p->membase + (UART_LCR << p->regshift));
} }
dev_err(p->dev, "Couldn't set LCR to %d\n", value); /*
* FIXME: this deadlocks if port->lock is already held
* dev_err(p->dev, "Couldn't set LCR to %d\n", value);
*/
} }
} }

View file

@ -929,6 +929,13 @@ __acquires(hwep->lock)
return retval; return retval;
} }
static int otg_a_alt_hnp_support(struct ci_hdrc *ci)
{
dev_warn(&ci->gadget.dev,
"connect the device to an alternate port if you want HNP\n");
return isr_setup_status_phase(ci);
}
/** /**
* isr_setup_packet_handler: setup packet handler * isr_setup_packet_handler: setup packet handler
* @ci: UDC descriptor * @ci: UDC descriptor
@ -1061,6 +1068,10 @@ __acquires(ci->lock)
ci); ci);
} }
break; break;
case USB_DEVICE_A_ALT_HNP_SUPPORT:
if (ci_otg_is_fsm_mode(ci))
err = otg_a_alt_hnp_support(ci);
break;
default: default:
goto delegate; goto delegate;
} }

View file

@ -150,9 +150,9 @@ static int otg_set_state(struct otg_fsm *fsm, enum usb_otg_state new_state)
break; break;
case OTG_STATE_B_PERIPHERAL: case OTG_STATE_B_PERIPHERAL:
otg_chrg_vbus(fsm, 0); otg_chrg_vbus(fsm, 0);
otg_loc_conn(fsm, 1);
otg_loc_sof(fsm, 0); otg_loc_sof(fsm, 0);
otg_set_protocol(fsm, PROTO_GADGET); otg_set_protocol(fsm, PROTO_GADGET);
otg_loc_conn(fsm, 1);
break; break;
case OTG_STATE_B_WAIT_ACON: case OTG_STATE_B_WAIT_ACON:
otg_chrg_vbus(fsm, 0); otg_chrg_vbus(fsm, 0);
@ -213,10 +213,10 @@ static int otg_set_state(struct otg_fsm *fsm, enum usb_otg_state new_state)
break; break;
case OTG_STATE_A_PERIPHERAL: case OTG_STATE_A_PERIPHERAL:
otg_loc_conn(fsm, 1);
otg_loc_sof(fsm, 0); otg_loc_sof(fsm, 0);
otg_set_protocol(fsm, PROTO_GADGET); otg_set_protocol(fsm, PROTO_GADGET);
otg_drv_vbus(fsm, 1); otg_drv_vbus(fsm, 1);
otg_loc_conn(fsm, 1);
otg_add_timer(fsm, A_BIDL_ADIS); otg_add_timer(fsm, A_BIDL_ADIS);
break; break;
case OTG_STATE_A_WAIT_VFALL: case OTG_STATE_A_WAIT_VFALL:

View file

@ -377,6 +377,9 @@ static void dwc2_handle_disconnect_intr(struct dwc2_hsotg *hsotg)
dwc2_is_host_mode(hsotg) ? "Host" : "Device", dwc2_is_host_mode(hsotg) ? "Host" : "Device",
dwc2_op_state_str(hsotg)); dwc2_op_state_str(hsotg));
if (hsotg->op_state == OTG_STATE_A_HOST)
dwc2_hcd_disconnect(hsotg);
/* Change to L3 (OFF) state */ /* Change to L3 (OFF) state */
hsotg->lx_state = DWC2_L3; hsotg->lx_state = DWC2_L3;

View file

@ -289,8 +289,7 @@ static void disable_loopback(struct f_loopback *loop)
struct usb_composite_dev *cdev; struct usb_composite_dev *cdev;
cdev = loop->function.config->cdev; cdev = loop->function.config->cdev;
disable_endpoints(cdev, loop->in_ep, loop->out_ep, NULL, NULL, NULL, disable_endpoints(cdev, loop->in_ep, loop->out_ep, NULL, NULL);
NULL);
VDBG(cdev, "%s disabled\n", loop->function.name); VDBG(cdev, "%s disabled\n", loop->function.name);
} }

View file

@ -23,15 +23,6 @@
#include "gadget_chips.h" #include "gadget_chips.h"
#include "u_f.h" #include "u_f.h"
#define USB_MS_TO_SS_INTERVAL(x) USB_MS_TO_HS_INTERVAL(x)
enum eptype {
EP_CONTROL = 0,
EP_BULK,
EP_ISOC,
EP_INTERRUPT,
};
/* /*
* SOURCE/SINK FUNCTION ... a primary testing vehicle for USB peripheral * SOURCE/SINK FUNCTION ... a primary testing vehicle for USB peripheral
* controller drivers. * controller drivers.
@ -64,8 +55,6 @@ struct f_sourcesink {
struct usb_ep *out_ep; struct usb_ep *out_ep;
struct usb_ep *iso_in_ep; struct usb_ep *iso_in_ep;
struct usb_ep *iso_out_ep; struct usb_ep *iso_out_ep;
struct usb_ep *int_in_ep;
struct usb_ep *int_out_ep;
int cur_alt; int cur_alt;
}; };
@ -79,10 +68,6 @@ static unsigned isoc_interval;
static unsigned isoc_maxpacket; static unsigned isoc_maxpacket;
static unsigned isoc_mult; static unsigned isoc_mult;
static unsigned isoc_maxburst; static unsigned isoc_maxburst;
static unsigned int_interval; /* In ms */
static unsigned int_maxpacket;
static unsigned int_mult;
static unsigned int_maxburst;
static unsigned buflen; static unsigned buflen;
/*-------------------------------------------------------------------------*/ /*-------------------------------------------------------------------------*/
@ -107,16 +92,6 @@ static struct usb_interface_descriptor source_sink_intf_alt1 = {
/* .iInterface = DYNAMIC */ /* .iInterface = DYNAMIC */
}; };
static struct usb_interface_descriptor source_sink_intf_alt2 = {
.bLength = USB_DT_INTERFACE_SIZE,
.bDescriptorType = USB_DT_INTERFACE,
.bAlternateSetting = 2,
.bNumEndpoints = 2,
.bInterfaceClass = USB_CLASS_VENDOR_SPEC,
/* .iInterface = DYNAMIC */
};
/* full speed support: */ /* full speed support: */
static struct usb_endpoint_descriptor fs_source_desc = { static struct usb_endpoint_descriptor fs_source_desc = {
@ -155,26 +130,6 @@ static struct usb_endpoint_descriptor fs_iso_sink_desc = {
.bInterval = 4, .bInterval = 4,
}; };
static struct usb_endpoint_descriptor fs_int_source_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_IN,
.bmAttributes = USB_ENDPOINT_XFER_INT,
.wMaxPacketSize = cpu_to_le16(64),
.bInterval = GZERO_INT_INTERVAL,
};
static struct usb_endpoint_descriptor fs_int_sink_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_OUT,
.bmAttributes = USB_ENDPOINT_XFER_INT,
.wMaxPacketSize = cpu_to_le16(64),
.bInterval = GZERO_INT_INTERVAL,
};
static struct usb_descriptor_header *fs_source_sink_descs[] = { static struct usb_descriptor_header *fs_source_sink_descs[] = {
(struct usb_descriptor_header *) &source_sink_intf_alt0, (struct usb_descriptor_header *) &source_sink_intf_alt0,
(struct usb_descriptor_header *) &fs_sink_desc, (struct usb_descriptor_header *) &fs_sink_desc,
@ -185,10 +140,6 @@ static struct usb_descriptor_header *fs_source_sink_descs[] = {
(struct usb_descriptor_header *) &fs_source_desc, (struct usb_descriptor_header *) &fs_source_desc,
(struct usb_descriptor_header *) &fs_iso_sink_desc, (struct usb_descriptor_header *) &fs_iso_sink_desc,
(struct usb_descriptor_header *) &fs_iso_source_desc, (struct usb_descriptor_header *) &fs_iso_source_desc,
(struct usb_descriptor_header *) &source_sink_intf_alt2,
#define FS_ALT_IFC_2_OFFSET 8
(struct usb_descriptor_header *) &fs_int_sink_desc,
(struct usb_descriptor_header *) &fs_int_source_desc,
NULL, NULL,
}; };
@ -228,24 +179,6 @@ static struct usb_endpoint_descriptor hs_iso_sink_desc = {
.bInterval = 4, .bInterval = 4,
}; };
static struct usb_endpoint_descriptor hs_int_source_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bmAttributes = USB_ENDPOINT_XFER_INT,
.wMaxPacketSize = cpu_to_le16(1024),
.bInterval = USB_MS_TO_HS_INTERVAL(GZERO_INT_INTERVAL),
};
static struct usb_endpoint_descriptor hs_int_sink_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bmAttributes = USB_ENDPOINT_XFER_INT,
.wMaxPacketSize = cpu_to_le16(1024),
.bInterval = USB_MS_TO_HS_INTERVAL(GZERO_INT_INTERVAL),
};
static struct usb_descriptor_header *hs_source_sink_descs[] = { static struct usb_descriptor_header *hs_source_sink_descs[] = {
(struct usb_descriptor_header *) &source_sink_intf_alt0, (struct usb_descriptor_header *) &source_sink_intf_alt0,
(struct usb_descriptor_header *) &hs_source_desc, (struct usb_descriptor_header *) &hs_source_desc,
@ -256,10 +189,6 @@ static struct usb_descriptor_header *hs_source_sink_descs[] = {
(struct usb_descriptor_header *) &hs_sink_desc, (struct usb_descriptor_header *) &hs_sink_desc,
(struct usb_descriptor_header *) &hs_iso_source_desc, (struct usb_descriptor_header *) &hs_iso_source_desc,
(struct usb_descriptor_header *) &hs_iso_sink_desc, (struct usb_descriptor_header *) &hs_iso_sink_desc,
(struct usb_descriptor_header *) &source_sink_intf_alt2,
#define HS_ALT_IFC_2_OFFSET 8
(struct usb_descriptor_header *) &hs_int_source_desc,
(struct usb_descriptor_header *) &hs_int_sink_desc,
NULL, NULL,
}; };
@ -335,42 +264,6 @@ static struct usb_ss_ep_comp_descriptor ss_iso_sink_comp_desc = {
.wBytesPerInterval = cpu_to_le16(1024), .wBytesPerInterval = cpu_to_le16(1024),
}; };
static struct usb_endpoint_descriptor ss_int_source_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bmAttributes = USB_ENDPOINT_XFER_INT,
.wMaxPacketSize = cpu_to_le16(1024),
.bInterval = USB_MS_TO_SS_INTERVAL(GZERO_INT_INTERVAL),
};
static struct usb_ss_ep_comp_descriptor ss_int_source_comp_desc = {
.bLength = USB_DT_SS_EP_COMP_SIZE,
.bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
.bMaxBurst = 0,
.bmAttributes = 0,
.wBytesPerInterval = cpu_to_le16(1024),
};
static struct usb_endpoint_descriptor ss_int_sink_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bmAttributes = USB_ENDPOINT_XFER_INT,
.wMaxPacketSize = cpu_to_le16(1024),
.bInterval = USB_MS_TO_SS_INTERVAL(GZERO_INT_INTERVAL),
};
static struct usb_ss_ep_comp_descriptor ss_int_sink_comp_desc = {
.bLength = USB_DT_SS_EP_COMP_SIZE,
.bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
.bMaxBurst = 0,
.bmAttributes = 0,
.wBytesPerInterval = cpu_to_le16(1024),
};
static struct usb_descriptor_header *ss_source_sink_descs[] = { static struct usb_descriptor_header *ss_source_sink_descs[] = {
(struct usb_descriptor_header *) &source_sink_intf_alt0, (struct usb_descriptor_header *) &source_sink_intf_alt0,
(struct usb_descriptor_header *) &ss_source_desc, (struct usb_descriptor_header *) &ss_source_desc,
@ -387,12 +280,6 @@ static struct usb_descriptor_header *ss_source_sink_descs[] = {
(struct usb_descriptor_header *) &ss_iso_source_comp_desc, (struct usb_descriptor_header *) &ss_iso_source_comp_desc,
(struct usb_descriptor_header *) &ss_iso_sink_desc, (struct usb_descriptor_header *) &ss_iso_sink_desc,
(struct usb_descriptor_header *) &ss_iso_sink_comp_desc, (struct usb_descriptor_header *) &ss_iso_sink_comp_desc,
(struct usb_descriptor_header *) &source_sink_intf_alt2,
#define SS_ALT_IFC_2_OFFSET 14
(struct usb_descriptor_header *) &ss_int_source_desc,
(struct usb_descriptor_header *) &ss_int_source_comp_desc,
(struct usb_descriptor_header *) &ss_int_sink_desc,
(struct usb_descriptor_header *) &ss_int_sink_comp_desc,
NULL, NULL,
}; };
@ -414,21 +301,6 @@ static struct usb_gadget_strings *sourcesink_strings[] = {
}; };
/*-------------------------------------------------------------------------*/ /*-------------------------------------------------------------------------*/
static const char *get_ep_string(enum eptype ep_type)
{
switch (ep_type) {
case EP_ISOC:
return "ISOC-";
case EP_INTERRUPT:
return "INTERRUPT-";
case EP_CONTROL:
return "CTRL-";
case EP_BULK:
return "BULK-";
default:
return "UNKNOWN-";
}
}
static inline struct usb_request *ss_alloc_ep_req(struct usb_ep *ep, int len) static inline struct usb_request *ss_alloc_ep_req(struct usb_ep *ep, int len)
{ {
@ -456,8 +328,7 @@ static void disable_ep(struct usb_composite_dev *cdev, struct usb_ep *ep)
void disable_endpoints(struct usb_composite_dev *cdev, void disable_endpoints(struct usb_composite_dev *cdev,
struct usb_ep *in, struct usb_ep *out, struct usb_ep *in, struct usb_ep *out,
struct usb_ep *iso_in, struct usb_ep *iso_out, struct usb_ep *iso_in, struct usb_ep *iso_out)
struct usb_ep *int_in, struct usb_ep *int_out)
{ {
disable_ep(cdev, in); disable_ep(cdev, in);
disable_ep(cdev, out); disable_ep(cdev, out);
@ -465,10 +336,6 @@ void disable_endpoints(struct usb_composite_dev *cdev,
disable_ep(cdev, iso_in); disable_ep(cdev, iso_in);
if (iso_out) if (iso_out)
disable_ep(cdev, iso_out); disable_ep(cdev, iso_out);
if (int_in)
disable_ep(cdev, int_in);
if (int_out)
disable_ep(cdev, int_out);
} }
static int static int
@ -485,7 +352,6 @@ sourcesink_bind(struct usb_configuration *c, struct usb_function *f)
return id; return id;
source_sink_intf_alt0.bInterfaceNumber = id; source_sink_intf_alt0.bInterfaceNumber = id;
source_sink_intf_alt1.bInterfaceNumber = id; source_sink_intf_alt1.bInterfaceNumber = id;
source_sink_intf_alt2.bInterfaceNumber = id;
/* allocate bulk endpoints */ /* allocate bulk endpoints */
ss->in_ep = usb_ep_autoconfig(cdev->gadget, &fs_source_desc); ss->in_ep = usb_ep_autoconfig(cdev->gadget, &fs_source_desc);
@ -546,55 +412,14 @@ no_iso:
if (isoc_maxpacket > 1024) if (isoc_maxpacket > 1024)
isoc_maxpacket = 1024; isoc_maxpacket = 1024;
/* sanity check the interrupt module parameters */
if (int_interval < 1)
int_interval = 1;
if (int_interval > 4096)
int_interval = 4096;
if (int_mult > 2)
int_mult = 2;
if (int_maxburst > 15)
int_maxburst = 15;
/* fill in the FS interrupt descriptors from the module parameters */
fs_int_source_desc.wMaxPacketSize = int_maxpacket > 64 ?
64 : int_maxpacket;
fs_int_source_desc.bInterval = int_interval > 255 ?
255 : int_interval;
fs_int_sink_desc.wMaxPacketSize = int_maxpacket > 64 ?
64 : int_maxpacket;
fs_int_sink_desc.bInterval = int_interval > 255 ?
255 : int_interval;
/* allocate int endpoints */
ss->int_in_ep = usb_ep_autoconfig(cdev->gadget, &fs_int_source_desc);
if (!ss->int_in_ep)
goto no_int;
ss->int_in_ep->driver_data = cdev; /* claim */
ss->int_out_ep = usb_ep_autoconfig(cdev->gadget, &fs_int_sink_desc);
if (ss->int_out_ep) {
ss->int_out_ep->driver_data = cdev; /* claim */
} else {
ss->int_in_ep->driver_data = NULL;
ss->int_in_ep = NULL;
no_int:
fs_source_sink_descs[FS_ALT_IFC_2_OFFSET] = NULL;
hs_source_sink_descs[HS_ALT_IFC_2_OFFSET] = NULL;
ss_source_sink_descs[SS_ALT_IFC_2_OFFSET] = NULL;
}
if (int_maxpacket > 1024)
int_maxpacket = 1024;
/* support high speed hardware */ /* support high speed hardware */
hs_source_desc.bEndpointAddress = fs_source_desc.bEndpointAddress; hs_source_desc.bEndpointAddress = fs_source_desc.bEndpointAddress;
hs_sink_desc.bEndpointAddress = fs_sink_desc.bEndpointAddress; hs_sink_desc.bEndpointAddress = fs_sink_desc.bEndpointAddress;
/* /*
* Fill in the HS isoc and interrupt descriptors from the module * Fill in the HS isoc descriptors from the module parameters.
* parameters. We assume that the user knows what they are doing and * We assume that the user knows what they are doing and won't
* won't give parameters that their UDC doesn't support. * give parameters that their UDC doesn't support.
*/ */
hs_iso_source_desc.wMaxPacketSize = isoc_maxpacket; hs_iso_source_desc.wMaxPacketSize = isoc_maxpacket;
hs_iso_source_desc.wMaxPacketSize |= isoc_mult << 11; hs_iso_source_desc.wMaxPacketSize |= isoc_mult << 11;
@ -607,17 +432,6 @@ no_int:
hs_iso_sink_desc.bInterval = isoc_interval; hs_iso_sink_desc.bInterval = isoc_interval;
hs_iso_sink_desc.bEndpointAddress = fs_iso_sink_desc.bEndpointAddress; hs_iso_sink_desc.bEndpointAddress = fs_iso_sink_desc.bEndpointAddress;
hs_int_source_desc.wMaxPacketSize = int_maxpacket;
hs_int_source_desc.wMaxPacketSize |= int_mult << 11;
hs_int_source_desc.bInterval = USB_MS_TO_HS_INTERVAL(int_interval);
hs_int_source_desc.bEndpointAddress =
fs_int_source_desc.bEndpointAddress;
hs_int_sink_desc.wMaxPacketSize = int_maxpacket;
hs_int_sink_desc.wMaxPacketSize |= int_mult << 11;
hs_int_sink_desc.bInterval = USB_MS_TO_HS_INTERVAL(int_interval);
hs_int_sink_desc.bEndpointAddress = fs_int_sink_desc.bEndpointAddress;
/* support super speed hardware */ /* support super speed hardware */
ss_source_desc.bEndpointAddress = ss_source_desc.bEndpointAddress =
fs_source_desc.bEndpointAddress; fs_source_desc.bEndpointAddress;
@ -625,9 +439,9 @@ no_int:
fs_sink_desc.bEndpointAddress; fs_sink_desc.bEndpointAddress;
/* /*
* Fill in the SS isoc and interrupt descriptors from the module * Fill in the SS isoc descriptors from the module parameters.
* parameters. We assume that the user knows what they are doing and * We assume that the user knows what they are doing and won't
* won't give parameters that their UDC doesn't support. * give parameters that their UDC doesn't support.
*/ */
ss_iso_source_desc.wMaxPacketSize = isoc_maxpacket; ss_iso_source_desc.wMaxPacketSize = isoc_maxpacket;
ss_iso_source_desc.bInterval = isoc_interval; ss_iso_source_desc.bInterval = isoc_interval;
@ -646,37 +460,17 @@ no_int:
isoc_maxpacket * (isoc_mult + 1) * (isoc_maxburst + 1); isoc_maxpacket * (isoc_mult + 1) * (isoc_maxburst + 1);
ss_iso_sink_desc.bEndpointAddress = fs_iso_sink_desc.bEndpointAddress; ss_iso_sink_desc.bEndpointAddress = fs_iso_sink_desc.bEndpointAddress;
ss_int_source_desc.wMaxPacketSize = int_maxpacket;
ss_int_source_desc.bInterval = USB_MS_TO_SS_INTERVAL(int_interval);
ss_int_source_comp_desc.bmAttributes = int_mult;
ss_int_source_comp_desc.bMaxBurst = int_maxburst;
ss_int_source_comp_desc.wBytesPerInterval =
int_maxpacket * (int_mult + 1) * (int_maxburst + 1);
ss_int_source_desc.bEndpointAddress =
fs_int_source_desc.bEndpointAddress;
ss_int_sink_desc.wMaxPacketSize = int_maxpacket;
ss_int_sink_desc.bInterval = USB_MS_TO_SS_INTERVAL(int_interval);
ss_int_sink_comp_desc.bmAttributes = int_mult;
ss_int_sink_comp_desc.bMaxBurst = int_maxburst;
ss_int_sink_comp_desc.wBytesPerInterval =
int_maxpacket * (int_mult + 1) * (int_maxburst + 1);
ss_int_sink_desc.bEndpointAddress = fs_int_sink_desc.bEndpointAddress;
ret = usb_assign_descriptors(f, fs_source_sink_descs, ret = usb_assign_descriptors(f, fs_source_sink_descs,
hs_source_sink_descs, ss_source_sink_descs); hs_source_sink_descs, ss_source_sink_descs);
if (ret) if (ret)
return ret; return ret;
DBG(cdev, "%s speed %s: IN/%s, OUT/%s, ISO-IN/%s, ISO-OUT/%s, " DBG(cdev, "%s speed %s: IN/%s, OUT/%s, ISO-IN/%s, ISO-OUT/%s\n",
"INT-IN/%s, INT-OUT/%s\n",
(gadget_is_superspeed(c->cdev->gadget) ? "super" : (gadget_is_superspeed(c->cdev->gadget) ? "super" :
(gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full")), (gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full")),
f->name, ss->in_ep->name, ss->out_ep->name, f->name, ss->in_ep->name, ss->out_ep->name,
ss->iso_in_ep ? ss->iso_in_ep->name : "<none>", ss->iso_in_ep ? ss->iso_in_ep->name : "<none>",
ss->iso_out_ep ? ss->iso_out_ep->name : "<none>", ss->iso_out_ep ? ss->iso_out_ep->name : "<none>");
ss->int_in_ep ? ss->int_in_ep->name : "<none>",
ss->int_out_ep ? ss->int_out_ep->name : "<none>");
return 0; return 0;
} }
@ -807,15 +601,14 @@ static void source_sink_complete(struct usb_ep *ep, struct usb_request *req)
} }
static int source_sink_start_ep(struct f_sourcesink *ss, bool is_in, static int source_sink_start_ep(struct f_sourcesink *ss, bool is_in,
enum eptype ep_type, int speed) bool is_iso, int speed)
{ {
struct usb_ep *ep; struct usb_ep *ep;
struct usb_request *req; struct usb_request *req;
int i, size, status; int i, size, status;
for (i = 0; i < 8; i++) { for (i = 0; i < 8; i++) {
switch (ep_type) { if (is_iso) {
case EP_ISOC:
switch (speed) { switch (speed) {
case USB_SPEED_SUPER: case USB_SPEED_SUPER:
size = isoc_maxpacket * (isoc_mult + 1) * size = isoc_maxpacket * (isoc_mult + 1) *
@ -831,28 +624,9 @@ static int source_sink_start_ep(struct f_sourcesink *ss, bool is_in,
} }
ep = is_in ? ss->iso_in_ep : ss->iso_out_ep; ep = is_in ? ss->iso_in_ep : ss->iso_out_ep;
req = ss_alloc_ep_req(ep, size); req = ss_alloc_ep_req(ep, size);
break; } else {
case EP_INTERRUPT:
switch (speed) {
case USB_SPEED_SUPER:
size = int_maxpacket * (int_mult + 1) *
(int_maxburst + 1);
break;
case USB_SPEED_HIGH:
size = int_maxpacket * (int_mult + 1);
break;
default:
size = int_maxpacket > 1023 ?
1023 : int_maxpacket;
break;
}
ep = is_in ? ss->int_in_ep : ss->int_out_ep;
req = ss_alloc_ep_req(ep, size);
break;
default:
ep = is_in ? ss->in_ep : ss->out_ep; ep = is_in ? ss->in_ep : ss->out_ep;
req = ss_alloc_ep_req(ep, 0); req = ss_alloc_ep_req(ep, 0);
break;
} }
if (!req) if (!req)
@ -870,12 +644,12 @@ static int source_sink_start_ep(struct f_sourcesink *ss, bool is_in,
cdev = ss->function.config->cdev; cdev = ss->function.config->cdev;
ERROR(cdev, "start %s%s %s --> %d\n", ERROR(cdev, "start %s%s %s --> %d\n",
get_ep_string(ep_type), is_in ? "IN" : "OUT", is_iso ? "ISO-" : "", is_in ? "IN" : "OUT",
ep->name, status); ep->name, status);
free_ep_req(ep, req); free_ep_req(ep, req);
} }
if (!(ep_type == EP_ISOC)) if (!is_iso)
break; break;
} }
@ -888,7 +662,7 @@ static void disable_source_sink(struct f_sourcesink *ss)
cdev = ss->function.config->cdev; cdev = ss->function.config->cdev;
disable_endpoints(cdev, ss->in_ep, ss->out_ep, ss->iso_in_ep, disable_endpoints(cdev, ss->in_ep, ss->out_ep, ss->iso_in_ep,
ss->iso_out_ep, ss->int_in_ep, ss->int_out_ep); ss->iso_out_ep);
VDBG(cdev, "%s disabled\n", ss->function.name); VDBG(cdev, "%s disabled\n", ss->function.name);
} }
@ -900,62 +674,6 @@ enable_source_sink(struct usb_composite_dev *cdev, struct f_sourcesink *ss,
int speed = cdev->gadget->speed; int speed = cdev->gadget->speed;
struct usb_ep *ep; struct usb_ep *ep;
if (alt == 2) {
/* Configure for periodic interrupt endpoint */
ep = ss->int_in_ep;
if (ep) {
result = config_ep_by_speed(cdev->gadget,
&(ss->function), ep);
if (result)
return result;
result = usb_ep_enable(ep);
if (result < 0)
return result;
ep->driver_data = ss;
result = source_sink_start_ep(ss, true, EP_INTERRUPT,
speed);
if (result < 0) {
fail1:
ep = ss->int_in_ep;
if (ep) {
usb_ep_disable(ep);
ep->driver_data = NULL;
}
return result;
}
}
/*
* one interrupt endpoint reads (sinks) anything OUT (from the
* host)
*/
ep = ss->int_out_ep;
if (ep) {
result = config_ep_by_speed(cdev->gadget,
&(ss->function), ep);
if (result)
goto fail1;
result = usb_ep_enable(ep);
if (result < 0)
goto fail1;
ep->driver_data = ss;
result = source_sink_start_ep(ss, false, EP_INTERRUPT,
speed);
if (result < 0) {
ep = ss->int_out_ep;
usb_ep_disable(ep);
ep->driver_data = NULL;
goto fail1;
}
}
goto out;
}
/* one bulk endpoint writes (sources) zeroes IN (to the host) */ /* one bulk endpoint writes (sources) zeroes IN (to the host) */
ep = ss->in_ep; ep = ss->in_ep;
result = config_ep_by_speed(cdev->gadget, &(ss->function), ep); result = config_ep_by_speed(cdev->gadget, &(ss->function), ep);
@ -966,7 +684,7 @@ fail1:
return result; return result;
ep->driver_data = ss; ep->driver_data = ss;
result = source_sink_start_ep(ss, true, EP_BULK, speed); result = source_sink_start_ep(ss, true, false, speed);
if (result < 0) { if (result < 0) {
fail: fail:
ep = ss->in_ep; ep = ss->in_ep;
@ -985,7 +703,7 @@ fail:
goto fail; goto fail;
ep->driver_data = ss; ep->driver_data = ss;
result = source_sink_start_ep(ss, false, EP_BULK, speed); result = source_sink_start_ep(ss, false, false, speed);
if (result < 0) { if (result < 0) {
fail2: fail2:
ep = ss->out_ep; ep = ss->out_ep;
@ -1008,7 +726,7 @@ fail2:
goto fail2; goto fail2;
ep->driver_data = ss; ep->driver_data = ss;
result = source_sink_start_ep(ss, true, EP_ISOC, speed); result = source_sink_start_ep(ss, true, true, speed);
if (result < 0) { if (result < 0) {
fail3: fail3:
ep = ss->iso_in_ep; ep = ss->iso_in_ep;
@ -1031,14 +749,13 @@ fail3:
goto fail3; goto fail3;
ep->driver_data = ss; ep->driver_data = ss;
result = source_sink_start_ep(ss, false, EP_ISOC, speed); result = source_sink_start_ep(ss, false, true, speed);
if (result < 0) { if (result < 0) {
usb_ep_disable(ep); usb_ep_disable(ep);
ep->driver_data = NULL; ep->driver_data = NULL;
goto fail3; goto fail3;
} }
} }
out: out:
ss->cur_alt = alt; ss->cur_alt = alt;
@ -1054,8 +771,6 @@ static int sourcesink_set_alt(struct usb_function *f,
if (ss->in_ep->driver_data) if (ss->in_ep->driver_data)
disable_source_sink(ss); disable_source_sink(ss);
else if (alt == 2 && ss->int_in_ep->driver_data)
disable_source_sink(ss);
return enable_source_sink(cdev, ss, alt); return enable_source_sink(cdev, ss, alt);
} }
@ -1168,10 +883,6 @@ static struct usb_function *source_sink_alloc_func(
isoc_maxpacket = ss_opts->isoc_maxpacket; isoc_maxpacket = ss_opts->isoc_maxpacket;
isoc_mult = ss_opts->isoc_mult; isoc_mult = ss_opts->isoc_mult;
isoc_maxburst = ss_opts->isoc_maxburst; isoc_maxburst = ss_opts->isoc_maxburst;
int_interval = ss_opts->int_interval;
int_maxpacket = ss_opts->int_maxpacket;
int_mult = ss_opts->int_mult;
int_maxburst = ss_opts->int_maxburst;
buflen = ss_opts->bulk_buflen; buflen = ss_opts->bulk_buflen;
ss->function.name = "source/sink"; ss->function.name = "source/sink";
@ -1468,182 +1179,6 @@ static struct f_ss_opts_attribute f_ss_opts_bulk_buflen =
f_ss_opts_bulk_buflen_show, f_ss_opts_bulk_buflen_show,
f_ss_opts_bulk_buflen_store); f_ss_opts_bulk_buflen_store);
static ssize_t f_ss_opts_int_interval_show(struct f_ss_opts *opts, char *page)
{
int result;
mutex_lock(&opts->lock);
result = sprintf(page, "%u", opts->int_interval);
mutex_unlock(&opts->lock);
return result;
}
static ssize_t f_ss_opts_int_interval_store(struct f_ss_opts *opts,
const char *page, size_t len)
{
int ret;
u32 num;
mutex_lock(&opts->lock);
if (opts->refcnt) {
ret = -EBUSY;
goto end;
}
ret = kstrtou32(page, 0, &num);
if (ret)
goto end;
if (num > 4096) {
ret = -EINVAL;
goto end;
}
opts->int_interval = num;
ret = len;
end:
mutex_unlock(&opts->lock);
return ret;
}
static struct f_ss_opts_attribute f_ss_opts_int_interval =
__CONFIGFS_ATTR(int_interval, S_IRUGO | S_IWUSR,
f_ss_opts_int_interval_show,
f_ss_opts_int_interval_store);
static ssize_t f_ss_opts_int_maxpacket_show(struct f_ss_opts *opts, char *page)
{
int result;
mutex_lock(&opts->lock);
result = sprintf(page, "%u", opts->int_maxpacket);
mutex_unlock(&opts->lock);
return result;
}
static ssize_t f_ss_opts_int_maxpacket_store(struct f_ss_opts *opts,
const char *page, size_t len)
{
int ret;
u16 num;
mutex_lock(&opts->lock);
if (opts->refcnt) {
ret = -EBUSY;
goto end;
}
ret = kstrtou16(page, 0, &num);
if (ret)
goto end;
if (num > 1024) {
ret = -EINVAL;
goto end;
}
opts->int_maxpacket = num;
ret = len;
end:
mutex_unlock(&opts->lock);
return ret;
}
static struct f_ss_opts_attribute f_ss_opts_int_maxpacket =
__CONFIGFS_ATTR(int_maxpacket, S_IRUGO | S_IWUSR,
f_ss_opts_int_maxpacket_show,
f_ss_opts_int_maxpacket_store);
static ssize_t f_ss_opts_int_mult_show(struct f_ss_opts *opts, char *page)
{
int result;
mutex_lock(&opts->lock);
result = sprintf(page, "%u", opts->int_mult);
mutex_unlock(&opts->lock);
return result;
}
static ssize_t f_ss_opts_int_mult_store(struct f_ss_opts *opts,
const char *page, size_t len)
{
int ret;
u8 num;
mutex_lock(&opts->lock);
if (opts->refcnt) {
ret = -EBUSY;
goto end;
}
ret = kstrtou8(page, 0, &num);
if (ret)
goto end;
if (num > 2) {
ret = -EINVAL;
goto end;
}
opts->int_mult = num;
ret = len;
end:
mutex_unlock(&opts->lock);
return ret;
}
static struct f_ss_opts_attribute f_ss_opts_int_mult =
__CONFIGFS_ATTR(int_mult, S_IRUGO | S_IWUSR,
f_ss_opts_int_mult_show,
f_ss_opts_int_mult_store);
static ssize_t f_ss_opts_int_maxburst_show(struct f_ss_opts *opts, char *page)
{
int result;
mutex_lock(&opts->lock);
result = sprintf(page, "%u", opts->int_maxburst);
mutex_unlock(&opts->lock);
return result;
}
static ssize_t f_ss_opts_int_maxburst_store(struct f_ss_opts *opts,
const char *page, size_t len)
{
int ret;
u8 num;
mutex_lock(&opts->lock);
if (opts->refcnt) {
ret = -EBUSY;
goto end;
}
ret = kstrtou8(page, 0, &num);
if (ret)
goto end;
if (num > 15) {
ret = -EINVAL;
goto end;
}
opts->int_maxburst = num;
ret = len;
end:
mutex_unlock(&opts->lock);
return ret;
}
static struct f_ss_opts_attribute f_ss_opts_int_maxburst =
__CONFIGFS_ATTR(int_maxburst, S_IRUGO | S_IWUSR,
f_ss_opts_int_maxburst_show,
f_ss_opts_int_maxburst_store);
static struct configfs_attribute *ss_attrs[] = { static struct configfs_attribute *ss_attrs[] = {
&f_ss_opts_pattern.attr, &f_ss_opts_pattern.attr,
&f_ss_opts_isoc_interval.attr, &f_ss_opts_isoc_interval.attr,
@ -1651,10 +1186,6 @@ static struct configfs_attribute *ss_attrs[] = {
&f_ss_opts_isoc_mult.attr, &f_ss_opts_isoc_mult.attr,
&f_ss_opts_isoc_maxburst.attr, &f_ss_opts_isoc_maxburst.attr,
&f_ss_opts_bulk_buflen.attr, &f_ss_opts_bulk_buflen.attr,
&f_ss_opts_int_interval.attr,
&f_ss_opts_int_maxpacket.attr,
&f_ss_opts_int_mult.attr,
&f_ss_opts_int_maxburst.attr,
NULL, NULL,
}; };
@ -1684,8 +1215,6 @@ static struct usb_function_instance *source_sink_alloc_inst(void)
ss_opts->isoc_interval = GZERO_ISOC_INTERVAL; ss_opts->isoc_interval = GZERO_ISOC_INTERVAL;
ss_opts->isoc_maxpacket = GZERO_ISOC_MAXPACKET; ss_opts->isoc_maxpacket = GZERO_ISOC_MAXPACKET;
ss_opts->bulk_buflen = GZERO_BULK_BUFLEN; ss_opts->bulk_buflen = GZERO_BULK_BUFLEN;
ss_opts->int_interval = GZERO_INT_INTERVAL;
ss_opts->int_maxpacket = GZERO_INT_MAXPACKET;
config_group_init_type_name(&ss_opts->func_inst.group, "", config_group_init_type_name(&ss_opts->func_inst.group, "",
&ss_func_type); &ss_func_type);

View file

@ -10,8 +10,6 @@
#define GZERO_QLEN 32 #define GZERO_QLEN 32
#define GZERO_ISOC_INTERVAL 4 #define GZERO_ISOC_INTERVAL 4
#define GZERO_ISOC_MAXPACKET 1024 #define GZERO_ISOC_MAXPACKET 1024
#define GZERO_INT_INTERVAL 1 /* Default interrupt interval = 1 ms */
#define GZERO_INT_MAXPACKET 1024
struct usb_zero_options { struct usb_zero_options {
unsigned pattern; unsigned pattern;
@ -19,10 +17,6 @@ struct usb_zero_options {
unsigned isoc_maxpacket; unsigned isoc_maxpacket;
unsigned isoc_mult; unsigned isoc_mult;
unsigned isoc_maxburst; unsigned isoc_maxburst;
unsigned int_interval; /* In ms */
unsigned int_maxpacket;
unsigned int_mult;
unsigned int_maxburst;
unsigned bulk_buflen; unsigned bulk_buflen;
unsigned qlen; unsigned qlen;
}; };
@ -34,10 +28,6 @@ struct f_ss_opts {
unsigned isoc_maxpacket; unsigned isoc_maxpacket;
unsigned isoc_mult; unsigned isoc_mult;
unsigned isoc_maxburst; unsigned isoc_maxburst;
unsigned int_interval; /* In ms */
unsigned int_maxpacket;
unsigned int_mult;
unsigned int_maxburst;
unsigned bulk_buflen; unsigned bulk_buflen;
/* /*
@ -72,7 +62,6 @@ int lb_modinit(void);
void free_ep_req(struct usb_ep *ep, struct usb_request *req); void free_ep_req(struct usb_ep *ep, struct usb_request *req);
void disable_endpoints(struct usb_composite_dev *cdev, void disable_endpoints(struct usb_composite_dev *cdev,
struct usb_ep *in, struct usb_ep *out, struct usb_ep *in, struct usb_ep *out,
struct usb_ep *iso_in, struct usb_ep *iso_out, struct usb_ep *iso_in, struct usb_ep *iso_out);
struct usb_ep *int_in, struct usb_ep *int_out);
#endif /* __G_ZERO_H */ #endif /* __G_ZERO_H */

View file

@ -1740,10 +1740,9 @@ static int tcm_usbg_make_nexus(struct usbg_tpg *tpg, char *name)
goto err_session; goto err_session;
} }
/* /*
* Now register the TCM vHost virtual I_T Nexus as active with the * Now register the TCM vHost virtual I_T Nexus as active.
* call to __transport_register_session()
*/ */
__transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl, transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl,
tv_nexus->tvn_se_sess, tv_nexus); tv_nexus->tvn_se_sess, tv_nexus);
tpg->tpg_nexus = tv_nexus; tpg->tpg_nexus = tv_nexus;
mutex_unlock(&tpg->tpg_mutex); mutex_unlock(&tpg->tpg_mutex);

Some files were not shown because too many files have changed in this diff Show more