#ifndef __irq_h
#define __irq_h

/*
 * Please do not include this file in generic code. There is currently
 * no requirement for any architecture to implement anything held
 * within this file.
 *
 * Thanks. --rmk
 */

#include <linux/config.h>

#if !defined(CONFIG_ARCH_S390)

#include <linux/linkage.h>
#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/cpumask.h>

#include <asm/irq.h>
#include <asm/ptrace.h>

/*
 * IRQ line status.
 */
#define IRQ_INPROGRESS 1 /* IRQ handler active - do not enter! */
#define IRQ_DISABLED 2 /* IRQ disabled - do not enter! */
#define IRQ_PENDING 4 /* IRQ pending - replay on enable */
#define IRQ_REPLAY 8 /* IRQ has been replayed but not acked yet */
#define IRQ_AUTODETECT 16 /* IRQ is being autodetected */
#define IRQ_WAITING 32 /* IRQ not yet seen - for autodetection */
#define IRQ_LEVEL 64 /* IRQ level triggered */
#define IRQ_MASKED 128 /* IRQ masked - shouldn't be seen again */
#if defined(ARCH_HAS_IRQ_PER_CPU)
# define IRQ_PER_CPU 256 /* IRQ is per CPU */
# define CHECK_IRQ_PER_CPU(var) ((var) & IRQ_PER_CPU)
#else
# define CHECK_IRQ_PER_CPU(var) 0
#endif
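
/*
 * Illustrative sketch (not from the original source): core and arch
 * code test and combine these bits on desc->status, for example:
 *
 *	if (desc->status & (IRQ_DISABLED | IRQ_INPROGRESS))
 *		desc->status |= IRQ_PENDING;	(mark for replay on enable)
 *
 *	if (CHECK_IRQ_PER_CPU(desc->status))
 *		return;		(per-CPU IRQs keep their affinity)
 */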

/*
 * Interrupt controller descriptor. This is all we need
 * to describe the low-level hardware.
 */
struct hw_interrupt_type {
	const char * typename;
	unsigned int (*startup)(unsigned int irq);
	void (*shutdown)(unsigned int irq);
	void (*enable)(unsigned int irq);
	void (*disable)(unsigned int irq);
	void (*ack)(unsigned int irq);
	void (*end)(unsigned int irq);
	void (*set_affinity)(unsigned int irq, cpumask_t dest);
	/* Currently used only by UML, might disappear one day. */
#ifdef CONFIG_IRQ_RELEASE_METHOD
	void (*release)(unsigned int irq, void *dev_id);
#endif
};

typedef struct hw_interrupt_type hw_irq_controller;
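
/*
 * A minimal sketch (hypothetical names, for illustration only; real
 * instances live in arch code) of how a controller fills in the hooks
 * above:
 *
 *	static unsigned int example_startup(unsigned int irq)
 *	{
 *		return 0;	(no interrupt was pending at startup)
 *	}
 *
 *	static void example_noop(unsigned int irq)
 *	{
 *	}
 *
 *	static struct hw_interrupt_type example_irq_type = {
 *		.typename	= "example",
 *		.startup	= example_startup,
 *		.shutdown	= example_noop,
 *		.enable		= example_noop,
 *		.disable	= example_noop,
 *		.ack		= example_noop,
 *		.end		= example_noop,
 *	};
 */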

/*
 * This is the "IRQ descriptor", which contains various information
 * about the irq, including what kind of hardware handling it has,
 * whether it is disabled, and so on.
 *
 * Pad this out to 32 bytes for cache and indexing reasons.
 */
typedef struct irq_desc {
	hw_irq_controller *handler;
	void *handler_data;
	struct irqaction *action;	/* IRQ action list */
	unsigned int status;		/* IRQ status */
	unsigned int depth;		/* nested irq disables */
	unsigned int irq_count;		/* For detecting broken interrupts */
	unsigned int irqs_unhandled;
	spinlock_t lock;
#if defined (CONFIG_GENERIC_PENDING_IRQ) || defined (CONFIG_IRQBALANCE)
	unsigned int move_irq;		/* Flag: need to re-target interrupt destination */
#endif
} ____cacheline_aligned irq_desc_t;

extern irq_desc_t irq_desc [NR_IRQS];

/* Return a pointer to the irq descriptor for IRQ. */
static inline irq_desc_t *
irq_descp (int irq)
{
	return irq_desc + irq;
}
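
/*
 * For example (sketch), arch code reaches the controller hooks through
 * the descriptor:
 *
 *	irq_desc_t *desc = irq_descp(irq);
 *
 *	spin_lock(&desc->lock);
 *	desc->handler->ack(irq);
 *	spin_unlock(&desc->lock);
 */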

#include <asm/hw_irq.h> /* the arch dependent stuff */

extern int setup_irq(unsigned int irq, struct irqaction * new);
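
/*
 * Typical use (an illustrative sketch modeled on arch timer setup; the
 * handler name and irqaction below are hypothetical):
 *
 *	static struct irqaction example_timer_irq = {
 *		.handler = example_timer_interrupt,
 *		.flags	 = SA_INTERRUPT,
 *		.name	 = "timer",
 *	};
 *
 *	setup_irq(0, &example_timer_irq);
 */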

#ifdef CONFIG_GENERIC_HARDIRQS
extern cpumask_t irq_affinity[NR_IRQS];

#ifdef CONFIG_SMP
static inline void set_native_irq_info(int irq, cpumask_t mask)
{
	irq_affinity[irq] = mask;
}
#else
static inline void set_native_irq_info(int irq, cpumask_t mask)
{
}
#endif

#ifdef CONFIG_SMP

#if defined (CONFIG_GENERIC_PENDING_IRQ) || defined (CONFIG_IRQBALANCE)
extern cpumask_t pending_irq_cpumask[NR_IRQS];

static inline void set_pending_irq(unsigned int irq, cpumask_t mask)
{
	irq_desc_t *desc = irq_desc + irq;
	unsigned long flags;

	spin_lock_irqsave(&desc->lock, flags);
	desc->move_irq = 1;
	pending_irq_cpumask[irq] = mask;
	spin_unlock_irqrestore(&desc->lock, flags);
}

static inline void
move_native_irq(int irq)
{
	cpumask_t tmp;
	irq_desc_t *desc = irq_descp(irq);

	if (likely(!desc->move_irq))
		return;

	desc->move_irq = 0;

	if (likely(cpus_empty(pending_irq_cpumask[irq])))
		return;

	if (!desc->handler->set_affinity)
		return;

	/* note - we hold the desc->lock */
	cpus_and(tmp, pending_irq_cpumask[irq], cpu_online_map);

	/*
	 * If there was a valid mask to work with, please
	 * do the disable, re-program, enable sequence.
	 * This is *not* particularly important for level-triggered
	 * interrupts, but in the edge-triggered case we might be
	 * re-programming the RTE while an active trigger is coming
	 * in, which could make some ioapics malfunction.
	 * Being paranoid, I guess!
	 */
	if (unlikely(!cpus_empty(tmp))) {
		desc->handler->disable(irq);
		desc->handler->set_affinity(irq, tmp);
		desc->handler->enable(irq);
	}
	cpus_clear(pending_irq_cpumask[irq]);
}
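
/*
 * Sketch of the intended flow: a write to /proc/irq/N/smp_affinity
 * lands in set_pending_irq(), which only records the request; the
 * controller is re-programmed from the interrupt path, where the line
 * is naturally quiesced:
 *
 *	set_pending_irq(irq, new_mask);		(defer: set flag, save mask)
 *
 *	(later, while servicing the next interrupt:)
 *	move_native_irq(irq);			(disable, set_affinity, enable)
 */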

#ifdef CONFIG_PCI_MSI
/*
 * Wonder why these are dummies?
 * For example, set_ioapic_affinity_vector() calls its
 * set_ioapic_affinity_irq() counterpart after translating the vector
 * to irq info. We need to perform this operation on the real irq when
 * we don't use vectors, i.e. when pci_use_vector() is false.
 */
static inline void move_irq(int irq)
{
}

static inline void set_irq_info(int irq, cpumask_t mask)
{
}

#else // CONFIG_PCI_MSI

static inline void move_irq(int irq)
{
	move_native_irq(irq);
}

static inline void set_irq_info(int irq, cpumask_t mask)
{
	set_native_irq_info(irq, mask);
}
#endif // CONFIG_PCI_MSI

#else // CONFIG_GENERIC_PENDING_IRQ || CONFIG_IRQBALANCE

#define move_irq(x)
#define move_native_irq(x)
#define set_pending_irq(x,y)
static inline void set_irq_info(int irq, cpumask_t mask)
{
	set_native_irq_info(irq, mask);
}

#endif // CONFIG_GENERIC_PENDING_IRQ

#else // CONFIG_SMP

#define move_irq(x)
#define move_native_irq(x)

#endif // CONFIG_SMP

extern int no_irq_affinity;
extern int noirqdebug_setup(char *str);

extern fastcall int handle_IRQ_event(unsigned int irq, struct pt_regs *regs,
				     struct irqaction *action);
extern fastcall unsigned int __do_IRQ(unsigned int irq, struct pt_regs *regs);
extern void note_interrupt(unsigned int irq, irq_desc_t *desc,
			   int action_ret, struct pt_regs *regs);
extern int can_request_irq(unsigned int irq, unsigned long irqflags);
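
/*
 * Typical use of __do_IRQ() (illustrative sketch, loosely modeled on
 * the i386 code): the arch low-level entry code decodes the vector
 * into an irq number and hands it to the generic layer:
 *
 *	fastcall unsigned int do_IRQ(struct pt_regs *regs)
 *	{
 *		int irq = regs->orig_eax & 0xff;	(arch-specific decode)
 *
 *		irq_enter();
 *		__do_IRQ(irq, regs);
 *		irq_exit();
 *		return 1;
 *	}
 */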

extern void init_irq_proc(void);
#endif /* CONFIG_GENERIC_HARDIRQS */

extern hw_irq_controller no_irq_type; /* needed in every arch? */

#endif /* !CONFIG_ARCH_S390 */

#endif /* __irq_h */