android_kernel_motorola_sm6225/include/net/snmp.h
David L Stevens 96793b4825 [IPV4]: Add ICMPMsgStats MIB (RFC 4293)
Background: RFC 4293 deprecates the existing individual, named ICMP
type counters, replacing them with the ICMPMsgStatsTable. This table
includes entries for both IPv4 and IPv6, and requires counting of all
ICMP types, whether or not the machine implements the type.

These patches "remove" (but not really) the existing counters and
replace them with the ICMPMsgStats tables for v4 and v6.
The named counters still appear in their usual /proc locations, but their
values now come from the new tables. The new tables also count packets
generated from raw socket output (e.g., OutEchoes, MLD queries, RAs from
radvd, etc.).

Changes:
1) create icmpmsg_statistics mib
2) create icmpv6msg_statistics mib
3) modify existing counters to use these
4) modify /proc/net/snmp to add "IcmpMsg" with all ICMP types
        listed by number for easy SNMP parsing (a sample is shown
        after this list)
5) modify /proc/net/snmp printing for "Icmp" to get the named data
        from the new counters
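
As noted in item 4, the new "IcmpMsg" lines are keyed by ICMP type number
rather than by counter name. A sketch of the expected shape (the
InType<N>/OutType<N> field names are assumed here; the values and the
truncation are purely illustrative):

        IcmpMsg: InType0 InType3 InType8 ... OutType0 OutType3 OutType8 ...
        IcmpMsg: 4 37 21 ... 21 37 4 ...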

Signed-off-by: David L Stevens <dlstevens@us.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2007-10-10 16:51:28 -07:00


/*
 *
 *		SNMP MIB entries for the IP subsystem.
 *
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *
 *		We don't choose to implement SNMP in the kernel (this would
 *		be silly as SNMP is a pain in the backside in places). We do
 *		however need to collect the MIB statistics and export them
 *		out of /proc (eventually)
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *		$Id: snmp.h,v 1.19 2001/06/14 13:40:46 davem Exp $
 *
 */
#ifndef _SNMP_H
#define _SNMP_H
#include <linux/cache.h>
#include <linux/snmp.h>
/*
 * MIBs are stored in an array of unsigned long.
 */
/*
 * struct snmp_mib{}
 * - list of entries for a particular API (such as /proc/net/snmp)
 * - name of entries.
 */
struct snmp_mib {
	char *name;
	int entry;
};
#define SNMP_MIB_ITEM(_name,_entry)	{	\
	.name = _name,				\
	.entry = _entry,			\
}

#define SNMP_MIB_SENTINEL	{	\
	.name = NULL,			\
	.entry = 0,			\
}
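
/*
 * Usage sketch (illustrative only): callers describe their counters as a
 * NULL-terminated table of snmp_mib entries, typically for /proc output.
 * The table name and the exact entries below are assumptions, not part of
 * this header:
 *
 *	static const struct snmp_mib snmp4_icmp_list[] = {
 *		SNMP_MIB_ITEM("InMsgs", ICMP_MIB_INMSGS),
 *		SNMP_MIB_ITEM("InErrors", ICMP_MIB_INERRORS),
 *		SNMP_MIB_SENTINEL
 *	};
 */
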
/*
 * We use all unsigned longs. Linux will soon be so reliable that even
 * these will rapidly get too small 8-). Seriously consider the IpInReceives
 * count on the 20Gb/s+ networks people expect in a few years time!
 */
/*
 * The rule for padding:
 * Best is power of two because then the right structure can be found by a
 * simple shift. The structure should always be cache line aligned.
 * gcc needs n=alignto(cachelinesize, popcnt(sizeof(bla_mib))) shift/add
 * instructions to emulate multiply in case it is not power-of-two.
 * Currently n is always <=3 for all sizes, so simple cache line alignment
 * is enough.
 *
 * The best solution would be a global CPU-local area; especially on 64
 * and 128 byte cacheline machines it makes a *lot* of sense -AK
 */
#define __SNMP_MIB_ALIGN__	____cacheline_aligned

/* IPstats */
#define IPSTATS_MIB_MAX	__IPSTATS_MIB_MAX

struct ipstats_mib {
	unsigned long	mibs[IPSTATS_MIB_MAX];
} __SNMP_MIB_ALIGN__;

/* ICMP */
#define ICMP_MIB_DUMMY	__ICMP_MIB_MAX
#define ICMP_MIB_MAX	(__ICMP_MIB_MAX + 1)

struct icmp_mib {
	unsigned long	mibs[ICMP_MIB_MAX];
} __SNMP_MIB_ALIGN__;

#define ICMPMSG_MIB_MAX	__ICMPMSG_MIB_MAX

struct icmpmsg_mib {
	unsigned long	mibs[ICMPMSG_MIB_MAX];
} __SNMP_MIB_ALIGN__;
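
/*
 * The ICMPMsgStats table is indexed directly by ICMP message type. A sketch
 * of how a per-type counter might be bumped on the receive path (the
 * "icmpmsg_statistics" name is assumed here for illustration and is not
 * defined by this header):
 *
 *	SNMP_INC_STATS_BH(icmpmsg_statistics, icmp_hdr(skb)->type);
 */
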
/* ICMP6 (IPv6-ICMP) */
#define ICMP6_MIB_MAX	__ICMP6_MIB_MAX

struct icmpv6_mib {
	unsigned long	mibs[ICMP6_MIB_MAX];
} __SNMP_MIB_ALIGN__;

#define ICMP6MSG_MIB_MAX	__ICMP6MSG_MIB_MAX

struct icmpv6msg_mib {
	unsigned long	mibs[ICMP6MSG_MIB_MAX];
} __SNMP_MIB_ALIGN__;

/* TCP */
#define TCP_MIB_MAX	__TCP_MIB_MAX

struct tcp_mib {
	unsigned long	mibs[TCP_MIB_MAX];
} __SNMP_MIB_ALIGN__;

/* UDP */
#define UDP_MIB_MAX	__UDP_MIB_MAX

struct udp_mib {
	unsigned long	mibs[UDP_MIB_MAX];
} __SNMP_MIB_ALIGN__;

/* Linux */
#define LINUX_MIB_MAX	__LINUX_MIB_MAX

struct linux_mib {
	unsigned long	mibs[LINUX_MIB_MAX];
};
/*
 * FIXME: On x86 and some other CPUs the split into user and softirq parts
 * is not needed because addl $1,memory is atomic against interrupts (but
 * atomic_inc would be overkill because of the lock cycles). Wants new
 * nonlocked_atomic_inc() primitives -AK
 */
#define DEFINE_SNMP_STAT(type, name)	\
	__typeof__(type) *name[2]
#define DECLARE_SNMP_STAT(type, name)	\
	extern __typeof__(type) *name[2]

#define SNMP_STAT_BHPTR(name)	(name[0])
#define SNMP_STAT_USRPTR(name)	(name[1])

#define SNMP_INC_STATS_BH(mib, field)	\
	(per_cpu_ptr(mib[0], raw_smp_processor_id())->mibs[field]++)
#define SNMP_INC_STATS_OFFSET_BH(mib, field, offset)	\
	(per_cpu_ptr(mib[0], raw_smp_processor_id())->mibs[field + (offset)]++)
#define SNMP_INC_STATS_USER(mib, field)	\
	(per_cpu_ptr(mib[1], raw_smp_processor_id())->mibs[field]++)
#define SNMP_INC_STATS(mib, field)	\
	(per_cpu_ptr(mib[!in_softirq()], raw_smp_processor_id())->mibs[field]++)
#define SNMP_DEC_STATS(mib, field)	\
	(per_cpu_ptr(mib[!in_softirq()], raw_smp_processor_id())->mibs[field]--)
#define SNMP_ADD_STATS_BH(mib, field, addend)	\
	(per_cpu_ptr(mib[0], raw_smp_processor_id())->mibs[field] += addend)
#define SNMP_ADD_STATS_USER(mib, field, addend)	\
	(per_cpu_ptr(mib[1], raw_smp_processor_id())->mibs[field] += addend)
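
/*
 * Usage sketch (illustrative; the "icmp_statistics" name follows the IPv4
 * ICMP code but is an assumption as far as this header is concerned). Both
 * per-cpu halves (BH and USER) must be allocated, e.g. with alloc_percpu(),
 * before any counter is touched:
 *
 *	DEFINE_SNMP_STAT(struct icmp_mib, icmp_statistics);
 *
 *	SNMP_INC_STATS_BH(icmp_statistics, ICMP_MIB_INMSGS);    (softirq/receive path)
 *	SNMP_INC_STATS_USER(icmp_statistics, ICMP_MIB_OUTMSGS); (process context)
 *	SNMP_INC_STATS(icmp_statistics, ICMP_MIB_OUTMSGS);      (picks the right half itself)
 */
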
#endif