Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-next into wireless-next

Wey-Yi Guy 2011-12-02 08:20:01 -08:00
commit baa7248f6a
112 changed files with 6397 additions and 2607 deletions

View file

@ -166,7 +166,9 @@ static int ath_ahb_probe(struct platform_device *pdev)
if (to_platform_device(ah->dev)->id == 0 &&
(bcfg->config->flags & (BD_WLAN0 | BD_WLAN1)) ==
(BD_WLAN1 | BD_WLAN0))
__set_bit(ATH_STAT_2G_DISABLED, ah->status);
ah->ah_capabilities.cap_needs_2GHz_ovr = true;
else
ah->ah_capabilities.cap_needs_2GHz_ovr = false;
}
ret = ath5k_init_ah(ah, &ath_ahb_bus_ops);

View file

@ -27,15 +27,21 @@
* or reducing sensitivity as necessary.
*
* The parameters are:
*
* - "noise immunity"
*
* - "spur immunity"
*
* - "firstep level"
*
* - "OFDM weak signal detection"
*
* - "CCK weak signal detection"
*
* Basically we look at the amount of OFDM and CCK timing errors we get and then
* raise or lower immunity accordingly by setting one or more of these
* parameters.
*
* Newer chipsets have PHY error counters in hardware which will generate a MIB
* interrupt when they overflow. Older hardware has to enable PHY error frames
* by setting an RX flag and then count every single PHY error. When a specified
@ -45,11 +51,13 @@
*/
/***********************\
* ANI parameter control *
\***********************/
/**
* ath5k_ani_set_noise_immunity_level() - Set noise immunity level
*
* @ah: The &struct ath5k_hw
* @level: level between 0 and @ATH5K_ANI_MAX_NOISE_IMM_LVL
*/
void
@ -91,12 +99,11 @@ ath5k_ani_set_noise_immunity_level(struct ath5k_hw *ah, int level)
ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI, "new level %d", level);
}
/**
* ath5k_ani_set_spur_immunity_level() - Set spur immunity level
*
* @ah: The &struct ath5k_hw
* @level: level between 0 and @max_spur_level (the maximum level is dependent
* on the chip revision).
*/
void
ath5k_ani_set_spur_immunity_level(struct ath5k_hw *ah, int level)
@ -117,10 +124,9 @@ ath5k_ani_set_spur_immunity_level(struct ath5k_hw *ah, int level)
ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI, "new level %d", level);
}
/**
* ath5k_ani_set_firstep_level() - Set "firstep" level
*
* @ah: The &struct ath5k_hw
* @level: level between 0 and @ATH5K_ANI_MAX_FIRSTEP_LVL
*/
void
@ -140,11 +146,9 @@ ath5k_ani_set_firstep_level(struct ath5k_hw *ah, int level)
ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI, "new level %d", level);
}
/**
* ath5k_ani_set_ofdm_weak_signal_detection() - Set OFDM weak signal detection
* @ah: The &struct ath5k_hw
* @on: turn on or off
*/
void
@ -182,10 +186,9 @@ ath5k_ani_set_ofdm_weak_signal_detection(struct ath5k_hw *ah, bool on)
on ? "on" : "off");
}
/**
* ath5k_ani_set_cck_weak_signal_detection() - Set CCK weak signal detection
* @ah: The &struct ath5k_hw
* @on: turn on or off
*/
void
@ -200,13 +203,16 @@ ath5k_ani_set_cck_weak_signal_detection(struct ath5k_hw *ah, bool on)
}
/***************\
* ANI algorithm *
\***************/
/**
* ath5k_ani_raise_immunity() - Increase noise immunity
*
* @ah: The &struct ath5k_hw
* @as: The &struct ath5k_ani_state
* @ofdm_trigger: If this is true we are called because of too many OFDM errors;
* the algorithm will then tune more parameters.
*
* Try to raise noise immunity (=decrease sensitivity) in several steps
* depending on the average RSSI of the beacons we received.
@ -290,9 +296,10 @@ ath5k_ani_raise_immunity(struct ath5k_hw *ah, struct ath5k_ani_state *as,
*/
}
/**
* ath5k_ani_lower_immunity() - Decrease noise immunity
* @ah: The &struct ath5k_hw
* @as: The &struct ath5k_ani_state
*
* Try to lower noise immunity (=increase sensitivity) in several steps
* depending on the average RSSI of the beacons we received.
@ -352,9 +359,10 @@ ath5k_ani_lower_immunity(struct ath5k_hw *ah, struct ath5k_ani_state *as)
}
}
/**
* ath5k_hw_ani_get_listen_time() - Update counters and return listening time
* @ah: The &struct ath5k_hw
* @as: The &struct ath5k_ani_state
*
* Return an approximation of the time spent "listening" in milliseconds (ms)
* since the last call of this function.
@ -379,9 +387,10 @@ ath5k_hw_ani_get_listen_time(struct ath5k_hw *ah, struct ath5k_ani_state *as)
return listen;
}
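The listening-time bookkeeping above boils down to cycle counting. A minimal sketch of the idea, assuming simple cycle-counter arguments (the real helper lives in the shared ath module as ath_hw_get_listen_time(); the parameter names here are illustrative):

static int example_listen_time_ms(u32 cycles, u32 rx_cycles, u32 tx_cycles,
				  u32 clockrate_mhz)
{
	/* everything that was neither RX nor TX counts as "listening";
	 * cycles / (MHz * 1000) converts clock cycles to milliseconds */
	return (cycles - rx_cycles - tx_cycles) / (clockrate_mhz * 1000);
}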
/**
* ath5k_ani_save_and_clear_phy_errors() - Clear and save PHY error counters
* @ah: The &struct ath5k_hw
* @as: The &struct ath5k_ani_state
*
* Clear the PHY error counters as soon as possible, since this might be called
* from a MIB interrupt and we want to make sure we don't get interrupted again.
@ -429,14 +438,14 @@ ath5k_ani_save_and_clear_phy_errors(struct ath5k_hw *ah,
return 1;
}
/**
* ath5k_ani_period_restart() - Restart ANI period
* @as: The &struct ath5k_ani_state
*
* Just reset counters, so they are clear for the next "ani period".
*/
static void
ath5k_ani_period_restart(struct ath5k_hw *ah, struct ath5k_ani_state *as)
ath5k_ani_period_restart(struct ath5k_ani_state *as)
{
/* keep last values for debugging */
as->last_ofdm_errors = as->ofdm_errors;
@ -448,9 +457,9 @@ ath5k_ani_period_restart(struct ath5k_hw *ah, struct ath5k_ani_state *as)
as->listen_time = 0;
}
/**
* ath5k_ani_calibration() - The main ANI calibration function
* @ah: The &struct ath5k_hw
*
* We count OFDM and CCK errors relative to the time where we did not send or
* receive ("listen" time) and raise or lower immunity accordingly.
@ -492,7 +501,7 @@ ath5k_ani_calibration(struct ath5k_hw *ah)
/* too many PHY errors - we have to raise immunity */
bool ofdm_flag = as->ofdm_errors > ofdm_high ? true : false;
ath5k_ani_raise_immunity(ah, as, ofdm_flag);
ath5k_ani_period_restart(ah, as);
ath5k_ani_period_restart(as);
} else if (as->listen_time > 5 * ATH5K_ANI_LISTEN_PERIOD) {
/* If more than 5 (TODO: why 5?) periods have passed and we got
@ -504,15 +513,18 @@ ath5k_ani_calibration(struct ath5k_hw *ah)
if (as->ofdm_errors <= ofdm_low && as->cck_errors <= cck_low)
ath5k_ani_lower_immunity(ah, as);
ath5k_ani_period_restart(ah, as);
ath5k_ani_period_restart(as);
}
}
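The ofdm_high/ofdm_low/cck_low thresholds used above are derived from the listen time, so the trigger stays an error rate rather than an absolute count. A hedged sketch of that scaling (the ATH5K_ANI_*_TRIG constant mentioned in the comment is an assumption, not taken from this excerpt):

static int example_ani_threshold(int listen_time_ms, int errors_per_second)
{
	/* e.g. errors_per_second = ATH5K_ANI_OFDM_TRIG_HIGH; allow that many
	 * errors per second of actual listen time in the current period */
	return listen_time_ms * errors_per_second / 1000;
}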
/*******************\
* Interrupt handler *
\*******************/
/**
* ath5k_ani_mib_intr() - Interrupt handler for ANI MIB counters
* @ah: The &struct ath5k_hw
*
* Just read & reset the registers quickly, so they don't generate more
* interrupts, save the counters and schedule the tasklet to decide whether
@ -549,9 +561,11 @@ ath5k_ani_mib_intr(struct ath5k_hw *ah)
tasklet_schedule(&ah->ani_tasklet);
}
/**
* ath5k_ani_phy_error_report() - Used by older HW to report PHY errors
*
* @ah: The &struct ath5k_hw
* @phyerr: One of enum ath5k_phy_error_code
*
* This is used by hardware without PHY error counters to report PHY errors
* on a frame-by-frame basis, instead of the interrupt.
@ -574,10 +588,13 @@ ath5k_ani_phy_error_report(struct ath5k_hw *ah,
}
/****************\
* Initialization *
\****************/
/**
* ath5k_enable_phy_err_counters() - Enable PHY error counters
* @ah: The &struct ath5k_hw
*
* Enable PHY error counters for OFDM and CCK timing errors.
*/
@ -596,9 +613,9 @@ ath5k_enable_phy_err_counters(struct ath5k_hw *ah)
ath5k_hw_reg_write(ah, 0, AR5K_CCK_FIL_CNT);
}
/**
* ath5k_disable_phy_err_counters() - Disable PHY error counters
* @ah: The &struct ath5k_hw
*
* Disable PHY error counters for OFDM and CCK timing errors.
*/
@ -615,10 +632,10 @@ ath5k_disable_phy_err_counters(struct ath5k_hw *ah)
ath5k_hw_reg_write(ah, 0, AR5K_CCK_FIL_CNT);
}
/**
* ath5k_ani_init() - Initialize ANI
* @ah: The &struct ath5k_hw
* @mode: One of enum ath5k_ani_mode
*
* Initialize ANI according to mode.
*/
@ -695,10 +712,18 @@ ath5k_ani_init(struct ath5k_hw *ah, enum ath5k_ani_mode mode)
}
/**************\
* Debug output *
\**************/
#ifdef CONFIG_ATH5K_DEBUG
/**
* ath5k_ani_print_counters() - Print ANI counters
* @ah: The &struct ath5k_hw
*
* Used for debugging ANI
*/
void
ath5k_ani_print_counters(struct ath5k_hw *ah)
{

View file

@ -40,13 +40,13 @@ enum ath5k_phy_error_code;
* enum ath5k_ani_mode - mode for ANI / noise sensitivity
*
* @ATH5K_ANI_MODE_OFF: Turn ANI off. This can be useful to just stop the ANI
* algorithm after it has been on auto mode.
* @ATH5K_ANI_MODE_MANUAL_LOW: Manually set all immunity parameters to low,
* maximizing sensitivity. ANI will not run.
* @ATH5K_ANI_MODE_MANUAL_HIGH: Manually set all immunity parameters to high,
* minimizing sensitivity. ANI will not run.
* @ATH5K_ANI_MODE_AUTO: Automatically control immunity parameters based on the
* amount of OFDM and CCK frame errors (default).
*/
enum ath5k_ani_mode {
ATH5K_ANI_MODE_OFF = 0,
@ -58,8 +58,22 @@ enum ath5k_ani_mode {
/**
* struct ath5k_ani_state - ANI state and associated counters
*
* @ani_mode: One of enum ath5k_ani_mode
* @noise_imm_level: Noise immunity level
* @spur_level: Spur immunity level
* @firstep_level: FIRstep level
* @ofdm_weak_sig: OFDM weak signal detection state (on/off)
* @cck_weak_sig: CCK weak signal detection state (on/off)
* @max_spur_level: Max spur immunity level (chip specific)
* @listen_time: Listen time
* @ofdm_errors: OFDM timing error count
* @cck_errors: CCK timing error count
* @last_cc: The &struct ath_cycle_counters (for stats)
* @last_listen: Listen time from previous run (for stats)
* @last_ofdm_errors: OFDM timing error count from previous run (for stats)
* @last_cck_errors: CCK timing error count from previous run (for stats)
* @sum_ofdm_errors: Sum of OFDM timing errors (for stats)
* @sum_cck_errors: Sum of all CCK timing errors (for stats)
*/
struct ath5k_ani_state {
enum ath5k_ani_mode ani_mode;

View file

@ -187,10 +187,9 @@
#define AR5K_TUNE_MAX_TXPOWER 63
#define AR5K_TUNE_DEFAULT_TXPOWER 25
#define AR5K_TUNE_TPC_TXPOWER false
#define ATH5K_TUNE_CALIBRATION_INTERVAL_FULL 10000 /* 10 sec */
#define ATH5K_TUNE_CALIBRATION_INTERVAL_FULL 60000 /* 60 sec */
#define ATH5K_TUNE_CALIBRATION_INTERVAL_SHORT 10000 /* 10 sec */
#define ATH5K_TUNE_CALIBRATION_INTERVAL_ANI 1000 /* 1 sec */
#define ATH5K_TUNE_CALIBRATION_INTERVAL_NF 60000 /* 60 sec */
#define ATH5K_TX_COMPLETE_POLL_INT 3000 /* 3 sec */
#define AR5K_INIT_CARR_SENSE_EN 1
@ -262,16 +261,34 @@
#define AR5K_AGC_SETTLING_TURBO 37
/*****************************\
* GENERIC CHIPSET DEFINITIONS *
\*****************************/
/**
* enum ath5k_version - MAC Chips
* @AR5K_AR5210: AR5210 (Crete)
* @AR5K_AR5211: AR5211 (Oahu/Maui)
* @AR5K_AR5212: AR5212 (Venice) and newer
*/
enum ath5k_version {
AR5K_AR5210 = 0,
AR5K_AR5211 = 1,
AR5K_AR5212 = 2,
};
/**
* enum ath5k_radio - PHY Chips
* @AR5K_RF5110: RF5110 (Fez)
* @AR5K_RF5111: RF5111 (Sombrero)
* @AR5K_RF5112: RF2112/5112(A) (Derby/Derby2)
* @AR5K_RF2413: RF2413/2414 (Griffin/Griffin-Lite)
* @AR5K_RF5413: RF5413/5414/5424 (Eagle/Condor)
* @AR5K_RF2316: RF2315/2316 (Cobra SoC)
* @AR5K_RF2317: RF2317 (Spider SoC)
* @AR5K_RF2425: RF2425/2417 (Swan/Nalla)
*/
enum ath5k_radio {
AR5K_RF5110 = 0,
AR5K_RF5111 = 1,
@ -303,11 +320,11 @@ enum ath5k_radio {
#define AR5K_SREV_AR5213A 0x59 /* Hainan */
#define AR5K_SREV_AR2413 0x78 /* Griffin lite */
#define AR5K_SREV_AR2414 0x70 /* Griffin */
#define AR5K_SREV_AR2315_R6 0x86 /* AP51-Light */
#define AR5K_SREV_AR2315_R7 0x87 /* AP51-Full */
#define AR5K_SREV_AR5424 0x90 /* Condor */
#define AR5K_SREV_AR2317_R1 0x90 /* AP61-Light */
#define AR5K_SREV_AR2317_R2 0x91 /* AP61-Full */
#define AR5K_SREV_AR5413 0xa4 /* Eagle lite */
#define AR5K_SREV_AR5414 0xa0 /* Eagle */
#define AR5K_SREV_AR2415 0xb0 /* Talon */
@ -344,32 +361,40 @@ enum ath5k_radio {
/* TODO add support to mac80211 for vendor-specific rates and modes */
/*
/**
* DOC: Atheros XR
*
* Some of this information is based on Documentation from:
*
* http://madwifi-project.org/wiki/ChipsetFeatures/SuperAG
*
* Modulation for Atheros' eXtended Range - range enhancing extension that is
* supposed to double the distance an Atheros client device can keep a
* connection with an Atheros access point. This is achieved by increasing
* the receiver sensitivity up to -105dBm, which is about 20dB above what
* the 802.11 specifications demand. In addition, new (proprietary) data rates
* are introduced: 3, 2, 1, 0.5 and 0.25 MBit/s.
* Atheros' eXtended Range - range enhancing extension is a modulation scheme
* that is supposed to double the link distance between an Atheros XR-enabled
* client device with an Atheros XR-enabled access point. This is achieved
* by increasing the receiver sensitivity up to -105dBm, which is about 20dB
* above what the 802.11 specifications demand. In addition, new (proprietary)
* data rates are introduced: 3, 2, 1, 0.5 and 0.25 MBit/s.
*
* Please note that you can use either XR or TURBO but you cannot use both,
* they are exclusive.
*
* Also note that we do not plan to support XR mode at least for now. You can
* get a mode similar to XR by using 5MHz bwmode.
*/
#define MODULATION_XR 0x00000200
/*
* Modulation for Atheros' Turbo G and Turbo A, it's supposed to provide a
* throughput transmission speed up to 40Mbit/s-60Mbit/s at a 108Mbit/s
* signaling rate achieved through the bonding of two 54Mbit/s 802.11g
* channels. To use this feature your Access Point must also support it.
/**
* DOC: Atheros SuperAG
*
* In addition to XR we have another modulation scheme called TURBO mode
* that is supposed to provide a throughput transmission speed up to 40Mbit/s
* -60Mbit/s at a 108Mbit/s signaling rate achieved through the bonding of two
* 54Mbit/s 802.11g channels. To use this feature both ends must support it.
* There is also a distinction between "static" and "dynamic" turbo modes:
*
* - Static: is the dumb version: devices set to this mode stick to it until
* the mode is turned off.
*
* - Dynamic: is the intelligent version, the network decides itself if it
* is ok to use turbo. As soon as traffic is detected on adjacent channels
* (which would get used in turbo mode), or when a non-turbo station joins
@ -383,24 +408,39 @@ enum ath5k_radio {
*
* http://www.pcworld.com/article/id,113428-page,1/article.html
*
* The channel bonding seems to be driver specific though. In addition to
* deciding what channels will be used, these "Turbo" modes are accomplished
* by also enabling the following features:
* The channel bonding seems to be driver specific though.
*
* In addition to TURBO modes we also have the following features for even
* greater speed-up:
*
* - Bursting: allows multiple frames to be sent at once, rather than pausing
* after each frame. Bursting is a standards-compliant feature that can be
* used with any Access Point.
*
* - Fast frames: increases the amount of information that can be sent per
* frame, also resulting in a reduction of transmission overhead. It is a
* proprietary feature that needs to be supported by the Access Point.
*
* - Compression: data frames are compressed in real time using a Lempel Ziv
* algorithm. This is done transparently. Once this feature is enabled,
* compression and decompression take place inside the chipset, without
* putting additional load on the host CPU.
*
* As with XR we also don't plan to support SuperAG features for now. You can
* get a mode similar to TURBO by using 40MHz bwmode.
*/
#define MODULATION_TURBO 0x00000080
/**
* enum ath5k_driver_mode - PHY operation mode
* @AR5K_MODE_11A: 802.11a
* @AR5K_MODE_11B: 802.11b
* @AR5K_MODE_11G: 802.11g
* @AR5K_MODE_MAX: Used for boundary checks
*
* Do not change the order here; we use these as
* array indices and they also map to EEPROM structures.
*/
enum ath5k_driver_mode {
AR5K_MODE_11A = 0,
AR5K_MODE_11B = 1,
@ -408,30 +448,64 @@ enum ath5k_driver_mode {
AR5K_MODE_MAX = 3
};
/**
* enum ath5k_ant_mode - Antenna operation mode
* @AR5K_ANTMODE_DEFAULT: Default antenna setup
* @AR5K_ANTMODE_FIXED_A: Only antenna A is present
* @AR5K_ANTMODE_FIXED_B: Only antenna B is present
* @AR5K_ANTMODE_SINGLE_AP: STA locked on a single ap
* @AR5K_ANTMODE_SECTOR_AP: AP with tx antenna set on tx desc
* @AR5K_ANTMODE_SECTOR_STA: STA with tx antenna set on tx desc
* @AR5K_ANTMODE_DEBUG: Debug mode (A -> Rx, B -> Tx)
* @AR5K_ANTMODE_MAX: Used for boundary checks
*
* For more info on antenna control, check out phy.c
*/
enum ath5k_ant_mode {
AR5K_ANTMODE_DEFAULT = 0, /* default antenna setup */
AR5K_ANTMODE_FIXED_A = 1, /* only antenna A is present */
AR5K_ANTMODE_FIXED_B = 2, /* only antenna B is present */
AR5K_ANTMODE_SINGLE_AP = 3, /* sta locked on a single ap */
AR5K_ANTMODE_SECTOR_AP = 4, /* AP with tx antenna set on tx desc */
AR5K_ANTMODE_SECTOR_STA = 5, /* STA with tx antenna set on tx desc */
AR5K_ANTMODE_DEBUG = 6, /* Debug mode -A -> Rx, B-> Tx- */
AR5K_ANTMODE_DEFAULT = 0,
AR5K_ANTMODE_FIXED_A = 1,
AR5K_ANTMODE_FIXED_B = 2,
AR5K_ANTMODE_SINGLE_AP = 3,
AR5K_ANTMODE_SECTOR_AP = 4,
AR5K_ANTMODE_SECTOR_STA = 5,
AR5K_ANTMODE_DEBUG = 6,
AR5K_ANTMODE_MAX,
};
/**
* enum ath5k_bw_mode - Bandwidth operation mode
* @AR5K_BWMODE_DEFAULT: 20MHz, default operation
* @AR5K_BWMODE_5MHZ: Quarter rate
* @AR5K_BWMODE_10MHZ: Half rate
* @AR5K_BWMODE_40MHZ: Turbo
*/
enum ath5k_bw_mode {
AR5K_BWMODE_DEFAULT = 0, /* 20MHz, default operation */
AR5K_BWMODE_5MHZ = 1, /* Quarter rate */
AR5K_BWMODE_10MHZ = 2, /* Half rate */
AR5K_BWMODE_40MHZ = 3 /* Turbo */
AR5K_BWMODE_DEFAULT = 0,
AR5K_BWMODE_5MHZ = 1,
AR5K_BWMODE_10MHZ = 2,
AR5K_BWMODE_40MHZ = 3
};
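For illustration only, a tiny helper that translates the bandwidth mode into the nominal channel width the comments above describe (20MHz default, quarter/half rate, 40MHz turbo):

static unsigned int example_bwmode_to_mhz(enum ath5k_bw_mode bwmode)
{
	switch (bwmode) {
	case AR5K_BWMODE_5MHZ:
		return 5;	/* quarter rate */
	case AR5K_BWMODE_10MHZ:
		return 10;	/* half rate */
	case AR5K_BWMODE_40MHZ:
		return 40;	/* turbo */
	case AR5K_BWMODE_DEFAULT:
	default:
		return 20;
	}
}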
/****************\
TX DEFINITIONS
\****************/
/**
* struct ath5k_tx_status - TX Status descriptor
* @ts_seqnum: Sequence number
* @ts_tstamp: Timestamp
* @ts_status: Status code
* @ts_final_idx: Final transmission series index
* @ts_final_retry: Final retry count
* @ts_rssi: RSSI for received ACK
* @ts_shortretry: Short retry count
* @ts_virtcol: Virtual collision count
* @ts_antenna: Antenna used
*
* TX status descriptor gets filled by the hw
* on each transmission attempt.
*/
struct ath5k_tx_status {
u16 ts_seqnum;
@ -454,7 +528,6 @@ struct ath5k_tx_status {
* enum ath5k_tx_queue - Queue types used to classify tx queues.
* @AR5K_TX_QUEUE_INACTIVE: q is unused -- see ath5k_hw_release_tx_queue
* @AR5K_TX_QUEUE_DATA: A normal data queue
* @AR5K_TX_QUEUE_XR_DATA: An XR-data queue
* @AR5K_TX_QUEUE_BEACON: The beacon queue
* @AR5K_TX_QUEUE_CAB: The after-beacon queue
* @AR5K_TX_QUEUE_UAPSD: Unscheduled Automatic Power Save Delivery queue
@ -462,7 +535,6 @@ struct ath5k_tx_status {
enum ath5k_tx_queue {
AR5K_TX_QUEUE_INACTIVE = 0,
AR5K_TX_QUEUE_DATA,
AR5K_TX_QUEUE_XR_DATA,
AR5K_TX_QUEUE_BEACON,
AR5K_TX_QUEUE_CAB,
AR5K_TX_QUEUE_UAPSD,
@ -471,36 +543,46 @@ enum ath5k_tx_queue {
#define AR5K_NUM_TX_QUEUES 10
#define AR5K_NUM_TX_QUEUES_NOQCU 2
/**
* enum ath5k_tx_queue_subtype - Queue sub-types to classify normal data queues
* @AR5K_WME_AC_BK: Background traffic
* @AR5K_WME_AC_BE: Best-effort (normal) traffic
* @AR5K_WME_AC_VI: Video traffic
* @AR5K_WME_AC_VO: Voice traffic
*
* These are the 4 Access Categories as defined in
* WME spec. 0 is the lowest priority and 3 is the
* highest. Normal data that hasn't been classified
* goes to the Best Effort AC.
*/
enum ath5k_tx_queue_subtype {
AR5K_WME_AC_BK = 0, /*Background traffic*/
AR5K_WME_AC_BE, /*Best-effort (normal) traffic*/
AR5K_WME_AC_VI, /*Video traffic*/
AR5K_WME_AC_VO, /*Voice traffic*/
AR5K_WME_AC_BK = 0,
AR5K_WME_AC_BE,
AR5K_WME_AC_VI,
AR5K_WME_AC_VO,
};
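As a hedged illustration of the priority ordering described above (not a driver API, purely a sketch), the four access categories rank like this:

static int example_wme_ac_priority(enum ath5k_tx_queue_subtype ac)
{
	switch (ac) {
	case AR5K_WME_AC_VO:
		return 3;	/* voice, highest */
	case AR5K_WME_AC_VI:
		return 2;	/* video */
	case AR5K_WME_AC_BE:
		return 1;	/* best effort, default for unclassified data */
	case AR5K_WME_AC_BK:
	default:
		return 0;	/* background, lowest */
	}
}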
/**
* enum ath5k_tx_queue_id - Queue ID numbers as returned by the hw functions
* @AR5K_TX_QUEUE_ID_NOQCU_DATA: Data queue on AR5210 (no QCU available)
* @AR5K_TX_QUEUE_ID_NOQCU_BEACON: Beacon queue on AR5210 (no QCU available)
* @AR5K_TX_QUEUE_ID_DATA_MIN: Data queue min index
* @AR5K_TX_QUEUE_ID_DATA_MAX: Data queue max index
* @AR5K_TX_QUEUE_ID_CAB: Content after beacon queue
* @AR5K_TX_QUEUE_ID_BEACON: Beacon queue
* @AR5K_TX_QUEUE_ID_UAPSD: Unscheduled Automatic Power Save Delivery queue
*
* Each number represents a hw queue. If hw does not support hw queues
* (eg 5210) all data goes in one queue.
*/
enum ath5k_tx_queue_id {
AR5K_TX_QUEUE_ID_NOQCU_DATA = 0,
AR5K_TX_QUEUE_ID_NOQCU_BEACON = 1,
AR5K_TX_QUEUE_ID_DATA_MIN = 0, /*IEEE80211_TX_QUEUE_DATA0*/
AR5K_TX_QUEUE_ID_DATA_MAX = 3, /*IEEE80211_TX_QUEUE_DATA3*/
AR5K_TX_QUEUE_ID_DATA_SVP = 5, /*IEEE80211_TX_QUEUE_SVP - Spectralink Voice Protocol*/
AR5K_TX_QUEUE_ID_CAB = 6, /*IEEE80211_TX_QUEUE_AFTER_BEACON*/
AR5K_TX_QUEUE_ID_BEACON = 7, /*IEEE80211_TX_QUEUE_BEACON*/
AR5K_TX_QUEUE_ID_UAPSD = 8,
AR5K_TX_QUEUE_ID_XR_DATA = 9,
AR5K_TX_QUEUE_ID_DATA_MIN = 0,
AR5K_TX_QUEUE_ID_DATA_MAX = 3,
AR5K_TX_QUEUE_ID_UAPSD = 7,
AR5K_TX_QUEUE_ID_CAB = 8,
AR5K_TX_QUEUE_ID_BEACON = 9,
};
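A minimal sketch of the mapping the comment above describes, assuming we only distinguish the QCU-less AR5210 from everything else (illustrative, not the driver's actual queue allocator):

static enum ath5k_tx_queue_id example_pick_data_queue(enum ath5k_version mac,
						      unsigned int prio)
{
	/* AR5210 has no QCU, so all data shares one queue */
	if (mac == AR5K_AR5210)
		return AR5K_TX_QUEUE_ID_NOQCU_DATA;

	/* otherwise clamp the priority into the DATA_MIN..DATA_MAX range */
	if (prio > AR5K_TX_QUEUE_ID_DATA_MAX)
		prio = AR5K_TX_QUEUE_ID_DATA_MAX;
	return (enum ath5k_tx_queue_id)(AR5K_TX_QUEUE_ID_DATA_MIN + prio);
}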
/*
@ -521,46 +603,70 @@ enum ath5k_tx_queue_id {
#define AR5K_TXQ_FLAG_POST_FR_BKOFF_DIS 0x1000 /* Disable backoff while bursting */
#define AR5K_TXQ_FLAG_COMPRESSION_ENABLE 0x2000 /* Enable hw compression -not implemented-*/
/**
* struct ath5k_txq - Transmit queue state
* @qnum: Hardware q number
* @link: Link ptr in last TX desc
* @q: Transmit queue (&struct list_head)
* @lock: Lock on q and link
* @setup: Is the queue configured
* @txq_len: Number of queued buffers
* @txq_max: Max allowed num of queued buffers
* @txq_poll_mark: Used to check if queue got stuck
* @txq_stuck: Queue stuck counter
*
* One of these exists for each hardware transmit queue.
* Packets sent to us from above are assigned to queues based
* on their priority. Not all devices support a complete set
* of hardware transmit queues. For those devices the array
* sc_ac2q will map multiple priorities to fewer hardware queues
* (typically all to one hardware queue).
*/
struct ath5k_txq {
unsigned int qnum; /* hardware q number */
u32 *link; /* link ptr in last TX desc */
struct list_head q; /* transmit queue */
spinlock_t lock; /* lock on q and link */
unsigned int qnum;
u32 *link;
struct list_head q;
spinlock_t lock;
bool setup;
int txq_len; /* number of queued buffers */
int txq_max; /* max allowed num of queued buffers */
int txq_len;
int txq_max;
bool txq_poll_mark;
unsigned int txq_stuck; /* informational counter */
unsigned int txq_stuck;
};
/*
* A struct to hold tx queue's parameters
/**
* struct ath5k_txq_info - A struct to hold TX queue's parameters
* @tqi_type: One of enum ath5k_tx_queue
* @tqi_subtype: One of enum ath5k_tx_queue_subtype
* @tqi_flags: TX queue flags (see above)
* @tqi_aifs: Arbitrated Inter-frame Space
* @tqi_cw_min: Minimum Contention Window
* @tqi_cw_max: Maximum Contention Window
* @tqi_cbr_period: Constant bit rate period
* @tqi_ready_time: Time queue waits after an event when RDYTIME is enabled
*/
struct ath5k_txq_info {
enum ath5k_tx_queue tqi_type;
enum ath5k_tx_queue_subtype tqi_subtype;
u16 tqi_flags; /* Tx queue flags (see above) */
u8 tqi_aifs; /* Arbitrated Interframe Space */
u16 tqi_cw_min; /* Minimum Contention Window */
u16 tqi_cw_max; /* Maximum Contention Window */
u32 tqi_cbr_period; /* Constant bit rate period */
u16 tqi_flags;
u8 tqi_aifs;
u16 tqi_cw_min;
u16 tqi_cw_max;
u32 tqi_cbr_period;
u32 tqi_cbr_overflow_limit;
u32 tqi_burst_time;
u32 tqi_ready_time; /* Time queue waits after an event */
u32 tqi_ready_time;
};
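A sketch of filling this structure for a best-effort data queue; the EDCA numbers (AIFS 2, CWmin 15, CWmax 1023) are common 802.11 defaults used here purely as an example, not values this header mandates:

static void example_fill_be_queue_info(struct ath5k_txq_info *qi)
{
	qi->tqi_type	= AR5K_TX_QUEUE_DATA;
	qi->tqi_subtype	= AR5K_WME_AC_BE;
	qi->tqi_flags	= 0;		/* no special TXQ flags */
	qi->tqi_aifs	= 2;		/* arbitrated inter-frame space */
	qi->tqi_cw_min	= 15;		/* minimum contention window */
	qi->tqi_cw_max	= 1023;		/* maximum contention window */
}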
/**
* enum ath5k_pkt_type - Transmit packet types
* @AR5K_PKT_TYPE_NORMAL: Normal data
* @AR5K_PKT_TYPE_ATIM: ATIM
* @AR5K_PKT_TYPE_PSPOLL: PS-Poll
* @AR5K_PKT_TYPE_BEACON: Beacon
* @AR5K_PKT_TYPE_PROBE_RESP: Probe response
* @AR5K_PKT_TYPE_PIFS: PIFS
* Used on tx control descriptor
*/
enum ath5k_pkt_type {
AR5K_PKT_TYPE_NORMAL = 0,
@ -583,27 +689,23 @@ enum ath5k_pkt_type {
(ah->ah_txpower.txp_rates_power_table[(_r)] & 0x3f) << (_v) \
)
/****************\
RX DEFINITIONS
\****************/
/**
* struct ath5k_rx_status - RX Status descriptor
* @rs_datalen: Data length
* @rs_tstamp: Timestamp
* @rs_status: Status code
* @rs_phyerr: PHY error mask
* @rs_rssi: RSSI in 0.5dbm units
* @rs_keyix: Index to the key used for decrypting
* @rs_rate: Rate used to decode the frame
* @rs_antenna: Antenna used to receive the frame
* @rs_more: Indicates this is a frame fragment (Fast frames)
*/
struct ath5k_rx_status {
u16 rs_datalen;
@ -645,10 +747,18 @@ struct ath5k_rx_status {
#define TSF_TO_TU(_tsf) (u32)((_tsf) >> 10)
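The shift by 10 works because the TSF counts microseconds and a Time Unit (TU) is 1024 us; e.g. a TSF of 102400 us gives 100 TU. A hedged usage sketch (ath5k_hw_get_tsf64() is declared later in this header):

static inline u32 example_current_tu(struct ath5k_hw *ah)
{
	/* the TSF counts microseconds; one TU is 1024 us (hence the >> 10) */
	return TSF_TO_TU(ath5k_hw_get_tsf64(ah));
}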
/*******************************\
GAIN OPTIMIZATION DEFINITIONS
\*******************************/
/**
* enum ath5k_rfgain - RF Gain optimization engine state
* @AR5K_RFGAIN_INACTIVE: Engine disabled
* @AR5K_RFGAIN_ACTIVE: Probe active
* @AR5K_RFGAIN_READ_REQUESTED: Probe requested
* @AR5K_RFGAIN_NEED_CHANGE: Gain_F needs change
*/
enum ath5k_rfgain {
AR5K_RFGAIN_INACTIVE = 0,
AR5K_RFGAIN_ACTIVE,
@ -656,6 +766,16 @@ enum ath5k_rfgain {
AR5K_RFGAIN_NEED_CHANGE,
};
/**
* struct ath5k_gain - RF Gain optimization engine state data
* @g_step_idx: Current step index
* @g_current: Current gain
* @g_target: Target gain
* @g_low: Low gain boundary
* @g_high: High gain boundary
* @g_f_corr: Gain_F correction
* @g_state: One of enum ath5k_rfgain
*/
struct ath5k_gain {
u8 g_step_idx;
u8 g_current;
@ -666,6 +786,8 @@ struct ath5k_gain {
u8 g_state;
};
/********************\
COMMON DEFINITIONS
\********************/
@ -674,9 +796,14 @@ struct ath5k_gain {
#define AR5K_SLOT_TIME_20 880
#define AR5K_SLOT_TIME_MAX 0xffff
/**
* struct ath5k_athchan_2ghz - 2GHz to 5GHZ map for RF5111
* @a2_flags: Channel flags (internal)
* @a2_athchan: HW channel number (internal)
*
* This structure is used to map 2GHz channels to
* 5GHz Atheros channels on 2111 frequency converter
* that comes together with RF5111
* TODO: Clean up
*/
struct ath5k_athchan_2ghz {
@ -684,36 +811,80 @@ struct ath5k_athchan_2ghz {
u16 a2_athchan;
};
/**
* enum ath5k_dmasize - DMA size definitions (2^(n+2))
* @AR5K_DMASIZE_4B: 4Bytes
* @AR5K_DMASIZE_8B: 8Bytes
* @AR5K_DMASIZE_16B: 16Bytes
* @AR5K_DMASIZE_32B: 32Bytes
* @AR5K_DMASIZE_64B: 64Bytes (Default)
* @AR5K_DMASIZE_128B: 128Bytes
* @AR5K_DMASIZE_256B: 256Bytes
* @AR5K_DMASIZE_512B: 512Bytes
*
* These are used to set DMA burst size on hw
*
* Note: Some platforms can't handle more than 4 bytes,
* so be careful on embedded boards.
*/
enum ath5k_dmasize {
AR5K_DMASIZE_4B = 0,
AR5K_DMASIZE_8B,
AR5K_DMASIZE_16B,
AR5K_DMASIZE_32B,
AR5K_DMASIZE_64B,
AR5K_DMASIZE_128B,
AR5K_DMASIZE_256B,
AR5K_DMASIZE_512B
};
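Since the enum value n encodes a burst of 2^(n+2) bytes, converting back to bytes is a single shift; a minimal sketch:

static inline unsigned int example_dmasize_to_bytes(enum ath5k_dmasize sz)
{
	/* AR5K_DMASIZE_4B (0) -> 4 bytes, AR5K_DMASIZE_512B (7) -> 512 bytes */
	return 4U << sz;
}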
/******************\
RATE DEFINITIONS
\******************/
/**
* DOC: Rate codes
*
* Seems the ar5xxx hardware supports up to 32 rates, indexed by 1-32.
*
* The rate code is used to get the RX rate or set the TX rate on the
* hardware descriptors. It is also used for internal modulation control
* and settings.
*
* This is the hardware rate map we are aware of:
* This is the hardware rate map we are aware of (html unfriendly):
*
* Rate code Rate (Kbps)
* --------- -----------
* 0x01 3000 (XR)
* 0x02 1000 (XR)
* 0x03 250 (XR)
* 0x04 - 05 -Reserved-
* 0x06 2000 (XR)
* 0x07 500 (XR)
* 0x08 48000 (OFDM)
* 0x09 24000 (OFDM)
* 0x0A 12000 (OFDM)
* 0x0B 6000 (OFDM)
* 0x0C 54000 (OFDM)
* 0x0D 36000 (OFDM)
* 0x0E 18000 (OFDM)
* 0x0F 9000 (OFDM)
* 0x10 - 17 -Reserved-
* 0x18 11000L (CCK)
* 0x19 5500L (CCK)
* 0x1A 2000L (CCK)
* 0x1B 1000L (CCK)
* 0x1C 11000S (CCK)
* 0x1D 5500S (CCK)
* 0x1E 2000S (CCK)
* 0x1F -Reserved-
*
* "S" indicates CCK rates with short preamble and "L" with long preamble.
*
* AR5211 has different rate codes for CCK (802.11B) rates. It only uses the
* lowest 4 bits, so they are the same as above with a 0xF mask.
* (0xB, 0xA, 0x9 and 0x8 for 1M, 2M, 5.5M and 11M).
* We handle this in ath5k_setup_bands().
*/
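Purely as an illustration of the table above (not a lookup that exists in this form in the driver), a few of the codes translate like this; note that OR-ing AR5K_SET_SHORT_PREAMBLE (0x04) into a long-preamble CCK code yields the short-preamble one, e.g. 0x18 | 0x04 == 0x1C:

static unsigned int example_ratecode_to_kbps(u8 rate_code)
{
	switch (rate_code) {
	case 0x0C:
		return 54000;	/* OFDM */
	case 0x08:
		return 48000;	/* OFDM */
	case 0x0B:
		return 6000;	/* OFDM */
	case 0x18:
	case 0x1C:
		return 11000;	/* CCK, long/short preamble */
	case 0x1B:
		return 1000;	/* CCK, long preamble */
	default:
		return 0;	/* unknown/reserved */
	}
}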
@ -733,13 +904,9 @@ struct ath5k_athchan_2ghz {
#define ATH5K_RATE_CODE_36M 0x0D
#define ATH5K_RATE_CODE_48M 0x08
#define ATH5K_RATE_CODE_54M 0x0C
/* XR */
#define ATH5K_RATE_CODE_XR_500K 0x07
#define ATH5K_RATE_CODE_XR_1M 0x02
#define ATH5K_RATE_CODE_XR_2M 0x06
#define ATH5K_RATE_CODE_XR_3M 0x01
/* Adding this flag to rate_code on B rates
* enables short preamble */
#define AR5K_SET_SHORT_PREAMBLE 0x04
/*
@ -769,49 +936,65 @@ extern int ath5k_modparam_nohwcrypt;
/**
* enum ath5k_int - Hardware interrupt masks helpers
* @AR5K_INT_RXOK: Frame successfully received
* @AR5K_INT_RXDESC: Request RX descriptor/Read RX descriptor
* @AR5K_INT_RXERR: Frame reception failed
* @AR5K_INT_RXNOFRM: No frame received within a specified time period
* @AR5K_INT_RXEOL: Reached "End Of List", means we need more RX descriptors
* @AR5K_INT_RXORN: Indicates we got RX FIFO overrun. Note that Rx overrun is
* not always fatal, on some chips we can continue operation
* without resetting the card, that's why %AR5K_INT_FATAL is not
* common for all chips.
* @AR5K_INT_RX_ALL: Mask to identify all RX related interrupts
*
* @AR5K_INT_TXOK: Frame transmission success
* @AR5K_INT_TXDESC: Request TX descriptor/Read TX status descriptor
* @AR5K_INT_TXERR: Frame transmission failure
* @AR5K_INT_TXEOL: Received End Of List for VEOL (Virtual End Of List). The
* Queue Control Unit (QCU) signals an EOL interrupt only if a
* descriptor's LinkPtr is NULL. For more details, refer to:
* "http://www.freepatentsonline.com/20030225739.html"
* @AR5K_INT_TXNOFRM: No frame was transmitted within a specified time period
* @AR5K_INT_TXURN: Indicates we got TX FIFO underrun. In such case we should
* increase the TX trigger threshold.
* @AR5K_INT_TX_ALL: Mask to identify all TX related interrupts
*
* @AR5K_INT_RX: mask to identify received frame interrupts, of type
* AR5K_ISR_RXOK or AR5K_ISR_RXERR
* @AR5K_INT_RXDESC: Request RX descriptor/Read RX descriptor (?)
* @AR5K_INT_RXNOFRM: No frame received (?)
* @AR5K_INT_RXEOL: received End Of List for VEOL (Virtual End Of List). The
* Queue Control Unit (QCU) signals an EOL interrupt only if a descriptor's
* LinkPtr is NULL. For more details, refer to:
* http://www.freepatentsonline.com/20030225739.html
* @AR5K_INT_RXORN: Indicates we got RX overrun (eg. no more descriptors).
* Note that Rx overrun is not always fatal, on some chips we can continue
* operation without resetting the card, that's why int_fatal is not
* common for all chips.
* @AR5K_INT_TX: mask to identify received frame interrupts, of type
* AR5K_ISR_TXOK or AR5K_ISR_TXERR
* @AR5K_INT_TXDESC: Request TX descriptor/Read TX status descriptor (?)
* @AR5K_INT_TXURN: received when we should increase the TX trigger threshold
* We currently do increments on interrupt by
* (AR5K_TUNE_MAX_TX_FIFO_THRES - current_trigger_level) / 2
* @AR5K_INT_MIB: Indicates that either the Management Information Base
* counters or one of the PHY error counters reached the maximum value and
* should be read and cleared.
* @AR5K_INT_SWI: Software triggered interrupt.
* @AR5K_INT_RXPHY: RX PHY Error
* @AR5K_INT_RXKCM: RX Key cache miss
* @AR5K_INT_SWBA: SoftWare Beacon Alert - indicates it's time to send a
* beacon that must be handled in software. The alternative is if
* you have VEOL support, in that case you let the hardware deal
* with things.
* @AR5K_INT_BRSSI: Beacon received with an RSSI value below our threshold
* @AR5K_INT_BMISS: If in STA mode this indicates we have stopped seeing
* beacons from the AP we have associated with, we should probably
* try to reassociate. When in IBSS mode this might mean we have
* not received any beacons from any local stations. Note that
* every station in an IBSS schedules to send beacons at the
* Target Beacon Transmission Time (TBTT) with a random backoff.
* @AR5K_INT_BNR: Beacon queue got triggered (DMA beacon alert) while empty.
* @AR5K_INT_TIM: Beacon with local station's TIM bit set
* @AR5K_INT_DTIM: Beacon with DTIM bit and zero DTIM count received
* @AR5K_INT_DTIM_SYNC: DTIM sync lost
* @AR5K_INT_GPIO: GPIO interrupt is used for RF Kill switches connected to
* our GPIO pins.
* @AR5K_INT_BCN_TIMEOUT: Beacon timeout, we waited after TBTT but got nothing
* @AR5K_INT_CAB_TIMEOUT: We waited for CAB traffic after the beacon but got
* nothing or an incomplete CAB frame sequence.
* @AR5K_INT_QCBRORN: A queue's CBR counter has expired
* @AR5K_INT_QCBRURN: A queue got triggered while empty
* @AR5K_INT_QTRIG: A queue got triggered
*
* @AR5K_INT_FATAL: Fatal errors were encountered, typically caused by bus/DMA
* errors. Indicates we need to reset the card.
* @AR5K_INT_GLOBAL: Used to clear and set the IER
* @AR5K_INT_NOCARD: Signals the card has been removed
* @AR5K_INT_COMMON: Common interrupts shared among MACs with the same
* bit value
*
* These are mapped to take advantage of some common bits
* between the MACs, to be able to set intr properties
@ -847,15 +1030,15 @@ enum ath5k_int {
AR5K_INT_GPIO = 0x01000000,
AR5K_INT_BCN_TIMEOUT = 0x02000000, /* Non common */
AR5K_INT_CAB_TIMEOUT = 0x04000000, /* Non common */
AR5K_INT_RX_DOPPLER = 0x08000000, /* Non common */
AR5K_INT_QCBRORN = 0x10000000, /* Non common */
AR5K_INT_QCBRURN = 0x20000000, /* Non common */
AR5K_INT_QTRIG = 0x40000000, /* Non common */
AR5K_INT_QCBRORN = 0x08000000, /* Non common */
AR5K_INT_QCBRURN = 0x10000000, /* Non common */
AR5K_INT_QTRIG = 0x20000000, /* Non common */
AR5K_INT_GLOBAL = 0x80000000,
AR5K_INT_TX_ALL = AR5K_INT_TXOK
| AR5K_INT_TXDESC
| AR5K_INT_TXERR
| AR5K_INT_TXNOFRM
| AR5K_INT_TXEOL
| AR5K_INT_TXURN,
@ -891,15 +1074,32 @@ enum ath5k_int {
AR5K_INT_NOCARD = 0xffffffff
};
/**
* enum ath5k_calibration_mask - Mask which calibration is active at the moment
* @AR5K_CALIBRATION_FULL: Full calibration (AGC + SHORT)
* @AR5K_CALIBRATION_SHORT: Short calibration (NF + I/Q)
* @AR5K_CALIBRATION_NF: Noise Floor calibration
* @AR5K_CALIBRATION_ANI: Adaptive Noise Immunity
*/
enum ath5k_calibration_mask {
AR5K_CALIBRATION_FULL = 0x01,
AR5K_CALIBRATION_SHORT = 0x02,
AR5K_CALIBRATION_ANI = 0x04,
AR5K_CALIBRATION_NF = 0x04,
AR5K_CALIBRATION_ANI = 0x08,
};
/**
* enum ath5k_power_mode - Power management modes
* @AR5K_PM_UNDEFINED: Undefined
* @AR5K_PM_AUTO: Allow card to sleep if possible
* @AR5K_PM_AWAKE: Force card to wake up
* @AR5K_PM_FULL_SLEEP: Force card to full sleep (DANGEROUS)
* @AR5K_PM_NETWORK_SLEEP: Allow to sleep for a specified duration
*
* Currently only PM_AWAKE is used, FULL_SLEEP and NETWORK_SLEEP/AUTO
* are also known to have problems on some cards. This is not a big
* problem though because we can have almost the same effect as
* FULL_SLEEP by putting card on warm reset (it's almost powered down).
*/
enum ath5k_power_mode {
AR5K_PM_UNDEFINED = 0,
@ -957,6 +1157,8 @@ struct ath5k_capabilities {
} cap_queues;
bool cap_has_phyerr_counters;
bool cap_has_mrr_support;
bool cap_needs_2GHz_ovr;
};
/* size of noise floor history (keep it a power of two) */
@ -1072,13 +1274,11 @@ struct ath5k_hw {
dma_addr_t desc_daddr; /* DMA (physical) address */
size_t desc_len; /* size of TX/RX descriptors */
DECLARE_BITMAP(status, 6);
DECLARE_BITMAP(status, 4);
#define ATH_STAT_INVALID 0 /* disable hardware accesses */
#define ATH_STAT_MRRETRY 1 /* multi-rate retry support */
#define ATH_STAT_PROMISC 2
#define ATH_STAT_LEDSOFT 3 /* enable LED gpio status */
#define ATH_STAT_STARTED 4 /* opened & irqs enabled */
#define ATH_STAT_2G_DISABLED 5 /* multiband radio without 2G */
#define ATH_STAT_PROMISC 1
#define ATH_STAT_LEDSOFT 2 /* enable LED gpio status */
#define ATH_STAT_STARTED 3 /* opened & irqs enabled */
unsigned int filter_flags; /* HW flags, AR5K_RX_FILTER_* */
struct ieee80211_channel *curchan; /* current h/w channel */
@ -1097,6 +1297,7 @@ struct ath5k_hw {
led_on; /* pin setting for LED on */
struct work_struct reset_work; /* deferred chip reset */
struct work_struct calib_work; /* deferred phy calibration */
struct list_head rxbuf; /* receive buffer */
spinlock_t rxbuflock;
@ -1113,8 +1314,6 @@ struct ath5k_hw {
struct ath5k_rfkill rf_kill;
struct tasklet_struct calib; /* calibration tasklet */
spinlock_t block; /* protects beacon */
struct tasklet_struct beacontq; /* beacon intr tasklet */
struct list_head bcbuf; /* beacon buffer */
@ -1144,7 +1343,7 @@ struct ath5k_hw {
enum ath5k_int ah_imr;
struct ieee80211_channel *ah_current_channel;
bool ah_calibration;
bool ah_iq_cal_needed;
bool ah_single_chip;
enum ath5k_version ah_version;
@ -1187,7 +1386,13 @@ struct ath5k_hw {
u32 ah_txq_imr_cbrurn;
u32 ah_txq_imr_qtrig;
u32 ah_txq_imr_nofrm;
u32 ah_txq_isr;
u32 ah_txq_isr_txok_all;
u32 ah_txq_isr_txurn;
u32 ah_txq_isr_qcborn;
u32 ah_txq_isr_qcburn;
u32 ah_txq_isr_qtrig;
u32 *ah_rf_banks;
size_t ah_rf_banks_size;
size_t ah_rf_regs_count;
@ -1228,8 +1433,8 @@ struct ath5k_hw {
/* Calibration timestamp */
unsigned long ah_cal_next_full;
unsigned long ah_cal_next_short;
unsigned long ah_cal_next_ani;
unsigned long ah_cal_next_nf;
/* Calibration mask */
u8 ah_cal_mask;
@ -1338,11 +1543,11 @@ void ath5k_hw_stop_rx_pcu(struct ath5k_hw *ah);
u64 ath5k_hw_get_tsf64(struct ath5k_hw *ah);
void ath5k_hw_set_tsf64(struct ath5k_hw *ah, u64 tsf64);
void ath5k_hw_reset_tsf(struct ath5k_hw *ah);
void ath5k_hw_init_beacon(struct ath5k_hw *ah, u32 next_beacon, u32 interval);
void ath5k_hw_init_beacon_timers(struct ath5k_hw *ah, u32 next_beacon,
u32 interval);
bool ath5k_hw_check_beacon_timers(struct ath5k_hw *ah, int intval);
/* Init function */
void ath5k_hw_pcu_init(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
u8 mode);
void ath5k_hw_pcu_init(struct ath5k_hw *ah, enum nl80211_iftype op_mode);
/* Queue Control Unit, DFS Control Unit Functions */
int ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue,

View file

@ -27,8 +27,7 @@
#include "debug.h"
/**
* ath5k_hw_post() - Power On Self Test helper function
* @ah: The &struct ath5k_hw
*/
static int ath5k_hw_post(struct ath5k_hw *ah)
@ -92,8 +91,7 @@ static int ath5k_hw_post(struct ath5k_hw *ah)
}
/**
* ath5k_hw_init() - Check if hw is supported and init the needed structs
* @ah: The &struct ath5k_hw associated with the device
*
* Check if the device is supported, perform a POST and initialize the needed
@ -298,7 +296,7 @@ int ath5k_hw_init(struct ath5k_hw *ah)
/* Reset SERDES to load new settings */
ath5k_hw_reg_write(ah, 0x00000000, AR5K_PCIE_SERDES_RESET);
mdelay(1);
usleep_range(1000, 1500);
}
/* Get misc capabilities */
@ -308,11 +306,6 @@ int ath5k_hw_init(struct ath5k_hw *ah)
goto err;
}
if (test_bit(ATH_STAT_2G_DISABLED, ah->status)) {
__clear_bit(AR5K_MODE_11B, ah->ah_capabilities.cap_mode);
__clear_bit(AR5K_MODE_11G, ah->ah_capabilities.cap_mode);
}
/* Crypto settings */
common->keymax = (ah->ah_version == AR5K_AR5210 ?
AR5K_KEYTABLE_SIZE_5210 : AR5K_KEYTABLE_SIZE_5211);
@ -349,8 +342,7 @@ err:
}
/**
* ath5k_hw_deinit() - Free the &struct ath5k_hw
* @ah: The &struct ath5k_hw
*/
void ath5k_hw_deinit(struct ath5k_hw *ah)

View file

@ -80,6 +80,11 @@ static int modparam_fastchanswitch;
module_param_named(fastchanswitch, modparam_fastchanswitch, bool, S_IRUGO);
MODULE_PARM_DESC(fastchanswitch, "Enable fast channel switching for AR2413/AR5413 radios.");
static int ath5k_modparam_no_hw_rfkill_switch;
module_param_named(no_hw_rfkill_switch, ath5k_modparam_no_hw_rfkill_switch,
bool, S_IRUGO);
MODULE_PARM_DESC(no_hw_rfkill_switch, "Ignore the GPIO RFKill switch state");
/* Module info */
MODULE_AUTHOR("Jiri Slaby");
@ -183,7 +188,6 @@ static const struct ieee80211_rate ath5k_rates[] = {
{ .bitrate = 540,
.hw_value = ATH5K_RATE_CODE_54M,
.flags = 0 },
/* XR missing */
};
static inline u64 ath5k_extend_tsf(struct ath5k_hw *ah, u32 rstamp)
@ -721,22 +725,25 @@ ath5k_txbuf_setup(struct ath5k_hw *ah, struct ath5k_buf *bf,
if (ret)
goto err_unmap;
memset(mrr_rate, 0, sizeof(mrr_rate));
memset(mrr_tries, 0, sizeof(mrr_tries));
for (i = 0; i < 3; i++) {
rate = ieee80211_get_alt_retry_rate(ah->hw, info, i);
if (!rate)
break;
/* Set up MRR descriptor */
if (ah->ah_capabilities.cap_has_mrr_support) {
memset(mrr_rate, 0, sizeof(mrr_rate));
memset(mrr_tries, 0, sizeof(mrr_tries));
for (i = 0; i < 3; i++) {
rate = ieee80211_get_alt_retry_rate(ah->hw, info, i);
if (!rate)
break;
mrr_rate[i] = rate->hw_value;
mrr_tries[i] = info->control.rates[i + 1].count;
mrr_rate[i] = rate->hw_value;
mrr_tries[i] = info->control.rates[i + 1].count;
}
ath5k_hw_setup_mrr_tx_desc(ah, ds,
mrr_rate[0], mrr_tries[0],
mrr_rate[1], mrr_tries[1],
mrr_rate[2], mrr_tries[2]);
}
ath5k_hw_setup_mrr_tx_desc(ah, ds,
mrr_rate[0], mrr_tries[0],
mrr_rate[1], mrr_tries[1],
mrr_rate[2], mrr_tries[2]);
ds->ds_link = 0;
ds->ds_data = bf->skbaddr;
@ -1689,7 +1696,7 @@ ath5k_tasklet_tx(unsigned long data)
struct ath5k_hw *ah = (void *)data;
for (i = 0; i < AR5K_NUM_TX_QUEUES; i++)
if (ah->txqs[i].setup && (ah->ah_txq_isr & BIT(i)))
if (ah->txqs[i].setup && (ah->ah_txq_isr_txok_all & BIT(i)))
ath5k_tx_processq(ah, &ah->txqs[i]);
ah->tx_pending = false;
@ -2005,7 +2012,7 @@ ath5k_beacon_update_timers(struct ath5k_hw *ah, u64 bc_tsf)
ah->nexttbtt = nexttbtt;
intval |= AR5K_BEACON_ENA;
ath5k_hw_init_beacon(ah, nexttbtt, intval);
ath5k_hw_init_beacon_timers(ah, nexttbtt, intval);
/*
* debugging output last in order to preserve the time critical aspect
@ -2112,16 +2119,29 @@ static void
ath5k_intr_calibration_poll(struct ath5k_hw *ah)
{
if (time_is_before_eq_jiffies(ah->ah_cal_next_ani) &&
!(ah->ah_cal_mask & AR5K_CALIBRATION_FULL)) {
/* run ANI only when full calibration is not active */
!(ah->ah_cal_mask & AR5K_CALIBRATION_FULL) &&
!(ah->ah_cal_mask & AR5K_CALIBRATION_SHORT)) {
/* Run ANI only when calibration is not active */
ah->ah_cal_next_ani = jiffies +
msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_ANI);
tasklet_schedule(&ah->ani_tasklet);
} else if (time_is_before_eq_jiffies(ah->ah_cal_next_full)) {
ah->ah_cal_next_full = jiffies +
msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_FULL);
tasklet_schedule(&ah->calib);
} else if (time_is_before_eq_jiffies(ah->ah_cal_next_short) &&
!(ah->ah_cal_mask & AR5K_CALIBRATION_FULL) &&
!(ah->ah_cal_mask & AR5K_CALIBRATION_SHORT)) {
/* Run calibration only when another calibration
* is not running.
*
* Note: This is for both full/short calibration,
* if it's time for a full one, ath5k_calibrate_work will deal
* with it. */
ah->ah_cal_next_short = jiffies +
msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_SHORT);
ieee80211_queue_work(ah->hw, &ah->calib_work);
}
/* we could use SWI to generate enough interrupts to meet our
* calibration interval requirements, if necessary:
@ -2149,69 +2169,110 @@ ath5k_intr(int irq, void *dev_id)
enum ath5k_int status;
unsigned int counter = 1000;
/*
* If hw is not ready (or detached) and we get an
* interrupt, or if we have no interrupts pending
* (that means it's not for us) skip it.
*
* NOTE: Group 0/1 PCI interface registers are not
* supported on WiSOCs, so we can't check for pending
* interrupts (ISR belongs to another register group
* so we are ok).
*/
if (unlikely(test_bit(ATH_STAT_INVALID, ah->status) ||
((ath5k_get_bus_type(ah) != ATH_AHB) &&
!ath5k_hw_is_intr_pending(ah))))
((ath5k_get_bus_type(ah) != ATH_AHB) &&
!ath5k_hw_is_intr_pending(ah))))
return IRQ_NONE;
/** Main loop **/
do {
ath5k_hw_get_isr(ah, &status); /* NB: clears IRQ too */
ath5k_hw_get_isr(ah, &status); /* NB: clears IRQ too */
ATH5K_DBG(ah, ATH5K_DEBUG_INTR, "status 0x%x/0x%x\n",
status, ah->imask);
/*
* Fatal hw error -> Log and reset
*
* Fatal errors are unrecoverable so we have to
* reset the card. These errors include bus and
* dma errors.
*/
if (unlikely(status & AR5K_INT_FATAL)) {
/*
* Fatal errors are unrecoverable.
* Typically these are caused by DMA errors.
*/
ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
"fatal int, resetting\n");
ieee80211_queue_work(ah->hw, &ah->reset_work);
/*
* RX Overrun -> Count and reset if needed
*
* Receive buffers are full. Either the bus is busy or
* the CPU is not fast enough to process all received
* frames.
*/
} else if (unlikely(status & AR5K_INT_RXORN)) {
/*
* Receive buffers are full. Either the bus is busy or
* the CPU is not fast enough to process all received
* frames.
* Older chipsets need a reset to come out of this
* condition, but we treat it as RX for newer chips.
* We don't know exactly which versions need a reset -
* this guess is copied from the HAL.
*/
ah->stats.rxorn_intr++;
if (ah->ah_mac_srev < AR5K_SREV_AR5212) {
ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
"rx overrun, resetting\n");
ieee80211_queue_work(ah->hw, &ah->reset_work);
} else
ath5k_schedule_rx(ah);
} else {
/* Software Beacon Alert -> Schedule beacon tasklet */
if (status & AR5K_INT_SWBA)
tasklet_hi_schedule(&ah->beacontq);
if (status & AR5K_INT_RXEOL) {
/*
* No more RX descriptors -> Just count
*
* NB: the hardware should re-read the link when
* RXE bit is written, but it doesn't work at
* least on older hardware revs.
*/
if (status & AR5K_INT_RXEOL)
ah->stats.rxeol_intr++;
}
if (status & AR5K_INT_TXURN) {
/* TX Underrun -> Bump tx trigger level */
if (status & AR5K_INT_TXURN)
ath5k_hw_update_tx_triglevel(ah, true);
}
/* RX -> Schedule rx tasklet */
if (status & (AR5K_INT_RXOK | AR5K_INT_RXERR))
ath5k_schedule_rx(ah);
if (status & (AR5K_INT_TXOK | AR5K_INT_TXDESC
| AR5K_INT_TXERR | AR5K_INT_TXEOL))
/* TX -> Schedule tx tasklet */
if (status & (AR5K_INT_TXOK
| AR5K_INT_TXDESC
| AR5K_INT_TXERR
| AR5K_INT_TXEOL))
ath5k_schedule_tx(ah);
if (status & AR5K_INT_BMISS) {
/* TODO */
}
/* Missed beacon -> TODO
if (status & AR5K_INT_BMISS)
*/
/* MIB event -> Update counters and notify ANI */
if (status & AR5K_INT_MIB) {
ah->stats.mib_intr++;
ath5k_hw_update_mib_counters(ah);
ath5k_ani_mib_intr(ah);
}
/* GPIO -> Notify RFKill layer */
if (status & AR5K_INT_GPIO)
tasklet_schedule(&ah->rf_kill.toggleq);
@ -2222,12 +2283,19 @@ ath5k_intr(int irq, void *dev_id)
} while (ath5k_hw_is_intr_pending(ah) && --counter > 0);
/*
* Until we handle rx/tx interrupts mask them on IMR
*
* NOTE: ah->(rx/tx)_pending are set when scheduling the tasklets
* and unset after we 've handled the interrupts.
*/
if (ah->rx_pending || ah->tx_pending)
ath5k_set_current_imask(ah);
if (unlikely(!counter))
ATH5K_WARN(ah, "too many interrupts, giving up for now\n");
/* Fire up calibration poll */
ath5k_intr_calibration_poll(ah);
return IRQ_HANDLED;
@ -2238,41 +2306,58 @@ ath5k_intr(int irq, void *dev_id)
* for temperature/environment changes.
*/
static void
ath5k_tasklet_calibrate(unsigned long data)
ath5k_calibrate_work(struct work_struct *work)
{
struct ath5k_hw *ah = (void *)data;
struct ath5k_hw *ah = container_of(work, struct ath5k_hw,
calib_work);
/* Should we run a full calibration ? */
if (time_is_before_eq_jiffies(ah->ah_cal_next_full)) {
ah->ah_cal_next_full = jiffies +
msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_FULL);
ah->ah_cal_mask |= AR5K_CALIBRATION_FULL;
ATH5K_DBG(ah, ATH5K_DEBUG_CALIBRATE,
"running full calibration\n");
if (ath5k_hw_gainf_calibrate(ah) == AR5K_RFGAIN_NEED_CHANGE) {
/*
* Rfgain is out of bounds, reset the chip
* to load new gain values.
*/
ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
"got new rfgain, resetting\n");
ieee80211_queue_work(ah->hw, &ah->reset_work);
}
/* TODO: On full calibration we should stop TX here,
* so that it doesn't interfere (mostly due to gain_f
* calibration that messes with tx packets -see phy.c).
*
* NOTE: Stopping the queues from above is not enough
* to stop TX but saves us from disconnecting (at least
* we don't lose packets). */
ieee80211_stop_queues(ah->hw);
} else
ah->ah_cal_mask |= AR5K_CALIBRATION_SHORT;
/* Only full calibration for now */
ah->ah_cal_mask |= AR5K_CALIBRATION_FULL;
ATH5K_DBG(ah, ATH5K_DEBUG_CALIBRATE, "channel %u/%x\n",
ieee80211_frequency_to_channel(ah->curchan->center_freq),
ah->curchan->hw_value);
if (ath5k_hw_gainf_calibrate(ah) == AR5K_RFGAIN_NEED_CHANGE) {
/*
* Rfgain is out of bounds, reset the chip
* to load new gain values.
*/
ATH5K_DBG(ah, ATH5K_DEBUG_RESET, "calibration, resetting\n");
ieee80211_queue_work(ah->hw, &ah->reset_work);
}
if (ath5k_hw_phy_calibrate(ah, ah->curchan))
ATH5K_ERR(ah, "calibration of channel %u failed\n",
ieee80211_frequency_to_channel(
ah->curchan->center_freq));
/* Noise floor calibration interrupts rx/tx path while I/Q calibration
* doesn't.
* TODO: We should stop TX here, so that it doesn't interfere.
* Note that stopping the queues is not enough to stop TX! */
if (time_is_before_eq_jiffies(ah->ah_cal_next_nf)) {
ah->ah_cal_next_nf = jiffies +
msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_NF);
ath5k_hw_update_noise_floor(ah);
}
ah->ah_cal_mask &= ~AR5K_CALIBRATION_FULL;
/* Clear calibration flags */
if (ah->ah_cal_mask & AR5K_CALIBRATION_FULL) {
ieee80211_wake_queues(ah->hw);
ah->ah_cal_mask &= ~AR5K_CALIBRATION_FULL;
} else if (ah->ah_cal_mask & AR5K_CALIBRATION_SHORT)
ah->ah_cal_mask &= ~AR5K_CALIBRATION_SHORT;
}
@ -2407,8 +2492,8 @@ ath5k_init_ah(struct ath5k_hw *ah, const struct ath_bus_ops *bus_ops)
if (ret)
goto err_irq;
/* set up multi-rate retry capabilities */
if (ah->ah_version == AR5K_AR5212) {
/* Set up multi-rate retry capabilities */
if (ah->ah_capabilities.cap_has_mrr_support) {
hw->max_rates = 4;
hw->max_rate_tries = max(AR5K_INIT_RETRY_SHORT,
AR5K_INIT_RETRY_LONG);
@ -2544,15 +2629,22 @@ int ath5k_start(struct ieee80211_hw *hw)
* and then setup of the interrupt mask.
*/
ah->curchan = ah->hw->conf.channel;
ah->imask = AR5K_INT_RXOK | AR5K_INT_RXERR | AR5K_INT_RXEOL |
AR5K_INT_RXORN | AR5K_INT_TXDESC | AR5K_INT_TXEOL |
AR5K_INT_FATAL | AR5K_INT_GLOBAL | AR5K_INT_MIB;
ah->imask = AR5K_INT_RXOK
| AR5K_INT_RXERR
| AR5K_INT_RXEOL
| AR5K_INT_RXORN
| AR5K_INT_TXDESC
| AR5K_INT_TXEOL
| AR5K_INT_FATAL
| AR5K_INT_GLOBAL
| AR5K_INT_MIB;
ret = ath5k_reset(ah, NULL, false);
if (ret)
goto done;
ath5k_rfkill_hw_start(ah);
if (!ath5k_modparam_no_hw_rfkill_switch)
ath5k_rfkill_hw_start(ah);
/*
* Reset the key cache since some parts do not reset the
@ -2585,7 +2677,6 @@ static void ath5k_stop_tasklets(struct ath5k_hw *ah)
ah->tx_pending = false;
tasklet_kill(&ah->rxtq);
tasklet_kill(&ah->txtq);
tasklet_kill(&ah->calib);
tasklet_kill(&ah->beacontq);
tasklet_kill(&ah->ani_tasklet);
}
@ -2637,7 +2728,8 @@ void ath5k_stop(struct ieee80211_hw *hw)
cancel_delayed_work_sync(&ah->tx_complete_work);
ath5k_rfkill_hw_stop(ah);
if (!ath5k_modparam_no_hw_rfkill_switch)
ath5k_rfkill_hw_stop(ah);
}
/*
@ -2689,9 +2781,24 @@ ath5k_reset(struct ath5k_hw *ah, struct ieee80211_channel *chan,
ath5k_ani_init(ah, ani_mode);
ah->ah_cal_next_full = jiffies + msecs_to_jiffies(100);
ah->ah_cal_next_ani = jiffies;
ah->ah_cal_next_nf = jiffies;
/*
* Set calibration intervals
*
* Note: We don't need to run calibration immediately
* since some initial calibration is done on reset
* even for fast channel switching. Also on scanning
* this will get set again and again and it won't get
* executed unless we connect somewhere and spend some
* time on the channel (that's what calibration needs
* anyway to be accurate).
*/
ah->ah_cal_next_full = jiffies +
msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_FULL);
ah->ah_cal_next_ani = jiffies +
msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_ANI);
ah->ah_cal_next_short = jiffies +
msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_SHORT);
ewma_init(&ah->ah_beacon_rssi_avg, 1024, 8);
/* clear survey data and cycle counters */
@ -2744,20 +2851,6 @@ ath5k_init(struct ieee80211_hw *hw)
int ret;
/*
* Check if the MAC has multi-rate retry support.
* We do this by trying to setup a fake extended
* descriptor. MACs that don't have support will
* return false w/o doing anything. MACs that do
* support it will return true w/o doing anything.
*/
ret = ath5k_hw_setup_mrr_tx_desc(ah, NULL, 0, 0, 0, 0, 0, 0);
if (ret < 0)
goto err;
if (ret > 0)
__set_bit(ATH_STAT_MRRETRY, ah->status);
/*
* Collect the channel list. The 802.11 layer
* is responsible for filtering this list based
@ -2841,11 +2934,11 @@ ath5k_init(struct ieee80211_hw *hw)
tasklet_init(&ah->rxtq, ath5k_tasklet_rx, (unsigned long)ah);
tasklet_init(&ah->txtq, ath5k_tasklet_tx, (unsigned long)ah);
tasklet_init(&ah->calib, ath5k_tasklet_calibrate, (unsigned long)ah);
tasklet_init(&ah->beacontq, ath5k_tasklet_beacon, (unsigned long)ah);
tasklet_init(&ah->ani_tasklet, ath5k_tasklet_ani, (unsigned long)ah);
INIT_WORK(&ah->reset_work, ath5k_reset_work);
INIT_WORK(&ah->calib_work, ath5k_calibrate_work);
INIT_DELAYED_WORK(&ah->tx_complete_work, ath5k_tx_complete_poll_work);
ret = ath5k_hw_common(ah)->bus_ops->eeprom_read_mac(ah, mac);

View file

@ -85,12 +85,19 @@ int ath5k_hw_set_capabilities(struct ath5k_hw *ah)
caps->cap_range.range_2ghz_min = 2412;
caps->cap_range.range_2ghz_max = 2732;
if (AR5K_EEPROM_HDR_11B(ee_header))
__set_bit(AR5K_MODE_11B, caps->cap_mode);
/* Override 2GHz modes on SoCs that need it
* NOTE: cap_needs_2GHz_ovr gets set from
* ath_ahb_probe */
if (!caps->cap_needs_2GHz_ovr) {
if (AR5K_EEPROM_HDR_11B(ee_header))
__set_bit(AR5K_MODE_11B,
caps->cap_mode);
if (AR5K_EEPROM_HDR_11G(ee_header) &&
ah->ah_version != AR5K_AR5211)
__set_bit(AR5K_MODE_11G, caps->cap_mode);
if (AR5K_EEPROM_HDR_11G(ee_header) &&
ah->ah_version != AR5K_AR5211)
__set_bit(AR5K_MODE_11G,
caps->cap_mode);
}
}
}
@ -103,12 +110,18 @@ int ath5k_hw_set_capabilities(struct ath5k_hw *ah)
else
caps->cap_queues.q_tx_num = AR5K_NUM_TX_QUEUES;
/* newer hardware has PHY error counters */
/* Newer hardware has PHY error counters */
if (ah->ah_mac_srev >= AR5K_SREV_AR5213A)
caps->cap_has_phyerr_counters = true;
else
caps->cap_has_phyerr_counters = false;
/* MACs since AR5212 have MRR support */
if (ah->ah_version == AR5K_AR5212)
caps->cap_has_mrr_support = true;
else
caps->cap_has_mrr_support = false;
return 0;
}
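A minimal usage sketch, assuming the driver-side init path now consults this new capability flag instead of probing with a fake MRR descriptor (as the removed ath5k_init() code above used to do):

	if (ah->ah_capabilities.cap_has_mrr_support)
		__set_bit(ATH_STAT_MRRETRY, ah->status);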

View file

@ -26,20 +26,61 @@
#include "debug.h"
/**
* DOC: Hardware descriptor functions
*
* Here we handle the processing of the low-level hw descriptors
* that hw reads and writes via DMA for each TX and RX attempt (that means
* we can also have descriptors for failed TX/RX tries). We have two kinds of
* descriptors for RX and TX: control descriptors, which tell the hw how to
* send or receive a packet and where to read/write it from/to, and status
* descriptors, which contain information about how the packet was sent or
* received (errors included).
*
* The descriptor format is not exactly the same for each MAC chip version, so
* we have function pointers on &struct ath5k_hw that we initialize at runtime
* based on the chip used.
*/
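Once these pointers are filled in (see ath5k_hw_init_desc_functions() below), callers go through them rather than calling the chip-specific helpers directly. A minimal sketch, assuming the ah_setup_tx_desc field and the AR5K_PKT_TYPE_NORMAL / AR5K_TXKEYIX_INVALID constants from the ath5k headers (argument values are illustrative):

static int example_fill_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *ds,
				unsigned int pkt_len, unsigned int tx_rate0)
{
	/* The same call works on 2-word (5210/5211) and 4-word (5212+) MACs;
	 * the function pointer hides the chip-specific descriptor layout. */
	return ah->ah_setup_tx_desc(ah, ds, pkt_len, 0, 0,
				    AR5K_PKT_TYPE_NORMAL, 60, tx_rate0, 1,
				    AR5K_TXKEYIX_INVALID, 0, 0, 0, 0);
}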
/************************\
* TX Control descriptors *
\************************/
/*
* Initialize the 2-word tx control descriptor on 5210/5211
/**
* ath5k_hw_setup_2word_tx_desc() - Initialize a 2-word tx control descriptor
* @ah: The &struct ath5k_hw
* @desc: The &struct ath5k_desc
* @pkt_len: Frame length in bytes
* @hdr_len: Header length in bytes (only used on AR5210)
* @padsize: Any padding we've added to the frame length
* @type: One of enum ath5k_pkt_type
* @tx_power: Tx power in 0.5dB steps
* @tx_rate0: HW idx for transmission rate
* @tx_tries0: Max number of retransmissions
* @key_index: Index on key table to use for encryption
* @antenna_mode: Which antenna to use (0 for auto)
* @flags: One of AR5K_TXDESC_* flags (desc.h)
* @rtscts_rate: HW idx for RTS/CTS transmission rate
* @rtscts_duration: What to put in the duration field of the RTS/CTS header
*
* Internal function to initialize a 2-Word TX control descriptor
* found on AR5210 and AR5211 MAC chips.
*
* Returns 0 on success or -EINVAL on invalid input
*/
static int
ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
unsigned int pkt_len, unsigned int hdr_len, int padsize,
enum ath5k_pkt_type type,
unsigned int tx_power, unsigned int tx_rate0, unsigned int tx_tries0,
unsigned int key_index, unsigned int antenna_mode, unsigned int flags,
unsigned int rtscts_rate, unsigned int rtscts_duration)
ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah,
struct ath5k_desc *desc,
unsigned int pkt_len, unsigned int hdr_len,
int padsize,
enum ath5k_pkt_type type,
unsigned int tx_power,
unsigned int tx_rate0, unsigned int tx_tries0,
unsigned int key_index,
unsigned int antenna_mode,
unsigned int flags,
unsigned int rtscts_rate, unsigned int rtscts_duration)
{
u32 frame_type;
struct ath5k_hw_2w_tx_ctl *tx_ctl;
@ -172,17 +213,40 @@ ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
return 0;
}
/*
* Initialize the 4-word tx control descriptor on 5212
/**
* ath5k_hw_setup_4word_tx_desc() - Initialize a 4-word tx control descriptor
* @ah: The &struct ath5k_hw
* @desc: The &struct ath5k_desc
* @pkt_len: Frame length in bytes
* @hdr_len: Header length in bytes (only used on AR5210)
* @padsize: Any padding we've added to the frame length
* @type: One of enum ath5k_pkt_type
* @tx_power: Tx power in 0.5dB steps
* @tx_rate0: HW idx for transmission rate
* @tx_tries0: Max number of retransmissions
* @key_index: Index on key table to use for encryption
* @antenna_mode: Which antenna to use (0 for auto)
* @flags: One of AR5K_TXDESC_* flags (desc.h)
* @rtscts_rate: HW idx for RTS/CTS transmission rate
* @rtscts_duration: What to put in the duration field of the RTS/CTS header
*
* Internal function to initialize a 4-Word TX control descriptor
* found on AR5212 and later MAC chips.
*
* Returns 0 on success or -EINVAL on invalid input
*/
static int ath5k_hw_setup_4word_tx_desc(struct ath5k_hw *ah,
struct ath5k_desc *desc, unsigned int pkt_len, unsigned int hdr_len,
int padsize,
enum ath5k_pkt_type type, unsigned int tx_power, unsigned int tx_rate0,
unsigned int tx_tries0, unsigned int key_index,
unsigned int antenna_mode, unsigned int flags,
unsigned int rtscts_rate,
unsigned int rtscts_duration)
static int
ath5k_hw_setup_4word_tx_desc(struct ath5k_hw *ah,
struct ath5k_desc *desc,
unsigned int pkt_len, unsigned int hdr_len,
int padsize,
enum ath5k_pkt_type type,
unsigned int tx_power,
unsigned int tx_rate0, unsigned int tx_tries0,
unsigned int key_index,
unsigned int antenna_mode,
unsigned int flags,
unsigned int rtscts_rate, unsigned int rtscts_duration)
{
struct ath5k_hw_4w_tx_ctl *tx_ctl;
unsigned int frame_len;
@ -292,13 +356,29 @@ static int ath5k_hw_setup_4word_tx_desc(struct ath5k_hw *ah,
return 0;
}
/*
* Initialize a 4-word multi rate retry tx control descriptor on 5212
/**
* ath5k_hw_setup_mrr_tx_desc() - Initialize an MRR tx control descriptor
* @ah: The &struct ath5k_hw
* @desc: The &struct ath5k_desc
* @tx_rate1: HW idx for rate used on transmission series 1
* @tx_tries1: Max number of retransmissions for transmission series 1
* @tx_rate2: HW idx for rate used on transmission series 2
* @tx_tries2: Max number of retransmissions for transmission series 2
* @tx_rate3: HW idx for rate used on transmission series 3
* @tx_tries3: Max number of retransmissions for transmission series 3
*
* Multi rate retry (MRR) tx control descriptors are available only on AR5212
* MACs; they are part of the normal 4-word tx control descriptor (see above),
* but we handle them through a separate function for better abstraction.
*
* Returns 0 on success or -EINVAL on invalid input
*/
int
ath5k_hw_setup_mrr_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
unsigned int tx_rate1, u_int tx_tries1, u_int tx_rate2,
u_int tx_tries2, unsigned int tx_rate3, u_int tx_tries3)
ath5k_hw_setup_mrr_tx_desc(struct ath5k_hw *ah,
struct ath5k_desc *desc,
u_int tx_rate1, u_int tx_tries1,
u_int tx_rate2, u_int tx_tries2,
u_int tx_rate3, u_int tx_tries3)
{
struct ath5k_hw_4w_tx_ctl *tx_ctl;
@ -350,11 +430,16 @@ ath5k_hw_setup_mrr_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
* TX Status descriptors *
\***********************/
/*
* Process the tx status descriptor on 5210/5211
/**
* ath5k_hw_proc_2word_tx_status() - Process a tx status descriptor on 5210/1
* @ah: The &struct ath5k_hw
* @desc: The &struct ath5k_desc
* @ts: The &struct ath5k_tx_status
*/
static int ath5k_hw_proc_2word_tx_status(struct ath5k_hw *ah,
struct ath5k_desc *desc, struct ath5k_tx_status *ts)
static int
ath5k_hw_proc_2word_tx_status(struct ath5k_hw *ah,
struct ath5k_desc *desc,
struct ath5k_tx_status *ts)
{
struct ath5k_hw_2w_tx_ctl *tx_ctl;
struct ath5k_hw_tx_status *tx_status;
@ -399,11 +484,16 @@ static int ath5k_hw_proc_2word_tx_status(struct ath5k_hw *ah,
return 0;
}
/*
* Process a tx status descriptor on 5212
/**
* ath5k_hw_proc_4word_tx_status() - Process a tx status descriptor on 5212
* @ah: The &struct ath5k_hw
* @desc: The &struct ath5k_desc
* @ts: The &struct ath5k_tx_status
*/
static int ath5k_hw_proc_4word_tx_status(struct ath5k_hw *ah,
struct ath5k_desc *desc, struct ath5k_tx_status *ts)
static int
ath5k_hw_proc_4word_tx_status(struct ath5k_hw *ah,
struct ath5k_desc *desc,
struct ath5k_tx_status *ts)
{
struct ath5k_hw_4w_tx_ctl *tx_ctl;
struct ath5k_hw_tx_status *tx_status;
@ -460,11 +550,17 @@ static int ath5k_hw_proc_4word_tx_status(struct ath5k_hw *ah,
* RX Descriptors *
\****************/
/*
* Initialize an rx control descriptor
/**
* ath5k_hw_setup_rx_desc() - Initialize an rx control descriptor
* @ah: The &struct ath5k_hw
* @desc: The &struct ath5k_desc
* @size: RX buffer length in bytes
* @flags: One of AR5K_RXDESC_* flags
*/
int ath5k_hw_setup_rx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
u32 size, unsigned int flags)
int
ath5k_hw_setup_rx_desc(struct ath5k_hw *ah,
struct ath5k_desc *desc,
u32 size, unsigned int flags)
{
struct ath5k_hw_rx_ctl *rx_ctl;
@ -491,11 +587,22 @@ int ath5k_hw_setup_rx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
return 0;
}
/*
* Process the rx status descriptor on 5210/5211
/**
* ath5k_hw_proc_5210_rx_status() - Process the rx status descriptor on 5210/1
* @ah: The &struct ath5k_hw
* @desc: The &struct ath5k_desc
* @rs: The &struct ath5k_rx_status
*
* Internal function used to process an RX status descriptor
* on AR5210/5211 MACs.
*
* Returns 0 on success or -EINPROGRESS in case we haven't received the whole
* frame yet.
*/
static int ath5k_hw_proc_5210_rx_status(struct ath5k_hw *ah,
struct ath5k_desc *desc, struct ath5k_rx_status *rs)
static int
ath5k_hw_proc_5210_rx_status(struct ath5k_hw *ah,
struct ath5k_desc *desc,
struct ath5k_rx_status *rs)
{
struct ath5k_hw_rx_status *rx_status;
@ -574,12 +681,22 @@ static int ath5k_hw_proc_5210_rx_status(struct ath5k_hw *ah,
return 0;
}
/*
* Process the rx status descriptor on 5212
/**
* ath5k_hw_proc_5212_rx_status() - Process the rx status descriptor on 5212
* @ah: The &struct ath5k_hw
* @desc: The &struct ath5k_desc
* @rs: The &struct ath5k_rx_status
*
* Internal function used to process an RX status descriptor
* on AR5212 and later MACs.
*
* Returns 0 on success or -EINPROGRESS in case we haven't received the whole
* frame yet.
*/
static int ath5k_hw_proc_5212_rx_status(struct ath5k_hw *ah,
struct ath5k_desc *desc,
struct ath5k_rx_status *rs)
static int
ath5k_hw_proc_5212_rx_status(struct ath5k_hw *ah,
struct ath5k_desc *desc,
struct ath5k_rx_status *rs)
{
struct ath5k_hw_rx_status *rx_status;
u32 rxstat0, rxstat1;
@ -646,10 +763,16 @@ static int ath5k_hw_proc_5212_rx_status(struct ath5k_hw *ah,
* Attach *
\********/
/*
* Init function pointers inside ath5k_hw struct
/**
* ath5k_hw_init_desc_functions() - Init function pointers inside ah
* @ah: The &struct ath5k_hw
*
* Maps the internal descriptor functions to the function pointers on ah, used
* by the layers above. This is used as an abstraction layer so that the various
* chips can be handled the same way.
*/
int ath5k_hw_init_desc_functions(struct ath5k_hw *ah)
int
ath5k_hw_init_desc_functions(struct ath5k_hw *ah)
{
if (ah->ah_version == AR5K_AR5212) {
ah->ah_setup_tx_desc = ath5k_hw_setup_4word_tx_desc;

View file

@ -20,25 +20,30 @@
* RX/TX descriptor structures
*/
/*
* Common hardware RX control descriptor
/**
* struct ath5k_hw_rx_ctl - Common hardware RX control descriptor
* @rx_control_0: RX control word 0
* @rx_control_1: RX control word 1
*/
struct ath5k_hw_rx_ctl {
u32 rx_control_0; /* RX control word 0 */
u32 rx_control_1; /* RX control word 1 */
u32 rx_control_0;
u32 rx_control_1;
} __packed __aligned(4);
/* RX control word 1 fields/flags */
#define AR5K_DESC_RX_CTL1_BUF_LEN 0x00000fff /* data buffer length */
#define AR5K_DESC_RX_CTL1_INTREQ 0x00002000 /* RX interrupt request */
/*
* Common hardware RX status descriptor
/**
* struct ath5k_hw_rx_status - Common hardware RX status descriptor
* @rx_status_0: RX status word 0
* @rx_status_1: RX status word 1
*
* 5210, 5211 and 5212 differ only in the fields and flags defined below
*/
struct ath5k_hw_rx_status {
u32 rx_status_0; /* RX status word 0 */
u32 rx_status_1; /* RX status word 1 */
u32 rx_status_0;
u32 rx_status_1;
} __packed __aligned(4);
/* 5210/5211 */
@ -98,17 +103,36 @@ struct ath5k_hw_rx_status {
/**
* enum ath5k_phy_error_code - PHY Error codes
* @AR5K_RX_PHY_ERROR_UNDERRUN: Transmit underrun, [5210] No error
* @AR5K_RX_PHY_ERROR_TIMING: Timing error
* @AR5K_RX_PHY_ERROR_PARITY: Illegal parity
* @AR5K_RX_PHY_ERROR_RATE: Illegal rate
* @AR5K_RX_PHY_ERROR_LENGTH: Illegal length
* @AR5K_RX_PHY_ERROR_RADAR: Radar detect, [5210] 64 QAM rate
* @AR5K_RX_PHY_ERROR_SERVICE: Illegal service
* @AR5K_RX_PHY_ERROR_TOR: Transmit override receive
* @AR5K_RX_PHY_ERROR_OFDM_TIMING: OFDM Timing error [5212+]
* @AR5K_RX_PHY_ERROR_OFDM_SIGNAL_PARITY: OFDM Signal parity error [5212+]
* @AR5K_RX_PHY_ERROR_OFDM_RATE_ILLEGAL: OFDM Illegal rate [5212+]
* @AR5K_RX_PHY_ERROR_OFDM_LENGTH_ILLEGAL: OFDM Illegal length [5212+]
* @AR5K_RX_PHY_ERROR_OFDM_POWER_DROP: OFDM Power drop [5212+]
* @AR5K_RX_PHY_ERROR_OFDM_SERVICE: OFDM Service (?) [5212+]
* @AR5K_RX_PHY_ERROR_OFDM_RESTART: OFDM Restart (?) [5212+]
* @AR5K_RX_PHY_ERROR_CCK_TIMING: CCK Timing error [5212+]
* @AR5K_RX_PHY_ERROR_CCK_HEADER_CRC: Header CRC error [5212+]
* @AR5K_RX_PHY_ERROR_CCK_RATE_ILLEGAL: Illegal rate [5212+]
* @AR5K_RX_PHY_ERROR_CCK_SERVICE: CCK Service (?) [5212+]
* @AR5K_RX_PHY_ERROR_CCK_RESTART: CCK Restart (?) [5212+]
*/
enum ath5k_phy_error_code {
AR5K_RX_PHY_ERROR_UNDERRUN = 0, /* Transmit underrun, [5210] No error */
AR5K_RX_PHY_ERROR_TIMING = 1, /* Timing error */
AR5K_RX_PHY_ERROR_PARITY = 2, /* Illegal parity */
AR5K_RX_PHY_ERROR_RATE = 3, /* Illegal rate */
AR5K_RX_PHY_ERROR_LENGTH = 4, /* Illegal length */
AR5K_RX_PHY_ERROR_RADAR = 5, /* Radar detect, [5210] 64 QAM rate */
AR5K_RX_PHY_ERROR_SERVICE = 6, /* Illegal service */
AR5K_RX_PHY_ERROR_TOR = 7, /* Transmit override receive */
/* these are specific to the 5212 */
AR5K_RX_PHY_ERROR_UNDERRUN = 0,
AR5K_RX_PHY_ERROR_TIMING = 1,
AR5K_RX_PHY_ERROR_PARITY = 2,
AR5K_RX_PHY_ERROR_RATE = 3,
AR5K_RX_PHY_ERROR_LENGTH = 4,
AR5K_RX_PHY_ERROR_RADAR = 5,
AR5K_RX_PHY_ERROR_SERVICE = 6,
AR5K_RX_PHY_ERROR_TOR = 7,
AR5K_RX_PHY_ERROR_OFDM_TIMING = 17,
AR5K_RX_PHY_ERROR_OFDM_SIGNAL_PARITY = 18,
AR5K_RX_PHY_ERROR_OFDM_RATE_ILLEGAL = 19,
@ -123,12 +147,14 @@ enum ath5k_phy_error_code {
AR5K_RX_PHY_ERROR_CCK_RESTART = 31,
};
/*
* 5210/5211 hardware 2-word TX control descriptor
/**
* struct ath5k_hw_2w_tx_ctl - 5210/5211 hardware 2-word TX control descriptor
* @tx_control_0: TX control word 0
* @tx_control_1: TX control word 1
*/
struct ath5k_hw_2w_tx_ctl {
u32 tx_control_0; /* TX control word 0 */
u32 tx_control_1; /* TX control word 1 */
u32 tx_control_0;
u32 tx_control_1;
} __packed __aligned(4);
/* TX control word 0 fields/flags */
@ -177,14 +203,18 @@ struct ath5k_hw_2w_tx_ctl {
#define AR5K_AR5210_TX_DESC_FRAME_TYPE_PIFS 4
#define AR5K_AR5211_TX_DESC_FRAME_TYPE_PRESP 4
/*
* 5212 hardware 4-word TX control descriptor
/**
* struct ath5k_hw_4w_tx_ctl - 5212 hardware 4-word TX control descriptor
* @tx_control_0: TX control word 0
* @tx_control_1: TX control word 1
* @tx_control_2: TX control word 2
* @tx_control_3: TX control word 3
*/
struct ath5k_hw_4w_tx_ctl {
u32 tx_control_0; /* TX control word 0 */
u32 tx_control_1; /* TX control word 1 */
u32 tx_control_2; /* TX control word 2 */
u32 tx_control_3; /* TX control word 3 */
u32 tx_control_0;
u32 tx_control_1;
u32 tx_control_2;
u32 tx_control_3;
} __packed __aligned(4);
/* TX control word 0 fields/flags */
@ -238,12 +268,14 @@ struct ath5k_hw_4w_tx_ctl {
#define AR5K_4W_TX_DESC_CTL3_RTS_CTS_RATE 0x01f00000 /* RTS or CTS rate */
#define AR5K_4W_TX_DESC_CTL3_RTS_CTS_RATE_S 20
/*
* Common TX status descriptor
/**
* struct ath5k_hw_tx_status - Common TX status descriptor
* @tx_status_0: TX status word 0
* @tx_status_1: TX status word 1
*/
struct ath5k_hw_tx_status {
u32 tx_status_0; /* TX status word 0 */
u32 tx_status_1; /* TX status word 1 */
u32 tx_status_0;
u32 tx_status_1;
} __packed __aligned(4);
/* TX status word 0 fields/flags */
@ -276,37 +308,47 @@ struct ath5k_hw_tx_status {
#define AR5K_DESC_TX_STATUS1_COMP_SUCCESS_5212 0x00800000 /* [5212] compression status */
#define AR5K_DESC_TX_STATUS1_XMIT_ANTENNA_5212 0x01000000 /* [5212] transmit antenna */
/*
* 5210/5211 hardware TX descriptor
/**
* struct ath5k_hw_5210_tx_desc - 5210/5211 hardware TX descriptor
* @tx_ctl: The &struct ath5k_hw_2w_tx_ctl
* @tx_stat: The &struct ath5k_hw_tx_status
*/
struct ath5k_hw_5210_tx_desc {
struct ath5k_hw_2w_tx_ctl tx_ctl;
struct ath5k_hw_tx_status tx_stat;
} __packed __aligned(4);
/*
* 5212 hardware TX descriptor
/**
* struct ath5k_hw_5212_tx_desc - 5212 hardware TX descriptor
* @tx_ctl: The &struct ath5k_hw_4w_tx_ctl
* @tx_stat: The &struct ath5k_hw_tx_status
*/
struct ath5k_hw_5212_tx_desc {
struct ath5k_hw_4w_tx_ctl tx_ctl;
struct ath5k_hw_tx_status tx_stat;
} __packed __aligned(4);
/*
* Common hardware RX descriptor
/**
* struct ath5k_hw_all_rx_desc - Common hardware RX descriptor
* @rx_ctl: The &struct ath5k_hw_rx_ctl
* @rx_stat: The &struct ath5k_hw_rx_status
*/
struct ath5k_hw_all_rx_desc {
struct ath5k_hw_rx_ctl rx_ctl;
struct ath5k_hw_rx_status rx_stat;
} __packed __aligned(4);
/*
* Atheros hardware DMA descriptor
/**
* struct ath5k_desc - Atheros hardware DMA descriptor
* @ds_link: Physical address of the next descriptor
* @ds_data: Physical address of data buffer (skb)
* @ud: Union containing hw_5xxx_tx_desc structs and hw_all_rx_desc
*
* This is read and written to by the hardware
*/
struct ath5k_desc {
u32 ds_link; /* physical address of the next descriptor */
u32 ds_data; /* physical address of data buffer (skb) */
u32 ds_link;
u32 ds_data;
union {
struct ath5k_hw_5210_tx_desc ds_tx5210;

View file

@ -20,16 +20,13 @@
* DMA and interrupt masking functions *
\*************************************/
/*
* dma.c - DMA and interrupt masking functions
/**
* DOC: DMA and interrupt masking functions
*
* Here we set up descriptor pointers (rxdp/txdp), start/stop the DMA engine
* and handle queue setup for the 5210 chipset (the rest are handled in qcu.c).
* We also set up the interrupt mask register (IMR) and read the various
* interrupt status registers (ISR).
*
* TODO: Handle SISR on 5211+ and introduce a function to return the queue
* number that generated the interrupt.
*/
#include "ath5k.h"
@ -42,22 +39,22 @@
\*********/
/**
* ath5k_hw_start_rx_dma - Start DMA receive
*
* ath5k_hw_start_rx_dma() - Start DMA receive
* @ah: The &struct ath5k_hw
*/
void ath5k_hw_start_rx_dma(struct ath5k_hw *ah)
void
ath5k_hw_start_rx_dma(struct ath5k_hw *ah)
{
ath5k_hw_reg_write(ah, AR5K_CR_RXE, AR5K_CR);
ath5k_hw_reg_read(ah, AR5K_CR);
}
/**
* ath5k_hw_stop_rx_dma - Stop DMA receive
*
* ath5k_hw_stop_rx_dma() - Stop DMA receive
* @ah: The &struct ath5k_hw
*/
static int ath5k_hw_stop_rx_dma(struct ath5k_hw *ah)
static int
ath5k_hw_stop_rx_dma(struct ath5k_hw *ah)
{
unsigned int i;
@ -79,24 +76,24 @@ static int ath5k_hw_stop_rx_dma(struct ath5k_hw *ah)
}
/**
* ath5k_hw_get_rxdp - Get RX Descriptor's address
*
* ath5k_hw_get_rxdp() - Get RX Descriptor's address
* @ah: The &struct ath5k_hw
*/
u32 ath5k_hw_get_rxdp(struct ath5k_hw *ah)
u32
ath5k_hw_get_rxdp(struct ath5k_hw *ah)
{
return ath5k_hw_reg_read(ah, AR5K_RXDP);
}
/**
* ath5k_hw_set_rxdp - Set RX Descriptor's address
*
* ath5k_hw_set_rxdp() - Set RX Descriptor's address
* @ah: The &struct ath5k_hw
* @phys_addr: RX descriptor address
*
* Returns -EIO if rx is active
*/
int ath5k_hw_set_rxdp(struct ath5k_hw *ah, u32 phys_addr)
int
ath5k_hw_set_rxdp(struct ath5k_hw *ah, u32 phys_addr)
{
if (ath5k_hw_reg_read(ah, AR5K_CR) & AR5K_CR_RXE) {
ATH5K_DBG(ah, ATH5K_DEBUG_DMA,
@ -114,8 +111,7 @@ int ath5k_hw_set_rxdp(struct ath5k_hw *ah, u32 phys_addr)
\**********/
/**
* ath5k_hw_start_tx_dma - Start DMA transmit for a specific queue
*
* ath5k_hw_start_tx_dma() - Start DMA transmit for a specific queue
* @ah: The &struct ath5k_hw
* @queue: The hw queue number
*
@ -128,7 +124,8 @@ int ath5k_hw_set_rxdp(struct ath5k_hw *ah, u32 phys_addr)
* NOTE: Must be called after setting up tx control descriptor for that
* queue (see below).
*/
int ath5k_hw_start_tx_dma(struct ath5k_hw *ah, unsigned int queue)
int
ath5k_hw_start_tx_dma(struct ath5k_hw *ah, unsigned int queue)
{
u32 tx_queue;
@ -177,17 +174,16 @@ int ath5k_hw_start_tx_dma(struct ath5k_hw *ah, unsigned int queue)
}
/**
* ath5k_hw_stop_tx_dma - Stop DMA transmit on a specific queue
*
* ath5k_hw_stop_tx_dma() - Stop DMA transmit on a specific queue
* @ah: The &struct ath5k_hw
* @queue: The hw queue number
*
* Stop DMA transmit on a specific hw queue and drain the queue so we don't
* have any pending frames. Returns -EBUSY if we still have pending frames,
* -EINVAL if queue number is out of range or inactive.
*
*/
static int ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue)
static int
ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue)
{
unsigned int i = 40;
u32 tx_queue, pending;
@ -320,14 +316,14 @@ static int ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue)
}
/**
* ath5k_hw_stop_beacon_queue - Stop beacon queue
*
* @ah The &struct ath5k_hw
* @queue The queue number
* ath5k_hw_stop_beacon_queue() - Stop beacon queue
* @ah: The &struct ath5k_hw
* @queue: The queue number
*
* Returns -EIO if queue didn't stop
*/
int ath5k_hw_stop_beacon_queue(struct ath5k_hw *ah, unsigned int queue)
int
ath5k_hw_stop_beacon_queue(struct ath5k_hw *ah, unsigned int queue)
{
int ret;
ret = ath5k_hw_stop_tx_dma(ah, queue);
@ -340,8 +336,7 @@ int ath5k_hw_stop_beacon_queue(struct ath5k_hw *ah, unsigned int queue)
}
/**
* ath5k_hw_get_txdp - Get TX Descriptor's address for a specific queue
*
* ath5k_hw_get_txdp() - Get TX Descriptor's address for a specific queue
* @ah: The &struct ath5k_hw
* @queue: The hw queue number
*
@ -352,7 +347,8 @@ int ath5k_hw_stop_beacon_queue(struct ath5k_hw *ah, unsigned int queue)
*
* XXX: Is TXDP read and clear ?
*/
u32 ath5k_hw_get_txdp(struct ath5k_hw *ah, unsigned int queue)
u32
ath5k_hw_get_txdp(struct ath5k_hw *ah, unsigned int queue)
{
u16 tx_reg;
@ -382,10 +378,10 @@ u32 ath5k_hw_get_txdp(struct ath5k_hw *ah, unsigned int queue)
}
/**
* ath5k_hw_set_txdp - Set TX Descriptor's address for a specific queue
*
* ath5k_hw_set_txdp() - Set TX Descriptor's address for a specific queue
* @ah: The &struct ath5k_hw
* @queue: The hw queue number
* @phys_addr: The physical address
*
* Set TX descriptor's address for a specific queue. For 5210 we ignore
* the queue number and we use tx queue type since we only have 2 queues
@ -394,7 +390,8 @@ u32 ath5k_hw_get_txdp(struct ath5k_hw *ah, unsigned int queue)
* Returns -EINVAL if queue type is invalid for 5210 and -EIO if queue is still
* active.
*/
int ath5k_hw_set_txdp(struct ath5k_hw *ah, unsigned int queue, u32 phys_addr)
int
ath5k_hw_set_txdp(struct ath5k_hw *ah, unsigned int queue, u32 phys_addr)
{
u16 tx_reg;
@ -435,8 +432,7 @@ int ath5k_hw_set_txdp(struct ath5k_hw *ah, unsigned int queue, u32 phys_addr)
}
/**
* ath5k_hw_update_tx_triglevel - Update tx trigger level
*
* ath5k_hw_update_tx_triglevel() - Update tx trigger level
* @ah: The &struct ath5k_hw
* @increase: Flag to force increase of trigger level
*
@ -444,15 +440,15 @@ int ath5k_hw_set_txdp(struct ath5k_hw *ah, unsigned int queue, u32 phys_addr)
* buffer (aka FIFO threshold) that is used to indicate when PCU flushes
* the buffer and transmits its data. Lowering this results in sending small
* frames more quickly but can lead to tx underruns; raising it a lot can
* result in other problems (i think bmiss is related). Right now we start with
* the lowest possible (64Bytes) and if we get tx underrun we increase it using
* the increase flag. Returns -EIO if we have reached maximum/minimum.
* result in other problems. Right now we start with the lowest possible
* (64Bytes) and if we get tx underrun we increase it using the increase
* flag. Returns -EIO if we have reached maximum/minimum.
*
* XXX: Link this with tx DMA size ?
* XXX: Use it to save interrupts ?
* TODO: Needs testing, i think it's related to bmiss...
* XXX2: Use it to save interrupts ?
*/
int ath5k_hw_update_tx_triglevel(struct ath5k_hw *ah, bool increase)
int
ath5k_hw_update_tx_triglevel(struct ath5k_hw *ah, bool increase)
{
u32 trigger_level, imr;
int ret = -EIO;
@ -498,21 +494,20 @@ done:
\*******************/
/**
* ath5k_hw_is_intr_pending - Check if we have pending interrupts
*
* ath5k_hw_is_intr_pending() - Check if we have pending interrupts
* @ah: The &struct ath5k_hw
*
* Check if we have pending interrupts to process. Returns 1 if we
* have pending interrupts and 0 if we don't.
*/
bool ath5k_hw_is_intr_pending(struct ath5k_hw *ah)
bool
ath5k_hw_is_intr_pending(struct ath5k_hw *ah)
{
return ath5k_hw_reg_read(ah, AR5K_INTPEND) == 1 ? 1 : 0;
}
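A minimal sketch of how these two helpers typically pair up in a driver interrupt handler (handler and variable names are illustrative, not part of this file):

static irqreturn_t example_intr(int irq, void *dev_id)
{
	struct ath5k_hw *ah = dev_id;
	enum ath5k_int status;

	if (!ath5k_hw_is_intr_pending(ah))
		return IRQ_NONE;

	do {
		/* Reading also clears the bits we got, see get_isr below */
		ath5k_hw_get_isr(ah, &status);
		/* ... dispatch based on the status bits ... */
	} while (ath5k_hw_is_intr_pending(ah));

	return IRQ_HANDLED;
}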
/**
* ath5k_hw_get_isr - Get interrupt status
*
* ath5k_hw_get_isr() - Get interrupt status
* @ah: The &struct ath5k_hw
* @interrupt_mask: Driver's interrupt mask used to filter out
* interrupts in sw.
@ -523,62 +518,162 @@ bool ath5k_hw_is_intr_pending(struct ath5k_hw *ah)
* being mapped on some standard non hw-specific positions
* (check out &ath5k_int).
*
* NOTE: We use read-and-clear register, so after this function is called ISR
* is zeroed.
* NOTE: We do write-to-clear, so the active PISR/SISR bits at the time this
* function gets called are cleared on return.
*/
int ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask)
int
ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask)
{
u32 data;
u32 data = 0;
/*
* Read interrupt status from the Interrupt Status register
* on 5210
* Read interrupt status from Primary Interrupt
* Register.
*
* Note: PISR/SISR Not available on 5210
*/
if (ah->ah_version == AR5K_AR5210) {
data = ath5k_hw_reg_read(ah, AR5K_ISR);
if (unlikely(data == AR5K_INT_NOCARD)) {
*interrupt_mask = data;
u32 isr = 0;
isr = ath5k_hw_reg_read(ah, AR5K_ISR);
if (unlikely(isr == AR5K_INT_NOCARD)) {
*interrupt_mask = isr;
return -ENODEV;
}
} else {
/*
* Read interrupt status from Interrupt
* Status Register shadow copy (Read And Clear)
*
* Note: PISR/SISR Not available on 5210
* Filter out the non-common bits from the interrupt
* status.
*/
data = ath5k_hw_reg_read(ah, AR5K_RAC_PISR);
if (unlikely(data == AR5K_INT_NOCARD)) {
*interrupt_mask = data;
*interrupt_mask = (isr & AR5K_INT_COMMON) & ah->ah_imr;
/* Handle INT_FATAL */
if (unlikely(isr & (AR5K_ISR_SSERR | AR5K_ISR_MCABT
| AR5K_ISR_DPERR)))
*interrupt_mask |= AR5K_INT_FATAL;
/*
* XXX: BMISS interrupts may occur after association.
* I found this on 5210 code but it needs testing. If this is
* true we should disable them before assoc and re-enable them
* after a successful assoc + some jiffies.
interrupt_mask &= ~AR5K_INT_BMISS;
*/
data = isr;
} else {
u32 pisr = 0;
u32 pisr_clear = 0;
u32 sisr0 = 0;
u32 sisr1 = 0;
u32 sisr2 = 0;
u32 sisr3 = 0;
u32 sisr4 = 0;
/* Read PISR and SISRs... */
pisr = ath5k_hw_reg_read(ah, AR5K_PISR);
if (unlikely(pisr == AR5K_INT_NOCARD)) {
*interrupt_mask = pisr;
return -ENODEV;
}
}
/*
* Get abstract interrupt mask (driver-compatible)
*/
*interrupt_mask = (data & AR5K_INT_COMMON) & ah->ah_imr;
sisr0 = ath5k_hw_reg_read(ah, AR5K_SISR0);
sisr1 = ath5k_hw_reg_read(ah, AR5K_SISR1);
sisr2 = ath5k_hw_reg_read(ah, AR5K_SISR2);
sisr3 = ath5k_hw_reg_read(ah, AR5K_SISR3);
sisr4 = ath5k_hw_reg_read(ah, AR5K_SISR4);
if (ah->ah_version != AR5K_AR5210) {
u32 sisr2 = ath5k_hw_reg_read(ah, AR5K_RAC_SISR2);
/*
* PISR holds the logical OR of interrupt bits
* from SISR registers:
*
* TXOK and TXDESC -> Logical OR of TXOK and TXDESC
* per-queue bits on SISR0
*
* TXERR and TXEOL -> Logical OR of TXERR and TXEOL
* per-queue bits on SISR1
*
* TXURN -> Logical OR of TXURN per-queue bits on SISR2
*
* HIUERR -> Logical OR of MCABT, SSERR and DPERR bits on SISR2
*
* BCNMISC -> Logical OR of TIM, CAB_END, DTIM_SYNC
* BCN_TIMEOUT, CAB_TIMEOUT and DTIM
* (and TSFOOR ?) bits on SISR2
*
* QCBRORN and QCBRURN -> Logical OR of QCBRORN and
* QCBRURN per-queue bits on SISR3
* QTRIG -> Logical OR of QTRIG per-queue bits on SISR4
*
* If we clear these bits on PISR we'll also clear all
* related bits from SISRs, e.g. if we write the TXOK bit on
* PISR we'll clear all TXOK bits from SISR0, so if a new TXOK
* interrupt got fired for another queue while we were reading
* the interrupt registers and we write back the TXOK bit on
* PISR we'll lose it. So make sure that we don't write back
* on PISR any bits that come from SISRs. Clearing them from
* SISRs will also clear PISR so no need to worry here.
*/
/*HIU = Host Interface Unit (PCI etc)*/
if (unlikely(data & (AR5K_ISR_HIUERR)))
*interrupt_mask |= AR5K_INT_FATAL;
pisr_clear = pisr & ~AR5K_ISR_BITS_FROM_SISRS;
/*Beacon Not Ready*/
if (unlikely(data & (AR5K_ISR_BNR)))
*interrupt_mask |= AR5K_INT_BNR;
/*
* Write to clear them...
* Note: This means that each bit we write back
* to the registers will get cleared, leaving the
* rest unaffected. So this won't affect new interrupts
* we didn't catch while reading/processing; we'll get
* them next time get_isr gets called.
*/
ath5k_hw_reg_write(ah, sisr0, AR5K_SISR0);
ath5k_hw_reg_write(ah, sisr1, AR5K_SISR1);
ath5k_hw_reg_write(ah, sisr2, AR5K_SISR2);
ath5k_hw_reg_write(ah, sisr3, AR5K_SISR3);
ath5k_hw_reg_write(ah, sisr4, AR5K_SISR4);
ath5k_hw_reg_write(ah, pisr_clear, AR5K_PISR);
/* Flush previous write */
ath5k_hw_reg_read(ah, AR5K_PISR);
if (unlikely(sisr2 & (AR5K_SISR2_SSERR |
AR5K_SISR2_DPERR |
AR5K_SISR2_MCABT)))
*interrupt_mask |= AR5K_INT_FATAL;
/*
* Filter out the non-common bits from the interrupt
* status.
*/
*interrupt_mask = (pisr & AR5K_INT_COMMON) & ah->ah_imr;
if (data & AR5K_ISR_TIM)
/* We treat TXOK, TXDESC, TXERR and TXEOL
* the same way (schedule the tx tasklet)
* so we track them all together per queue */
if (pisr & AR5K_ISR_TXOK)
ah->ah_txq_isr_txok_all |= AR5K_REG_MS(sisr0,
AR5K_SISR0_QCU_TXOK);
if (pisr & AR5K_ISR_TXDESC)
ah->ah_txq_isr_txok_all |= AR5K_REG_MS(sisr0,
AR5K_SISR0_QCU_TXDESC);
if (pisr & AR5K_ISR_TXERR)
ah->ah_txq_isr_txok_all |= AR5K_REG_MS(sisr1,
AR5K_SISR1_QCU_TXERR);
if (pisr & AR5K_ISR_TXEOL)
ah->ah_txq_isr_txok_all |= AR5K_REG_MS(sisr1,
AR5K_SISR1_QCU_TXEOL);
/* Currently this is not very useful since we treat
* all queues the same way if we get a TXURN (update
* tx trigger level) but we might need it later on */
if (pisr & AR5K_ISR_TXURN)
ah->ah_txq_isr_txurn |= AR5K_REG_MS(sisr2,
AR5K_SISR2_QCU_TXURN);
/* Misc Beacon related interrupts */
/* For AR5211 */
if (pisr & AR5K_ISR_TIM)
*interrupt_mask |= AR5K_INT_TIM;
if (data & AR5K_ISR_BCNMISC) {
/* For AR5212+ */
if (pisr & AR5K_ISR_BCNMISC) {
if (sisr2 & AR5K_SISR2_TIM)
*interrupt_mask |= AR5K_INT_TIM;
if (sisr2 & AR5K_SISR2_DTIM)
@ -591,63 +686,39 @@ int ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask)
*interrupt_mask |= AR5K_INT_CAB_TIMEOUT;
}
if (data & AR5K_ISR_RXDOPPLER)
*interrupt_mask |= AR5K_INT_RX_DOPPLER;
if (data & AR5K_ISR_QCBRORN) {
*interrupt_mask |= AR5K_INT_QCBRORN;
ah->ah_txq_isr |= AR5K_REG_MS(
ath5k_hw_reg_read(ah, AR5K_RAC_SISR3),
AR5K_SISR3_QCBRORN);
}
if (data & AR5K_ISR_QCBRURN) {
*interrupt_mask |= AR5K_INT_QCBRURN;
ah->ah_txq_isr |= AR5K_REG_MS(
ath5k_hw_reg_read(ah, AR5K_RAC_SISR3),
AR5K_SISR3_QCBRURN);
}
if (data & AR5K_ISR_QTRIG) {
*interrupt_mask |= AR5K_INT_QTRIG;
ah->ah_txq_isr |= AR5K_REG_MS(
ath5k_hw_reg_read(ah, AR5K_RAC_SISR4),
AR5K_SISR4_QTRIG);
}
/* Below interrupts are unlikely to happen */
if (data & AR5K_ISR_TXOK)
ah->ah_txq_isr |= AR5K_REG_MS(
ath5k_hw_reg_read(ah, AR5K_RAC_SISR0),
AR5K_SISR0_QCU_TXOK);
if (data & AR5K_ISR_TXDESC)
ah->ah_txq_isr |= AR5K_REG_MS(
ath5k_hw_reg_read(ah, AR5K_RAC_SISR0),
AR5K_SISR0_QCU_TXDESC);
if (data & AR5K_ISR_TXERR)
ah->ah_txq_isr |= AR5K_REG_MS(
ath5k_hw_reg_read(ah, AR5K_RAC_SISR1),
AR5K_SISR1_QCU_TXERR);
if (data & AR5K_ISR_TXEOL)
ah->ah_txq_isr |= AR5K_REG_MS(
ath5k_hw_reg_read(ah, AR5K_RAC_SISR1),
AR5K_SISR1_QCU_TXEOL);
if (data & AR5K_ISR_TXURN)
ah->ah_txq_isr |= AR5K_REG_MS(
ath5k_hw_reg_read(ah, AR5K_RAC_SISR2),
AR5K_SISR2_QCU_TXURN);
} else {
if (unlikely(data & (AR5K_ISR_SSERR | AR5K_ISR_MCABT
| AR5K_ISR_HIUERR | AR5K_ISR_DPERR)))
/* HIU = Host Interface Unit (PCI etc)
* Can be one of MCABT, SSERR, DPERR from SISR2 */
if (unlikely(pisr & (AR5K_ISR_HIUERR)))
*interrupt_mask |= AR5K_INT_FATAL;
/*
* XXX: BMISS interrupts may occur after association.
* I found this on 5210 code but it needs testing. If this is
* true we should disable them before assoc and re-enable them
* after a successful assoc + some jiffies.
interrupt_mask &= ~AR5K_INT_BMISS;
*/
/*Beacon Not Ready*/
if (unlikely(pisr & (AR5K_ISR_BNR)))
*interrupt_mask |= AR5K_INT_BNR;
/* A queue got CBR overrun */
if (unlikely(pisr & (AR5K_ISR_QCBRORN))) {
*interrupt_mask |= AR5K_INT_QCBRORN;
ah->ah_txq_isr_qcborn |= AR5K_REG_MS(sisr3,
AR5K_SISR3_QCBRORN);
}
/* A queue got CBR underrun */
if (unlikely(pisr & (AR5K_ISR_QCBRURN))) {
*interrupt_mask |= AR5K_INT_QCBRURN;
ah->ah_txq_isr_qcburn |= AR5K_REG_MS(sisr3,
AR5K_SISR3_QCBRURN);
}
/* A queue got triggered */
if (unlikely(pisr & (AR5K_ISR_QTRIG))) {
*interrupt_mask |= AR5K_INT_QTRIG;
ah->ah_txq_isr_qtrig |= AR5K_REG_MS(sisr4,
AR5K_SISR4_QTRIG);
}
data = pisr;
}
/*
@ -661,8 +732,7 @@ int ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask)
}
/**
* ath5k_hw_set_imr - Set interrupt mask
*
* ath5k_hw_set_imr() - Set interrupt mask
* @ah: The &struct ath5k_hw
* @new_mask: The new interrupt mask to be set
*
@ -670,7 +740,8 @@ int ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask)
* ath5k_int bits to hw-specific bits to remove abstraction and writing
* Interrupt Mask Register.
*/
enum ath5k_int ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask)
enum ath5k_int
ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask)
{
enum ath5k_int old_mask, int_mask;
@ -697,16 +768,14 @@ enum ath5k_int ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask)
u32 simr2 = ath5k_hw_reg_read(ah, AR5K_SIMR2)
& AR5K_SIMR2_QCU_TXURN;
/* Fatal interrupt abstraction for 5211+ */
if (new_mask & AR5K_INT_FATAL) {
int_mask |= AR5K_IMR_HIUERR;
simr2 |= (AR5K_SIMR2_MCABT | AR5K_SIMR2_SSERR
| AR5K_SIMR2_DPERR);
}
/*Beacon Not Ready*/
if (new_mask & AR5K_INT_BNR)
int_mask |= AR5K_INT_BNR;
/* Misc beacon related interrupts */
if (new_mask & AR5K_INT_TIM)
int_mask |= AR5K_IMR_TIM;
@ -721,8 +790,9 @@ enum ath5k_int ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask)
if (new_mask & AR5K_INT_CAB_TIMEOUT)
simr2 |= AR5K_SISR2_CAB_TIMEOUT;
if (new_mask & AR5K_INT_RX_DOPPLER)
int_mask |= AR5K_IMR_RXDOPPLER;
/*Beacon Not Ready*/
if (new_mask & AR5K_INT_BNR)
int_mask |= AR5K_INT_BNR;
/* Note: Per queue interrupt masks
* are set via ath5k_hw_reset_tx_queue() (qcu.c) */
@ -730,10 +800,12 @@ enum ath5k_int ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask)
ath5k_hw_reg_write(ah, simr2, AR5K_SIMR2);
} else {
/* Fatal interrupt abstraction for 5210 */
if (new_mask & AR5K_INT_FATAL)
int_mask |= (AR5K_IMR_SSERR | AR5K_IMR_MCABT
| AR5K_IMR_HIUERR | AR5K_IMR_DPERR);
/* Only common interrupts left for 5210 (no SIMRs) */
ath5k_hw_reg_write(ah, int_mask, AR5K_IMR);
}
@ -760,8 +832,7 @@ enum ath5k_int ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask)
\********************/
/**
* ath5k_hw_dma_init - Initialize DMA unit
*
* ath5k_hw_dma_init() - Initialize DMA unit
* @ah: The &struct ath5k_hw
*
* Set DMA size and pre-enable interrupts
@ -770,7 +841,8 @@ enum ath5k_int ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask)
*
* XXX: Save/restore RXDP/TXDP registers ?
*/
void ath5k_hw_dma_init(struct ath5k_hw *ah)
void
ath5k_hw_dma_init(struct ath5k_hw *ah)
{
/*
* Set Rx/Tx DMA Configuration
@ -799,8 +871,7 @@ void ath5k_hw_dma_init(struct ath5k_hw *ah)
}
/**
* ath5k_hw_dma_stop - stop DMA unit
*
* ath5k_hw_dma_stop() - stop DMA unit
* @ah: The &struct ath5k_hw
*
* Stop tx/rx DMA and interrupts. Returns
@ -810,7 +881,8 @@ void ath5k_hw_dma_init(struct ath5k_hw *ah)
* stuck frames on tx queues, only a reset
* can fix that.
*/
int ath5k_hw_dma_stop(struct ath5k_hw *ah)
int
ath5k_hw_dma_stop(struct ath5k_hw *ah)
{
int i, qmax, err;
err = 0;

View file

@ -24,10 +24,33 @@
#include "reg.h"
#include "debug.h"
/*
* Set led state
/**
* DOC: GPIO/LED functions
*
* Here we control the 6 bidirectional GPIO pins provided by the hw.
* We can set a GPIO pin to be an input or an output pin on GPIO control
* register and then read or set its status from GPIO data input/output
* registers.
*
* We also control the two LED pins provided by the hw: LED_0 is our
* "power" LED and LED_1 is our "network activity" LED, but many scenarios
* are available from hw. Vendors might also provide LEDs connected to the
* GPIO pins; we handle them through the LED subsystem in led.c
*/
void ath5k_hw_set_ledstate(struct ath5k_hw *ah, unsigned int state)
/**
* ath5k_hw_set_ledstate() - Set led state
* @ah: The &struct ath5k_hw
* @state: One of AR5K_LED_*
*
* Used to set the LED blinking state. This only
* works for the LED connected to the LED_0, LED_1 pins,
* not the GPIO-based ones.
*/
void
ath5k_hw_set_ledstate(struct ath5k_hw *ah, unsigned int state)
{
u32 led;
/*5210 has different led mode handling*/
@ -74,10 +97,13 @@ void ath5k_hw_set_ledstate(struct ath5k_hw *ah, unsigned int state)
AR5K_REG_ENABLE_BITS(ah, AR5K_PCICFG, led_5210);
}
/*
* Set GPIO inputs
/**
* ath5k_hw_set_gpio_input() - Set GPIO inputs
* @ah: The &struct ath5k_hw
* @gpio: GPIO pin to set as input
*/
int ath5k_hw_set_gpio_input(struct ath5k_hw *ah, u32 gpio)
int
ath5k_hw_set_gpio_input(struct ath5k_hw *ah, u32 gpio)
{
if (gpio >= AR5K_NUM_GPIO)
return -EINVAL;
@ -89,10 +115,13 @@ int ath5k_hw_set_gpio_input(struct ath5k_hw *ah, u32 gpio)
return 0;
}
/*
* Set GPIO outputs
/**
* ath5k_hw_set_gpio_output() - Set GPIO outputs
* @ah: The &struct ath5k_hw
* @gpio: The GPIO pin to set as output
*/
int ath5k_hw_set_gpio_output(struct ath5k_hw *ah, u32 gpio)
int
ath5k_hw_set_gpio_output(struct ath5k_hw *ah, u32 gpio)
{
if (gpio >= AR5K_NUM_GPIO)
return -EINVAL;
@ -104,10 +133,13 @@ int ath5k_hw_set_gpio_output(struct ath5k_hw *ah, u32 gpio)
return 0;
}
/*
* Get GPIO state
/**
* ath5k_hw_get_gpio() - Get GPIO state
* @ah: The &struct ath5k_hw
* @gpio: The GPIO pin to read
*/
u32 ath5k_hw_get_gpio(struct ath5k_hw *ah, u32 gpio)
u32
ath5k_hw_get_gpio(struct ath5k_hw *ah, u32 gpio)
{
if (gpio >= AR5K_NUM_GPIO)
return 0xffffffff;
@ -117,10 +149,14 @@ u32 ath5k_hw_get_gpio(struct ath5k_hw *ah, u32 gpio)
0x1;
}
/*
* Set GPIO state
/**
* ath5k_hw_set_gpio() - Set GPIO state
* @ah: The &struct ath5k_hw
* @gpio: The GPIO pin to set
* @val: Value to set (boolean)
*/
int ath5k_hw_set_gpio(struct ath5k_hw *ah, u32 gpio, u32 val)
int
ath5k_hw_set_gpio(struct ath5k_hw *ah, u32 gpio, u32 val)
{
u32 data;
@ -138,10 +174,19 @@ int ath5k_hw_set_gpio(struct ath5k_hw *ah, u32 gpio, u32 val)
return 0;
}
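A minimal sketch of driving a vendor LED wired to one of these GPIO pins (pin number and polarity are illustrative; on real boards the pin comes from EEPROM or platform data):

static void example_gpio_led_on(struct ath5k_hw *ah)
{
	const u32 led_pin = 1;	/* hypothetical pin */

	ath5k_hw_set_gpio_output(ah, led_pin);
	ath5k_hw_set_gpio(ah, led_pin, 1);	/* some boards use inverted polarity */
}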
/*
* Initialize the GPIO interrupt (RFKill switch)
/**
* ath5k_hw_set_gpio_intr() - Initialize the GPIO interrupt (RFKill switch)
* @ah: The &struct ath5k_hw
* @gpio: The GPIO pin to use
* @interrupt_level: True to generate interrupt on active pin (high)
*
* This function is used to set up the GPIO interrupt for the hw RFKill switch.
* That switch is connected to a GPIO pin and its number is stored in the EEPROM.
* It can either open or close the circuit to indicate that we should disable
* RF/Wireless to save power (we also get that from EEPROM).
*/
void ath5k_hw_set_gpio_intr(struct ath5k_hw *ah, unsigned int gpio,
void
ath5k_hw_set_gpio_intr(struct ath5k_hw *ah, unsigned int gpio,
u32 interrupt_level)
{
u32 data;

View file

@ -23,24 +23,27 @@
#include "reg.h"
#include "debug.h"
/*
* Mode-independent initial register writes
/**
* struct ath5k_ini - Mode-independent initial register writes
* @ini_register: Register address
* @ini_value: Default value
* @ini_mode: 0 to write, 1 to read (and clear)
*/
struct ath5k_ini {
u16 ini_register;
u32 ini_value;
enum {
AR5K_INI_WRITE = 0, /* Default */
AR5K_INI_READ = 1, /* Cleared on read */
AR5K_INI_READ = 1,
} ini_mode;
};
/*
* Mode specific initial register values
/**
* struct ath5k_ini_mode - Mode specific initial register values
* @mode_register: Register address
* @mode_value: Set of values for each enum ath5k_driver_mode
*/
struct ath5k_ini_mode {
u16 mode_register;
u32 mode_value[3];
@ -386,11 +389,10 @@ static const struct ath5k_ini ar5211_ini[] = {
/* Initial mode-specific settings for AR5211
* 5211 supports OFDM-only g (draft g) but we
* need to test it !
*/
* need to test it ! */
static const struct ath5k_ini_mode ar5211_ini_mode[] = {
{ AR5K_TXCFG,
/* A/XR B G */
/* A B G */
{ 0x00000015, 0x0000001d, 0x00000015 } },
{ AR5K_QUEUE_DFS_LOCAL_IFS(0),
{ 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
@ -460,7 +462,7 @@ static const struct ath5k_ini_mode ar5211_ini_mode[] = {
{ 0x00000010, 0x00000010, 0x00000010 } },
};
/* Initial register settings for AR5212 */
/* Initial register settings for AR5212 and newer chips */
static const struct ath5k_ini ar5212_ini_common_start[] = {
{ AR5K_RXDP, 0x00000000 },
{ AR5K_RXCFG, 0x00000005 },
@ -724,7 +726,8 @@ static const struct ath5k_ini_mode ar5212_ini_mode_start[] = {
{ 0x00000000, 0x00000000, 0x00000108 } },
};
/* Initial mode-specific settings for AR5212 + RF5111 (Written after ar5212_ini) */
/* Initial mode-specific settings for AR5212 + RF5111
* (Written after ar5212_ini) */
static const struct ath5k_ini_mode rf5111_ini_mode_end[] = {
{ AR5K_TXCFG,
/* A/XR B G */
@ -757,6 +760,7 @@ static const struct ath5k_ini_mode rf5111_ini_mode_end[] = {
{ 0x1883800a, 0x1873800a, 0x1883800a } },
};
/* Common for all modes */
static const struct ath5k_ini rf5111_ini_common_end[] = {
{ AR5K_DCU_FP, 0x00000000 },
{ AR5K_PHY_AGC, 0x00000000 },
@ -774,7 +778,9 @@ static const struct ath5k_ini rf5111_ini_common_end[] = {
{ 0xa23c, 0x13c889af },
};
/* Initial mode-specific settings for AR5212 + RF5112 (Written after ar5212_ini) */
/* Initial mode-specific settings for AR5212 + RF5112
* (Written after ar5212_ini) */
static const struct ath5k_ini_mode rf5112_ini_mode_end[] = {
{ AR5K_TXCFG,
/* A/XR B G */
@ -825,7 +831,9 @@ static const struct ath5k_ini rf5112_ini_common_end[] = {
{ 0xa23c, 0x13c889af },
};
/* Initial mode-specific settings for RF5413/5414 (Written after ar5212_ini) */
/* Initial mode-specific settings for RF5413/5414
* (Written after ar5212_ini) */
static const struct ath5k_ini_mode rf5413_ini_mode_end[] = {
{ AR5K_TXCFG,
/* A/XR B G */
@ -963,7 +971,8 @@ static const struct ath5k_ini rf5413_ini_common_end[] = {
{ 0xa384, 0xf3307ff0 },
};
/* Initial mode-specific settings for RF2413/2414 (Written after ar5212_ini) */
/* Initial mode-specific settings for RF2413/2414
* (Written after ar5212_ini) */
/* XXX: a mode ? */
static const struct ath5k_ini_mode rf2413_ini_mode_end[] = {
{ AR5K_TXCFG,
@ -1085,7 +1094,8 @@ static const struct ath5k_ini rf2413_ini_common_end[] = {
{ 0xa384, 0xf3307ff0 },
};
/* Initial mode-specific settings for RF2425 (Written after ar5212_ini) */
/* Initial mode-specific settings for RF2425
* (Written after ar5212_ini) */
/* XXX: a mode ? */
static const struct ath5k_ini_mode rf2425_ini_mode_end[] = {
{ AR5K_TXCFG,
@ -1357,10 +1367,15 @@ static const struct ath5k_ini rf5112_ini_bbgain[] = {
};
/*
* Write initial register dump
/**
* ath5k_hw_ini_registers() - Write initial register dump common for all modes
* @ah: The &struct ath5k_hw
* @size: Dump size
* @ini_regs: The array of &struct ath5k_ini
* @skip_pcu: Skip PCU registers
*/
static void ath5k_hw_ini_registers(struct ath5k_hw *ah, unsigned int size,
static void
ath5k_hw_ini_registers(struct ath5k_hw *ah, unsigned int size,
const struct ath5k_ini *ini_regs, bool skip_pcu)
{
unsigned int i;
@ -1388,7 +1403,15 @@ static void ath5k_hw_ini_registers(struct ath5k_hw *ah, unsigned int size,
}
}
static void ath5k_hw_ini_mode_registers(struct ath5k_hw *ah,
/**
* ath5k_hw_ini_mode_registers() - Write initial mode-specific register dump
* @ah: The &struct ath5k_hw
* @size: Dump size
* @ini_mode: The array of &struct ath5k_ini_mode
* @mode: One of enum ath5k_driver_mode
*/
static void
ath5k_hw_ini_mode_registers(struct ath5k_hw *ah,
unsigned int size, const struct ath5k_ini_mode *ini_mode,
u8 mode)
{
@ -1402,7 +1425,17 @@ static void ath5k_hw_ini_mode_registers(struct ath5k_hw *ah,
}
int ath5k_hw_write_initvals(struct ath5k_hw *ah, u8 mode, bool skip_pcu)
/**
* ath5k_hw_write_initvals() - Write initial chip-specific register dump
* @ah: The &struct ath5k_hw
* @mode: One of enum ath5k_driver_mode
* @skip_pcu: Skip PCU registers
*
* Write initial chip-specific register dump, to get the chipset on a
* clean and ready-to-work state after warm reset.
*/
int
ath5k_hw_write_initvals(struct ath5k_hw *ah, u8 mode, bool skip_pcu)
{
/*
* Write initial register settings

View file

@ -98,7 +98,7 @@ ath5k_pci_eeprom_read(struct ath_common *common, u32 offset, u16 *data)
0xffff);
return true;
}
udelay(15);
usleep_range(15, 20);
}
return false;

View file

@ -30,11 +30,47 @@
#include "reg.h"
#include "debug.h"
/*
/**
* DOC: Protocol Control Unit (PCU) functions
*
* The Protocol Control Unit is responsible for maintaining various protocol
* properties before a frame is sent and after a frame is received to/from
* the baseband. To be more specific, PCU handles:
*
* - Buffering of RX and TX frames (after QCU/DCUs)
*
* - Encrypting and decrypting (using the built-in engine)
*
* - Generating ACKs, RTS/CTS frames
*
* - Maintaining TSF
*
* - FCS
*
* - Updating beacon data (with TSF etc)
*
* - Generating virtual CCA
*
* - RX/Multicast filtering
*
* - BSSID filtering
*
* - Various statistics
*
* - Different operating modes: AP, STA, IBSS
*
* Note: Most of these functions can be tweaked/bypassed so you can do
* them in sw above for debugging or research. For more info check out the PCU
* registers in reg.h.
*/
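A minimal bring-up sketch using the PCU helpers documented in this file (interface type and variable names are illustrative):

static void example_pcu_bringup(struct ath5k_hw *ah, const u8 *mac)
{
	ath5k_hw_set_opmode(ah, NL80211_IFTYPE_STATION);
	ath5k_hw_set_lladdr(ah, mac);	/* station id */
	ath5k_hw_set_bssid(ah);		/* BSSID/mask from the common ath struct */
}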
/**
* DOC: ACK rates
*
* AR5212+ can use higher rates for ack transmission
* based on current tx rate instead of the base rate.
* It does this to make better use of the channel.
* This is a mapping between G rates (that cover both
* There is a mapping between G rates (that cover both
* CCK and OFDM) and ack rates that we use when setting
* rate -> duration table. This mapping is hw-based so
* don't change anything.
@ -63,17 +99,18 @@ static const unsigned int ack_rates_high[] =
\*******************/
/**
* ath5k_hw_get_frame_duration - Get tx time of a frame
*
* ath5k_hw_get_frame_duration() - Get tx time of a frame
* @ah: The &struct ath5k_hw
* @len: Frame's length in bytes
* @rate: The @struct ieee80211_rate
* @shortpre: Indicate short preamble
*
* Calculate tx duration of a frame given its rate and length.
* It extends ieee80211_generic_frame_duration for non-standard
* bwmodes.
*/
int ath5k_hw_get_frame_duration(struct ath5k_hw *ah,
int
ath5k_hw_get_frame_duration(struct ath5k_hw *ah,
int len, struct ieee80211_rate *rate, bool shortpre)
{
int sifs, preamble, plcp_bits, sym_time;
@ -129,11 +166,11 @@ int ath5k_hw_get_frame_duration(struct ath5k_hw *ah,
}
/**
* ath5k_hw_get_default_slottime - Get the default slot time for current mode
*
* ath5k_hw_get_default_slottime() - Get the default slot time for current mode
* @ah: The &struct ath5k_hw
*/
unsigned int ath5k_hw_get_default_slottime(struct ath5k_hw *ah)
unsigned int
ath5k_hw_get_default_slottime(struct ath5k_hw *ah)
{
struct ieee80211_channel *channel = ah->ah_current_channel;
unsigned int slot_time;
@ -160,11 +197,11 @@ unsigned int ath5k_hw_get_default_slottime(struct ath5k_hw *ah)
}
/**
* ath5k_hw_get_default_sifs - Get the default SIFS for current mode
*
* ath5k_hw_get_default_sifs() - Get the default SIFS for current mode
* @ah: The &struct ath5k_hw
*/
unsigned int ath5k_hw_get_default_sifs(struct ath5k_hw *ah)
unsigned int
ath5k_hw_get_default_sifs(struct ath5k_hw *ah)
{
struct ieee80211_channel *channel = ah->ah_current_channel;
unsigned int sifs;
@ -191,17 +228,17 @@ unsigned int ath5k_hw_get_default_sifs(struct ath5k_hw *ah)
}
/**
* ath5k_hw_update_mib_counters - Update MIB counters (mac layer statistics)
*
* ath5k_hw_update_mib_counters() - Update MIB counters (mac layer statistics)
* @ah: The &struct ath5k_hw
*
* Reads MIB counters from PCU and updates sw statistics. Called after a
* MIB interrupt, because one of these counters might have reached its maximum
* and triggered the MIB interrupt, to let us read and clear the counter.
*
* Is called in interrupt context!
* NOTE: Is called in interrupt context!
*/
void ath5k_hw_update_mib_counters(struct ath5k_hw *ah)
void
ath5k_hw_update_mib_counters(struct ath5k_hw *ah)
{
struct ath5k_statistics *stats = &ah->stats;
@ -219,10 +256,8 @@ void ath5k_hw_update_mib_counters(struct ath5k_hw *ah)
\******************/
/**
* ath5k_hw_write_rate_duration - fill rate code to duration table
*
* @ah: the &struct ath5k_hw
* @mode: one of enum ath5k_driver_mode
* ath5k_hw_write_rate_duration() - Fill rate code to duration table
* @ah: The &struct ath5k_hw
*
* Write the rate code to duration table upon hw reset. This is a helper for
* ath5k_hw_pcu_init(). It seems all this is doing is setting an ACK timeout on
@ -236,7 +271,8 @@ void ath5k_hw_update_mib_counters(struct ath5k_hw *ah)
* that include all OFDM and CCK rates.
*
*/
static inline void ath5k_hw_write_rate_duration(struct ath5k_hw *ah)
static inline void
ath5k_hw_write_rate_duration(struct ath5k_hw *ah)
{
struct ieee80211_rate *rate;
unsigned int i;
@ -280,12 +316,12 @@ static inline void ath5k_hw_write_rate_duration(struct ath5k_hw *ah)
}
/**
* ath5k_hw_set_ack_timeout - Set ACK timeout on PCU
*
* ath5k_hw_set_ack_timeout() - Set ACK timeout on PCU
* @ah: The &struct ath5k_hw
* @timeout: Timeout in usec
*/
static int ath5k_hw_set_ack_timeout(struct ath5k_hw *ah, unsigned int timeout)
static int
ath5k_hw_set_ack_timeout(struct ath5k_hw *ah, unsigned int timeout)
{
if (ath5k_hw_clocktoh(ah, AR5K_REG_MS(0xffffffff, AR5K_TIME_OUT_ACK))
<= timeout)
@ -298,12 +334,12 @@ static int ath5k_hw_set_ack_timeout(struct ath5k_hw *ah, unsigned int timeout)
}
/**
* ath5k_hw_set_cts_timeout - Set CTS timeout on PCU
*
* ath5k_hw_set_cts_timeout() - Set CTS timeout on PCU
* @ah: The &struct ath5k_hw
* @timeout: Timeout in usec
*/
static int ath5k_hw_set_cts_timeout(struct ath5k_hw *ah, unsigned int timeout)
static int
ath5k_hw_set_cts_timeout(struct ath5k_hw *ah, unsigned int timeout)
{
if (ath5k_hw_clocktoh(ah, AR5K_REG_MS(0xffffffff, AR5K_TIME_OUT_CTS))
<= timeout)
@ -321,14 +357,14 @@ static int ath5k_hw_set_cts_timeout(struct ath5k_hw *ah, unsigned int timeout)
\*******************/
/**
* ath5k_hw_set_lladdr - Set station id
*
* ath5k_hw_set_lladdr() - Set station id
* @ah: The &struct ath5k_hw
* @mac: The card's mac address
* @mac: The card's mac address (array of octets)
*
* Set station id on hw using the provided mac address
*/
int ath5k_hw_set_lladdr(struct ath5k_hw *ah, const u8 *mac)
int
ath5k_hw_set_lladdr(struct ath5k_hw *ah, const u8 *mac)
{
struct ath_common *common = ath5k_hw_common(ah);
u32 low_id, high_id;
@ -349,14 +385,14 @@ int ath5k_hw_set_lladdr(struct ath5k_hw *ah, const u8 *mac)
}
/**
* ath5k_hw_set_bssid - Set current BSSID on hw
*
* ath5k_hw_set_bssid() - Set current BSSID on hw
* @ah: The &struct ath5k_hw
*
* Sets the current BSSID and BSSID mask we have from the
* common struct into the hardware
*/
void ath5k_hw_set_bssid(struct ath5k_hw *ah)
void
ath5k_hw_set_bssid(struct ath5k_hw *ah)
{
struct ath_common *common = ath5k_hw_common(ah);
u16 tim_offset = 0;
@ -389,7 +425,23 @@ void ath5k_hw_set_bssid(struct ath5k_hw *ah)
ath5k_hw_enable_pspoll(ah, NULL, 0);
}
void ath5k_hw_set_bssid_mask(struct ath5k_hw *ah, const u8 *mask)
/**
* ath5k_hw_set_bssid_mask() - Filter out BSSIDs we don't listen to
* @ah: The &struct ath5k_hw
* @mask: The BSSID mask to set (array of octets)
*
* BSSID masking is a method used by AR5212 and newer hardware to inform PCU
* which bits of the interface's MAC address should be looked at when trying
* to decide which packets to ACK. In station mode and AP mode with a single
* BSS every bit matters since we lock to only one BSS. In AP mode with
* multiple BSSes (virtual interfaces) not every bit matters because hw must
* accept frames for all BSSes and so we tweak some bits of our mac address
* in order to have multiple BSSes.
*
* For more information check out ../hw.c of the common ath module.
*/
void
ath5k_hw_set_bssid_mask(struct ath5k_hw *ah, const u8 *mask)
{
struct ath_common *common = ath5k_hw_common(ah);
@ -400,18 +452,21 @@ void ath5k_hw_set_bssid_mask(struct ath5k_hw *ah, const u8 *mask)
ath_hw_setbssidmask(common);
}
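To illustrate the idea behind the mask, a minimal sketch (not the common ath module's actual code): bits where all active interface addresses agree stay set, differing bits are cleared so the hw ignores them when matching.

static void example_bssid_mask(const u8 *addr1, const u8 *addr2, u8 *mask)
{
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		mask[i] = ~(addr1[i] ^ addr2[i]);
}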
/*
* Set multicast filter
/**
* ath5k_hw_set_mcast_filter() - Set multicast filter
* @ah: The &struct ath5k_hw
* @filter0: Lower 32 bits of multicast filter
* @filter1: Higher 16 bits of multicast filter
*/
void ath5k_hw_set_mcast_filter(struct ath5k_hw *ah, u32 filter0, u32 filter1)
void
ath5k_hw_set_mcast_filter(struct ath5k_hw *ah, u32 filter0, u32 filter1)
{
ath5k_hw_reg_write(ah, filter0, AR5K_MCAST_FILTER0);
ath5k_hw_reg_write(ah, filter1, AR5K_MCAST_FILTER1);
}
/**
* ath5k_hw_get_rx_filter - Get current rx filter
*
* ath5k_hw_get_rx_filter() - Get current rx filter
* @ah: The &struct ath5k_hw
*
* Returns the RX filter by reading rx filter and
@ -420,7 +475,8 @@ void ath5k_hw_set_mcast_filter(struct ath5k_hw *ah, u32 filter0, u32 filter1)
* and pass to the driver. For a list of frame types
* check out reg.h.
*/
u32 ath5k_hw_get_rx_filter(struct ath5k_hw *ah)
u32
ath5k_hw_get_rx_filter(struct ath5k_hw *ah)
{
u32 data, filter = 0;
@ -440,8 +496,7 @@ u32 ath5k_hw_get_rx_filter(struct ath5k_hw *ah)
}
/**
* ath5k_hw_set_rx_filter - Set rx filter
*
* ath5k_hw_set_rx_filter() - Set rx filter
* @ah: The &struct ath5k_hw
* @filter: RX filter mask (see reg.h)
*
@ -449,7 +504,8 @@ u32 ath5k_hw_get_rx_filter(struct ath5k_hw *ah)
* register on 5212 and newer chips so that we have proper PHY
* error reporting.
*/
void ath5k_hw_set_rx_filter(struct ath5k_hw *ah, u32 filter)
void
ath5k_hw_set_rx_filter(struct ath5k_hw *ah, u32 filter)
{
u32 data = 0;
@ -493,13 +549,13 @@ void ath5k_hw_set_rx_filter(struct ath5k_hw *ah, u32 filter)
#define ATH5K_MAX_TSF_READ 10
/**
* ath5k_hw_get_tsf64 - Get the full 64bit TSF
*
* ath5k_hw_get_tsf64() - Get the full 64bit TSF
* @ah: The &struct ath5k_hw
*
* Returns the current TSF
*/
u64 ath5k_hw_get_tsf64(struct ath5k_hw *ah)
u64
ath5k_hw_get_tsf64(struct ath5k_hw *ah)
{
u32 tsf_lower, tsf_upper1, tsf_upper2;
int i;
@ -536,28 +592,30 @@ u64 ath5k_hw_get_tsf64(struct ath5k_hw *ah)
return ((u64)tsf_upper1 << 32) | tsf_lower;
}
#undef ATH5K_MAX_TSF_READ
/**
* ath5k_hw_set_tsf64 - Set a new 64bit TSF
*
* ath5k_hw_set_tsf64() - Set a new 64bit TSF
* @ah: The &struct ath5k_hw
* @tsf64: The new 64bit TSF
*
* Sets the new TSF
*/
void ath5k_hw_set_tsf64(struct ath5k_hw *ah, u64 tsf64)
void
ath5k_hw_set_tsf64(struct ath5k_hw *ah, u64 tsf64)
{
ath5k_hw_reg_write(ah, tsf64 & 0xffffffff, AR5K_TSF_L32);
ath5k_hw_reg_write(ah, (tsf64 >> 32) & 0xffffffff, AR5K_TSF_U32);
}
/**
* ath5k_hw_reset_tsf - Force a TSF reset
*
* ath5k_hw_reset_tsf() - Force a TSF reset
* @ah: The &struct ath5k_hw
*
* Forces a TSF reset on PCU
*/
void ath5k_hw_reset_tsf(struct ath5k_hw *ah)
void
ath5k_hw_reset_tsf(struct ath5k_hw *ah)
{
u32 val;
@ -573,10 +631,17 @@ void ath5k_hw_reset_tsf(struct ath5k_hw *ah)
ath5k_hw_reg_write(ah, val, AR5K_BEACON);
}
/*
* Initialize beacon timers
/**
* ath5k_hw_init_beacon_timers() - Initialize beacon timers
* @ah: The &struct ath5k_hw
* @next_beacon: Next TBTT
* @interval: Current beacon interval
*
* This function is used to initialize beacon timers based on current
* operation mode and settings.
*/
void ath5k_hw_init_beacon(struct ath5k_hw *ah, u32 next_beacon, u32 interval)
void
ath5k_hw_init_beacon_timers(struct ath5k_hw *ah, u32 next_beacon, u32 interval)
{
u32 timer1, timer2, timer3;
@ -655,8 +720,7 @@ void ath5k_hw_init_beacon(struct ath5k_hw *ah, u32 next_beacon, u32 interval)
}
/**
* ath5k_check_timer_win - Check if timer B is timer A + window
*
* ath5k_check_timer_win() - Check if timer B is timer A + window
* @a: timer a (before b)
* @b: timer b (after a)
* @window: difference between a and b
@ -686,12 +750,11 @@ ath5k_check_timer_win(int a, int b, int window, int intval)
}
/**
* ath5k_hw_check_beacon_timers - Check if the beacon timers are correct
*
* ath5k_hw_check_beacon_timers() - Check if the beacon timers are correct
* @ah: The &struct ath5k_hw
* @intval: beacon interval
*
* This is a workaround for IBSS mode:
* This is a workaround for IBSS mode
*
* The need for this function arises from the fact that we have 4 separate
* HW timer registers (TIMER0 - TIMER3), which are closely related to the
@ -746,14 +809,14 @@ ath5k_hw_check_beacon_timers(struct ath5k_hw *ah, int intval)
}
/**
* ath5k_hw_set_coverage_class - Set IEEE 802.11 coverage class
*
* ath5k_hw_set_coverage_class() - Set IEEE 802.11 coverage class
* @ah: The &struct ath5k_hw
* @coverage_class: IEEE 802.11 coverage class number
*
* Sets IFS intervals and ACK/CTS timeouts for given coverage class.
*/
void ath5k_hw_set_coverage_class(struct ath5k_hw *ah, u8 coverage_class)
void
ath5k_hw_set_coverage_class(struct ath5k_hw *ah, u8 coverage_class)
{
/* As defined by IEEE 802.11-2007 17.3.8.6 */
int slot_time = ath5k_hw_get_default_slottime(ah) + 3 * coverage_class;
@ -772,8 +835,7 @@ void ath5k_hw_set_coverage_class(struct ath5k_hw *ah, u8 coverage_class)
\***************************/
/**
* ath5k_hw_start_rx_pcu - Start RX engine
*
* ath5k_hw_start_rx_pcu() - Start RX engine
* @ah: The &struct ath5k_hw
*
* Starts RX engine on PCU so that hw can process RXed frames
@ -781,32 +843,33 @@ void ath5k_hw_set_coverage_class(struct ath5k_hw *ah, u8 coverage_class)
*
* NOTE: RX DMA should be already enabled using ath5k_hw_start_rx_dma
*/
void ath5k_hw_start_rx_pcu(struct ath5k_hw *ah)
void
ath5k_hw_start_rx_pcu(struct ath5k_hw *ah)
{
AR5K_REG_DISABLE_BITS(ah, AR5K_DIAG_SW, AR5K_DIAG_SW_DIS_RX);
}
/**
* at5k_hw_stop_rx_pcu - Stop RX engine
*
* ath5k_hw_stop_rx_pcu() - Stop RX engine
* @ah: The &struct ath5k_hw
*
* Stops RX engine on PCU
*/
void ath5k_hw_stop_rx_pcu(struct ath5k_hw *ah)
void
ath5k_hw_stop_rx_pcu(struct ath5k_hw *ah)
{
AR5K_REG_ENABLE_BITS(ah, AR5K_DIAG_SW, AR5K_DIAG_SW_DIS_RX);
}
/**
* ath5k_hw_set_opmode - Set PCU operating mode
*
* ath5k_hw_set_opmode() - Set PCU operating mode
* @ah: The &struct ath5k_hw
* @op_mode: &enum nl80211_iftype operating mode
* @op_mode: One of enum nl80211_iftype
*
* Configure PCU for the various operating modes (AP/STA etc)
*/
int ath5k_hw_set_opmode(struct ath5k_hw *ah, enum nl80211_iftype op_mode)
int
ath5k_hw_set_opmode(struct ath5k_hw *ah, enum nl80211_iftype op_mode)
{
struct ath_common *common = ath5k_hw_common(ah);
u32 pcu_reg, beacon_reg, low_id, high_id;
@ -873,8 +936,17 @@ int ath5k_hw_set_opmode(struct ath5k_hw *ah, enum nl80211_iftype op_mode)
return 0;
}
void ath5k_hw_pcu_init(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
u8 mode)
/**
* ath5k_hw_pcu_init() - Initialize PCU
* @ah: The &struct ath5k_hw
* @op_mode: One of enum nl80211_iftype
*
* This function is used to initialize PCU by setting current
* operation mode and various other settings.
*/
void
ath5k_hw_pcu_init(struct ath5k_hw *ah, enum nl80211_iftype op_mode)
{
/* Set bssid and bssid mask */
ath5k_hw_set_bssid(ah);

File diff suppressed because it is too large

View file

@ -17,23 +17,48 @@
*/
/********************************************\
Queue Control Unit, DFS Control Unit Functions
Queue Control Unit, DCF Control Unit Functions
\********************************************/
#include "ath5k.h"
#include "reg.h"
#include "debug.h"
#include <linux/log2.h>
/**
* DOC: Queue Control Unit (QCU)/DCF Control Unit (DCU) functions
*
* Here we setup parameters for the 12 available TX queues. Note that
* on the various registers we can usually only map the first 10 of them so
* basically we have 10 queues to play with. Each queue has a matching
* QCU that controls when the queue will get triggered and multiple QCUs
* can be mapped to a single DCU that controls the various DCF parameters
* for the various queues. In our setup we have a 1:1 mapping between QCUs
* and DCUs allowing us to have different DCF settings for each queue.
*
* When a frame goes into a TX queue, the QCU decides when it'll trigger a
* transmission based on various criteria (such as how much data we have inside
* its buffer or -if it's a beacon queue- whether it's time to fire up the queue
* based on TSF etc), the DCU adds backoff, IFSes etc and then a scheduler
* (arbitrator) decides the priority of each QCU based on its configuration
* (e.g. beacons are always transmitted when they leave DCU bypassing all other
* frames from other queues waiting to be transmitted). After a frame leaves
* the DCU it goes to PCU for further processing and then to PHY for
* the actual transmission.
*/
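To make the QCU/DCU description above concrete, here is a hedged sketch of how a caller could carve out one data queue with its own DCF parameters using the helpers below; the ath5k_txq_info field names and the use of the return value as a queue index are taken from the driver and should be read as assumptions, not as part of this patch.

static int example_setup_data_queue(struct ath5k_hw *ah)
{
	/* Illustrative only: one best-effort data queue with a
	 * standard DCF contention window. */
	struct ath5k_txq_info qinfo = {
		.tqi_type	= AR5K_TX_QUEUE_DATA,
		.tqi_aifs	= 2,
		.tqi_cw_min	= 15,	/* must be 2^n - 1, see ath5k_cw_validate() */
		.tqi_cw_max	= 1023,
	};
	int queue;

	queue = ath5k_hw_setup_tx_queue(ah, AR5K_TX_QUEUE_DATA, &qinfo);
	if (queue < 0)
		return queue;

	/* Properties can be tuned later with ath5k_hw_set_tx_queueprops()
	 * and are pushed to the hardware by ath5k_hw_reset_tx_queue(). */
	return ath5k_hw_reset_tx_queue(ah, queue);
}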
/******************\
* Helper functions *
\******************/
/*
* Get number of pending frames
* for a specific queue [5211+]
/**
* ath5k_hw_num_tx_pending() - Get number of pending frames for a given queue
* @ah: The &struct ath5k_hw
* @queue: One of enum ath5k_tx_queue_id
*/
u32 ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue)
u32
ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue)
{
u32 pending;
AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
@ -58,10 +83,13 @@ u32 ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue)
return pending;
}
/*
* Set a transmit queue inactive
/**
* ath5k_hw_release_tx_queue() - Set a transmit queue inactive
* @ah: The &struct ath5k_hw
* @queue: One of enum ath5k_tx_queue_id
*/
void ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue)
void
ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue)
{
if (WARN_ON(queue >= ah->ah_capabilities.cap_queues.q_tx_num))
return;
@ -72,34 +100,56 @@ void ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue)
AR5K_Q_DISABLE_BITS(ah->ah_txq_status, queue);
}
/*
/**
* ath5k_cw_validate() - Make sure the given cw is valid
* @cw_req: The contention window value to check
*
* Make sure cw is a power of 2 minus 1 and smaller than 1024
*/
static u16 ath5k_cw_validate(u16 cw_req)
static u16
ath5k_cw_validate(u16 cw_req)
{
u32 cw = 1;
cw_req = min(cw_req, (u16)1023);
while (cw < cw_req)
cw = (cw << 1) | 1;
/* Check if cw_req + 1 is a power of 2 */
if (is_power_of_2(cw_req + 1))
return cw_req;
return cw;
/* Check if cw_req is a power of 2 */
if (is_power_of_2(cw_req))
return cw_req - 1;
/* If none of the above is correct
* find the closest power of 2 */
cw_req = (u16) roundup_pow_of_two(cw_req) - 1;
return cw_req;
}
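A few worked values, derived directly from the rounding logic above (illustrative only):

/*
 * ath5k_cw_validate(15)   -> 15   (15 + 1 is a power of 2, kept as-is)
 * ath5k_cw_validate(16)   -> 15   (16 is a power of 2, so 16 - 1 is used)
 * ath5k_cw_validate(300)  -> 511  (rounded up to the next 2^n - 1)
 * ath5k_cw_validate(5000) -> 1023 (clamped to the 1023 maximum first)
 */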
/*
* Get properties for a transmit queue
/**
* ath5k_hw_get_tx_queueprops() - Get properties for a transmit queue
* @ah: The &struct ath5k_hw
* @queue: One of enum ath5k_tx_queue_id
* @queue_info: The &struct ath5k_txq_info to fill
*/
int ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue,
int
ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue,
struct ath5k_txq_info *queue_info)
{
memcpy(queue_info, &ah->ah_txq[queue], sizeof(struct ath5k_txq_info));
return 0;
}
/*
* Set properties for a transmit queue
/**
* ath5k_hw_set_tx_queueprops() - Set properties for a transmit queue
* @ah: The &struct ath5k_hw
* @queue: One of enum ath5k_tx_queue_id
* @qinfo: The &struct ath5k_txq_info to use
*
* Returns 0 on success or -EIO if queue is inactive
*/
int ath5k_hw_set_tx_queueprops(struct ath5k_hw *ah, int queue,
int
ath5k_hw_set_tx_queueprops(struct ath5k_hw *ah, int queue,
const struct ath5k_txq_info *qinfo)
{
struct ath5k_txq_info *qi;
@ -139,10 +189,16 @@ int ath5k_hw_set_tx_queueprops(struct ath5k_hw *ah, int queue,
return 0;
}
/*
* Initialize a transmit queue
/**
* ath5k_hw_setup_tx_queue() - Initialize a transmit queue
* @ah: The &struct ath5k_hw
* @queue_type: One of enum ath5k_tx_queue
* @queue_info: The &struct ath5k_txq_info to use
*
* Returns the queue number on success or -EINVAL on invalid arguments
*/
int ath5k_hw_setup_tx_queue(struct ath5k_hw *ah, enum ath5k_tx_queue queue_type,
int
ath5k_hw_setup_tx_queue(struct ath5k_hw *ah, enum ath5k_tx_queue queue_type,
struct ath5k_txq_info *queue_info)
{
unsigned int queue;
@ -217,10 +273,16 @@ int ath5k_hw_setup_tx_queue(struct ath5k_hw *ah, enum ath5k_tx_queue queue_type,
* Single QCU/DCU initialization *
\*******************************/
/*
* Set tx retry limits on DCU
/**
* ath5k_hw_set_tx_retry_limits() - Set tx retry limits on DCU
* @ah: The &struct ath5k_hw
* @queue: One of enum ath5k_tx_queue_id
*
* This function is used when initializing a queue, to set
* retry limits based on ah->ah_retry_* and the chipset used.
*/
void ath5k_hw_set_tx_retry_limits(struct ath5k_hw *ah,
void
ath5k_hw_set_tx_retry_limits(struct ath5k_hw *ah,
unsigned int queue)
{
/* Single data queue on AR5210 */
@ -255,15 +317,15 @@ void ath5k_hw_set_tx_retry_limits(struct ath5k_hw *ah,
}
/**
* ath5k_hw_reset_tx_queue - Initialize a single hw queue
* ath5k_hw_reset_tx_queue() - Initialize a single hw queue
* @ah: The &struct ath5k_hw
* @queue: One of enum ath5k_tx_queue_id
*
* @ah The &struct ath5k_hw
* @queue The hw queue number
*
* Set DFS properties for the given transmit queue on DCU
* Set DCF properties for the given transmit queue on DCU
* and configures all queue-specific parameters.
*/
int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
int
ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
{
struct ath5k_txq_info *tq = &ah->ah_txq[queue];
@ -491,10 +553,9 @@ int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
\**************************/
/**
* ath5k_hw_set_ifs_intervals - Set global inter-frame spaces on DCU
*
* @ah The &struct ath5k_hw
* @slot_time Slot time in us
* ath5k_hw_set_ifs_intervals() - Set global inter-frame spaces on DCU
* @ah: The &struct ath5k_hw
* @slot_time: Slot time in us
*
* Sets the global IFS intervals on DCU (also works on AR5210) for
* the given slot time and the current bwmode.
@ -597,7 +658,15 @@ int ath5k_hw_set_ifs_intervals(struct ath5k_hw *ah, unsigned int slot_time)
}
int ath5k_hw_init_queues(struct ath5k_hw *ah)
/**
* ath5k_hw_init_queues() - Initialize tx queues
* @ah: The &struct ath5k_hw
*
* Initializes all tx queues based on the information in
* ah->ah_txq* set by the driver
*/
int
ath5k_hw_init_queues(struct ath5k_hw *ah)
{
int i, ret;

View file

@ -280,6 +280,10 @@
* 5211/5212 we have one primary and 4 secondary registers.
* So we have AR5K_ISR for 5210 and AR5K_PISR /SISRx for 5211/5212.
* Most of these bits are common for all chipsets.
*
* NOTE: On 5211+ TXOK, TXDESC, TXERR, TXEOL and TXURN contain
* the logical OR from per-queue interrupt bits found on SISR registers
* (see below).
*/
#define AR5K_ISR 0x001c /* Register Address [5210] */
#define AR5K_PISR 0x0080 /* Register Address [5211+] */
@ -292,7 +296,10 @@
#define AR5K_ISR_TXOK 0x00000040 /* Frame successfully transmitted */
#define AR5K_ISR_TXDESC 0x00000080 /* TX descriptor request */
#define AR5K_ISR_TXERR 0x00000100 /* Transmit error */
#define AR5K_ISR_TXNOFRM 0x00000200 /* No frame transmitted (transmit timeout) */
#define AR5K_ISR_TXNOFRM 0x00000200 /* No frame transmitted (transmit timeout)
* NOTE: We don't have per-queue info for this
* one, but we can enable it per-queue through
* TXNOFRM_QCU field on TXNOFRM register */
#define AR5K_ISR_TXEOL 0x00000400 /* Empty TX descriptor */
#define AR5K_ISR_TXURN 0x00000800 /* Transmit FIFO underrun */
#define AR5K_ISR_MIB 0x00001000 /* Update MIB counters */
@ -302,21 +309,29 @@
#define AR5K_ISR_SWBA 0x00010000 /* Software beacon alert */
#define AR5K_ISR_BRSSI 0x00020000 /* Beacon rssi below threshold (?) */
#define AR5K_ISR_BMISS 0x00040000 /* Beacon missed */
#define AR5K_ISR_HIUERR 0x00080000 /* Host Interface Unit error [5211+] */
#define AR5K_ISR_HIUERR 0x00080000 /* Host Interface Unit error [5211+]
* 'or' of MCABT, SSERR, DPERR from SISR2 */
#define AR5K_ISR_BNR 0x00100000 /* Beacon not ready [5211+] */
#define AR5K_ISR_MCABT 0x00100000 /* Master Cycle Abort [5210] */
#define AR5K_ISR_RXCHIRP 0x00200000 /* CHIRP Received [5212+] */
#define AR5K_ISR_SSERR 0x00200000 /* Signaled System Error [5210] */
#define AR5K_ISR_DPERR 0x00400000 /* Det par Error (?) [5210] */
#define AR5K_ISR_DPERR 0x00400000 /* Bus parity error [5210] */
#define AR5K_ISR_RXDOPPLER 0x00400000 /* Doppler chirp received [5212+] */
#define AR5K_ISR_TIM 0x00800000 /* [5211+] */
#define AR5K_ISR_BCNMISC 0x00800000 /* 'or' of TIM, CAB_END, DTIM_SYNC, BCN_TIMEOUT,
CAB_TIMEOUT and DTIM bits from SISR2 [5212+] */
#define AR5K_ISR_BCNMISC 0x00800000 /* Misc beacon related interrupt
* 'or' of TIM, CAB_END, DTIM_SYNC, BCN_TIMEOUT,
* CAB_TIMEOUT and DTIM bits from SISR2 [5212+] */
#define AR5K_ISR_GPIO 0x01000000 /* GPIO (rf kill) */
#define AR5K_ISR_QCBRORN 0x02000000 /* QCU CBR overrun [5211+] */
#define AR5K_ISR_QCBRURN 0x04000000 /* QCU CBR underrun [5211+] */
#define AR5K_ISR_QTRIG 0x08000000 /* QCU scheduling trigger [5211+] */
#define AR5K_ISR_BITS_FROM_SISRS (AR5K_ISR_TXOK | AR5K_ISR_TXDESC |\
AR5K_ISR_TXERR | AR5K_ISR_TXEOL |\
AR5K_ISR_TXURN | AR5K_ISR_HIUERR |\
AR5K_ISR_BCNMISC | AR5K_ISR_QCBRORN |\
AR5K_ISR_QCBRURN | AR5K_ISR_QTRIG)
/*
* Secondary status registers [5211+] (0 - 4)
*
@ -347,7 +362,7 @@
#define AR5K_SISR2_BCN_TIMEOUT 0x08000000 /* Beacon Timeout [5212+] */
#define AR5K_SISR2_CAB_TIMEOUT 0x10000000 /* CAB Timeout [5212+] */
#define AR5K_SISR2_DTIM 0x20000000 /* [5212+] */
#define AR5K_SISR2_TSFOOR 0x80000000 /* TSF OOR (?) */
#define AR5K_SISR2_TSFOOR 0x80000000 /* TSF Out of range */
#define AR5K_SISR3 0x0090 /* Register Address [5211+] */
#define AR5K_SISR3_QCBRORN 0x000003ff /* Mask for QCBRORN */

View file

@ -19,9 +19,9 @@
*
*/
/*****************************\
Reset functions and helpers
\*****************************/
/****************************\
Reset function and helpers
\****************************/
#include <asm/unaligned.h>
@ -33,14 +33,36 @@
#include "debug.h"
/**
* DOC: Reset function and helpers
*
* Here we implement the main reset routine, used to bring the card
* to a working state and ready to receive. We also handle routines
* that don't fit on other places such as clock, sleep and power control
*/
/******************\
* Helper functions *
\******************/
/*
* Check if a register write has been completed
/**
* ath5k_hw_register_timeout() - Poll a register for a flag/field change
* @ah: The &struct ath5k_hw
* @reg: The register to read
* @flag: The flag/field to check on the register
* @val: The field value we expect (if we check a field)
* @is_set: Instead of checking if the flag got cleared, check if it got set
*
* Some registers contain flags that indicate that an operation is
* running. We use this function to poll these registers and check
* if these flags get cleared. We also use it to poll a register
* field (containing multiple flags) until it gets a specific value.
*
* Returns -EAGAIN if we exceeded AR5K_TUNE_REGISTER_TIMEOUT * 15us or 0
*/
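As a rough usage sketch (assumed, mirroring how the reset helpers below use it), waiting for the PCU and DMA reset flags to read back as zero could look like this:

	if (ath5k_hw_register_timeout(ah, AR5K_RESET_CTL,
				      AR5K_RESET_CTL_PCU | AR5K_RESET_CTL_DMA,
				      0, false))
		return -EAGAIN;	/* flags never cleared in time */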
int ath5k_hw_register_timeout(struct ath5k_hw *ah, u32 reg, u32 flag, u32 val,
int
ath5k_hw_register_timeout(struct ath5k_hw *ah, u32 reg, u32 flag, u32 val,
bool is_set)
{
int i;
@ -64,35 +86,48 @@ int ath5k_hw_register_timeout(struct ath5k_hw *ah, u32 reg, u32 flag, u32 val,
\*************************/
/**
* ath5k_hw_htoclock - Translate usec to hw clock units
*
* ath5k_hw_htoclock() - Translate usec to hw clock units
* @ah: The &struct ath5k_hw
* @usec: value in microseconds
*
* Translate usecs to hw clock units based on the current
* hw clock rate.
*
* Returns number of clock units
*/
unsigned int ath5k_hw_htoclock(struct ath5k_hw *ah, unsigned int usec)
unsigned int
ath5k_hw_htoclock(struct ath5k_hw *ah, unsigned int usec)
{
struct ath_common *common = ath5k_hw_common(ah);
return usec * common->clockrate;
}
/**
* ath5k_hw_clocktoh - Translate hw clock units to usec
* ath5k_hw_clocktoh() - Translate hw clock units to usec
* @ah: The &struct ath5k_hw
* @clock: value in hw clock units
*
* Translate hw clock units to usecs based on the current
* hw clock rate.
*
* Returns number of usecs
*/
unsigned int ath5k_hw_clocktoh(struct ath5k_hw *ah, unsigned int clock)
unsigned int
ath5k_hw_clocktoh(struct ath5k_hw *ah, unsigned int clock)
{
struct ath_common *common = ath5k_hw_common(ah);
return clock / common->clockrate;
}
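A small sketch of how the two converters pair up (hypothetical caller; the numbers in the comments assume a 40 MHz core clock):

static unsigned int example_slot_in_clocks(struct ath5k_hw *ah)
{
	/* At 40 MHz, a 9 usec slot time becomes 9 * 40 = 360 clock units */
	unsigned int clocks = ath5k_hw_htoclock(ah, 9);

	/* ...and converting back should give the original 9 usec */
	WARN_ON(ath5k_hw_clocktoh(ah, clocks) != 9);

	return clocks;
}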
/**
* ath5k_hw_init_core_clock - Initialize core clock
* ath5k_hw_init_core_clock() - Initialize core clock
* @ah: The &struct ath5k_hw
*
* @ah The &struct ath5k_hw
*
* Initialize core clock parameters (usec, usec32, latencies etc).
* Initialize core clock parameters (usec, usec32, latencies etc),
* based on current bwmode and chipset properties.
*/
static void ath5k_hw_init_core_clock(struct ath5k_hw *ah)
static void
ath5k_hw_init_core_clock(struct ath5k_hw *ah)
{
struct ieee80211_channel *channel = ah->ah_current_channel;
struct ath_common *common = ath5k_hw_common(ah);
@ -227,16 +262,21 @@ static void ath5k_hw_init_core_clock(struct ath5k_hw *ah)
}
}
/*
/**
* ath5k_hw_set_sleep_clock() - Setup sleep clock operation
* @ah: The &struct ath5k_hw
* @enable: Enable sleep clock operation (false to disable)
*
* If there is an external 32KHz crystal available, use it
* as ref. clock instead of 32/40MHz clock and baseband clocks
* to save power during sleep or restore normal 32/40MHz
* operation.
*
* XXX: When operating on 32KHz certain PHY registers (27 - 31,
* 123 - 127) require delay on access.
* NOTE: When operating on 32KHz certain PHY registers (27 - 31,
* 123 - 127) require delay on access.
*/
static void ath5k_hw_set_sleep_clock(struct ath5k_hw *ah, bool enable)
static void
ath5k_hw_set_sleep_clock(struct ath5k_hw *ah, bool enable)
{
struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
u32 scal, spending, sclock;
@ -340,10 +380,19 @@ static void ath5k_hw_set_sleep_clock(struct ath5k_hw *ah, bool enable)
* Reset/Sleep control *
\*********************/
/*
* Reset chipset
/**
* ath5k_hw_nic_reset() - Reset the various chipset units
* @ah: The &struct ath5k_hw
* @val: Mask to indicate what units to reset
*
* To reset the various chipset units we need to write
* the mask to AR5K_RESET_CTL and poll the register until
* all flags are cleared.
*
* Returns 0 if we are O.K. or -EAGAIN (from ath5k_hw_register_timeout)
*/
static int ath5k_hw_nic_reset(struct ath5k_hw *ah, u32 val)
static int
ath5k_hw_nic_reset(struct ath5k_hw *ah, u32 val)
{
int ret;
u32 mask = val ? val : ~0U;
@ -357,7 +406,7 @@ static int ath5k_hw_nic_reset(struct ath5k_hw *ah, u32 val)
ath5k_hw_reg_write(ah, val, AR5K_RESET_CTL);
/* Wait at least 128 PCI clocks */
udelay(15);
usleep_range(15, 20);
if (ah->ah_version == AR5K_AR5210) {
val &= AR5K_RESET_CTL_PCU | AR5K_RESET_CTL_DMA
@ -382,12 +431,17 @@ static int ath5k_hw_nic_reset(struct ath5k_hw *ah, u32 val)
return ret;
}
/*
* Reset AHB chipset
* AR5K_RESET_CTL_PCU flag resets WMAC
* AR5K_RESET_CTL_BASEBAND flag resets WBB
/**
* ath5k_hw_wisoc_reset() - Reset AHB chipset
* @ah: The &struct ath5k_hw
* @flags: Mask to indicate what units to reset
*
* Same as ath5k_hw_nic_reset but for AHB based devices
*
* Returns 0 if we are O.K. or -EAGAIN (from ath5k_hw_register_timeout)
*/
static int ath5k_hw_wisoc_reset(struct ath5k_hw *ah, u32 flags)
static int
ath5k_hw_wisoc_reset(struct ath5k_hw *ah, u32 flags)
{
u32 mask = flags ? flags : ~0U;
u32 __iomem *reg;
@ -422,7 +476,7 @@ static int ath5k_hw_wisoc_reset(struct ath5k_hw *ah, u32 flags)
regval = __raw_readl(reg);
__raw_writel(regval | val, reg);
regval = __raw_readl(reg);
udelay(100);
usleep_range(100, 150);
/* Bring BB/MAC out of reset */
__raw_writel(regval & ~val, reg);
@ -439,11 +493,23 @@ static int ath5k_hw_wisoc_reset(struct ath5k_hw *ah, u32 flags)
return 0;
}
/*
* Sleep control
/**
* ath5k_hw_set_power_mode() - Set power mode
* @ah: The &struct ath5k_hw
* @mode: One of enum ath5k_power_mode
* @set_chip: Set to true to write sleep control register
* @sleep_duration: How much time the device is allowed to sleep
* when sleep logic is enabled (in 128 microsecond increments).
*
* This function is used to configure sleep policy and allowed
* sleep modes. For more information check out the sleep control
* register on reg.h and STA_ID1.
*
* Returns 0 on success, -EIO if chip didn't wake up or -EINVAL if an invalid
* mode is requested.
*/
static int ath5k_hw_set_power(struct ath5k_hw *ah, enum ath5k_power_mode mode,
static int
ath5k_hw_set_power_mode(struct ath5k_hw *ah, enum ath5k_power_mode mode,
bool set_chip, u16 sleep_duration)
{
unsigned int i;
@ -493,7 +559,7 @@ static int ath5k_hw_set_power(struct ath5k_hw *ah, enum ath5k_power_mode mode,
ath5k_hw_reg_write(ah, data | AR5K_SLEEP_CTL_SLE_WAKE,
AR5K_SLEEP_CTL);
udelay(15);
usleep_range(15, 20);
for (i = 200; i > 0; i--) {
/* Check if the chip did wake up */
@ -502,7 +568,7 @@ static int ath5k_hw_set_power(struct ath5k_hw *ah, enum ath5k_power_mode mode,
break;
/* Wait a bit and retry */
udelay(50);
usleep_range(50, 75);
ath5k_hw_reg_write(ah, data | AR5K_SLEEP_CTL_SLE_WAKE,
AR5K_SLEEP_CTL);
}
@ -523,17 +589,20 @@ commit:
return 0;
}
/*
* Put device on hold
/**
* ath5k_hw_on_hold() - Put device on hold
* @ah: The &struct ath5k_hw
*
* Put MAC and Baseband on warm reset and
* keep that state (don't clean sleep control
* register). After this MAC and Baseband are
* disabled and a full reset is needed to come
* back. This way we save as much power as possible
* Put MAC and Baseband on warm reset and keep that state
* (don't clean sleep control register). After this MAC
* and Baseband are disabled and a full reset is needed
* to come back. This way we save as much power as possible
* without putting the card on full sleep.
*
* Returns 0 on success or -EIO on error
*/
int ath5k_hw_on_hold(struct ath5k_hw *ah)
int
ath5k_hw_on_hold(struct ath5k_hw *ah)
{
struct pci_dev *pdev = ah->pdev;
u32 bus_flags;
@ -543,7 +612,7 @@ int ath5k_hw_on_hold(struct ath5k_hw *ah)
return 0;
/* Make sure device is awake */
ret = ath5k_hw_set_power(ah, AR5K_PM_AWAKE, true, 0);
ret = ath5k_hw_set_power_mode(ah, AR5K_PM_AWAKE, true, 0);
if (ret) {
ATH5K_ERR(ah, "failed to wakeup the MAC Chip\n");
return ret;
@ -563,7 +632,7 @@ int ath5k_hw_on_hold(struct ath5k_hw *ah)
ret = ath5k_hw_nic_reset(ah, AR5K_RESET_CTL_PCU |
AR5K_RESET_CTL_MAC | AR5K_RESET_CTL_DMA |
AR5K_RESET_CTL_PHY | AR5K_RESET_CTL_PCI);
mdelay(2);
usleep_range(2000, 2500);
} else {
ret = ath5k_hw_nic_reset(ah, AR5K_RESET_CTL_PCU |
AR5K_RESET_CTL_BASEBAND | bus_flags);
@ -575,7 +644,7 @@ int ath5k_hw_on_hold(struct ath5k_hw *ah)
}
/* ...wakeup again!*/
ret = ath5k_hw_set_power(ah, AR5K_PM_AWAKE, true, 0);
ret = ath5k_hw_set_power_mode(ah, AR5K_PM_AWAKE, true, 0);
if (ret) {
ATH5K_ERR(ah, "failed to put device on hold\n");
return ret;
@ -584,11 +653,18 @@ int ath5k_hw_on_hold(struct ath5k_hw *ah)
return ret;
}
/*
/**
* ath5k_hw_nic_wakeup() - Force card out of sleep
* @ah: The &struct ath5k_hw
* @channel: The &struct ieee80211_channel
*
* Bring up MAC + PHY Chips and program PLL
* Channel is NULL for the initial wakeup.
* NOTE: Channel is NULL for the initial wakeup.
*
* Returns 0 on success, -EIO on hw failure or -EINVAL for invalid channel information
*/
int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, struct ieee80211_channel *channel)
int
ath5k_hw_nic_wakeup(struct ath5k_hw *ah, struct ieee80211_channel *channel)
{
struct pci_dev *pdev = ah->pdev;
u32 turbo, mode, clock, bus_flags;
@ -600,7 +676,7 @@ int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, struct ieee80211_channel *channel)
if ((ath5k_get_bus_type(ah) != ATH_AHB) || channel) {
/* Wakeup the device */
ret = ath5k_hw_set_power(ah, AR5K_PM_AWAKE, true, 0);
ret = ath5k_hw_set_power_mode(ah, AR5K_PM_AWAKE, true, 0);
if (ret) {
ATH5K_ERR(ah, "failed to wakeup the MAC Chip\n");
return ret;
@ -621,7 +697,7 @@ int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, struct ieee80211_channel *channel)
ret = ath5k_hw_nic_reset(ah, AR5K_RESET_CTL_PCU |
AR5K_RESET_CTL_MAC | AR5K_RESET_CTL_DMA |
AR5K_RESET_CTL_PHY | AR5K_RESET_CTL_PCI);
mdelay(2);
usleep_range(2000, 2500);
} else {
if (ath5k_get_bus_type(ah) == ATH_AHB)
ret = ath5k_hw_wisoc_reset(ah, AR5K_RESET_CTL_PCU |
@ -637,7 +713,7 @@ int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, struct ieee80211_channel *channel)
}
/* ...wakeup again!...*/
ret = ath5k_hw_set_power(ah, AR5K_PM_AWAKE, true, 0);
ret = ath5k_hw_set_power_mode(ah, AR5K_PM_AWAKE, true, 0);
if (ret) {
ATH5K_ERR(ah, "failed to resume the MAC Chip\n");
return ret;
@ -739,7 +815,7 @@ int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, struct ieee80211_channel *channel)
/* ...update PLL if needed */
if (ath5k_hw_reg_read(ah, AR5K_PHY_PLL) != clock) {
ath5k_hw_reg_write(ah, clock, AR5K_PHY_PLL);
udelay(300);
usleep_range(300, 350);
}
/* ...set the PHY operating mode */
@ -755,8 +831,19 @@ int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, struct ieee80211_channel *channel)
* Post-initvals register modifications *
\**************************************/
/* TODO: Half/Quarter rate */
static void ath5k_hw_tweak_initval_settings(struct ath5k_hw *ah,
/**
* ath5k_hw_tweak_initval_settings() - Tweak initial settings
* @ah: The &struct ath5k_hw
* @channel: The &struct ieee80211_channel
*
* Some settings are not handled by the initvals, e.g. bwmode
* settings, some phy settings, workarounds etc that in general
* don't fit anywhere else or are too small to introduce a separate
* function for each one. So we have this function to handle
* them all during reset and complete the card's initialization.
*/
static void
ath5k_hw_tweak_initval_settings(struct ath5k_hw *ah,
struct ieee80211_channel *channel)
{
if (ah->ah_version == AR5K_AR5212 &&
@ -875,7 +962,16 @@ static void ath5k_hw_tweak_initval_settings(struct ath5k_hw *ah,
}
}
static void ath5k_hw_commit_eeprom_settings(struct ath5k_hw *ah,
/**
* ath5k_hw_commit_eeprom_settings() - Commit settings from EEPROM
* @ah: The &struct ath5k_hw
* @channel: The &struct ieee80211_channel
*
* Use settings stored on EEPROM to properly initialize the card
* based on various parameters and per-mode calibration data.
*/
static void
ath5k_hw_commit_eeprom_settings(struct ath5k_hw *ah,
struct ieee80211_channel *channel)
{
struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
@ -1029,7 +1125,23 @@ static void ath5k_hw_commit_eeprom_settings(struct ath5k_hw *ah,
* Main reset function *
\*********************/
int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
/**
* ath5k_hw_reset() - The main reset function
* @ah: The &struct ath5k_hw
* @op_mode: One of enum nl80211_iftype
* @channel: The &struct ieee80211_channel
* @fast: Enable fast channel switching
* @skip_pcu: Skip pcu initialization
*
* This is the function we call each time we want to (re)initialize the
* card and pass new settings to hw. We also call it when hw runs into
* trouble to make it come back to a working state.
*
* Returns 0 on success, -EINVAL on invalid op_mode or channel information, or -EIO
* on failure.
*/
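A hedged caller-side sketch (not taken from this patch); the driver's reset path invokes it roughly like this when a fast channel switch is not possible:

	ret = ath5k_hw_reset(ah, ah->opmode, ah->ah_current_channel,
			     false, false);
	if (ret)
		ATH5K_ERR(ah, "can't reset hardware (%d)\n", ret);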
int
ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
struct ieee80211_channel *channel, bool fast, bool skip_pcu)
{
u32 s_seq[10], s_led[3], tsf_up, tsf_lo;
@ -1242,7 +1354,7 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
/*
* Initialize PCU
*/
ath5k_hw_pcu_init(ah, op_mode, mode);
ath5k_hw_pcu_init(ah, op_mode);
/*
* Initialize PHY

View file

@ -18,7 +18,9 @@
*/
/*
/**
* DOC: RF Buffer registers
*
* There are some special registers on the RF chip
* that control various operation settings related mostly to
* the analog parts (channel, gain adjustment etc).
@ -44,40 +46,63 @@
*/
/*
/**
* struct ath5k_ini_rfbuffer - Initial RF Buffer settings
* @rfb_bank: RF Bank number
* @rfb_ctrl_register: RF Buffer control register
* @rfb_mode_data: RF Buffer data for each mode
*
* Struct to hold default mode specific RF
* register values (RF Banks)
* register values (RF Banks) for each chip.
*/
struct ath5k_ini_rfbuffer {
u8 rfb_bank; /* RF Bank number */
u16 rfb_ctrl_register; /* RF Buffer control register */
u32 rfb_mode_data[3]; /* RF Buffer data for each mode */
u8 rfb_bank;
u16 rfb_ctrl_register;
u32 rfb_mode_data[3];
};
/*
/**
* struct ath5k_rfb_field - An RF Buffer field (register/value)
* @len: Field length
* @pos: Offset on the raw packet
* @col: Used for shifting
*
* Struct to hold RF Buffer field
* infos used to access certain RF
* analog registers
*/
struct ath5k_rfb_field {
u8 len; /* Field length */
u16 pos; /* Offset on the raw packet */
u8 col; /* Column -used for shifting */
u8 len;
u16 pos;
u8 col;
};
/*
* RF analog register definition
/**
* struct ath5k_rf_reg - RF analog register definition
* @bank: RF Buffer Bank number
* @index: Register's index on ath5k_rf_regx_idx
* @field: The &struct ath5k_rfb_field
*
* We use this struct to define the set of RF registers
* on each chip that we want to tweak. Some RF registers
* are common between different chip versions so this saves
* us space and complexity because we can refer to an rf
* register by its index no matter what chip we work with
* as long as it has that register.
*/
struct ath5k_rf_reg {
u8 bank; /* RF Buffer Bank number */
u8 index; /* Register's index on rf_regs_idx */
struct ath5k_rfb_field field; /* RF Buffer field for this register */
u8 bank;
u8 index;
struct ath5k_rfb_field field;
};
/* Map RF registers to indexes
/**
* enum ath5k_rf_regs_idx - Map RF registers to indexes
*
* We do this to handle common bits and make our
* life easier by using an index for each register
* instead of a full rfb_field */
* instead of a full rfb_field
*/
enum ath5k_rf_regs_idx {
/* BANK 2 */
AR5K_RF_TURBO = 0,

View file

@ -18,13 +18,17 @@
*
*/
/*
/**
* struct ath5k_ini_rfgain - RF Gain table
* @rfg_register: RF Gain register address
* @rfg_value: Register value for 5 and 2GHz
*
* Mode-specific RF Gain table (64bytes) for RF5111/5112
* (RF5110 only comes with AR5210 and only supports a/turbo a mode so initial
* RF Gain values are included in AR5K_AR5210_INI)
*/
struct ath5k_ini_rfgain {
u16 rfg_register; /* RF Gain register address */
u16 rfg_register;
u32 rfg_value[2]; /* [freq (see below)] */
};
@ -455,18 +459,31 @@ static const struct ath5k_ini_rfgain rfgain_2425[] = {
#define AR5K_GAIN_CHECK_ADJUST(_g) \
((_g)->g_current <= (_g)->g_low || (_g)->g_current >= (_g)->g_high)
/**
* struct ath5k_gain_opt_step - An RF gain optimization step
* @gos_param: Set of parameters
* @gos_gain: Gain
*/
struct ath5k_gain_opt_step {
s8 gos_param[AR5K_GAIN_CRN_MAX_FIX_BITS];
s8 gos_gain;
};
/**
* struct ath5k_gain_opt - RF Gain optimization ladder
* @go_default: The default step
* @go_steps_count: How many optimization steps
* @go_step: Array of &struct ath5k_gain_opt_step
*/
struct ath5k_gain_opt {
u8 go_default;
u8 go_steps_count;
const struct ath5k_gain_opt_step go_step[AR5K_GAIN_STEP_COUNT];
};
/*
* RF5111
* Parameters on gos_param:
* 1) Tx clip PHY register
* 2) PWD 90 RF register
@ -490,6 +507,7 @@ static const struct ath5k_gain_opt rfgain_opt_5111 = {
};
/*
* RF5112
* Parameters on gos_param:
* 1) Mixgain ovr RF register
* 2) PWD 138 RF register

View file

@ -34,7 +34,8 @@ ath9k_hw-y:= \
ar9002_mac.o \
ar9003_mac.o \
ar9003_eeprom.o \
ar9003_paprd.o
ar9003_paprd.o \
ar9003_mci.o
obj-$(CONFIG_ATH9K_HW) += ath9k_hw.o

View file

@ -18,6 +18,7 @@
#include "hw-ops.h"
#include "ar9003_phy.h"
#include "ar9003_rtt.h"
#include "ar9003_mci.h"
#define MAX_MEASUREMENT MAX_IQCAL_MEASUREMENT
#define MAX_MAG_DELTA 11
@ -824,7 +825,7 @@ static void ar9003_hw_tx_iq_cal_post_proc(struct ath_hw *ah, bool is_reusable)
chan_info_tab[i] + offset);
ath_dbg(common, ATH_DBG_CALIBRATE,
"IQ RES[%d]=0x%x"
"IQ_RES[%d]=0x%x "
"IQ_RES[%d]=0x%x\n",
idx, iq_res[idx], idx + 1,
iq_res[idx + 1]);
@ -934,10 +935,12 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah,
{
struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_hw_cal_data *caldata = ah->caldata;
struct ath9k_hw_mci *mci_hw = &ah->btcoex_hw.mci;
bool txiqcal_done = false, txclcal_done = false;
bool is_reusable = true, status = true;
bool run_rtt_cal = false, run_agc_cal;
bool rtt = !!(ah->caps.hw_caps & ATH9K_HW_CAP_RTT);
bool mci = !!(ah->caps.hw_caps & ATH9K_HW_CAP_MCI);
u32 agc_ctrl = 0, agc_supp_cals = AR_PHY_AGC_CONTROL_OFFSET_CAL |
AR_PHY_AGC_CONTROL_FLTR_CAL |
AR_PHY_AGC_CONTROL_PKDET_CAL;
@ -1005,6 +1008,31 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah,
} else if (caldata && !caldata->done_txiqcal_once)
run_agc_cal = true;
if (mci && IS_CHAN_2GHZ(chan) &&
(mci_hw->bt_state == MCI_BT_AWAKE) &&
run_agc_cal &&
!(mci_hw->config & ATH_MCI_CONFIG_DISABLE_MCI_CAL)) {
u32 pld[4] = {0, 0, 0, 0};
/* send CAL_REQ only when BT is AWAKE. */
ath_dbg(common, ATH_DBG_MCI, "MCI send WLAN_CAL_REQ 0x%x\n",
mci_hw->wlan_cal_seq);
MCI_GPM_SET_CAL_TYPE(pld, MCI_GPM_WLAN_CAL_REQ);
pld[MCI_GPM_WLAN_CAL_W_SEQUENCE] = mci_hw->wlan_cal_seq++;
ar9003_mci_send_message(ah, MCI_GPM, 0, pld, 16, true, false);
/* Wait up to 50ms for BT_CAL_GRANT */
ath_dbg(common, ATH_DBG_MCI, "MCI wait for BT_CAL_GRANT");
if (ar9003_mci_wait_for_gpm(ah, MCI_GPM_BT_CAL_GRANT, 0, 50000))
ath_dbg(common, ATH_DBG_MCI, "MCI got BT_CAL_GRANT");
else {
is_reusable = false;
ath_dbg(common, ATH_DBG_MCI, "\nMCI BT is not responding");
}
}
txiqcal_done = ar9003_hw_tx_iq_cal_run(ah);
REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_DIS);
udelay(5);
@ -1022,6 +1050,21 @@ skip_tx_iqcal:
AR_PHY_AGC_CONTROL_CAL,
0, AH_WAIT_TIMEOUT);
}
if (mci && IS_CHAN_2GHZ(chan) &&
(mci_hw->bt_state == MCI_BT_AWAKE) &&
run_agc_cal &&
!(mci_hw->config & ATH_MCI_CONFIG_DISABLE_MCI_CAL)) {
u32 pld[4] = {0, 0, 0, 0};
ath_dbg(common, ATH_DBG_MCI, "MCI Send WLAN_CAL_DONE 0x%x\n",
mci_hw->wlan_cal_done);
MCI_GPM_SET_CAL_TYPE(pld, MCI_GPM_WLAN_CAL_DONE);
pld[MCI_GPM_WLAN_CAL_W_SEQUENCE] = mci_hw->wlan_cal_done++;
ar9003_mci_send_message(ah, MCI_GPM, 0, pld, 16, true, false);
}
if (rtt && !run_rtt_cal) {
agc_ctrl |= agc_supp_cals;
REG_WRITE(ah, AR_PHY_AGC_CONTROL, agc_ctrl);

View file

@ -4779,7 +4779,7 @@ static void ar9003_hw_set_power_per_rate_table(struct ath_hw *ah,
{
struct ath_common *common = ath9k_hw_common(ah);
struct ar9300_eeprom *pEepData = &ah->eeprom.ar9300_eep;
u16 twiceMaxEdgePower = MAX_RATE_POWER;
u16 twiceMaxEdgePower;
int i;
u16 scaledPower = 0, minCtlPower;
static const u16 ctlModesFor11a[] = {
@ -4880,6 +4880,7 @@ static void ar9003_hw_set_power_per_rate_table(struct ath_hw *ah,
ctlNum = AR9300_NUM_CTLS_5G;
}
twiceMaxEdgePower = MAX_RATE_POWER;
for (i = 0; (i < ctlNum) && ctlIndex[i]; i++) {
ath_dbg(common, ATH_DBG_REGULATORY,
"LOOP-Ctlidx %d: cfgCtl 0x%2.2x pCtlMode 0x%2.2x ctlIndex 0x%2.2x chan %d\n",

View file

@ -175,15 +175,47 @@ static bool ar9003_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
u32 isr = 0;
u32 mask2 = 0;
struct ath9k_hw_capabilities *pCap = &ah->caps;
u32 sync_cause = 0;
struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
u32 sync_cause = 0, async_cause;
if (REG_READ(ah, AR_INTR_ASYNC_CAUSE) & AR_INTR_MAC_IRQ) {
async_cause = REG_READ(ah, AR_INTR_ASYNC_CAUSE);
if (async_cause & (AR_INTR_MAC_IRQ | AR_INTR_ASYNC_MASK_MCI)) {
if ((REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M)
== AR_RTC_STATUS_ON)
isr = REG_READ(ah, AR_ISR);
}
if (async_cause & AR_INTR_ASYNC_MASK_MCI) {
u32 raw_intr, rx_msg_intr;
rx_msg_intr = REG_READ(ah, AR_MCI_INTERRUPT_RX_MSG_RAW);
raw_intr = REG_READ(ah, AR_MCI_INTERRUPT_RAW);
if ((raw_intr == 0xdeadbeef) || (rx_msg_intr == 0xdeadbeef))
ath_dbg(common, ATH_DBG_MCI,
"MCI gets 0xdeadbeef during MCI int processing"
"new raw_intr=0x%08x, new rx_msg_raw=0x%08x, "
"raw_intr=0x%08x, rx_msg_raw=0x%08x\n",
raw_intr, rx_msg_intr, mci->raw_intr,
mci->rx_msg_intr);
else {
mci->rx_msg_intr |= rx_msg_intr;
mci->raw_intr |= raw_intr;
*masked |= ATH9K_INT_MCI;
if (rx_msg_intr & AR_MCI_INTERRUPT_RX_MSG_CONT_INFO)
mci->cont_status =
REG_READ(ah, AR_MCI_CONT_STATUS);
REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW, rx_msg_intr);
REG_WRITE(ah, AR_MCI_INTERRUPT_RAW, raw_intr);
ath_dbg(common, ATH_DBG_MCI, "AR_INTR_SYNC_MCI\n");
}
}
sync_cause = REG_READ(ah, AR_INTR_SYNC_CAUSE) & AR_INTR_SYNC_DEFAULT;
*masked = 0;

File diff suppressed because it is too large

View file

@ -0,0 +1,102 @@
/*
* Copyright (c) 2010-2011 Atheros Communications Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef AR9003_MCI_H
#define AR9003_MCI_H
#define MCI_FLAG_DISABLE_TIMESTAMP 0x00000001 /* Disable time stamp */
/* Default remote BT device MCI COEX version */
#define MCI_GPM_COEX_MAJOR_VERSION_DEFAULT 3
#define MCI_GPM_COEX_MINOR_VERSION_DEFAULT 0
/* Local WLAN MCI COEX version */
#define MCI_GPM_COEX_MAJOR_VERSION_WLAN 3
#define MCI_GPM_COEX_MINOR_VERSION_WLAN 0
enum mci_gpm_coex_query_type {
MCI_GPM_COEX_QUERY_BT_ALL_INFO = BIT(0),
MCI_GPM_COEX_QUERY_BT_TOPOLOGY = BIT(1),
MCI_GPM_COEX_QUERY_BT_DEBUG = BIT(2),
};
enum mci_gpm_coex_halt_bt_gpm {
MCI_GPM_COEX_BT_GPM_UNHALT,
MCI_GPM_COEX_BT_GPM_HALT
};
enum mci_gpm_coex_bt_update_flags_op {
MCI_GPM_COEX_BT_FLAGS_READ,
MCI_GPM_COEX_BT_FLAGS_SET,
MCI_GPM_COEX_BT_FLAGS_CLEAR
};
#define MCI_NUM_BT_CHANNELS 79
#define MCI_BT_MCI_FLAGS_UPDATE_CORR 0x00000002
#define MCI_BT_MCI_FLAGS_UPDATE_HDR 0x00000004
#define MCI_BT_MCI_FLAGS_UPDATE_PLD 0x00000008
#define MCI_BT_MCI_FLAGS_LNA_CTRL 0x00000010
#define MCI_BT_MCI_FLAGS_DEBUG 0x00000020
#define MCI_BT_MCI_FLAGS_SCHED_MSG 0x00000040
#define MCI_BT_MCI_FLAGS_CONT_MSG 0x00000080
#define MCI_BT_MCI_FLAGS_COEX_GPM 0x00000100
#define MCI_BT_MCI_FLAGS_CPU_INT_MSG 0x00000200
#define MCI_BT_MCI_FLAGS_MCI_MODE 0x00000400
#define MCI_BT_MCI_FLAGS_AR9462_MODE 0x00001000
#define MCI_BT_MCI_FLAGS_OTHER 0x00010000
#define MCI_DEFAULT_BT_MCI_FLAGS 0x00011dde
#define MCI_TOGGLE_BT_MCI_FLAGS (MCI_BT_MCI_FLAGS_UPDATE_CORR | \
MCI_BT_MCI_FLAGS_UPDATE_HDR | \
MCI_BT_MCI_FLAGS_UPDATE_PLD | \
MCI_BT_MCI_FLAGS_MCI_MODE)
#define MCI_2G_FLAGS_CLEAR_MASK 0x00000000
#define MCI_2G_FLAGS_SET_MASK MCI_TOGGLE_BT_MCI_FLAGS
#define MCI_2G_FLAGS MCI_DEFAULT_BT_MCI_FLAGS
#define MCI_5G_FLAGS_CLEAR_MASK MCI_TOGGLE_BT_MCI_FLAGS
#define MCI_5G_FLAGS_SET_MASK 0x00000000
#define MCI_5G_FLAGS (MCI_DEFAULT_BT_MCI_FLAGS & \
~MCI_TOGGLE_BT_MCI_FLAGS)
/*
* Default value for AR9462 is 0x00002201
*/
#define ATH_MCI_CONFIG_CONCUR_TX 0x00000003
#define ATH_MCI_CONFIG_MCI_OBS_MCI 0x00000004
#define ATH_MCI_CONFIG_MCI_OBS_TXRX 0x00000008
#define ATH_MCI_CONFIG_MCI_OBS_BT 0x00000010
#define ATH_MCI_CONFIG_DISABLE_MCI_CAL 0x00000020
#define ATH_MCI_CONFIG_DISABLE_OSLA 0x00000040
#define ATH_MCI_CONFIG_DISABLE_FTP_STOMP 0x00000080
#define ATH_MCI_CONFIG_AGGR_THRESH 0x00000700
#define ATH_MCI_CONFIG_AGGR_THRESH_S 8
#define ATH_MCI_CONFIG_DISABLE_AGGR_THRESH 0x00000800
#define ATH_MCI_CONFIG_CLK_DIV 0x00003000
#define ATH_MCI_CONFIG_CLK_DIV_S 12
#define ATH_MCI_CONFIG_DISABLE_TUNING 0x00004000
#define ATH_MCI_CONFIG_MCI_WEIGHT_DBG 0x40000000
#define ATH_MCI_CONFIG_DISABLE_MCI 0x80000000
#define ATH_MCI_CONFIG_MCI_OBS_MASK (ATH_MCI_CONFIG_MCI_OBS_MCI | \
ATH_MCI_CONFIG_MCI_OBS_TXRX | \
ATH_MCI_CONFIG_MCI_OBS_BT)
#define ATH_MCI_CONFIG_MCI_OBS_GPIO 0x0000002F
#endif
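As a hedged aside, the *_S companions above follow the usual ath9k mask/shift convention, so a field such as the aggregation threshold would be extracted from and written back to the MCI config word roughly as below (MS()/SM() are the existing ath9k mask/shift helpers; the surrounding mci variable is illustrative):

	u32 thresh = MS(mci->config, ATH_MCI_CONFIG_AGGR_THRESH);

	mci->config &= ~ATH_MCI_CONFIG_AGGR_THRESH;
	mci->config |= SM(thresh + 1, ATH_MCI_CONFIG_AGGR_THRESH);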

View file

@ -490,6 +490,8 @@
#define AR_PHY_TEST_CTL_TSTADC_EN_S 8
#define AR_PHY_TEST_CTL_RX_OBS_SEL 0x3C00
#define AR_PHY_TEST_CTL_RX_OBS_SEL_S 10
#define AR_PHY_TEST_CTL_DEBUGPORT_SEL 0xe0000000
#define AR_PHY_TEST_CTL_DEBUGPORT_SEL_S 29
#define AR_PHY_TSTDAC (AR_SM_BASE + 0x168)
@ -1001,6 +1003,7 @@
/* GLB Registers */
#define AR_GLB_BASE 0x20000
#define AR_GLB_GPIO_CONTROL (AR_GLB_BASE)
#define AR_PHY_GLB_CONTROL (AR_GLB_BASE + 0x44)
#define AR_GLB_SCRATCH(_ah) (AR_GLB_BASE + \
(AR_SREV_9462_20(_ah) ? 0x4c : 0x50))

View file

@ -462,7 +462,7 @@ void ath9k_btcoex_timer_pause(struct ath_softc *sc);
#define ATH_LED_PIN_9287 8
#define ATH_LED_PIN_9300 10
#define ATH_LED_PIN_9485 6
#define ATH_LED_PIN_9462 0
#define ATH_LED_PIN_9462 4
#ifdef CONFIG_MAC80211_LEDS
void ath_init_leds(struct ath_softc *sc);
@ -647,6 +647,7 @@ struct ath_softc {
struct delayed_work tx_complete_work;
struct delayed_work hw_pll_work;
struct ath_btcoex btcoex;
struct ath_mci_coex mci_coex;
struct ath_descdma txsdma;

View file

@ -21,7 +21,7 @@ enum ath_bt_mode {
ATH_BT_COEX_MODE_LEGACY, /* legacy rx_clear mode */
ATH_BT_COEX_MODE_UNSLOTTED, /* untimed/unslotted mode */
ATH_BT_COEX_MODE_SLOTTED, /* slotted mode */
ATH_BT_COEX_MODE_DISALBED, /* coexistence disabled */
ATH_BT_COEX_MODE_DISABLED, /* coexistence disabled */
};
struct ath_btcoex_config {

View file

@ -54,8 +54,39 @@ enum ath_btcoex_scheme {
ATH_BTCOEX_CFG_MCI,
};
struct ath9k_hw_mci {
u32 raw_intr;
u32 rx_msg_intr;
u32 cont_status;
u32 gpm_addr;
u32 gpm_len;
u32 gpm_idx;
u32 sched_addr;
u32 wlan_channels[4];
u32 wlan_cal_seq;
u32 wlan_cal_done;
u32 config;
u8 *gpm_buf;
u8 *sched_buf;
bool ready;
bool update_2g5g;
bool is_2g;
bool query_bt;
bool unhalt_bt_gpm; /* need send UNHALT */
bool halted_bt_gpm; /* HALT sent */
bool need_flush_btinfo;
bool bt_version_known;
bool wlan_channels_update;
u8 wlan_ver_major;
u8 wlan_ver_minor;
u8 bt_ver_major;
u8 bt_ver_minor;
u8 bt_state;
};
struct ath_btcoex_hw {
enum ath_btcoex_scheme scheme;
struct ath9k_hw_mci mci;
bool enabled;
u8 wlanactive_gpio;
u8 btactive_gpio;

View file

@ -473,7 +473,7 @@ static void ath9k_hw_set_4k_power_per_rate_table(struct ath_hw *ah,
int i;
u16 twiceMinEdgePower;
u16 twiceMaxEdgePower = MAX_RATE_POWER;
u16 twiceMaxEdgePower;
u16 scaledPower = 0, minCtlPower;
u16 numCtlModes;
const u16 *pCtlMode;
@ -542,9 +542,7 @@ static void ath9k_hw_set_4k_power_per_rate_table(struct ath_hw *ah,
else
freq = centers.ctl_center;
if (ah->eep_ops->get_eeprom_ver(ah) == 14 &&
ah->eep_ops->get_eeprom_rev(ah) <= 2)
twiceMaxEdgePower = MAX_RATE_POWER;
twiceMaxEdgePower = MAX_RATE_POWER;
for (i = 0; (i < AR5416_EEP4K_NUM_CTLS) &&
pEepData->ctlIndex[i]; i++) {

View file

@ -569,7 +569,7 @@ static void ath9k_hw_set_ar9287_power_per_rate_table(struct ath_hw *ah,
#define REDUCE_SCALED_POWER_BY_TWO_CHAIN 6
#define REDUCE_SCALED_POWER_BY_THREE_CHAIN 10
u16 twiceMaxEdgePower = MAX_RATE_POWER;
u16 twiceMaxEdgePower;
int i;
struct cal_ctl_data_ar9287 *rep;
struct cal_target_power_leg targetPowerOfdm = {0, {0, 0, 0, 0} },
@ -669,6 +669,7 @@ static void ath9k_hw_set_ar9287_power_per_rate_table(struct ath_hw *ah,
else
freq = centers.ctl_center;
twiceMaxEdgePower = MAX_RATE_POWER;
/* Walk through the CTL indices stored in EEPROM */
for (i = 0; (i < AR9287_NUM_CTLS) && pEepData->ctlIndex[i]; i++) {
struct cal_ctl_edges *pRdEdgesPower;

View file

@ -1000,7 +1000,7 @@ static void ath9k_hw_set_def_power_per_rate_table(struct ath_hw *ah,
#define REDUCE_SCALED_POWER_BY_THREE_CHAIN 9 /* 10*log10(3)*2 */
struct ar5416_eeprom_def *pEepData = &ah->eeprom.def;
u16 twiceMaxEdgePower = MAX_RATE_POWER;
u16 twiceMaxEdgePower;
int i;
struct cal_ctl_data *rep;
struct cal_target_power_leg targetPowerOfdm, targetPowerCck = {
@ -1121,9 +1121,7 @@ static void ath9k_hw_set_def_power_per_rate_table(struct ath_hw *ah,
else
freq = centers.ctl_center;
if (ah->eep_ops->get_eeprom_ver(ah) == 14 &&
ah->eep_ops->get_eeprom_rev(ah) <= 2)
twiceMaxEdgePower = MAX_RATE_POWER;
twiceMaxEdgePower = MAX_RATE_POWER;
for (i = 0; (i < AR5416_NUM_CTLS) && pEepData->ctlIndex[i]; i++) {
if ((((cfgCtl & ~CTL_MODE_M) |

View file

@ -1350,6 +1350,7 @@ static bool ath9k_hw_set_reset_power_on(struct ath_hw *ah)
static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type)
{
bool ret = false;
if (AR_SREV_9300_20_OR_LATER(ah)) {
REG_WRITE(ah, AR_WA, ah->WARegVal);
@ -1361,13 +1362,20 @@ static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type)
switch (type) {
case ATH9K_RESET_POWER_ON:
return ath9k_hw_set_reset_power_on(ah);
ret = ath9k_hw_set_reset_power_on(ah);
break;
case ATH9K_RESET_WARM:
case ATH9K_RESET_COLD:
return ath9k_hw_set_reset(ah, type);
ret = ath9k_hw_set_reset(ah, type);
break;
default:
return false;
break;
}
if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI)
REG_WRITE(ah, AR_RTC_KEEP_AWAKE, 0x2);
return ret;
}
static bool ath9k_hw_chip_reset(struct ath_hw *ah,
@ -1506,6 +1514,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
struct ath9k_hw_cal_data *caldata, bool bChannelChange)
{
struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_hw_mci *mci_hw = &ah->btcoex_hw.mci;
u32 saveLedState;
struct ath9k_channel *curchan = ah->curchan;
u32 saveDefAntenna;
@ -1513,6 +1522,53 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
u64 tsf = 0;
int i, r;
bool allow_fbs = false;
bool mci = !!(ah->caps.hw_caps & ATH9K_HW_CAP_MCI);
bool save_fullsleep = ah->chip_fullsleep;
if (mci) {
ar9003_mci_2g5g_changed(ah, IS_CHAN_2GHZ(chan));
if (mci_hw->bt_state == MCI_BT_CAL_START) {
u32 payload[4] = {0, 0, 0, 0};
ath_dbg(common, ATH_DBG_MCI, "MCI stop rx for BT CAL");
mci_hw->bt_state = MCI_BT_CAL;
/*
* MCI FIX: disable mci interrupt here. This is to avoid
* the SW_MSG_DONE or RX_MSG bits triggering MCI_INT and
* leading to mci_intr reentry.
*/
ar9003_mci_disable_interrupt(ah);
ath_dbg(common, ATH_DBG_MCI, "send WLAN_CAL_GRANT");
MCI_GPM_SET_CAL_TYPE(payload, MCI_GPM_WLAN_CAL_GRANT);
ar9003_mci_send_message(ah, MCI_GPM, 0, payload,
16, true, false);
ath_dbg(common, ATH_DBG_MCI, "\nMCI BT is calibrating");
/* Wait up to 25ms for BT calibration to complete */
if (ar9003_mci_wait_for_gpm(ah, MCI_GPM_BT_CAL_DONE,
0, 25000))
ath_dbg(common, ATH_DBG_MCI,
"MCI got BT_CAL_DONE\n");
else
ath_dbg(common, ATH_DBG_MCI,
"MCI ### BT cal takes to long, force"
"bt_state to be bt_awake\n");
mci_hw->bt_state = MCI_BT_AWAKE;
/* MCI FIX: enable mci interrupt here */
ar9003_mci_enable_interrupt(ah);
return true;
}
}
if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE))
return -EIO;
@ -1550,12 +1606,29 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
if (ath9k_hw_channel_change(ah, chan)) {
ath9k_hw_loadnf(ah, ah->curchan);
ath9k_hw_start_nfcal(ah, true);
if (mci && mci_hw->ready)
ar9003_mci_2g5g_switch(ah, true);
if (AR_SREV_9271(ah))
ar9002_hw_load_ani_reg(ah, chan);
return 0;
}
}
if (mci) {
ar9003_mci_disable_interrupt(ah);
if (mci_hw->ready && !save_fullsleep) {
ar9003_mci_mute_bt(ah);
udelay(20);
REG_WRITE(ah, AR_BTCOEX_CTRL, 0);
}
mci_hw->bt_state = MCI_BT_SLEEP;
mci_hw->ready = false;
}
saveDefAntenna = REG_READ(ah, AR_DEF_ANTENNA);
if (saveDefAntenna == 0)
saveDefAntenna = 1;
@ -1611,6 +1684,9 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
if (r)
return r;
if (mci)
ar9003_mci_reset(ah, false, IS_CHAN_2GHZ(chan), save_fullsleep);
/*
* Some AR91xx SoC devices frequently fail to accept TSF writes
* right after the chip reset. When that happens, write a new
@ -1728,6 +1804,55 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
ath9k_hw_loadnf(ah, chan);
ath9k_hw_start_nfcal(ah, true);
if (mci && mci_hw->ready) {
if (IS_CHAN_2GHZ(chan) &&
(mci_hw->bt_state == MCI_BT_SLEEP)) {
if (ar9003_mci_check_int(ah,
AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET) ||
ar9003_mci_check_int(ah,
AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE)) {
/*
* BT is sleeping. Check if BT wakes up during
* WLAN calibration. If BT wakes up during
* WLAN calibration, need to go through all
* message exchanges again and recal.
*/
ath_dbg(common, ATH_DBG_MCI, "MCI BT wakes up"
"during WLAN calibration\n");
REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET |
AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE);
ath_dbg(common, ATH_DBG_MCI, "MCI send"
"REMOTE_RESET\n");
ar9003_mci_remote_reset(ah, true);
ar9003_mci_send_sys_waking(ah, true);
udelay(1);
if (IS_CHAN_2GHZ(chan))
ar9003_mci_send_lna_transfer(ah, true);
mci_hw->bt_state = MCI_BT_AWAKE;
ath_dbg(common, ATH_DBG_MCI, "MCI re-cal\n");
if (caldata) {
caldata->done_txiqcal_once = false;
caldata->done_txclcal_once = false;
caldata->rtt_hist.num_readings = 0;
}
if (!ath9k_hw_init_cal(ah, chan))
return -EIO;
}
}
ar9003_mci_enable_interrupt(ah);
}
ENABLE_REGWRITE_BUFFER(ah);
ath9k_hw_restore_chainmask(ah);
@ -1770,6 +1895,21 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
if (ah->btcoex_hw.enabled)
ath9k_hw_btcoex_enable(ah);
if (mci && mci_hw->ready) {
/*
* check BT state again to make
* sure it's not changed.
*/
ar9003_mci_sync_bt_state(ah);
ar9003_mci_2g5g_switch(ah, true);
if ((mci_hw->bt_state == MCI_BT_AWAKE) &&
(mci_hw->query_bt == true)) {
mci_hw->need_flush_btinfo = true;
}
}
if (AR_SREV_9300_20_OR_LATER(ah)) {
ar9003_hw_bb_watchdog_config(ah);
@ -1933,6 +2073,7 @@ static bool ath9k_hw_set_power_awake(struct ath_hw *ah, int setChip)
bool ath9k_hw_setpower(struct ath_hw *ah, enum ath9k_power_mode mode)
{
struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
int status = true, setChip = true;
static const char *modes[] = {
"AWAKE",
@ -1950,12 +2091,35 @@ bool ath9k_hw_setpower(struct ath_hw *ah, enum ath9k_power_mode mode)
switch (mode) {
case ATH9K_PM_AWAKE:
status = ath9k_hw_set_power_awake(ah, setChip);
if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI)
REG_WRITE(ah, AR_RTC_KEEP_AWAKE, 0x2);
break;
case ATH9K_PM_FULL_SLEEP:
if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI) {
if (ar9003_mci_state(ah, MCI_STATE_ENABLE, NULL) &&
(mci->bt_state != MCI_BT_SLEEP) &&
!mci->halted_bt_gpm) {
ath_dbg(common, ATH_DBG_MCI, "MCI halt BT GPM"
"(full_sleep)");
ar9003_mci_send_coex_halt_bt_gpm(ah,
true, true);
}
mci->ready = false;
REG_WRITE(ah, AR_RTC_KEEP_AWAKE, 0x2);
}
ath9k_set_power_sleep(ah, setChip);
ah->chip_fullsleep = true;
break;
case ATH9K_PM_NETWORK_SLEEP:
if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI)
REG_WRITE(ah, AR_RTC_KEEP_AWAKE, 0x2);
ath9k_set_power_network_sleep(ah, setChip);
break;
default:
@ -2148,6 +2312,8 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
if (AR_SREV_9485(ah) || AR_SREV_9285(ah) || AR_SREV_9330(ah))
chip_chainmask = 1;
else if (AR_SREV_9462(ah))
chip_chainmask = 3;
else if (!AR_SREV_9280_20_OR_LATER(ah))
chip_chainmask = 7;
else if (!AR_SREV_9300_20_OR_LATER(ah) || AR_SREV_9340(ah))
@ -2233,7 +2399,9 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
pCap->hw_caps |= ATH9K_HW_CAP_4KB_SPLITTRANS;
if (common->btcoex_enabled) {
if (AR_SREV_9300_20_OR_LATER(ah)) {
if (AR_SREV_9462(ah))
btcoex_hw->scheme = ATH_BTCOEX_CFG_MCI;
else if (AR_SREV_9300_20_OR_LATER(ah)) {
btcoex_hw->scheme = ATH_BTCOEX_CFG_3WIRE;
btcoex_hw->btactive_gpio = ATH_BTACTIVE_GPIO_9300;
btcoex_hw->wlanactive_gpio = ATH_WLANACTIVE_GPIO_9300;
@ -2331,7 +2499,7 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
if (AR_SREV_9300_20_OR_LATER(ah)) {
ah->enabled_cals |= TX_IQ_CAL;
if (!AR_SREV_9330(ah))
if (AR_SREV_9485_OR_LATER(ah))
ah->enabled_cals |= TX_IQ_ON_AGC_CAL;
}
if (AR_SREV_9462(ah))

View file

@ -126,6 +126,16 @@
#define AR_GPIO_OUTPUT_MUX_AS_RX_CLEAR_EXTERNAL 4
#define AR_GPIO_OUTPUT_MUX_AS_MAC_NETWORK_LED 5
#define AR_GPIO_OUTPUT_MUX_AS_MAC_POWER_LED 6
#define AR_GPIO_OUTPUT_MUX_AS_MCI_WLAN_DATA 0x16
#define AR_GPIO_OUTPUT_MUX_AS_MCI_WLAN_CLK 0x17
#define AR_GPIO_OUTPUT_MUX_AS_MCI_BT_DATA 0x18
#define AR_GPIO_OUTPUT_MUX_AS_MCI_BT_CLK 0x19
#define AR_GPIO_OUTPUT_MUX_AS_WL_IN_TX 0x14
#define AR_GPIO_OUTPUT_MUX_AS_WL_IN_RX 0x13
#define AR_GPIO_OUTPUT_MUX_AS_BT_IN_TX 9
#define AR_GPIO_OUTPUT_MUX_AS_BT_IN_RX 8
#define AR_GPIO_OUTPUT_MUX_AS_RUCKUS_STROBE 0x1d
#define AR_GPIO_OUTPUT_MUX_AS_RUCKUS_DATA 0x1e
#define AR_GPIOD_MASK 0x00001FFF
#define AR_GPIO_BIT(_gpio) (1 << (_gpio))
@ -266,6 +276,7 @@ enum ath9k_int {
ATH9K_INT_TX = 0x00000040,
ATH9K_INT_TXDESC = 0x00000080,
ATH9K_INT_TIM_TIMER = 0x00000100,
ATH9K_INT_MCI = 0x00000200,
ATH9K_INT_BB_WATCHDOG = 0x00000400,
ATH9K_INT_TXURN = 0x00000800,
ATH9K_INT_MIB = 0x00001000,
@ -417,6 +428,25 @@ enum ath9k_rx_qtype {
ATH9K_RX_QUEUE_MAX,
};
enum mci_message_header { /* length of payload */
MCI_LNA_CTRL = 0x10, /* len = 0 */
MCI_CONT_NACK = 0x20, /* len = 0 */
MCI_CONT_INFO = 0x30, /* len = 4 */
MCI_CONT_RST = 0x40, /* len = 0 */
MCI_SCHD_INFO = 0x50, /* len = 16 */
MCI_CPU_INT = 0x60, /* len = 4 */
MCI_SYS_WAKING = 0x70, /* len = 0 */
MCI_GPM = 0x80, /* len = 16 */
MCI_LNA_INFO = 0x90, /* len = 1 */
MCI_LNA_STATE = 0x94,
MCI_LNA_TAKE = 0x98,
MCI_LNA_TRANS = 0x9c,
MCI_SYS_SLEEPING = 0xa0, /* len = 0 */
MCI_REQ_WAKE = 0xc0, /* len = 0 */
MCI_DEBUG_16 = 0xfe, /* len = 2 */
MCI_REMOTE_RESET = 0xff /* len = 16 */
};
enum ath_mci_gpm_coex_profile_type {
MCI_GPM_COEX_PROFILE_UNKNOWN,
MCI_GPM_COEX_PROFILE_RFCOMM,
@ -427,6 +457,132 @@ enum ath_mci_gpm_coex_profile_type {
MCI_GPM_COEX_PROFILE_MAX
};
/* MCI GPM/Coex opcode/type definitions */
enum {
MCI_GPM_COEX_W_GPM_PAYLOAD = 1,
MCI_GPM_COEX_B_GPM_TYPE = 4,
MCI_GPM_COEX_B_GPM_OPCODE = 5,
/* MCI_GPM_WLAN_CAL_REQ, MCI_GPM_WLAN_CAL_DONE */
MCI_GPM_WLAN_CAL_W_SEQUENCE = 2,
/* MCI_GPM_COEX_VERSION_QUERY */
/* MCI_GPM_COEX_VERSION_RESPONSE */
MCI_GPM_COEX_B_MAJOR_VERSION = 6,
MCI_GPM_COEX_B_MINOR_VERSION = 7,
/* MCI_GPM_COEX_STATUS_QUERY */
MCI_GPM_COEX_B_BT_BITMAP = 6,
MCI_GPM_COEX_B_WLAN_BITMAP = 7,
/* MCI_GPM_COEX_HALT_BT_GPM */
MCI_GPM_COEX_B_HALT_STATE = 6,
/* MCI_GPM_COEX_WLAN_CHANNELS */
MCI_GPM_COEX_B_CHANNEL_MAP = 6,
/* MCI_GPM_COEX_BT_PROFILE_INFO */
MCI_GPM_COEX_B_PROFILE_TYPE = 6,
MCI_GPM_COEX_B_PROFILE_LINKID = 7,
MCI_GPM_COEX_B_PROFILE_STATE = 8,
MCI_GPM_COEX_B_PROFILE_ROLE = 9,
MCI_GPM_COEX_B_PROFILE_RATE = 10,
MCI_GPM_COEX_B_PROFILE_VOTYPE = 11,
MCI_GPM_COEX_H_PROFILE_T = 12,
MCI_GPM_COEX_B_PROFILE_W = 14,
MCI_GPM_COEX_B_PROFILE_A = 15,
/* MCI_GPM_COEX_BT_STATUS_UPDATE */
MCI_GPM_COEX_B_STATUS_TYPE = 6,
MCI_GPM_COEX_B_STATUS_LINKID = 7,
MCI_GPM_COEX_B_STATUS_STATE = 8,
/* MCI_GPM_COEX_BT_UPDATE_FLAGS */
MCI_GPM_COEX_W_BT_FLAGS = 6,
MCI_GPM_COEX_B_BT_FLAGS_OP = 10
};
enum mci_gpm_subtype {
MCI_GPM_BT_CAL_REQ = 0,
MCI_GPM_BT_CAL_GRANT = 1,
MCI_GPM_BT_CAL_DONE = 2,
MCI_GPM_WLAN_CAL_REQ = 3,
MCI_GPM_WLAN_CAL_GRANT = 4,
MCI_GPM_WLAN_CAL_DONE = 5,
MCI_GPM_COEX_AGENT = 0x0c,
MCI_GPM_RSVD_PATTERN = 0xfe,
MCI_GPM_RSVD_PATTERN32 = 0xfefefefe,
MCI_GPM_BT_DEBUG = 0xff
};
enum mci_bt_state {
MCI_BT_SLEEP,
MCI_BT_AWAKE,
MCI_BT_CAL_START,
MCI_BT_CAL
};
/* Type of state query */
enum mci_state_type {
MCI_STATE_ENABLE,
MCI_STATE_INIT_GPM_OFFSET,
MCI_STATE_NEXT_GPM_OFFSET,
MCI_STATE_LAST_GPM_OFFSET,
MCI_STATE_BT,
MCI_STATE_SET_BT_SLEEP,
MCI_STATE_SET_BT_AWAKE,
MCI_STATE_SET_BT_CAL_START,
MCI_STATE_SET_BT_CAL,
MCI_STATE_LAST_SCHD_MSG_OFFSET,
MCI_STATE_REMOTE_SLEEP,
MCI_STATE_CONT_RSSI_POWER,
MCI_STATE_CONT_PRIORITY,
MCI_STATE_CONT_TXRX,
MCI_STATE_RESET_REQ_WAKE,
MCI_STATE_SEND_WLAN_COEX_VERSION,
MCI_STATE_SET_BT_COEX_VERSION,
MCI_STATE_SEND_WLAN_CHANNELS,
MCI_STATE_SEND_VERSION_QUERY,
MCI_STATE_SEND_STATUS_QUERY,
MCI_STATE_NEED_FLUSH_BT_INFO,
MCI_STATE_SET_CONCUR_TX_PRI,
MCI_STATE_RECOVER_RX,
MCI_STATE_NEED_FTP_STOMP,
MCI_STATE_NEED_TUNING,
MCI_STATE_DEBUG,
MCI_STATE_MAX
};
enum mci_gpm_coex_opcode {
MCI_GPM_COEX_VERSION_QUERY,
MCI_GPM_COEX_VERSION_RESPONSE,
MCI_GPM_COEX_STATUS_QUERY,
MCI_GPM_COEX_HALT_BT_GPM,
MCI_GPM_COEX_WLAN_CHANNELS,
MCI_GPM_COEX_BT_PROFILE_INFO,
MCI_GPM_COEX_BT_STATUS_UPDATE,
MCI_GPM_COEX_BT_UPDATE_FLAGS
};
#define MCI_GPM_NOMORE 0
#define MCI_GPM_MORE 1
#define MCI_GPM_INVALID 0xffffffff
#define MCI_GPM_RECYCLE(_p_gpm) do { \
*(((u32 *)_p_gpm) + MCI_GPM_COEX_W_GPM_PAYLOAD) = \
MCI_GPM_RSVD_PATTERN32; \
} while (0)
#define MCI_GPM_TYPE(_p_gpm) \
(*(((u8 *)(_p_gpm)) + MCI_GPM_COEX_B_GPM_TYPE) & 0xff)
#define MCI_GPM_OPCODE(_p_gpm) \
(*(((u8 *)(_p_gpm)) + MCI_GPM_COEX_B_GPM_OPCODE) & 0xff)
#define MCI_GPM_SET_CAL_TYPE(_p_gpm, _cal_type) do { \
*(((u8 *)(_p_gpm)) + MCI_GPM_COEX_B_GPM_TYPE) = (_cal_type) & 0xff;\
} while (0)
#define MCI_GPM_SET_TYPE_OPCODE(_p_gpm, _type, _opcode) do { \
*(((u8 *)(_p_gpm)) + MCI_GPM_COEX_B_GPM_TYPE) = (_type) & 0xff; \
*(((u8 *)(_p_gpm)) + MCI_GPM_COEX_B_GPM_OPCODE) = (_opcode) & 0xff;\
} while (0)
#define MCI_GPM_IS_CAL_TYPE(_type) ((_type) <= MCI_GPM_WLAN_CAL_DONE)
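Putting the accessors above together: the calibration handshake elsewhere in this series builds and sends a GPM payload along these lines (sketch reusing the exact calls from the calibration code):

	u32 pld[4] = {0, 0, 0, 0};

	/* Tag the payload as a WLAN_CAL_REQ GPM and stamp the sequence */
	MCI_GPM_SET_CAL_TYPE(pld, MCI_GPM_WLAN_CAL_REQ);
	pld[MCI_GPM_WLAN_CAL_W_SEQUENCE] = mci_hw->wlan_cal_seq++;

	/* 16 byte GPM, wait for completion, don't check BT state first */
	ar9003_mci_send_message(ah, MCI_GPM, 0, pld, 16, true, false);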
struct ath9k_beacon_state {
u32 bs_nexttbtt;
u32 bs_nextdtim;
@ -1047,6 +1203,32 @@ void ath9k_ani_reset(struct ath_hw *ah, bool is_scanning);
void ath9k_hw_proc_mib_event(struct ath_hw *ah);
void ath9k_hw_ani_monitor(struct ath_hw *ah, struct ath9k_channel *chan);
bool ar9003_mci_send_message(struct ath_hw *ah, u8 header, u32 flag,
u32 *payload, u8 len, bool wait_done,
bool check_bt);
void ar9003_mci_mute_bt(struct ath_hw *ah);
u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type, u32 *p_data);
void ar9003_mci_setup(struct ath_hw *ah, u32 gpm_addr, void *gpm_buf,
u16 len, u32 sched_addr);
void ar9003_mci_cleanup(struct ath_hw *ah);
void ar9003_mci_send_coex_halt_bt_gpm(struct ath_hw *ah, bool halt,
bool wait_done);
u32 ar9003_mci_wait_for_gpm(struct ath_hw *ah, u8 gpm_type,
u8 gpm_opcode, int time_out);
void ar9003_mci_2g5g_changed(struct ath_hw *ah, bool is_2g);
void ar9003_mci_disable_interrupt(struct ath_hw *ah);
void ar9003_mci_enable_interrupt(struct ath_hw *ah);
void ar9003_mci_2g5g_switch(struct ath_hw *ah, bool wait_done);
void ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g,
bool is_full_sleep);
bool ar9003_mci_check_int(struct ath_hw *ah, u32 ints);
void ar9003_mci_remote_reset(struct ath_hw *ah, bool wait_done);
void ar9003_mci_send_sys_waking(struct ath_hw *ah, bool wait_done);
void ar9003_mci_send_lna_transfer(struct ath_hw *ah, bool wait_done);
void ar9003_mci_sync_bt_state(struct ath_hw *ah);
void ar9003_mci_get_interrupt(struct ath_hw *ah, u32 *raw_intr,
u32 *rx_msg_intr);
#define ATH9K_CLOCK_RATE_CCK 22
#define ATH9K_CLOCK_RATE_5GHZ_OFDM 40
#define ATH9K_CLOCK_RATE_2GHZ_OFDM 44

View file

@ -408,6 +408,7 @@ fail:
static int ath9k_init_btcoex(struct ath_softc *sc)
{
struct ath_txq *txq;
struct ath_hw *ah = sc->sc_ah;
int r;
switch (sc->sc_ah->btcoex_hw.scheme) {
@ -423,9 +424,38 @@ static int ath9k_init_btcoex(struct ath_softc *sc)
return -1;
txq = sc->tx.txq_map[WME_AC_BE];
ath9k_hw_init_btcoex_hw(sc->sc_ah, txq->axq_qnum);
sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
break;
case ATH_BTCOEX_CFG_MCI:
sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
sc->btcoex.duty_cycle = ATH_BTCOEX_DEF_DUTY_CYCLE;
INIT_LIST_HEAD(&sc->btcoex.mci.info);
r = ath_mci_setup(sc);
if (r)
return r;
if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_MCI) {
ah->btcoex_hw.mci.ready = false;
ah->btcoex_hw.mci.bt_state = 0;
ah->btcoex_hw.mci.bt_ver_major = 3;
ah->btcoex_hw.mci.bt_ver_minor = 0;
ah->btcoex_hw.mci.bt_version_known = false;
ah->btcoex_hw.mci.update_2g5g = true;
ah->btcoex_hw.mci.is_2g = true;
ah->btcoex_hw.mci.wlan_channels_update = false;
ah->btcoex_hw.mci.wlan_channels[0] = 0x00000000;
ah->btcoex_hw.mci.wlan_channels[1] = 0xffffffff;
ah->btcoex_hw.mci.wlan_channels[2] = 0xffffffff;
ah->btcoex_hw.mci.wlan_channels[3] = 0x7fffffff;
ah->btcoex_hw.mci.query_bt = true;
ah->btcoex_hw.mci.unhalt_bt_gpm = true;
ah->btcoex_hw.mci.halted_bt_gpm = false;
ah->btcoex_hw.mci.need_flush_btinfo = false;
ah->btcoex_hw.mci.wlan_cal_seq = 0;
ah->btcoex_hw.mci.wlan_cal_done = 0;
ah->btcoex_hw.mci.config = 0x2201;
}
break;
default:
WARN_ON(1);
@ -839,6 +869,9 @@ static void ath9k_deinit_softc(struct ath_softc *sc)
sc->sc_ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
ath_gen_timer_free(sc->sc_ah, sc->btcoex.no_stomp_timer);
if (sc->sc_ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_MCI)
ath_mci_cleanup(sc);
for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
if (ATH_TXQ_SETUP(sc, i))
ath_tx_cleanupq(sc, &sc->tx.txq[i]);

View file

@ -760,7 +760,10 @@ bool ath9k_hw_intrpend(struct ath_hw *ah)
return true;
host_isr = REG_READ(ah, AR_INTR_ASYNC_CAUSE);
if ((host_isr & AR_INTR_MAC_IRQ) && (host_isr != AR_INTR_SPURIOUS))
if (((host_isr & AR_INTR_MAC_IRQ) ||
(host_isr & AR_INTR_ASYNC_MASK_MCI)) &&
(host_isr != AR_INTR_SPURIOUS))
return true;
host_isr = REG_READ(ah, AR_INTR_SYNC_CAUSE);
@ -798,6 +801,7 @@ void ath9k_hw_enable_interrupts(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
u32 sync_default = AR_INTR_SYNC_DEFAULT;
u32 async_mask;
if (!(ah->imask & ATH9K_INT_GLOBAL))
return;
@ -812,13 +816,16 @@ void ath9k_hw_enable_interrupts(struct ath_hw *ah)
if (AR_SREV_9340(ah))
sync_default &= ~AR_INTR_SYNC_HOST1_FATAL;
async_mask = AR_INTR_MAC_IRQ;
if (ah->imask & ATH9K_INT_MCI)
async_mask |= AR_INTR_ASYNC_MASK_MCI;
ath_dbg(common, ATH_DBG_INTERRUPT, "enable IER\n");
REG_WRITE(ah, AR_IER, AR_IER_ENABLE);
if (!AR_SREV_9100(ah)) {
REG_WRITE(ah, AR_INTR_ASYNC_ENABLE,
AR_INTR_MAC_IRQ);
REG_WRITE(ah, AR_INTR_ASYNC_MASK, AR_INTR_MAC_IRQ);
REG_WRITE(ah, AR_INTR_ASYNC_ENABLE, async_mask);
REG_WRITE(ah, AR_INTR_ASYNC_MASK, async_mask);
REG_WRITE(ah, AR_INTR_SYNC_ENABLE, sync_default);
REG_WRITE(ah, AR_INTR_SYNC_MASK, sync_default);

View file

@ -742,6 +742,9 @@ void ath9k_tasklet(unsigned long data)
if (status & ATH9K_INT_GENTIMER)
ath_gen_timer_isr(sc->sc_ah);
if (status & ATH9K_INT_MCI)
ath_mci_intr(sc);
out:
/* re-enable hardware interrupt */
ath9k_hw_enable_interrupts(ah);
@ -764,7 +767,8 @@ irqreturn_t ath_isr(int irq, void *dev)
ATH9K_INT_BMISS | \
ATH9K_INT_CST | \
ATH9K_INT_TSFOOR | \
ATH9K_INT_GENTIMER)
ATH9K_INT_GENTIMER | \
ATH9K_INT_MCI)
struct ath_softc *sc = dev;
struct ath_hw *ah = sc->sc_ah;
@ -1119,6 +1123,9 @@ static int ath9k_start(struct ieee80211_hw *hw)
if (ah->caps.hw_caps & ATH9K_HW_CAP_HT)
ah->imask |= ATH9K_INT_CST;
if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI)
ah->imask |= ATH9K_INT_MCI;
sc->sc_flags &= ~SC_OP_INVALID;
sc->sc_ah->is_monitoring = false;

View file

@ -14,6 +14,9 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include "ath9k.h"
#include "mci.h"
@ -181,6 +184,56 @@ static void ath_mci_update_scheme(struct ath_softc *sc)
ath9k_btcoex_timer_resume(sc);
}
static void ath_mci_cal_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload)
{
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
u32 payload[4] = {0, 0, 0, 0};
switch (opcode) {
case MCI_GPM_BT_CAL_REQ:
ath_dbg(common, ATH_DBG_MCI, "MCI received BT_CAL_REQ\n");
if (ar9003_mci_state(ah, MCI_STATE_BT, NULL) == MCI_BT_AWAKE) {
ar9003_mci_state(ah, MCI_STATE_SET_BT_CAL_START, NULL);
ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
} else
ath_dbg(common, ATH_DBG_MCI,
"MCI State mismatches: %d\n",
ar9003_mci_state(ah, MCI_STATE_BT, NULL));
break;
case MCI_GPM_BT_CAL_DONE:
ath_dbg(common, ATH_DBG_MCI, "MCI received BT_CAL_DONE\n");
if (ar9003_mci_state(ah, MCI_STATE_BT, NULL) == MCI_BT_CAL)
ath_dbg(common, ATH_DBG_MCI, "MCI error illegal!\n");
else
ath_dbg(common, ATH_DBG_MCI, "MCI BT not in CAL state\n");
break;
case MCI_GPM_BT_CAL_GRANT:
ath_dbg(common, ATH_DBG_MCI, "MCI received BT_CAL_GRANT\n");
/* Send WLAN_CAL_DONE for now */
ath_dbg(common, ATH_DBG_MCI, "MCI send WLAN_CAL_DONE\n");
MCI_GPM_SET_CAL_TYPE(payload, MCI_GPM_WLAN_CAL_DONE);
ar9003_mci_send_message(sc->sc_ah, MCI_GPM, 0, payload,
16, false, true);
break;
default:
ath_dbg(common, ATH_DBG_MCI, "MCI Unknown GPM CAL message\n");
break;
}
}
void ath_mci_process_profile(struct ath_softc *sc,
struct ath_mci_profile_info *info)
{
@ -252,3 +305,369 @@ void ath_mci_process_status(struct ath_softc *sc,
if (old_num_mgmt != mci->num_mgmt)
ath_mci_update_scheme(sc);
}
static void ath_mci_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload)
{
struct ath_hw *ah = sc->sc_ah;
struct ath_mci_profile_info profile_info;
struct ath_mci_profile_status profile_status;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
u32 version;
u8 major;
u8 minor;
u32 seq_num;
switch (opcode) {
case MCI_GPM_COEX_VERSION_QUERY:
ath_dbg(common, ATH_DBG_MCI,
"MCI Recv GPM COEX Version Query.\n");
version = ar9003_mci_state(ah,
MCI_STATE_SEND_WLAN_COEX_VERSION, NULL);
break;
case MCI_GPM_COEX_VERSION_RESPONSE:
ath_dbg(common, ATH_DBG_MCI,
"MCI Recv GPM COEX Version Response.\n");
major = *(rx_payload + MCI_GPM_COEX_B_MAJOR_VERSION);
minor = *(rx_payload + MCI_GPM_COEX_B_MINOR_VERSION);
ath_dbg(common, ATH_DBG_MCI,
"MCI BT Coex version: %d.%d\n", major, minor);
version = (major << 8) + minor;
version = ar9003_mci_state(ah,
MCI_STATE_SET_BT_COEX_VERSION, &version);
break;
case MCI_GPM_COEX_STATUS_QUERY:
ath_dbg(common, ATH_DBG_MCI,
"MCI Recv GPM COEX Status Query = 0x%02x.\n",
*(rx_payload + MCI_GPM_COEX_B_WLAN_BITMAP));
ar9003_mci_state(ah,
MCI_STATE_SEND_WLAN_CHANNELS, NULL);
break;
case MCI_GPM_COEX_BT_PROFILE_INFO:
ath_dbg(common, ATH_DBG_MCI,
"MCI Recv GPM Coex BT profile info\n");
memcpy(&profile_info,
(rx_payload + MCI_GPM_COEX_B_PROFILE_TYPE), 10);
if ((profile_info.type == MCI_GPM_COEX_PROFILE_UNKNOWN)
|| (profile_info.type >=
MCI_GPM_COEX_PROFILE_MAX)) {
ath_dbg(common, ATH_DBG_MCI,
"illegal profile type = %d,"
"state = %d\n", profile_info.type,
profile_info.start);
break;
}
ath_mci_process_profile(sc, &profile_info);
break;
case MCI_GPM_COEX_BT_STATUS_UPDATE:
profile_status.is_link = *(rx_payload +
MCI_GPM_COEX_B_STATUS_TYPE);
profile_status.conn_handle = *(rx_payload +
MCI_GPM_COEX_B_STATUS_LINKID);
profile_status.is_critical = *(rx_payload +
MCI_GPM_COEX_B_STATUS_STATE);
seq_num = *((u32 *)(rx_payload + 12));
ath_dbg(common, ATH_DBG_MCI,
"MCI Recv GPM COEX BT_Status_Update: "
"is_link=%d, linkId=%d, state=%d, SEQ=%d\n",
profile_status.is_link, profile_status.conn_handle,
profile_status.is_critical, seq_num);
ath_mci_process_status(sc, &profile_status);
break;
default:
ath_dbg(common, ATH_DBG_MCI,
"MCI Unknown GPM COEX message = 0x%02x\n", opcode);
break;
}
}
static int ath_mci_buf_alloc(struct ath_softc *sc, struct ath_mci_buf *buf)
{
int error = 0;
buf->bf_addr = dma_alloc_coherent(sc->dev, buf->bf_len,
&buf->bf_paddr, GFP_KERNEL);
if (buf->bf_addr == NULL) {
error = -ENOMEM;
goto fail;
}
return 0;
fail:
memset(buf, 0, sizeof(*buf));
return error;
}
static void ath_mci_buf_free(struct ath_softc *sc, struct ath_mci_buf *buf)
{
if (buf->bf_addr) {
dma_free_coherent(sc->dev, buf->bf_len, buf->bf_addr,
buf->bf_paddr);
memset(buf, 0, sizeof(*buf));
}
}
int ath_mci_setup(struct ath_softc *sc)
{
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath_mci_coex *mci = &sc->mci_coex;
int error = 0;
mci->sched_buf.bf_len = ATH_MCI_SCHED_BUF_SIZE + ATH_MCI_GPM_BUF_SIZE;
if (ath_mci_buf_alloc(sc, &mci->sched_buf)) {
ath_dbg(common, ATH_DBG_FATAL, "MCI buffer alloc failed\n");
error = -ENOMEM;
goto fail;
}
mci->sched_buf.bf_len = ATH_MCI_SCHED_BUF_SIZE;
memset(mci->sched_buf.bf_addr, MCI_GPM_RSVD_PATTERN,
mci->sched_buf.bf_len);
mci->gpm_buf.bf_len = ATH_MCI_GPM_BUF_SIZE;
mci->gpm_buf.bf_addr = (u8 *)mci->sched_buf.bf_addr +
mci->sched_buf.bf_len;
mci->gpm_buf.bf_paddr = mci->sched_buf.bf_paddr + mci->sched_buf.bf_len;
/* initialize the buffer */
memset(mci->gpm_buf.bf_addr, MCI_GPM_RSVD_PATTERN, mci->gpm_buf.bf_len);
ar9003_mci_setup(sc->sc_ah, mci->gpm_buf.bf_paddr,
mci->gpm_buf.bf_addr, (mci->gpm_buf.bf_len >> 4),
mci->sched_buf.bf_paddr);
fail:
return error;
}
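A minimal sanity sketch (not part of this patch), assuming the buffer sizes defined in mci.h: the scheduling and GPM regions are carved out of one coherent allocation, and ar9003_mci_setup() is handed the GPM length in 16-byte entries.

static void ath_mci_layout_example(struct ath_mci_coex *mci)
{
	/* The GPM region sits directly behind the scheduling region. */
	WARN_ON(mci->gpm_buf.bf_addr !=
		(void *)((u8 *)mci->sched_buf.bf_addr + ATH_MCI_SCHED_BUF_SIZE));

	/* 256-byte GPM buffer >> 4 == 16 entries of 16 bytes each. */
	WARN_ON((mci->gpm_buf.bf_len >> 4) != ATH_MCI_GPM_MAX_ENTRY);
}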
void ath_mci_cleanup(struct ath_softc *sc)
{
struct ath_hw *ah = sc->sc_ah;
struct ath_mci_coex *mci = &sc->mci_coex;
/*
 * Both the scheduling and GPM buffers are released here: they share a
 * single coherent allocation, so freeing sched_buf frees both.
 */
ath_mci_buf_free(sc, &mci->sched_buf);
ar9003_mci_cleanup(ah);
}
void ath_mci_intr(struct ath_softc *sc)
{
struct ath_mci_coex *mci = &sc->mci_coex;
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
u32 mci_int, mci_int_rxmsg;
u32 offset, subtype, opcode;
u32 *pgpm;
u32 more_data = MCI_GPM_MORE;
bool skip_gpm = false;
ar9003_mci_get_interrupt(sc->sc_ah, &mci_int, &mci_int_rxmsg);
if (ar9003_mci_state(ah, MCI_STATE_ENABLE, NULL) == 0) {
ar9003_mci_state(sc->sc_ah, MCI_STATE_INIT_GPM_OFFSET, NULL);
ath_dbg(common, ATH_DBG_MCI,
"MCI interrupt but MCI disabled\n");
ath_dbg(common, ATH_DBG_MCI,
"MCI interrupt: intr = 0x%x, intr_rxmsg = 0x%x\n",
mci_int, mci_int_rxmsg);
return;
}
if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE) {
u32 payload[4] = { 0xffffffff, 0xffffffff,
0xffffffff, 0xffffff00};
/*
 * The following REMOTE_RESET and SYS_WAKING messages used to be sent
 * only when BT woke up. Now they are always sent, as a recovery
 * method to reset BT MCI's RX alignment.
 */
ath_dbg(common, ATH_DBG_MCI, "MCI interrupt send REMOTE_RESET\n");
ar9003_mci_send_message(ah, MCI_REMOTE_RESET, 0,
payload, 16, true, false);
ath_dbg(common, ATH_DBG_MCI, "MCI interrupt send SYS_WAKING\n");
ar9003_mci_send_message(ah, MCI_SYS_WAKING, 0,
NULL, 0, true, false);
mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE;
ar9003_mci_state(ah, MCI_STATE_RESET_REQ_WAKE, NULL);
/*
 * Always do this for recovery, 2G/5G toggling and LNA_TRANS.
 */
ath_dbg(common, ATH_DBG_MCI, "MCI Set BT state to AWAKE.\n");
ar9003_mci_state(ah, MCI_STATE_SET_BT_AWAKE, NULL);
}
/* Processing SYS_WAKING/SYS_SLEEPING */
if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING) {
mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING;
if (ar9003_mci_state(ah, MCI_STATE_BT, NULL) == MCI_BT_SLEEP) {
if (ar9003_mci_state(ah, MCI_STATE_REMOTE_SLEEP, NULL)
== MCI_BT_SLEEP)
ath_dbg(common, ATH_DBG_MCI,
"MCI BT stays in sleep mode\n");
else {
ath_dbg(common, ATH_DBG_MCI,
"MCI Set BT state to AWAKE.\n");
ar9003_mci_state(ah,
MCI_STATE_SET_BT_AWAKE, NULL);
}
} else
ath_dbg(common, ATH_DBG_MCI,
"MCI BT stays in AWAKE mode.\n");
}
if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING) {
mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING;
if (ar9003_mci_state(ah, MCI_STATE_BT, NULL) == MCI_BT_AWAKE) {
if (ar9003_mci_state(ah, MCI_STATE_REMOTE_SLEEP, NULL)
== MCI_BT_AWAKE)
ath_dbg(common, ATH_DBG_MCI,
"MCI BT stays in AWAKE mode.\n");
else {
ath_dbg(common, ATH_DBG_MCI,
"MCI SetBT state to SLEEP\n");
ar9003_mci_state(ah, MCI_STATE_SET_BT_SLEEP,
NULL);
}
} else
ath_dbg(common, ATH_DBG_MCI,
"MCI BT stays in SLEEP mode\n");
}
if ((mci_int & AR_MCI_INTERRUPT_RX_INVALID_HDR) ||
(mci_int & AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT)) {
ath_dbg(common, ATH_DBG_MCI, "MCI RX broken, skip GPM msgs\n");
ar9003_mci_state(ah, MCI_STATE_RECOVER_RX, NULL);
skip_gpm = true;
}
if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO) {
mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO;
offset = ar9003_mci_state(ah, MCI_STATE_LAST_SCHD_MSG_OFFSET,
NULL);
}
if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_GPM) {
mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_GPM;
while (more_data == MCI_GPM_MORE) {
pgpm = mci->gpm_buf.bf_addr;
offset = ar9003_mci_state(ah,
MCI_STATE_NEXT_GPM_OFFSET, &more_data);
if (offset == MCI_GPM_INVALID)
break;
pgpm += (offset >> 2); /* offset is in bytes, pgpm is a u32 pointer */
/*
 * The first dword is a timer.
 * The real data starts at the 2nd dword.
 */
subtype = MCI_GPM_TYPE(pgpm);
opcode = MCI_GPM_OPCODE(pgpm);
if (!skip_gpm) {
if (MCI_GPM_IS_CAL_TYPE(subtype))
ath_mci_cal_msg(sc, subtype,
(u8 *) pgpm);
else {
switch (subtype) {
case MCI_GPM_COEX_AGENT:
ath_mci_msg(sc, opcode,
(u8 *) pgpm);
break;
default:
break;
}
}
}
MCI_GPM_RECYCLE(pgpm);
}
}
if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_HW_MSG_MASK) {
if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL)
mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL;
if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_LNA_INFO) {
mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_LNA_INFO;
ath_dbg(common, ATH_DBG_MCI, "MCI LNA_INFO\n");
}
if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_CONT_INFO) {
int value_dbm = ar9003_mci_state(ah,
MCI_STATE_CONT_RSSI_POWER, NULL);
mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_CONT_INFO;
if (ar9003_mci_state(ah, MCI_STATE_CONT_TXRX, NULL))
ath_dbg(common, ATH_DBG_MCI,
"MCI CONT_INFO: "
"(tx) pri = %d, pwr = %d dBm\n",
ar9003_mci_state(ah,
MCI_STATE_CONT_PRIORITY, NULL),
value_dbm);
else
ath_dbg(common, ATH_DBG_MCI,
"MCI CONT_INFO:"
"(rx) pri = %d,pwr = %d dBm\n",
ar9003_mci_state(ah,
MCI_STATE_CONT_PRIORITY, NULL),
value_dbm);
}
if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_CONT_NACK) {
mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_CONT_NACK;
ath_dbg(common, ATH_DBG_MCI, "MCI CONT_NACK\n");
}
if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_CONT_RST) {
mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_CONT_RST;
ath_dbg(common, ATH_DBG_MCI, "MCI CONT_RST\n");
}
}
if ((mci_int & AR_MCI_INTERRUPT_RX_INVALID_HDR) ||
(mci_int & AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT))
mci_int &= ~(AR_MCI_INTERRUPT_RX_INVALID_HDR |
AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT);
if (mci_int_rxmsg & 0xfffffffe)
ath_dbg(common, ATH_DBG_MCI,
"MCI not processed mci_int_rxmsg = 0x%x\n",
mci_int_rxmsg);
}

View file

@ -17,6 +17,9 @@
#ifndef MCI_H
#define MCI_H
#define ATH_MCI_SCHED_BUF_SIZE (16 * 16) /* 16 entries, 4 dwords each */
#define ATH_MCI_GPM_MAX_ENTRY 16
#define ATH_MCI_GPM_BUF_SIZE (ATH_MCI_GPM_MAX_ENTRY * 16)
#define ATH_MCI_DEF_BT_PERIOD 40
#define ATH_MCI_BDR_DUTY_CYCLE 20
#define ATH_MCI_MAX_DUTY_CYCLE 90
@ -110,9 +113,26 @@ struct ath_mci_profile {
u8 num_bdr;
};
struct ath_mci_buf {
void *bf_addr; /* virtual addr of buffer */
dma_addr_t bf_paddr; /* physical addr of buffer */
u32 bf_len; /* len of data */
};
struct ath_mci_coex {
atomic_t mci_cal_flag;
struct ath_mci_buf sched_buf;
struct ath_mci_buf gpm_buf;
u32 bt_cal_start;
};
void ath_mci_flush_profile(struct ath_mci_profile *mci);
void ath_mci_process_profile(struct ath_softc *sc,
struct ath_mci_profile_info *info);
void ath_mci_process_status(struct ath_softc *sc,
struct ath_mci_profile_status *status);
int ath_mci_setup(struct ath_softc *sc);
void ath_mci_cleanup(struct ath_softc *sc);
void ath_mci_intr(struct ath_softc *sc);
#endif
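A hedged sketch (illustrative only) of the call order this API is given by the init.c and main.c hunks elsewhere in this patch; error handling and the surrounding driver state machine are omitted.

static int example_mci_lifecycle(struct ath_softc *sc)
{
	int r;

	r = ath_mci_setup(sc);		/* from ath9k_init_btcoex() */
	if (r)
		return r;

	ath_mci_intr(sc);		/* from ath9k_tasklet() when ATH9K_INT_MCI fires */

	ath_mci_cleanup(sc);		/* from ath9k_deinit_softc() */
	return 0;
}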

View file

@ -475,7 +475,6 @@ u32 ath_calcrxfilter(struct ath_softc *sc)
return rfilt;
#undef RX_FILTER_PRESERVE
}
int ath_startrecv(struct ath_softc *sc)
@ -1923,15 +1922,20 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
skb = hdr_skb;
}
/*
* change the default rx antenna if rx diversity chooses the
* other antenna 3 times in a row.
*/
if (sc->rx.defant != rs.rs_antenna) {
if (++sc->rx.rxotherant >= 3)
ath_setdefantenna(sc, rs.rs_antenna);
} else {
sc->rx.rxotherant = 0;
if (ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) {
/*
* change the default rx antenna if rx diversity
* chooses the other antenna 3 times in a row.
*/
if (sc->rx.defant != rs.rs_antenna) {
if (++sc->rx.rxotherant >= 3)
ath_setdefantenna(sc, rs.rs_antenna);
} else {
sc->rx.rxotherant = 0;
}
}
if (rxs->flag & RX_FLAG_MMIC_STRIPPED)

View file

@ -1006,6 +1006,8 @@ enum {
#define AR_INTR_ASYNC_MASK (AR_SREV_9340(ah) ? 0x4018 : 0x4030)
#define AR_INTR_ASYNC_MASK_GPIO 0xFFFC0000
#define AR_INTR_ASYNC_MASK_GPIO_S 18
#define AR_INTR_ASYNC_MASK_MCI 0x00000080
#define AR_INTR_ASYNC_MASK_MCI_S 7
#define AR_INTR_SYNC_MASK (AR_SREV_9340(ah) ? 0x401c : 0x4034)
#define AR_INTR_SYNC_MASK_GPIO 0xFFFC0000
@ -1013,6 +1015,14 @@ enum {
#define AR_INTR_ASYNC_CAUSE_CLR (AR_SREV_9340(ah) ? 0x4020 : 0x4038)
#define AR_INTR_ASYNC_CAUSE (AR_SREV_9340(ah) ? 0x4020 : 0x4038)
#define AR_INTR_ASYNC_CAUSE_MCI 0x00000080
#define AR_INTR_ASYNC_USED (AR_INTR_MAC_IRQ | \
AR_INTR_ASYNC_CAUSE_MCI)
/* Asynchronous Interrupt Enable Register */
#define AR_INTR_ASYNC_ENABLE_MCI 0x00000080
#define AR_INTR_ASYNC_ENABLE_MCI_S 7
#define AR_INTR_ASYNC_ENABLE (AR_SREV_9340(ah) ? 0x4024 : 0x403c)
#define AR_INTR_ASYNC_ENABLE_GPIO 0xFFFC0000
@ -1269,6 +1279,8 @@ enum {
#define AR_RTC_INTR_MASK \
((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0058) : 0x7058)
#define AR_RTC_KEEP_AWAKE 0x7034
/* RTC_DERIVED_* - only for AR9100 */
#define AR_RTC_DERIVED_CLK \
@ -1555,6 +1567,8 @@ enum {
#define AR_DIAG_FRAME_NV0 0x00020000
#define AR_DIAG_OBS_PT_SEL1 0x000C0000
#define AR_DIAG_OBS_PT_SEL1_S 18
#define AR_DIAG_OBS_PT_SEL2 0x08000000
#define AR_DIAG_OBS_PT_SEL2_S 27
#define AR_DIAG_FORCE_RX_CLEAR 0x00100000 /* force rx_clear high */
#define AR_DIAG_IGNORE_VIRT_CS 0x00200000
#define AR_DIAG_FORCE_CH_IDLE_HIGH 0x00400000
@ -1929,37 +1943,277 @@ enum {
#define AR_PHY_AGC_CONTROL_YCOK_MAX_S 6
/* MCI Registers */
#define AR_MCI_INTERRUPT_RX_MSG_EN 0x183c
#define AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET 0x00000001
#define AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET_S 0
#define AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL 0x00000002
#define AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL_S 1
#define AR_MCI_INTERRUPT_RX_MSG_CONT_NACK 0x00000004
#define AR_MCI_INTERRUPT_RX_MSG_CONT_NACK_S 2
#define AR_MCI_INTERRUPT_RX_MSG_CONT_INFO 0x00000008
#define AR_MCI_INTERRUPT_RX_MSG_CONT_INFO_S 3
#define AR_MCI_INTERRUPT_RX_MSG_CONT_RST 0x00000010
#define AR_MCI_INTERRUPT_RX_MSG_CONT_RST_S 4
#define AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO 0x00000020
#define AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO_S 5
#define AR_MCI_INTERRUPT_RX_MSG_CPU_INT 0x00000040
#define AR_MCI_INTERRUPT_RX_MSG_CPU_INT_S 6
#define AR_MCI_INTERRUPT_RX_MSG_GPM 0x00000100
#define AR_MCI_INTERRUPT_RX_MSG_GPM_S 8
#define AR_MCI_INTERRUPT_RX_MSG_LNA_INFO 0x00000200
#define AR_MCI_INTERRUPT_RX_MSG_LNA_INFO_S 9
#define AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING 0x00000400
#define AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING_S 10
#define AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING 0x00000800
#define AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING_S 11
#define AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE 0x00001000
#define AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE_S 12
#define AR_MCI_INTERRUPT_RX_HW_MSG_MASK (AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO | \
#define AR_MCI_COMMAND0 0x1800
#define AR_MCI_COMMAND0_HEADER 0xFF
#define AR_MCI_COMMAND0_HEADER_S 0
#define AR_MCI_COMMAND0_LEN 0x1f00
#define AR_MCI_COMMAND0_LEN_S 8
#define AR_MCI_COMMAND0_DISABLE_TIMESTAMP 0x2000
#define AR_MCI_COMMAND0_DISABLE_TIMESTAMP_S 13
#define AR_MCI_COMMAND1 0x1804
#define AR_MCI_COMMAND2 0x1808
#define AR_MCI_COMMAND2_RESET_TX 0x01
#define AR_MCI_COMMAND2_RESET_TX_S 0
#define AR_MCI_COMMAND2_RESET_RX 0x02
#define AR_MCI_COMMAND2_RESET_RX_S 1
#define AR_MCI_COMMAND2_RESET_RX_NUM_CYCLES 0x3FC
#define AR_MCI_COMMAND2_RESET_RX_NUM_CYCLES_S 2
#define AR_MCI_COMMAND2_RESET_REQ_WAKEUP 0x400
#define AR_MCI_COMMAND2_RESET_REQ_WAKEUP_S 10
#define AR_MCI_RX_CTRL 0x180c
#define AR_MCI_TX_CTRL 0x1810
/* 0 = no division, 1 = divide by 2, 2 = divide by 4, 3 = divide by 8 */
#define AR_MCI_TX_CTRL_CLK_DIV 0x03
#define AR_MCI_TX_CTRL_CLK_DIV_S 0
#define AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE 0x04
#define AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE_S 2
#define AR_MCI_TX_CTRL_GAIN_UPDATE_FREQ 0xFFFFF8
#define AR_MCI_TX_CTRL_GAIN_UPDATE_FREQ_S 3
#define AR_MCI_TX_CTRL_GAIN_UPDATE_NUM 0xF000000
#define AR_MCI_TX_CTRL_GAIN_UPDATE_NUM_S 24
#define AR_MCI_MSG_ATTRIBUTES_TABLE 0x1814
#define AR_MCI_MSG_ATTRIBUTES_TABLE_CHECKSUM 0xFFFF
#define AR_MCI_MSG_ATTRIBUTES_TABLE_CHECKSUM_S 0
#define AR_MCI_MSG_ATTRIBUTES_TABLE_INVALID_HDR 0xFFFF0000
#define AR_MCI_MSG_ATTRIBUTES_TABLE_INVALID_HDR_S 16
#define AR_MCI_SCHD_TABLE_0 0x1818
#define AR_MCI_SCHD_TABLE_1 0x181c
#define AR_MCI_GPM_0 0x1820
#define AR_MCI_GPM_1 0x1824
#define AR_MCI_GPM_WRITE_PTR 0xFFFF0000
#define AR_MCI_GPM_WRITE_PTR_S 16
#define AR_MCI_GPM_BUF_LEN 0x0000FFFF
#define AR_MCI_GPM_BUF_LEN_S 0
#define AR_MCI_INTERRUPT_RAW 0x1828
#define AR_MCI_INTERRUPT_EN 0x182c
#define AR_MCI_INTERRUPT_SW_MSG_DONE 0x00000001
#define AR_MCI_INTERRUPT_SW_MSG_DONE_S 0
#define AR_MCI_INTERRUPT_CPU_INT_MSG 0x00000002
#define AR_MCI_INTERRUPT_CPU_INT_MSG_S 1
#define AR_MCI_INTERRUPT_RX_CKSUM_FAIL 0x00000004
#define AR_MCI_INTERRUPT_RX_CKSUM_FAIL_S 2
#define AR_MCI_INTERRUPT_RX_INVALID_HDR 0x00000008
#define AR_MCI_INTERRUPT_RX_INVALID_HDR_S 3
#define AR_MCI_INTERRUPT_RX_HW_MSG_FAIL 0x00000010
#define AR_MCI_INTERRUPT_RX_HW_MSG_FAIL_S 4
#define AR_MCI_INTERRUPT_RX_SW_MSG_FAIL 0x00000020
#define AR_MCI_INTERRUPT_RX_SW_MSG_FAIL_S 5
#define AR_MCI_INTERRUPT_TX_HW_MSG_FAIL 0x00000080
#define AR_MCI_INTERRUPT_TX_HW_MSG_FAIL_S 7
#define AR_MCI_INTERRUPT_TX_SW_MSG_FAIL 0x00000100
#define AR_MCI_INTERRUPT_TX_SW_MSG_FAIL_S 8
#define AR_MCI_INTERRUPT_RX_MSG 0x00000200
#define AR_MCI_INTERRUPT_RX_MSG_S 9
#define AR_MCI_INTERRUPT_REMOTE_SLEEP_UPDATE 0x00000400
#define AR_MCI_INTERRUPT_REMOTE_SLEEP_UPDATE_S 10
#define AR_MCI_INTERRUPT_BT_PRI 0x07fff800
#define AR_MCI_INTERRUPT_BT_PRI_S 11
#define AR_MCI_INTERRUPT_BT_PRI_THRESH 0x08000000
#define AR_MCI_INTERRUPT_BT_PRI_THRESH_S 27
#define AR_MCI_INTERRUPT_BT_FREQ 0x10000000
#define AR_MCI_INTERRUPT_BT_FREQ_S 28
#define AR_MCI_INTERRUPT_BT_STOMP 0x20000000
#define AR_MCI_INTERRUPT_BT_STOMP_S 29
#define AR_MCI_INTERRUPT_BB_AIC_IRQ 0x40000000
#define AR_MCI_INTERRUPT_BB_AIC_IRQ_S 30
#define AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT 0x80000000
#define AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT_S 31
#define AR_MCI_INTERRUPT_DEFAULT (AR_MCI_INTERRUPT_SW_MSG_DONE | \
AR_MCI_INTERRUPT_RX_INVALID_HDR | \
AR_MCI_INTERRUPT_RX_HW_MSG_FAIL | \
AR_MCI_INTERRUPT_RX_SW_MSG_FAIL | \
AR_MCI_INTERRUPT_TX_HW_MSG_FAIL | \
AR_MCI_INTERRUPT_TX_SW_MSG_FAIL | \
AR_MCI_INTERRUPT_RX_MSG | \
AR_MCI_INTERRUPT_REMOTE_SLEEP_UPDATE | \
AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT)
#define AR_MCI_INTERRUPT_MSG_FAIL_MASK (AR_MCI_INTERRUPT_RX_HW_MSG_FAIL | \
AR_MCI_INTERRUPT_RX_SW_MSG_FAIL | \
AR_MCI_INTERRUPT_TX_HW_MSG_FAIL | \
AR_MCI_INTERRUPT_TX_SW_MSG_FAIL)
#define AR_MCI_REMOTE_CPU_INT 0x1830
#define AR_MCI_REMOTE_CPU_INT_EN 0x1834
#define AR_MCI_INTERRUPT_RX_MSG_RAW 0x1838
#define AR_MCI_INTERRUPT_RX_MSG_EN 0x183c
#define AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET 0x00000001
#define AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET_S 0
#define AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL 0x00000002
#define AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL_S 1
#define AR_MCI_INTERRUPT_RX_MSG_CONT_NACK 0x00000004
#define AR_MCI_INTERRUPT_RX_MSG_CONT_NACK_S 2
#define AR_MCI_INTERRUPT_RX_MSG_CONT_INFO 0x00000008
#define AR_MCI_INTERRUPT_RX_MSG_CONT_INFO_S 3
#define AR_MCI_INTERRUPT_RX_MSG_CONT_RST 0x00000010
#define AR_MCI_INTERRUPT_RX_MSG_CONT_RST_S 4
#define AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO 0x00000020
#define AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO_S 5
#define AR_MCI_INTERRUPT_RX_MSG_CPU_INT 0x00000040
#define AR_MCI_INTERRUPT_RX_MSG_CPU_INT_S 6
#define AR_MCI_INTERRUPT_RX_MSG_GPM 0x00000100
#define AR_MCI_INTERRUPT_RX_MSG_GPM_S 8
#define AR_MCI_INTERRUPT_RX_MSG_LNA_INFO 0x00000200
#define AR_MCI_INTERRUPT_RX_MSG_LNA_INFO_S 9
#define AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING 0x00000400
#define AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING_S 10
#define AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING 0x00000800
#define AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING_S 11
#define AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE 0x00001000
#define AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE_S 12
#define AR_MCI_INTERRUPT_RX_HW_MSG_MASK (AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO | \
AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL| \
AR_MCI_INTERRUPT_RX_MSG_LNA_INFO | \
AR_MCI_INTERRUPT_RX_MSG_CONT_NACK | \
AR_MCI_INTERRUPT_RX_MSG_CONT_INFO | \
AR_MCI_INTERRUPT_RX_MSG_CONT_RST)
#define AR_MCI_INTERRUPT_RX_MSG_DEFAULT (AR_MCI_INTERRUPT_RX_MSG_GPM | \
AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET| \
AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING | \
AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING| \
AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO | \
AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL | \
AR_MCI_INTERRUPT_RX_MSG_LNA_INFO | \
AR_MCI_INTERRUPT_RX_MSG_CONT_NACK | \
AR_MCI_INTERRUPT_RX_MSG_CONT_INFO | \
AR_MCI_INTERRUPT_RX_MSG_CONT_RST | \
AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE)
#define AR_MCI_CPU_INT 0x1840
#define AR_MCI_RX_STATUS 0x1844
#define AR_MCI_RX_LAST_SCHD_MSG_INDEX 0x00000F00
#define AR_MCI_RX_LAST_SCHD_MSG_INDEX_S 8
#define AR_MCI_RX_REMOTE_SLEEP 0x00001000
#define AR_MCI_RX_REMOTE_SLEEP_S 12
#define AR_MCI_RX_MCI_CLK_REQ 0x00002000
#define AR_MCI_RX_MCI_CLK_REQ_S 13
#define AR_MCI_CONT_STATUS 0x1848
#define AR_MCI_CONT_RSSI_POWER 0x000000FF
#define AR_MCI_CONT_RSSI_POWER_S 0
#define AR_MCI_CONT_RRIORITY 0x0000FF00
#define AR_MCI_CONT_RRIORITY_S 8
#define AR_MCI_CONT_TXRX 0x00010000
#define AR_MCI_CONT_TXRX_S 16
#define AR_MCI_BT_PRI0 0x184c
#define AR_MCI_BT_PRI1 0x1850
#define AR_MCI_BT_PRI2 0x1854
#define AR_MCI_BT_PRI3 0x1858
#define AR_MCI_BT_PRI 0x185c
#define AR_MCI_WL_FREQ0 0x1860
#define AR_MCI_WL_FREQ1 0x1864
#define AR_MCI_WL_FREQ2 0x1868
#define AR_MCI_GAIN 0x186c
#define AR_MCI_WBTIMER1 0x1870
#define AR_MCI_WBTIMER2 0x1874
#define AR_MCI_WBTIMER3 0x1878
#define AR_MCI_WBTIMER4 0x187c
#define AR_MCI_MAXGAIN 0x1880
#define AR_MCI_HW_SCHD_TBL_CTL 0x1884
#define AR_MCI_HW_SCHD_TBL_D0 0x1888
#define AR_MCI_HW_SCHD_TBL_D1 0x188c
#define AR_MCI_HW_SCHD_TBL_D2 0x1890
#define AR_MCI_HW_SCHD_TBL_D3 0x1894
#define AR_MCI_TX_PAYLOAD0 0x1898
#define AR_MCI_TX_PAYLOAD1 0x189c
#define AR_MCI_TX_PAYLOAD2 0x18a0
#define AR_MCI_TX_PAYLOAD3 0x18a4
#define AR_BTCOEX_WBTIMER 0x18a8
#define AR_BTCOEX_CTRL 0x18ac
#define AR_BTCOEX_CTRL_AR9462_MODE 0x00000001
#define AR_BTCOEX_CTRL_AR9462_MODE_S 0
#define AR_BTCOEX_CTRL_WBTIMER_EN 0x00000002
#define AR_BTCOEX_CTRL_WBTIMER_EN_S 1
#define AR_BTCOEX_CTRL_MCI_MODE_EN 0x00000004
#define AR_BTCOEX_CTRL_MCI_MODE_EN_S 2
#define AR_BTCOEX_CTRL_LNA_SHARED 0x00000008
#define AR_BTCOEX_CTRL_LNA_SHARED_S 3
#define AR_BTCOEX_CTRL_PA_SHARED 0x00000010
#define AR_BTCOEX_CTRL_PA_SHARED_S 4
#define AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN 0x00000020
#define AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN_S 5
#define AR_BTCOEX_CTRL_TIME_TO_NEXT_BT_THRESH_EN 0x00000040
#define AR_BTCOEX_CTRL_TIME_TO_NEXT_BT_THRESH_EN_S 6
#define AR_BTCOEX_CTRL_NUM_ANTENNAS 0x00000180
#define AR_BTCOEX_CTRL_NUM_ANTENNAS_S 7
#define AR_BTCOEX_CTRL_RX_CHAIN_MASK 0x00000E00
#define AR_BTCOEX_CTRL_RX_CHAIN_MASK_S 9
#define AR_BTCOEX_CTRL_AGGR_THRESH 0x00007000
#define AR_BTCOEX_CTRL_AGGR_THRESH_S 12
#define AR_BTCOEX_CTRL_1_CHAIN_BCN 0x00080000
#define AR_BTCOEX_CTRL_1_CHAIN_BCN_S 19
#define AR_BTCOEX_CTRL_1_CHAIN_ACK 0x00100000
#define AR_BTCOEX_CTRL_1_CHAIN_ACK_S 20
#define AR_BTCOEX_CTRL_WAIT_BA_MARGIN 0x1FE00000
#define AR_BTCOEX_CTRL_WAIT_BA_MARGIN_S 28
#define AR_BTCOEX_CTRL_REDUCE_TXPWR 0x20000000
#define AR_BTCOEX_CTRL_REDUCE_TXPWR_S 29
#define AR_BTCOEX_CTRL_SPDT_ENABLE_10 0x40000000
#define AR_BTCOEX_CTRL_SPDT_ENABLE_10_S 30
#define AR_BTCOEX_CTRL_SPDT_POLARITY 0x80000000
#define AR_BTCOEX_CTRL_SPDT_POLARITY_S 31
#define AR_BTCOEX_WL_WEIGHTS0 0x18b0
#define AR_BTCOEX_WL_WEIGHTS1 0x18b4
#define AR_BTCOEX_WL_WEIGHTS2 0x18b8
#define AR_BTCOEX_WL_WEIGHTS3 0x18bc
#define AR_BTCOEX_MAX_TXPWR(_x) (0x18c0 + ((_x) << 2))
#define AR_BTCOEX_WL_LNA 0x1940
#define AR_BTCOEX_RFGAIN_CTRL 0x1944
#define AR_BTCOEX_CTRL2 0x1948
#define AR_BTCOEX_CTRL2_TXPWR_THRESH 0x0007F800
#define AR_BTCOEX_CTRL2_TXPWR_THRESH_S 11
#define AR_BTCOEX_CTRL2_TX_CHAIN_MASK 0x00380000
#define AR_BTCOEX_CTRL2_TX_CHAIN_MASK_S 19
#define AR_BTCOEX_CTRL2_RX_DEWEIGHT 0x00400000
#define AR_BTCOEX_CTRL2_RX_DEWEIGHT_S 22
#define AR_BTCOEX_CTRL2_GPIO_OBS_SEL 0x00800000
#define AR_BTCOEX_CTRL2_GPIO_OBS_SEL_S 23
#define AR_BTCOEX_CTRL2_MAC_BB_OBS_SEL 0x01000000
#define AR_BTCOEX_CTRL2_MAC_BB_OBS_SEL_S 24
#define AR_BTCOEX_CTRL2_DESC_BASED_TXPWR_ENABLE 0x02000000
#define AR_BTCOEX_CTRL2_DESC_BASED_TXPWR_ENABLE_S 25
#define AR_BTCOEX_CTRL_SPDT_ENABLE 0x00000001
#define AR_BTCOEX_CTRL_SPDT_ENABLE_S 0
#define AR_BTCOEX_CTRL_BT_OWN_SPDT_CTRL 0x00000002
#define AR_BTCOEX_CTRL_BT_OWN_SPDT_CTRL_S 1
#define AR_BTCOEX_CTRL_USE_LATCHED_BT_ANT 0x00000004
#define AR_BTCOEX_CTRL_USE_LATCHED_BT_ANT_S 2
#define AR_GLB_WLAN_UART_INTF_EN 0x00020000
#define AR_GLB_WLAN_UART_INTF_EN_S 17
#define AR_GLB_DS_JTAG_DISABLE 0x00040000
#define AR_GLB_DS_JTAG_DISABLE_S 18
#define AR_BTCOEX_RC 0x194c
#define AR_BTCOEX_MAX_RFGAIN(_x) (0x1950 + ((_x) << 2))
#define AR_BTCOEX_DBG 0x1a50
#define AR_MCI_LAST_HW_MSG_HDR 0x1a54
#define AR_MCI_LAST_HW_MSG_BDY 0x1a58
#define AR_MCI_SCHD_TABLE_2 0x1a5c
#define AR_MCI_SCHD_TABLE_2_MEM_BASED 0x00000001
#define AR_MCI_SCHD_TABLE_2_MEM_BASED_S 0
#define AR_MCI_SCHD_TABLE_2_HW_BASED 0x00000002
#define AR_MCI_SCHD_TABLE_2_HW_BASED_S 1
#define AR_BTCOEX_CTRL3 0x1a60
#define AR_BTCOEX_CTRL3_CONT_INFO_TIMEOUT 0x00000fff
#define AR_BTCOEX_CTRL3_CONT_INFO_TIMEOUT_S 0
#endif

View file

@ -179,6 +179,11 @@ static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
spin_lock_bh(&txq->axq_lock);
}
if (tid->baw_head == tid->baw_tail) {
tid->state &= ~AGGR_ADDBA_COMPLETE;
tid->state &= ~AGGR_CLEANUP;
}
spin_unlock_bh(&txq->axq_lock);
}
@ -556,15 +561,9 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
spin_unlock_bh(&txq->axq_lock);
}
if (tid->state & AGGR_CLEANUP) {
if (tid->state & AGGR_CLEANUP)
ath_tx_flush_tid(sc, tid);
if (tid->baw_head == tid->baw_tail) {
tid->state &= ~AGGR_ADDBA_COMPLETE;
tid->state &= ~AGGR_CLEANUP;
}
}
rcu_read_unlock();
if (needreset) {

View file

@ -1,25 +0,0 @@
/*
* Copyright (c) 2011 Broadcom Corporation
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
* SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
* OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef _bcmchip_h_
#define _bcmchip_h_
/* bcm4329 */
/* firmware name */
#define BCM4329_FW_NAME "brcm/bcm4329-fullmac-4.bin"
#define BCM4329_NV_NAME "brcm/bcm4329-fullmac-4.txt"
#endif /* _bcmchip_h_ */

View file

@ -40,7 +40,8 @@
static void brcmf_sdioh_irqhandler(struct sdio_func *func)
{
struct brcmf_sdio_dev *sdiodev = dev_get_drvdata(&func->card->dev);
struct brcmf_bus *bus_if = dev_get_drvdata(&func->card->dev);
struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv;
brcmf_dbg(TRACE, "***IRQHandler\n");
@ -222,19 +223,12 @@ bool brcmf_sdcard_regfail(struct brcmf_sdio_dev *sdiodev)
return sdiodev->regfail;
}
int
brcmf_sdcard_recv_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
uint flags,
u8 *buf, uint nbytes, struct sk_buff *pkt)
static int brcmf_sdcard_recv_prepare(struct brcmf_sdio_dev *sdiodev, uint fn,
uint flags, uint width, u32 *addr)
{
int status;
uint incr_fix;
uint width;
uint bar0 = addr & ~SBSDIO_SB_OFT_ADDR_MASK;
uint bar0 = *addr & ~SBSDIO_SB_OFT_ADDR_MASK;
int err = 0;
brcmf_dbg(INFO, "fun = %d, addr = 0x%x, size = %d\n", fn, addr, nbytes);
/* Async not implemented yet */
if (flags & SDIO_REQ_ASYNC)
return -ENOTSUPP;
@ -247,29 +241,114 @@ brcmf_sdcard_recv_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
sdiodev->sbwad = bar0;
}
addr &= SBSDIO_SB_OFT_ADDR_MASK;
*addr &= SBSDIO_SB_OFT_ADDR_MASK;
if (width == 4)
*addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
return 0;
}
int
brcmf_sdcard_recv_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
uint flags, u8 *buf, uint nbytes)
{
struct sk_buff *mypkt;
int err;
mypkt = brcmu_pkt_buf_get_skb(nbytes);
if (!mypkt) {
brcmf_dbg(ERROR, "brcmu_pkt_buf_get_skb failed: len %d\n",
nbytes);
return -EIO;
}
err = brcmf_sdcard_recv_pkt(sdiodev, addr, fn, flags, mypkt);
if (!err)
memcpy(buf, mypkt->data, nbytes);
brcmu_pkt_buf_free_skb(mypkt);
return err;
}
int
brcmf_sdcard_recv_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
uint flags, struct sk_buff *pkt)
{
uint incr_fix;
uint width;
int err = 0;
brcmf_dbg(INFO, "fun = %d, addr = 0x%x, size = %d\n",
fn, addr, pkt->len);
width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
err = brcmf_sdcard_recv_prepare(sdiodev, fn, flags, width, &addr);
if (err)
return err;
incr_fix = (flags & SDIO_REQ_FIXED) ? SDIOH_DATA_FIX : SDIOH_DATA_INC;
err = brcmf_sdioh_request_buffer(sdiodev, incr_fix, SDIOH_READ,
fn, addr, pkt);
return err;
}
int brcmf_sdcard_recv_chain(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
uint flags, struct sk_buff_head *pktq)
{
uint incr_fix;
uint width;
int err = 0;
brcmf_dbg(INFO, "fun = %d, addr = 0x%x, size = %d\n",
fn, addr, pktq->qlen);
width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
if (width == 4)
addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
err = brcmf_sdcard_recv_prepare(sdiodev, fn, flags, width, &addr);
if (err)
return err;
status = brcmf_sdioh_request_buffer(sdiodev, incr_fix, SDIOH_READ,
fn, addr, width, nbytes, buf, pkt);
incr_fix = (flags & SDIO_REQ_FIXED) ? SDIOH_DATA_FIX : SDIOH_DATA_INC;
err = brcmf_sdioh_request_chain(sdiodev, incr_fix, SDIOH_READ, fn, addr,
pktq);
return status;
return err;
}
int
brcmf_sdcard_send_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
uint flags, u8 *buf, uint nbytes, struct sk_buff *pkt)
uint flags, u8 *buf, uint nbytes)
{
struct sk_buff *mypkt;
int err;
mypkt = brcmu_pkt_buf_get_skb(nbytes);
if (!mypkt) {
brcmf_dbg(ERROR, "brcmu_pkt_buf_get_skb failed: len %d\n",
nbytes);
return -EIO;
}
memcpy(mypkt->data, buf, nbytes);
err = brcmf_sdcard_send_pkt(sdiodev, addr, fn, flags, mypkt);
brcmu_pkt_buf_free_skb(mypkt);
return err;
}
int
brcmf_sdcard_send_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
uint flags, struct sk_buff *pkt)
{
uint incr_fix;
uint width;
uint bar0 = addr & ~SBSDIO_SB_OFT_ADDR_MASK;
int err = 0;
brcmf_dbg(INFO, "fun = %d, addr = 0x%x, size = %d\n", fn, addr, nbytes);
brcmf_dbg(INFO, "fun = %d, addr = 0x%x, size = %d\n",
fn, addr, pkt->len);
/* Async not implemented yet */
if (flags & SDIO_REQ_ASYNC)
@ -291,18 +370,39 @@ brcmf_sdcard_send_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
return brcmf_sdioh_request_buffer(sdiodev, incr_fix, SDIOH_WRITE, fn,
addr, width, nbytes, buf, pkt);
addr, pkt);
}
int brcmf_sdcard_rwdata(struct brcmf_sdio_dev *sdiodev, uint rw, u32 addr,
u8 *buf, uint nbytes)
{
struct sk_buff *mypkt;
bool write = rw ? SDIOH_WRITE : SDIOH_READ;
int err;
addr &= SBSDIO_SB_OFT_ADDR_MASK;
addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
return brcmf_sdioh_request_buffer(sdiodev, SDIOH_DATA_INC,
(rw ? SDIOH_WRITE : SDIOH_READ), SDIO_FUNC_1,
addr, 4, nbytes, buf, NULL);
mypkt = brcmu_pkt_buf_get_skb(nbytes);
if (!mypkt) {
brcmf_dbg(ERROR, "brcmu_pkt_buf_get_skb failed: len %d\n",
nbytes);
return -EIO;
}
/* For a write, copy the buffer data into the packet. */
if (write)
memcpy(mypkt->data, buf, nbytes);
err = brcmf_sdioh_request_buffer(sdiodev, SDIOH_DATA_INC, write,
SDIO_FUNC_1, addr, mypkt);
/* For a read, copy the packet data back to the buffer. */
if (!err && !write)
memcpy(buf, mypkt->data, nbytes);
brcmu_pkt_buf_free_skb(mypkt);
return err;
}
int brcmf_sdcard_abort(struct brcmf_sdio_dev *sdiodev, uint fn)
@ -333,7 +433,7 @@ int brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
sdiodev->sbwad = SI_ENUM_BASE;
/* try to attach to the target device */
sdiodev->bus = brcmf_sdbrcm_probe(0, 0, 0, 0, regs, sdiodev);
sdiodev->bus = brcmf_sdbrcm_probe(regs, sdiodev);
if (!sdiodev->bus) {
brcmf_dbg(ERROR, "device attach failed\n");
ret = -ENODEV;

View file

@ -204,62 +204,75 @@ int brcmf_sdioh_request_word(struct brcmf_sdio_dev *sdiodev,
return err_ret;
}
/* precondition: host controller is claimed */
static int
brcmf_sdioh_request_packet(struct brcmf_sdio_dev *sdiodev, uint fix_inc,
uint write, uint func, uint addr,
struct sk_buff *pkt)
brcmf_sdioh_request_data(struct brcmf_sdio_dev *sdiodev, uint write, bool fifo,
uint func, uint addr, struct sk_buff *pkt, uint pktlen)
{
int err_ret = 0;
if ((write) && (!fifo)) {
err_ret = sdio_memcpy_toio(sdiodev->func[func], addr,
((u8 *) (pkt->data)), pktlen);
} else if (write) {
	/* Same path as above: the fifo flag makes no difference for writes. */
err_ret = sdio_memcpy_toio(sdiodev->func[func], addr,
((u8 *) (pkt->data)), pktlen);
} else if (fifo) {
err_ret = sdio_readsb(sdiodev->func[func],
((u8 *) (pkt->data)), addr, pktlen);
} else {
err_ret = sdio_memcpy_fromio(sdiodev->func[func],
((u8 *) (pkt->data)),
addr, pktlen);
}
return err_ret;
}
/*
* This function takes a queue of packets. The packets on the queue
* are assumed to be properly aligned by the caller.
*/
int
brcmf_sdioh_request_chain(struct brcmf_sdio_dev *sdiodev, uint fix_inc,
uint write, uint func, uint addr,
struct sk_buff_head *pktq)
{
bool fifo = (fix_inc == SDIOH_DATA_FIX);
u32 SGCount = 0;
int err_ret = 0;
struct sk_buff *pnext;
struct sk_buff *pkt;
brcmf_dbg(TRACE, "Enter\n");
brcmf_pm_resume_wait(sdiodev, &sdiodev->request_packet_wait);
brcmf_pm_resume_wait(sdiodev, &sdiodev->request_chain_wait);
if (brcmf_pm_resume_error(sdiodev))
return -EIO;
/* Claim host controller */
sdio_claim_host(sdiodev->func[func]);
for (pnext = pkt; pnext; pnext = pnext->next) {
uint pkt_len = pnext->len;
skb_queue_walk(pktq, pkt) {
uint pkt_len = pkt->len;
pkt_len += 3;
pkt_len &= 0xFFFFFFFC;
if ((write) && (!fifo)) {
err_ret = sdio_memcpy_toio(sdiodev->func[func], addr,
((u8 *) (pnext->data)),
pkt_len);
} else if (write) {
err_ret = sdio_memcpy_toio(sdiodev->func[func], addr,
((u8 *) (pnext->data)),
pkt_len);
} else if (fifo) {
err_ret = sdio_readsb(sdiodev->func[func],
((u8 *) (pnext->data)),
addr, pkt_len);
} else {
err_ret = sdio_memcpy_fromio(sdiodev->func[func],
((u8 *) (pnext->data)),
addr, pkt_len);
}
err_ret = brcmf_sdioh_request_data(sdiodev, write, fifo, func,
addr, pkt, pkt_len);
if (err_ret) {
brcmf_dbg(ERROR, "%s FAILED %p[%d], addr=0x%05x, pkt_len=%d, ERR=0x%08x\n",
write ? "TX" : "RX", pnext, SGCount, addr,
write ? "TX" : "RX", pkt, SGCount, addr,
pkt_len, err_ret);
} else {
brcmf_dbg(TRACE, "%s xfr'd %p[%d], addr=0x%05x, len=%d\n",
write ? "TX" : "RX", pnext, SGCount, addr,
write ? "TX" : "RX", pkt, SGCount, addr,
pkt_len);
}
if (!fifo)
addr += pkt_len;
SGCount++;
SGCount++;
}
/* Release host controller */
@ -270,91 +283,45 @@ brcmf_sdioh_request_packet(struct brcmf_sdio_dev *sdiodev, uint fix_inc,
}
/*
* This function takes a buffer or packet, and fixes everything up
* so that in the end, a DMA-able packet is created.
*
* A buffer does not have an associated packet pointer,
* and may or may not be aligned.
* A packet may consist of a single packet, or a packet chain.
* If it is a packet chain, then all the packets in the chain
* must be properly aligned.
*
* If the packet data is not aligned, then there may only be
* one packet, and in this case, it is copied to a new
* aligned packet.
*
* This function takes a single DMA-able packet.
*/
int brcmf_sdioh_request_buffer(struct brcmf_sdio_dev *sdiodev,
uint fix_inc, uint write, uint func, uint addr,
uint reg_width, uint buflen_u, u8 *buffer,
struct sk_buff *pkt)
{
int Status;
struct sk_buff *mypkt = NULL;
int status;
uint pkt_len = pkt->len;
bool fifo = (fix_inc == SDIOH_DATA_FIX);
brcmf_dbg(TRACE, "Enter\n");
if (pkt == NULL)
return -EINVAL;
brcmf_pm_resume_wait(sdiodev, &sdiodev->request_buffer_wait);
if (brcmf_pm_resume_error(sdiodev))
return -EIO;
/* Case 1: we don't have a packet. */
if (pkt == NULL) {
brcmf_dbg(DATA, "Creating new %s Packet, len=%d\n",
write ? "TX" : "RX", buflen_u);
mypkt = brcmu_pkt_buf_get_skb(buflen_u);
if (!mypkt) {
brcmf_dbg(ERROR, "brcmu_pkt_buf_get_skb failed: len %d\n",
buflen_u);
return -EIO;
}
/* For a write, copy the buffer data into the packet. */
if (write)
memcpy(mypkt->data, buffer, buflen_u);
/* Claim host controller */
sdio_claim_host(sdiodev->func[func]);
Status = brcmf_sdioh_request_packet(sdiodev, fix_inc, write,
func, addr, mypkt);
pkt_len += 3;
pkt_len &= (uint)~3;
/* For a read, copy the packet data back to the buffer. */
if (!write)
memcpy(buffer, mypkt->data, buflen_u);
brcmu_pkt_buf_free_skb(mypkt);
} else if (((ulong) (pkt->data) & DMA_ALIGN_MASK) != 0) {
/*
* Case 2: We have a packet, but it is unaligned.
* In this case, we cannot have a chain (pkt->next == NULL)
*/
brcmf_dbg(DATA, "Creating aligned %s Packet, len=%d\n",
write ? "TX" : "RX", pkt->len);
mypkt = brcmu_pkt_buf_get_skb(pkt->len);
if (!mypkt) {
brcmf_dbg(ERROR, "brcmu_pkt_buf_get_skb failed: len %d\n",
pkt->len);
return -EIO;
}
/* For a write, copy the buffer data into the packet. */
if (write)
memcpy(mypkt->data, pkt->data, pkt->len);
Status = brcmf_sdioh_request_packet(sdiodev, fix_inc, write,
func, addr, mypkt);
/* For a read, copy the packet data back to the buffer. */
if (!write)
memcpy(pkt->data, mypkt->data, mypkt->len);
brcmu_pkt_buf_free_skb(mypkt);
} else { /* case 3: We have a packet and
it is aligned. */
brcmf_dbg(DATA, "Aligned %s Packet, direct DMA\n",
write ? "Tx" : "Rx");
Status = brcmf_sdioh_request_packet(sdiodev, fix_inc, write,
func, addr, pkt);
status = brcmf_sdioh_request_data(sdiodev, write, fifo, func,
addr, pkt, pkt_len);
if (status) {
brcmf_dbg(ERROR, "%s FAILED %p, addr=0x%05x, pkt_len=%d, ERR=0x%08x\n",
write ? "TX" : "RX", pkt, addr, pkt_len, status);
} else {
brcmf_dbg(TRACE, "%s xfr'd %p, addr=0x%05x, len=%d\n",
write ? "TX" : "RX", pkt, addr, pkt_len);
}
return Status;
/* Release host controller */
sdio_release_host(sdiodev->func[func]);
return status;
}
/* Read client card reg */
@ -494,6 +461,7 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
{
int ret = 0;
struct brcmf_sdio_dev *sdiodev;
struct brcmf_bus *bus_if;
brcmf_dbg(TRACE, "Enter\n");
brcmf_dbg(TRACE, "func->class=%x\n", func->class);
brcmf_dbg(TRACE, "sdio_vendor: 0x%04x\n", func->vendor);
@ -505,22 +473,31 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
brcmf_dbg(ERROR, "card private drvdata occupied\n");
return -ENXIO;
}
sdiodev = kzalloc(sizeof(struct brcmf_sdio_dev), GFP_KERNEL);
if (!sdiodev)
bus_if = kzalloc(sizeof(struct brcmf_bus), GFP_KERNEL);
if (!bus_if)
return -ENOMEM;
sdiodev = kzalloc(sizeof(struct brcmf_sdio_dev), GFP_KERNEL);
if (!sdiodev) {
kfree(bus_if);
return -ENOMEM;
}
sdiodev->dev = &func->card->dev;
sdiodev->func[0] = func->card->sdio_func[0];
sdiodev->func[1] = func;
dev_set_drvdata(&func->card->dev, sdiodev);
bus_if->bus_priv = sdiodev;
bus_if->type = SDIO_BUS;
dev_set_drvdata(&func->card->dev, bus_if);
atomic_set(&sdiodev->suspend, false);
init_waitqueue_head(&sdiodev->request_byte_wait);
init_waitqueue_head(&sdiodev->request_word_wait);
init_waitqueue_head(&sdiodev->request_packet_wait);
init_waitqueue_head(&sdiodev->request_chain_wait);
init_waitqueue_head(&sdiodev->request_buffer_wait);
}
if (func->num == 2) {
sdiodev = dev_get_drvdata(&func->card->dev);
bus_if = dev_get_drvdata(&func->card->dev);
sdiodev = bus_if->bus_priv;
if ((!sdiodev) || (sdiodev->func[1]->card != func->card))
return -ENODEV;
sdiodev->func[2] = func;
@ -534,6 +511,7 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
static void brcmf_ops_sdio_remove(struct sdio_func *func)
{
struct brcmf_bus *bus_if;
struct brcmf_sdio_dev *sdiodev;
brcmf_dbg(TRACE, "Enter\n");
brcmf_dbg(INFO, "func->class=%x\n", func->class);
@ -542,10 +520,12 @@ static void brcmf_ops_sdio_remove(struct sdio_func *func)
brcmf_dbg(INFO, "Function#: 0x%04x\n", func->num);
if (func->num == 2) {
sdiodev = dev_get_drvdata(&func->card->dev);
bus_if = dev_get_drvdata(&func->card->dev);
sdiodev = bus_if->bus_priv;
brcmf_dbg(TRACE, "F2 found, calling brcmf_sdio_remove...\n");
brcmf_sdio_remove(sdiodev);
dev_set_drvdata(&func->card->dev, NULL);
kfree(bus_if);
kfree(sdiodev);
}
}
@ -556,11 +536,12 @@ static int brcmf_sdio_suspend(struct device *dev)
mmc_pm_flag_t sdio_flags;
struct brcmf_sdio_dev *sdiodev;
struct sdio_func *func = dev_to_sdio_func(dev);
struct brcmf_bus *bus_if = dev_get_drvdata(&func->card->dev);
int ret = 0;
brcmf_dbg(TRACE, "\n");
sdiodev = dev_get_drvdata(&func->card->dev);
sdiodev = bus_if->bus_priv;
atomic_set(&sdiodev->suspend, true);
@ -585,8 +566,9 @@ static int brcmf_sdio_resume(struct device *dev)
{
struct brcmf_sdio_dev *sdiodev;
struct sdio_func *func = dev_to_sdio_func(dev);
struct brcmf_bus *bus_if = dev_get_drvdata(&func->card->dev);
sdiodev = dev_get_drvdata(&func->card->dev);
sdiodev = bus_if->bus_priv;
brcmf_sdio_wdtmr_enable(sdiodev, true);
atomic_set(&sdiodev->suspend, false);
return 0;
@ -610,17 +592,26 @@ static struct sdio_driver brcmf_sdmmc_driver = {
#endif /* CONFIG_PM_SLEEP */
};
/* bus register interface */
int brcmf_bus_register(void)
{
brcmf_dbg(TRACE, "Enter\n");
return sdio_register_driver(&brcmf_sdmmc_driver);
}
void brcmf_bus_unregister(void)
static void __exit brcmf_sdio_exit(void)
{
brcmf_dbg(TRACE, "Enter\n");
sdio_unregister_driver(&brcmf_sdmmc_driver);
}
static int __init brcmf_sdio_init(void)
{
int ret;
brcmf_dbg(TRACE, "Enter\n");
ret = sdio_register_driver(&brcmf_sdmmc_driver);
if (ret)
brcmf_dbg(ERROR, "sdio_register_driver failed: %d\n", ret);
return ret;
}
module_init(brcmf_sdio_init);
module_exit(brcmf_sdio_exit);

View file

@ -571,8 +571,14 @@ struct brcmf_dcmd {
uint needed; /* bytes needed (optional) */
};
struct brcmf_bus {
u8 type; /* bus type */
void *bus_priv; /* pointer to bus private structure */
enum brcmf_bus_state state;
};
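For clarity (illustrative, not part of this change): with the generic bus object now stored as device drvdata, bus-specific state is reached through bus_priv. The retrieval pattern below is the one the SDIO probe/suspend/resume hunks in this series use, assuming a struct sdio_func *func.

	struct brcmf_bus *bus_if = dev_get_drvdata(&func->card->dev);
	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv;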
/* Forward decls for struct brcmf_pub (see below) */
struct brcmf_bus; /* device bus info */
struct brcmf_sdio; /* device bus info */
struct brcmf_proto; /* device communication protocol info */
struct brcmf_info; /* device driver info */
struct brcmf_cfg80211_dev; /* cfg80211 device info */
@ -580,15 +586,16 @@ struct brcmf_cfg80211_dev; /* cfg80211 device info */
/* Common structure for module and instance linkage */
struct brcmf_pub {
/* Linkage ponters */
struct brcmf_bus *bus;
struct brcmf_sdio *bus;
struct brcmf_bus *bus_if;
struct brcmf_proto *prot;
struct brcmf_info *info;
struct brcmf_cfg80211_dev *config;
struct device *dev; /* fullmac dongle device pointer */
/* Internal brcmf items */
bool up; /* Driver up/down (to OS) */
bool txoff; /* Transmit flow-controlled */
enum brcmf_bus_state busstate;
uint hdrlen; /* Total BRCMF header length (proto + bus) */
uint maxctl; /* Max size rxctl request from proto to bus */
uint rxsz; /* Rx buffer size bus module should use */
@ -656,7 +663,6 @@ struct brcmf_pub {
u8 country_code[BRCM_CNTRY_BUF_SZ];
char eventmask[BRCMF_EVENTING_MASK_LEN];
};
struct brcmf_if_event {
@ -681,8 +687,8 @@ extern uint brcmf_c_mkiovar(char *name, char *data, uint datalen,
* Returned structure should have bus and prot pointers filled in.
* bus_hdrlen specifies required headroom for bus module header.
*/
extern struct brcmf_pub *brcmf_attach(struct brcmf_bus *bus,
uint bus_hdrlen);
extern struct brcmf_pub *brcmf_attach(struct brcmf_sdio *bus,
uint bus_hdrlen, struct device *dev);
extern int brcmf_net_attach(struct brcmf_pub *drvr, int idx);
extern int brcmf_netdev_wait_pend8021x(struct net_device *ndev);
@ -699,7 +705,16 @@ extern bool brcmf_c_prec_enq(struct brcmf_pub *drvr, struct pktq *q,
/* Receive frame for delivery to OS. Callee disposes of rxp. */
extern void brcmf_rx_frame(struct brcmf_pub *drvr, int ifidx,
struct sk_buff *rxp, int numpkt);
struct sk_buff_head *rxlist);
static inline void brcmf_rx_packet(struct brcmf_pub *drvr, int ifidx,
struct sk_buff *pkt)
{
struct sk_buff_head q;
skb_queue_head_init(&q);
skb_queue_tail(&q, pkt);
brcmf_rx_frame(drvr, ifidx, &q);
}
/* Return pointer to interface name */
extern char *brcmf_ifname(struct brcmf_pub *drvr, int idx);
@ -724,8 +739,6 @@ extern int brcmf_c_host_event(struct brcmf_info *drvr_priv, int *idx,
void *pktdata, struct brcmf_event_msg *,
void **data_ptr);
extern void brcmf_c_init(void);
extern int brcmf_add_if(struct brcmf_info *drvr_priv, int ifidx,
char *name, u8 *mac_addr);
extern void brcmf_del_if(struct brcmf_info *drvr_priv, int ifidx);

View file

@ -27,31 +27,24 @@
* Exported from brcmf bus module (brcmf_usb, brcmf_sdio)
*/
/* Indicate (dis)interest in finding dongles. */
extern int brcmf_bus_register(void);
extern void brcmf_bus_unregister(void);
/* obtain linux device object providing bus function */
extern struct device *brcmf_bus_get_device(struct brcmf_bus *bus);
/* Stop bus module: clear pending frames, disable data flow */
extern void brcmf_sdbrcm_bus_stop(struct brcmf_bus *bus);
extern void brcmf_sdbrcm_bus_stop(struct device *dev);
/* Initialize bus module: prepare for communication w/dongle */
extern int brcmf_sdbrcm_bus_init(struct brcmf_pub *drvr);
extern int brcmf_sdbrcm_bus_init(struct device *dev);
/* Send a data frame to the dongle. Callee disposes of txp. */
extern int brcmf_sdbrcm_bus_txdata(struct brcmf_bus *bus, struct sk_buff *txp);
extern int brcmf_sdbrcm_bus_txdata(struct device *dev, struct sk_buff *txp);
/* Send/receive a control message to/from the dongle.
* Expects caller to enforce a single outstanding transaction.
*/
extern int
brcmf_sdbrcm_bus_txctl(struct brcmf_bus *bus, unsigned char *msg, uint msglen);
brcmf_sdbrcm_bus_txctl(struct device *dev, unsigned char *msg, uint msglen);
extern int
brcmf_sdbrcm_bus_rxctl(struct brcmf_bus *bus, unsigned char *msg, uint msglen);
brcmf_sdbrcm_bus_rxctl(struct device *dev, unsigned char *msg, uint msglen);
extern void brcmf_sdbrcm_wd_timer(struct brcmf_bus *bus, uint wdtick);
extern void brcmf_sdbrcm_wd_timer(struct brcmf_sdio *bus, uint wdtick);
#endif /* _BRCMF_BUS_H_ */

View file

@ -116,7 +116,7 @@ static int brcmf_proto_cdc_msg(struct brcmf_pub *drvr)
len = CDC_MAX_MSG_SIZE;
/* Send request */
return brcmf_sdbrcm_bus_txctl(drvr->bus, (unsigned char *)&prot->msg,
return brcmf_sdbrcm_bus_txctl(drvr->dev, (unsigned char *)&prot->msg,
len);
}
@ -128,7 +128,7 @@ static int brcmf_proto_cdc_cmplt(struct brcmf_pub *drvr, u32 id, u32 len)
brcmf_dbg(TRACE, "Enter\n");
do {
ret = brcmf_sdbrcm_bus_rxctl(drvr->bus,
ret = brcmf_sdbrcm_bus_rxctl(drvr->dev,
(unsigned char *)&prot->msg,
len + sizeof(struct brcmf_proto_cdc_dcmd));
if (ret < 0)
@ -280,7 +280,7 @@ brcmf_proto_dcmd(struct brcmf_pub *drvr, int ifidx, struct brcmf_dcmd *dcmd,
struct brcmf_proto *prot = drvr->prot;
int ret = -1;
if (drvr->busstate == BRCMF_BUS_DOWN) {
if (drvr->bus_if->state == BRCMF_BUS_DOWN) {
brcmf_dbg(ERROR, "bus is down. we have nothing to do.\n");
return ret;
}

View file

@ -32,8 +32,6 @@
#define PKTFILTER_BUF_SIZE 2048
#define BRCMF_ARPOL_MODE 0xb /* agent|snoop|peer_autoreply */
int brcmf_msg_level;
#define MSGTRACE_VERSION 1
#define BRCMF_PKT_FILTER_FIXED_LEN offsetof(struct brcmf_pkt_filter_le, u)
@ -85,19 +83,6 @@ brcmf_c_mkiovar(char *name, char *data, uint datalen, char *buf, uint buflen)
return len;
}
void brcmf_c_init(void)
{
/* Init global variables at run-time, not as part of the declaration.
* This is required to support init/de-init of the driver.
* Initialization
* of globals as part of the declaration results in non-deterministic
* behaviour since the value of the globals may be different on the
* first time that the driver is initialized vs subsequent
* initializations.
*/
brcmf_msg_level = BRCMF_ERROR_VAL;
}
bool brcmf_c_prec_enq(struct brcmf_pub *drvr, struct pktq *q,
struct sk_buff *pkt, int prec)
{

View file

@ -43,7 +43,6 @@
#include "dhd_proto.h"
#include "dhd_dbg.h"
#include "wl_cfg80211.h"
#include "bcmchip.h"
MODULE_AUTHOR("Broadcom Corporation");
MODULE_DESCRIPTION("Broadcom 802.11n wireless LAN fullmac driver.");
@ -77,6 +76,7 @@ struct brcmf_info {
};
/* Error bits */
int brcmf_msg_level = BRCMF_ERROR_VAL;
module_param(brcmf_msg_level, int, 0);
int brcmf_ifname2idx(struct brcmf_info *drvr_priv, char *name)
@ -292,7 +292,7 @@ int brcmf_sendpkt(struct brcmf_pub *drvr, int ifidx, struct sk_buff *pktbuf)
struct brcmf_info *drvr_priv = drvr->info;
/* Reject if down */
if (!drvr->up || (drvr->busstate == BRCMF_BUS_DOWN))
if (!drvr->up || (drvr->bus_if->state == BRCMF_BUS_DOWN))
return -ENODEV;
/* Update multicast statistic */
@ -310,7 +310,7 @@ int brcmf_sendpkt(struct brcmf_pub *drvr, int ifidx, struct sk_buff *pktbuf)
brcmf_proto_hdrpush(drvr, ifidx, pktbuf);
/* Use bus module to send data frame */
return brcmf_sdbrcm_bus_txdata(drvr->bus, pktbuf);
return brcmf_sdbrcm_bus_txdata(drvr->dev, pktbuf);
}
static int brcmf_netdev_start_xmit(struct sk_buff *skb, struct net_device *ndev)
@ -322,9 +322,11 @@ static int brcmf_netdev_start_xmit(struct sk_buff *skb, struct net_device *ndev)
brcmf_dbg(TRACE, "Enter\n");
/* Reject if down */
if (!drvr_priv->pub.up || (drvr_priv->pub.busstate == BRCMF_BUS_DOWN)) {
brcmf_dbg(ERROR, "xmit rejected pub.up=%d busstate=%d\n",
drvr_priv->pub.up, drvr_priv->pub.busstate);
if (!drvr_priv->pub.up ||
(drvr_priv->pub.bus_if->state == BRCMF_BUS_DOWN)) {
brcmf_dbg(ERROR, "xmit rejected pub.up=%d state=%d\n",
drvr_priv->pub.up,
drvr_priv->pub.bus_if->state);
netif_stop_queue(ndev);
return -ENODEV;
}
@ -397,26 +399,21 @@ static int brcmf_host_event(struct brcmf_info *drvr_priv, int *ifidx,
return bcmerror;
}
void brcmf_rx_frame(struct brcmf_pub *drvr, int ifidx, struct sk_buff *skb,
int numpkt)
void brcmf_rx_frame(struct brcmf_pub *drvr, int ifidx,
struct sk_buff_head *skb_list)
{
struct brcmf_info *drvr_priv = drvr->info;
unsigned char *eth;
uint len;
void *data;
struct sk_buff *pnext, *save_pktbuf;
int i;
struct sk_buff *skb, *pnext;
struct brcmf_if *ifp;
struct brcmf_event_msg event;
brcmf_dbg(TRACE, "Enter\n");
save_pktbuf = skb;
for (i = 0; skb && i < numpkt; i++, skb = pnext) {
pnext = skb->next;
skb->next = NULL;
skb_queue_walk_safe(skb_list, skb, pnext) {
skb_unlink(skb, skb_list);
/* Get the protocol, maintain skb around eth_type_trans()
* The main reason for this hack is for the limitation of
@ -437,6 +434,12 @@ void brcmf_rx_frame(struct brcmf_pub *drvr, int ifidx, struct sk_buff *skb,
if (ifp == NULL)
ifp = drvr_priv->iflist[0];
if (!ifp || !ifp->ndev ||
ifp->ndev->reg_state != NETREG_REGISTERED) {
brcmu_pkt_buf_free_skb(skb);
continue;
}
skb->dev = ifp->ndev;
skb->protocol = eth_type_trans(skb, skb->dev);
@ -605,9 +608,7 @@ static void brcmf_ethtool_get_drvinfo(struct net_device *ndev,
sprintf(info->driver, KBUILD_MODNAME);
sprintf(info->version, "%lu", drvr_priv->pub.drv_version);
sprintf(info->fw_version, "%s", BCM4329_FW_NAME);
sprintf(info->bus_info, "%s",
dev_name(brcmf_bus_get_device(drvr_priv->pub.bus)));
sprintf(info->bus_info, "%s", dev_name(drvr_priv->pub.dev));
}
static struct ethtool_ops brcmf_ethtool_ops = {
@ -761,7 +762,7 @@ s32 brcmf_exec_dcmd(struct net_device *ndev, u32 cmd, void *arg, u32 len)
buflen = min_t(uint, dcmd.len, BRCMF_DCMD_MAXLEN);
/* send to dongle (must be up, and wl) */
if ((drvr_priv->pub.busstate != BRCMF_BUS_DATA)) {
if ((drvr_priv->pub.bus_if->state != BRCMF_BUS_DATA)) {
brcmf_dbg(ERROR, "DONGLE_DOWN\n");
err = -EIO;
goto done;
@ -940,7 +941,8 @@ void brcmf_del_if(struct brcmf_info *drvr_priv, int ifidx)
}
}
struct brcmf_pub *brcmf_attach(struct brcmf_bus *bus, uint bus_hdrlen)
struct brcmf_pub *brcmf_attach(struct brcmf_sdio *bus, uint bus_hdrlen,
struct device *dev)
{
struct brcmf_info *drvr_priv = NULL;
@ -959,6 +961,8 @@ struct brcmf_pub *brcmf_attach(struct brcmf_bus *bus, uint bus_hdrlen)
/* Link to bus module */
drvr_priv->pub.bus = bus;
drvr_priv->pub.hdrlen = bus_hdrlen;
drvr_priv->pub.bus_if = dev_get_drvdata(dev);
drvr_priv->pub.dev = dev;
/* Attach and link in the protocol */
if (brcmf_proto_attach(&drvr_priv->pub) != 0) {
@ -988,14 +992,14 @@ int brcmf_bus_start(struct brcmf_pub *drvr)
brcmf_dbg(TRACE, "\n");
/* Bring up the bus */
ret = brcmf_sdbrcm_bus_init(&drvr_priv->pub);
ret = brcmf_sdbrcm_bus_init(drvr_priv->pub.dev);
if (ret != 0) {
brcmf_dbg(ERROR, "brcmf_sdbrcm_bus_init failed %d\n", ret);
return ret;
}
/* If bus is not ready, can't come up */
if (drvr_priv->pub.busstate != BRCMF_BUS_DATA) {
if (drvr_priv->pub.bus_if->state != BRCMF_BUS_DATA) {
brcmf_dbg(ERROR, "failed bus is not ready\n");
return -ENODEV;
}
@ -1077,10 +1081,7 @@ int brcmf_net_attach(struct brcmf_pub *drvr, int ifidx)
/* attach to cfg80211 for primary interface */
if (!ifidx) {
drvr->config =
brcmf_cfg80211_attach(ndev,
brcmf_bus_get_device(drvr->bus),
drvr);
drvr->config = brcmf_cfg80211_attach(ndev, drvr->dev, drvr);
if (drvr->config == NULL) {
brcmf_dbg(ERROR, "wl_cfg80211_attach failed\n");
goto fail;
@ -1114,7 +1115,7 @@ static void brcmf_bus_detach(struct brcmf_pub *drvr)
brcmf_proto_stop(&drvr_priv->pub);
/* Stop the bus module */
brcmf_sdbrcm_bus_stop(drvr_priv->pub.bus);
brcmf_sdbrcm_bus_stop(drvr_priv->pub.dev);
}
}
}
@ -1148,34 +1149,6 @@ void brcmf_detach(struct brcmf_pub *drvr)
}
}
static void __exit brcmf_module_cleanup(void)
{
brcmf_dbg(TRACE, "Enter\n");
brcmf_bus_unregister();
}
static int __init brcmf_module_init(void)
{
int error;
brcmf_dbg(TRACE, "Enter\n");
error = brcmf_bus_register();
if (error) {
brcmf_dbg(ERROR, "brcmf_bus_register failed\n");
goto failed;
}
return 0;
failed:
return -EINVAL;
}
module_init(brcmf_module_init);
module_exit(brcmf_module_cleanup);
int brcmf_os_proto_block(struct brcmf_pub *drvr)
{
struct brcmf_info *drvr_priv = drvr->info;

View file

@ -91,7 +91,6 @@ struct rte_console {
#include "dhd_bus.h"
#include "dhd_proto.h"
#include "dhd_dbg.h"
#include <bcmchip.h>
#define TXQLEN 2048 /* bulk tx queue length */
#define TXHI (TXQLEN - 256) /* turn on flow control above TXHI */
@ -310,6 +309,11 @@ struct rte_console {
/* Flags for SDH calls */
#define F2SYNC (SDIO_REQ_4BYTE | SDIO_REQ_FIXED)
#define BRCMFMAC_FW_NAME "brcm/brcmfmac.bin"
#define BRCMFMAC_NV_NAME "brcm/brcmfmac.txt"
MODULE_FIRMWARE(BRCMFMAC_FW_NAME);
MODULE_FIRMWARE(BRCMFMAC_NV_NAME);
/*
* Conversion of 802.1D priority to precedence level
*/
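The MODULE_FIRMWARE() lines above only advertise the firmware file names to userspace tooling; the files themselves are still fetched at runtime. A minimal sketch of the request/release pairing those names feed into (hypothetical helper, shown for illustration only):

#include <linux/device.h>
#include <linux/firmware.h>

static int example_load_fw(struct device *dev)
{
	const struct firmware *fw;
	int err;

	/* Name matches BRCMFMAC_FW_NAME declared above. */
	err = request_firmware(&fw, "brcm/brcmfmac.bin", dev);
	if (err)
		return err;

	/* ... push fw->data (fw->size bytes) to the dongle here ... */

	release_firmware(fw);
	return 0;
}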
@ -445,7 +449,7 @@ struct sdpcm_shared_le {
/* misc chip info needed by some of the routines */
/* Private data for SDIO bus interaction */
struct brcmf_bus {
struct brcmf_sdio {
struct brcmf_pub *drvr;
struct brcmf_sdio_dev *sdiodev; /* sdio device handler */
@ -562,9 +566,7 @@ struct brcmf_bus {
struct semaphore sdsem;
const char *fw_name;
const struct firmware *firmware;
const char *nv_name;
u32 fw_ptr;
};
@ -602,7 +604,7 @@ static void pkt_align(struct sk_buff *p, int len, int align)
}
/* To check if there's window offered */
static bool data_ok(struct brcmf_bus *bus)
static bool data_ok(struct brcmf_sdio *bus)
{
return (u8)(bus->tx_max - bus->tx_seq) != 0 &&
((u8)(bus->tx_max - bus->tx_seq) & 0x80) == 0;
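The check above relies on 8-bit modular arithmetic: tx_max and tx_seq are u8 sequence counters, so the subtraction wraps naturally, and testing bit 7 rejects the case where tx_seq has already run past tx_max. A small runnable illustration of the same arithmetic (example only, not part of the patch):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool window_ok(uint8_t tx_max, uint8_t tx_seq)
{
	uint8_t delta = (uint8_t)(tx_max - tx_seq);

	return delta != 0 && (delta & 0x80) == 0;
}

int main(void)
{
	printf("%d\n", window_ok(10, 5));   /* 1: five frames of credit left */
	printf("%d\n", window_ok(5, 5));    /* 0: window exhausted */
	printf("%d\n", window_ok(3, 250));  /* 1: counters wrapped, still ok */
	printf("%d\n", window_ok(5, 10));   /* 0: tx_seq ran past tx_max */
	return 0;
}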
@ -613,7 +615,7 @@ static bool data_ok(struct brcmf_bus *bus)
* addresses on the 32 bit backplane bus.
*/
static void
r_sdreg32(struct brcmf_bus *bus, u32 *regvar, u32 reg_offset, u32 *retryvar)
r_sdreg32(struct brcmf_sdio *bus, u32 *regvar, u32 reg_offset, u32 *retryvar)
{
u8 idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);
*retryvar = 0;
@ -633,7 +635,7 @@ r_sdreg32(struct brcmf_bus *bus, u32 *regvar, u32 reg_offset, u32 *retryvar)
}
static void
w_sdreg32(struct brcmf_bus *bus, u32 regval, u32 reg_offset, u32 *retryvar)
w_sdreg32(struct brcmf_sdio *bus, u32 regval, u32 reg_offset, u32 *retryvar)
{
u8 idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);
*retryvar = 0;
@ -658,14 +660,14 @@ w_sdreg32(struct brcmf_bus *bus, u32 regval, u32 reg_offset, u32 *retryvar)
/* Packet free applicable unconditionally for sdio and sdspi.
* Conditional if bufpool was present for gspi bus.
*/
static void brcmf_sdbrcm_pktfree2(struct brcmf_bus *bus, struct sk_buff *pkt)
static void brcmf_sdbrcm_pktfree2(struct brcmf_sdio *bus, struct sk_buff *pkt)
{
if (bus->usebufpool)
brcmu_pkt_buf_free_skb(pkt);
}
/* Turn backplane clock on or off */
static int brcmf_sdbrcm_htclk(struct brcmf_bus *bus, bool on, bool pendok)
static int brcmf_sdbrcm_htclk(struct brcmf_sdio *bus, bool on, bool pendok)
{
int err;
u8 clkctl, clkreq, devctl;
@ -786,7 +788,7 @@ static int brcmf_sdbrcm_htclk(struct brcmf_bus *bus, bool on, bool pendok)
}
/* Change idle/active SD state */
static int brcmf_sdbrcm_sdclk(struct brcmf_bus *bus, bool on)
static int brcmf_sdbrcm_sdclk(struct brcmf_sdio *bus, bool on)
{
brcmf_dbg(TRACE, "Enter\n");
@ -799,7 +801,7 @@ static int brcmf_sdbrcm_sdclk(struct brcmf_bus *bus, bool on)
}
/* Transition SD and backplane clock readiness */
static int brcmf_sdbrcm_clkctl(struct brcmf_bus *bus, uint target, bool pendok)
static int brcmf_sdbrcm_clkctl(struct brcmf_sdio *bus, uint target, bool pendok)
{
#ifdef BCMDBG
uint oldstate = bus->clkstate;
@ -855,7 +857,7 @@ static int brcmf_sdbrcm_clkctl(struct brcmf_bus *bus, uint target, bool pendok)
return 0;
}
static int brcmf_sdbrcm_bussleep(struct brcmf_bus *bus, bool sleep)
static int brcmf_sdbrcm_bussleep(struct brcmf_sdio *bus, bool sleep)
{
uint retries = 0;
@ -927,13 +929,13 @@ static int brcmf_sdbrcm_bussleep(struct brcmf_bus *bus, bool sleep)
return 0;
}
static void bus_wake(struct brcmf_bus *bus)
static void bus_wake(struct brcmf_sdio *bus)
{
if (bus->sleeping)
brcmf_sdbrcm_bussleep(bus, false);
}
static u32 brcmf_sdbrcm_hostmail(struct brcmf_bus *bus)
static u32 brcmf_sdbrcm_hostmail(struct brcmf_sdio *bus)
{
u32 intstatus = 0;
u32 hmb_data;
@ -1009,7 +1011,7 @@ static u32 brcmf_sdbrcm_hostmail(struct brcmf_bus *bus)
return intstatus;
}
static void brcmf_sdbrcm_rxfail(struct brcmf_bus *bus, bool abort, bool rtx)
static void brcmf_sdbrcm_rxfail(struct brcmf_sdio *bus, bool abort, bool rtx)
{
uint retries = 0;
u16 lastrbc;
@ -1066,11 +1068,11 @@ static void brcmf_sdbrcm_rxfail(struct brcmf_bus *bus, bool abort, bool rtx)
/* If we can't reach the device, signal failure */
if (err || brcmf_sdcard_regfail(bus->sdiodev))
bus->drvr->busstate = BRCMF_BUS_DOWN;
bus->drvr->bus_if->state = BRCMF_BUS_DOWN;
}
/* copy a buffer into a pkt buffer chain */
static uint brcmf_sdbrcm_glom_from_buf(struct brcmf_bus *bus, uint len)
static uint brcmf_sdbrcm_glom_from_buf(struct brcmf_sdio *bus, uint len)
{
uint n, ret = 0;
struct sk_buff *p;
@ -1093,7 +1095,7 @@ static uint brcmf_sdbrcm_glom_from_buf(struct brcmf_bus *bus, uint len)
}
/* return total length of buffer chain */
static uint brcmf_sdbrcm_glom_len(struct brcmf_bus *bus)
static uint brcmf_sdbrcm_glom_len(struct brcmf_sdio *bus)
{
struct sk_buff *p;
uint total;
@ -1104,7 +1106,7 @@ static uint brcmf_sdbrcm_glom_len(struct brcmf_bus *bus)
return total;
}
static void brcmf_sdbrcm_free_glom(struct brcmf_bus *bus)
static void brcmf_sdbrcm_free_glom(struct brcmf_sdio *bus)
{
struct sk_buff *cur, *next;
@ -1114,13 +1116,13 @@ static void brcmf_sdbrcm_free_glom(struct brcmf_bus *bus)
}
}
static u8 brcmf_sdbrcm_rxglom(struct brcmf_bus *bus, u8 rxseq)
static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
{
u16 dlen, totlen;
u8 *dptr, num = 0;
u16 sublen, check;
struct sk_buff *pfirst, *plast, *pnext, *save_pfirst;
struct sk_buff *pfirst, *pnext;
int errcode;
u8 chan, seq, doff, sfdoff;
@ -1137,7 +1139,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_bus *bus, u8 rxseq)
/* If there's a descriptor, generate the packet chain */
if (bus->glomd) {
pfirst = plast = pnext = NULL;
pfirst = pnext = NULL;
dlen = (u16) (bus->glomd->len);
dptr = bus->glomd->data;
if (!dlen || (dlen & 1)) {
@ -1228,17 +1230,14 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_bus *bus, u8 rxseq)
* packet and copy into the chain.
*/
if (usechain) {
errcode = brcmf_sdcard_recv_buf(bus->sdiodev,
errcode = brcmf_sdcard_recv_chain(bus->sdiodev,
bus->sdiodev->sbwad,
SDIO_FUNC_2,
F2SYNC, (u8 *) pfirst->data, dlen,
pfirst);
SDIO_FUNC_2, F2SYNC, &bus->glom);
} else if (bus->dataptr) {
errcode = brcmf_sdcard_recv_buf(bus->sdiodev,
bus->sdiodev->sbwad,
SDIO_FUNC_2,
F2SYNC, bus->dataptr, dlen,
NULL);
SDIO_FUNC_2, F2SYNC,
bus->dataptr, dlen);
sublen = (u16) brcmf_sdbrcm_glom_from_buf(bus, dlen);
if (sublen != dlen) {
brcmf_dbg(ERROR, "FAILED TO COPY, dlen %d sublen %d\n",
@ -1338,10 +1337,14 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_bus *bus, u8 rxseq)
/* Remove superframe header, remember offset */
skb_pull(pfirst, doff);
sfdoff = doff;
num = 0;
/* Validate all the subframe headers */
for (num = 0, pnext = pfirst; pnext && !errcode;
num++, pnext = pnext->next) {
skb_queue_walk(&bus->glom, pnext) {
/* leave when invalid subframe is found */
if (errcode)
break;
dptr = (u8 *) (pnext->data);
dlen = (u16) (pnext->len);
sublen = get_unaligned_le16(dptr);
@ -1374,6 +1377,8 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_bus *bus, u8 rxseq)
num, doff, sublen, SDPCM_HDRLEN);
errcode = -1;
}
/* increase the subframe count */
num++;
}
if (errcode) {
@ -1394,13 +1399,8 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_bus *bus, u8 rxseq)
}
/* Basic SD framing looks ok - process each packet (header) */
save_pfirst = pfirst;
plast = NULL;
for (num = 0; pfirst; rxseq++, pfirst = pnext) {
pnext = pfirst->next;
pfirst->next = NULL;
skb_queue_walk_safe(&bus->glom, pfirst, pnext) {
dptr = (u8 *) (pfirst->data);
sublen = get_unaligned_le16(dptr);
chan = SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]);
@ -1420,6 +1420,8 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_bus *bus, u8 rxseq)
bus->rx_badseq++;
rxseq = seq;
}
rxseq++;
#ifdef BCMDBG
if (BRCMF_BYTES_ON() && BRCMF_DATA_ON()) {
printk(KERN_DEBUG "Rx Subframe Data:\n");
@ -1432,36 +1434,22 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_bus *bus, u8 rxseq)
skb_pull(pfirst, doff);
if (pfirst->len == 0) {
skb_unlink(pfirst, &bus->glom);
brcmu_pkt_buf_free_skb(pfirst);
if (plast)
plast->next = pnext;
else
save_pfirst = pnext;
continue;
} else if (brcmf_proto_hdrpull(bus->drvr, &ifidx,
pfirst) != 0) {
brcmf_dbg(ERROR, "rx protocol error\n");
bus->drvr->rx_errors++;
skb_unlink(pfirst, &bus->glom);
brcmu_pkt_buf_free_skb(pfirst);
if (plast)
plast->next = pnext;
else
save_pfirst = pnext;
continue;
}
/* this packet will go up, link back into
chain and count it */
pfirst->next = pnext;
plast = pfirst;
num++;
#ifdef BCMDBG
if (BRCMF_GLOM_ON()) {
brcmf_dbg(GLOM, "subframe %d to stack, %p (%p/%d) nxt/lnk %p/%p\n",
num, pfirst, pfirst->data,
bus->glom.qlen, pfirst, pfirst->data,
pfirst->len, pfirst->next,
pfirst->prev);
print_hex_dump_bytes("", DUMP_PREFIX_OFFSET,
@ -1470,19 +1458,20 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_bus *bus, u8 rxseq)
}
#endif /* BCMDBG */
}
if (num) {
/* send any remaining packets up */
if (bus->glom.qlen) {
up(&bus->sdsem);
brcmf_rx_frame(bus->drvr, ifidx, save_pfirst, num);
brcmf_rx_frame(bus->drvr, ifidx, &bus->glom);
down(&bus->sdsem);
}
bus->rxglomframes++;
bus->rxglompkts += num;
bus->rxglompkts += bus->glom.qlen;
}
return num;
}
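The glom rework above drops the hand-rolled next-pointer bookkeeping (plast/save_pfirst) in favour of the standard sk_buff queue helpers. For reference, the two iteration patterns it now uses look like this in isolation, skb_queue_walk() for a read-only pass and skb_queue_walk_safe() when entries may be unlinked mid-walk (sketch only, assuming a queue initialised with skb_queue_head_init()):

#include <linux/skbuff.h>

static void example_walk_glom(struct sk_buff_head *glom)
{
	struct sk_buff *skb, *tmp;

	/* Read-only pass: the queue must not change while walking. */
	skb_queue_walk(glom, skb)
		pr_debug("subframe len %u\n", skb->len);

	/* Safe pass: entries may be unlinked and freed during the walk. */
	skb_queue_walk_safe(glom, skb, tmp) {
		if (skb->len == 0) {
			skb_unlink(skb, glom);
			dev_kfree_skb(skb);
		}
	}
}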
static int brcmf_sdbrcm_dcmd_resp_wait(struct brcmf_bus *bus, uint *condition,
static int brcmf_sdbrcm_dcmd_resp_wait(struct brcmf_sdio *bus, uint *condition,
bool *pending)
{
DECLARE_WAITQUEUE(wait, current);
@ -1504,7 +1493,7 @@ static int brcmf_sdbrcm_dcmd_resp_wait(struct brcmf_bus *bus, uint *condition,
return timeout;
}
static int brcmf_sdbrcm_dcmd_resp_wake(struct brcmf_bus *bus)
static int brcmf_sdbrcm_dcmd_resp_wake(struct brcmf_sdio *bus)
{
if (waitqueue_active(&bus->dcmd_resp_wait))
wake_up_interruptible(&bus->dcmd_resp_wait);
@ -1512,7 +1501,7 @@ static int brcmf_sdbrcm_dcmd_resp_wake(struct brcmf_bus *bus)
return 0;
}
static void
brcmf_sdbrcm_read_control(struct brcmf_bus *bus, u8 *hdr, uint len, uint doff)
brcmf_sdbrcm_read_control(struct brcmf_sdio *bus, u8 *hdr, uint len, uint doff)
{
uint rdlen, pad;
@ -1570,8 +1559,7 @@ brcmf_sdbrcm_read_control(struct brcmf_bus *bus, u8 *hdr, uint len, uint doff)
sdret = brcmf_sdcard_recv_buf(bus->sdiodev,
bus->sdiodev->sbwad,
SDIO_FUNC_2,
F2SYNC, (bus->rxctl + BRCMF_FIRSTREAD), rdlen,
NULL);
F2SYNC, (bus->rxctl + BRCMF_FIRSTREAD), rdlen);
bus->f2rxdata++;
/* Control frame failures need retransmission */
@ -1602,7 +1590,7 @@ done:
}
/* Pad read to blocksize for efficiency */
static void brcmf_pad(struct brcmf_bus *bus, u16 *pad, u16 *rdlen)
static void brcmf_pad(struct brcmf_sdio *bus, u16 *pad, u16 *rdlen)
{
if (bus->roundup && bus->blocksize && *rdlen > bus->blocksize) {
*pad = bus->blocksize - (*rdlen % bus->blocksize);
@ -1615,7 +1603,7 @@ static void brcmf_pad(struct brcmf_bus *bus, u16 *pad, u16 *rdlen)
}
static void
brcmf_alloc_pkt_and_read(struct brcmf_bus *bus, u16 rdlen,
brcmf_alloc_pkt_and_read(struct brcmf_sdio *bus, u16 rdlen,
struct sk_buff **pkt, u8 **rxbuf)
{
int sdret; /* Return code from calls */
@ -1627,9 +1615,8 @@ brcmf_alloc_pkt_and_read(struct brcmf_bus *bus, u16 rdlen,
pkt_align(*pkt, rdlen, BRCMF_SDALIGN);
*rxbuf = (u8 *) ((*pkt)->data);
/* Read the entire frame */
sdret = brcmf_sdcard_recv_buf(bus->sdiodev, bus->sdiodev->sbwad,
SDIO_FUNC_2, F2SYNC,
*rxbuf, rdlen, *pkt);
sdret = brcmf_sdcard_recv_pkt(bus->sdiodev, bus->sdiodev->sbwad,
SDIO_FUNC_2, F2SYNC, *pkt);
bus->f2rxdata++;
if (sdret < 0) {
@ -1648,7 +1635,7 @@ brcmf_alloc_pkt_and_read(struct brcmf_bus *bus, u16 rdlen,
/* Checks the header */
static int
brcmf_check_rxbuf(struct brcmf_bus *bus, struct sk_buff *pkt, u8 *rxbuf,
brcmf_check_rxbuf(struct brcmf_sdio *bus, struct sk_buff *pkt, u8 *rxbuf,
u8 rxseq, u16 nextlen, u16 *len)
{
u16 check;
@ -1704,7 +1691,7 @@ fail:
/* Return true if there may be more frames to read */
static uint
brcmf_sdbrcm_readframes(struct brcmf_bus *bus, uint maxframes, bool *finished)
brcmf_sdbrcm_readframes(struct brcmf_sdio *bus, uint maxframes, bool *finished)
{
u16 len, check; /* Extracted hardware header fields */
u8 chan, seq, doff; /* Extracted software header fields */
@ -1727,7 +1714,8 @@ brcmf_sdbrcm_readframes(struct brcmf_bus *bus, uint maxframes, bool *finished)
*finished = false;
for (rxseq = bus->rx_seq, rxleft = maxframes;
!bus->rxskip && rxleft && bus->drvr->busstate != BRCMF_BUS_DOWN;
!bus->rxskip && rxleft &&
bus->drvr->bus_if->state != BRCMF_BUS_DOWN;
rxseq++, rxleft--) {
/* Handle glomming separately */
@ -1857,7 +1845,7 @@ brcmf_sdbrcm_readframes(struct brcmf_bus *bus, uint maxframes, bool *finished)
/* Read frame header (hardware and software) */
sdret = brcmf_sdcard_recv_buf(bus->sdiodev, bus->sdiodev->sbwad,
SDIO_FUNC_2, F2SYNC, bus->rxhdr,
BRCMF_FIRSTREAD, NULL);
BRCMF_FIRSTREAD);
bus->f2rxhdrs++;
if (sdret < 0) {
@ -2006,9 +1994,8 @@ brcmf_sdbrcm_readframes(struct brcmf_bus *bus, uint maxframes, bool *finished)
pkt_align(pkt, rdlen, BRCMF_SDALIGN);
/* Read the remaining frame data */
sdret = brcmf_sdcard_recv_buf(bus->sdiodev, bus->sdiodev->sbwad,
SDIO_FUNC_2, F2SYNC, ((u8 *) (pkt->data)),
rdlen, pkt);
sdret = brcmf_sdcard_recv_pkt(bus->sdiodev, bus->sdiodev->sbwad,
SDIO_FUNC_2, F2SYNC, pkt);
bus->f2rxdata++;
if (sdret < 0) {
@ -2075,7 +2062,7 @@ deliver:
/* Unlock during rx call */
up(&bus->sdsem);
brcmf_rx_frame(bus->drvr, ifidx, pkt, 1);
brcmf_rx_packet(bus->drvr, ifidx, pkt);
down(&bus->sdsem);
}
rxcount = maxframes - rxleft;
@ -2095,16 +2082,8 @@ deliver:
return rxcount;
}
static int
brcmf_sdbrcm_send_buf(struct brcmf_bus *bus, u32 addr, uint fn, uint flags,
u8 *buf, uint nbytes, struct sk_buff *pkt)
{
return brcmf_sdcard_send_buf
(bus->sdiodev, addr, fn, flags, buf, nbytes, pkt);
}
static void
brcmf_sdbrcm_wait_for_event(struct brcmf_bus *bus, bool *lockvar)
brcmf_sdbrcm_wait_for_event(struct brcmf_sdio *bus, bool *lockvar)
{
up(&bus->sdsem);
wait_event_interruptible_timeout(bus->ctrl_wait,
@ -2114,7 +2093,7 @@ brcmf_sdbrcm_wait_for_event(struct brcmf_bus *bus, bool *lockvar)
}
static void
brcmf_sdbrcm_wait_event_wakeup(struct brcmf_bus *bus)
brcmf_sdbrcm_wait_event_wakeup(struct brcmf_sdio *bus)
{
if (waitqueue_active(&bus->ctrl_wait))
wake_up_interruptible(&bus->ctrl_wait);
@ -2123,7 +2102,7 @@ brcmf_sdbrcm_wait_event_wakeup(struct brcmf_bus *bus)
/* Writes a HW/SW header into the packet and sends it. */
/* Assumes: (a) header space already there, (b) caller holds lock */
static int brcmf_sdbrcm_txpkt(struct brcmf_bus *bus, struct sk_buff *pkt,
static int brcmf_sdbrcm_txpkt(struct brcmf_sdio *bus, struct sk_buff *pkt,
uint chan, bool free_pkt)
{
int ret;
@ -2212,9 +2191,8 @@ static int brcmf_sdbrcm_txpkt(struct brcmf_bus *bus, struct sk_buff *pkt,
if (len & (ALIGNMENT - 1))
len = roundup(len, ALIGNMENT);
ret = brcmf_sdbrcm_send_buf(bus, bus->sdiodev->sbwad,
SDIO_FUNC_2, F2SYNC, frame,
len, pkt);
ret = brcmf_sdcard_send_pkt(bus->sdiodev, bus->sdiodev->sbwad,
SDIO_FUNC_2, F2SYNC, pkt);
bus->f2txdata++;
if (ret < 0) {
@ -2261,7 +2239,7 @@ done:
return ret;
}
static uint brcmf_sdbrcm_sendfromq(struct brcmf_bus *bus, uint maxframes)
static uint brcmf_sdbrcm_sendfromq(struct brcmf_sdio *bus, uint maxframes)
{
struct sk_buff *pkt;
u32 intstatus = 0;
@ -2309,14 +2287,14 @@ static uint brcmf_sdbrcm_sendfromq(struct brcmf_bus *bus, uint maxframes)
}
/* Deflow-control stack if needed */
if (drvr->up && (drvr->busstate == BRCMF_BUS_DATA) &&
if (drvr->up && (drvr->bus_if->state == BRCMF_BUS_DATA) &&
drvr->txoff && (pktq_len(&bus->txq) < TXLOW))
brcmf_txflowcontrol(drvr, 0, OFF);
return cnt;
}
static bool brcmf_sdbrcm_dpc(struct brcmf_bus *bus)
static bool brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
{
u32 intstatus, newstatus = 0;
uint retries = 0;
@ -2344,7 +2322,7 @@ static bool brcmf_sdbrcm_dpc(struct brcmf_bus *bus)
SBSDIO_DEVICE_CTL, &err);
if (err) {
brcmf_dbg(ERROR, "error reading DEVCTL: %d\n", err);
bus->drvr->busstate = BRCMF_BUS_DOWN;
bus->drvr->bus_if->state = BRCMF_BUS_DOWN;
}
#endif /* BCMDBG */
@ -2354,7 +2332,7 @@ static bool brcmf_sdbrcm_dpc(struct brcmf_bus *bus)
if (err) {
brcmf_dbg(ERROR, "error reading CSR: %d\n",
err);
bus->drvr->busstate = BRCMF_BUS_DOWN;
bus->drvr->bus_if->state = BRCMF_BUS_DOWN;
}
brcmf_dbg(INFO, "DPC: PENDING, devctl 0x%02x clkctl 0x%02x\n",
@ -2367,7 +2345,7 @@ static bool brcmf_sdbrcm_dpc(struct brcmf_bus *bus)
if (err) {
brcmf_dbg(ERROR, "error reading DEVCTL: %d\n",
err);
bus->drvr->busstate = BRCMF_BUS_DOWN;
bus->drvr->bus_if->state = BRCMF_BUS_DOWN;
}
devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
@ -2375,7 +2353,7 @@ static bool brcmf_sdbrcm_dpc(struct brcmf_bus *bus)
if (err) {
brcmf_dbg(ERROR, "error writing DEVCTL: %d\n",
err);
bus->drvr->busstate = BRCMF_BUS_DOWN;
bus->drvr->bus_if->state = BRCMF_BUS_DOWN;
}
bus->clkstate = CLK_AVAIL;
} else {
@ -2477,9 +2455,9 @@ clkwait:
(bus->clkstate == CLK_AVAIL)) {
int ret, i;
ret = brcmf_sdbrcm_send_buf(bus, bus->sdiodev->sbwad,
ret = brcmf_sdcard_send_buf(bus->sdiodev, bus->sdiodev->sbwad,
SDIO_FUNC_2, F2SYNC, (u8 *) bus->ctrl_frame_buf,
(u32) bus->ctrl_frame_len, NULL);
(u32) bus->ctrl_frame_len);
if (ret < 0) {
/* On failure, abort the command and
@ -2531,11 +2509,11 @@ clkwait:
else await next interrupt */
/* On failed register access, all bets are off:
no resched or interrupts */
if ((bus->drvr->busstate == BRCMF_BUS_DOWN) ||
if ((bus->drvr->bus_if->state == BRCMF_BUS_DOWN) ||
brcmf_sdcard_regfail(bus->sdiodev)) {
brcmf_dbg(ERROR, "failed backplane access over SDIO, halting operation %d\n",
brcmf_sdcard_regfail(bus->sdiodev));
bus->drvr->busstate = BRCMF_BUS_DOWN;
bus->drvr->bus_if->state = BRCMF_BUS_DOWN;
bus->intstatus = 0;
} else if (bus->clkstate == CLK_PENDING) {
brcmf_dbg(INFO, "rescheduled due to CLK_PENDING awaiting I_CHIPACTIVE interrupt\n");
@ -2562,7 +2540,7 @@ clkwait:
static int brcmf_sdbrcm_dpc_thread(void *data)
{
struct brcmf_bus *bus = (struct brcmf_bus *) data;
struct brcmf_sdio *bus = (struct brcmf_sdio *) data;
allow_signal(SIGTERM);
/* Run until signal received */
@ -2572,12 +2550,12 @@ static int brcmf_sdbrcm_dpc_thread(void *data)
if (!wait_for_completion_interruptible(&bus->dpc_wait)) {
/* Call bus dpc unless it indicated down
(then clean stop) */
if (bus->drvr->busstate != BRCMF_BUS_DOWN) {
if (bus->drvr->bus_if->state != BRCMF_BUS_DOWN) {
if (brcmf_sdbrcm_dpc(bus))
complete(&bus->dpc_wait);
} else {
/* after stopping the bus, exit thread */
brcmf_sdbrcm_bus_stop(bus);
brcmf_sdbrcm_bus_stop(bus->sdiodev->dev);
bus->dpc_tsk = NULL;
break;
}
@ -2587,10 +2565,13 @@ static int brcmf_sdbrcm_dpc_thread(void *data)
return 0;
}
int brcmf_sdbrcm_bus_txdata(struct brcmf_bus *bus, struct sk_buff *pkt)
int brcmf_sdbrcm_bus_txdata(struct device *dev, struct sk_buff *pkt)
{
int ret = -EBADE;
uint datalen, prec;
struct brcmf_bus *bus_if = dev_get_drvdata(dev);
struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv;
struct brcmf_sdio *bus = sdiodev->bus;
brcmf_dbg(TRACE, "Enter\n");
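The same three-step lookup (device drvdata, then the generic bus interface, then the SDIO-specific state) is repeated in each bus entry point converted below (txctl, rxctl, bus_stop, bus_init). If that repetition grows, one option would be a small inline helper along these lines (hypothetical refactoring sketch built from the structures shown in this patch, not something the patch adds):

/* Assumes the driver's own headers defining brcmf_bus, brcmf_sdio_dev
 * and brcmf_sdio are already included. */
static inline struct brcmf_sdio *dev_to_sdio_bus(struct device *dev)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv;

	return sdiodev->bus;
}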
@ -2638,7 +2619,7 @@ int brcmf_sdbrcm_bus_txdata(struct brcmf_bus *bus, struct sk_buff *pkt)
}
static int
brcmf_sdbrcm_membytes(struct brcmf_bus *bus, bool write, u32 address, u8 *data,
brcmf_sdbrcm_membytes(struct brcmf_sdio *bus, bool write, u32 address, u8 *data,
uint size)
{
int bcmerror = 0;
@ -2699,7 +2680,7 @@ xfer_done:
#ifdef BCMDBG
#define CONSOLE_LINE_MAX 192
static int brcmf_sdbrcm_readconsole(struct brcmf_bus *bus)
static int brcmf_sdbrcm_readconsole(struct brcmf_sdio *bus)
{
struct brcmf_console *c = &bus->console;
u8 line[CONSOLE_LINE_MAX], ch;
@ -2776,14 +2757,14 @@ break2:
}
#endif /* BCMDBG */
static int brcmf_tx_frame(struct brcmf_bus *bus, u8 *frame, u16 len)
static int brcmf_tx_frame(struct brcmf_sdio *bus, u8 *frame, u16 len)
{
int i;
int ret;
bus->ctrl_frame_stat = false;
ret = brcmf_sdbrcm_send_buf(bus, bus->sdiodev->sbwad,
SDIO_FUNC_2, F2SYNC, frame, len, NULL);
ret = brcmf_sdcard_send_buf(bus->sdiodev, bus->sdiodev->sbwad,
SDIO_FUNC_2, F2SYNC, frame, len);
if (ret < 0) {
/* On failure, abort the command and terminate the frame */
@ -2819,7 +2800,7 @@ static int brcmf_tx_frame(struct brcmf_bus *bus, u8 *frame, u16 len)
}
int
brcmf_sdbrcm_bus_txctl(struct brcmf_bus *bus, unsigned char *msg, uint msglen)
brcmf_sdbrcm_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
{
u8 *frame;
u16 len;
@ -2827,6 +2808,9 @@ brcmf_sdbrcm_bus_txctl(struct brcmf_bus *bus, unsigned char *msg, uint msglen)
uint retries = 0;
u8 doff = 0;
int ret = -1;
struct brcmf_bus *bus_if = dev_get_drvdata(dev);
struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv;
struct brcmf_sdio *bus = sdiodev->bus;
brcmf_dbg(TRACE, "Enter\n");
@ -2934,11 +2918,14 @@ brcmf_sdbrcm_bus_txctl(struct brcmf_bus *bus, unsigned char *msg, uint msglen)
}
int
brcmf_sdbrcm_bus_rxctl(struct brcmf_bus *bus, unsigned char *msg, uint msglen)
brcmf_sdbrcm_bus_rxctl(struct device *dev, unsigned char *msg, uint msglen)
{
int timeleft;
uint rxlen = 0;
bool pending;
struct brcmf_bus *bus_if = dev_get_drvdata(dev);
struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv;
struct brcmf_sdio *bus = sdiodev->bus;
brcmf_dbg(TRACE, "Enter\n");
@ -2971,7 +2958,7 @@ brcmf_sdbrcm_bus_rxctl(struct brcmf_bus *bus, unsigned char *msg, uint msglen)
return rxlen ? (int)rxlen : -ETIMEDOUT;
}
static int brcmf_sdbrcm_downloadvars(struct brcmf_bus *bus, void *arg, int len)
static int brcmf_sdbrcm_downloadvars(struct brcmf_sdio *bus, void *arg, int len)
{
int bcmerror = 0;
@ -3004,7 +2991,7 @@ err:
return bcmerror;
}
static int brcmf_sdbrcm_write_vars(struct brcmf_bus *bus)
static int brcmf_sdbrcm_write_vars(struct brcmf_sdio *bus)
{
int bcmerror = 0;
u32 varsize;
@ -3091,7 +3078,7 @@ static int brcmf_sdbrcm_write_vars(struct brcmf_bus *bus)
return bcmerror;
}
static int brcmf_sdbrcm_download_state(struct brcmf_bus *bus, bool enter)
static int brcmf_sdbrcm_download_state(struct brcmf_sdio *bus, bool enter)
{
uint retries;
int bcmerror = 0;
@ -3134,13 +3121,13 @@ static int brcmf_sdbrcm_download_state(struct brcmf_bus *bus, bool enter)
/* Allow HT Clock now that the ARM is running. */
bus->alp_only = false;
bus->drvr->busstate = BRCMF_BUS_LOAD;
bus->drvr->bus_if->state = BRCMF_BUS_LOAD;
}
fail:
return bcmerror;
}
static int brcmf_sdbrcm_get_image(char *buf, int len, struct brcmf_bus *bus)
static int brcmf_sdbrcm_get_image(char *buf, int len, struct brcmf_sdio *bus)
{
if (bus->firmware->size < bus->fw_ptr + len)
len = bus->firmware->size - bus->fw_ptr;
@ -3150,10 +3137,7 @@ static int brcmf_sdbrcm_get_image(char *buf, int len, struct brcmf_bus *bus)
return len;
}
MODULE_FIRMWARE(BCM4329_FW_NAME);
MODULE_FIRMWARE(BCM4329_NV_NAME);
static int brcmf_sdbrcm_download_code_file(struct brcmf_bus *bus)
static int brcmf_sdbrcm_download_code_file(struct brcmf_sdio *bus)
{
int offset = 0;
uint len;
@ -3162,8 +3146,7 @@ static int brcmf_sdbrcm_download_code_file(struct brcmf_bus *bus)
brcmf_dbg(INFO, "Enter\n");
bus->fw_name = BCM4329_FW_NAME;
ret = request_firmware(&bus->firmware, bus->fw_name,
ret = request_firmware(&bus->firmware, BRCMFMAC_FW_NAME,
&bus->sdiodev->func[2]->dev);
if (ret) {
brcmf_dbg(ERROR, "Fail to request firmware %d\n", ret);
@ -3253,15 +3236,14 @@ static uint brcmf_process_nvram_vars(char *varbuf, uint len)
return buf_len;
}
static int brcmf_sdbrcm_download_nvram(struct brcmf_bus *bus)
static int brcmf_sdbrcm_download_nvram(struct brcmf_sdio *bus)
{
uint len;
char *memblock = NULL;
char *bufp;
int ret;
bus->nv_name = BCM4329_NV_NAME;
ret = request_firmware(&bus->firmware, bus->nv_name,
ret = request_firmware(&bus->firmware, BRCMFMAC_NV_NAME,
&bus->sdiodev->func[2]->dev);
if (ret) {
brcmf_dbg(ERROR, "Fail to request nvram %d\n", ret);
@ -3301,7 +3283,7 @@ err:
return ret;
}
static int _brcmf_sdbrcm_download_firmware(struct brcmf_bus *bus)
static int _brcmf_sdbrcm_download_firmware(struct brcmf_sdio *bus)
{
int bcmerror = -1;
@ -3334,7 +3316,7 @@ err:
}
static bool
brcmf_sdbrcm_download_firmware(struct brcmf_bus *bus)
brcmf_sdbrcm_download_firmware(struct brcmf_sdio *bus)
{
bool ret;
@ -3348,12 +3330,15 @@ brcmf_sdbrcm_download_firmware(struct brcmf_bus *bus)
return ret;
}
void brcmf_sdbrcm_bus_stop(struct brcmf_bus *bus)
void brcmf_sdbrcm_bus_stop(struct device *dev)
{
u32 local_hostintmask;
u8 saveclk;
uint retries;
int err;
struct brcmf_bus *bus_if = dev_get_drvdata(dev);
struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv;
struct brcmf_sdio *bus = sdiodev->bus;
brcmf_dbg(TRACE, "Enter\n");
@ -3382,7 +3367,7 @@ void brcmf_sdbrcm_bus_stop(struct brcmf_bus *bus)
bus->hostintmask = 0;
/* Change our idea of bus state */
bus->drvr->busstate = BRCMF_BUS_DOWN;
bus->drvr->bus_if->state = BRCMF_BUS_DOWN;
/* Force clocks on backplane to be sure F2 interrupt propagates */
saveclk = brcmf_sdcard_cfg_read(bus->sdiodev, SDIO_FUNC_1,
@ -3426,9 +3411,11 @@ void brcmf_sdbrcm_bus_stop(struct brcmf_bus *bus)
up(&bus->sdsem);
}
int brcmf_sdbrcm_bus_init(struct brcmf_pub *drvr)
int brcmf_sdbrcm_bus_init(struct device *dev)
{
struct brcmf_bus *bus = drvr->bus;
struct brcmf_bus *bus_if = dev_get_drvdata(dev);
struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv;
struct brcmf_sdio *bus = sdiodev->bus;
unsigned long timeout;
uint retries = 0;
u8 ready, enable;
@ -3438,7 +3425,7 @@ int brcmf_sdbrcm_bus_init(struct brcmf_pub *drvr)
brcmf_dbg(TRACE, "Enter\n");
/* try to download image and nvram to the dongle */
if (drvr->busstate == BRCMF_BUS_DOWN) {
if (bus_if->state == BRCMF_BUS_DOWN) {
if (!(brcmf_sdbrcm_download_firmware(bus)))
return -1;
}
@ -3504,7 +3491,7 @@ int brcmf_sdbrcm_bus_init(struct brcmf_pub *drvr)
SBSDIO_WATERMARK, 8, &err);
/* Set bus state according to enable result */
drvr->busstate = BRCMF_BUS_DATA;
bus_if->state = BRCMF_BUS_DATA;
}
else {
@ -3519,7 +3506,7 @@ int brcmf_sdbrcm_bus_init(struct brcmf_pub *drvr)
SBSDIO_FUNC1_CHIPCLKCSR, saveclk, &err);
/* If we didn't come up, turn off backplane clock */
if (drvr->busstate != BRCMF_BUS_DATA)
if (bus_if->state != BRCMF_BUS_DATA)
brcmf_sdbrcm_clkctl(bus, CLK_NONE, false);
exit:
@ -3530,7 +3517,7 @@ exit:
void brcmf_sdbrcm_isr(void *arg)
{
struct brcmf_bus *bus = (struct brcmf_bus *) arg;
struct brcmf_sdio *bus = (struct brcmf_sdio *) arg;
brcmf_dbg(TRACE, "Enter\n");
@ -3539,7 +3526,7 @@ void brcmf_sdbrcm_isr(void *arg)
return;
}
if (bus->drvr->busstate == BRCMF_BUS_DOWN) {
if (bus->drvr->bus_if->state == BRCMF_BUS_DOWN) {
brcmf_dbg(ERROR, "bus is down. we have nothing to do\n");
return;
}
@ -3562,14 +3549,14 @@ void brcmf_sdbrcm_isr(void *arg)
complete(&bus->dpc_wait);
}
static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_pub *drvr)
static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
{
struct brcmf_bus *bus;
#ifdef BCMDBG
struct brcmf_bus *bus_if = dev_get_drvdata(bus->sdiodev->dev);
#endif /* BCMDBG */
brcmf_dbg(TIMER, "Enter\n");
bus = drvr->bus;
/* Ignore the timer if simulating bus down */
if (bus->sleeping)
return false;
@ -3613,7 +3600,8 @@ static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_pub *drvr)
}
#ifdef BCMDBG
/* Poll for console output periodically */
if (drvr->busstate == BRCMF_BUS_DATA && bus->console_interval != 0) {
if (bus_if->state == BRCMF_BUS_DATA &&
bus->console_interval != 0) {
bus->console.count += BRCMF_WD_POLL_MS;
if (bus->console.count >= bus->console_interval) {
bus->console.count -= bus->console_interval;
@ -3651,7 +3639,7 @@ static bool brcmf_sdbrcm_chipmatch(u16 chipid)
return false;
}
static void brcmf_sdbrcm_release_malloc(struct brcmf_bus *bus)
static void brcmf_sdbrcm_release_malloc(struct brcmf_sdio *bus)
{
brcmf_dbg(TRACE, "Enter\n");
@ -3663,7 +3651,7 @@ static void brcmf_sdbrcm_release_malloc(struct brcmf_bus *bus)
bus->databuf = NULL;
}
static bool brcmf_sdbrcm_probe_malloc(struct brcmf_bus *bus)
static bool brcmf_sdbrcm_probe_malloc(struct brcmf_sdio *bus)
{
brcmf_dbg(TRACE, "Enter\n");
@ -3699,7 +3687,7 @@ fail:
}
static bool
brcmf_sdbrcm_probe_attach(struct brcmf_bus *bus, u32 regsva)
brcmf_sdbrcm_probe_attach(struct brcmf_sdio *bus, u32 regsva)
{
u8 clkctl = 0;
int err = 0;
@ -3784,7 +3772,7 @@ fail:
return false;
}
static bool brcmf_sdbrcm_probe_init(struct brcmf_bus *bus)
static bool brcmf_sdbrcm_probe_init(struct brcmf_sdio *bus)
{
brcmf_dbg(TRACE, "Enter\n");
@ -3792,7 +3780,7 @@ static bool brcmf_sdbrcm_probe_init(struct brcmf_bus *bus)
brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_0, SDIO_CCCR_IOEx,
SDIO_FUNC_ENABLE_1, NULL);
bus->drvr->busstate = BRCMF_BUS_DOWN;
bus->drvr->bus_if->state = BRCMF_BUS_DOWN;
bus->sleeping = false;
bus->rxflow = false;
@ -3819,7 +3807,7 @@ static bool brcmf_sdbrcm_probe_init(struct brcmf_bus *bus)
static int
brcmf_sdbrcm_watchdog_thread(void *data)
{
struct brcmf_bus *bus = (struct brcmf_bus *)data;
struct brcmf_sdio *bus = (struct brcmf_sdio *)data;
allow_signal(SIGTERM);
/* Run until signal received */
@ -3827,7 +3815,7 @@ brcmf_sdbrcm_watchdog_thread(void *data)
if (kthread_should_stop())
break;
if (!wait_for_completion_interruptible(&bus->watchdog_wait)) {
brcmf_sdbrcm_bus_watchdog(bus->drvr);
brcmf_sdbrcm_bus_watchdog(bus);
/* Count the tick for reference */
bus->drvr->tickcnt++;
} else
@ -3839,7 +3827,7 @@ brcmf_sdbrcm_watchdog_thread(void *data)
static void
brcmf_sdbrcm_watchdog(unsigned long data)
{
struct brcmf_bus *bus = (struct brcmf_bus *)data;
struct brcmf_sdio *bus = (struct brcmf_sdio *)data;
if (bus->watchdog_tsk) {
complete(&bus->watchdog_wait);
@ -3850,7 +3838,7 @@ brcmf_sdbrcm_watchdog(unsigned long data)
}
}
static void brcmf_sdbrcm_release_dongle(struct brcmf_bus *bus)
static void brcmf_sdbrcm_release_dongle(struct brcmf_sdio *bus)
{
brcmf_dbg(TRACE, "Enter\n");
@ -3867,7 +3855,7 @@ static void brcmf_sdbrcm_release_dongle(struct brcmf_bus *bus)
}
/* Detach and free everything */
static void brcmf_sdbrcm_release(struct brcmf_bus *bus)
static void brcmf_sdbrcm_release(struct brcmf_sdio *bus)
{
brcmf_dbg(TRACE, "Enter\n");
@ -3889,21 +3877,10 @@ static void brcmf_sdbrcm_release(struct brcmf_bus *bus)
brcmf_dbg(TRACE, "Disconnected\n");
}
void *brcmf_sdbrcm_probe(u16 bus_no, u16 slot, u16 func, uint bustype,
u32 regsva, struct brcmf_sdio_dev *sdiodev)
void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev)
{
int ret;
struct brcmf_bus *bus;
/* Init global variables at run-time, not as part of the declaration.
* This is required to support init/de-init of the driver.
* Initialization
* of globals as part of the declaration results in non-deterministic
* behavior since the value of the globals may be different on the
* first time that the driver is initialized vs subsequent
* initializations.
*/
brcmf_c_init();
struct brcmf_sdio *bus;
brcmf_dbg(TRACE, "Enter\n");
@ -3911,7 +3888,7 @@ void *brcmf_sdbrcm_probe(u16 bus_no, u16 slot, u16 func, uint bustype,
* regsva == SI_ENUM_BASE*/
/* Allocate private bus interface state */
bus = kzalloc(sizeof(struct brcmf_bus), GFP_ATOMIC);
bus = kzalloc(sizeof(struct brcmf_sdio), GFP_ATOMIC);
if (!bus)
goto fail;
@ -3963,7 +3940,7 @@ void *brcmf_sdbrcm_probe(u16 bus_no, u16 slot, u16 func, uint bustype,
}
/* Attach to the brcmf/OS/network interface */
bus->drvr = brcmf_attach(bus, SDPCM_RESERVE);
bus->drvr = brcmf_attach(bus, SDPCM_RESERVE, bus->sdiodev->dev);
if (!bus->drvr) {
brcmf_dbg(ERROR, "brcmf_attach failed\n");
goto fail;
@ -4015,7 +3992,7 @@ fail:
void brcmf_sdbrcm_disconnect(void *ptr)
{
struct brcmf_bus *bus = (struct brcmf_bus *)ptr;
struct brcmf_sdio *bus = (struct brcmf_sdio *)ptr;
brcmf_dbg(TRACE, "Enter\n");
@ -4025,13 +4002,8 @@ void brcmf_sdbrcm_disconnect(void *ptr)
brcmf_dbg(TRACE, "Disconnected\n");
}
struct device *brcmf_bus_get_device(struct brcmf_bus *bus)
{
return &bus->sdiodev->func[2]->dev;
}
void
brcmf_sdbrcm_wd_timer(struct brcmf_bus *bus, uint wdtick)
brcmf_sdbrcm_wd_timer(struct brcmf_sdio *bus, uint wdtick)
{
/* Totally stop the timer */
if (!wdtick && bus->wd_timer_valid == true) {
@ -4042,7 +4014,7 @@ brcmf_sdbrcm_wd_timer(struct brcmf_bus *bus, uint wdtick)
}
/* don't start the wd until fw is loaded */
if (bus->drvr->busstate == BRCMF_BUS_DOWN)
if (bus->drvr->bus_if->state == BRCMF_BUS_DOWN)
return;
if (wdtick) {

View file

@ -132,9 +132,9 @@ struct brcmf_sdio_dev {
atomic_t suspend; /* suspend flag */
wait_queue_head_t request_byte_wait;
wait_queue_head_t request_word_wait;
wait_queue_head_t request_packet_wait;
wait_queue_head_t request_chain_wait;
wait_queue_head_t request_buffer_wait;
struct device *dev;
};
/* Register/deregister device interrupt handler. */
@ -182,11 +182,21 @@ extern bool brcmf_sdcard_regfail(struct brcmf_sdio_dev *sdiodev);
* NOTE: Async operation is not currently supported.
*/
extern int
brcmf_sdcard_send_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
uint flags, struct sk_buff *pkt);
extern int
brcmf_sdcard_send_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
uint flags, u8 *buf, uint nbytes, struct sk_buff *pkt);
uint flags, u8 *buf, uint nbytes);
extern int
brcmf_sdcard_recv_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
uint flags, struct sk_buff *pkt);
extern int
brcmf_sdcard_recv_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
uint flags, u8 *buf, uint nbytes, struct sk_buff *pkt);
uint flags, u8 *buf, uint nbytes);
extern int
brcmf_sdcard_recv_chain(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
uint flags, struct sk_buff_head *pktq);
/* Flags bits */
@ -237,16 +247,18 @@ brcmf_sdioh_request_word(struct brcmf_sdio_dev *sdiodev,
/* read or write any buffer using cmd53 */
extern int
brcmf_sdioh_request_buffer(struct brcmf_sdio_dev *sdiodev,
uint fix_inc, uint rw, uint fnc_num,
u32 addr, uint regwidth,
u32 buflen, u8 *buffer, struct sk_buff *pkt);
uint fix_inc, uint rw, uint fnc_num, u32 addr,
struct sk_buff *pkt);
extern int
brcmf_sdioh_request_chain(struct brcmf_sdio_dev *sdiodev, uint fix_inc,
uint write, uint func, uint addr,
struct sk_buff_head *pktq);
/* Watchdog timer interface for pm ops */
extern void brcmf_sdio_wdtmr_enable(struct brcmf_sdio_dev *sdiodev,
bool enable);
extern void *brcmf_sdbrcm_probe(u16 bus_no, u16 slot, u16 func, uint bustype,
u32 regsva, struct brcmf_sdio_dev *sdiodev);
extern void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev);
extern void brcmf_sdbrcm_disconnect(void *ptr);
extern void brcmf_sdbrcm_isr(void *arg);
#endif /* _BRCM_SDH_H_ */

View file

@ -1429,7 +1429,7 @@ brcmf_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *ndev,
static s32
brcmf_cfg80211_set_tx_power(struct wiphy *wiphy,
enum nl80211_tx_power_setting type, s32 dbm)
enum nl80211_tx_power_setting type, s32 mbm)
{
struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
@ -1437,6 +1437,7 @@ brcmf_cfg80211_set_tx_power(struct wiphy *wiphy,
u16 txpwrmw;
s32 err = 0;
s32 disable = 0;
s32 dbm = MBM_TO_DBM(mbm);
WL_TRACE("Enter\n");
if (!check_sys_up(wiphy))
@ -1446,12 +1447,6 @@ brcmf_cfg80211_set_tx_power(struct wiphy *wiphy,
case NL80211_TX_POWER_AUTOMATIC:
break;
case NL80211_TX_POWER_LIMITED:
if (dbm < 0) {
WL_ERR("TX_POWER_LIMITED - dbm is negative\n");
err = -EINVAL;
goto done;
}
break;
case NL80211_TX_POWER_FIXED:
if (dbm < 0) {
WL_ERR("TX_POWER_FIXED - dbm is negative\n");

View file

@ -1239,10 +1239,9 @@ bool dma_rxreset(struct dma_pub *pub)
* the error(toss frames) could be fatal and cause many subsequent hard
* to debug problems
*/
int dma_txfast(struct dma_pub *pub, struct sk_buff *p0, bool commit)
int dma_txfast(struct dma_pub *pub, struct sk_buff *p, bool commit)
{
struct dma_info *di = (struct dma_info *)pub;
struct sk_buff *p, *next;
unsigned char *data;
uint len;
u16 txout;
@ -1254,50 +1253,37 @@ int dma_txfast(struct dma_pub *pub, struct sk_buff *p0, bool commit)
txout = di->txout;
/*
* Walk the chain of packet buffers
* allocating and initializing transmit descriptor entries.
* obtain and initialize transmit descriptor entry.
*/
for (p = p0; p; p = next) {
data = p->data;
len = p->len;
next = p->next;
data = p->data;
len = p->len;
/* return nonzero if out of tx descriptors */
if (nexttxd(di, txout) == di->txin)
goto outoftxd;
/* no use to transmit a zero length packet */
if (len == 0)
return 0;
if (len == 0)
continue;
/* return nonzero if out of tx descriptors */
if (nexttxd(di, txout) == di->txin)
goto outoftxd;
/* get physical address of buffer start */
pa = pci_map_single(di->pbus, data, len, PCI_DMA_TODEVICE);
/* get physical address of buffer start */
pa = pci_map_single(di->pbus, data, len, PCI_DMA_TODEVICE);
flags = 0;
if (p == p0)
flags |= D64_CTRL1_SOF;
/* With a DMA segment list, Descriptor table is filled
* using the segment list instead of looping over
* buffers in multi-chain DMA. Therefore, EOF for SGLIST
* is when end of segment list is reached.
*/
flags = D64_CTRL1_SOF | D64_CTRL1_IOC | D64_CTRL1_EOF;
if (txout == (di->ntxd - 1))
flags |= D64_CTRL1_EOT;
/* With a DMA segment list, Descriptor table is filled
* using the segment list instead of looping over
* buffers in multi-chain DMA. Therefore, EOF for SGLIST
* is when end of segment list is reached.
*/
if (next == NULL)
flags |= (D64_CTRL1_IOC | D64_CTRL1_EOF);
if (txout == (di->ntxd - 1))
flags |= D64_CTRL1_EOT;
dma64_dd_upd(di, di->txd64, pa, txout, &flags, len);
dma64_dd_upd(di, di->txd64, pa, txout, &flags, len);
txout = nexttxd(di, txout);
}
/* if last txd eof not set, fix it */
if (!(flags & D64_CTRL1_EOF))
di->txd64[prevtxd(di, txout)].ctrl1 =
cpu_to_le32(flags | D64_CTRL1_IOC | D64_CTRL1_EOF);
txout = nexttxd(di, txout);
/* save the packet */
di->txp[prevtxd(di, txout)] = p0;
di->txp[prevtxd(di, txout)] = p;
/* bump the tx descriptor index */
di->txout = txout;
@ -1314,7 +1300,7 @@ int dma_txfast(struct dma_pub *pub, struct sk_buff *p0, bool commit)
outoftxd:
DMA_ERROR("%s: out of txds !!!\n", di->name);
brcmu_pkt_buf_free_skb(p0);
brcmu_pkt_buf_free_skb(p);
di->dma.txavail = 0;
di->dma.txnobuf++;
return -1;

View file

@ -40,10 +40,10 @@
#define MAC_FILTERS (FIF_PROMISC_IN_BSS | \
FIF_ALLMULTI | \
FIF_FCSFAIL | \
FIF_PLCPFAIL | \
FIF_CONTROL | \
FIF_OTHER_BSS | \
FIF_BCN_PRBRESP_PROMISC)
FIF_BCN_PRBRESP_PROMISC | \
FIF_PSPOLL)
#define CHAN2GHZ(channel, freqency, chflags) { \
.band = IEEE80211_BAND_2GHZ, \
@ -373,7 +373,7 @@ static int brcms_ops_config(struct ieee80211_hw *hw, u32 changed)
conf->listen_interval);
}
if (changed & IEEE80211_CONF_CHANGE_MONITOR)
wiphy_err(wiphy, "%s: change monitor mode: %s (implement)\n",
wiphy_dbg(wiphy, "%s: change monitor mode: %s\n",
__func__, conf->flags & IEEE80211_CONF_MONITOR ?
"true" : "false");
if (changed & IEEE80211_CONF_CHANGE_PS)
@ -550,29 +550,25 @@ brcms_ops_configure_filter(struct ieee80211_hw *hw,
changed_flags &= MAC_FILTERS;
*total_flags &= MAC_FILTERS;
if (changed_flags & FIF_PROMISC_IN_BSS)
wiphy_err(wiphy, "FIF_PROMISC_IN_BSS\n");
wiphy_dbg(wiphy, "FIF_PROMISC_IN_BSS\n");
if (changed_flags & FIF_ALLMULTI)
wiphy_err(wiphy, "FIF_ALLMULTI\n");
wiphy_dbg(wiphy, "FIF_ALLMULTI\n");
if (changed_flags & FIF_FCSFAIL)
wiphy_err(wiphy, "FIF_FCSFAIL\n");
if (changed_flags & FIF_PLCPFAIL)
wiphy_err(wiphy, "FIF_PLCPFAIL\n");
wiphy_dbg(wiphy, "FIF_FCSFAIL\n");
if (changed_flags & FIF_CONTROL)
wiphy_err(wiphy, "FIF_CONTROL\n");
wiphy_dbg(wiphy, "FIF_CONTROL\n");
if (changed_flags & FIF_OTHER_BSS)
wiphy_err(wiphy, "FIF_OTHER_BSS\n");
if (changed_flags & FIF_BCN_PRBRESP_PROMISC) {
spin_lock_bh(&wl->lock);
if (*total_flags & FIF_BCN_PRBRESP_PROMISC) {
wl->pub->mac80211_state |= MAC80211_PROMISC_BCNS;
brcms_c_mac_bcn_promisc_change(wl->wlc, 1);
} else {
brcms_c_mac_bcn_promisc_change(wl->wlc, 0);
wl->pub->mac80211_state &= ~MAC80211_PROMISC_BCNS;
}
spin_unlock_bh(&wl->lock);
}
wiphy_dbg(wiphy, "FIF_OTHER_BSS\n");
if (changed_flags & FIF_PSPOLL)
wiphy_dbg(wiphy, "FIF_PSPOLL\n");
if (changed_flags & FIF_BCN_PRBRESP_PROMISC)
wiphy_dbg(wiphy, "FIF_BCN_PRBRESP_PROMISC\n");
spin_lock_bh(&wl->lock);
brcms_c_mac_promisc(wl->wlc, *total_flags);
spin_unlock_bh(&wl->lock);
return;
}

View file

@ -955,8 +955,6 @@ brcms_c_dotxstatus(struct brcms_c_info *wlc, struct tx_status *txs)
brcms_c_txfifo_complete(wlc, queue, 1);
if (lastframe) {
p->next = NULL;
p->prev = NULL;
/* remove PLCP & Broadcom tx descriptor header */
skb_pull(p, D11_PHY_HDR_LEN);
skb_pull(p, D11_TXH_LEN);
@ -3064,7 +3062,7 @@ static bool brcms_c_ps_allowed(struct brcms_c_info *wlc)
return false;
/* disallow PS when one of these meets when not scanning */
if (wlc->monitor)
if (wlc->filter_flags & FIF_PROMISC_IN_BSS)
return false;
if (cfg->associated) {
@ -3584,29 +3582,31 @@ static void brcms_c_bandinit_ordered(struct brcms_c_info *wlc,
}
/*
* Set or clear maccontrol bits MCTL_PROMISC, MCTL_BCNS_PROMISC and
* MCTL_KEEPCONTROL
* Set or clear filtering related maccontrol bits based on
* specified filter flags
*/
static void brcms_c_mac_promisc(struct brcms_c_info *wlc)
void brcms_c_mac_promisc(struct brcms_c_info *wlc, uint filter_flags)
{
u32 promisc_bits = 0;
if (wlc->bcnmisc_monitor)
wlc->filter_flags = filter_flags;
if (filter_flags & (FIF_PROMISC_IN_BSS | FIF_OTHER_BSS))
promisc_bits |= MCTL_PROMISC;
if (filter_flags & FIF_BCN_PRBRESP_PROMISC)
promisc_bits |= MCTL_BCNS_PROMISC;
if (wlc->monitor)
promisc_bits |=
MCTL_PROMISC | MCTL_BCNS_PROMISC | MCTL_KEEPCONTROL;
if (filter_flags & FIF_FCSFAIL)
promisc_bits |= MCTL_KEEPBADFCS;
if (filter_flags & (FIF_CONTROL | FIF_PSPOLL))
promisc_bits |= MCTL_KEEPCONTROL;
brcms_b_mctrl(wlc->hw,
MCTL_PROMISC | MCTL_BCNS_PROMISC | MCTL_KEEPCONTROL,
promisc_bits);
}
void brcms_c_mac_bcn_promisc_change(struct brcms_c_info *wlc, bool promisc)
{
wlc->bcnmisc_monitor = promisc;
brcms_c_mac_promisc(wlc);
MCTL_PROMISC | MCTL_BCNS_PROMISC |
MCTL_KEEPCONTROL | MCTL_KEEPBADFCS,
promisc_bits);
}
/*
@ -3636,9 +3636,6 @@ static void brcms_c_ucode_mac_upd(struct brcms_c_info *wlc)
} else {
/* disable an active IBSS if we are not on the home channel */
}
/* update the various promisc bits */
brcms_c_mac_promisc(wlc);
}
static void brcms_c_write_rate_shm(struct brcms_c_info *wlc, u8 rate,
@ -8074,14 +8071,8 @@ static void brcms_c_recv(struct brcms_c_info *wlc, struct sk_buff *p)
len = p->len;
if (rxh->RxStatus1 & RXS_FCSERR) {
if (wlc->pub->mac80211_state & MAC80211_PROMISC_BCNS) {
wiphy_err(wlc->wiphy, "FCSERR while scanning******* -"
" tossing\n");
if (!(wlc->filter_flags & FIF_FCSFAIL))
goto toss;
} else {
wiphy_err(wlc->wiphy, "RCSERR!!!\n");
goto toss;
}
}
/* check received pkt has at least frame control field */

View file

@ -519,8 +519,7 @@ struct brcms_c_info {
struct brcms_timer *radio_timer;
/* promiscuous */
bool monitor;
bool bcnmisc_monitor;
uint filter_flags;
/* driver feature */
bool _rifs;
@ -658,8 +657,7 @@ extern void brcms_c_print_txdesc(struct d11txh *txh);
#endif
extern int brcms_c_set_gmode(struct brcms_c_info *wlc, u8 gmode, bool config);
extern void brcms_c_mac_bcn_promisc_change(struct brcms_c_info *wlc,
bool promisc);
extern void brcms_c_mac_promisc(struct brcms_c_info *wlc, uint filter_flags);
extern void brcms_c_send_q(struct brcms_c_info *wlc);
extern int brcms_c_prep_pdu(struct brcms_c_info *wlc, struct sk_buff *pdu,
uint *fifo);

View file

@ -109,7 +109,7 @@ static const struct chan_info_basic chan_info_all[] = {
{204, 5020},
{208, 5040},
{212, 5060},
{216, 50800}
{216, 5080}
};
static const u8 ofdm_rate_lookup[] = {

View file

@ -655,6 +655,9 @@ static const struct pcmcia_device_id hostap_cs_ids[] = {
PCMCIA_DEVICE_PROD_ID123(
"Addtron", "AWP-100 Wireless PCMCIA", "Version 01.02",
0xe6ec52ce, 0x08649af2, 0x4b74baa0),
PCMCIA_DEVICE_PROD_ID123(
"Canon", "Wireless LAN CF Card K30225", "Version 01.00",
0x96ef6fe2, 0x263fcbab, 0xa57adb8c),
PCMCIA_DEVICE_PROD_ID123(
"D", "Link DWL-650 11Mbps WLAN Card", "Version 01.02",
0x71b18589, 0xb6f1b0ab, 0x4b74baa0),

View file

@ -1290,7 +1290,6 @@ static struct spi_driver libertas_spi_driver = {
.remove = __devexit_p(libertas_spi_remove),
.driver = {
.name = "libertas_spi",
.bus = &spi_bus_type,
.owner = THIS_MODULE,
.pm = &if_spi_pm_ops,
},

View file

@ -120,10 +120,11 @@ mwifiex_cfg80211_del_key(struct wiphy *wiphy, struct net_device *netdev,
static int
mwifiex_cfg80211_set_tx_power(struct wiphy *wiphy,
enum nl80211_tx_power_setting type,
int dbm)
int mbm)
{
struct mwifiex_private *priv = mwifiex_cfg80211_get_priv(wiphy);
struct mwifiex_power_cfg power_cfg;
int dbm = MBM_TO_DBM(mbm);
if (type == NL80211_TX_POWER_FIXED) {
power_cfg.is_power_auto = 0;

View file

@ -699,7 +699,6 @@ static int __devexit p54spi_remove(struct spi_device *spi)
static struct spi_driver p54spi_driver = {
.driver = {
.name = "p54spi",
.bus = &spi_bus_type,
.owner = THIS_MODULE,
},

View file

@ -2493,323 +2493,7 @@ prism54_set_mac_address(struct net_device *ndev, void *addr)
return ret;
}
/* Note: currently, use hostapd ioctl from the Host AP driver for WPA
* support. This is to be replaced with Linux wireless extensions once they
* get WPA support. */
/* Note II: please leave all this together as it will be easier to remove later,
* once wireless extensions add WPA support -mcgrof */
/* PRISM54_HOSTAPD ioctl() cmd: */
enum {
PRISM2_SET_ENCRYPTION = 6,
PRISM2_HOSTAPD_SET_GENERIC_ELEMENT = 12,
PRISM2_HOSTAPD_MLME = 13,
PRISM2_HOSTAPD_SCAN_REQ = 14,
};
#define PRISM54_SET_WPA SIOCIWFIRSTPRIV+12
#define PRISM54_HOSTAPD SIOCIWFIRSTPRIV+25
#define PRISM54_DROP_UNENCRYPTED SIOCIWFIRSTPRIV+26
#define PRISM2_HOSTAPD_MAX_BUF_SIZE 1024
#define PRISM2_HOSTAPD_GENERIC_ELEMENT_HDR_LEN \
offsetof(struct prism2_hostapd_param, u.generic_elem.data)
/* Maximum length for algorithm names (-1 for nul termination)
* used in ioctl() */
#define HOSTAP_CRYPT_ALG_NAME_LEN 16
struct prism2_hostapd_param {
u32 cmd;
u8 sta_addr[ETH_ALEN];
union {
struct {
u8 alg[HOSTAP_CRYPT_ALG_NAME_LEN];
u32 flags;
u32 err;
u8 idx;
u8 seq[8]; /* sequence counter (set: RX, get: TX) */
u16 key_len;
u8 key[0];
} crypt;
struct {
u8 len;
u8 data[0];
} generic_elem;
struct {
#define MLME_STA_DEAUTH 0
#define MLME_STA_DISASSOC 1
u16 cmd;
u16 reason_code;
} mlme;
struct {
u8 ssid_len;
u8 ssid[32];
} scan_req;
} u;
};
static int
prism2_ioctl_set_encryption(struct net_device *dev,
struct prism2_hostapd_param *param,
int param_len)
{
islpci_private *priv = netdev_priv(dev);
int rvalue = 0, force = 0;
int authen = DOT11_AUTH_OS, invoke = 0, exunencrypt = 0;
union oid_res_t r;
/* with the new API, it's impossible to get a NULL pointer.
* New version of iwconfig set the IW_ENCODE_NOKEY flag
* when no key is given, but older versions don't. */
if (param->u.crypt.key_len > 0) {
/* we have a key to set */
int index = param->u.crypt.idx;
int current_index;
struct obj_key key = { DOT11_PRIV_TKIP, 0, "" };
/* get the current key index */
rvalue = mgt_get_request(priv, DOT11_OID_DEFKEYID, 0, NULL, &r);
current_index = r.u;
/* Verify that the key is not marked as invalid */
if (!(param->u.crypt.flags & IW_ENCODE_NOKEY)) {
key.length = param->u.crypt.key_len > sizeof (param->u.crypt.key) ?
sizeof (param->u.crypt.key) : param->u.crypt.key_len;
memcpy(key.key, param->u.crypt.key, key.length);
if (key.length == 32)
/* we want WPA-PSK */
key.type = DOT11_PRIV_TKIP;
if ((index < 0) || (index > 3))
/* no index provided use the current one */
index = current_index;
/* now send the key to the card */
rvalue |=
mgt_set_request(priv, DOT11_OID_DEFKEYX, index,
&key);
}
/*
* If a valid key is set, encryption should be enabled
* (user may turn it off later).
* This is also how "iwconfig ethX key on" works
*/
if ((index == current_index) && (key.length > 0))
force = 1;
} else {
int index = (param->u.crypt.flags & IW_ENCODE_INDEX) - 1;
if ((index >= 0) && (index <= 3)) {
/* we want to set the key index */
rvalue |=
mgt_set_request(priv, DOT11_OID_DEFKEYID, 0,
&index);
} else {
if (!(param->u.crypt.flags & IW_ENCODE_MODE)) {
/* we cannot do anything. Complain. */
return -EINVAL;
}
}
}
/* now read the flags */
if (param->u.crypt.flags & IW_ENCODE_DISABLED) {
/* Encoding disabled,
* authen = DOT11_AUTH_OS;
* invoke = 0;
* exunencrypt = 0; */
}
if (param->u.crypt.flags & IW_ENCODE_OPEN)
/* Encode but accept non-encoded packets. No auth */
invoke = 1;
if ((param->u.crypt.flags & IW_ENCODE_RESTRICTED) || force) {
/* Refuse non-encoded packets. Auth */
authen = DOT11_AUTH_BOTH;
invoke = 1;
exunencrypt = 1;
}
/* do the change if requested */
if ((param->u.crypt.flags & IW_ENCODE_MODE) || force) {
rvalue |=
mgt_set_request(priv, DOT11_OID_AUTHENABLE, 0, &authen);
rvalue |=
mgt_set_request(priv, DOT11_OID_PRIVACYINVOKED, 0, &invoke);
rvalue |=
mgt_set_request(priv, DOT11_OID_EXUNENCRYPTED, 0,
&exunencrypt);
}
return rvalue;
}
static int
prism2_ioctl_set_generic_element(struct net_device *ndev,
struct prism2_hostapd_param *param,
int param_len)
{
islpci_private *priv = netdev_priv(ndev);
int max_len, len, alen, ret=0;
struct obj_attachment *attach;
len = param->u.generic_elem.len;
max_len = param_len - PRISM2_HOSTAPD_GENERIC_ELEMENT_HDR_LEN;
if (max_len < 0 || max_len < len)
return -EINVAL;
alen = sizeof(*attach) + len;
attach = kzalloc(alen, GFP_KERNEL);
if (attach == NULL)
return -ENOMEM;
#define WLAN_FC_TYPE_MGMT 0
#define WLAN_FC_STYPE_ASSOC_REQ 0
#define WLAN_FC_STYPE_REASSOC_REQ 2
/* Note: endianness is covered by mgt_set_varlen */
attach->type = (WLAN_FC_TYPE_MGMT << 2) |
(WLAN_FC_STYPE_ASSOC_REQ << 4);
attach->id = -1;
attach->size = len;
memcpy(attach->data, param->u.generic_elem.data, len);
ret = mgt_set_varlen(priv, DOT11_OID_ATTACHMENT, attach, len);
if (ret == 0) {
attach->type = (WLAN_FC_TYPE_MGMT << 2) |
(WLAN_FC_STYPE_REASSOC_REQ << 4);
ret = mgt_set_varlen(priv, DOT11_OID_ATTACHMENT, attach, len);
if (ret == 0)
printk(KERN_DEBUG "%s: WPA IE Attachment was set\n",
ndev->name);
}
kfree(attach);
return ret;
}
static int
prism2_ioctl_mlme(struct net_device *dev, struct prism2_hostapd_param *param)
{
return -EOPNOTSUPP;
}
static int
prism2_ioctl_scan_req(struct net_device *ndev,
struct prism2_hostapd_param *param)
{
islpci_private *priv = netdev_priv(ndev);
struct iw_request_info info;
int i, rvalue;
struct obj_bsslist *bsslist;
u32 noise = 0;
char *extra = "";
char *current_ev = "foo";
union oid_res_t r;
if (islpci_get_state(priv) < PRV_STATE_INIT) {
/* device is not ready, fail gently */
return 0;
}
/* first get the noise value. We will use it to report the link quality */
rvalue = mgt_get_request(priv, DOT11_OID_NOISEFLOOR, 0, NULL, &r);
noise = r.u;
/* Ask the device for a list of known bss. We can report at most
* IW_MAX_AP=64 to the range struct. But the device won't report anything
* if you change the value of IWMAX_BSS=24.
*/
rvalue |= mgt_get_request(priv, DOT11_OID_BSSLIST, 0, NULL, &r);
bsslist = r.ptr;
info.cmd = PRISM54_HOSTAPD;
info.flags = 0;
/* ok now, scan the list and translate its info */
for (i = 0; i < min(IW_MAX_AP, (int) bsslist->nr); i++)
current_ev = prism54_translate_bss(ndev, &info, current_ev,
extra + IW_SCAN_MAX_DATA,
&(bsslist->bsslist[i]),
noise);
kfree(bsslist);
return rvalue;
}
static int
prism54_hostapd(struct net_device *ndev, struct iw_point *p)
{
struct prism2_hostapd_param *param;
int ret = 0;
u32 uwrq;
printk(KERN_DEBUG "prism54_hostapd - len=%d\n", p->length);
if (p->length < sizeof(struct prism2_hostapd_param) ||
p->length > PRISM2_HOSTAPD_MAX_BUF_SIZE || !p->pointer)
return -EINVAL;
param = memdup_user(p->pointer, p->length);
if (IS_ERR(param))
return PTR_ERR(param);
switch (param->cmd) {
case PRISM2_SET_ENCRYPTION:
printk(KERN_DEBUG "%s: Caught WPA supplicant set encryption request\n",
ndev->name);
ret = prism2_ioctl_set_encryption(ndev, param, p->length);
break;
case PRISM2_HOSTAPD_SET_GENERIC_ELEMENT:
printk(KERN_DEBUG "%s: Caught WPA supplicant set WPA IE request\n",
ndev->name);
ret = prism2_ioctl_set_generic_element(ndev, param,
p->length);
break;
case PRISM2_HOSTAPD_MLME:
printk(KERN_DEBUG "%s: Caught WPA supplicant MLME request\n",
ndev->name);
ret = prism2_ioctl_mlme(ndev, param);
break;
case PRISM2_HOSTAPD_SCAN_REQ:
printk(KERN_DEBUG "%s: Caught WPA supplicant scan request\n",
ndev->name);
ret = prism2_ioctl_scan_req(ndev, param);
break;
case PRISM54_SET_WPA:
printk(KERN_DEBUG "%s: Caught WPA supplicant wpa init request\n",
ndev->name);
uwrq = 1;
ret = prism54_set_wpa(ndev, NULL, &uwrq, NULL);
break;
case PRISM54_DROP_UNENCRYPTED:
printk(KERN_DEBUG "%s: Caught WPA drop unencrypted request\n",
ndev->name);
#if 0
uwrq = 0x01;
mgt_set(priv, DOT11_OID_EXUNENCRYPTED, &uwrq);
down_write(&priv->mib_sem);
mgt_commit(priv);
up_write(&priv->mib_sem);
#endif
/* Not necessary, as set_wpa does it, should we just do it here though? */
ret = 0;
break;
default:
printk(KERN_DEBUG "%s: Caught a WPA supplicant request that is not supported\n",
ndev->name);
ret = -EOPNOTSUPP;
break;
}
if (ret == 0 && copy_to_user(p->pointer, param, p->length))
ret = -EFAULT;
kfree(param);
return ret;
}
static int
prism54_set_wpa(struct net_device *ndev, struct iw_request_info *info,
@ -3223,20 +2907,3 @@ const struct iw_handler_def prism54_handler_def = {
.private_args = (struct iw_priv_args *) prism54_private_args,
.get_wireless_stats = prism54_get_wireless_stats,
};
/* For wpa_supplicant */
int
prism54_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
struct iwreq *wrq = (struct iwreq *) rq;
int ret = -1;
switch (cmd) {
case PRISM54_HOSTAPD:
if (!capable(CAP_NET_ADMIN))
return -EPERM;
ret = prism54_hostapd(ndev, &wrq->u.data);
return ret;
}
return -EOPNOTSUPP;
}

View file

@ -43,8 +43,6 @@ void prism54_wpa_bss_ie_clean(islpci_private *priv);
int prism54_set_mac_address(struct net_device *, void *);
int prism54_ioctl(struct net_device *, struct ifreq *, int);
extern const struct iw_handler_def prism54_handler_def;
#endif /* _ISL_IOCTL_H */

View file

@ -804,7 +804,6 @@ static const struct ethtool_ops islpci_ethtool_ops = {
static const struct net_device_ops islpci_netdev_ops = {
.ndo_open = islpci_open,
.ndo_stop = islpci_close,
.ndo_do_ioctl = prism54_ioctl,
.ndo_start_xmit = islpci_eth_transmit,
.ndo_tx_timeout = islpci_eth_tx_timeout,
.ndo_set_mac_address = prism54_set_mac_address,

View file

@ -1203,8 +1203,10 @@ void rt2800_config_filter(struct rt2x00_dev *rt2x00dev,
!(filter_flags & FIF_CONTROL));
rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_PSPOLL,
!(filter_flags & FIF_PSPOLL));
rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_BA, 1);
rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_BAR, 0);
rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_BA,
!(filter_flags & FIF_CONTROL));
rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_BAR,
!(filter_flags & FIF_CONTROL));
rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_CNTL,
!(filter_flags & FIF_CONTROL));
rt2800_register_write(rt2x00dev, RX_FILTER_CFG, reg);

View file

@ -780,6 +780,7 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
unsigned long flags;
u32 inta = 0;
u32 intb = 0;
irqreturn_t ret = IRQ_HANDLED;
spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
@ -787,8 +788,10 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
rtlpriv->cfg->ops->interrupt_recognized(hw, &inta, &intb);
/* Shared IRQ or HW disappeared */
if (!inta || inta == 0xffff)
if (!inta || inta == 0xffff) {
ret = IRQ_NONE;
goto done;
}
/*<1> beacon related */
if (inta & rtlpriv->cfg->maps[RTL_IMR_TBDOK]) {
@ -892,7 +895,7 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
done:
spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
return IRQ_HANDLED;
return ret;
}
static void _rtl_pci_irq_tasklet(struct ieee80211_hw *hw)
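Returning IRQ_NONE when neither interrupt source is asserted lets the IRQ core tell foreign or spurious interrupts apart from handled ones on a shared line. The convention in isolation (hypothetical register layout, sketch only):

#include <linux/interrupt.h>
#include <linux/io.h>

struct example_priv {
	void __iomem *regs;			/* hypothetical MMIO base */
};

static irqreturn_t example_isr(int irq, void *dev_id)
{
	struct example_priv *priv = dev_id;
	u32 status = readl(priv->regs);		/* hypothetical status register */

	/* 0 or all-ones: nothing pending or device gone; not our interrupt. */
	if (!status || status == 0xffffffff)
		return IRQ_NONE;

	writel(status, priv->regs);		/* hypothetical write-1-to-clear ack */
	return IRQ_HANDLED;
}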

View file

@ -319,7 +319,6 @@ static int __devexit wl1251_spi_remove(struct spi_device *spi)
static struct spi_driver wl1251_spi_driver = {
.driver = {
.name = DRIVER_NAME,
.bus = &spi_bus_type,
.owner = THIS_MODULE,
},

View file

@ -462,7 +462,6 @@ static int __devexit wl1271_remove(struct spi_device *spi)
static struct spi_driver wl1271_spi_driver = {
.driver = {
.name = "wl1271_spi",
.bus = &spi_bus_type,
.owner = THIS_MODULE,
},

View file

@ -1339,7 +1339,7 @@ error:
return 0;
}
int pn533_data_exchange(struct nfc_dev *nfc_dev, u32 target_idx,
static int pn533_data_exchange(struct nfc_dev *nfc_dev, u32 target_idx,
struct sk_buff *skb,
data_exchange_cb_t cb,
void *cb_context)

View file

@ -538,6 +538,9 @@
* OLBC handling in hostapd. Beacons are reported in %NL80211_CMD_FRAME
* messages. Note that per PHY only one application may register.
*
* @NL80211_CMD_SET_NOACK_MAP: sets a bitmap for the individual TIDs whether
* No Acknowledgement Policy should be applied.
*
* @NL80211_CMD_MAX: highest used command number
* @__NL80211_CMD_AFTER_LAST: internal use
*/
@ -675,6 +678,8 @@ enum nl80211_commands {
NL80211_CMD_UNEXPECTED_4ADDR_FRAME,
NL80211_CMD_SET_NOACK_MAP,
/* add new commands above here */
/* used to define NL80211_CMD_MAX below */
@ -1185,6 +1190,9 @@ enum nl80211_commands {
* abides to when initiating radiation on DFS channels. A country maps
* to one DFS region.
*
* @NL80211_ATTR_NOACK_MAP: This u16 bitmap contains the No Ack Policy of
* up to 16 TIDs.
*
* @NL80211_ATTR_MAX: highest attribute number currently defined
* @__NL80211_ATTR_AFTER_LAST: internal use
*/
@ -1428,6 +1436,8 @@ enum nl80211_attrs {
NL80211_ATTR_DISABLE_HT,
NL80211_ATTR_HT_CAPABILITY_MASK,
NL80211_ATTR_NOACK_MAP,
/* add attributes here, update the policy in nl80211.c */
__NL80211_ATTR_AFTER_LAST,
@ -2084,6 +2094,10 @@ enum nl80211_mntr_flags {
* access to a broader network beyond the MBSS. This is done via Root
* Announcement frames.
*
* @NL80211_MESHCONF_HWMP_PERR_MIN_INTERVAL: The minimum interval of time (in
* TUs) during which a mesh STA can send only one Action frame containing a
* PERR element.
*
* @NL80211_MESHCONF_ATTR_MAX: highest possible mesh configuration attribute
*
* @__NL80211_MESHCONF_ATTR_AFTER_LAST: internal use
@ -2107,6 +2121,7 @@ enum nl80211_meshconf_params {
NL80211_MESHCONF_ELEMENT_TTL,
NL80211_MESHCONF_HWMP_RANN_INTERVAL,
NL80211_MESHCONF_GATE_ANNOUNCEMENTS,
NL80211_MESHCONF_HWMP_PERR_MIN_INTERVAL,
/* keep last */
__NL80211_MESHCONF_ATTR_AFTER_LAST,
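NL80211_ATTR_NOACK_MAP packs the per-TID No-Ack policy into a u16, one bit per TID. A small sketch of composing such a bitmap follows; the helper name and the example TID list are made up for illustration.

#include <stdint.h>
#include <stdio.h>

/* build a 16-bit map with one bit set per TID that should use No Ack */
static uint16_t noack_map_for_tids(const int *tids, int n)
{
        uint16_t map = 0;

        for (int i = 0; i < n; i++)
                if (tids[i] >= 0 && tids[i] < 16)
                        map |= (uint16_t)(1u << tids[i]);
        return map;
}

int main(void)
{
        int tids[] = { 5, 6, 7 };              /* e.g. video/voice TIDs */

        printf("noack map = 0x%04x\n", (unsigned int)noack_map_for_tids(tids, 3));
        return 0;
}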

View file

@ -782,6 +782,7 @@ struct mesh_config {
u16 min_discovery_timeout;
u32 dot11MeshHWMPactivePathTimeout;
u16 dot11MeshHWMPpreqMinInterval;
u16 dot11MeshHWMPperrMinInterval;
u16 dot11MeshHWMPnetDiameterTraversalTime;
u8 dot11MeshHWMPRootMode;
u16 dot11MeshHWMPRannInterval;
@ -802,6 +803,7 @@ struct mesh_config {
* @ie_len: length of vendor information elements
* @is_authenticated: this mesh requires authentication
* @is_secure: this mesh uses security
* @mcast_rate: multicast rate for Mesh Node [6Mbps is the default for 802.11a]
*
* These parameters are fixed when the mesh is created.
*/
@ -814,6 +816,7 @@ struct mesh_setup {
u8 ie_len;
bool is_authenticated;
bool is_secure;
int mcast_rate[IEEE80211_NUM_BANDS];
};
/**
@ -1399,7 +1402,8 @@ struct cfg80211_gtk_rekey_data {
* have changed. The actual parameter values are available in
* struct wiphy. If returning an error, no value should be changed.
*
* @set_tx_power: set the transmit power according to the parameters
* @set_tx_power: set the transmit power according to the parameters,
* the power passed is in mBm, to get dBm use MBM_TO_DBM().
* @get_tx_power: store the current TX power into the dbm variable;
* return 0 if successful
*
@ -1465,6 +1469,8 @@ struct cfg80211_gtk_rekey_data {
*
* @probe_client: probe an associated client, must return a cookie that it
* later passes to cfg80211_probe_status().
*
* @set_noack_map: Set the NoAck Map for the TIDs.
*/
struct cfg80211_ops {
int (*suspend)(struct wiphy *wiphy, struct cfg80211_wowlan *wow);
@ -1658,6 +1664,10 @@ struct cfg80211_ops {
int (*probe_client)(struct wiphy *wiphy, struct net_device *dev,
const u8 *peer, u64 *cookie);
int (*set_noack_map)(struct wiphy *wiphy,
struct net_device *dev,
u16 noack_map);
struct ieee80211_channel *(*get_channel)(struct wiphy *wiphy);
};
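The set_tx_power note above says the value arrives in mBm and MBM_TO_DBM() converts it. The relationship is simply 1 dBm = 100 mBm; the macro below sketches that conversion and is not copied from the kernel header.

#include <stdio.h>

#define MBM_TO_DBM(mbm) ((mbm) / 100)   /* 1 dBm == 100 mBm */

int main(void)
{
        int txpower_mbm = 2000;         /* e.g. a 20 dBm request */

        printf("%d mBm -> %d dBm\n", txpower_mbm, MBM_TO_DBM(txpower_mbm));
        return 0;
}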

View file

@ -247,15 +247,3 @@ config MAC80211_DEBUG_COUNTERS
and show them in debugfs.
If unsure, say N.
config MAC80211_DRIVER_API_TRACER
bool "Driver API tracer"
depends on MAC80211_DEBUG_MENU
depends on EVENT_TRACING
help
Say Y here to make mac80211 register with the ftrace
framework for the driver API -- you can then see which
driver methods it is calling and which API functions
drivers are calling by looking at the trace.
If unsure, say Y.

View file

@ -24,7 +24,8 @@ mac80211-y := \
util.o \
wme.o \
event.o \
chan.o
chan.o \
driver-trace.o
mac80211-$(CONFIG_MAC80211_LEDS) += led.o
mac80211-$(CONFIG_MAC80211_DEBUGFS) += \
@ -41,7 +42,6 @@ mac80211-$(CONFIG_MAC80211_MESH) += \
mac80211-$(CONFIG_PM) += pm.o
mac80211-$(CONFIG_MAC80211_DRIVER_API_TRACER) += driver-trace.o
CFLAGS_driver-trace.o := -I$(src)
# objects for PID algorithm

View file

@ -73,8 +73,11 @@ void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
RCU_INIT_POINTER(sta->ampdu_mlme.tid_rx[tid], NULL);
#ifdef CONFIG_MAC80211_HT_DEBUG
printk(KERN_DEBUG "Rx BA session stop requested for %pM tid %u\n",
sta->sta.addr, tid);
printk(KERN_DEBUG
"Rx BA session stop requested for %pM tid %u %s reason: %d\n",
sta->sta.addr, tid,
initiator == WLAN_BACK_RECIPIENT ? "recipient" : "initiator",
(int)reason);
#endif /* CONFIG_MAC80211_HT_DEBUG */
if (drv_ampdu_action(local, sta->sdata, IEEE80211_AMPDU_RX_STOP,
@ -85,7 +88,7 @@ void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
/* check if this is a self generated aggregation halt */
if (initiator == WLAN_BACK_RECIPIENT && tx)
ieee80211_send_delba(sta->sdata, sta->sta.addr,
tid, 0, reason);
tid, WLAN_BACK_RECIPIENT, reason);
del_timer_sync(&tid_rx->session_timer);
del_timer_sync(&tid_rx->reorder_timer);
@ -109,7 +112,7 @@ void ieee80211_stop_rx_ba_session(struct ieee80211_vif *vif, u16 ba_rx_bitmap,
int i;
rcu_read_lock();
sta = sta_info_get(sdata, addr);
sta = sta_info_get_bss(sdata, addr);
if (!sta) {
rcu_read_unlock();
return;

View file

@ -180,6 +180,7 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
set_bit(HT_AGG_STATE_STOPPING, &tid_tx->state);
del_timer_sync(&tid_tx->addba_resp_timer);
del_timer_sync(&tid_tx->session_timer);
/*
* After this packets are no longer handed right through
@ -349,6 +350,28 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
tid_tx->timeout);
}
/*
* After accepting the AddBA Response we activated a timer,
* resetting it after each frame that we send.
*/
static void sta_tx_agg_session_timer_expired(unsigned long data)
{
/* not an elegant detour, but there is no choice as the timer passes
* only one argument, and various sta_info are needed here, so init
* flow in sta_info_create gives the TID as data, while the timer_to_id
* array gives the sta through container_of */
u8 *ptid = (u8 *)data;
u8 *timer_to_id = ptid - *ptid;
struct sta_info *sta = container_of(timer_to_id, struct sta_info,
timer_to_tid[0]);
#ifdef CONFIG_MAC80211_HT_DEBUG
printk(KERN_DEBUG "tx session timer expired on tid %d\n", (u16)*ptid);
#endif
ieee80211_stop_tx_ba_session(&sta->sta, *ptid);
}
int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
u16 timeout)
{
@ -418,11 +441,16 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
tid_tx->timeout = timeout;
/* Tx timer */
/* response timer */
tid_tx->addba_resp_timer.function = sta_addba_resp_timer_expired;
tid_tx->addba_resp_timer.data = (unsigned long)&sta->timer_to_tid[tid];
init_timer(&tid_tx->addba_resp_timer);
/* tx timer */
tid_tx->session_timer.function = sta_tx_agg_session_timer_expired;
tid_tx->session_timer.data = (unsigned long)&sta->timer_to_tid[tid];
init_timer(&tid_tx->session_timer);
/* assign a dialog token */
sta->ampdu_mlme.dialog_token_allocator++;
tid_tx->dialog_token = sta->ampdu_mlme.dialog_token_allocator;
@ -527,7 +555,7 @@ void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid)
}
mutex_lock(&local->sta_mtx);
sta = sta_info_get(sdata, ra);
sta = sta_info_get_bss(sdata, ra);
if (!sta) {
mutex_unlock(&local->sta_mtx);
#ifdef CONFIG_MAC80211_HT_DEBUG
@ -656,7 +684,7 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid)
mutex_lock(&local->sta_mtx);
sta = sta_info_get(sdata, ra);
sta = sta_info_get_bss(sdata, ra);
if (!sta) {
#ifdef CONFIG_MAC80211_HT_DEBUG
printk(KERN_DEBUG "Could not find station: %pM\n", ra);
@ -778,6 +806,11 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local,
ieee80211_agg_tx_operational(local, sta, tid);
sta->ampdu_mlme.addba_req_num[tid] = 0;
if (tid_tx->timeout)
mod_timer(&tid_tx->session_timer,
TU_TO_EXP_TIME(tid_tx->timeout));
} else {
___ieee80211_stop_tx_ba_session(sta, tid, WLAN_BACK_INITIATOR,
true);
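The sta_tx_agg_session_timer_expired() helper above recovers the station from nothing but a pointer into its timer_to_tid[] array. Here is a standalone illustration of that container_of() detour; the structure layout and names are stand-ins, not mac80211's.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

#define NUM_TIDS 16

struct sta {
        const char *name;
        uint8_t timer_to_tid[NUM_TIDS]; /* entry i simply stores i */
};

/* models the timer callback, which only gets one scalar argument */
static void session_timer_expired(uintptr_t data)
{
        uint8_t *ptid = (uint8_t *)data;
        uint8_t *base = ptid - *ptid;   /* step back to the array start */
        struct sta *sta = container_of(base, struct sta, timer_to_tid[0]);

        printf("session timer fired for %s, tid %u\n", sta->name, *ptid);
}

int main(void)
{
        struct sta sta = { .name = "sta0" };

        for (unsigned int i = 0; i < NUM_TIDS; i++)
                sta.timer_to_tid[i] = (uint8_t)i;

        /* pretend the per-TID session timer for TID 5 expired */
        session_timer_expired((uintptr_t)&sta.timer_to_tid[5]);
        return 0;
}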

View file

@ -102,6 +102,16 @@ static int ieee80211_change_iface(struct wiphy *wiphy,
return 0;
}
static int ieee80211_set_noack_map(struct wiphy *wiphy,
struct net_device *dev,
u16 noack_map)
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
sdata->noack_map = noack_map;
return 0;
}
static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev,
u8 key_idx, bool pairwise, const u8 *mac_addr,
struct key_params *params)
@ -499,7 +509,7 @@ static int ieee80211_set_probe_resp(struct ieee80211_sub_if_data *sdata,
if (!resp || !resp_len)
return -EINVAL;
old = sdata->u.ap.probe_resp;
old = rtnl_dereference(sdata->u.ap.probe_resp);
new = dev_alloc_skb(resp_len);
if (!new)
@ -1185,6 +1195,8 @@ static int copy_mesh_setup(struct ieee80211_if_mesh *ifmsh,
{
u8 *new_ie;
const u8 *old_ie;
struct ieee80211_sub_if_data *sdata = container_of(ifmsh,
struct ieee80211_sub_if_data, u.mesh);
/* allocate information elements */
new_ie = NULL;
@ -1211,6 +1223,10 @@ static int copy_mesh_setup(struct ieee80211_if_mesh *ifmsh,
if (setup->is_secure)
ifmsh->security |= IEEE80211_MESH_SEC_SECURED;
/* mcast rate setting in Mesh Node */
memcpy(sdata->vif.bss_conf.mcast_rate, setup->mcast_rate,
sizeof(setup->mcast_rate));
return 0;
}
@ -1256,6 +1272,9 @@ static int ieee80211_update_mesh_config(struct wiphy *wiphy,
if (_chg_mesh_attr(NL80211_MESHCONF_HWMP_PREQ_MIN_INTERVAL, mask))
conf->dot11MeshHWMPpreqMinInterval =
nconf->dot11MeshHWMPpreqMinInterval;
if (_chg_mesh_attr(NL80211_MESHCONF_HWMP_PERR_MIN_INTERVAL, mask))
conf->dot11MeshHWMPperrMinInterval =
nconf->dot11MeshHWMPperrMinInterval;
if (_chg_mesh_attr(NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME,
mask))
conf->dot11MeshHWMPnetDiameterTraversalTime =
@ -2698,4 +2717,5 @@ struct cfg80211_ops mac80211_config_ops = {
.tdls_mgmt = ieee80211_tdls_mgmt,
.probe_client = ieee80211_probe_client,
.get_channel = ieee80211_wiphy_get_channel,
.set_noack_map = ieee80211_set_noack_map,
};

View file

@ -97,40 +97,6 @@ static const struct file_operations reset_ops = {
.llseek = noop_llseek,
};
static ssize_t noack_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ieee80211_local *local = file->private_data;
return mac80211_format_buffer(user_buf, count, ppos, "%d\n",
local->wifi_wme_noack_test);
}
static ssize_t noack_write(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ieee80211_local *local = file->private_data;
char buf[10];
size_t len;
len = min(count, sizeof(buf) - 1);
if (copy_from_user(buf, user_buf, len))
return -EFAULT;
buf[len] = '\0';
local->wifi_wme_noack_test = !!simple_strtoul(buf, NULL, 0);
return count;
}
static const struct file_operations noack_ops = {
.read = noack_read,
.write = noack_write,
.open = mac80211_open_file_generic,
.llseek = default_llseek,
};
static ssize_t uapsd_queues_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
@ -398,7 +364,6 @@ void debugfs_hw_add(struct ieee80211_local *local)
DEBUGFS_ADD(wep_iv);
DEBUGFS_ADD(queues);
DEBUGFS_ADD_MODE(reset, 0200);
DEBUGFS_ADD(noack);
DEBUGFS_ADD(uapsd_queues);
DEBUGFS_ADD(uapsd_max_sp_len);
DEBUGFS_ADD(channel_type);

View file

@ -405,6 +405,8 @@ IEEE80211_IF_FILE(dot11MeshHWMPactivePathTimeout,
u.mesh.mshcfg.dot11MeshHWMPactivePathTimeout, DEC);
IEEE80211_IF_FILE(dot11MeshHWMPpreqMinInterval,
u.mesh.mshcfg.dot11MeshHWMPpreqMinInterval, DEC);
IEEE80211_IF_FILE(dot11MeshHWMPperrMinInterval,
u.mesh.mshcfg.dot11MeshHWMPperrMinInterval, DEC);
IEEE80211_IF_FILE(dot11MeshHWMPnetDiameterTraversalTime,
u.mesh.mshcfg.dot11MeshHWMPnetDiameterTraversalTime, DEC);
IEEE80211_IF_FILE(dot11MeshHWMPmaxPREQretries,
@ -534,6 +536,7 @@ static void add_mesh_config(struct ieee80211_sub_if_data *sdata)
MESHPARAMS_ADD(dot11MeshMaxPeerLinks);
MESHPARAMS_ADD(dot11MeshHWMPactivePathTimeout);
MESHPARAMS_ADD(dot11MeshHWMPpreqMinInterval);
MESHPARAMS_ADD(dot11MeshHWMPperrMinInterval);
MESHPARAMS_ADD(dot11MeshHWMPnetDiameterTraversalTime);
MESHPARAMS_ADD(dot11MeshHWMPmaxPREQretries);
MESHPARAMS_ADD(path_refresh_time);

View file

@ -10,6 +10,16 @@ static inline void check_sdata_in_driver(struct ieee80211_sub_if_data *sdata)
WARN_ON(!(sdata->flags & IEEE80211_SDATA_IN_DRIVER));
}
static inline struct ieee80211_sub_if_data *
get_bss_sdata(struct ieee80211_sub_if_data *sdata)
{
if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
sdata = container_of(sdata->bss, struct ieee80211_sub_if_data,
u.ap);
return sdata;
}
static inline void drv_tx(struct ieee80211_local *local, struct sk_buff *skb)
{
local->ops->tx(&local->hw, skb);
@ -421,6 +431,7 @@ static inline void drv_sta_notify(struct ieee80211_local *local,
enum sta_notify_cmd cmd,
struct ieee80211_sta *sta)
{
sdata = get_bss_sdata(sdata);
check_sdata_in_driver(sdata);
trace_drv_sta_notify(local, sdata, cmd, sta);
@ -437,6 +448,7 @@ static inline int drv_sta_add(struct ieee80211_local *local,
might_sleep();
sdata = get_bss_sdata(sdata);
check_sdata_in_driver(sdata);
trace_drv_sta_add(local, sdata, sta);
@ -454,6 +466,7 @@ static inline void drv_sta_remove(struct ieee80211_local *local,
{
might_sleep();
sdata = get_bss_sdata(sdata);
check_sdata_in_driver(sdata);
trace_drv_sta_remove(local, sdata, sta);
@ -547,6 +560,7 @@ static inline int drv_ampdu_action(struct ieee80211_local *local,
might_sleep();
sdata = get_bss_sdata(sdata);
check_sdata_in_driver(sdata);
trace_drv_ampdu_action(local, sdata, action, sta, tid, ssn, buf_size);

View file

@ -5,17 +5,6 @@
#include <net/mac80211.h>
#include "ieee80211_i.h"
#if !defined(CONFIG_MAC80211_DRIVER_API_TRACER) || defined(__CHECKER__)
#undef TRACE_EVENT
#define TRACE_EVENT(name, proto, ...) \
static inline void trace_ ## name(proto) {}
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(...)
#undef DEFINE_EVENT
#define DEFINE_EVENT(evt_class, name, proto, ...) \
static inline void trace_ ## name(proto) {}
#endif
#undef TRACE_SYSTEM
#define TRACE_SYSTEM mac80211

View file

@ -28,9 +28,9 @@ bool ieee80111_cfg_override_disables_ht40(struct ieee80211_sub_if_data *sdata)
return false;
}
void __check_htcap_disable(struct ieee80211_sub_if_data *sdata,
struct ieee80211_sta_ht_cap *ht_cap,
u16 flag)
static void __check_htcap_disable(struct ieee80211_sub_if_data *sdata,
struct ieee80211_sta_ht_cap *ht_cap,
u16 flag)
{
__le16 le_flag = cpu_to_le16(flag);
if (sdata->u.mgd.ht_capa_mask.cap_info & le_flag) {

View file

@ -514,7 +514,9 @@ struct ieee80211_if_mesh {
atomic_t mpaths;
/* Timestamp of last SN update */
unsigned long last_sn_update;
/* Timestamp of last SN sent */
/* Time when it's ok to send next PERR */
unsigned long next_perr;
/* Timestamp of last PREQ sent */
unsigned long last_preq;
struct mesh_rmc *rmc;
spinlock_t mesh_preq_queue_lock;
@ -611,6 +613,9 @@ struct ieee80211_sub_if_data {
struct ieee80211_fragment_entry fragments[IEEE80211_FRAGMENT_MAX];
unsigned int fragment_next;
/* TID bitmap for NoAck policy */
u16 noack_map;
struct ieee80211_key __rcu *keys[NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS];
struct ieee80211_key __rcu *default_unicast_key;
struct ieee80211_key __rcu *default_multicast_key;
@ -961,7 +966,6 @@ struct ieee80211_local {
int total_ps_buffered; /* total number of all buffered unicast and
* multicast packets for power saving stations
*/
int wifi_wme_noack_test;
unsigned int wmm_acm; /* bit field of ACM bits (BIT(802.1D tag)) */
/*
@ -1216,13 +1220,11 @@ int ieee80211_request_sched_scan_stop(struct ieee80211_sub_if_data *sdata);
void ieee80211_sched_scan_stopped_work(struct work_struct *work);
/* off-channel helpers */
bool ieee80211_cfg_on_oper_channel(struct ieee80211_local *local);
void ieee80211_offchannel_enable_all_ps(struct ieee80211_local *local,
bool tell_ap);
void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local,
bool offchannel_ps_enable);
void ieee80211_offchannel_return(struct ieee80211_local *local,
bool enable_beaconing,
bool offchannel_ps_disable);
void ieee80211_hw_roc_setup(struct ieee80211_local *local);

View file

@ -672,7 +672,6 @@ static u16 ieee80211_monitor_select_queue(struct net_device *dev,
struct ieee80211_local *local = sdata->local;
struct ieee80211_hdr *hdr;
struct ieee80211_radiotap_header *rtap = (void *)skb->data;
u8 *p;
if (local->hw.queues < 4)
return 0;
@ -683,19 +682,7 @@ static u16 ieee80211_monitor_select_queue(struct net_device *dev,
hdr = (void *)((u8 *)skb->data + le16_to_cpu(rtap->it_len));
if (!ieee80211_is_data(hdr->frame_control)) {
skb->priority = 7;
return ieee802_1d_to_ac[skb->priority];
}
if (!ieee80211_is_data_qos(hdr->frame_control)) {
skb->priority = 0;
return ieee802_1d_to_ac[skb->priority];
}
p = ieee80211_get_qos_ctl(hdr);
skb->priority = *p & IEEE80211_QOS_CTL_TAG1D_MASK;
return ieee80211_downgrade_queue(local, skb);
return ieee80211_select_queue_80211(local, skb, hdr);
}
static const struct net_device_ops ieee80211_monitorif_ops = {
@ -866,6 +853,8 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata,
sdata->control_port_protocol = cpu_to_be16(ETH_P_PAE);
sdata->control_port_no_encrypt = false;
sdata->noack_map = 0;
/* only monitor differs */
sdata->dev->type = ARPHRD_ETHER;

View file

@ -92,47 +92,6 @@ static void ieee80211_reconfig_filter(struct work_struct *work)
ieee80211_configure_filter(local);
}
/*
* Returns true if we are logically configured to be on
* the operating channel AND the hardware-conf is currently
* configured on the operating channel. Compares channel-type
* as well.
*/
bool ieee80211_cfg_on_oper_channel(struct ieee80211_local *local)
{
struct ieee80211_channel *chan;
enum nl80211_channel_type channel_type;
/* This logic needs to match logic in ieee80211_hw_config */
if (local->scan_channel) {
chan = local->scan_channel;
/* If scanning on oper channel, use whatever channel-type
* is currently in use.
*/
if (chan == local->oper_channel)
channel_type = local->_oper_channel_type;
else
channel_type = NL80211_CHAN_NO_HT;
} else if (local->tmp_channel) {
chan = local->tmp_channel;
channel_type = local->tmp_channel_type;
} else {
chan = local->oper_channel;
channel_type = local->_oper_channel_type;
}
if (chan != local->oper_channel ||
channel_type != local->_oper_channel_type)
return false;
/* Check current hardware-config against oper_channel. */
if (local->oper_channel != local->hw.conf.channel ||
local->_oper_channel_type != local->hw.conf.channel_type)
return false;
return true;
}
int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
{
struct ieee80211_channel *chan;

View file

@ -749,6 +749,7 @@ void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata)
atomic_set(&ifmsh->mpaths, 0);
mesh_rmc_init(sdata);
ifmsh->last_preq = jiffies;
ifmsh->next_perr = jiffies;
/* Allocate all mesh structures when creating the first mesh interface. */
if (!mesh_allocated)
ieee80211s_init();

View file

@ -233,6 +233,8 @@ void ieee80211_mesh_root_setup(struct ieee80211_if_mesh *ifmsh);
/* Mesh paths */
int mesh_nexthop_lookup(struct sk_buff *skb,
struct ieee80211_sub_if_data *sdata);
int mesh_nexthop_resolve(struct sk_buff *skb,
struct ieee80211_sub_if_data *sdata);
void mesh_path_start_discovery(struct ieee80211_sub_if_data *sdata);
struct mesh_path *mesh_path_lookup(u8 *dst,
struct ieee80211_sub_if_data *sdata);

View file

@ -241,11 +241,15 @@ int mesh_path_error_tx(u8 ttl, u8 *target, __le32 target_sn,
{
struct ieee80211_local *local = sdata->local;
struct sk_buff *skb;
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
struct ieee80211_mgmt *mgmt;
u8 *pos, ie_len;
int hdr_len = offsetof(struct ieee80211_mgmt, u.action.u.mesh_action) +
sizeof(mgmt->u.action.u.mesh_action);
if (time_before(jiffies, ifmsh->next_perr))
return -EAGAIN;
skb = dev_alloc_skb(local->hw.extra_tx_headroom +
hdr_len +
2 + 15 /* PERR IE */);
@ -290,6 +294,8 @@ int mesh_path_error_tx(u8 ttl, u8 *target, __le32 target_sn,
/* see note in function header */
prepare_frame_for_deferred_tx(sdata, skb);
ifmsh->next_perr = TU_TO_EXP_TIME(
ifmsh->mshcfg.dot11MeshHWMPperrMinInterval);
ieee80211_add_pending_skb(local, skb);
return 0;
}
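The two hunks above rate-limit PERR transmission: sending is refused while jiffies is still before ifmsh->next_perr, and each successful send pushes next_perr forward by dot11MeshHWMPperrMinInterval time units. Below is a standalone model of that window, using plain microseconds in place of jiffies (1 TU = 1024 us); the names are illustrative.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TU_US 1024                      /* one 802.11 time unit in us */

struct perr_limiter {
        uint64_t next_perr_us;          /* earliest time a PERR may go out */
        unsigned int min_interval_tu;   /* dot11MeshHWMPperrMinInterval    */
};

/* returns true and advances the window if a PERR may be sent now */
static bool perr_may_send(struct perr_limiter *pl, uint64_t now_us)
{
        if (now_us < pl->next_perr_us)
                return false;           /* matches the -EAGAIN early return */
        pl->next_perr_us = now_us + (uint64_t)pl->min_interval_tu * TU_US;
        return true;
}

int main(void)
{
        struct perr_limiter pl = { .next_perr_us = 0, .min_interval_tu = 100 };

        printf("%d %d %d\n",
               perr_may_send(&pl, 0),              /* allowed             */
               perr_may_send(&pl, 50 * TU_US),     /* still in the window */
               perr_may_send(&pl, 150 * TU_US));   /* window elapsed      */
        return 0;
}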
@ -393,15 +399,13 @@ static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata,
orig_metric = PREQ_IE_METRIC(hwmp_ie);
break;
case MPATH_PREP:
/* Originator here refers to the MP that was the destination in
* the Path Request. The draft refers to that MP as the
* destination address, even though usually it is the origin of
* the PREP frame. We divert from the nomenclature in the draft
/* Originator here refers to the MP that was the target in the
* Path Request. We divert from the nomenclature in the draft
* so that we can easily use a single function to gather path
* information from both PREQ and PREP frames.
*/
orig_addr = PREP_IE_ORIG_ADDR(hwmp_ie);
orig_sn = PREP_IE_ORIG_SN(hwmp_ie);
orig_addr = PREP_IE_TARGET_ADDR(hwmp_ie);
orig_sn = PREP_IE_TARGET_SN(hwmp_ie);
orig_lifetime = PREP_IE_LIFETIME(hwmp_ie);
orig_metric = PREP_IE_METRIC(hwmp_ie);
break;
@ -562,9 +566,9 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
ttl = ifmsh->mshcfg.element_ttl;
if (ttl != 0) {
mhwmp_dbg("replying to the PREQ");
mesh_path_sel_frame_tx(MPATH_PREP, 0, target_addr,
cpu_to_le32(target_sn), 0, orig_addr,
cpu_to_le32(orig_sn), mgmt->sa, 0, ttl,
mesh_path_sel_frame_tx(MPATH_PREP, 0, orig_addr,
cpu_to_le32(orig_sn), 0, target_addr,
cpu_to_le32(target_sn), mgmt->sa, 0, ttl,
cpu_to_le32(lifetime), cpu_to_le32(metric),
0, sdata);
} else
@ -618,14 +622,8 @@ static void hwmp_prep_frame_process(struct ieee80211_sub_if_data *sdata,
mhwmp_dbg("received PREP from %pM", PREP_IE_ORIG_ADDR(prep_elem));
/* Note that we divert from the draft nomenclature and denominate
* destination to what the draft refers to as origininator. So in this
* function destnation refers to the final destination of the PREP,
* which corresponds with the originator of the PREQ which this PREP
* replies
*/
target_addr = PREP_IE_TARGET_ADDR(prep_elem);
if (memcmp(target_addr, sdata->vif.addr, ETH_ALEN) == 0)
orig_addr = PREP_IE_ORIG_ADDR(prep_elem);
if (memcmp(orig_addr, sdata->vif.addr, ETH_ALEN) == 0)
/* destination, no forwarding required */
return;
@ -636,7 +634,7 @@ static void hwmp_prep_frame_process(struct ieee80211_sub_if_data *sdata,
}
rcu_read_lock();
mpath = mesh_path_lookup(target_addr, sdata);
mpath = mesh_path_lookup(orig_addr, sdata);
if (mpath)
spin_lock_bh(&mpath->state_lock);
else
@ -651,7 +649,7 @@ static void hwmp_prep_frame_process(struct ieee80211_sub_if_data *sdata,
flags = PREP_IE_FLAGS(prep_elem);
lifetime = PREP_IE_LIFETIME(prep_elem);
hopcount = PREP_IE_HOPCOUNT(prep_elem) + 1;
orig_addr = PREP_IE_ORIG_ADDR(prep_elem);
target_addr = PREP_IE_TARGET_ADDR(prep_elem);
target_sn = PREP_IE_TARGET_SN(prep_elem);
orig_sn = PREP_IE_ORIG_SN(prep_elem);
@ -984,71 +982,97 @@ enddiscovery:
kfree(preq_node);
}
/**
* mesh_nexthop_lookup - put the appropriate next hop on a mesh frame
/* mesh_nexthop_resolve - lookup next hop for given skb and start path
* discovery if no forwarding information is found.
*
* @skb: 802.11 frame to be sent
* @sdata: network subif the frame will be sent through
*
* Returns: 0 if the next hop was found. Nonzero otherwise. If no next hop is
* found, the function will start a path discovery and queue the frame so it is
* sent when the path is resolved. This means the caller must not free the skb
* in this case.
* Returns: 0 if the next hop was found and -ENOENT if the frame was queued.
* skb is freed here if no mpath could be allocated.
*/
int mesh_nexthop_lookup(struct sk_buff *skb,
struct ieee80211_sub_if_data *sdata)
int mesh_nexthop_resolve(struct sk_buff *skb,
struct ieee80211_sub_if_data *sdata)
{
struct sk_buff *skb_to_free = NULL;
struct mesh_path *mpath;
struct sta_info *next_hop;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct mesh_path *mpath;
struct sk_buff *skb_to_free = NULL;
u8 *target_addr = hdr->addr3;
int err = 0;
rcu_read_lock();
mpath = mesh_path_lookup(target_addr, sdata);
err = mesh_nexthop_lookup(skb, sdata);
if (!err)
goto endlookup;
/* no nexthop found, start resolving */
mpath = mesh_path_lookup(target_addr, sdata);
if (!mpath) {
mesh_path_add(target_addr, sdata);
mpath = mesh_path_lookup(target_addr, sdata);
if (!mpath) {
sdata->u.mesh.mshstats.dropped_frames_no_route++;
mesh_path_discard_frame(skb, sdata);
err = -ENOSPC;
goto endlookup;
}
}
if (mpath->flags & MESH_PATH_ACTIVE) {
if (time_after(jiffies,
mpath->exp_time -
msecs_to_jiffies(sdata->u.mesh.mshcfg.path_refresh_time)) &&
!memcmp(sdata->vif.addr, hdr->addr4, ETH_ALEN) &&
!(mpath->flags & MESH_PATH_RESOLVING) &&
!(mpath->flags & MESH_PATH_FIXED)) {
mesh_queue_preq(mpath,
PREQ_Q_F_START | PREQ_Q_F_REFRESH);
}
next_hop = rcu_dereference(mpath->next_hop);
if (next_hop)
memcpy(hdr->addr1, next_hop->sta.addr, ETH_ALEN);
else
err = -ENOENT;
} else {
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
if (!(mpath->flags & MESH_PATH_RESOLVING)) {
/* Start discovery only if it is not running yet */
mesh_queue_preq(mpath, PREQ_Q_F_START);
}
if (!(mpath->flags & MESH_PATH_RESOLVING))
mesh_queue_preq(mpath, PREQ_Q_F_START);
if (skb_queue_len(&mpath->frame_queue) >= MESH_FRAME_QUEUE_LEN)
skb_to_free = skb_dequeue(&mpath->frame_queue);
if (skb_queue_len(&mpath->frame_queue) >= MESH_FRAME_QUEUE_LEN)
skb_to_free = skb_dequeue(&mpath->frame_queue);
info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
ieee80211_set_qos_hdr(sdata, skb);
skb_queue_tail(&mpath->frame_queue, skb);
if (skb_to_free)
mesh_path_discard_frame(skb_to_free, sdata);
err = -ENOENT;
info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
ieee80211_set_qos_hdr(sdata, skb);
skb_queue_tail(&mpath->frame_queue, skb);
err = -ENOENT;
if (skb_to_free)
mesh_path_discard_frame(skb_to_free, sdata);
endlookup:
rcu_read_unlock();
return err;
}
/**
* mesh_nexthop_lookup - put the appropriate next hop on a mesh frame. Calling
* this function is considered "using" the associated mpath, so preempt a path
* refresh if this mpath expires soon.
*
* @skb: 802.11 frame to be sent
* @sdata: network subif the frame will be sent through
*
* Returns: 0 if the next hop was found. Nonzero otherwise.
*/
int mesh_nexthop_lookup(struct sk_buff *skb,
struct ieee80211_sub_if_data *sdata)
{
struct mesh_path *mpath;
struct sta_info *next_hop;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
u8 *target_addr = hdr->addr3;
int err = -ENOENT;
rcu_read_lock();
mpath = mesh_path_lookup(target_addr, sdata);
if (!mpath || !(mpath->flags & MESH_PATH_ACTIVE))
goto endlookup;
if (time_after(jiffies,
mpath->exp_time -
msecs_to_jiffies(sdata->u.mesh.mshcfg.path_refresh_time)) &&
!memcmp(sdata->vif.addr, hdr->addr4, ETH_ALEN) &&
!(mpath->flags & MESH_PATH_RESOLVING) &&
!(mpath->flags & MESH_PATH_FIXED))
mesh_queue_preq(mpath, PREQ_Q_F_START | PREQ_Q_F_REFRESH);
next_hop = rcu_dereference(mpath->next_hop);
if (next_hop) {
memcpy(hdr->addr1, next_hop->sta.addr, ETH_ALEN);
memcpy(hdr->addr2, sdata->vif.addr, ETH_ALEN);
err = 0;
}
endlookup:

View file

@ -221,6 +221,7 @@ void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
while ((skb = __skb_dequeue(&mpath->frame_queue)) != NULL) {
hdr = (struct ieee80211_hdr *) skb->data;
memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN);
memcpy(hdr->addr2, mpath->sdata->vif.addr, ETH_ALEN);
__skb_queue_tail(&tmpq, skb);
}
@ -264,6 +265,7 @@ static void prepare_for_gate(struct sk_buff *skb, char *dst_addr,
next_hop = rcu_dereference(gate_mpath->next_hop)->sta.addr;
memcpy(hdr->addr1, next_hop, ETH_ALEN);
rcu_read_unlock();
memcpy(hdr->addr2, gate_mpath->sdata->vif.addr, ETH_ALEN);
memcpy(hdr->addr3, dst_addr, ETH_ALEN);
}
@ -971,38 +973,11 @@ int mesh_path_send_to_gates(struct mesh_path *mpath)
* @skb: frame to discard
* @sdata: network subif the frame was to be sent through
*
* If the frame was being forwarded from another MP, a PERR frame will be sent
* to the precursor. The precursor's address (i.e. the previous hop) was saved
* in addr1 of the frame-to-be-forwarded, and would only be overwritten once
* the destination is successfully resolved.
*
* Locking: the function must be called within a rcu_read_lock region
*/
void mesh_path_discard_frame(struct sk_buff *skb,
struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
struct mesh_path *mpath;
u32 sn = 0;
__le16 reason = cpu_to_le16(WLAN_REASON_MESH_PATH_NOFORWARD);
if (memcmp(hdr->addr4, sdata->vif.addr, ETH_ALEN) != 0) {
u8 *ra, *da;
da = hdr->addr3;
ra = hdr->addr1;
rcu_read_lock();
mpath = mesh_path_lookup(da, sdata);
if (mpath) {
spin_lock_bh(&mpath->state_lock);
sn = ++mpath->sn;
spin_unlock_bh(&mpath->state_lock);
}
rcu_read_unlock();
mesh_path_error_tx(sdata->u.mesh.mshcfg.element_ttl, skb->data,
cpu_to_le32(sn), reason, ra, sdata);
}
kfree_skb(skb);
sdata->u.mesh.mshstats.dropped_frames_no_route++;
}

View file

@ -156,7 +156,6 @@ void ieee80211_offchannel_enable_all_ps(struct ieee80211_local *local,
}
void ieee80211_offchannel_return(struct ieee80211_local *local,
bool enable_beaconing,
bool offchannel_ps_disable)
{
struct ieee80211_sub_if_data *sdata;
@ -188,11 +187,9 @@ void ieee80211_offchannel_return(struct ieee80211_local *local,
netif_tx_wake_all_queues(sdata->dev);
}
/* Check to see if we should re-enable beaconing */
if (enable_beaconing &&
(sdata->vif.type == NL80211_IFTYPE_AP ||
sdata->vif.type == NL80211_IFTYPE_ADHOC ||
sdata->vif.type == NL80211_IFTYPE_MESH_POINT))
if (sdata->vif.type == NL80211_IFTYPE_AP ||
sdata->vif.type == NL80211_IFTYPE_ADHOC ||
sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
ieee80211_bss_info_change_notify(
sdata, BSS_CHANGED_BEACON_ENABLED);
}

View file

@ -1892,13 +1892,16 @@ ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
static ieee80211_rx_result
ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
{
struct ieee80211_hdr *hdr;
struct ieee80211_hdr *fwd_hdr, *hdr;
struct ieee80211_tx_info *info;
struct ieee80211s_hdr *mesh_hdr;
unsigned int hdrlen;
struct sk_buff *skb = rx->skb, *fwd_skb;
struct ieee80211_local *local = rx->local;
struct ieee80211_sub_if_data *sdata = rx->sdata;
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
__le16 reason = cpu_to_le16(WLAN_REASON_MESH_PATH_NOFORWARD);
u16 q, hdrlen;
hdr = (struct ieee80211_hdr *) skb->data;
hdrlen = ieee80211_hdrlen(hdr->frame_control);
@ -1914,15 +1917,8 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
return RX_CONTINUE;
if (!mesh_hdr->ttl)
/* illegal frame */
return RX_DROP_MONITOR;
if (ieee80211_queue_stopped(&local->hw, skb_get_queue_mapping(skb))) {
IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh,
dropped_frames_congestion);
return RX_DROP_MONITOR;
}
if (mesh_hdr->flags & MESH_FLAGS_AE) {
struct mesh_path *mppath;
char *proxied_addr;
@ -1954,59 +1950,50 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
compare_ether_addr(sdata->vif.addr, hdr->addr3) == 0)
return RX_CONTINUE;
skb_set_queue_mapping(skb, ieee80211_select_queue(sdata, skb));
mesh_hdr->ttl--;
q = ieee80211_select_queue_80211(local, skb, hdr);
if (ieee80211_queue_stopped(&local->hw, q)) {
IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_congestion);
return RX_DROP_MONITOR;
}
skb_set_queue_mapping(skb, q);
if (status->rx_flags & IEEE80211_RX_RA_MATCH) {
if (!mesh_hdr->ttl)
IEEE80211_IFSTA_MESH_CTR_INC(&rx->sdata->u.mesh,
dropped_frames_ttl);
else {
struct ieee80211_hdr *fwd_hdr;
struct ieee80211_tx_info *info;
if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
goto out;
fwd_skb = skb_copy(skb, GFP_ATOMIC);
if (!fwd_skb && net_ratelimit())
printk(KERN_DEBUG "%s: failed to clone mesh frame\n",
sdata->name);
if (!fwd_skb)
goto out;
fwd_hdr = (struct ieee80211_hdr *) fwd_skb->data;
memcpy(fwd_hdr->addr2, sdata->vif.addr, ETH_ALEN);
info = IEEE80211_SKB_CB(fwd_skb);
memset(info, 0, sizeof(*info));
info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
info->control.vif = &rx->sdata->vif;
info->control.jiffies = jiffies;
if (is_multicast_ether_addr(fwd_hdr->addr1)) {
IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh,
fwded_mcast);
} else {
int err;
/*
* Save TA to addr1 to send TA a path error if a
* suitable next hop is not found
*/
memcpy(fwd_hdr->addr1, fwd_hdr->addr2,
ETH_ALEN);
err = mesh_nexthop_lookup(fwd_skb, sdata);
/* Failed to immediately resolve next hop:
* fwded frame was dropped or will be added
* later to the pending skb queue. */
if (err)
return RX_DROP_MONITOR;
IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh,
fwded_unicast);
}
IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh,
fwded_frames);
ieee80211_add_pending_skb(local, fwd_skb);
}
if (!--mesh_hdr->ttl) {
IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_ttl);
return RX_DROP_MONITOR;
}
fwd_skb = skb_copy(skb, GFP_ATOMIC);
if (!fwd_skb) {
if (net_ratelimit())
printk(KERN_DEBUG "%s: failed to clone mesh frame\n",
sdata->name);
goto out;
}
fwd_hdr = (struct ieee80211_hdr *) fwd_skb->data;
info = IEEE80211_SKB_CB(fwd_skb);
memset(info, 0, sizeof(*info));
info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
info->control.vif = &rx->sdata->vif;
info->control.jiffies = jiffies;
if (is_multicast_ether_addr(fwd_hdr->addr1)) {
IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_mcast);
memcpy(fwd_hdr->addr2, sdata->vif.addr, ETH_ALEN);
} else if (!mesh_nexthop_lookup(fwd_skb, sdata)) {
IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_unicast);
} else {
/* unable to resolve next hop */
mesh_path_error_tx(ifmsh->mshcfg.element_ttl, fwd_hdr->addr3,
0, reason, fwd_hdr->addr2, sdata);
IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_no_route);
return RX_DROP_MONITOR;
}
IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_frames);
ieee80211_add_pending_skb(local, fwd_skb);
out:
if (is_multicast_ether_addr(hdr->addr1) ||
sdata->dev->flags & IFF_PROMISC)
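The rewritten forwarding path above reduces to a fixed decision order: drop if the target queue is stopped, decrement the TTL and drop at zero, copy the frame, then either forward multicast as-is, fill in a resolved next hop, or answer with a PERR and drop. A condensed model of that order follows; every field and helper here is illustrative, not mac80211's.

#include <stdbool.h>
#include <stdio.h>

enum verdict { FORWARD, DROP_CONGESTION, DROP_TTL, DROP_NO_ROUTE };

struct mesh_frame {
        int ttl;                /* remaining mesh hops                 */
        bool queue_stopped;     /* selected hardware queue is full     */
        bool is_multicast;      /* addr1 is a group address            */
        bool next_hop_known;    /* a next-hop lookup would succeed     */
};

static enum verdict mesh_fwd_verdict(struct mesh_frame *f)
{
        if (f->queue_stopped)
                return DROP_CONGESTION;
        if (--f->ttl == 0)
                return DROP_TTL;
        if (f->is_multicast || f->next_hop_known)
                return FORWARD;         /* copy skb, fix up addr1/addr2   */
        return DROP_NO_ROUTE;           /* send a PERR back towards addr2 */
}

int main(void)
{
        struct mesh_frame f = { .ttl = 2, .next_hop_known = true };

        printf("verdict = %d\n", mesh_fwd_verdict(&f));
        return 0;
}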

View file

@ -297,7 +297,7 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted,
if (!was_hw_scan) {
ieee80211_configure_filter(local);
drv_sw_scan_complete(local);
ieee80211_offchannel_return(local, true, true);
ieee80211_offchannel_return(local, true);
}
ieee80211_recalc_idle(local);
@ -602,7 +602,7 @@ static void ieee80211_scan_state_suspend(struct ieee80211_local *local,
* in off-channel state..will put that back
* on-channel at the end of scanning.
*/
ieee80211_offchannel_return(local, true, false);
ieee80211_offchannel_return(local, false);
*next_delay = HZ / 5;
/* afterwards, resume scan & go to next channel */

View file

@ -351,10 +351,6 @@ static int sta_info_finish_insert(struct sta_info *sta,
if (!sta->dummy || dummy_reinsert) {
/* notify driver */
if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
sdata = container_of(sdata->bss,
struct ieee80211_sub_if_data,
u.ap);
err = drv_sta_add(local, sdata, &sta->sta);
if (err) {
if (!async)

View file

@ -88,6 +88,7 @@ enum ieee80211_sta_info_flags {
* struct tid_ampdu_tx - TID aggregation information (Tx).
*
* @rcu_head: rcu head for freeing structure
* @session_timer: check if we keep Tx-ing on the TID (by timeout value)
* @addba_resp_timer: timer for peer's response to addba request
* @pending: pending frames queue -- use sta's spinlock to protect
* @dialog_token: dialog token for aggregation session
@ -110,6 +111,7 @@ enum ieee80211_sta_info_flags {
*/
struct tid_ampdu_tx {
struct rcu_head rcu_head;
struct timer_list session_timer;
struct timer_list addba_resp_timer;
struct sk_buff_head pending;
unsigned long state;
@ -497,7 +499,6 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
*/
int sta_info_insert(struct sta_info *sta);
int sta_info_insert_rcu(struct sta_info *sta) __acquires(RCU);
int sta_info_insert_atomic(struct sta_info *sta);
int sta_info_reinsert(struct sta_info *sta);
int sta_info_destroy_addr(struct ieee80211_sub_if_data *sdata,

View file

@ -151,11 +151,15 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx,
rate = mrate;
}
/* Time needed to transmit ACK
* (10 bytes + 4-byte FCS = 112 bits) plus SIFS; rounded up
* to closest integer */
dur = ieee80211_frame_duration(local, 10, rate, erp,
/* Don't calculate ACKs for QoS Frames with NoAck Policy set */
if (ieee80211_is_data_qos(hdr->frame_control) &&
*(ieee80211_get_qos_ctl(hdr)) & IEEE80211_QOS_CTL_ACK_POLICY_NOACK)
dur = 0;
else
/* Time needed to transmit ACK
* (10 bytes + 4-byte FCS = 112 bits) plus SIFS; rounded up
* to closest integer */
dur = ieee80211_frame_duration(local, 10, rate, erp,
tx->sdata->vif.bss_conf.use_short_preamble);
if (next_frag_len) {
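The NoAck duration hunk above hinges on reading the ACK-policy bits of the QoS Control field; a strict test masks both policy bits out and compares against the No-Ack encoding, since a bitwise OR against the constant would always evaluate true. The constants below follow IEEE 802.11 (policy bits 5 and 6 of the QoS Control field) but are declared locally for this sketch.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define QOS_CTL_ACK_POLICY_MASK   0x0060
#define QOS_CTL_ACK_POLICY_NOACK  0x0020

/* true if the QoS Control field requests the No Ack policy */
static bool qos_noack(uint16_t qos_ctl)
{
        return (qos_ctl & QOS_CTL_ACK_POLICY_MASK) == QOS_CTL_ACK_POLICY_NOACK;
}

int main(void)
{
        printf("tid 5 noack: %d, tid 5 normal ack: %d\n",
               qos_noack(0x0025), qos_noack(0x0005));
        return 0;
}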
@ -636,6 +640,7 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
else
txrc.max_rate_idx = fls(txrc.rate_idx_mask) - 1;
txrc.bss = (tx->sdata->vif.type == NL80211_IFTYPE_AP ||
tx->sdata->vif.type == NL80211_IFTYPE_MESH_POINT ||
tx->sdata->vif.type == NL80211_IFTYPE_ADHOC);
/* set up RTS protection if desired */
@ -1063,9 +1068,11 @@ static bool ieee80211_tx_prep_agg(struct ieee80211_tx_data *tx,
int tid)
{
bool queued = false;
bool reset_agg_timer = false;
if (test_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state)) {
info->flags |= IEEE80211_TX_CTL_AMPDU;
reset_agg_timer = true;
} else if (test_bit(HT_AGG_STATE_WANT_START, &tid_tx->state)) {
/*
* nothing -- this aggregation session is being started
@ -1097,6 +1104,7 @@ static bool ieee80211_tx_prep_agg(struct ieee80211_tx_data *tx,
/* do nothing, let packet pass through */
} else if (test_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state)) {
info->flags |= IEEE80211_TX_CTL_AMPDU;
reset_agg_timer = true;
} else {
queued = true;
info->control.vif = &tx->sdata->vif;
@ -1106,6 +1114,11 @@ static bool ieee80211_tx_prep_agg(struct ieee80211_tx_data *tx,
spin_unlock(&tx->sta->lock);
}
/* reset session timer */
if (reset_agg_timer && tid_tx->timeout)
mod_timer(&tid_tx->session_timer,
TU_TO_EXP_TIME(tid_tx->timeout));
return queued;
}
@ -1173,16 +1186,8 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
if (is_multicast_ether_addr(hdr->addr1)) {
tx->flags &= ~IEEE80211_TX_UNICAST;
info->flags |= IEEE80211_TX_CTL_NO_ACK;
} else {
} else
tx->flags |= IEEE80211_TX_UNICAST;
if (unlikely(local->wifi_wme_noack_test))
info->flags |= IEEE80211_TX_CTL_NO_ACK;
/*
* Flags are initialized to 0. Hence, no need to
* explicitly unset IEEE80211_TX_CTL_NO_ACK since
* it might already be set for injected frames.
*/
}
if (!(info->flags & IEEE80211_TX_CTL_DONTFRAG)) {
if (!(tx->flags & IEEE80211_TX_UNICAST) ||
@ -1223,9 +1228,10 @@ static bool ieee80211_tx_frags(struct ieee80211_local *local,
* queue is woken again.
*/
if (txpending)
skb_queue_splice(skbs, &local->pending[q]);
skb_queue_splice_init(skbs, &local->pending[q]);
else
skb_queue_splice_tail(skbs, &local->pending[q]);
skb_queue_splice_tail_init(skbs,
&local->pending[q]);
spin_unlock_irqrestore(&local->queue_stop_reason_lock,
flags);
@ -1297,7 +1303,7 @@ static bool __ieee80211_tx(struct ieee80211_local *local,
ieee80211_tpt_led_trig_tx(local, fc, led_len);
ieee80211_led_tx(local, 1);
WARN_ON(!skb_queue_empty(skbs));
WARN_ON_ONCE(!skb_queue_empty(skbs));
return result;
}
@ -1458,7 +1464,7 @@ void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
if (ieee80211_vif_is_mesh(&sdata->vif) &&
ieee80211_is_data(hdr->frame_control) &&
!is_multicast_ether_addr(hdr->addr1))
if (mesh_nexthop_lookup(skb, sdata)) {
if (mesh_nexthop_resolve(skb, sdata)) {
/* skb queued: don't free */
rcu_read_unlock();
return;
@ -2260,10 +2266,10 @@ static void ieee80211_beacon_add_tim(struct ieee80211_if_ap *bss,
/* Bitmap control */
*pos++ = n1 | aid0;
/* Part Virt Bitmap */
skb_put(skb, n2 - n1);
memcpy(pos, bss->tim + n1, n2 - n1 + 1);
tim[1] = n2 - n1 + 4;
skb_put(skb, n2 - n1);
} else {
*pos++ = aid0; /* Bitmap control */
*pos++ = 0; /* Part Virt Bitmap */

View file

@ -1240,8 +1240,11 @@ int ieee80211_reconfig(struct ieee80211_local *local)
changed |= BSS_CHANGED_IBSS;
/* fall through */
case NL80211_IFTYPE_AP:
changed |= BSS_CHANGED_SSID |
BSS_CHANGED_AP_PROBE_RESP;
changed |= BSS_CHANGED_SSID;
if (sdata->vif.type == NL80211_IFTYPE_AP)
changed |= BSS_CHANGED_AP_PROBE_RESP;
/* fall through */
case NL80211_IFTYPE_MESH_POINT:
changed |= BSS_CHANGED_BEACON |

View file

@ -52,6 +52,30 @@ static int wme_downgrade_ac(struct sk_buff *skb)
}
}
/* Indicate which queue to use for this fully formed 802.11 frame */
u16 ieee80211_select_queue_80211(struct ieee80211_local *local,
struct sk_buff *skb,
struct ieee80211_hdr *hdr)
{
u8 *p;
if (local->hw.queues < 4)
return 0;
if (!ieee80211_is_data(hdr->frame_control)) {
skb->priority = 7;
return ieee802_1d_to_ac[skb->priority];
}
if (!ieee80211_is_data_qos(hdr->frame_control)) {
skb->priority = 0;
return ieee802_1d_to_ac[skb->priority];
}
p = ieee80211_get_qos_ctl(hdr);
skb->priority = *p & IEEE80211_QOS_CTL_TAG1D_MASK;
return ieee80211_downgrade_queue(local, skb);
}
/* Indicate which queue to use. */
u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
@ -139,6 +163,7 @@ void ieee80211_set_qos_hdr(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb)
{
struct ieee80211_hdr *hdr = (void *)skb->data;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
/* Fill in the QoS header if there is one. */
if (ieee80211_is_data_qos(hdr->frame_control)) {
@ -150,9 +175,12 @@ void ieee80211_set_qos_hdr(struct ieee80211_sub_if_data *sdata,
/* preserve EOSP bit */
ack_policy = *p & IEEE80211_QOS_CTL_EOSP;
if (unlikely(sdata->local->wifi_wme_noack_test) ||
is_multicast_ether_addr(hdr->addr1))
if (is_multicast_ether_addr(hdr->addr1) ||
sdata->noack_map & BIT(tid)) {
ack_policy |= IEEE80211_QOS_CTL_ACK_POLICY_NOACK;
info->flags |= IEEE80211_TX_CTL_NO_ACK;
}
/* qos header is 2 bytes */
*p++ = ack_policy | tid;
*p = ieee80211_vif_is_mesh(&sdata->vif) ?
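ieee80211_select_queue_80211(), added above and reused by both the monitor transmit path and mesh forwarding, maps a fully formed 802.11 frame to a hardware queue: management frames get priority 7, non-QoS data priority 0, QoS data its TID, and the 802.1D tag then selects the access category. The sketch below mirrors that flow and assumes mac80211's queue order VO=0, VI=1, BE=2, BK=3.

#include <stdbool.h>
#include <stdio.h>

enum ac { AC_VO = 0, AC_VI = 1, AC_BE = 2, AC_BK = 3 };

/* 802.1D user priority (0..7) to access category, mirroring ieee802_1d_to_ac[] */
static const enum ac tag1d_to_ac[8] = {
        AC_BE, AC_BK, AC_BK, AC_BE, AC_VI, AC_VI, AC_VO, AC_VO
};

static enum ac select_queue(bool is_data, bool is_qos, int tid)
{
        int priority;

        if (!is_data)
                priority = 7;           /* management: voice queue       */
        else if (!is_qos)
                priority = 0;           /* legacy data: best effort      */
        else
                priority = tid & 0x7;   /* QoS data: 802.1D tag from TID */

        return tag1d_to_ac[priority];
}

int main(void)
{
        printf("mgmt -> AC %d, QoS data tid 5 -> AC %d\n",
               select_queue(false, false, 0), select_queue(true, true, 5));
        return 0;
}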

View file

@ -15,6 +15,9 @@
extern const int ieee802_1d_to_ac[8];
u16 ieee80211_select_queue_80211(struct ieee80211_local *local,
struct sk_buff *skb,
struct ieee80211_hdr *hdr);
u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb);
void ieee80211_set_qos_hdr(struct ieee80211_sub_if_data *sdata,

View file

@ -862,44 +862,6 @@ static void ieee80211_work_rx_queued_mgmt(struct ieee80211_local *local,
kfree_skb(skb);
}
static bool ieee80211_work_ct_coexists(enum nl80211_channel_type wk_ct,
enum nl80211_channel_type oper_ct)
{
switch (wk_ct) {
case NL80211_CHAN_NO_HT:
return true;
case NL80211_CHAN_HT20:
if (oper_ct != NL80211_CHAN_NO_HT)
return true;
return false;
case NL80211_CHAN_HT40MINUS:
case NL80211_CHAN_HT40PLUS:
return (wk_ct == oper_ct);
}
WARN_ON(1); /* shouldn't get here */
return false;
}
static enum nl80211_channel_type
ieee80211_calc_ct(enum nl80211_channel_type wk_ct,
enum nl80211_channel_type oper_ct)
{
switch (wk_ct) {
case NL80211_CHAN_NO_HT:
return oper_ct;
case NL80211_CHAN_HT20:
if (oper_ct != NL80211_CHAN_NO_HT)
return oper_ct;
return wk_ct;
case NL80211_CHAN_HT40MINUS:
case NL80211_CHAN_HT40PLUS:
return wk_ct;
}
WARN_ON(1); /* shouldn't get here */
return wk_ct;
}
static void ieee80211_work_timer(unsigned long data)
{
struct ieee80211_local *local = (void *) data;
@ -950,40 +912,12 @@ static void ieee80211_work_work(struct work_struct *work)
}
if (!started && !local->tmp_channel) {
bool on_oper_chan, on_oper_chan2;
enum nl80211_channel_type wk_ct;
on_oper_chan = ieee80211_cfg_on_oper_channel(local);
/* Work with existing channel type if possible. */
wk_ct = wk->chan_type;
if (wk->chan == local->hw.conf.channel)
wk_ct = ieee80211_calc_ct(wk->chan_type,
local->hw.conf.channel_type);
ieee80211_offchannel_stop_vifs(local, true);
local->tmp_channel = wk->chan;
local->tmp_channel_type = wk_ct;
/*
* Leave the station vifs in awake mode if they
* happen to be on the same channel as
* the requested channel.
*/
on_oper_chan2 = ieee80211_cfg_on_oper_channel(local);
if (on_oper_chan != on_oper_chan2) {
if (on_oper_chan2) {
/* going off oper channel, PS too */
ieee80211_offchannel_stop_vifs(local,
true);
ieee80211_hw_config(local, 0);
} else {
/* going on channel, but leave PS
* off-channel. */
ieee80211_hw_config(local, 0);
ieee80211_offchannel_return(local,
true,
false);
}
}
local->tmp_channel_type = wk->chan_type;
ieee80211_hw_config(local, 0);
started = true;
wk->timeout = jiffies;
@ -1052,34 +986,17 @@ static void ieee80211_work_work(struct work_struct *work)
list_for_each_entry(wk, &local->work_list, list) {
if (!wk->started)
continue;
if (wk->chan != local->tmp_channel)
continue;
if (!ieee80211_work_ct_coexists(wk->chan_type,
local->tmp_channel_type))
if (wk->chan != local->tmp_channel ||
wk->chan_type != local->tmp_channel_type)
continue;
remain_off_channel = true;
}
if (!remain_off_channel && local->tmp_channel) {
local->tmp_channel = NULL;
/* If tmp_channel wasn't operating channel, then
* we need to go back on-channel.
* NOTE: If we can ever be here while scannning,
* or if the hw_config() channel config logic changes,
* then we may need to do a more thorough check to see if
* we still need to do a hardware config. Currently,
* we cannot be here while scanning, however.
*/
if (!ieee80211_cfg_on_oper_channel(local))
ieee80211_hw_config(local, 0);
ieee80211_hw_config(local, 0);
/* At the least, we need to disable offchannel_ps,
* so just go ahead and run the entire offchannel
* return logic here. We *could* skip enabling
* beaconing if we were already on-oper-channel
* as a future optimization.
*/
ieee80211_offchannel_return(local, true, true);
ieee80211_offchannel_return(local, true);
/* give connection some time to breathe */
run_again(local, jiffies + HZ/2);

Some files were not shown because too many files have changed in this diff.