Auto-update from upstream
commit fc464476aa
9 changed files with 72 additions and 43 deletions
@@ -784,7 +784,7 @@ DVB SUBSYSTEM AND DRIVERS
 P:	LinuxTV.org Project
 M:	linux-dvb-maintainer@linuxtv.org
 L:	linux-dvb@linuxtv.org (subscription required)
-W:	http://linuxtv.org/developer/dvb.xml
+W:	http://linuxtv.org/
 S:	Supported

 EATA-DMA SCSI DRIVER
@@ -334,7 +334,7 @@ static void __cpuinit tsc_sync_wait(void)
 {
 	if (notscsync || !cpu_has_tsc)
 		return;
-	sync_tsc(boot_cpu_id);
+	sync_tsc(0);
 }

 static __init int notscsync_setup(char *s)
@@ -3789,6 +3789,7 @@ e1000_netpoll(struct net_device *netdev)
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	disable_irq(adapter->pdev->irq);
 	e1000_intr(adapter->pdev->irq, netdev, NULL);
+	e1000_clean_tx_irq(adapter);
 	enable_irq(adapter->pdev->irq);
 }
 #endif
@@ -593,7 +593,7 @@ void w1_search(struct w1_master *dev, w1_slave_found_callback cb)
 		 * Return 0 - device(s) present, 1 - no devices present.
 		 */
 		if (w1_reset_bus(dev)) {
-			dev_info(&dev->dev, "No devices present on the wire.\n");
+			dev_dbg(&dev->dev, "No devices present on the wire.\n");
 			break;
 		}

@@ -9,6 +9,7 @@

 #include <linux/netdevice.h>
 #include <linux/interrupt.h>
+#include <linux/rcupdate.h>
 #include <linux/list.h>

 struct netpoll;
@@ -26,6 +27,7 @@ struct netpoll {
 struct netpoll_info {
 	spinlock_t poll_lock;
 	int poll_owner;
+	int tries;
 	int rx_flags;
 	spinlock_t rx_lock;
 	struct netpoll *rx_np; /* netpoll that registered an rx_hook */
@@ -60,25 +62,31 @@ static inline int netpoll_rx(struct sk_buff *skb)
 	return ret;
 }

-static inline void netpoll_poll_lock(struct net_device *dev)
+static inline void *netpoll_poll_lock(struct net_device *dev)
 {
+	rcu_read_lock(); /* deal with race on ->npinfo */
 	if (dev->npinfo) {
 		spin_lock(&dev->npinfo->poll_lock);
 		dev->npinfo->poll_owner = smp_processor_id();
+		return dev->npinfo;
 	}
+	return NULL;
 }

-static inline void netpoll_poll_unlock(struct net_device *dev)
+static inline void netpoll_poll_unlock(void *have)
 {
-	if (dev->npinfo) {
-		dev->npinfo->poll_owner = -1;
-		spin_unlock(&dev->npinfo->poll_lock);
+	struct netpoll_info *npi = have;
+
+	if (npi) {
+		npi->poll_owner = -1;
+		spin_unlock(&npi->poll_lock);
 	}
+	rcu_read_unlock();
 }

 #else
 #define netpoll_rx(a) 0
-#define netpoll_poll_lock(a)
+#define netpoll_poll_lock(a) 0
 #define netpoll_poll_unlock(a)
 #endif

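[Annotation — not part of the diff] netpoll_poll_lock() now returns an opaque cookie: the device's netpoll_info when netpoll is attached, NULL otherwise, and it takes rcu_read_lock() so ->npinfo cannot be torn down mid-poll. netpoll_poll_unlock() then releases exactly what the cookie says was taken, instead of re-reading dev->npinfo. A minimal caller sketch, mirroring the net_rx_action() hunks further down (budget and dev come from that loop):

	void *have;

	have = netpoll_poll_lock(dev);	/* rcu_read_lock() + poll_lock if attached */
	dev->poll(dev, &budget);	/* device poll runs with netpoll excluded */
	netpoll_poll_unlock(have);	/* unlocks only what was actually locked */

The #else stubs change to match: netpoll_poll_lock(a) must now expand to 0 so that `have = netpoll_poll_lock(dev)` still compiles when netpoll is configured out.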
@@ -255,7 +255,7 @@ struct sk_buff {
 				nohdr:1;
 	/* 3 bits spare */
 	__u8			pkt_type;
-	__u16			protocol;
+	__be16			protocol;

 	void			(*destructor)(struct sk_buff *skb);
 #ifdef CONFIG_NETFILTER
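[Annotation — not part of the diff] __u16 to __be16 has no binary effect; it is an endianness annotation for sparse, since skb->protocol holds an on-wire, big-endian protocol ID. With endian checking enabled (`make C=1`), host-order stores now get flagged, so assignments and compares are expected to carry the conversion explicitly. Illustrative fragment:

	skb->protocol = htons(ETH_P_IP);	/* ok: htons() yields a __be16 */
	skb->protocol = ETH_P_IP;		/* now warned: host-order int into __be16 */
	if (skb->protocol == htons(ETH_P_ARP))	/* compare in network byte order */
		return;				/* (illustrative branch only) */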
@@ -1696,6 +1696,7 @@ static void net_rx_action(struct softirq_action *h)
 	struct softnet_data *queue = &__get_cpu_var(softnet_data);
 	unsigned long start_time = jiffies;
 	int budget = netdev_budget;
+	void *have;

 	local_irq_disable();

@@ -1709,10 +1710,10 @@ static void net_rx_action(struct softirq_action *h)

 		dev = list_entry(queue->poll_list.next,
 				 struct net_device, poll_list);
-		netpoll_poll_lock(dev);
+		have = netpoll_poll_lock(dev);

 		if (dev->quota <= 0 || dev->poll(dev, &budget)) {
-			netpoll_poll_unlock(dev);
+			netpoll_poll_unlock(have);
 			local_irq_disable();
 			list_del(&dev->poll_list);
 			list_add_tail(&dev->poll_list, &queue->poll_list);
@@ -1721,7 +1722,7 @@ static void net_rx_action(struct softirq_action *h)
 		else
 			dev->quota = dev->weight;
 	} else {
-		netpoll_poll_unlock(dev);
+		netpoll_poll_unlock(have);
 		dev_put(dev);
 		local_irq_disable();
 	}
@@ -33,6 +33,7 @@
 #define MAX_UDP_CHUNK 1460
 #define MAX_SKBS 32
 #define MAX_QUEUE_DEPTH (MAX_SKBS / 2)
+#define MAX_RETRIES 20000

 static DEFINE_SPINLOCK(skb_list_lock);
 static int nr_skbs;
@@ -248,14 +249,14 @@ static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
 	int status;
 	struct netpoll_info *npinfo;

-repeat:
-	if(!np || !np->dev || !netif_running(np->dev)) {
+	if (!np || !np->dev || !netif_running(np->dev)) {
 		__kfree_skb(skb);
 		return;
 	}

-	/* avoid recursion */
 	npinfo = np->dev->npinfo;

+	/* avoid recursion */
 	if (npinfo->poll_owner == smp_processor_id() ||
 	    np->dev->xmit_lock_owner == smp_processor_id()) {
 		if (np->drop)
@@ -265,30 +266,37 @@ repeat:
 		return;
 	}

-	spin_lock(&np->dev->xmit_lock);
-	np->dev->xmit_lock_owner = smp_processor_id();
+	do {
+		npinfo->tries--;
+		spin_lock(&np->dev->xmit_lock);
+		np->dev->xmit_lock_owner = smp_processor_id();

-	/*
-	 * network drivers do not expect to be called if the queue is
-	 * stopped.
-	 */
-	if (netif_queue_stopped(np->dev)) {
-		np->dev->xmit_lock_owner = -1;
-		spin_unlock(&np->dev->xmit_lock);
+		/*
+		 * network drivers do not expect to be called if the queue is
+		 * stopped.
+		 */
+		if (netif_queue_stopped(np->dev)) {
+			np->dev->xmit_lock_owner = -1;
+			spin_unlock(&np->dev->xmit_lock);
+			netpoll_poll(np);
+			udelay(50);
+			continue;
+		}

-		netpoll_poll(np);
-		goto repeat;
-	}
+		status = np->dev->hard_start_xmit(skb, np->dev);
+		np->dev->xmit_lock_owner = -1;
+		spin_unlock(&np->dev->xmit_lock);

-	status = np->dev->hard_start_xmit(skb, np->dev);
-	np->dev->xmit_lock_owner = -1;
-	spin_unlock(&np->dev->xmit_lock);
+		/* success */
+		if(!status) {
+			npinfo->tries = MAX_RETRIES; /* reset */
+			return;
+		}

-	/* transmit busy */
-	if(status) {
-		netpoll_poll(np);
-		goto repeat;
-	}
+		/* transmit busy */
+		netpoll_poll(np);
+		udelay(50);
+	} while (npinfo->tries > 0);
 }

 void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
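[Annotation — not part of the diff] The old path could livelock: on a stopped queue or a busy transmit it would poll the device and `goto repeat` forever. The replacement bounds the effort with npinfo->tries, decremented on every pass and reset to MAX_RETRIES (20000, defined above) on a successful transmit. A condensed sketch of the new control flow, with the xmit_lock handling elided:

	int status;

	do {
		npinfo->tries--;
		if (netif_queue_stopped(np->dev)) {
			netpoll_poll(np);	/* let the driver reap its ring */
			udelay(50);
			continue;		/* retry while budget remains */
		}
		status = np->dev->hard_start_xmit(skb, np->dev);
		if (!status) {
			npinfo->tries = MAX_RETRIES;	/* success: reset budget */
			return;
		}
		netpoll_poll(np);	/* transmit busy: poll, back off, retry */
		udelay(50);
	} while (npinfo->tries > 0);

If the budget runs out the function simply gives up on the skb; netpoll output is best-effort by design.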
@@ -349,15 +357,11 @@ static void arp_reply(struct sk_buff *skb)
 	unsigned char *arp_ptr;
 	int size, type = ARPOP_REPLY, ptype = ETH_P_ARP;
 	u32 sip, tip;
-	unsigned long flags;
 	struct sk_buff *send_skb;
 	struct netpoll *np = NULL;

-	spin_lock_irqsave(&npinfo->rx_lock, flags);
 	if (npinfo->rx_np && npinfo->rx_np->dev == skb->dev)
 		np = npinfo->rx_np;
-	spin_unlock_irqrestore(&npinfo->rx_lock, flags);

 	if (!np)
 		return;

@@ -639,9 +643,11 @@ int netpoll_setup(struct netpoll *np)
 		if (!npinfo)
 			goto release;

+		npinfo->rx_flags = 0;
 		npinfo->rx_np = NULL;
 		npinfo->poll_lock = SPIN_LOCK_UNLOCKED;
 		npinfo->poll_owner = -1;
+		npinfo->tries = MAX_RETRIES;
 		npinfo->rx_lock = SPIN_LOCK_UNLOCKED;
 	} else
 		npinfo = ndev->npinfo;
@@ -718,9 +724,16 @@ int netpoll_setup(struct netpoll *np)
 		npinfo->rx_np = np;
 		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
 	}

+	/* fill up the skb queue */
+	refill_skbs();
+
 	/* last thing to do is link it to the net device structure */
 	ndev->npinfo = npinfo;

+	/* avoid racing with NAPI reading npinfo */
+	synchronize_rcu();
+
 	return 0;

 release:
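[Annotation — not part of the diff] Two setup-ordering fixes here: refill_skbs() pre-fills the transmit skb pool before the console can be used, and synchronize_rcu() pairs with the rcu_read_lock() taken in netpoll_poll_lock() (netpoll.h hunk above). The writer publishes the pointer first and only then waits out the readers:

	/* writer side: publish npinfo, then wait for current RCU readers */
	ndev->npinfo = npinfo;
	synchronize_rcu();

so any poller that entered its rcu_read_lock() section before the assignment has finished by the time netpoll_setup() returns, and every later poll pass observes the new npinfo.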
@@ -1370,15 +1370,21 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)

 	if (skb->len > cur_mss) {
 		int old_factor = tcp_skb_pcount(skb);
-		int new_factor;
+		int diff;

 		if (tcp_fragment(sk, skb, cur_mss, cur_mss))
 			return -ENOMEM; /* We'll try again later. */

 		/* New SKB created, account for it. */
-		new_factor = tcp_skb_pcount(skb);
-		tp->packets_out -= old_factor - new_factor;
-		tp->packets_out += tcp_skb_pcount(skb->next);
+		diff = old_factor - tcp_skb_pcount(skb) -
+		       tcp_skb_pcount(skb->next);
+		tp->packets_out -= diff;
+
+		if (diff > 0) {
+			tp->fackets_out -= diff;
+			if ((int)tp->fackets_out < 0)
+				tp->fackets_out = 0;
+		}
 	}

 	/* Collapse two adjacent packets if worthwhile and we can. */
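[Annotation — not part of the diff] The packets_out arithmetic is unchanged in effect (subtract old_factor, add back the pcounts of the two resulting segments); the fix folds it into a single signed diff and, when the split genuinely reduced the accounted packet count (diff > 0, presumably possible because segment pcounts need not be conserved across the split), also pulls fackets_out down so the forward-ACK counter cannot exceed reality. A worked example with assumed pcounts:

	int old_factor = 4;		/* pcount before the split */
	int diff;

	/* assume the split left pcount(skb) == 1 and pcount(skb->next) == 2 */
	diff = old_factor - 1 - 2;	/* = 1 packet no longer accounted */

	tp->packets_out -= diff;	/* shrink the in-flight count by 1 */
	if (diff > 0) {
		tp->fackets_out -= diff;	/* keep the FACK count consistent */
		if ((int)tp->fackets_out < 0)
			tp->fackets_out = 0;	/* clamp: the field is unsigned */
	}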