Auto-update from upstream

commit fc464476aa

9 changed files with 72 additions and 43 deletions
@@ -784,7 +784,7 @@ DVB SUBSYSTEM AND DRIVERS
 P:	LinuxTV.org Project
 M:	linux-dvb-maintainer@linuxtv.org
 L:	linux-dvb@linuxtv.org (subscription required)
-W:	http://linuxtv.org/developer/dvb.xml
+W:	http://linuxtv.org/
 S:	Supported
 
 EATA-DMA SCSI DRIVER

@@ -334,7 +334,7 @@ static void __cpuinit tsc_sync_wait(void)
 {
 	if (notscsync || !cpu_has_tsc)
 		return;
-	sync_tsc(boot_cpu_id);
+	sync_tsc(0);
 }
 
 static __init int notscsync_setup(char *s)

@@ -3789,6 +3789,7 @@ e1000_netpoll(struct net_device *netdev)
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	disable_irq(adapter->pdev->irq);
 	e1000_intr(adapter->pdev->irq, netdev, NULL);
+	e1000_clean_tx_irq(adapter);
 	enable_irq(adapter->pdev->irq);
 }
 #endif

@@ -593,7 +593,7 @@ void w1_search(struct w1_master *dev, w1_slave_found_callback cb)
 	 * Return 0 - device(s) present, 1 - no devices present.
 	 */
 	if (w1_reset_bus(dev)) {
-		dev_info(&dev->dev, "No devices present on the wire.\n");
+		dev_dbg(&dev->dev, "No devices present on the wire.\n");
 		break;
 	}
 

@@ -9,6 +9,7 @@
 #include <linux/netdevice.h>
 #include <linux/interrupt.h>
 #include <linux/rcupdate.h>
+#include <linux/list.h>
 
 struct netpoll;
 

@@ -26,6 +27,7 @@ struct netpoll {
 struct netpoll_info {
 	spinlock_t poll_lock;
 	int poll_owner;
+	int tries;
 	int rx_flags;
 	spinlock_t rx_lock;
 	struct netpoll *rx_np; /* netpoll that registered an rx_hook */

@@ -60,25 +62,31 @@ static inline int netpoll_rx(struct sk_buff *skb)
 	return ret;
 }
 
-static inline void netpoll_poll_lock(struct net_device *dev)
+static inline void *netpoll_poll_lock(struct net_device *dev)
 {
 	rcu_read_lock(); /* deal with race on ->npinfo */
 	if (dev->npinfo) {
 		spin_lock(&dev->npinfo->poll_lock);
 		dev->npinfo->poll_owner = smp_processor_id();
+		return dev->npinfo;
 	}
+	return NULL;
 }
 
-static inline void netpoll_poll_unlock(struct net_device *dev)
+static inline void netpoll_poll_unlock(void *have)
 {
-	if (dev->npinfo) {
-		dev->npinfo->poll_owner = -1;
-		spin_unlock(&dev->npinfo->poll_lock);
+	struct netpoll_info *npi = have;
+
+	if (npi) {
+		npi->poll_owner = -1;
+		spin_unlock(&npi->poll_lock);
 	}
 	rcu_read_unlock();
 }
 
 #else
 #define netpoll_rx(a) 0
-#define netpoll_poll_lock(a)
+#define netpoll_poll_lock(a) 0
 #define netpoll_poll_unlock(a)
 #endif

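Note: the hunk above changes the netpoll poll-lock helpers so that netpoll_poll_lock() returns an opaque cookie (the locked netpoll_info, or NULL when the device has none) and netpoll_poll_unlock() takes that cookie back instead of re-reading dev->npinfo. The caller therefore always unlocks exactly what it locked, even if ->npinfo changes in between; the net_rx_action() hunks further down store the cookie in a local `have` variable. A minimal userspace sketch of the same cookie pattern, using pthreads and made-up names (box/box_info stand in for net_device/netpoll_info; this is not the kernel code):

/* Illustrative userspace sketch of the lock-cookie pattern only; not kernel code.
 * "box"/"box_info" are made-up stand-ins for net_device/netpoll_info. */
#include <pthread.h>
#include <stddef.h>

struct box_info {
	pthread_mutex_t lock;
};

struct box {
	struct box_info *info;	/* may be NULL, may appear at runtime */
};

/* Lock and return a cookie recording what, if anything, was locked. */
static void *box_poll_lock(struct box *b)
{
	struct box_info *info = b->info;

	if (info)
		pthread_mutex_lock(&info->lock);
	return info;		/* NULL means "nothing to unlock later" */
}

/* Unlock via the cookie, never by re-reading b->info. */
static void box_poll_unlock(void *cookie)
{
	struct box_info *info = cookie;

	if (info)
		pthread_mutex_unlock(&info->lock);
}

int main(void)
{
	struct box_info info = { PTHREAD_MUTEX_INITIALIZER };
	struct box b = { &info };

	void *have = box_poll_lock(&b);	/* mirrors: have = netpoll_poll_lock(dev); */
	/* ... poll work ... */
	box_poll_unlock(have);		/* mirrors: netpoll_poll_unlock(have); */
	return 0;
}
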
@@ -255,7 +255,7 @@ struct sk_buff {
 				nohdr:1;
 				/* 3 bits spare */
 	__u8			pkt_type;
-	__u16			protocol;
+	__be16			protocol;
 
 	void			(*destructor)(struct sk_buff *skb);
 #ifdef CONFIG_NETFILTER

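Note: the skb->protocol change above is an annotation-only fix: __be16 has the same size as __u16 but is tagged as big-endian for sparse's endianness checking, matching how the field is used (it carries ethertypes such as ETH_P_IP already converted with htons()). A small userspace illustration of that byte-order convention (not kernel code; plain uint16_t stands in for __be16):

/* Illustrative byte-order demo only; uint16_t stands in for the kernel's __be16. */
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t host = 0x0800;		/* ETH_P_IP in host byte order */
	uint16_t wire = htons(host);	/* big-endian, as kept in skb->protocol */
	unsigned char *p = (unsigned char *)&wire;

	printf("host 0x%04x -> wire bytes %02x %02x\n", host, p[0], p[1]);
	return 0;
}
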
@@ -1696,7 +1696,8 @@ static void net_rx_action(struct softirq_action *h)
 	struct softnet_data *queue = &__get_cpu_var(softnet_data);
 	unsigned long start_time = jiffies;
 	int budget = netdev_budget;
+	void *have;
 
 	local_irq_disable();
 
 	while (!list_empty(&queue->poll_list)) {
@@ -1709,10 +1710,10 @@ static void net_rx_action(struct softirq_action *h)
 
 		dev = list_entry(queue->poll_list.next,
 				 struct net_device, poll_list);
-		netpoll_poll_lock(dev);
+		have = netpoll_poll_lock(dev);
 
 		if (dev->quota <= 0 || dev->poll(dev, &budget)) {
-			netpoll_poll_unlock(dev);
+			netpoll_poll_unlock(have);
 			local_irq_disable();
 			list_del(&dev->poll_list);
 			list_add_tail(&dev->poll_list, &queue->poll_list);
@@ -1721,7 +1722,7 @@ static void net_rx_action(struct softirq_action *h)
 			else
 				dev->quota = dev->weight;
 		} else {
-			netpoll_poll_unlock(dev);
+			netpoll_poll_unlock(have);
 			dev_put(dev);
 			local_irq_disable();
 		}

@@ -33,6 +33,7 @@
 #define MAX_UDP_CHUNK 1460
 #define MAX_SKBS 32
 #define MAX_QUEUE_DEPTH (MAX_SKBS / 2)
+#define MAX_RETRIES 20000
 
 static DEFINE_SPINLOCK(skb_list_lock);
 static int nr_skbs;

@@ -248,14 +249,14 @@ static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
 	int status;
 	struct netpoll_info *npinfo;
 
-repeat:
-	if(!np || !np->dev || !netif_running(np->dev)) {
+	if (!np || !np->dev || !netif_running(np->dev)) {
 		__kfree_skb(skb);
 		return;
 	}
 
-	/* avoid recursion */
 	npinfo = np->dev->npinfo;
+
+	/* avoid recursion */
 	if (npinfo->poll_owner == smp_processor_id() ||
 	    np->dev->xmit_lock_owner == smp_processor_id()) {
 		if (np->drop)

@@ -265,30 +266,37 @@ repeat:
 		return;
 	}
 
-	spin_lock(&np->dev->xmit_lock);
-	np->dev->xmit_lock_owner = smp_processor_id();
+	do {
+		npinfo->tries--;
+		spin_lock(&np->dev->xmit_lock);
+		np->dev->xmit_lock_owner = smp_processor_id();
 
-	/*
-	 * network drivers do not expect to be called if the queue is
-	 * stopped.
-	 */
-	if (netif_queue_stopped(np->dev)) {
+		/*
+		 * network drivers do not expect to be called if the queue is
+		 * stopped.
+		 */
+		if (netif_queue_stopped(np->dev)) {
+			np->dev->xmit_lock_owner = -1;
+			spin_unlock(&np->dev->xmit_lock);
+			netpoll_poll(np);
+			udelay(50);
+			continue;
+		}
+
+		status = np->dev->hard_start_xmit(skb, np->dev);
 		np->dev->xmit_lock_owner = -1;
 		spin_unlock(&np->dev->xmit_lock);
 
-		netpoll_poll(np);
-		goto repeat;
-	}
+		/* success */
+		if(!status) {
+			npinfo->tries = MAX_RETRIES; /* reset */
+			return;
+		}
 
-	status = np->dev->hard_start_xmit(skb, np->dev);
-	np->dev->xmit_lock_owner = -1;
-	spin_unlock(&np->dev->xmit_lock);
-
-	/* transmit busy */
-	if(status) {
+		/* transmit busy */
 		netpoll_poll(np);
-		goto repeat;
-	}
+		udelay(50);
+	} while (npinfo->tries > 0);
 }
 
 void netpoll_send_udp(struct netpoll *np, const char *msg, int len)

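Note: the netpoll_send_skb() hunk above replaces the open-ended goto-repeat transmit path with a bounded retry loop: each pass decrements npinfo->tries, a successful hard_start_xmit() resets the budget to MAX_RETRIES and returns, and a stopped queue or busy driver polls the device, waits 50us and retries until the budget is exhausted. A rough userspace sketch of that retry shape (made-up try_send()/budget names; not the kernel transmit path):

/* Illustrative bounded-retry sketch only; not the kernel code. */
#include <stdio.h>
#include <unistd.h>

#define MAX_TRIES 20000

static int budget = MAX_TRIES;

/* Pretend transmit: 0 on success, nonzero while the "device" is busy. */
static int try_send(int attempt)
{
	return attempt < 3;
}

static void send_with_retry(void)
{
	int attempt = 0;

	do {
		budget--;

		if (try_send(attempt++) == 0) {
			budget = MAX_TRIES;	/* success: reset the budget */
			printf("sent after %d attempt(s)\n", attempt);
			return;
		}

		/* busy: give the device a moment, then retry while budget remains */
		usleep(50);
	} while (budget > 0);

	printf("gave up after %d attempts\n", attempt);
}

int main(void)
{
	send_with_retry();
	return 0;
}
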
@@ -349,15 +357,11 @@ static void arp_reply(struct sk_buff *skb)
 	unsigned char *arp_ptr;
 	int size, type = ARPOP_REPLY, ptype = ETH_P_ARP;
 	u32 sip, tip;
-	unsigned long flags;
 	struct sk_buff *send_skb;
 	struct netpoll *np = NULL;
 
-	spin_lock_irqsave(&npinfo->rx_lock, flags);
 	if (npinfo->rx_np && npinfo->rx_np->dev == skb->dev)
 		np = npinfo->rx_np;
-	spin_unlock_irqrestore(&npinfo->rx_lock, flags);
-
 	if (!np)
 		return;
 

@@ -639,9 +643,11 @@ int netpoll_setup(struct netpoll *np)
 		if (!npinfo)
 			goto release;
 
 		npinfo->rx_flags = 0;
 		npinfo->rx_np = NULL;
 		npinfo->poll_lock = SPIN_LOCK_UNLOCKED;
 		npinfo->poll_owner = -1;
+		npinfo->tries = MAX_RETRIES;
 		npinfo->rx_lock = SPIN_LOCK_UNLOCKED;
 	} else
 		npinfo = ndev->npinfo;

@@ -718,9 +724,16 @@ int netpoll_setup(struct netpoll *np)
 		npinfo->rx_np = np;
 		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
 	}
+
+	/* fill up the skb queue */
+	refill_skbs();
+
+	/* last thing to do is link it to the net device structure */
+	ndev->npinfo = npinfo;
+
+	/* avoid racing with NAPI reading npinfo */
+	synchronize_rcu();
 
 	return 0;
 
 release:

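Note: the netpoll_setup() hunk above orders initialization so that ndev->npinfo is published only after the structure and the skb queue are fully set up, then calls synchronize_rcu() because the poll path dereferences npinfo under rcu_read_lock() ("deal with race on ->npinfo"). A simplified userspace sketch of the publish-after-init idea using C11 atomics (an analogy only, not RCU and not the kernel code):

/* Illustrative publish-after-init sketch (C11 atomics as an analogy for the
 * kernel's "set every field, then link the pointer" ordering; not RCU). */
#include <stdatomic.h>
#include <stdlib.h>

struct info {
	int poll_owner;
	int tries;
};

/* Readers load this pointer and must see either NULL or a finished object. */
static _Atomic(struct info *) published;

static int setup(void)
{
	struct info *inf = malloc(sizeof(*inf));

	if (!inf)
		return -1;

	/* finish every field first ... */
	inf->poll_owner = -1;
	inf->tries = 20000;

	/* ... and only then make the object visible to concurrent readers */
	atomic_store_explicit(&published, inf, memory_order_release);
	return 0;
}

int main(void)
{
	return setup() ? 1 : 0;
}

The synchronize_rcu() call in the hunk additionally waits out any reader still inside an rcu_read_lock() section from before the assignment; the sketch does not model that part.
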
@@ -1370,15 +1370,21 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
 
 	if (skb->len > cur_mss) {
 		int old_factor = tcp_skb_pcount(skb);
-		int new_factor;
+		int diff;
 
 		if (tcp_fragment(sk, skb, cur_mss, cur_mss))
 			return -ENOMEM; /* We'll try again later. */
 
 		/* New SKB created, account for it. */
-		new_factor = tcp_skb_pcount(skb);
-		tp->packets_out -= old_factor - new_factor;
-		tp->packets_out += tcp_skb_pcount(skb->next);
+		diff = old_factor - tcp_skb_pcount(skb) -
+		       tcp_skb_pcount(skb->next);
+		tp->packets_out -= diff;
+
+		if (diff > 0) {
+			tp->fackets_out -= diff;
+			if ((int)tp->fackets_out < 0)
+				tp->fackets_out = 0;
+		}
 	}
 
 	/* Collapse two adjacent packets if worthwhile and we can. */

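Note: the tcp_retransmit_skb() hunk above folds the packets_out bookkeeping into one signed diff and reuses it to keep fackets_out consistent, clamping at zero. A worked example with made-up counts (not taken from the source): if the skb accounted for old_factor = 5 segments before tcp_fragment() and the two halves account for 2 + 2 segments afterwards, then diff = 5 - 2 - 2 = 1, so packets_out drops by 1 and fackets_out drops by 1 as well. The same arithmetic as a standalone snippet:

/* Illustrative arithmetic only; the counts are made up, not kernel state. */
#include <stdio.h>

int main(void)
{
	unsigned int packets_out = 40, fackets_out = 12;
	int old_factor  = 5;	/* tcp_skb_pcount(skb) before fragmenting */
	int pcount_skb  = 2;	/* tcp_skb_pcount(skb) after fragmenting  */
	int pcount_next = 2;	/* tcp_skb_pcount(skb->next), the new half */

	int diff = old_factor - pcount_skb - pcount_next;	/* = 1 */

	packets_out -= diff;
	if (diff > 0) {
		fackets_out -= diff;
		if ((int)fackets_out < 0)
			fackets_out = 0;
	}

	printf("diff=%d packets_out=%u fackets_out=%u\n",
	       diff, packets_out, fackets_out);
	return 0;
}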