Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (42 commits)
  virtio_net: Make delayed refill more reliable
  sfc: Use fixed-size buffers for MCDI NVRAM requests
  sfc: Add workspace for GMAC bug workaround to MCDI MAC_STATS buffer
  tcp_probe: avoid modulus operation and wrap fix
  qlge: Only free resources if they were allocated
  netns xfrm: deal with dst entries in netns
  sky2: revert config space change
  vlan: fix vlan_skb_recv()
  netns xfrm: fix "ip xfrm state|policy count" misreport
  sky2: Enable/disable WOL per hardware device
  net: Fix IPv6 GSO type checks in Intel ethernet drivers
  igb/igbvf: cleanup exception handling in tx_map_adv
  MAINTAINERS: Add Intel igbvf maintainer
  e1000/e1000e: don't use small hardware rx buffers
  fmvj18x_cs: add new id (Panasonic lan & modem card)
  be2net: swap only first 2 fields of mcc_wrb
  Please add support for Microsoft MN-120 PCMCIA network card
  be2net: fix bug in rx page posting
  wimax/i2400m: Add support for more i6x50 SKUs
  e1000e: enhance frame fragment detection
  ...
Author: Linus Torvalds
Date:   2010-01-25 18:57:07 -08:00
commit e2197787ef
60 changed files with 389 additions and 193 deletions


@ -987,7 +987,6 @@ F: drivers/platform/x86/asus-laptop.c
ASYNCHRONOUS TRANSFERS/TRANSFORMS (IOAT) API
M: Dan Williams <dan.j.williams@intel.com>
M: Maciej Sosnowski <maciej.sosnowski@intel.com>
W: http://sourceforge.net/projects/xscaleiop
S: Supported
F: Documentation/crypto/async-tx-api.txt
@ -1823,7 +1822,6 @@ S: Supported
F: fs/dlm/
DMA GENERIC OFFLOAD ENGINE SUBSYSTEM
M: Maciej Sosnowski <maciej.sosnowski@intel.com>
M: Dan Williams <dan.j.williams@intel.com>
S: Supported
F: drivers/dma/
@ -2786,7 +2784,7 @@ F: arch/x86/kernel/microcode_core.c
F: arch/x86/kernel/microcode_intel.c
INTEL I/OAT DMA DRIVER
M: Maciej Sosnowski <maciej.sosnowski@intel.com>
M: Dan Williams <dan.j.williams@intel.com>
S: Supported
F: drivers/dma/ioat*
@ -2824,10 +2822,11 @@ L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/ixp2000/
INTEL ETHERNET DRIVERS (e100/e1000/e1000e/igb/ixgb/ixgbe)
INTEL ETHERNET DRIVERS (e100/e1000/e1000e/igb/igbvf/ixgb/ixgbe)
M: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
M: Jesse Brandeburg <jesse.brandeburg@intel.com>
M: Bruce Allan <bruce.w.allan@intel.com>
M: Alex Duyck <alexander.h.duyck@intel.com>
M: PJ Waskiewicz <peter.p.waskiewicz.jr@intel.com>
M: John Ronciak <john.ronciak@intel.com>
L: e1000-devel@lists.sourceforge.net
@ -2837,6 +2836,7 @@ F: drivers/net/e100.c
F: drivers/net/e1000/
F: drivers/net/e1000e/
F: drivers/net/igb/
F: drivers/net/igbvf/
F: drivers/net/ixgb/
F: drivers/net/ixgbe/


@ -286,7 +286,7 @@ static void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
MCC_WRB_SGE_CNT_SHIFT;
wrb->payload_length = payload_len;
wrb->tag0 = opcode;
be_dws_cpu_to_le(wrb, 20);
be_dws_cpu_to_le(wrb, 8);
}
/* Don't touch the hdr after it's prepared */


@ -910,7 +910,7 @@ static inline struct page *be_alloc_pages(u32 size)
static void be_post_rx_frags(struct be_adapter *adapter)
{
struct be_rx_page_info *page_info_tbl = adapter->rx_obj.page_info_tbl;
struct be_rx_page_info *page_info = NULL;
struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
struct be_queue_info *rxq = &adapter->rx_obj.q;
struct page *pagep = NULL;
struct be_eth_rx_d *rxd;
@ -941,7 +941,6 @@ static void be_post_rx_frags(struct be_adapter *adapter)
rxd = queue_head_node(rxq);
rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
queue_head_inc(rxq);
/* Any space left in the current big page for another frag? */
if ((page_offset + rx_frag_size + rx_frag_size) >
@ -949,10 +948,13 @@ static void be_post_rx_frags(struct be_adapter *adapter)
pagep = NULL;
page_info->last_page_user = true;
}
prev_page_info = page_info;
queue_head_inc(rxq);
page_info = &page_info_tbl[rxq->head];
}
if (pagep)
page_info->last_page_user = true;
prev_page_info->last_page_user = true;
if (posted) {
atomic_add(posted, &rxq->used);


@ -33,6 +33,7 @@
#include <asm/dma.h>
#include <linux/dma-mapping.h>
#include <asm/dpmc.h>
#include <asm/blackfin.h>
#include <asm/cacheflush.h>
#include <asm/portmux.h>
@ -386,8 +387,8 @@ static int mii_probe(struct net_device *dev)
u32 sclk, mdc_div;
/* Enable PHY output early */
if (!(bfin_read_VR_CTL() & PHYCLKOE))
bfin_write_VR_CTL(bfin_read_VR_CTL() | PHYCLKOE);
if (!(bfin_read_VR_CTL() & CLKBUFOE))
bfin_write_VR_CTL(bfin_read_VR_CTL() | CLKBUFOE);
sclk = get_sclk();
mdc_div = ((sclk / MDC_CLK) / 2) - 1;


@ -326,6 +326,8 @@ struct e1000_adapter {
/* for ioport free */
int bars;
int need_ioport;
bool discarding;
};
enum e1000_state_t {


@ -1698,18 +1698,6 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
rctl &= ~E1000_RCTL_SZ_4096;
rctl |= E1000_RCTL_BSEX;
switch (adapter->rx_buffer_len) {
case E1000_RXBUFFER_256:
rctl |= E1000_RCTL_SZ_256;
rctl &= ~E1000_RCTL_BSEX;
break;
case E1000_RXBUFFER_512:
rctl |= E1000_RCTL_SZ_512;
rctl &= ~E1000_RCTL_BSEX;
break;
case E1000_RXBUFFER_1024:
rctl |= E1000_RCTL_SZ_1024;
rctl &= ~E1000_RCTL_BSEX;
break;
case E1000_RXBUFFER_2048:
default:
rctl |= E1000_RCTL_SZ_2048;
@ -2802,13 +2790,13 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
dma_error:
dev_err(&pdev->dev, "TX DMA map failed\n");
buffer_info->dma = 0;
count--;
while (count >= 0) {
if (count)
count--;
i--;
if (i < 0)
while (count--) {
if (i==0)
i += tx_ring->count;
i--;
buffer_info = &tx_ring->buffer_info[i];
e1000_unmap_and_free_tx_resource(adapter, buffer_info);
}
@ -3176,13 +3164,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
* however with the new *_jumbo_rx* routines, jumbo receives will use
* fragmented skbs */
if (max_frame <= E1000_RXBUFFER_256)
adapter->rx_buffer_len = E1000_RXBUFFER_256;
else if (max_frame <= E1000_RXBUFFER_512)
adapter->rx_buffer_len = E1000_RXBUFFER_512;
else if (max_frame <= E1000_RXBUFFER_1024)
adapter->rx_buffer_len = E1000_RXBUFFER_1024;
else if (max_frame <= E1000_RXBUFFER_2048)
if (max_frame <= E1000_RXBUFFER_2048)
adapter->rx_buffer_len = E1000_RXBUFFER_2048;
else
#if (PAGE_SIZE >= E1000_RXBUFFER_16384)
@ -3850,13 +3832,22 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
length = le16_to_cpu(rx_desc->length);
/* !EOP means multiple descriptors were used to store a single
* packet, also make sure the frame isn't just CRC only */
if (unlikely(!(status & E1000_RXD_STAT_EOP) || (length <= 4))) {
* packet, if that's the case we need to toss it. In fact, we need
* to toss every packet with the EOP bit clear and the next
* frame that _does_ have the EOP bit set, as it is by
* definition only a frame fragment
*/
if (unlikely(!(status & E1000_RXD_STAT_EOP)))
adapter->discarding = true;
if (adapter->discarding) {
/* All receives must fit into a single buffer */
E1000_DBG("%s: Receive packet consumed multiple"
" buffers\n", netdev->name);
/* recycle */
buffer_info->skb = skb;
if (status & E1000_RXD_STAT_EOP)
adapter->discarding = false;
goto next_desc;
}
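Note: the new comment block above spells out the rule that both the e1000 and e1000e changes in this merge implement: once a descriptor shows up with EOP clear, that buffer and every following buffer up to and including the next descriptor that does have EOP set are fragments of an oversized frame and must be recycled instead of being handed to the stack. A stand-alone sketch of that discard state machine, assuming EOP is the 0x02 bit of the descriptor status as in the e1000 definitions (the names below are illustrative, not the driver's API):

    #include <stdbool.h>
    #include <stdint.h>

    #define RXD_STAT_EOP 0x02        /* "end of packet" bit in the status byte */

    static bool discarding;          /* plays the role of adapter->discarding */

    /* Return true only when this buffer carries a complete, single-buffer frame. */
    static bool keep_rx_buffer(uint8_t status)
    {
            if (!(status & RXD_STAT_EOP))
                    discarding = true;          /* frame spilled into more buffers */
            if (discarding) {
                    if (status & RXD_STAT_EOP)
                            discarding = false; /* that was the last fragment */
                    return false;               /* recycle this buffer */
            }
            return true;
    }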


@ -421,6 +421,7 @@ struct e1000_info {
/* CRC Stripping defines */
#define FLAG2_CRC_STRIPPING (1 << 0)
#define FLAG2_HAS_PHY_WAKEUP (1 << 1)
#define FLAG2_IS_DISCARDING (1 << 2)
#define E1000_RX_DESC_PS(R, i) \
(&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))


@ -450,13 +450,23 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
length = le16_to_cpu(rx_desc->length);
/* !EOP means multiple descriptors were used to store a single
* packet, also make sure the frame isn't just CRC only */
if (!(status & E1000_RXD_STAT_EOP) || (length <= 4)) {
/*
* !EOP means multiple descriptors were used to store a single
* packet, if that's the case we need to toss it. In fact, we
* need to toss every packet with the EOP bit clear and the
* next frame that _does_ have the EOP bit set, as it is by
* definition only a frame fragment
*/
if (unlikely(!(status & E1000_RXD_STAT_EOP)))
adapter->flags2 |= FLAG2_IS_DISCARDING;
if (adapter->flags2 & FLAG2_IS_DISCARDING) {
/* All receives must fit into a single buffer */
e_dbg("Receive packet consumed multiple buffers\n");
/* recycle */
buffer_info->skb = skb;
if (status & E1000_RXD_STAT_EOP)
adapter->flags2 &= ~FLAG2_IS_DISCARDING;
goto next_desc;
}
@ -745,10 +755,16 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
PCI_DMA_FROMDEVICE);
buffer_info->dma = 0;
if (!(staterr & E1000_RXD_STAT_EOP)) {
/* see !EOP comment in other rx routine */
if (!(staterr & E1000_RXD_STAT_EOP))
adapter->flags2 |= FLAG2_IS_DISCARDING;
if (adapter->flags2 & FLAG2_IS_DISCARDING) {
e_dbg("Packet Split buffers didn't pick up the full "
"packet\n");
dev_kfree_skb_irq(skb);
if (staterr & E1000_RXD_STAT_EOP)
adapter->flags2 &= ~FLAG2_IS_DISCARDING;
goto next_desc;
}
@ -1118,6 +1134,7 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
adapter->flags2 &= ~FLAG2_IS_DISCARDING;
writel(0, adapter->hw.hw_addr + rx_ring->head);
writel(0, adapter->hw.hw_addr + rx_ring->tail);
@ -2333,18 +2350,6 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
rctl &= ~E1000_RCTL_SZ_4096;
rctl |= E1000_RCTL_BSEX;
switch (adapter->rx_buffer_len) {
case 256:
rctl |= E1000_RCTL_SZ_256;
rctl &= ~E1000_RCTL_BSEX;
break;
case 512:
rctl |= E1000_RCTL_SZ_512;
rctl &= ~E1000_RCTL_BSEX;
break;
case 1024:
rctl |= E1000_RCTL_SZ_1024;
rctl &= ~E1000_RCTL_BSEX;
break;
case 2048:
default:
rctl |= E1000_RCTL_SZ_2048;
@ -3781,7 +3786,7 @@ static int e1000_tso(struct e1000_adapter *adapter,
0, IPPROTO_TCP, 0);
cmd_length = E1000_TXD_CMD_IP;
ipcse = skb_transport_offset(skb) - 1;
} else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
} else if (skb_is_gso_v6(skb)) {
ipv6_hdr(skb)->payload_len = 0;
tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
&ipv6_hdr(skb)->daddr,
@ -3962,13 +3967,13 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
dma_error:
dev_err(&pdev->dev, "TX DMA map failed\n");
buffer_info->dma = 0;
count--;
while (count >= 0) {
if (count)
count--;
i--;
if (i < 0)
while (count--) {
if (i==0)
i += tx_ring->count;
i--;
buffer_info = &tx_ring->buffer_info[i];
e1000_put_txbuf(adapter, buffer_info);;
}
@ -4317,13 +4322,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
* fragmented skbs
*/
if (max_frame <= 256)
adapter->rx_buffer_len = 256;
else if (max_frame <= 512)
adapter->rx_buffer_len = 512;
else if (max_frame <= 1024)
adapter->rx_buffer_len = 1024;
else if (max_frame <= 2048)
if (max_frame <= 2048)
adapter->rx_buffer_len = 2048;
else
adapter->rx_buffer_len = 4096;


@ -3422,7 +3422,7 @@ static inline int igb_tso_adv(struct igb_ring *tx_ring,
iph->daddr, 0,
IPPROTO_TCP,
0);
} else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
} else if (skb_is_gso_v6(skb)) {
ipv6_hdr(skb)->payload_len = 0;
tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
&ipv6_hdr(skb)->daddr,
@ -3584,6 +3584,7 @@ static inline int igb_tx_map_adv(struct igb_ring *tx_ring, struct sk_buff *skb,
for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
struct skb_frag_struct *frag;
count++;
i++;
if (i == tx_ring->count)
i = 0;
@ -3605,7 +3606,6 @@ static inline int igb_tx_map_adv(struct igb_ring *tx_ring, struct sk_buff *skb,
if (pci_dma_mapping_error(pdev, buffer_info->dma))
goto dma_error;
count++;
}
tx_ring->buffer_info[i].skb = skb;


@ -1963,7 +1963,7 @@ static int igbvf_tso(struct igbvf_adapter *adapter,
iph->daddr, 0,
IPPROTO_TCP,
0);
} else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
} else if (skb_is_gso_v6(skb)) {
ipv6_hdr(skb)->payload_len = 0;
tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
&ipv6_hdr(skb)->daddr,
@ -2126,6 +2126,7 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
struct skb_frag_struct *frag;
count++;
i++;
if (i == tx_ring->count)
i = 0;
@ -2146,7 +2147,6 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
PCI_DMA_TODEVICE);
if (pci_dma_mapping_error(pdev, buffer_info->dma))
goto dma_error;
count++;
}
tx_ring->buffer_info[i].skb = skb;
@ -2163,14 +2163,14 @@ dma_error:
buffer_info->length = 0;
buffer_info->next_to_watch = 0;
buffer_info->mapped_as_page = false;
count--;
if (count)
count--;
/* clear timestamp and dma mappings for remaining portion of packet */
while (count >= 0) {
count--;
i--;
if (i < 0)
while (count--) {
if (i==0)
i += tx_ring->count;
i--;
buffer_info = &tx_ring->buffer_info[i];
igbvf_put_txbuf(adapter, buffer_info);
}


@ -1363,13 +1363,13 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
dma_error:
dev_err(&pdev->dev, "TX DMA map failed\n");
buffer_info->dma = 0;
count--;
while (count >= 0) {
if (count)
count--;
i--;
if (i < 0)
while (count--) {
if (i==0)
i += tx_ring->count;
i--;
buffer_info = &tx_ring->buffer_info[i];
ixgb_unmap_and_free_tx_resource(adapter, buffer_info);
}
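The same rewrite of the dma_error unwind path appears for e1000, e1000e and igbvf above and for ixgbe just below: on a mapping failure the code now steps back over the buffers that were already DMA-mapped, wrapping the ring index when it reaches slot 0. A small stand-alone sketch of that backwards walk (a simplification; the real loops unmap and free the tx buffers rather than printing them):

    #include <stdio.h>

    /* Undo 'count' already-mapped buffers, walking backwards from slot 'i'. */
    static void unwind(unsigned int ring_size, unsigned int count, unsigned int i)
    {
            if (count)
                    count--;                 /* the buffer that failed was never mapped */
            while (count--) {
                    if (i == 0)
                            i += ring_size;  /* wrap backwards past the start of the ring */
                    i--;
                    printf("unmap buffer_info[%u]\n", i);
            }
    }

    int main(void)
    {
            unwind(8, 3, 1);                 /* visits slots 0 and 7 */
            return 0;
    }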


@ -4928,7 +4928,7 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter,
iph->daddr, 0,
IPPROTO_TCP,
0);
} else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
} else if (skb_is_gso_v6(skb)) {
ipv6_hdr(skb)->payload_len = 0;
tcp_hdr(skb)->check =
~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
@ -5167,14 +5167,14 @@ dma_error:
tx_buffer_info->dma = 0;
tx_buffer_info->time_stamp = 0;
tx_buffer_info->next_to_watch = 0;
count--;
if (count)
count--;
/* clear timestamp and dma mappings for remaining portion of packet */
while (count >= 0) {
count--;
i--;
if (i < 0)
while (count--) {
if (i==0)
i += tx_ring->count;
i--;
tx_buffer_info = &tx_ring->tx_buffer_info[i];
ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info);
}


@ -717,6 +717,7 @@ static struct pcmcia_device_id fmvj18x_ids[] = {
PCMCIA_PFC_DEVICE_PROD_ID12(0, "NEC", "PK-UG-J001" ,0x18df0ba0 ,0x831b1064),
PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0105, 0x0d0a),
PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0105, 0x0e0a),
PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0e01),
PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0a05),
PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x1101),
PCMCIA_DEVICE_NULL,


@ -410,7 +410,6 @@ EXPORT_SYMBOL(phy_start_aneg);
static void phy_change(struct work_struct *work);
static void phy_state_machine(struct work_struct *work);
/**
* phy_start_machine - start PHY state machine tracking
@ -430,7 +429,6 @@ void phy_start_machine(struct phy_device *phydev,
{
phydev->adjust_state = handler;
INIT_DELAYED_WORK(&phydev->state_queue, phy_state_machine);
schedule_delayed_work(&phydev->state_queue, HZ);
}
@ -761,7 +759,7 @@ EXPORT_SYMBOL(phy_start);
* phy_state_machine - Handle the state machine
* @work: work_struct that describes the work to be done
*/
static void phy_state_machine(struct work_struct *work)
void phy_state_machine(struct work_struct *work)
{
struct delayed_work *dwork = to_delayed_work(work);
struct phy_device *phydev =


@ -177,6 +177,7 @@ struct phy_device* phy_device_create(struct mii_bus *bus, int addr, int phy_id)
dev->state = PHY_DOWN;
mutex_init(&dev->lock);
INIT_DELAYED_WORK(&dev->state_queue, phy_state_machine);
return dev;
}


@ -4119,7 +4119,7 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
err = pcie_set_readrq(pdev, 4096);
if (err) {
dev_err(&pdev->dev, "Set readrq failed.\n");
goto err_out;
goto err_out1;
}
err = pci_request_regions(pdev, DRV_NAME);
@ -4140,7 +4140,7 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
if (err) {
dev_err(&pdev->dev, "No usable DMA configuration.\n");
goto err_out;
goto err_out2;
}
/* Set PCIe reset type for EEH to fundamental. */
@ -4152,7 +4152,7 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
if (!qdev->reg_base) {
dev_err(&pdev->dev, "Register mapping failed.\n");
err = -ENOMEM;
goto err_out;
goto err_out2;
}
qdev->doorbell_area_size = pci_resource_len(pdev, 3);
@ -4162,14 +4162,14 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
if (!qdev->doorbell_area) {
dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
err = -ENOMEM;
goto err_out;
goto err_out2;
}
err = ql_get_board_info(qdev);
if (err) {
dev_err(&pdev->dev, "Register access failed.\n");
err = -EIO;
goto err_out;
goto err_out2;
}
qdev->msg_enable = netif_msg_init(debug, default_msg);
spin_lock_init(&qdev->hw_lock);
@ -4179,7 +4179,7 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
err = qdev->nic_ops->get_flash(qdev);
if (err) {
dev_err(&pdev->dev, "Invalid FLASH.\n");
goto err_out;
goto err_out2;
}
memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
@ -4212,8 +4212,9 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
DRV_NAME, DRV_VERSION);
}
return 0;
err_out:
err_out2:
ql_release_all(pdev);
err_out1:
pci_disable_device(pdev);
return err;
}


@ -3421,7 +3421,7 @@ static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
break;
}
} else {
if (!(val64 & busy_bit)) {
if (val64 & busy_bit) {
ret = SUCCESS;
break;
}


@ -804,7 +804,7 @@ int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type,
loff_t offset, u8 *buffer, size_t length)
{
u8 inbuf[MC_CMD_NVRAM_READ_IN_LEN];
u8 outbuf[MC_CMD_NVRAM_READ_OUT_LEN(length)];
u8 outbuf[MC_CMD_NVRAM_READ_OUT_LEN(EFX_MCDI_NVRAM_LEN_MAX)];
size_t outlen;
int rc;
@ -828,7 +828,7 @@ fail:
int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
loff_t offset, const u8 *buffer, size_t length)
{
u8 inbuf[MC_CMD_NVRAM_WRITE_IN_LEN(length)];
u8 inbuf[MC_CMD_NVRAM_WRITE_IN_LEN(EFX_MCDI_NVRAM_LEN_MAX)];
int rc;
MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_TYPE, type);
@ -838,7 +838,8 @@ int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
BUILD_BUG_ON(MC_CMD_NVRAM_WRITE_OUT_LEN != 0);
rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf, sizeof(inbuf),
rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf,
ALIGN(MC_CMD_NVRAM_WRITE_IN_LEN(length), 4),
NULL, 0, NULL);
if (rc)
goto fail;


@ -111,6 +111,7 @@ extern int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type,
extern int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
loff_t offset, const u8 *buffer,
size_t length);
#define EFX_MCDI_NVRAM_LEN_MAX 128
extern int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type,
loff_t offset, size_t length);
extern int efx_mcdi_nvram_update_finish(struct efx_nic *efx,


@ -1090,8 +1090,10 @@
#define MC_CMD_MAC_RX_LANES01_DISP_ERR 57
#define MC_CMD_MAC_RX_LANES23_DISP_ERR 58
#define MC_CMD_MAC_RX_MATCH_FAULT 59
#define MC_CMD_GMAC_DMABUF_START 64
#define MC_CMD_GMAC_DMABUF_END 95
/* Insert new members here. */
#define MC_CMD_MAC_GENERATION_END 60
#define MC_CMD_MAC_GENERATION_END 96
#define MC_CMD_MAC_NSTATS (MC_CMD_MAC_GENERATION_END+1)
/* MC_CMD_MAC_STATS:


@ -23,7 +23,6 @@
#include "mcdi_pcol.h"
#define EFX_SPI_VERIFY_BUF_LEN 16
#define EFX_MCDI_CHUNK_LEN 128
struct efx_mtd_partition {
struct mtd_info mtd;
@ -428,7 +427,7 @@ static int siena_mtd_read(struct mtd_info *mtd, loff_t start,
int rc = 0;
while (offset < end) {
chunk = min_t(size_t, end - offset, EFX_MCDI_CHUNK_LEN);
chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX);
rc = efx_mcdi_nvram_read(efx, part->mcdi.nvram_type, offset,
buffer, chunk);
if (rc)
@ -491,7 +490,7 @@ static int siena_mtd_write(struct mtd_info *mtd, loff_t start,
}
while (offset < end) {
chunk = min_t(size_t, end - offset, EFX_MCDI_CHUNK_LEN);
chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX);
rc = efx_mcdi_nvram_write(efx, part->mcdi.nvram_type, offset,
buffer, chunk);
if (rc)


@ -318,12 +318,6 @@ static int qt202x_reset_phy(struct efx_nic *efx)
/* Wait 250ms for the PHY to complete bootup */
msleep(250);
/* Check that all the MMDs we expect are present and responding. We
* expect faults on some if the link is down, but not on the PHY XS */
rc = efx_mdio_check_mmds(efx, QT202X_REQUIRED_DEVS, MDIO_DEVS_PHYXS);
if (rc < 0)
goto fail;
falcon_board(efx)->type->init_phy(efx);
return rc;


@ -644,6 +644,7 @@ static void sky2_phy_power_up(struct sky2_hw *hw, unsigned port)
{
u32 reg1;
sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
reg1 &= ~phy_power[port];
@ -651,6 +652,7 @@ static void sky2_phy_power_up(struct sky2_hw *hw, unsigned port)
reg1 |= coma_mode[port];
sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
sky2_pci_read32(hw, PCI_DEV_REG1);
if (hw->chip_id == CHIP_ID_YUKON_FE)
@ -707,9 +709,11 @@ static void sky2_phy_power_down(struct sky2_hw *hw, unsigned port)
gm_phy_write(hw, port, PHY_MARV_CTRL, PHY_CT_PDOWN);
}
sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
reg1 |= phy_power[port]; /* set PHY to PowerDown/COMA Mode */
sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
}
/* Force a renegotiation */
@ -2149,7 +2153,9 @@ static void sky2_qlink_intr(struct sky2_hw *hw)
/* reset PHY Link Detect */
phy = sky2_pci_read16(hw, PSM_CONFIG_REG4);
sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
sky2_pci_write16(hw, PSM_CONFIG_REG4, phy | 1);
sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
sky2_link_up(sky2);
}
@ -2640,6 +2646,7 @@ static void sky2_hw_intr(struct sky2_hw *hw)
if (status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) {
u16 pci_err;
sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
pci_err = sky2_pci_read16(hw, PCI_STATUS);
if (net_ratelimit())
dev_err(&pdev->dev, "PCI hardware error (0x%x)\n",
@ -2647,12 +2654,14 @@ static void sky2_hw_intr(struct sky2_hw *hw)
sky2_pci_write16(hw, PCI_STATUS,
pci_err | PCI_STATUS_ERROR_BITS);
sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
}
if (status & Y2_IS_PCI_EXP) {
/* PCI-Express uncorrectable Error occurred */
u32 err;
sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
err = sky2_read32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS);
sky2_write32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS,
0xfffffffful);
@ -2660,6 +2669,7 @@ static void sky2_hw_intr(struct sky2_hw *hw)
dev_err(&pdev->dev, "PCI Express error (0x%x)\n", err);
sky2_read32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS);
sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
}
if (status & Y2_HWE_L1_MASK)
@ -3038,6 +3048,7 @@ static void sky2_reset(struct sky2_hw *hw)
}
sky2_power_on(hw);
sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
for (i = 0; i < hw->ports; i++) {
sky2_write8(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_SET);
@ -3074,6 +3085,7 @@ static void sky2_reset(struct sky2_hw *hw)
reg <<= PSM_CONFIG_REG4_TIMER_PHY_LINK_DETECT_BASE;
/* reset PHY Link Detect */
sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
sky2_pci_write16(hw, PSM_CONFIG_REG4,
reg | PSM_CONFIG_REG4_RST_PHY_LINK_DETECT);
sky2_pci_write16(hw, PSM_CONFIG_REG4, reg);
@ -3091,6 +3103,7 @@ static void sky2_reset(struct sky2_hw *hw)
/* restore the PCIe Link Control register */
sky2_pci_write16(hw, cap + PCI_EXP_LNKCTL, reg);
}
sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
/* re-enable PEX PM in PEX PHY debug reg. 8 (clear bit 12) */
sky2_write32(hw, Y2_PEX_PHY_DATA, PEX_DB_ACCESS | (0x08UL << 16));
@ -3228,6 +3241,27 @@ static inline u8 sky2_wol_supported(const struct sky2_hw *hw)
return sky2_is_copper(hw) ? (WAKE_PHY | WAKE_MAGIC) : 0;
}
static void sky2_hw_set_wol(struct sky2_hw *hw)
{
int wol = 0;
int i;
for (i = 0; i < hw->ports; i++) {
struct net_device *dev = hw->dev[i];
struct sky2_port *sky2 = netdev_priv(dev);
if (sky2->wol)
wol = 1;
}
if (hw->chip_id == CHIP_ID_YUKON_EC_U ||
hw->chip_id == CHIP_ID_YUKON_EX ||
hw->chip_id == CHIP_ID_YUKON_FE_P)
sky2_write32(hw, B0_CTST, wol ? Y2_HW_WOL_ON : Y2_HW_WOL_OFF);
device_set_wakeup_enable(&hw->pdev->dev, wol);
}
static void sky2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
const struct sky2_port *sky2 = netdev_priv(dev);
@ -3247,13 +3281,7 @@ static int sky2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
sky2->wol = wol->wolopts;
if (hw->chip_id == CHIP_ID_YUKON_EC_U ||
hw->chip_id == CHIP_ID_YUKON_EX ||
hw->chip_id == CHIP_ID_YUKON_FE_P)
sky2_write32(hw, B0_CTST, sky2->wol
? Y2_HW_WOL_ON : Y2_HW_WOL_OFF);
device_set_wakeup_enable(&hw->pdev->dev, sky2->wol);
sky2_hw_set_wol(hw);
if (!netif_running(dev))
sky2_wol_init(sky2);


@ -249,6 +249,7 @@ static struct pci_device_id tulip_pci_tbl[] = {
{ 0x17B3, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
{ 0x10b7, 0x9300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* 3Com 3CSOHO100B-TX */
{ 0x14ea, 0xab08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* Planex FNW-3602-TX */
{ 0x1414, 0x0001, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* Microsoft MN-120 */
{ 0x1414, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
{ } /* terminate list */
};


@ -3279,13 +3279,12 @@ static int ucc_geth_tx(struct net_device *dev, u8 txQ)
/* Handle the transmitted buffer and release */
/* the BD to be used with the current frame */
if (bd == ugeth->txBd[txQ]) /* queue empty? */
skb = ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]];
if (!skb)
break;
dev->stats.tx_packets++;
skb = ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]];
if (skb_queue_len(&ugeth->rx_recycle) < RX_BD_RING_LEN &&
skb_recycle_check(skb,
ugeth->ug_info->uf_info.max_rx_buf_length +


@ -395,8 +395,7 @@ static void refill_work(struct work_struct *work)
vi = container_of(work, struct virtnet_info, refill.work);
napi_disable(&vi->napi);
try_fill_recv(vi, GFP_KERNEL);
still_empty = (vi->num == 0);
still_empty = !try_fill_recv(vi, GFP_KERNEL);
napi_enable(&vi->napi);
/* In theory, this can happen: if we don't get any buffers in


@ -151,6 +151,7 @@ enum {
/* Device IDs */
USB_DEVICE_ID_I6050 = 0x0186,
USB_DEVICE_ID_I6050_2 = 0x0188,
};
@ -234,6 +235,7 @@ struct i2400mu {
u8 rx_size_auto_shrink;
struct dentry *debugfs_dentry;
unsigned i6050:1; /* 1 if this is a 6050 based SKU */
};


@ -478,7 +478,16 @@ int i2400mu_probe(struct usb_interface *iface,
i2400m->bus_bm_wait_for_ack = i2400mu_bus_bm_wait_for_ack;
i2400m->bus_bm_mac_addr_impaired = 0;
if (id->idProduct == USB_DEVICE_ID_I6050) {
switch (id->idProduct) {
case USB_DEVICE_ID_I6050:
case USB_DEVICE_ID_I6050_2:
i2400mu->i6050 = 1;
break;
default:
break;
}
if (i2400mu->i6050) {
i2400m->bus_fw_names = i2400mu_bus_fw_names_6050;
i2400mu->endpoint_cfg.bulk_out = 0;
i2400mu->endpoint_cfg.notification = 3;
@ -719,6 +728,7 @@ int i2400mu_post_reset(struct usb_interface *iface)
static
struct usb_device_id i2400mu_id_table[] = {
{ USB_DEVICE(0x8086, USB_DEVICE_ID_I6050) },
{ USB_DEVICE(0x8086, USB_DEVICE_ID_I6050_2) },
{ USB_DEVICE(0x8086, 0x0181) },
{ USB_DEVICE(0x8086, 0x1403) },
{ USB_DEVICE(0x8086, 0x1405) },


@ -1598,6 +1598,7 @@ struct iwl_cfg iwl5300_agn_cfg = {
.use_bsm = false,
.ht_greenfield_support = true,
.led_compensation = 51,
.use_rts_for_ht = true, /* use rts/cts protection */
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
.sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED,
};
@ -1622,6 +1623,7 @@ struct iwl_cfg iwl5100_bgn_cfg = {
.use_bsm = false,
.ht_greenfield_support = true,
.led_compensation = 51,
.use_rts_for_ht = true, /* use rts/cts protection */
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
};
@ -1667,6 +1669,7 @@ struct iwl_cfg iwl5100_agn_cfg = {
.use_bsm = false,
.ht_greenfield_support = true,
.led_compensation = 51,
.use_rts_for_ht = true, /* use rts/cts protection */
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
.sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED,
};
@ -1691,6 +1694,7 @@ struct iwl_cfg iwl5350_agn_cfg = {
.use_bsm = false,
.ht_greenfield_support = true,
.led_compensation = 51,
.use_rts_for_ht = true, /* use rts/cts protection */
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
.sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED,
};
@ -1715,6 +1719,7 @@ struct iwl_cfg iwl5150_agn_cfg = {
.use_bsm = false,
.ht_greenfield_support = true,
.led_compensation = 51,
.use_rts_for_ht = true, /* use rts/cts protection */
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
.sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED,
};


@ -1,3 +1,29 @@
/******************************************************************************
*
* Copyright(c) 2009 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
*
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
*****************************************************************************/
#include <linux/module.h>
/* sparse doesn't like tracepoint macros */


@ -1,3 +1,29 @@
/******************************************************************************
*
* Copyright(c) 2009 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
*
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
*****************************************************************************/
#if !defined(__IWLWIFI_DEVICE_TRACE) || defined(TRACE_HEADER_MULTI_READ)
#define __IWLWIFI_DEVICE_TRACE


@ -973,6 +973,10 @@ int iwm_send_pmkid_update(struct iwm_priv *iwm,
memset(&update, 0, sizeof(struct iwm_umac_pmkid_update));
update.hdr.oid = UMAC_WIFI_IF_CMD_PMKID_UPDATE;
update.hdr.buf_size = cpu_to_le16(sizeof(struct iwm_umac_pmkid_update) -
sizeof(struct iwm_umac_wifi_if));
update.command = cpu_to_le32(command);
if (pmksa->bssid)
memcpy(&update.bssid, pmksa->bssid, ETH_ALEN);


@ -463,6 +463,7 @@ struct iwm_umac_cmd_stop_resume_tx {
#define IWM_CMD_PMKID_FLUSH 3
struct iwm_umac_pmkid_update {
struct iwm_umac_wifi_if hdr;
__le32 command;
u8 bssid[ETH_ALEN];
__le16 reserved;


@ -197,6 +197,14 @@ static void p54p_check_rx_ring(struct ieee80211_hw *dev, u32 *index,
i %= ring_limit;
continue;
}
if (unlikely(len > priv->common.rx_mtu)) {
if (net_ratelimit())
dev_err(&priv->pdev->dev, "rx'd frame size "
"exceeds length threshold.\n");
len = priv->common.rx_mtu;
}
skb_put(skb, len);
if (p54_rx(dev, skb)) {


@ -62,6 +62,7 @@ static struct usb_device_id usb_ids[] = {
{ USB_DEVICE(0x6891, 0xa727), .driver_info = DEVICE_ZD1211 },
/* ZD1211B */
{ USB_DEVICE(0x0053, 0x5301), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x0409, 0x0248), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x0411, 0x00da), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x0471, 0x1236), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x0471, 0x1237), .driver_info = DEVICE_ZD1211B },


@ -758,6 +758,7 @@ static struct pcmcia_device_id serial_ids[] = {
PCMCIA_PFC_DEVICE_PROD_ID12(1, "PCMCIAs", "LanModem", 0xdcfe12d3, 0xc67c648f),
PCMCIA_PFC_DEVICE_PROD_ID12(1, "TDK", "GlobalNetworker 3410/3412", 0x1eae9475, 0xd9a93bed),
PCMCIA_PFC_DEVICE_PROD_ID12(1, "Xircom", "CreditCard Ethernet+Modem II", 0x2e3ee845, 0xeca401bf),
PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x0e01),
PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x0a05),
PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x1101),
PCMCIA_MFC_DEVICE_MANF_CARD(0, 0x0104, 0x0070),


@ -485,6 +485,7 @@ void phy_driver_unregister(struct phy_driver *drv);
int phy_driver_register(struct phy_driver *new_driver);
void phy_prepare_link(struct phy_device *phydev,
void (*adjust_link)(struct net_device *));
void phy_state_machine(struct work_struct *work);
void phy_start_machine(struct phy_device *phydev,
void (*handler)(struct net_device *));
void phy_stop_machine(struct phy_device *phydev);


@ -5,6 +5,7 @@
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <linux/xfrm.h>
#include <net/dst_ops.h>
struct ctl_table_header;
@ -42,6 +43,11 @@ struct netns_xfrm {
unsigned int policy_count[XFRM_POLICY_MAX * 2];
struct work_struct policy_hash_work;
struct dst_ops xfrm4_dst_ops;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
struct dst_ops xfrm6_dst_ops;
#endif
struct sock *nlsk;
struct sock *nlsk_stash;


@ -132,6 +132,8 @@ static __inline__ void nr_node_put(struct nr_node *nr_node)
static __inline__ void nr_neigh_put(struct nr_neigh *nr_neigh)
{
if (atomic_dec_and_test(&nr_neigh->refcount)) {
if (nr_neigh->ax25)
ax25_cb_put(nr_neigh->ax25);
kfree(nr_neigh->digipeat);
kfree(nr_neigh);
}


@ -1367,8 +1367,8 @@ struct xfrmk_spdinfo {
extern struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 seq);
extern int xfrm_state_delete(struct xfrm_state *x);
extern int xfrm_state_flush(struct net *net, u8 proto, struct xfrm_audit *audit_info);
extern void xfrm_sad_getinfo(struct xfrmk_sadinfo *si);
extern void xfrm_spd_getinfo(struct xfrmk_spdinfo *si);
extern void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si);
extern void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si);
extern int xfrm_replay_check(struct xfrm_state *x,
struct sk_buff *skb, __be32 seq);
extern void xfrm_replay_advance(struct xfrm_state *x, __be32 seq);


@ -163,7 +163,7 @@ int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,
goto err_unlock;
}
rx_stats = per_cpu_ptr(vlan_dev_info(dev)->vlan_rx_stats,
rx_stats = per_cpu_ptr(vlan_dev_info(skb->dev)->vlan_rx_stats,
smp_processor_id());
rx_stats->rx_packets++;
rx_stats->rx_bytes += skb->len;


@ -819,7 +819,7 @@ static int aarp_rcv(struct sk_buff *skb, struct net_device *dev,
ma = &ifa->address;
else { /* We need to make a copy of the entry. */
da.s_node = sa.s_node;
da.s_net = da.s_net;
da.s_net = sa.s_net;
ma = &da;
}


@ -92,6 +92,12 @@ ax25_cb *ax25_send_frame(struct sk_buff *skb, int paclen, ax25_address *src, ax2
#endif
}
/*
* There is one ref for the state machine; a caller needs
* one more to put it back, just like with the existing one.
*/
ax25_cb_hold(ax25);
ax25_cb_add(ax25);
ax25->state = AX25_STATE_1;
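The comment above states the reference-counting contract: ax25_send_frame() now returns the control block with an extra reference held, so a caller that caches the pointer (see the netrom and rose hunks later in this diff) has to drop its previous reference with ax25_cb_put(). A toy illustration of that hold/put discipline (simplified stand-ins, not the ax25_cb API):

    #include <stdio.h>
    #include <stdlib.h>

    struct cb { int refcount; };             /* stand-in for ax25_cb */

    static void cb_hold(struct cb *c) { c->refcount++; }

    static void cb_put(struct cb *c)
    {
            if (--c->refcount == 0) {
                    printf("last reference gone, freeing\n");
                    free(c);
            }
    }

    int main(void)
    {
            struct cb *c = calloc(1, sizeof(*c));
            cb_hold(c);      /* reference owned by the connection state machine */
            cb_hold(c);      /* extra reference handed back to the caller */
            cb_put(c);       /* the caller eventually releases its copy */
            cb_put(c);       /* state machine tears down: count hits zero, freed */
            return 0;
    }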


@ -77,34 +77,24 @@ int ccid_getsockopt_builtin_ccids(struct sock *sk, int len,
return err;
}
static struct kmem_cache *ccid_kmem_cache_create(int obj_size, const char *fmt,...)
static struct kmem_cache *ccid_kmem_cache_create(int obj_size, char *slab_name_fmt, const char *fmt,...)
{
struct kmem_cache *slab;
char slab_name_fmt[32], *slab_name;
va_list args;
va_start(args, fmt);
vsnprintf(slab_name_fmt, sizeof(slab_name_fmt), fmt, args);
va_end(args);
slab_name = kstrdup(slab_name_fmt, GFP_KERNEL);
if (slab_name == NULL)
return NULL;
slab = kmem_cache_create(slab_name, sizeof(struct ccid) + obj_size, 0,
slab = kmem_cache_create(slab_name_fmt, sizeof(struct ccid) + obj_size, 0,
SLAB_HWCACHE_ALIGN, NULL);
if (slab == NULL)
kfree(slab_name);
return slab;
}
static void ccid_kmem_cache_destroy(struct kmem_cache *slab)
{
if (slab != NULL) {
const char *name = kmem_cache_name(slab);
if (slab != NULL)
kmem_cache_destroy(slab);
kfree(name);
}
}
static int ccid_activate(struct ccid_operations *ccid_ops)
@ -113,6 +103,7 @@ static int ccid_activate(struct ccid_operations *ccid_ops)
ccid_ops->ccid_hc_rx_slab =
ccid_kmem_cache_create(ccid_ops->ccid_hc_rx_obj_size,
ccid_ops->ccid_hc_rx_slab_name,
"ccid%u_hc_rx_sock",
ccid_ops->ccid_id);
if (ccid_ops->ccid_hc_rx_slab == NULL)
@ -120,6 +111,7 @@ static int ccid_activate(struct ccid_operations *ccid_ops)
ccid_ops->ccid_hc_tx_slab =
ccid_kmem_cache_create(ccid_ops->ccid_hc_tx_obj_size,
ccid_ops->ccid_hc_tx_slab_name,
"ccid%u_hc_tx_sock",
ccid_ops->ccid_id);
if (ccid_ops->ccid_hc_tx_slab == NULL)


@ -49,6 +49,8 @@ struct ccid_operations {
const char *ccid_name;
struct kmem_cache *ccid_hc_rx_slab,
*ccid_hc_tx_slab;
char ccid_hc_rx_slab_name[32];
char ccid_hc_tx_slab_name[32];
__u32 ccid_hc_rx_obj_size,
ccid_hc_tx_obj_size;
/* Interface Routines */


@ -161,7 +161,8 @@ static __init int dccpprobe_init(void)
if (!proc_net_fops_create(&init_net, procname, S_IRUSR, &dccpprobe_fops))
goto err0;
ret = register_jprobe(&dccp_send_probe);
ret = try_then_request_module((register_jprobe(&dccp_send_probe) == 0),
"dccp");
if (ret)
goto err1;


@ -368,7 +368,7 @@ static int inet_diag_bc_run(const void *bc, int len,
yes = entry->sport >= op[1].no;
break;
case INET_DIAG_BC_S_LE:
yes = entry->dport <= op[1].no;
yes = entry->sport <= op[1].no;
break;
case INET_DIAG_BC_D_GE:
yes = entry->dport >= op[1].no;


@ -586,7 +586,9 @@ static void __net_exit ip_rt_do_proc_exit(struct net *net)
{
remove_proc_entry("rt_cache", net->proc_net_stat);
remove_proc_entry("rt_cache", net->proc_net);
#ifdef CONFIG_NET_CLS_ROUTE
remove_proc_entry("rt_acct", net->proc_net);
#endif
}
static struct pernet_operations ip_rt_proc_ops __net_initdata = {


@ -39,9 +39,9 @@ static int port __read_mostly = 0;
MODULE_PARM_DESC(port, "Port to match (0=all)");
module_param(port, int, 0);
static int bufsize __read_mostly = 4096;
static unsigned int bufsize __read_mostly = 4096;
MODULE_PARM_DESC(bufsize, "Log buffer size in packets (4096)");
module_param(bufsize, int, 0);
module_param(bufsize, uint, 0);
static int full __read_mostly;
MODULE_PARM_DESC(full, "Full log (1=every ack packet received, 0=only cwnd changes)");
@ -75,12 +75,12 @@ static struct {
static inline int tcp_probe_used(void)
{
return (tcp_probe.head - tcp_probe.tail) % bufsize;
return (tcp_probe.head - tcp_probe.tail) & (bufsize - 1);
}
static inline int tcp_probe_avail(void)
{
return bufsize - tcp_probe_used();
return bufsize - tcp_probe_used() - 1;
}
/*
@ -116,7 +116,7 @@ static int jtcp_rcv_established(struct sock *sk, struct sk_buff *skb,
p->ssthresh = tcp_current_ssthresh(sk);
p->srtt = tp->srtt >> 3;
tcp_probe.head = (tcp_probe.head + 1) % bufsize;
tcp_probe.head = (tcp_probe.head + 1) & (bufsize - 1);
}
tcp_probe.lastcwnd = tp->snd_cwnd;
spin_unlock(&tcp_probe.lock);
@ -149,7 +149,7 @@ static int tcpprobe_open(struct inode * inode, struct file * file)
static int tcpprobe_sprint(char *tbuf, int n)
{
const struct tcp_log *p
= tcp_probe.log + tcp_probe.tail % bufsize;
= tcp_probe.log + tcp_probe.tail;
struct timespec tv
= ktime_to_timespec(ktime_sub(p->tstamp, tcp_probe.start));
@ -192,7 +192,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
width = tcpprobe_sprint(tbuf, sizeof(tbuf));
if (cnt + width < len)
tcp_probe.tail = (tcp_probe.tail + 1) % bufsize;
tcp_probe.tail = (tcp_probe.tail + 1) & (bufsize - 1);
spin_unlock_bh(&tcp_probe.lock);
@ -222,9 +222,10 @@ static __init int tcpprobe_init(void)
init_waitqueue_head(&tcp_probe.wait);
spin_lock_init(&tcp_probe.lock);
if (bufsize < 0)
if (bufsize == 0)
return -EINVAL;
bufsize = roundup_pow_of_two(bufsize);
tcp_probe.log = kcalloc(bufsize, sizeof(struct tcp_log), GFP_KERNEL);
if (!tcp_probe.log)
goto err0;
@ -236,7 +237,7 @@ static __init int tcpprobe_init(void)
if (ret)
goto err1;
pr_info("TCP probe registered (port=%d)\n", port);
pr_info("TCP probe registered (port=%d) bufsize=%u\n", port, bufsize);
return 0;
err1:
proc_net_remove(&init_net, procname);
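For reference, the arithmetic behind this change: masking with (bufsize - 1) only matches '% bufsize' when bufsize is a power of two, which is why tcpprobe_init() now rounds the module parameter up with roundup_pow_of_two(), and tcp_probe_avail() keeps one slot unused so that head == tail always means "empty". A stand-alone sketch of that ring indexing (illustrative constants, not the module's interface):

    #include <stdio.h>

    #define BUFSIZE 8u                       /* must be a power of two */

    static unsigned int head, tail;

    static unsigned int ring_used(void)
    {
            return (head - tail) & (BUFSIZE - 1);   /* same as % BUFSIZE, but cheaper */
    }

    static unsigned int ring_avail(void)
    {
            return BUFSIZE - ring_used() - 1;       /* one slot always stays free */
    }

    int main(void)
    {
            head = (head + 1) & (BUFSIZE - 1);      /* producer stores one entry */
            printf("used=%u avail=%u\n", ring_used(), ring_avail());   /* used=1 avail=6 */
            return 0;
    }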


@ -15,7 +15,6 @@
#include <net/xfrm.h>
#include <net/ip.h>
static struct dst_ops xfrm4_dst_ops;
static struct xfrm_policy_afinfo xfrm4_policy_afinfo;
static struct dst_entry *xfrm4_dst_lookup(struct net *net, int tos,
@ -190,8 +189,10 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
static inline int xfrm4_garbage_collect(struct dst_ops *ops)
{
xfrm4_policy_afinfo.garbage_collect(&init_net);
return (atomic_read(&xfrm4_dst_ops.entries) > xfrm4_dst_ops.gc_thresh*2);
struct net *net = container_of(ops, struct net, xfrm.xfrm4_dst_ops);
xfrm4_policy_afinfo.garbage_collect(net);
return (atomic_read(&ops->entries) > ops->gc_thresh * 2);
}
static void xfrm4_update_pmtu(struct dst_entry *dst, u32 mtu)
@ -268,7 +269,7 @@ static struct xfrm_policy_afinfo xfrm4_policy_afinfo = {
static struct ctl_table xfrm4_policy_table[] = {
{
.procname = "xfrm4_gc_thresh",
.data = &xfrm4_dst_ops.gc_thresh,
.data = &init_net.xfrm.xfrm4_dst_ops.gc_thresh,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
@ -295,8 +296,6 @@ static void __exit xfrm4_policy_fini(void)
void __init xfrm4_init(int rt_max_size)
{
xfrm4_state_init();
xfrm4_policy_init();
/*
* Select a default value for the gc_thresh based on the main route
* table hash size. It seems to me the worst case scenario is when
@ -308,6 +307,9 @@ void __init xfrm4_init(int rt_max_size)
* and start cleaning when we're 1/2 full
*/
xfrm4_dst_ops.gc_thresh = rt_max_size/2;
xfrm4_state_init();
xfrm4_policy_init();
#ifdef CONFIG_SYSCTL
sysctl_hdr = register_net_sysctl_table(&init_net, net_ipv4_ctl_path,
xfrm4_policy_table);
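The key move in this hunk is xfrm4_garbage_collect(): instead of hard-coding init_net, it recovers the owning namespace from the dst_ops pointer it is handed, because that dst_ops is now embedded in struct net. A stand-alone illustration of the container_of() step with trimmed-down structures (in the kernel the member is net->xfrm.xfrm4_dst_ops):

    #include <stddef.h>
    #include <stdio.h>

    struct dst_ops { int gc_thresh; };
    struct net     { int id; struct dst_ops xfrm4_dst_ops; };

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    static void garbage_collect(struct dst_ops *ops)
    {
            /* step back from the embedded member to the enclosing struct net */
            struct net *net = container_of(ops, struct net, xfrm4_dst_ops);
            printf("collecting in netns %d (gc_thresh=%d)\n", net->id, ops->gc_thresh);
    }

    int main(void)
    {
            struct net ns = { .id = 1, .xfrm4_dst_ops = { .gc_thresh = 1024 } };
            garbage_collect(&ns.xfrm4_dst_ops);
            return 0;
    }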


@ -24,7 +24,6 @@
#include <net/mip6.h>
#endif
static struct dst_ops xfrm6_dst_ops;
static struct xfrm_policy_afinfo xfrm6_policy_afinfo;
static struct dst_entry *xfrm6_dst_lookup(struct net *net, int tos,
@ -224,8 +223,10 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
static inline int xfrm6_garbage_collect(struct dst_ops *ops)
{
xfrm6_policy_afinfo.garbage_collect(&init_net);
return (atomic_read(&xfrm6_dst_ops.entries) > xfrm6_dst_ops.gc_thresh*2);
struct net *net = container_of(ops, struct net, xfrm.xfrm6_dst_ops);
xfrm6_policy_afinfo.garbage_collect(net);
return (atomic_read(&ops->entries) > ops->gc_thresh * 2);
}
static void xfrm6_update_pmtu(struct dst_entry *dst, u32 mtu)
@ -310,7 +311,7 @@ static void xfrm6_policy_fini(void)
static struct ctl_table xfrm6_policy_table[] = {
{
.procname = "xfrm6_gc_thresh",
.data = &xfrm6_dst_ops.gc_thresh,
.data = &init_net.xfrm.xfrm6_dst_ops.gc_thresh,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
@ -326,13 +327,6 @@ int __init xfrm6_init(void)
int ret;
unsigned int gc_thresh;
ret = xfrm6_policy_init();
if (ret)
goto out;
ret = xfrm6_state_init();
if (ret)
goto out_policy;
/*
* We need a good default value for the xfrm6 gc threshold.
* In ipv4 we set it to the route hash table size * 8, which
@ -346,6 +340,15 @@ int __init xfrm6_init(void)
*/
gc_thresh = FIB6_TABLE_HASHSZ * 8;
xfrm6_dst_ops.gc_thresh = (gc_thresh < 1024) ? 1024 : gc_thresh;
ret = xfrm6_policy_init();
if (ret)
goto out;
ret = xfrm6_state_init();
if (ret)
goto out_policy;
#ifdef CONFIG_SYSCTL
sysctl_hdr = register_net_sysctl_table(&init_net, net_ipv6_ctl_path,
xfrm6_policy_table);


@ -1331,6 +1331,9 @@ static int ieee80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
struct ieee80211_conf *conf = &local->hw.conf;
if (sdata->vif.type != NL80211_IFTYPE_STATION)
return -EOPNOTSUPP;
if (!(local->hw.flags & IEEE80211_HW_SUPPORTS_PS))
return -EOPNOTSUPP;


@ -190,7 +190,7 @@ static void rate_control_pid_sample(struct rc_pid_info *pinfo,
rate_control_pid_normalize(pinfo, sband->n_bitrates);
/* Compute the proportional, integral and derivative errors. */
err_prop = (pinfo->target << RC_PID_ARITH_SHIFT) - pf;
err_prop = (pinfo->target - pf) << RC_PID_ARITH_SHIFT;
err_avg = spinfo->err_avg_sc >> pinfo->smoothing_shift;
spinfo->err_avg_sc = spinfo->err_avg_sc - err_avg + err_prop;


@ -843,12 +843,13 @@ int nr_route_frame(struct sk_buff *skb, ax25_cb *ax25)
dptr = skb_push(skb, 1);
*dptr = AX25_P_NETROM;
ax25s = ax25_send_frame(skb, 256, (ax25_address *)dev->dev_addr, &nr_neigh->callsign, nr_neigh->digipeat, nr_neigh->dev);
if (nr_neigh->ax25 && ax25s) {
/* We were already holding this ax25_cb */
ax25s = nr_neigh->ax25;
nr_neigh->ax25 = ax25_send_frame(skb, 256,
(ax25_address *)dev->dev_addr,
&nr_neigh->callsign,
nr_neigh->digipeat, nr_neigh->dev);
if (ax25s)
ax25_cb_put(ax25s);
}
nr_neigh->ax25 = ax25s;
dev_put(dev);
ret = (nr_neigh->ax25 != NULL);


@ -101,13 +101,17 @@ static void rose_t0timer_expiry(unsigned long param)
static int rose_send_frame(struct sk_buff *skb, struct rose_neigh *neigh)
{
ax25_address *rose_call;
ax25_cb *ax25s;
if (ax25cmp(&rose_callsign, &null_ax25_address) == 0)
rose_call = (ax25_address *)neigh->dev->dev_addr;
else
rose_call = &rose_callsign;
ax25s = neigh->ax25;
neigh->ax25 = ax25_send_frame(skb, 260, rose_call, &neigh->callsign, neigh->digipeat, neigh->dev);
if (ax25s)
ax25_cb_put(ax25s);
return (neigh->ax25 != NULL);
}
@ -120,13 +124,17 @@ static int rose_send_frame(struct sk_buff *skb, struct rose_neigh *neigh)
static int rose_link_up(struct rose_neigh *neigh)
{
ax25_address *rose_call;
ax25_cb *ax25s;
if (ax25cmp(&rose_callsign, &null_ax25_address) == 0)
rose_call = (ax25_address *)neigh->dev->dev_addr;
else
rose_call = &rose_callsign;
ax25s = neigh->ax25;
neigh->ax25 = ax25_find_cb(rose_call, &neigh->callsign, neigh->digipeat, neigh->dev);
if (ax25s)
ax25_cb_put(ax25s);
return (neigh->ax25 != NULL);
}


@ -235,6 +235,8 @@ static void rose_remove_neigh(struct rose_neigh *rose_neigh)
if ((s = rose_neigh_list) == rose_neigh) {
rose_neigh_list = rose_neigh->next;
if (rose_neigh->ax25)
ax25_cb_put(rose_neigh->ax25);
kfree(rose_neigh->digipeat);
kfree(rose_neigh);
return;
@ -243,6 +245,8 @@ static void rose_remove_neigh(struct rose_neigh *rose_neigh)
while (s != NULL && s->next != NULL) {
if (s->next == rose_neigh) {
s->next = rose_neigh->next;
if (rose_neigh->ax25)
ax25_cb_put(rose_neigh->ax25);
kfree(rose_neigh->digipeat);
kfree(rose_neigh);
return;
@ -812,6 +816,7 @@ void rose_link_failed(ax25_cb *ax25, int reason)
if (rose_neigh != NULL) {
rose_neigh->ax25 = NULL;
ax25_cb_put(ax25);
rose_del_route_by_neigh(rose_neigh);
rose_kill_by_neigh(rose_neigh);


@ -655,6 +655,7 @@ void __cfg80211_disconnected(struct net_device *dev, const u8 *ie,
memset(&wrqu, 0, sizeof(wrqu));
wrqu.ap_addr.sa_family = ARPHRD_ETHER;
wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL);
wdev->wext.connect.ssid_len = 0;
#endif
}


@ -469,16 +469,16 @@ static inline int xfrm_byidx_should_resize(struct net *net, int total)
return 0;
}
void xfrm_spd_getinfo(struct xfrmk_spdinfo *si)
void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si)
{
read_lock_bh(&xfrm_policy_lock);
si->incnt = init_net.xfrm.policy_count[XFRM_POLICY_IN];
si->outcnt = init_net.xfrm.policy_count[XFRM_POLICY_OUT];
si->fwdcnt = init_net.xfrm.policy_count[XFRM_POLICY_FWD];
si->inscnt = init_net.xfrm.policy_count[XFRM_POLICY_IN+XFRM_POLICY_MAX];
si->outscnt = init_net.xfrm.policy_count[XFRM_POLICY_OUT+XFRM_POLICY_MAX];
si->fwdscnt = init_net.xfrm.policy_count[XFRM_POLICY_FWD+XFRM_POLICY_MAX];
si->spdhcnt = init_net.xfrm.policy_idx_hmask;
si->incnt = net->xfrm.policy_count[XFRM_POLICY_IN];
si->outcnt = net->xfrm.policy_count[XFRM_POLICY_OUT];
si->fwdcnt = net->xfrm.policy_count[XFRM_POLICY_FWD];
si->inscnt = net->xfrm.policy_count[XFRM_POLICY_IN+XFRM_POLICY_MAX];
si->outscnt = net->xfrm.policy_count[XFRM_POLICY_OUT+XFRM_POLICY_MAX];
si->fwdscnt = net->xfrm.policy_count[XFRM_POLICY_FWD+XFRM_POLICY_MAX];
si->spdhcnt = net->xfrm.policy_idx_hmask;
si->spdhmcnt = xfrm_policy_hashmax;
read_unlock_bh(&xfrm_policy_lock);
}
@ -1309,15 +1309,28 @@ static inline int xfrm_get_tos(struct flowi *fl, int family)
return tos;
}
static inline struct xfrm_dst *xfrm_alloc_dst(int family)
static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family)
{
struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
struct dst_ops *dst_ops;
struct xfrm_dst *xdst;
if (!afinfo)
return ERR_PTR(-EINVAL);
xdst = dst_alloc(afinfo->dst_ops) ?: ERR_PTR(-ENOBUFS);
switch (family) {
case AF_INET:
dst_ops = &net->xfrm.xfrm4_dst_ops;
break;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
case AF_INET6:
dst_ops = &net->xfrm.xfrm6_dst_ops;
break;
#endif
default:
BUG();
}
xdst = dst_alloc(dst_ops) ?: ERR_PTR(-ENOBUFS);
xfrm_policy_put_afinfo(afinfo);
@ -1366,6 +1379,7 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
struct flowi *fl,
struct dst_entry *dst)
{
struct net *net = xp_net(policy);
unsigned long now = jiffies;
struct net_device *dev;
struct dst_entry *dst_prev = NULL;
@ -1389,7 +1403,7 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
dst_hold(dst);
for (; i < nx; i++) {
struct xfrm_dst *xdst = xfrm_alloc_dst(family);
struct xfrm_dst *xdst = xfrm_alloc_dst(net, family);
struct dst_entry *dst1 = &xdst->u.dst;
err = PTR_ERR(xdst);
@ -2279,6 +2293,7 @@ EXPORT_SYMBOL(xfrm_bundle_ok);
int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
{
struct net *net;
int err = 0;
if (unlikely(afinfo == NULL))
return -EINVAL;
@ -2302,6 +2317,27 @@ int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
xfrm_policy_afinfo[afinfo->family] = afinfo;
}
write_unlock_bh(&xfrm_policy_afinfo_lock);
rtnl_lock();
for_each_net(net) {
struct dst_ops *xfrm_dst_ops;
switch (afinfo->family) {
case AF_INET:
xfrm_dst_ops = &net->xfrm.xfrm4_dst_ops;
break;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
case AF_INET6:
xfrm_dst_ops = &net->xfrm.xfrm6_dst_ops;
break;
#endif
default:
BUG();
}
*xfrm_dst_ops = *afinfo->dst_ops;
}
rtnl_unlock();
return err;
}
EXPORT_SYMBOL(xfrm_policy_register_afinfo);
@ -2332,6 +2368,22 @@ int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo)
}
EXPORT_SYMBOL(xfrm_policy_unregister_afinfo);
static void __net_init xfrm_dst_ops_init(struct net *net)
{
struct xfrm_policy_afinfo *afinfo;
read_lock_bh(&xfrm_policy_afinfo_lock);
afinfo = xfrm_policy_afinfo[AF_INET];
if (afinfo)
net->xfrm.xfrm4_dst_ops = *afinfo->dst_ops;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
afinfo = xfrm_policy_afinfo[AF_INET6];
if (afinfo)
net->xfrm.xfrm6_dst_ops = *afinfo->dst_ops;
#endif
read_unlock_bh(&xfrm_policy_afinfo_lock);
}
static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
{
struct xfrm_policy_afinfo *afinfo;
@ -2494,6 +2546,7 @@ static int __net_init xfrm_net_init(struct net *net)
rv = xfrm_policy_init(net);
if (rv < 0)
goto out_policy;
xfrm_dst_ops_init(net);
rv = xfrm_sysctl_init(net);
if (rv < 0)
goto out_sysctl;
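The other half of the namespace work is visible in xfrm_dst_ops_init() and xfrm_policy_register_afinfo() above: every namespace gets its own writable copy of the per-family dst_ops template, so entry counters and gc_thresh no longer bleed between namespaces. A minimal sketch of that template-copy idea (simplified types; the kernel copies *afinfo->dst_ops into net->xfrm.xfrm4_dst_ops and xfrm6_dst_ops):

    #include <stdio.h>

    struct dst_ops { int gc_thresh; int entries; };

    static const struct dst_ops xfrm4_template = { .gc_thresh = 1024 };

    struct net { struct dst_ops xfrm4_dst_ops; };

    static void net_xfrm_init(struct net *net)
    {
            net->xfrm4_dst_ops = xfrm4_template;    /* structure copy, not a shared pointer */
    }

    int main(void)
    {
            struct net a, b;
            net_xfrm_init(&a);
            net_xfrm_init(&b);
            a.xfrm4_dst_ops.gc_thresh = 2048;       /* tuning one namespace ...        */
            printf("a=%d b=%d\n", a.xfrm4_dst_ops.gc_thresh, b.xfrm4_dst_ops.gc_thresh);
            return 0;                               /* ... leaves the other at 1024    */
    }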


@ -641,11 +641,11 @@ out:
}
EXPORT_SYMBOL(xfrm_state_flush);
void xfrm_sad_getinfo(struct xfrmk_sadinfo *si)
void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si)
{
spin_lock_bh(&xfrm_state_lock);
si->sadcnt = init_net.xfrm.state_num;
si->sadhcnt = init_net.xfrm.state_hmask;
si->sadcnt = net->xfrm.state_num;
si->sadhcnt = net->xfrm.state_hmask;
si->sadhmcnt = xfrm_state_hashmax;
spin_unlock_bh(&xfrm_state_lock);
}


@ -781,7 +781,8 @@ static inline size_t xfrm_spdinfo_msgsize(void)
+ nla_total_size(sizeof(struct xfrmu_spdhinfo));
}
static int build_spdinfo(struct sk_buff *skb, u32 pid, u32 seq, u32 flags)
static int build_spdinfo(struct sk_buff *skb, struct net *net,
u32 pid, u32 seq, u32 flags)
{
struct xfrmk_spdinfo si;
struct xfrmu_spdinfo spc;
@ -795,7 +796,7 @@ static int build_spdinfo(struct sk_buff *skb, u32 pid, u32 seq, u32 flags)
f = nlmsg_data(nlh);
*f = flags;
xfrm_spd_getinfo(&si);
xfrm_spd_getinfo(net, &si);
spc.incnt = si.incnt;
spc.outcnt = si.outcnt;
spc.fwdcnt = si.fwdcnt;
@ -828,7 +829,7 @@ static int xfrm_get_spdinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
if (r_skb == NULL)
return -ENOMEM;
if (build_spdinfo(r_skb, spid, seq, *flags) < 0)
if (build_spdinfo(r_skb, net, spid, seq, *flags) < 0)
BUG();
return nlmsg_unicast(net->xfrm.nlsk, r_skb, spid);
@ -841,7 +842,8 @@ static inline size_t xfrm_sadinfo_msgsize(void)
+ nla_total_size(4); /* XFRMA_SAD_CNT */
}
static int build_sadinfo(struct sk_buff *skb, u32 pid, u32 seq, u32 flags)
static int build_sadinfo(struct sk_buff *skb, struct net *net,
u32 pid, u32 seq, u32 flags)
{
struct xfrmk_sadinfo si;
struct xfrmu_sadhinfo sh;
@ -854,7 +856,7 @@ static int build_sadinfo(struct sk_buff *skb, u32 pid, u32 seq, u32 flags)
f = nlmsg_data(nlh);
*f = flags;
xfrm_sad_getinfo(&si);
xfrm_sad_getinfo(net, &si);
sh.sadhmcnt = si.sadhmcnt;
sh.sadhcnt = si.sadhcnt;
@ -882,7 +884,7 @@ static int xfrm_get_sadinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
if (r_skb == NULL)
return -ENOMEM;
if (build_sadinfo(r_skb, spid, seq, *flags) < 0)
if (build_sadinfo(r_skb, net, spid, seq, *flags) < 0)
BUG();
return nlmsg_unicast(net->xfrm.nlsk, r_skb, spid);