Merge remote-tracking branch 'sm8250/lineage-20' into lineage-20

* sm8250/lineage-20:
  Linux 4.19.282
  ASN.1: Fix check for strdup() success
  iio: adc: at91-sama5d2_adc: fix an error code in at91_adc_allocate_trigger()
  counter: 104-quad-8: Fix race condition between FLAG and CNTR reads
  sctp: Call inet6_destroy_sock() via sk->sk_destruct().
  dccp: Call inet6_destroy_sock() via sk->sk_destruct().
  inet6: Remove inet6_destroy_sock() in sk->sk_prot->destroy().
  tcp/udp: Call inet6_destroy_sock() in IPv6 sk->sk_destruct().
  udp: Call inet6_destroy_sock() in setsockopt(IPV6_ADDRFORM).
  ext4: fix use-after-free in ext4_xattr_set_entry
  ext4: remove duplicate definition of ext4_xattr_ibody_inline_set()
  Revert "ext4: fix use-after-free in ext4_xattr_set_entry"
  x86/purgatory: Don't generate debug info for purgatory.ro
  memstick: fix memory leak if card device is never registered
  nilfs2: initialize unused bytes in segment summary blocks
  xen/netback: use same error messages for same errors
  s390/ptrace: fix PTRACE_GET_LAST_BREAK error handling
  net: dsa: b53: mmap: add phy ops
  scsi: core: Improve scsi_vpd_inquiry() checks
  scsi: megaraid_sas: Fix fw_crash_buffer_show()
  selftests: sigaltstack: fix -Wuninitialized
  Input: i8042 - add quirk for Fujitsu Lifebook A574/H
  f2fs: Fix f2fs_truncate_partial_nodes ftrace event
  e1000e: Disable TSO on i219-LM card to increase speed
  mlxfw: fix null-ptr-deref in mlxfw_mfa2_tlv_next()
  i40e: fix i40e_setup_misc_vector() error handling
  i40e: fix accessing vsi->active_filters without holding lock
  virtio_net: bugfix overflow inside xdp_linearize_page()
  net: sched: sch_qfq: prevent slab-out-of-bounds in qfq_activate_agg
  ARM: dts: rockchip: fix a typo error for rk3288 spdif node
  Linux 4.19.281
  arm64: KVM: Fix system register enumeration
  KVM: arm64: Filter out invalid core register IDs in KVM_GET_REG_LIST
  KVM: arm64: Factor out core register ID enumeration
  KVM: nVMX: add missing consistency checks for CR0 and CR4
  coresight-etm4: Fix for() loop drvdata->nr_addr_cmp range bug
  watchdog: sbsa_wdog: Make sure the timeout programming is within the limits
  cgroup/cpuset: Wake up cpuset_attach_wq tasks in cpuset_cancel_attach()
  ubi: Fix deadlock caused by recursively holding work_sem
  mtd: ubi: wl: Fix a couple of kernel-doc issues
  ubi: Fix failure attaching when vid_hdr offset equals to (sub)page size
  x86/PCI: Add quirk for AMD XHCI controller that loses MSI-X state in D3hot
  scsi: ses: Handle enclosure with just a primary component gracefully
  verify_pefile: relax wrapper length check
  efi: sysfb_efi: Add quirk for Lenovo Yoga Book X91F/L
  i2c: imx-lpi2c: clean rx/tx buffers upon new message
  power: supply: cros_usbpd: reclassify "default case!" as debug
  udp6: fix potential access to stale information
  net: macb: fix a memory corruption in extended buffer descriptor mode
  sctp: fix a potential overflow in sctp_ifwdtsn_skip
  qlcnic: check pci_reset_function result
  niu: Fix missing unwind goto in niu_alloc_channels()
  9p/xen : Fix use after free bug in xen_9pfs_front_remove due to race condition
  mtdblock: tolerate corrected bit-flips
  Bluetooth: Fix race condition in hidp_session_thread
  Bluetooth: L2CAP: Fix use-after-free in l2cap_disconnect_{req,rsp}
  ALSA: hda/sigmatel: fix S/PDIF out on Intel D*45* motherboards
  ALSA: i2c/cs8427: fix iec958 mixer control deactivation
  ALSA: hda/sigmatel: add pin overrides for Intel DP45SG motherboard
  ALSA: emu10k1: fix capture interrupt handler unlinking
  Revert "pinctrl: amd: Disable and mask interrupts on resume"
  mm/swap: fix swap_info_struct race between swapoff and get_swap_pages()
  ring-buffer: Fix race while reader and writer are on the same page
  ftrace: Mark get_lock_parent_ip() __always_inline
  perf/core: Fix the same task check in perf_event_set_output
  ALSA: hda/realtek: Add quirk for Clevo X370SNW
  nilfs2: fix sysfs interface lifetime
  nilfs2: fix potential UAF of struct nilfs_sc_info in nilfs_segctor_thread()
  tty: serial: sh-sci: Fix Rx on RZ/G2L SCI
  tty: serial: sh-sci: Fix transmit end interrupt handler
  iio: dac: cio-dac: Fix max DAC write value check for 12-bit
  USB: serial: option: add Quectel RM500U-CN modem
  USB: serial: option: add Telit FE990 compositions
  USB: serial: cp210x: add Silicon Labs IFS-USB-DATACABLE IDs
  gpio: davinci: Add irq chip flag to skip set wake
  ipv6: Fix an uninit variable access bug in __ip6_make_skb()
  sctp: check send stream number after wait_for_sndbuf
  net: don't let netpoll invoke NAPI if in xmit context
  icmp: guard against too small mtu
  wifi: mac80211: fix invalid drv_sta_pre_rcu_remove calls for non-uploaded sta
  pwm: cros-ec: Explicitly set .polarity in .get_state()
  NFSv4: Fix hangs when recovering open state after a server reboot
  NFSv4: Check the return value of update_open_stateid()
  NFSv4: Convert struct nfs4_state to use refcount_t
  pinctrl: amd: Disable and mask interrupts on resume
  pinctrl: amd: disable and mask interrupts on probe
  pinctrl: amd: Use irqchip template
  pinctrl: Added IRQF_SHARED flag for amd-pinctrl driver
  techpack: audio: Remove build timestamp injection
  Revert "dm thin: fix deadlock when swapping to thin device"
  Linux 4.19.280
  cgroup: Add missing cpus_read_lock() to cgroup_attach_task_all()
  cgroup: Fix threadgroup_rwsem <-> cpus_read_lock() deadlock
  cgroup/cpuset: Change cpuset_rwsem and hotplug lock order
  net: sched: cbq: dont intepret cls results when asked to drop
  gfs2: Always check inode size of inline inodes
  firmware: arm_scmi: Fix device node validation for mailbox transport
  ext4: fix kernel BUG in 'ext4_write_inline_data_end()'
  usb: host: ohci-pxa27x: Fix and & vs | typo
  s390/uaccess: add missing earlyclobber annotations to __clear_user()
  drm/etnaviv: fix reference leak when mmaping imported buffer
  ALSA: usb-audio: Fix regression on detection of Roland VS-100
  ALSA: hda/conexant: Partial revert of a quirk for Lenovo
  pinctrl: at91-pio4: fix domain name assignment
  xen/netback: don't do grant copy across page boundary
  cifs: fix DFS traversal oops without CONFIG_CIFS_DFS_UPCALL
  cifs: prevent infinite recursion in CIFSGetDFSRefer()
  Input: focaltech - use explicitly signed char type
  Input: alps - fix compatibility with -funsigned-char
  net: mvneta: make tx buffer array agnostic
  net: dsa: mv88e6xxx: Enable IGMP snooping on user ports only
  i40e: fix registers dump after run ethtool adapter self test
  can: bcm: bcm_tx_setup(): fix KMSAN uninit-value in vfs_write
  scsi: megaraid_sas: Fix crash after a double completion
  ca8210: Fix unsigned mac_len comparison with zero in ca8210_skb_tx()
  fbdev: au1200fb: Fix potential divide by zero
  fbdev: lxfb: Fix potential divide by zero
  fbdev: intelfb: Fix potential divide by zero
  fbdev: nvidia: Fix potential divide by zero
  sched_getaffinity: don't assume 'cpumask_size()' is fully initialized
  fbdev: tgafb: Fix potential divide by zero
  ALSA: hda/ca0132: fixup buffer overrun at tuning_ctl_set()
  ALSA: asihpi: check pao in control_message()
  md: avoid signed overflow in slot_store()
  bus: imx-weim: fix branch condition evaluates to a garbage value
  ocfs2: fix data corruption after failed write
  tun: avoid double free in tun_free_netdev
  sched/fair: Sanitize vruntime of entity being migrated
  sched/fair: sanitize vruntime of entity being placed
  dm crypt: add cond_resched() to dmcrypt_write()
  dm stats: check for and propagate alloc_percpu failure
  i2c: xgene-slimpro: Fix out-of-bounds bug in xgene_slimpro_i2c_xfer()
  nilfs2: fix kernel-infoleak in nilfs_ioctl_wrap_copy()
  usb: chipidea: core: fix possible concurrent when switch role
  usb: chipdea: core: fix return -EINVAL if request role is the same with current role
  dm thin: fix deadlock when swapping to thin device
  igb: revert rtnl_lock() that causes deadlock
  usb: gadget: u_audio: don't let userspace block driver unbind
  scsi: core: Add BLIST_SKIP_VPD_PAGES for SKhynix H28U74301AMR
  cifs: empty interface list when server doesn't support query interfaces
  sh: sanitize the flags on sigreturn
  net: usb: qmi_wwan: add Telit 0x1080 composition
  net: usb: cdc_mbim: avoid altsetting toggling for Telit FE990
  scsi: ufs: core: Add soft dependency on governor_simpleondemand
  scsi: target: iscsi: Fix an error message in iscsi_check_key()
  m68k: Only force 030 bus error if PC not in exception table
  ca8210: fix mac_len negative array access
  riscv: Bump COMMAND_LINE_SIZE value to 1024
  thunderbolt: Use const qualifier for `ring_interrupt_index`
  uas: Add US_FL_NO_REPORT_OPCODES for JMicron JMS583Gen 2
  hwmon (it87): Fix voltage scaling for chips with 10.9mV ADCs
  Bluetooth: btsdio: fix use after free bug in btsdio_remove due to unfinished work
  Bluetooth: btqcomsmd: Fix command timeout after setting BD address
  net: mdio: thunder: Add missing fwnode_handle_put()
  hvc/xen: prevent concurrent accesses to the shared ring
  net/sonic: use dma_mapping_error() for error check
  erspan: do not use skb_mac_header() in ndo_start_xmit()
  atm: idt77252: fix kmemleak when rmmod idt77252
  net/mlx5: Read the TC mapping of all priorities on ETS query
  bpf: Adjust insufficient default bpf_jit_limit
  net/ps3_gelic_net: Use dma_mapping_error
  net/ps3_gelic_net: Fix RX sk_buff length
  net: qcom/emac: Fix use after free bug in emac_remove due to race condition
  xirc2ps_cs: Fix use after free bug in xirc2ps_detach
  qed/qed_sriov: guard against NULL derefs from qed_iov_get_vf_info
  net: usb: smsc95xx: Limit packet length to skb->len
  scsi: scsi_dh_alua: Fix memleak for 'qdata' in alua_activate()
  i2c: imx-lpi2c: check only for enabled interrupt flags
  igbvf: Regard vf reset nack as success
  intel/igbvf: free irq on the error path in igbvf_request_msix()
  iavf: fix inverted Rx hash condition leading to disabled hash
  iavf: diet and reformat
  intel-ethernet: rename i40evf to iavf
  i40evf: Change a VF mac without reloading the VF driver
  power: supply: da9150: Fix use after free bug in da9150_charger_remove due to race condition
  msm: vidc: If QP_value is invalid,assign default_QP
  msm: kgsl: Keep postamble packets in a privileged buffer
  ANDROID: mm/filemap: Fix missing put_page() for speculative page fault
  soc: qcom: qsee_ipc_irq_bridge: Remove redundant cleanup
  fw-api: CL 22203883 - update fw common interface files
  fw-api: CL 22203879 - update fw common interface files
  fw-api: CL 22186584 - update fw common interface files
  fw-api: CL 22164863 - update fw common interface files
  fw-api: CL 22156324 - update fw common interface files
  fw-api: Add HW header files for WCN6450
  fw-api: remove banned words
  fw-api: Make changes to support Big endian
  fw-api: CL 22114305 - update fw common interface files
  fw-api: CL 22096085 - update fw common interface files
  fw-api: CL 22074527 - update fw common interface files
  ANDROID: Re-enable fast mremap and fix UAF with SPF
  ANDROID: mm: fix invalid backport in speculative page fault path
  ANDROID: mm: assert that mmap_lock is taken exclusively in vm_write_begin
  ANDROID: mm: remove sequence counting when mmap_lock is not exclusively owned
  ANDROID: mm/khugepaged: add missing vm_write_{begin|end}
  BACKPORT: FROMLIST: mm: implement speculative handling in filemap_fault()
  fw-api: CL 22046875 - update fw common interface files
  ANDROID: mm: prevent reads of unstable pmd during speculation
  ANDROID: mm: prevent speculative page fault handling for in do_swap_page()
  ANDROID: mm: skip pte_alloc during speculative page fault
  fw-api: CL 22021621 - update fw common interface files
  fw-api: CL 22011590 - update fw common interface files
  fw-api: CL 22011543 - update fw common interface files
  fw-api: CL 21987591 - update fw common interface files
  fw-api: CL 21987565 - update fw common interface files
  fw-api: CL 21882670 - update fw common interface files.
  fw-api: CL 21863023 - update fw common interface files
  fw-api: add REO2SW1_RING_MISC_1 in wcss_seq_hwioumac_reg.h
  fw-api: CL 21817763 - update fw common interface files
  fw-api: CL 21803370 - update fw common interface files
  fw-api: CL 21801844 - update fw common interface files
  fw-api: CL 21775737 - update fw common interface files
  fw-api: CL 21774881 - update fw common interface files
  fw-api: CL 21752010 - update fw common interface files
  fw-api: CL 21737959 - update fw common interface files
  fw-api: CL 21716559 - update fw common interface files
  fw-api: CL 21708534 - update fw common interface files
  fw-api: CL 21708530 - update fw common interface files
  fw-api: CL 21693223 - update fw common interface files
  fw-api: CL 21678453 - update fw common interface files
  fw-api: CL 21675975 - update fw common interface files
  fw-api: CL 21673808 - update fw common interface files
  fw-api: CL 21672613 - update fw common interface files
  fw-api: CL 21666405 - update fw common interface files
  fw-api: CL 21666402 - update fw common interface files
  fw-api: CL 21636648 - update fw common interface files
  fw-api: CL 21636521 - update fw common interface files
  fw-api: CL 21636491 - update fw common interface files
  fw-api: CL 21624235 - update fw common interface files
  fw-api: CL 21624232 - update fw common interface files
  fw-api: CL 21615080 - update fw common interface files
  fw-api: CL 21615063 - update fw common interface files
  fw-api: CL 21614996 - update fw common interface files
  fw-api: CL 21602542 - update fw common interface files
  fw-api: CL 21599461 - update fw common interface files
  fw-api: CL 21557799 - update fw common interface files
  fw-api: CL 21552073 - update fw common interface files
  fw-api: CL 21545735 - update fw common interface files
  fw-api: CL 21541123 - update fw common interface files
  fw-api: CL 21506382 - update fw common interface files
  fw-api: CL 21503143 - update fw common interface files
  fw-api: CL 21482490 - update fw common interface files
  fw-api: CL 21473564 - update fw common interface files
  fw-api: CL 21462084 - update fw common interface files
  fw-api: CL 21416528 - update fw common interface files
  fw-api: CL 21416524 - update fw common interface files
  fw-api: CL 21399770 - update fw common interface files
  fw-api: CL 21399742 - update fw common interface files
  fw-api: CL 21398997 - update fw common interface files
  fw-api: CL 21373891 - update fw common interface files
  fw-api: CL 21373889 - update fw common interface files
  fw-api: CL 21355920 - update fw common interface files
  serial: msm_geni_serial: Avoid UAF memory access in exit path
  mfd: qcom-spmi-pmic: Add remove API

 Conflicts:
	techpack/audio/asoc/codecs/Kbuild

Change-Id: Ib8d500530c7ea9358374a379624b06717ae684c3
commit 4e6188b37b
Author: Michael Bestas
Date:   2023-05-06 08:12:09 +03:00
GPG key ID: CC95044519BE6669

341 files changed, 24297 insertions(+), 6129 deletions(-)

@@ -94,8 +94,8 @@ gianfar.txt
- Gianfar Ethernet Driver.
i40e.txt
- README for the Intel Ethernet Controller XL710 Driver (i40e).
i40evf.txt
- Short note on the Driver for the Intel(R) XL710 X710 Virtual Function
iavf.txt
- README for the Intel Ethernet Adaptive Virtual Function Driver (iavf).
ieee802154.txt
- Linux IEEE 802.15.4 implementation, API and drivers
igb.txt

@@ -2,7 +2,7 @@ Linux* Base Driver for Intel(R) Network Connection
==================================================
Intel Ethernet Adaptive Virtual Function Linux driver.
Copyright(c) 2013-2017 Intel Corporation.
Copyright(c) 2013-2018 Intel Corporation.
Contents
========
@@ -11,20 +11,21 @@ Contents
- Known Issues/Troubleshooting
- Support
This file describes the i40evf Linux* Base Driver.
This file describes the iavf Linux* Base Driver. This driver
was formerly called i40evf.
The i40evf driver supports the below mentioned virtual function
The iavf driver supports the below mentioned virtual function
devices and can only be activated on kernels running the i40e or
newer Physical Function (PF) driver compiled with CONFIG_PCI_IOV.
The i40evf driver requires CONFIG_PCI_MSI to be enabled.
The iavf driver requires CONFIG_PCI_MSI to be enabled.
The guest OS loading the i40evf driver must support MSI-X interrupts.
The guest OS loading the iavf driver must support MSI-X interrupts.
Supported Hardware
==================
Intel XL710 X710 Virtual Function
Intel Ethernet Adaptive Virtual Function
Intel X722 Virtual Function
Intel Ethernet Adaptive Virtual Function
Identifying Your Adapter
========================
@@ -32,7 +33,8 @@ Identifying Your Adapter
For more information on how to identify your adapter, go to the
Adapter & Driver ID Guide at:
http://support.intel.com/support/go/network/adapter/idguide.htm
https://www.intel.com/content/www/us/en/support/articles/000005584/network-and-i-o/ethernet-products.html
Known Issues/Troubleshooting
============================

@@ -691,7 +691,7 @@ ref
no-jd
BIOS setup but without jack-detection
intel
Intel DG45* mobos
Intel D*45* mobos
dell-m6-amic
Dell desktops/laptops with analog mics
dell-m6-dmic

@@ -7411,7 +7411,7 @@ F: Documentation/networking/ixgb.txt
F: Documentation/networking/ixgbe.txt
F: Documentation/networking/ixgbevf.txt
F: Documentation/networking/i40e.txt
F: Documentation/networking/i40evf.txt
F: Documentation/networking/iavf.txt
F: Documentation/networking/ice.txt
F: drivers/net/ethernet/intel/
F: drivers/net/ethernet/intel/*/

@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 4
PATCHLEVEL = 19
SUBLEVEL = 279
SUBLEVEL = 282
EXTRAVERSION =
NAME = "People's Front"

@@ -924,7 +924,7 @@
status = "disabled";
};
spdif: sound@ff88b0000 {
spdif: sound@ff8b0000 {
compatible = "rockchip,rk3288-spdif", "rockchip,rk3066-spdif";
reg = <0x0 0xff8b0000 0x0 0x10000>;
#sound-dai-cells = <0>;

@@ -57,9 +57,8 @@ static u64 core_reg_offset_from_id(u64 id)
return id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_CORE);
}
static int validate_core_offset(const struct kvm_one_reg *reg)
static int core_reg_size_from_offset(u64 off)
{
u64 off = core_reg_offset_from_id(reg->id);
int size;
switch (off) {
@@ -89,11 +88,24 @@ static int validate_core_offset(const struct kvm_one_reg *reg)
return -EINVAL;
}
if (KVM_REG_SIZE(reg->id) == size &&
IS_ALIGNED(off, size / sizeof(__u32)))
return 0;
if (!IS_ALIGNED(off, size / sizeof(__u32)))
return -EINVAL;
return -EINVAL;
return size;
}
static int validate_core_offset(const struct kvm_one_reg *reg)
{
u64 off = core_reg_offset_from_id(reg->id);
int size = core_reg_size_from_offset(off);
if (size < 0)
return -EINVAL;
if (KVM_REG_SIZE(reg->id) != size)
return -EINVAL;
return 0;
}
static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
@@ -200,9 +212,51 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
return -EINVAL;
}
static int kvm_arm_copy_core_reg_indices(u64 __user *uindices)
{
unsigned int i;
int n = 0;
for (i = 0; i < sizeof(struct kvm_regs) / sizeof(__u32); i++) {
u64 reg = KVM_REG_ARM64 | KVM_REG_ARM_CORE | i;
int size = core_reg_size_from_offset(i);
if (size < 0)
continue;
switch (size) {
case sizeof(__u32):
reg |= KVM_REG_SIZE_U32;
break;
case sizeof(__u64):
reg |= KVM_REG_SIZE_U64;
break;
case sizeof(__uint128_t):
reg |= KVM_REG_SIZE_U128;
break;
default:
WARN_ON(1);
continue;
}
if (uindices) {
if (put_user(reg, uindices))
return -EFAULT;
uindices++;
}
n++;
}
return n;
}
static unsigned long num_core_regs(void)
{
return sizeof(struct kvm_regs) / sizeof(__u32);
return kvm_arm_copy_core_reg_indices(NULL);
}
/**
@@ -276,23 +330,20 @@ unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
*/
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
unsigned int i;
const u64 core_reg = KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE;
int ret;
for (i = 0; i < sizeof(struct kvm_regs) / sizeof(__u32); i++) {
if (put_user(core_reg | i, uindices))
return -EFAULT;
uindices++;
}
ret = kvm_arm_copy_core_reg_indices(uindices);
if (ret < 0)
return ret;
uindices += ret;
ret = kvm_arm_copy_fw_reg_indices(vcpu, uindices);
if (ret)
if (ret < 0)
return ret;
uindices += kvm_arm_get_fw_num_regs(vcpu);
ret = copy_timer_indices(vcpu, uindices);
if (ret)
if (ret < 0)
return ret;
uindices += NUM_TIMER_REGS;

@@ -30,6 +30,7 @@
#include <linux/init.h>
#include <linux/ptrace.h>
#include <linux/kallsyms.h>
#include <linux/extable.h>
#include <asm/setup.h>
#include <asm/fpu.h>
@@ -550,7 +551,8 @@ static inline void bus_error030 (struct frame *fp)
errorcode |= 2;
if (mmusr & (MMU_I | MMU_WP)) {
if (ssw & 4) {
/* We might have an exception table for this PC */
if (ssw & 4 && !search_exception_tables(fp->ptregs.pc)) {
pr_err("Data %s fault at %#010lx in %s (pc=%#lx)\n",
ssw & RW ? "read" : "write",
fp->un.fmtb.daddr,

@@ -0,0 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
#ifndef _UAPI_ASM_RISCV_SETUP_H
#define _UAPI_ASM_RISCV_SETUP_H
#define COMMAND_LINE_SIZE 1024
#endif /* _UAPI_ASM_RISCV_SETUP_H */

@@ -503,9 +503,7 @@ long arch_ptrace(struct task_struct *child, long request,
}
return 0;
case PTRACE_GET_LAST_BREAK:
put_user(child->thread.last_break,
(unsigned long __user *) data);
return 0;
return put_user(child->thread.last_break, (unsigned long __user *)data);
case PTRACE_ENABLE_TE:
if (!MACHINE_HAS_TE)
return -EIO;
@@ -857,9 +855,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
}
return 0;
case PTRACE_GET_LAST_BREAK:
put_user(child->thread.last_break,
(unsigned int __user *) data);
return 0;
return put_user(child->thread.last_break, (unsigned int __user *)data);
}
return compat_ptrace_request(child, request, addr, data);
}

@@ -339,7 +339,7 @@ static inline unsigned long clear_user_mvcos(void __user *to, unsigned long size
"4: slgr %0,%0\n"
"5:\n"
EX_TABLE(0b,2b) EX_TABLE(3b,5b)
: "+a" (size), "+a" (to), "+a" (tmp1), "=a" (tmp2)
: "+&a" (size), "+&a" (to), "+a" (tmp1), "=&a" (tmp2)
: "a" (empty_zero_page), "d" (reg0) : "cc", "memory");
return size;
}

@@ -57,6 +57,7 @@
#define SR_FD 0x00008000
#define SR_MD 0x40000000
#define SR_USER_MASK 0x00000303 // M, Q, S, T bits
/*
* DSP structure and data
*/

@@ -116,6 +116,7 @@ static int
restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, int *r0_p)
{
unsigned int err = 0;
unsigned int sr = regs->sr & ~SR_USER_MASK;
#define COPY(x) err |= __get_user(regs->x, &sc->sc_##x)
COPY(regs[1]);
@@ -131,6 +132,8 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, int *r0_p
COPY(sr); COPY(pc);
#undef COPY
regs->sr = (regs->sr & SR_USER_MASK) | sr;
#ifdef CONFIG_SH_FPU
if (boot_cpu_data.flags & CPU_HAS_FPU) {
int owned_fp;

@@ -273,6 +273,14 @@ static const struct dmi_system_id efifb_dmi_swap_width_height[] __initconst = {
"IdeaPad Duet 3 10IGL5"),
},
},
{
/* Lenovo Yoga Book X91F / X91L */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
/* Non exact match to match F + L versions */
DMI_MATCH(DMI_PRODUCT_NAME, "Lenovo YB1-X91"),
},
},
{},
};

@@ -12752,7 +12752,7 @@ static int nested_vmx_check_vmcs_link_ptr(struct kvm_vcpu *vcpu,
static int check_vmentry_postreqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
u32 *exit_qual)
{
bool ia32e;
bool ia32e = !!(vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE);
*exit_qual = ENTRY_FAIL_DEFAULT;
@@ -12765,6 +12765,13 @@ static int check_vmentry_postreqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
return 1;
}
if ((vmcs12->guest_cr0 & (X86_CR0_PG | X86_CR0_PE)) == X86_CR0_PG)
return 1;
if ((ia32e && !(vmcs12->guest_cr4 & X86_CR4_PAE)) ||
(ia32e && !(vmcs12->guest_cr0 & X86_CR0_PG)))
return 1;
/*
* If the load IA32_EFER VM-entry control is 1, the following checks
* are performed on the field for the IA32_EFER MSR:
@@ -12776,7 +12783,6 @@ static int check_vmentry_postreqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
*/
if (to_vmx(vcpu)->nested.nested_run_pending &&
(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER)) {
ia32e = (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) != 0;
if (!kvm_valid_efer(vcpu, vmcs12->guest_ia32_efer) ||
ia32e != !!(vmcs12->guest_ia32_efer & EFER_LMA) ||
((vmcs12->guest_cr0 & X86_CR0_PG) &&

@@ -7,6 +7,7 @@
#include <linux/dmi.h>
#include <linux/pci.h>
#include <linux/vgaarb.h>
#include <asm/amd_nb.h>
#include <asm/hpet.h>
#include <asm/pci_x86.h>
@@ -824,3 +825,23 @@ static void rs690_fix_64bit_dma(struct pci_dev *pdev)
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7910, rs690_fix_64bit_dma);
#endif
#ifdef CONFIG_AMD_NB
#define AMD_15B8_RCC_DEV2_EPF0_STRAP2 0x10136008
#define AMD_15B8_RCC_DEV2_EPF0_STRAP2_NO_SOFT_RESET_DEV2_F0_MASK 0x00000080L
static void quirk_clear_strap_no_soft_reset_dev2_f0(struct pci_dev *dev)
{
u32 data;
if (!amd_smn_read(0, AMD_15B8_RCC_DEV2_EPF0_STRAP2, &data)) {
data &= ~AMD_15B8_RCC_DEV2_EPF0_STRAP2_NO_SOFT_RESET_DEV2_F0_MASK;
if (amd_smn_write(0, AMD_15B8_RCC_DEV2_EPF0_STRAP2, data))
pci_err(dev, "Failed to write data 0x%x\n", data);
} else {
pci_err(dev, "Failed to read data\n");
}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x15b8, quirk_clear_strap_no_soft_reset_dev2_f0);
#endif

@@ -25,7 +25,7 @@ KCOV_INSTRUMENT := n
# make up the standalone purgatory.ro
PURGATORY_CFLAGS_REMOVE := -mcmodel=kernel
PURGATORY_CFLAGS := -mcmodel=large -ffreestanding -fno-zero-initialized-in-bss
PURGATORY_CFLAGS := -mcmodel=large -ffreestanding -fno-zero-initialized-in-bss -g0
PURGATORY_CFLAGS += $(DISABLE_STACKLEAK_PLUGIN) -DDISABLE_BRANCH_PROFILING
# Default KBUILD_CFLAGS can have -pg option set when FTRACE is enabled. That
@@ -56,6 +56,9 @@ CFLAGS_sha256.o += $(PURGATORY_CFLAGS)
CFLAGS_REMOVE_string.o += $(PURGATORY_CFLAGS_REMOVE)
CFLAGS_string.o += $(PURGATORY_CFLAGS)
AFLAGS_REMOVE_setup-x86_$(BITS).o += -g -Wa,-gdwarf-2
AFLAGS_REMOVE_entry64.o += -g -Wa,-gdwarf-2
$(obj)/purgatory.ro: $(PURGATORY_OBJS) FORCE
$(call if_changed,ld)

@@ -139,11 +139,15 @@ static int pefile_strip_sig_wrapper(const void *pebuf,
pr_debug("sig wrapper = { %x, %x, %x }\n",
wrapper.length, wrapper.revision, wrapper.cert_type);
/* Both pesign and sbsign round up the length of certificate table
* (in optional header data directories) to 8 byte alignment.
/* sbsign rounds up the length of certificate table (in optional
* header data directories) to 8 byte alignment. However, the PE
* specification states that while entries are 8-byte aligned, this is
* not included in their length, and as a result, pesign has not
* rounded up since 0.110.
*/
if (round_up(wrapper.length, 8) != ctx->sig_len) {
pr_debug("Signature wrapper len wrong\n");
if (wrapper.length > ctx->sig_len) {
pr_debug("Signature wrapper bigger than sig len (%x > %x)\n",
ctx->sig_len, wrapper.length);
return -ELIBBAD;
}
if (wrapper.revision != WIN_CERT_REVISION_2_0) {

@@ -2915,6 +2915,7 @@ close_card_oam(struct idt77252_dev *card)
recycle_rx_pool_skb(card, &vc->rcv.rx_pool);
}
kfree(vc);
}
}
}
@@ -2958,6 +2959,15 @@ open_card_ubr0(struct idt77252_dev *card)
return 0;
}
static void
close_card_ubr0(struct idt77252_dev *card)
{
struct vc_map *vc = card->vcs[0];
free_scq(card, vc->scq);
kfree(vc);
}
static int
idt77252_dev_open(struct idt77252_dev *card)
{
@@ -3007,6 +3017,7 @@ static void idt77252_dev_close(struct atm_dev *dev)
struct idt77252_dev *card = dev->dev_data;
u32 conf;
close_card_ubr0(card);
close_card_oam(card);
conf = SAR_CFG_RXPTH | /* enable receive path */

@@ -146,6 +146,21 @@ static int btqcomsmd_setup(struct hci_dev *hdev)
return 0;
}
static int btqcomsmd_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr)
{
int ret;
ret = qca_set_bdaddr_rome(hdev, bdaddr);
if (ret)
return ret;
/* The firmware stops responding for a while after setting the bdaddr,
* causing timeouts for subsequent commands. Sleep a bit to avoid this.
*/
usleep_range(1000, 10000);
return 0;
}
static int btqcomsmd_probe(struct platform_device *pdev)
{
struct btqcomsmd *btq;
@@ -195,7 +210,7 @@ static int btqcomsmd_probe(struct platform_device *pdev)
hdev->close = btqcomsmd_close;
hdev->send = btqcomsmd_send;
hdev->setup = btqcomsmd_setup;
hdev->set_bdaddr = qca_set_bdaddr_rome;
hdev->set_bdaddr = btqcomsmd_set_bdaddr;
ret = hci_register_dev(hdev);
if (ret < 0)

@@ -353,6 +353,7 @@ static void btsdio_remove(struct sdio_func *func)
BT_DBG("func %p", func);
cancel_work_sync(&data->work);
if (!data)
return;

@@ -146,8 +146,8 @@ static int __init weim_parse_dt(struct platform_device *pdev,
const struct of_device_id *of_id = of_match_device(weim_id_table,
&pdev->dev);
const struct imx_weim_devtype *devtype = of_id->data;
int ret = 0, have_child = 0;
struct device_node *child;
int ret, have_child = 0;
if (devtype == &imx50_weim_devtype) {
ret = imx_weim_gpr_setup(pdev);

@@ -705,6 +705,39 @@ static int scmi_remove(struct platform_device *pdev)
return ret;
}
static int scmi_mailbox_chan_validate(struct device *cdev)
{
int num_mb, num_sh, ret = 0;
struct device_node *np = cdev->of_node;
num_mb = of_count_phandle_with_args(np, "mboxes", "#mbox-cells");
num_sh = of_count_phandle_with_args(np, "shmem", NULL);
/* Bail out if mboxes and shmem descriptors are inconsistent */
if (num_mb <= 0 || num_sh > 2 || num_mb != num_sh) {
dev_warn(cdev, "Invalid channel descriptor for '%s'\n",
of_node_full_name(np));
return -EINVAL;
}
if (num_sh > 1) {
struct device_node *np_tx, *np_rx;
np_tx = of_parse_phandle(np, "shmem", 0);
np_rx = of_parse_phandle(np, "shmem", 1);
/* SCMI Tx and Rx shared mem areas have to be distinct */
if (!np_tx || !np_rx || np_tx == np_rx) {
dev_warn(cdev, "Invalid shmem descriptor for '%s'\n",
of_node_full_name(np));
ret = -EINVAL;
}
of_node_put(np_tx);
of_node_put(np_rx);
}
return ret;
}
static inline int
scmi_mbox_chan_setup(struct scmi_info *info, struct device *dev, int prot_id)
{
@@ -720,6 +753,10 @@ scmi_mbox_chan_setup(struct scmi_info *info, struct device *dev, int prot_id)
goto idr_alloc;
}
ret = scmi_mailbox_chan_validate(dev);
if (ret)
return ret;
cinfo = devm_kzalloc(info->dev, sizeof(*cinfo), GFP_KERNEL);
if (!cinfo)
return -ENOMEM;

@@ -327,7 +327,7 @@ static struct irq_chip gpio_irqchip = {
.irq_enable = gpio_irq_enable,
.irq_disable = gpio_irq_disable,
.irq_set_type = gpio_irq_type,
.flags = IRQCHIP_SET_TYPE_MASKED,
.flags = IRQCHIP_SET_TYPE_MASKED | IRQCHIP_SKIP_SET_WAKE,
};
static void gpio_irq_handler(struct irq_desc *desc)

@@ -91,7 +91,15 @@ static void *etnaviv_gem_prime_vmap_impl(struct etnaviv_gem_object *etnaviv_obj)
static int etnaviv_gem_prime_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
struct vm_area_struct *vma)
{
return dma_buf_mmap(etnaviv_obj->base.dma_buf, vma, 0);
int ret;
ret = dma_buf_mmap(etnaviv_obj->base.dma_buf, vma, 0);
if (!ret) {
/* Drop the reference acquired by drm_gem_mmap_obj(). */
drm_gem_object_put(&etnaviv_obj->base);
}
return ret;
}
static const struct etnaviv_gem_ops etnaviv_gem_prime_ops = {

@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2008-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef __ADRENO_H
#define __ADRENO_H
@@ -16,9 +16,6 @@
#define DEVICE_3D_NAME "kgsl-3d"
#define DEVICE_3D0_NAME "kgsl-3d0"
/* Index to preemption scratch buffer to store KMD postamble */
#define KMD_POSTAMBLE_IDX 100
/* ADRENO_DEVICE - Given a kgsl_device return the adreno device struct */
#define ADRENO_DEVICE(device) \
container_of(device, struct adreno_device, dev)

@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "adreno.h"
@@ -558,8 +558,8 @@ unsigned int a6xx_preemption_pre_ibsubmit(
* preemption
*/
if (!adreno_dev->perfcounter) {
u64 kmd_postamble_addr =
PREEMPT_SCRATCH_ADDR(adreno_dev, KMD_POSTAMBLE_IDX);
u64 kmd_postamble_addr = SCRATCH_POSTAMBLE_ADDR
(KGSL_DEVICE(adreno_dev));
*cmds++ = cp_type7_packet(CP_SET_AMBLE, 3);
*cmds++ = lower_32_bits(kmd_postamble_addr);
@@ -763,6 +763,8 @@ void a6xx_preemption_close(struct adreno_device *adreno_dev)
int a6xx_preemption_init(struct adreno_device *adreno_dev)
{
u32 flags = ADRENO_FEATURE(adreno_dev, ADRENO_APRIV) ?
KGSL_MEMDESC_PRIVILEGED : 0;
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
struct adreno_preemption *preempt = &adreno_dev->preempt;
struct adreno_ringbuffer *rb;
@@ -777,8 +779,8 @@ int a6xx_preemption_init(struct adreno_device *adreno_dev)
timer_setup(&preempt->timer, _a6xx_preemption_timer, 0);
ret = kgsl_allocate_global(device, &preempt->scratch, PAGE_SIZE, 0, 0,
"preemption_scratch");
ret = kgsl_allocate_global(device, &preempt->scratch, PAGE_SIZE, 0,
flags, "preemption_scratch");
/* Allocate mem for storing preemption switch record */
FOR_EACH_RINGBUFFER(adreno_dev, rb, i) {
@@ -788,14 +790,15 @@ int a6xx_preemption_init(struct adreno_device *adreno_dev)
}
/*
* First 8 dwords of the preemption scratch buffer is used to store the
* address for CP to save/restore VPC data. Reserve 11 dwords in the
* preemption scratch buffer from index KMD_POSTAMBLE_IDX for KMD
* postamble pm4 packets
* First 28 dwords of the device scratch buffer are used to store
* shadow rb data. Reserve 11 dwords in the device scratch buffer
* from SCRATCH_POSTAMBLE_OFFSET for KMD postamble pm4 packets.
* This should be in *device->scratch* so that userspace cannot
* access it.
*/
if (!adreno_dev->perfcounter) {
u32 *postamble = preempt->scratch.hostptr +
(KMD_POSTAMBLE_IDX * sizeof(u64));
u32 *postamble = device->scratch.hostptr +
SCRATCH_POSTAMBLE_OFFSET;
u32 count = 0;
postamble[count++] = cp_type7_packet(CP_REG_RMW, 3);

@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2008-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef __KGSL_H
#define __KGSL_H
@@ -69,6 +70,11 @@
#define SCRATCH_RPTR_GPU_ADDR(dev, id) \
((dev)->scratch.gpuaddr + SCRATCH_RPTR_OFFSET(id))
/* OFFSET to KMD postamble packets in scratch buffer */
#define SCRATCH_POSTAMBLE_OFFSET (100 * sizeof(u64))
#define SCRATCH_POSTAMBLE_ADDR(dev) \
((dev)->scratch.gpuaddr + SCRATCH_POSTAMBLE_OFFSET)
/* Timestamp window used to detect rollovers (half of integer range) */
#define KGSL_TIMESTAMP_WINDOW 0x80000000

@@ -495,6 +495,8 @@ static const struct it87_devices it87_devices[] = {
#define has_pwm_freq2(data) ((data)->features & FEAT_PWM_FREQ2)
#define has_six_temp(data) ((data)->features & FEAT_SIX_TEMP)
#define has_vin3_5v(data) ((data)->features & FEAT_VIN3_5V)
#define has_scaling(data) ((data)->features & (FEAT_12MV_ADC | \
FEAT_10_9MV_ADC))
struct it87_sio_data {
int sioaddr;
@@ -3107,7 +3109,7 @@ static int it87_probe(struct platform_device *pdev)
"Detected broken BIOS defaults, disabling PWM interface\n");
/* Starting with IT8721F, we handle scaling of internal voltages */
if (has_12mv_adc(data)) {
if (has_scaling(data)) {
if (sio_data->internal & BIT(0))
data->in_scaled |= BIT(3); /* in3 is AVCC */
if (sio_data->internal & BIT(1))

@@ -173,7 +173,7 @@ static int etm4_enable_hw(struct etmv4_drvdata *drvdata)
writel_relaxed(config->ss_pe_cmp[i],
drvdata->base + TRCSSPCICRn(i));
}
for (i = 0; i < drvdata->nr_addr_cmp; i++) {
for (i = 0; i < drvdata->nr_addr_cmp * 2; i++) {
writeq_relaxed(config->addr_val[i],
drvdata->base + TRCACVRn(i));
writeq_relaxed(config->addr_acc[i],

@@ -468,6 +468,8 @@ static int lpi2c_imx_xfer(struct i2c_adapter *adapter,
if (num == 1 && msgs[0].len == 0)
goto stop;
lpi2c_imx->rx_buf = NULL;
lpi2c_imx->tx_buf = NULL;
lpi2c_imx->delivered = 0;
lpi2c_imx->msglen = msgs[i].len;
init_completion(&lpi2c_imx->complete);
@@ -508,10 +510,14 @@ disable:
static irqreturn_t lpi2c_imx_isr(int irq, void *dev_id)
{
struct lpi2c_imx_struct *lpi2c_imx = dev_id;
unsigned int enabled;
unsigned int temp;
enabled = readl(lpi2c_imx->base + LPI2C_MIER);
lpi2c_imx_intctrl(lpi2c_imx, 0);
temp = readl(lpi2c_imx->base + LPI2C_MSR);
temp &= enabled;
if (temp & MSR_RDF)
lpi2c_imx_read_rxfifo(lpi2c_imx);

@@ -321,6 +321,9 @@ static int slimpro_i2c_blkwr(struct slimpro_i2c_dev *ctx, u32 chip,
u32 msg[3];
int rc;
if (writelen > I2C_SMBUS_BLOCK_MAX)
return -EINVAL;
memcpy(ctx->dma_buffer, data, writelen);
paddr = dma_map_single(ctx->dev, ctx->dma_buffer, writelen,
DMA_TO_DEVICE);

@@ -989,7 +989,7 @@ static struct iio_trigger *at91_adc_allocate_trigger(struct iio_dev *indio,
trig = devm_iio_trigger_alloc(&indio->dev, "%s-dev%d-%s", indio->name,
indio->id, trigger_name);
if (!trig)
return NULL;
return ERR_PTR(-ENOMEM);
trig->dev.parent = indio->dev.parent;
iio_trigger_set_drvdata(trig, indio);

@@ -61,10 +61,6 @@ struct quad8_iio {
#define QUAD8_REG_CHAN_OP 0x11
#define QUAD8_REG_INDEX_INPUT_LEVELS 0x16
/* Borrow Toggle flip-flop */
#define QUAD8_FLAG_BT BIT(0)
/* Carry Toggle flip-flop */
#define QUAD8_FLAG_CT BIT(1)
/* Error flag */
#define QUAD8_FLAG_E BIT(4)
/* Up/Down flag */
@@ -97,9 +93,6 @@ static int quad8_read_raw(struct iio_dev *indio_dev,
{
struct quad8_iio *const priv = iio_priv(indio_dev);
const int base_offset = priv->base + 2 * chan->channel;
unsigned int flags;
unsigned int borrow;
unsigned int carry;
int i;
switch (mask) {
@@ -110,12 +103,7 @@ static int quad8_read_raw(struct iio_dev *indio_dev,
return IIO_VAL_INT;
}
flags = inb(base_offset + 1);
borrow = flags & QUAD8_FLAG_BT;
carry = !!(flags & QUAD8_FLAG_CT);
/* Borrow XOR Carry effectively doubles count range */
*val = (borrow ^ carry) << 24;
*val = 0;
/* Reset Byte Pointer; transfer Counter to Output Latch */
outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP | QUAD8_RLD_CNTR_OUT,

@@ -74,8 +74,8 @@ static int cio_dac_write_raw(struct iio_dev *indio_dev,
if (mask != IIO_CHAN_INFO_RAW)
return -EINVAL;
/* DAC can only accept up to a 16-bit value */
if ((unsigned int)val > 65535)
/* DAC can only accept up to a 12-bit value */
if ((unsigned int)val > 4095)
return -EINVAL;
priv->chan_out_states[chan->channel] = val;

@@ -855,8 +855,8 @@ static void alps_process_packet_v6(struct psmouse *psmouse)
x = y = z = 0;
/* Divide 4 since trackpoint's speed is too fast */
input_report_rel(dev2, REL_X, (char)x / 4);
input_report_rel(dev2, REL_Y, -((char)y / 4));
input_report_rel(dev2, REL_X, (s8)x / 4);
input_report_rel(dev2, REL_Y, -((s8)y / 4));
psmouse_report_standard_buttons(dev2, packet[3]);
@@ -1107,8 +1107,8 @@ static void alps_process_trackstick_packet_v7(struct psmouse *psmouse)
((packet[3] & 0x20) << 1);
z = (packet[5] & 0x3f) | ((packet[3] & 0x80) >> 1);
input_report_rel(dev2, REL_X, (char)x);
input_report_rel(dev2, REL_Y, -((char)y));
input_report_rel(dev2, REL_X, (s8)x);
input_report_rel(dev2, REL_Y, -((s8)y));
input_report_abs(dev2, ABS_PRESSURE, z);
psmouse_report_standard_buttons(dev2, packet[1]);
@@ -2297,20 +2297,20 @@ static int alps_get_v3_v7_resolution(struct psmouse *psmouse, int reg_pitch)
if (reg < 0)
return reg;
x_pitch = (char)(reg << 4) >> 4; /* sign extend lower 4 bits */
x_pitch = (s8)(reg << 4) >> 4; /* sign extend lower 4 bits */
x_pitch = 50 + 2 * x_pitch; /* In 0.1 mm units */
y_pitch = (char)reg >> 4; /* sign extend upper 4 bits */
y_pitch = (s8)reg >> 4; /* sign extend upper 4 bits */
y_pitch = 36 + 2 * y_pitch; /* In 0.1 mm units */
reg = alps_command_mode_read_reg(psmouse, reg_pitch + 1);
if (reg < 0)
return reg;
x_electrode = (char)(reg << 4) >> 4; /* sign extend lower 4 bits */
x_electrode = (s8)(reg << 4) >> 4; /* sign extend lower 4 bits */
x_electrode = 17 + x_electrode;
y_electrode = (char)reg >> 4; /* sign extend upper 4 bits */
y_electrode = (s8)reg >> 4; /* sign extend upper 4 bits */
y_electrode = 13 + y_electrode;
x_phys = x_pitch * (x_electrode - 1); /* In 0.1 mm units */

@@ -206,8 +206,8 @@ static void focaltech_process_rel_packet(struct psmouse *psmouse,
state->pressed = packet[0] >> 7;
finger1 = ((packet[0] >> 4) & 0x7) - 1;
if (finger1 < FOC_MAX_FINGERS) {
state->fingers[finger1].x += (char)packet[1];
state->fingers[finger1].y += (char)packet[2];
state->fingers[finger1].x += (s8)packet[1];
state->fingers[finger1].y += (s8)packet[2];
} else {
psmouse_err(psmouse, "First finger in rel packet invalid: %d\n",
finger1);
@@ -222,8 +222,8 @@
*/
finger2 = ((packet[3] >> 4) & 0x7) - 1;
if (finger2 < FOC_MAX_FINGERS) {
state->fingers[finger2].x += (char)packet[4];
state->fingers[finger2].y += (char)packet[5];
state->fingers[finger2].x += (s8)packet[4];
state->fingers[finger2].y += (s8)packet[5];
}
}

@@ -605,6 +605,14 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
},
.driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
/* Fujitsu Lifebook A574/H */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
DMI_MATCH(DMI_PRODUCT_NAME, "FMVA0501PZ"),
},
.driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
/* Gigabyte M912 */
.matches = {

@@ -1661,6 +1661,7 @@ pop_from_list:
io = crypt_io_from_node(rb_first(&write_tree));
rb_erase(&io->rb_node, &write_tree);
kcryptd_io_write(io);
cond_resched();
} while (!RB_EMPTY_ROOT(&write_tree));
blk_finish_plug(&plug);
}

@@ -188,7 +188,7 @@ static int dm_stat_in_flight(struct dm_stat_shared *shared)
atomic_read(&shared->in_flight[WRITE]);
}
void dm_stats_init(struct dm_stats *stats)
int dm_stats_init(struct dm_stats *stats)
{
int cpu;
struct dm_stats_last_position *last;
@@ -196,11 +196,16 @@ void dm_stats_init(struct dm_stats *stats)
mutex_init(&stats->mutex);
INIT_LIST_HEAD(&stats->list);
stats->last = alloc_percpu(struct dm_stats_last_position);
if (!stats->last)
return -ENOMEM;
for_each_possible_cpu(cpu) {
last = per_cpu_ptr(stats->last, cpu);
last->last_sector = (sector_t)ULLONG_MAX;
last->last_rw = UINT_MAX;
}
return 0;
}
void dm_stats_cleanup(struct dm_stats *stats)

@@ -22,7 +22,7 @@ struct dm_stats_aux {
unsigned long long duration_ns;
};
void dm_stats_init(struct dm_stats *st);
int dm_stats_init(struct dm_stats *st);
void dm_stats_cleanup(struct dm_stats *st);
struct mapped_device;

@@ -1971,7 +1971,9 @@ static struct mapped_device *alloc_dev(int minor)
bio_set_dev(&md->flush_bio, md->bdev);
md->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;
dm_stats_init(&md->stats);
r = dm_stats_init(&md->stats);
if (r < 0)
goto bad;
/* Populate the mapping, nobody knows we exist yet */
spin_lock(&_minor_lock);

@@ -2991,6 +2991,9 @@ slot_store(struct md_rdev *rdev, const char *buf, size_t len)
err = kstrtouint(buf, 10, (unsigned int *)&slot);
if (err < 0)
return err;
if (slot < 0)
/* overflow */
return -ENOSPC;
}
if (rdev->mddev->pers && slot == -1) {
/* Setting 'slot' on an active array requires also

@@ -416,6 +416,7 @@ static struct memstick_dev *memstick_alloc_card(struct memstick_host *host)
return card;
err_out:
host->card = old_card;
kfree_const(card->dev.kobj.name);
kfree(card);
return NULL;
}
@@ -471,8 +472,10 @@ static void memstick_check(struct work_struct *work)
put_device(&card->dev);
host->card = NULL;
}
} else
} else {
kfree_const(card->dev.kobj.name);
kfree(card);
}
}
out_power_off:

@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2014-2015, 2017-2019, The Linux Foundation. All rights reserved. */
/* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. */
#include <linux/kernel.h>
#include <linux/module.h>
@@ -143,8 +144,11 @@ static int pmic_spmi_probe(struct spmi_device *sdev)
MODULE_DEVICE_TABLE(of, pmic_spmi_id_table);
static void pmic_spmi_remove(struct spmi_device *sdev) {}
static struct spmi_driver pmic_spmi_driver = {
.probe = pmic_spmi_probe,
.remove = pmic_spmi_remove,
.driver = {
.name = "pmic-spmi",
.of_match_table = pmic_spmi_id_table,

@@ -164,7 +164,7 @@ static int do_cached_write (struct mtdblk_dev *mtdblk, unsigned long pos,
mtdblk->cache_state = STATE_EMPTY;
ret = mtd_read(mtd, sect_start, sect_size,
&retlen, mtdblk->cache_data);
if (ret)
if (ret && !mtd_is_bitflip(ret))
return ret;
if (retlen != sect_size)
return -EIO;
@@ -199,8 +199,12 @@ static int do_cached_read (struct mtdblk_dev *mtdblk, unsigned long pos,
pr_debug("mtdblock: read on \"%s\" at 0x%lx, size 0x%x\n",
mtd->name, pos, len);
if (!sect_size)
return mtd_read(mtd, pos, len, &retlen, buf);
if (!sect_size) {
ret = mtd_read(mtd, pos, len, &retlen, buf);
if (ret && !mtd_is_bitflip(ret))
return ret;
return 0;
}
while (len > 0) {
unsigned long sect_start = (pos/sect_size)*sect_size;
@@ -220,7 +224,7 @@ static int do_cached_read (struct mtdblk_dev *mtdblk, unsigned long pos,
memcpy (buf, mtdblk->cache_data + offset, size);
} else {
ret = mtd_read(mtd, pos, size, &retlen, buf);
if (ret)
if (ret && !mtd_is_bitflip(ret))
return ret;
if (retlen != size)
return -EIO;

@@ -657,12 +657,6 @@ static int io_init(struct ubi_device *ubi, int max_beb_per1024)
ubi->ec_hdr_alsize = ALIGN(UBI_EC_HDR_SIZE, ubi->hdrs_min_io_size);
ubi->vid_hdr_alsize = ALIGN(UBI_VID_HDR_SIZE, ubi->hdrs_min_io_size);
if (ubi->vid_hdr_offset && ((ubi->vid_hdr_offset + UBI_VID_HDR_SIZE) >
ubi->vid_hdr_alsize)) {
ubi_err(ubi, "VID header offset %d too large.", ubi->vid_hdr_offset);
return -EINVAL;
}
dbg_gen("min_io_size %d", ubi->min_io_size);
dbg_gen("max_write_size %d", ubi->max_write_size);
dbg_gen("hdrs_min_io_size %d", ubi->hdrs_min_io_size);
@@ -680,6 +674,21 @@ static int io_init(struct ubi_device *ubi, int max_beb_per1024)
ubi->vid_hdr_aloffset;
}
/*
* Memory allocation for VID header is ubi->vid_hdr_alsize
* which is described in comments in io.c.
* Make sure VID header shift + UBI_VID_HDR_SIZE not exceeds
* ubi->vid_hdr_alsize, so that all vid header operations
* won't access memory out of bounds.
*/
if ((ubi->vid_hdr_shift + UBI_VID_HDR_SIZE) > ubi->vid_hdr_alsize) {
ubi_err(ubi, "Invalid VID header offset %d, VID header shift(%d)"
" + VID header size(%zu) > VID header aligned size(%d).",
ubi->vid_hdr_offset, ubi->vid_hdr_shift,
UBI_VID_HDR_SIZE, ubi->vid_hdr_alsize);
return -EINVAL;
}
/* Similar for the data offset */
ubi->leb_start = ubi->vid_hdr_offset + UBI_VID_HDR_SIZE;
ubi->leb_start = ALIGN(ubi->leb_start, ubi->min_io_size);

@@ -568,6 +568,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
* @vol_id: the volume ID that last used this PEB
* @lnum: the last used logical eraseblock number for the PEB
* @torture: if the physical eraseblock has to be tortured
* @nested: denotes whether the work_sem is already held
*
* This function returns zero in case of success and a %-ENOMEM in case of
* failure.
@@ -1046,8 +1047,6 @@ out_unlock:
* __erase_worker - physical eraseblock erase worker function.
* @ubi: UBI device description object
* @wl_wrk: the work object
* @shutdown: non-zero if the worker has to free memory and exit
* because the WL sub-system is shutting down
*
* This function erases a physical eraseblock and perform torture testing if
* needed. It also takes care about marking the physical eraseblock bad if
@@ -1097,7 +1096,7 @@ static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk)
int err1;
/* Re-schedule the LEB for erasure */
err1 = schedule_erase(ubi, e, vol_id, lnum, 0, false);
err1 = schedule_erase(ubi, e, vol_id, lnum, 0, true);
if (err1) {
spin_lock(&ubi->wl_lock);
wl_entry_destroy(ubi, e);

@@ -215,6 +215,18 @@ static int b53_mmap_write64(struct b53_device *dev, u8 page, u8 reg,
return 0;
}
static int b53_mmap_phy_read16(struct b53_device *dev, int addr, int reg,
u16 *value)
{
return -EIO;
}
static int b53_mmap_phy_write16(struct b53_device *dev, int addr, int reg,
u16 value)
{
return -EIO;
}
static const struct b53_io_ops b53_mmap_ops = {
.read8 = b53_mmap_read8,
.read16 = b53_mmap_read16,
@@ -226,6 +238,8 @@ static const struct b53_io_ops b53_mmap_ops = {
.write32 = b53_mmap_write32,
.write48 = b53_mmap_write48,
.write64 = b53_mmap_write64,
.phy_read16 = b53_mmap_phy_read16,
.phy_write16 = b53_mmap_phy_write16,
};
static int b53_mmap_probe(struct platform_device *pdev)

@@ -2237,9 +2237,14 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
* If this is the upstream port for this switch, enable
* forwarding of unknown unicasts and multicasts.
*/
reg = MV88E6XXX_PORT_CTL0_IGMP_MLD_SNOOP |
MV88E6185_PORT_CTL0_USE_TAG | MV88E6185_PORT_CTL0_USE_IP |
reg = MV88E6185_PORT_CTL0_USE_TAG | MV88E6185_PORT_CTL0_USE_IP |
MV88E6XXX_PORT_CTL0_STATE_FORWARDING;
/* Forward any IPv4 IGMP or IPv6 MLD frames received
* by a USER port to the CPU port to allow snooping.
*/
if (dsa_is_user_port(ds, port))
reg |= MV88E6XXX_PORT_CTL0_IGMP_MLD_SNOOP;
err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL0, reg);
if (err)
return err;

@@ -707,6 +707,10 @@ static dma_addr_t macb_get_addr(struct macb *bp, struct macb_dma_desc *desc)
}
#endif
addr |= MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
#ifdef CONFIG_MACB_USE_HWSTAMP
if (bp->hw_dma_cap & HW_DMA_CAP_PTP)
addr &= ~GEM_BIT(DMA_RXVALID);
#endif
return addr;
}

@@ -235,20 +235,27 @@ config I40E_DCB
If unsure, say N.
# this is here to allow seamless migration from I40EVF --> IAVF name
# so that CONFIG_IAVF symbol will always mirror the state of CONFIG_I40EVF
config IAVF
tristate
config I40EVF
tristate "Intel(R) Ethernet Adaptive Virtual Function support"
select IAVF
depends on PCI_MSI
---help---
This driver supports virtual functions for Intel XL710,
X710, X722, and all devices advertising support for Intel
Ethernet Adaptive Virtual Function devices. For more
X710, X722, XXV710, and all devices advertising support for
Intel Ethernet Adaptive Virtual Function devices. For more
information on how to identify your adapter, go to the Adapter
& Driver ID Guide that can be located at:
<http://support.intel.com>
<https://support.intel.com>
This driver was formerly named i40evf.
To compile this driver as a module, choose M here. The module
will be called i40evf. MSI-X interrupt support is required
will be called iavf. MSI-X interrupt support is required
for this driver to work correctly.
config ICE

@@ -12,6 +12,6 @@ obj-$(CONFIG_IXGBE) += ixgbe/
obj-$(CONFIG_IXGBEVF) += ixgbevf/
obj-$(CONFIG_I40E) += i40e/
obj-$(CONFIG_IXGB) += ixgb/
obj-$(CONFIG_I40EVF) += i40evf/
obj-$(CONFIG_IAVF) += iavf/
obj-$(CONFIG_FM10K) += fm10k/
obj-$(CONFIG_ICE) += ice/

@@ -5230,31 +5230,6 @@ static void e1000_watchdog_task(struct work_struct *work)
ew32(TARC(0), tarc0);
}
/* disable TSO for pcie and 10/100 speeds, to avoid
* some hardware issues
*/
if (!(adapter->flags & FLAG_TSO_FORCE)) {
switch (adapter->link_speed) {
case SPEED_10:
case SPEED_100:
e_info("10/100 speed: disabling TSO\n");
netdev->features &= ~NETIF_F_TSO;
netdev->features &= ~NETIF_F_TSO6;
break;
case SPEED_1000:
netdev->features |= NETIF_F_TSO;
netdev->features |= NETIF_F_TSO6;
break;
default:
/* oops */
break;
}
if (hw->mac.type == e1000_pch_spt) {
netdev->features &= ~NETIF_F_TSO;
netdev->features &= ~NETIF_F_TSO6;
}
}
/* enable transmits in the hardware, need to do this
* after setting TARC(0)
*/
@@ -7191,6 +7166,32 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
NETIF_F_RXCSUM |
NETIF_F_HW_CSUM);
/* disable TSO for pcie and 10/100 speeds to avoid
* some hardware issues and for i219 to fix transfer
* speed being capped at 60%
*/
if (!(adapter->flags & FLAG_TSO_FORCE)) {
switch (adapter->link_speed) {
case SPEED_10:
case SPEED_100:
e_info("10/100 speed: disabling TSO\n");
netdev->features &= ~NETIF_F_TSO;
netdev->features &= ~NETIF_F_TSO6;
break;
case SPEED_1000:
netdev->features |= NETIF_F_TSO;
netdev->features |= NETIF_F_TSO6;
break;
default:
/* oops */
break;
}
if (hw->mac.type == e1000_pch_spt) {
netdev->features &= ~NETIF_F_TSO;
netdev->features &= ~NETIF_F_TSO6;
}
}
/* Set user-changeable features (subset of all device features) */
netdev->hw_features = netdev->features;
netdev->hw_features |= NETIF_F_RXFCS;

@@ -44,7 +44,7 @@ static i40e_status i40e_diag_reg_pattern_test(struct i40e_hw *hw,
return 0;
}
struct i40e_diag_reg_test_info i40e_reg_list[] = {
const struct i40e_diag_reg_test_info i40e_reg_list[] = {
/* offset mask elements stride */
{I40E_QTX_CTL(0), 0x0000FFBF, 1,
I40E_QTX_CTL(1) - I40E_QTX_CTL(0)},
@@ -78,27 +78,28 @@ i40e_status i40e_diag_reg_test(struct i40e_hw *hw)
{
i40e_status ret_code = 0;
u32 reg, mask;
u32 elements;
u32 i, j;
for (i = 0; i40e_reg_list[i].offset != 0 &&
!ret_code; i++) {
elements = i40e_reg_list[i].elements;
/* set actual reg range for dynamically allocated resources */
if (i40e_reg_list[i].offset == I40E_QTX_CTL(0) &&
hw->func_caps.num_tx_qp != 0)
i40e_reg_list[i].elements = hw->func_caps.num_tx_qp;
elements = hw->func_caps.num_tx_qp;
if ((i40e_reg_list[i].offset == I40E_PFINT_ITRN(0, 0) ||
i40e_reg_list[i].offset == I40E_PFINT_ITRN(1, 0) ||
i40e_reg_list[i].offset == I40E_PFINT_ITRN(2, 0) ||
i40e_reg_list[i].offset == I40E_QINT_TQCTL(0) ||
i40e_reg_list[i].offset == I40E_QINT_RQCTL(0)) &&
hw->func_caps.num_msix_vectors != 0)
i40e_reg_list[i].elements =
hw->func_caps.num_msix_vectors - 1;
elements = hw->func_caps.num_msix_vectors - 1;
/* test register access */
mask = i40e_reg_list[i].mask;
for (j = 0; j < i40e_reg_list[i].elements && !ret_code; j++) {
for (j = 0; j < elements && !ret_code; j++) {
reg = i40e_reg_list[i].offset +
(j * i40e_reg_list[i].stride);
ret_code = i40e_diag_reg_pattern_test(hw, reg, mask);

@@ -20,7 +20,7 @@ struct i40e_diag_reg_test_info {
u32 stride; /* bytes between each element */
};
extern struct i40e_diag_reg_test_info i40e_reg_list[];
extern const struct i40e_diag_reg_test_info i40e_reg_list[];
i40e_status i40e_diag_reg_test(struct i40e_hw *hw);
i40e_status i40e_diag_eeprom_test(struct i40e_hw *hw);

@@ -9702,8 +9702,11 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
pf->hw.aq.asq_last_status));
}
/* reinit the misc interrupt */
if (pf->flags & I40E_FLAG_MSIX_ENABLED)
if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
ret = i40e_setup_misc_vector(pf);
if (ret)
goto end_unlock;
}
/* Add a filter to drop all Flow control frames from any VSI from being
* transmitted. By doing so we stop a malicious VF from sending out
@@ -12484,15 +12487,15 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
vsi->id = ctxt.vsi_number;
}
vsi->active_filters = 0;
clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
spin_lock_bh(&vsi->mac_filter_hash_lock);
vsi->active_filters = 0;
/* If macvlan filters already exist, force them to get loaded */
hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
f->state = I40E_FILTER_NEW;
f_count++;
}
spin_unlock_bh(&vsi->mac_filter_hash_lock);
clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
if (f_count) {
vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;

@@ -2595,7 +2595,7 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf,
!is_multicast_ether_addr(addr) && vf->pf_set_mac &&
!ether_addr_equal(addr, vf->default_lan_addr.addr)) {
dev_err(&pf->pdev->dev,
"VF attempting to override administratively set MAC address, reload the VF driver to resume normal operation\n");
"VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
return -EPERM;
}
}
@@ -4019,9 +4019,11 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
mac, vf_id);
}
/* Force the VF driver stop so it has to reload with new MAC address */
/* Force the VF interface down so it has to bring up with new MAC
* address
*/
i40e_vc_disable_vf(vf);
dev_info(&pf->pdev->dev, "Reload the VF driver to make this change effective.\n");
dev_info(&pf->pdev->dev, "Bring down and up the VF interface to make this change effective.\n");
error_param:
return ret;

@@ -1,16 +0,0 @@
# SPDX-License-Identifier: GPL-2.0
# Copyright(c) 2013 - 2018 Intel Corporation.
#
## Makefile for the Intel(R) 40GbE VF driver
#
#
ccflags-y += -I$(src)
subdir-ccflags-y += -I$(src)
obj-$(CONFIG_I40EVF) += i40evf.o
i40evf-objs := i40evf_main.o i40evf_ethtool.o i40evf_virtchnl.o \
i40e_txrx.o i40e_common.o i40e_adminq.o i40evf_client.o

File diff suppressed because it is too large.

@@ -1,215 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2013 - 2018 Intel Corporation. */
#ifndef _I40E_HMC_H_
#define _I40E_HMC_H_
#define I40E_HMC_MAX_BP_COUNT 512
/* forward-declare the HW struct for the compiler */
struct i40e_hw;
#define I40E_HMC_INFO_SIGNATURE 0x484D5347 /* HMSG */
#define I40E_HMC_PD_CNT_IN_SD 512
#define I40E_HMC_DIRECT_BP_SIZE 0x200000 /* 2M */
#define I40E_HMC_PAGED_BP_SIZE 4096
#define I40E_HMC_PD_BP_BUF_ALIGNMENT 4096
#define I40E_FIRST_VF_FPM_ID 16
struct i40e_hmc_obj_info {
u64 base; /* base addr in FPM */
u32 max_cnt; /* max count available for this hmc func */
u32 cnt; /* count of objects driver actually wants to create */
u64 size; /* size in bytes of one object */
};
enum i40e_sd_entry_type {
I40E_SD_TYPE_INVALID = 0,
I40E_SD_TYPE_PAGED = 1,
I40E_SD_TYPE_DIRECT = 2
};
struct i40e_hmc_bp {
enum i40e_sd_entry_type entry_type;
struct i40e_dma_mem addr; /* populate to be used by hw */
u32 sd_pd_index;
u32 ref_cnt;
};
struct i40e_hmc_pd_entry {
struct i40e_hmc_bp bp;
u32 sd_index;
bool rsrc_pg;
bool valid;
};
struct i40e_hmc_pd_table {
struct i40e_dma_mem pd_page_addr; /* populate to be used by hw */
struct i40e_hmc_pd_entry *pd_entry; /* [512] for sw book keeping */
struct i40e_virt_mem pd_entry_virt_mem; /* virt mem for pd_entry */
u32 ref_cnt;
u32 sd_index;
};
struct i40e_hmc_sd_entry {
enum i40e_sd_entry_type entry_type;
bool valid;
union {
struct i40e_hmc_pd_table pd_table;
struct i40e_hmc_bp bp;
} u;
};
struct i40e_hmc_sd_table {
struct i40e_virt_mem addr; /* used to track sd_entry allocations */
u32 sd_cnt;
u32 ref_cnt;
struct i40e_hmc_sd_entry *sd_entry; /* (sd_cnt*512) entries max */
};
struct i40e_hmc_info {
u32 signature;
/* equals to pci func num for PF and dynamically allocated for VFs */
u8 hmc_fn_id;
u16 first_sd_index; /* index of the first available SD */
/* hmc objects */
struct i40e_hmc_obj_info *hmc_obj;
struct i40e_virt_mem hmc_obj_virt_mem;
struct i40e_hmc_sd_table sd_table;
};
#define I40E_INC_SD_REFCNT(sd_table) ((sd_table)->ref_cnt++)
#define I40E_INC_PD_REFCNT(pd_table) ((pd_table)->ref_cnt++)
#define I40E_INC_BP_REFCNT(bp) ((bp)->ref_cnt++)
#define I40E_DEC_SD_REFCNT(sd_table) ((sd_table)->ref_cnt--)
#define I40E_DEC_PD_REFCNT(pd_table) ((pd_table)->ref_cnt--)
#define I40E_DEC_BP_REFCNT(bp) ((bp)->ref_cnt--)
/**
* I40E_SET_PF_SD_ENTRY - marks the sd entry as valid in the hardware
* @hw: pointer to our hw struct
* @pa: pointer to physical address
* @sd_index: segment descriptor index
* @type: if sd entry is direct or paged
**/
#define I40E_SET_PF_SD_ENTRY(hw, pa, sd_index, type) \
{ \
u32 val1, val2, val3; \
val1 = (u32)(upper_32_bits(pa)); \
val2 = (u32)(pa) | (I40E_HMC_MAX_BP_COUNT << \
I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | \
((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) << \
I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT) | \
BIT(I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT); \
val3 = (sd_index) | BIT_ULL(I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \
wr32((hw), I40E_PFHMC_SDDATAHIGH, val1); \
wr32((hw), I40E_PFHMC_SDDATALOW, val2); \
wr32((hw), I40E_PFHMC_SDCMD, val3); \
}
/**
* I40E_CLEAR_PF_SD_ENTRY - marks the sd entry as invalid in the hardware
* @hw: pointer to our hw struct
* @sd_index: segment descriptor index
* @type: if sd entry is direct or paged
**/
#define I40E_CLEAR_PF_SD_ENTRY(hw, sd_index, type) \
{ \
u32 val2, val3; \
val2 = (I40E_HMC_MAX_BP_COUNT << \
I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | \
((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) << \
I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT); \
val3 = (sd_index) | BIT_ULL(I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \
wr32((hw), I40E_PFHMC_SDDATAHIGH, 0); \
wr32((hw), I40E_PFHMC_SDDATALOW, val2); \
wr32((hw), I40E_PFHMC_SDCMD, val3); \
}
/**
* I40E_INVALIDATE_PF_HMC_PD - Invalidates the pd cache in the hardware
* @hw: pointer to our hw struct
* @sd_idx: segment descriptor index
* @pd_idx: page descriptor index
**/
#define I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, pd_idx) \
wr32((hw), I40E_PFHMC_PDINV, \
(((sd_idx) << I40E_PFHMC_PDINV_PMSDIDX_SHIFT) | \
((pd_idx) << I40E_PFHMC_PDINV_PMPDIDX_SHIFT)))
/**
* I40E_FIND_SD_INDEX_LIMIT - finds segment descriptor index limit
* @hmc_info: pointer to the HMC configuration information structure
* @type: type of HMC resources we're searching
* @index: starting index for the object
* @cnt: number of objects we're trying to create
* @sd_idx: pointer to return index of the segment descriptor in question
* @sd_limit: pointer to return the maximum number of segment descriptors
*
* This function calculates the segment descriptor index and index limit
* for the resource defined by i40e_hmc_rsrc_type.
**/
#define I40E_FIND_SD_INDEX_LIMIT(hmc_info, type, index, cnt, sd_idx, sd_limit)\
{ \
u64 fpm_addr, fpm_limit; \
fpm_addr = (hmc_info)->hmc_obj[(type)].base + \
(hmc_info)->hmc_obj[(type)].size * (index); \
fpm_limit = fpm_addr + (hmc_info)->hmc_obj[(type)].size * (cnt);\
*(sd_idx) = (u32)(fpm_addr / I40E_HMC_DIRECT_BP_SIZE); \
*(sd_limit) = (u32)((fpm_limit - 1) / I40E_HMC_DIRECT_BP_SIZE); \
/* add one more to the limit to correct our range */ \
*(sd_limit) += 1; \
}
/**
* I40E_FIND_PD_INDEX_LIMIT - finds page descriptor index limit
* @hmc_info: pointer to the HMC configuration information struct
* @type: HMC resource type we're examining
* @idx: starting index for the object
* @cnt: number of objects we're trying to create
* @pd_index: pointer to return page descriptor index
* @pd_limit: pointer to return page descriptor index limit
*
* Calculates the page descriptor index and index limit for the resource
* defined by i40e_hmc_rsrc_type.
**/
#define I40E_FIND_PD_INDEX_LIMIT(hmc_info, type, idx, cnt, pd_index, pd_limit)\
{ \
u64 fpm_adr, fpm_limit; \
fpm_adr = (hmc_info)->hmc_obj[(type)].base + \
(hmc_info)->hmc_obj[(type)].size * (idx); \
fpm_limit = fpm_adr + (hmc_info)->hmc_obj[(type)].size * (cnt); \
*(pd_index) = (u32)(fpm_adr / I40E_HMC_PAGED_BP_SIZE); \
*(pd_limit) = (u32)((fpm_limit - 1) / I40E_HMC_PAGED_BP_SIZE); \
/* add one more to the limit to correct our range */ \
*(pd_limit) += 1; \
}
i40e_status i40e_add_sd_table_entry(struct i40e_hw *hw,
struct i40e_hmc_info *hmc_info,
u32 sd_index,
enum i40e_sd_entry_type type,
u64 direct_mode_sz);
i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw,
struct i40e_hmc_info *hmc_info,
u32 pd_index,
struct i40e_dma_mem *rsrc_pg);
i40e_status i40e_remove_pd_bp(struct i40e_hw *hw,
struct i40e_hmc_info *hmc_info,
u32 idx);
i40e_status i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info,
u32 idx);
i40e_status i40e_remove_sd_bp_new(struct i40e_hw *hw,
struct i40e_hmc_info *hmc_info,
u32 idx, bool is_pf);
i40e_status i40e_prep_remove_pd_page(struct i40e_hmc_info *hmc_info,
u32 idx);
i40e_status i40e_remove_pd_page_new(struct i40e_hw *hw,
struct i40e_hmc_info *hmc_info,
u32 idx, bool is_pf);
#endif /* _I40E_HMC_H_ */


@@ -1,158 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2013 - 2018 Intel Corporation. */
#ifndef _I40E_LAN_HMC_H_
#define _I40E_LAN_HMC_H_
/* forward-declare the HW struct for the compiler */
struct i40e_hw;
/* HMC element context information */
/* Rx queue context data
*
* The sizes of the variables may be larger than needed due to crossing byte
* boundaries. If we do not have the width of the variable set to the correct
* size then we could end up shifting bits off the top of the variable when the
* variable is at the top of a byte and crosses over into the next byte.
*/
struct i40e_hmc_obj_rxq {
u16 head;
u16 cpuid; /* bigger than needed, see above for reason */
u64 base;
u16 qlen;
#define I40E_RXQ_CTX_DBUFF_SHIFT 7
u16 dbuff; /* bigger than needed, see above for reason */
#define I40E_RXQ_CTX_HBUFF_SHIFT 6
u16 hbuff; /* bigger than needed, see above for reason */
u8 dtype;
u8 dsize;
u8 crcstrip;
u8 fc_ena;
u8 l2tsel;
u8 hsplit_0;
u8 hsplit_1;
u8 showiv;
u32 rxmax; /* bigger than needed, see above for reason */
u8 tphrdesc_ena;
u8 tphwdesc_ena;
u8 tphdata_ena;
u8 tphhead_ena;
u16 lrxqthresh; /* bigger than needed, see above for reason */
u8 prefena; /* NOTE: normally must be set to 1 at init */
};
/* Tx queue context data
*
* The sizes of the variables may be larger than needed due to crossing byte
* boundaries. If we do not have the width of the variable set to the correct
* size then we could end up shifting bits off the top of the variable when the
* variable is at the top of a byte and crosses over into the next byte.
*/
struct i40e_hmc_obj_txq {
u16 head;
u8 new_context;
u64 base;
u8 fc_ena;
u8 timesync_ena;
u8 fd_ena;
u8 alt_vlan_ena;
u16 thead_wb;
u8 cpuid;
u8 head_wb_ena;
u16 qlen;
u8 tphrdesc_ena;
u8 tphrpacket_ena;
u8 tphwdesc_ena;
u64 head_wb_addr;
u32 crc;
u16 rdylist;
u8 rdylist_act;
};
/* for hsplit_0 field of Rx HMC context */
enum i40e_hmc_obj_rx_hsplit_0 {
I40E_HMC_OBJ_RX_HSPLIT_0_NO_SPLIT = 0,
I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_L2 = 1,
I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_IP = 2,
I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_TCP_UDP = 4,
I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_SCTP = 8,
};
/* fcoe_cntx and fcoe_filt are for debugging purpose only */
struct i40e_hmc_obj_fcoe_cntx {
u32 rsv[32];
};
struct i40e_hmc_obj_fcoe_filt {
u32 rsv[8];
};
/* Context sizes for LAN objects */
enum i40e_hmc_lan_object_size {
I40E_HMC_LAN_OBJ_SZ_8 = 0x3,
I40E_HMC_LAN_OBJ_SZ_16 = 0x4,
I40E_HMC_LAN_OBJ_SZ_32 = 0x5,
I40E_HMC_LAN_OBJ_SZ_64 = 0x6,
I40E_HMC_LAN_OBJ_SZ_128 = 0x7,
I40E_HMC_LAN_OBJ_SZ_256 = 0x8,
I40E_HMC_LAN_OBJ_SZ_512 = 0x9,
};
#define I40E_HMC_L2OBJ_BASE_ALIGNMENT 512
#define I40E_HMC_OBJ_SIZE_TXQ 128
#define I40E_HMC_OBJ_SIZE_RXQ 32
#define I40E_HMC_OBJ_SIZE_FCOE_CNTX 128
#define I40E_HMC_OBJ_SIZE_FCOE_FILT 64
enum i40e_hmc_lan_rsrc_type {
I40E_HMC_LAN_FULL = 0,
I40E_HMC_LAN_TX = 1,
I40E_HMC_LAN_RX = 2,
I40E_HMC_FCOE_CTX = 3,
I40E_HMC_FCOE_FILT = 4,
I40E_HMC_LAN_MAX = 5
};
enum i40e_hmc_model {
I40E_HMC_MODEL_DIRECT_PREFERRED = 0,
I40E_HMC_MODEL_DIRECT_ONLY = 1,
I40E_HMC_MODEL_PAGED_ONLY = 2,
I40E_HMC_MODEL_UNKNOWN,
};
struct i40e_hmc_lan_create_obj_info {
struct i40e_hmc_info *hmc_info;
u32 rsrc_type;
u32 start_idx;
u32 count;
enum i40e_sd_entry_type entry_type;
u64 direct_mode_sz;
};
struct i40e_hmc_lan_delete_obj_info {
struct i40e_hmc_info *hmc_info;
u32 rsrc_type;
u32 start_idx;
u32 count;
};
i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
u32 rxq_num, u32 fcoe_cntx_num,
u32 fcoe_filt_num);
i40e_status i40e_configure_lan_hmc(struct i40e_hw *hw,
enum i40e_hmc_model model);
i40e_status i40e_shutdown_lan_hmc(struct i40e_hw *hw);
i40e_status i40e_clear_lan_tx_queue_context(struct i40e_hw *hw,
u16 queue);
i40e_status i40e_set_lan_tx_queue_context(struct i40e_hw *hw,
u16 queue,
struct i40e_hmc_obj_txq *s);
i40e_status i40e_clear_lan_rx_queue_context(struct i40e_hw *hw,
u16 queue);
i40e_status i40e_set_lan_rx_queue_context(struct i40e_hw *hw,
u16 queue,
struct i40e_hmc_obj_rxq *s);
#endif /* _I40E_LAN_HMC_H_ */


@@ -1,130 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2013 - 2018 Intel Corporation. */
#ifndef _I40E_PROTOTYPE_H_
#define _I40E_PROTOTYPE_H_
#include "i40e_type.h"
#include "i40e_alloc.h"
#include <linux/avf/virtchnl.h>
/* Prototypes for shared code functions that are not in
* the standard function pointer structures. These are
* mostly because they are needed even before the init
* has happened and will assist in the early SW and FW
* setup.
*/
/* adminq functions */
i40e_status i40evf_init_adminq(struct i40e_hw *hw);
i40e_status i40evf_shutdown_adminq(struct i40e_hw *hw);
void i40e_adminq_init_ring_data(struct i40e_hw *hw);
i40e_status i40evf_clean_arq_element(struct i40e_hw *hw,
struct i40e_arq_event_info *e,
u16 *events_pending);
i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
struct i40e_aq_desc *desc,
void *buff, /* can be NULL */
u16 buff_size,
struct i40e_asq_cmd_details *cmd_details);
bool i40evf_asq_done(struct i40e_hw *hw);
/* debug function for adminq */
void i40evf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask,
void *desc, void *buffer, u16 buf_len);
void i40e_idle_aq(struct i40e_hw *hw);
void i40evf_resume_aq(struct i40e_hw *hw);
bool i40evf_check_asq_alive(struct i40e_hw *hw);
i40e_status i40evf_aq_queue_shutdown(struct i40e_hw *hw, bool unloading);
const char *i40evf_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err);
const char *i40evf_stat_str(struct i40e_hw *hw, i40e_status stat_err);
i40e_status i40evf_aq_get_rss_lut(struct i40e_hw *hw, u16 seid,
bool pf_lut, u8 *lut, u16 lut_size);
i40e_status i40evf_aq_set_rss_lut(struct i40e_hw *hw, u16 seid,
bool pf_lut, u8 *lut, u16 lut_size);
i40e_status i40evf_aq_get_rss_key(struct i40e_hw *hw,
u16 seid,
struct i40e_aqc_get_set_rss_key_data *key);
i40e_status i40evf_aq_set_rss_key(struct i40e_hw *hw,
u16 seid,
struct i40e_aqc_get_set_rss_key_data *key);
i40e_status i40e_set_mac_type(struct i40e_hw *hw);
extern struct i40e_rx_ptype_decoded i40evf_ptype_lookup[];
static inline struct i40e_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype)
{
return i40evf_ptype_lookup[ptype];
}
/* prototype for functions used for SW locks */
/* i40e_common for VF drivers*/
void i40e_vf_parse_hw_config(struct i40e_hw *hw,
struct virtchnl_vf_resource *msg);
i40e_status i40e_vf_reset(struct i40e_hw *hw);
i40e_status i40e_aq_send_msg_to_pf(struct i40e_hw *hw,
enum virtchnl_ops v_opcode,
i40e_status v_retval,
u8 *msg, u16 msglen,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_set_filter_control(struct i40e_hw *hw,
struct i40e_filter_control_settings *settings);
i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
u8 *mac_addr, u16 ethtype, u16 flags,
u16 vsi_seid, u16 queue, bool is_add,
struct i40e_control_filter_stats *stats,
struct i40e_asq_cmd_details *cmd_details);
void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw,
u16 vsi_seid);
i40e_status i40evf_aq_rx_ctl_read_register(struct i40e_hw *hw,
u32 reg_addr, u32 *reg_val,
struct i40e_asq_cmd_details *cmd_details);
u32 i40evf_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr);
i40e_status i40evf_aq_rx_ctl_write_register(struct i40e_hw *hw,
u32 reg_addr, u32 reg_val,
struct i40e_asq_cmd_details *cmd_details);
void i40evf_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val);
i40e_status i40e_aq_set_phy_register(struct i40e_hw *hw,
u8 phy_select, u8 dev_addr,
u32 reg_addr, u32 reg_val,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_get_phy_register(struct i40e_hw *hw,
u8 phy_select, u8 dev_addr,
u32 reg_addr, u32 *reg_val,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_read_phy_register(struct i40e_hw *hw, u8 page,
u16 reg, u8 phy_addr, u16 *value);
i40e_status i40e_write_phy_register(struct i40e_hw *hw, u8 page,
u16 reg, u8 phy_addr, u16 value);
i40e_status i40e_read_phy_register(struct i40e_hw *hw, u8 page, u16 reg,
u8 phy_addr, u16 *value);
i40e_status i40e_write_phy_register(struct i40e_hw *hw, u8 page, u16 reg,
u8 phy_addr, u16 value);
u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num);
i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw,
u32 time, u32 interval);
i40e_status i40evf_aq_write_ddp(struct i40e_hw *hw, void *buff,
u16 buff_size, u32 track_id,
u32 *error_offset, u32 *error_info,
struct i40e_asq_cmd_details *
cmd_details);
i40e_status i40evf_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
u16 buff_size, u8 flags,
struct i40e_asq_cmd_details *
cmd_details);
struct i40e_generic_seg_header *
i40evf_find_segment_in_package(u32 segment_type,
struct i40e_package_header *pkg_header);
enum i40e_status_code
i40evf_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *i40e_seg,
u32 track_id);
enum i40e_status_code
i40evf_add_pinfo_to_list(struct i40e_hw *hw,
struct i40e_profile_segment *profile,
u8 *profile_info_sec, u32 track_id);
#endif /* _I40E_PROTOTYPE_H_ */


@@ -1,313 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2013 - 2018 Intel Corporation. */
#ifndef _I40E_REGISTER_H_
#define _I40E_REGISTER_H_
#define I40E_VFMSIX_PBA1(_i) (0x00002000 + ((_i) * 4)) /* _i=0...19 */ /* Reset: VFLR */
#define I40E_VFMSIX_PBA1_MAX_INDEX 19
#define I40E_VFMSIX_PBA1_PENBIT_SHIFT 0
#define I40E_VFMSIX_PBA1_PENBIT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_PBA1_PENBIT_SHIFT)
#define I40E_VFMSIX_TADD1(_i) (0x00002100 + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */
#define I40E_VFMSIX_TADD1_MAX_INDEX 639
#define I40E_VFMSIX_TADD1_MSIXTADD10_SHIFT 0
#define I40E_VFMSIX_TADD1_MSIXTADD10_MASK I40E_MASK(0x3, I40E_VFMSIX_TADD1_MSIXTADD10_SHIFT)
#define I40E_VFMSIX_TADD1_MSIXTADD_SHIFT 2
#define I40E_VFMSIX_TADD1_MSIXTADD_MASK I40E_MASK(0x3FFFFFFF, I40E_VFMSIX_TADD1_MSIXTADD_SHIFT)
#define I40E_VFMSIX_TMSG1(_i) (0x00002108 + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */
#define I40E_VFMSIX_TMSG1_MAX_INDEX 639
#define I40E_VFMSIX_TMSG1_MSIXTMSG_SHIFT 0
#define I40E_VFMSIX_TMSG1_MSIXTMSG_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TMSG1_MSIXTMSG_SHIFT)
#define I40E_VFMSIX_TUADD1(_i) (0x00002104 + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */
#define I40E_VFMSIX_TUADD1_MAX_INDEX 639
#define I40E_VFMSIX_TUADD1_MSIXTUADD_SHIFT 0
#define I40E_VFMSIX_TUADD1_MSIXTUADD_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TUADD1_MSIXTUADD_SHIFT)
#define I40E_VFMSIX_TVCTRL1(_i) (0x0000210C + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */
#define I40E_VFMSIX_TVCTRL1_MAX_INDEX 639
#define I40E_VFMSIX_TVCTRL1_MASK_SHIFT 0
#define I40E_VFMSIX_TVCTRL1_MASK_MASK I40E_MASK(0x1, I40E_VFMSIX_TVCTRL1_MASK_SHIFT)
#define I40E_VF_ARQBAH1 0x00006000 /* Reset: EMPR */
#define I40E_VF_ARQBAH1_ARQBAH_SHIFT 0
#define I40E_VF_ARQBAH1_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAH1_ARQBAH_SHIFT)
#define I40E_VF_ARQBAL1 0x00006C00 /* Reset: EMPR */
#define I40E_VF_ARQBAL1_ARQBAL_SHIFT 0
#define I40E_VF_ARQBAL1_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAL1_ARQBAL_SHIFT)
#define I40E_VF_ARQH1 0x00007400 /* Reset: EMPR */
#define I40E_VF_ARQH1_ARQH_SHIFT 0
#define I40E_VF_ARQH1_ARQH_MASK I40E_MASK(0x3FF, I40E_VF_ARQH1_ARQH_SHIFT)
#define I40E_VF_ARQLEN1 0x00008000 /* Reset: EMPR */
#define I40E_VF_ARQLEN1_ARQLEN_SHIFT 0
#define I40E_VF_ARQLEN1_ARQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ARQLEN1_ARQLEN_SHIFT)
#define I40E_VF_ARQLEN1_ARQVFE_SHIFT 28
#define I40E_VF_ARQLEN1_ARQVFE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQVFE_SHIFT)
#define I40E_VF_ARQLEN1_ARQOVFL_SHIFT 29
#define I40E_VF_ARQLEN1_ARQOVFL_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQOVFL_SHIFT)
#define I40E_VF_ARQLEN1_ARQCRIT_SHIFT 30
#define I40E_VF_ARQLEN1_ARQCRIT_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQCRIT_SHIFT)
#define I40E_VF_ARQLEN1_ARQENABLE_SHIFT 31
#define I40E_VF_ARQLEN1_ARQENABLE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQENABLE_SHIFT)
#define I40E_VF_ARQT1 0x00007000 /* Reset: EMPR */
#define I40E_VF_ARQT1_ARQT_SHIFT 0
#define I40E_VF_ARQT1_ARQT_MASK I40E_MASK(0x3FF, I40E_VF_ARQT1_ARQT_SHIFT)
#define I40E_VF_ATQBAH1 0x00007800 /* Reset: EMPR */
#define I40E_VF_ATQBAH1_ATQBAH_SHIFT 0
#define I40E_VF_ATQBAH1_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAH1_ATQBAH_SHIFT)
#define I40E_VF_ATQBAL1 0x00007C00 /* Reset: EMPR */
#define I40E_VF_ATQBAL1_ATQBAL_SHIFT 0
#define I40E_VF_ATQBAL1_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAL1_ATQBAL_SHIFT)
#define I40E_VF_ATQH1 0x00006400 /* Reset: EMPR */
#define I40E_VF_ATQH1_ATQH_SHIFT 0
#define I40E_VF_ATQH1_ATQH_MASK I40E_MASK(0x3FF, I40E_VF_ATQH1_ATQH_SHIFT)
#define I40E_VF_ATQLEN1 0x00006800 /* Reset: EMPR */
#define I40E_VF_ATQLEN1_ATQLEN_SHIFT 0
#define I40E_VF_ATQLEN1_ATQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ATQLEN1_ATQLEN_SHIFT)
#define I40E_VF_ATQLEN1_ATQVFE_SHIFT 28
#define I40E_VF_ATQLEN1_ATQVFE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQVFE_SHIFT)
#define I40E_VF_ATQLEN1_ATQOVFL_SHIFT 29
#define I40E_VF_ATQLEN1_ATQOVFL_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQOVFL_SHIFT)
#define I40E_VF_ATQLEN1_ATQCRIT_SHIFT 30
#define I40E_VF_ATQLEN1_ATQCRIT_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQCRIT_SHIFT)
#define I40E_VF_ATQLEN1_ATQENABLE_SHIFT 31
#define I40E_VF_ATQLEN1_ATQENABLE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQENABLE_SHIFT)
#define I40E_VF_ATQT1 0x00008400 /* Reset: EMPR */
#define I40E_VF_ATQT1_ATQT_SHIFT 0
#define I40E_VF_ATQT1_ATQT_MASK I40E_MASK(0x3FF, I40E_VF_ATQT1_ATQT_SHIFT)
#define I40E_VFGEN_RSTAT 0x00008800 /* Reset: VFR */
#define I40E_VFGEN_RSTAT_VFR_STATE_SHIFT 0
#define I40E_VFGEN_RSTAT_VFR_STATE_MASK I40E_MASK(0x3, I40E_VFGEN_RSTAT_VFR_STATE_SHIFT)
#define I40E_VFINT_DYN_CTL01 0x00005C00 /* Reset: VFR */
#define I40E_VFINT_DYN_CTL01_INTENA_SHIFT 0
#define I40E_VFINT_DYN_CTL01_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_INTENA_SHIFT)
#define I40E_VFINT_DYN_CTL01_CLEARPBA_SHIFT 1
#define I40E_VFINT_DYN_CTL01_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_CLEARPBA_SHIFT)
#define I40E_VFINT_DYN_CTL01_SWINT_TRIG_SHIFT 2
#define I40E_VFINT_DYN_CTL01_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_SWINT_TRIG_SHIFT)
#define I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT 3
#define I40E_VFINT_DYN_CTL01_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT)
#define I40E_VFINT_DYN_CTL01_INTERVAL_SHIFT 5
#define I40E_VFINT_DYN_CTL01_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTL01_INTERVAL_SHIFT)
#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_SHIFT 24
#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_SHIFT)
#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_SHIFT 25
#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL01_SW_ITR_INDX_SHIFT)
#define I40E_VFINT_DYN_CTL01_INTENA_MSK_SHIFT 31
#define I40E_VFINT_DYN_CTL01_INTENA_MSK_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_INTENA_MSK_SHIFT)
#define I40E_VFINT_DYN_CTLN1(_INTVF) (0x00003800 + ((_INTVF) * 4)) /* _i=0...15 */ /* Reset: VFR */
#define I40E_VFINT_DYN_CTLN1_MAX_INDEX 15
#define I40E_VFINT_DYN_CTLN1_INTENA_SHIFT 0
#define I40E_VFINT_DYN_CTLN1_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_INTENA_SHIFT)
#define I40E_VFINT_DYN_CTLN1_CLEARPBA_SHIFT 1
#define I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_CLEARPBA_SHIFT)
#define I40E_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT 2
#define I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT)
#define I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT 3
#define I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT)
#define I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT 5
#define I40E_VFINT_DYN_CTLN1_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT)
#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT 24
#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT)
#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_SHIFT 25
#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_SHIFT)
#define I40E_VFINT_DYN_CTLN1_INTENA_MSK_SHIFT 31
#define I40E_VFINT_DYN_CTLN1_INTENA_MSK_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_INTENA_MSK_SHIFT)
#define I40E_VFINT_ICR0_ENA1 0x00005000 /* Reset: CORER */
#define I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_SHIFT 25
#define I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_SHIFT)
#define I40E_VFINT_ICR0_ENA1_ADMINQ_SHIFT 30
#define I40E_VFINT_ICR0_ENA1_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA1_ADMINQ_SHIFT)
#define I40E_VFINT_ICR0_ENA1_RSVD_SHIFT 31
#define I40E_VFINT_ICR0_ENA1_RSVD_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA1_RSVD_SHIFT)
#define I40E_VFINT_ICR01 0x00004800 /* Reset: CORER */
#define I40E_VFINT_ICR01_INTEVENT_SHIFT 0
#define I40E_VFINT_ICR01_INTEVENT_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_INTEVENT_SHIFT)
#define I40E_VFINT_ICR01_QUEUE_0_SHIFT 1
#define I40E_VFINT_ICR01_QUEUE_0_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_0_SHIFT)
#define I40E_VFINT_ICR01_QUEUE_1_SHIFT 2
#define I40E_VFINT_ICR01_QUEUE_1_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_1_SHIFT)
#define I40E_VFINT_ICR01_QUEUE_2_SHIFT 3
#define I40E_VFINT_ICR01_QUEUE_2_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_2_SHIFT)
#define I40E_VFINT_ICR01_QUEUE_3_SHIFT 4
#define I40E_VFINT_ICR01_QUEUE_3_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_3_SHIFT)
#define I40E_VFINT_ICR01_LINK_STAT_CHANGE_SHIFT 25
#define I40E_VFINT_ICR01_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_LINK_STAT_CHANGE_SHIFT)
#define I40E_VFINT_ICR01_ADMINQ_SHIFT 30
#define I40E_VFINT_ICR01_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_ADMINQ_SHIFT)
#define I40E_VFINT_ICR01_SWINT_SHIFT 31
#define I40E_VFINT_ICR01_SWINT_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_SWINT_SHIFT)
#define I40E_VFINT_ITR01(_i) (0x00004C00 + ((_i) * 4)) /* _i=0...2 */ /* Reset: VFR */
#define I40E_VFINT_ITR01_MAX_INDEX 2
#define I40E_VFINT_ITR01_INTERVAL_SHIFT 0
#define I40E_VFINT_ITR01_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITR01_INTERVAL_SHIFT)
#define I40E_VFINT_ITRN1(_i, _INTVF) (0x00002800 + ((_i) * 64 + (_INTVF) * 4)) /* _i=0...2, _INTVF=0...15 */ /* Reset: VFR */
#define I40E_VFINT_ITRN1_MAX_INDEX 2
#define I40E_VFINT_ITRN1_INTERVAL_SHIFT 0
#define I40E_VFINT_ITRN1_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITRN1_INTERVAL_SHIFT)
#define I40E_VFINT_STAT_CTL01 0x00005400 /* Reset: CORER */
#define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT 2
#define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT)
#define I40E_QRX_TAIL1(_Q) (0x00002000 + ((_Q) * 4)) /* _i=0...15 */ /* Reset: CORER */
#define I40E_QRX_TAIL1_MAX_INDEX 15
#define I40E_QRX_TAIL1_TAIL_SHIFT 0
#define I40E_QRX_TAIL1_TAIL_MASK I40E_MASK(0x1FFF, I40E_QRX_TAIL1_TAIL_SHIFT)
#define I40E_QTX_TAIL1(_Q) (0x00000000 + ((_Q) * 4)) /* _i=0...15 */ /* Reset: PFR */
#define I40E_QTX_TAIL1_MAX_INDEX 15
#define I40E_QTX_TAIL1_TAIL_SHIFT 0
#define I40E_QTX_TAIL1_TAIL_MASK I40E_MASK(0x1FFF, I40E_QTX_TAIL1_TAIL_SHIFT)
#define I40E_VFMSIX_PBA 0x00002000 /* Reset: VFLR */
#define I40E_VFMSIX_PBA_PENBIT_SHIFT 0
#define I40E_VFMSIX_PBA_PENBIT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_PBA_PENBIT_SHIFT)
#define I40E_VFMSIX_TADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */
#define I40E_VFMSIX_TADD_MAX_INDEX 16
#define I40E_VFMSIX_TADD_MSIXTADD10_SHIFT 0
#define I40E_VFMSIX_TADD_MSIXTADD10_MASK I40E_MASK(0x3, I40E_VFMSIX_TADD_MSIXTADD10_SHIFT)
#define I40E_VFMSIX_TADD_MSIXTADD_SHIFT 2
#define I40E_VFMSIX_TADD_MSIXTADD_MASK I40E_MASK(0x3FFFFFFF, I40E_VFMSIX_TADD_MSIXTADD_SHIFT)
#define I40E_VFMSIX_TMSG(_i) (0x00000008 + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */
#define I40E_VFMSIX_TMSG_MAX_INDEX 16
#define I40E_VFMSIX_TMSG_MSIXTMSG_SHIFT 0
#define I40E_VFMSIX_TMSG_MSIXTMSG_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TMSG_MSIXTMSG_SHIFT)
#define I40E_VFMSIX_TUADD(_i) (0x00000004 + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */
#define I40E_VFMSIX_TUADD_MAX_INDEX 16
#define I40E_VFMSIX_TUADD_MSIXTUADD_SHIFT 0
#define I40E_VFMSIX_TUADD_MSIXTUADD_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TUADD_MSIXTUADD_SHIFT)
#define I40E_VFMSIX_TVCTRL(_i) (0x0000000C + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */
#define I40E_VFMSIX_TVCTRL_MAX_INDEX 16
#define I40E_VFMSIX_TVCTRL_MASK_SHIFT 0
#define I40E_VFMSIX_TVCTRL_MASK_MASK I40E_MASK(0x1, I40E_VFMSIX_TVCTRL_MASK_SHIFT)
#define I40E_VFCM_PE_ERRDATA 0x0000DC00 /* Reset: VFR */
#define I40E_VFCM_PE_ERRDATA_ERROR_CODE_SHIFT 0
#define I40E_VFCM_PE_ERRDATA_ERROR_CODE_MASK I40E_MASK(0xF, I40E_VFCM_PE_ERRDATA_ERROR_CODE_SHIFT)
#define I40E_VFCM_PE_ERRDATA_Q_TYPE_SHIFT 4
#define I40E_VFCM_PE_ERRDATA_Q_TYPE_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRDATA_Q_TYPE_SHIFT)
#define I40E_VFCM_PE_ERRDATA_Q_NUM_SHIFT 8
#define I40E_VFCM_PE_ERRDATA_Q_NUM_MASK I40E_MASK(0x3FFFF, I40E_VFCM_PE_ERRDATA_Q_NUM_SHIFT)
#define I40E_VFCM_PE_ERRINFO 0x0000D800 /* Reset: VFR */
#define I40E_VFCM_PE_ERRINFO_ERROR_VALID_SHIFT 0
#define I40E_VFCM_PE_ERRINFO_ERROR_VALID_MASK I40E_MASK(0x1, I40E_VFCM_PE_ERRINFO_ERROR_VALID_SHIFT)
#define I40E_VFCM_PE_ERRINFO_ERROR_INST_SHIFT 4
#define I40E_VFCM_PE_ERRINFO_ERROR_INST_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRINFO_ERROR_INST_SHIFT)
#define I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT 8
#define I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT)
#define I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT 16
#define I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT)
#define I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT 24
#define I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT)
#define I40E_VFQF_HENA(_i) (0x0000C400 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */
#define I40E_VFQF_HENA_MAX_INDEX 1
#define I40E_VFQF_HENA_PTYPE_ENA_SHIFT 0
#define I40E_VFQF_HENA_PTYPE_ENA_MASK I40E_MASK(0xFFFFFFFF, I40E_VFQF_HENA_PTYPE_ENA_SHIFT)
#define I40E_VFQF_HKEY(_i) (0x0000CC00 + ((_i) * 4)) /* _i=0...12 */ /* Reset: CORER */
#define I40E_VFQF_HKEY_MAX_INDEX 12
#define I40E_VFQF_HKEY_KEY_0_SHIFT 0
#define I40E_VFQF_HKEY_KEY_0_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_0_SHIFT)
#define I40E_VFQF_HKEY_KEY_1_SHIFT 8
#define I40E_VFQF_HKEY_KEY_1_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_1_SHIFT)
#define I40E_VFQF_HKEY_KEY_2_SHIFT 16
#define I40E_VFQF_HKEY_KEY_2_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_2_SHIFT)
#define I40E_VFQF_HKEY_KEY_3_SHIFT 24
#define I40E_VFQF_HKEY_KEY_3_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_3_SHIFT)
#define I40E_VFQF_HLUT(_i) (0x0000D000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
#define I40E_VFQF_HLUT_MAX_INDEX 15
#define I40E_VFQF_HLUT_LUT0_SHIFT 0
#define I40E_VFQF_HLUT_LUT0_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT0_SHIFT)
#define I40E_VFQF_HLUT_LUT1_SHIFT 8
#define I40E_VFQF_HLUT_LUT1_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT1_SHIFT)
#define I40E_VFQF_HLUT_LUT2_SHIFT 16
#define I40E_VFQF_HLUT_LUT2_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT2_SHIFT)
#define I40E_VFQF_HLUT_LUT3_SHIFT 24
#define I40E_VFQF_HLUT_LUT3_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT3_SHIFT)
#define I40E_VFQF_HREGION(_i) (0x0000D400 + ((_i) * 4)) /* _i=0...7 */ /* Reset: CORER */
#define I40E_VFQF_HREGION_MAX_INDEX 7
#define I40E_VFQF_HREGION_OVERRIDE_ENA_0_SHIFT 0
#define I40E_VFQF_HREGION_OVERRIDE_ENA_0_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_0_SHIFT)
#define I40E_VFQF_HREGION_REGION_0_SHIFT 1
#define I40E_VFQF_HREGION_REGION_0_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_0_SHIFT)
#define I40E_VFQF_HREGION_OVERRIDE_ENA_1_SHIFT 4
#define I40E_VFQF_HREGION_OVERRIDE_ENA_1_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_1_SHIFT)
#define I40E_VFQF_HREGION_REGION_1_SHIFT 5
#define I40E_VFQF_HREGION_REGION_1_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_1_SHIFT)
#define I40E_VFQF_HREGION_OVERRIDE_ENA_2_SHIFT 8
#define I40E_VFQF_HREGION_OVERRIDE_ENA_2_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_2_SHIFT)
#define I40E_VFQF_HREGION_REGION_2_SHIFT 9
#define I40E_VFQF_HREGION_REGION_2_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_2_SHIFT)
#define I40E_VFQF_HREGION_OVERRIDE_ENA_3_SHIFT 12
#define I40E_VFQF_HREGION_OVERRIDE_ENA_3_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_3_SHIFT)
#define I40E_VFQF_HREGION_REGION_3_SHIFT 13
#define I40E_VFQF_HREGION_REGION_3_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_3_SHIFT)
#define I40E_VFQF_HREGION_OVERRIDE_ENA_4_SHIFT 16
#define I40E_VFQF_HREGION_OVERRIDE_ENA_4_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_4_SHIFT)
#define I40E_VFQF_HREGION_REGION_4_SHIFT 17
#define I40E_VFQF_HREGION_REGION_4_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_4_SHIFT)
#define I40E_VFQF_HREGION_OVERRIDE_ENA_5_SHIFT 20
#define I40E_VFQF_HREGION_OVERRIDE_ENA_5_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_5_SHIFT)
#define I40E_VFQF_HREGION_REGION_5_SHIFT 21
#define I40E_VFQF_HREGION_REGION_5_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_5_SHIFT)
#define I40E_VFQF_HREGION_OVERRIDE_ENA_6_SHIFT 24
#define I40E_VFQF_HREGION_OVERRIDE_ENA_6_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_6_SHIFT)
#define I40E_VFQF_HREGION_REGION_6_SHIFT 25
#define I40E_VFQF_HREGION_REGION_6_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_6_SHIFT)
#define I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT 28
#define I40E_VFQF_HREGION_OVERRIDE_ENA_7_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT)
#define I40E_VFQF_HREGION_REGION_7_SHIFT 29
#define I40E_VFQF_HREGION_REGION_7_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_7_SHIFT)
#define I40E_VFINT_DYN_CTL01_WB_ON_ITR_SHIFT 30
#define I40E_VFINT_DYN_CTL01_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_WB_ON_ITR_SHIFT)
#define I40E_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT 30
#define I40E_VFINT_DYN_CTLN1_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT)
#define I40E_VFPE_AEQALLOC1 0x0000A400 /* Reset: VFR */
#define I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT 0
#define I40E_VFPE_AEQALLOC1_AECOUNT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT)
#define I40E_VFPE_CCQPHIGH1 0x00009800 /* Reset: VFR */
#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT 0
#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT)
#define I40E_VFPE_CCQPLOW1 0x0000AC00 /* Reset: VFR */
#define I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT 0
#define I40E_VFPE_CCQPLOW1_PECCQPLOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT)
#define I40E_VFPE_CCQPSTATUS1 0x0000B800 /* Reset: VFR */
#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT 0
#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT)
#define I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_SHIFT 4
#define I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_MASK I40E_MASK(0x7, I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_SHIFT)
#define I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_SHIFT 16
#define I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_MASK I40E_MASK(0x3F, I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_SHIFT)
#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT 31
#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT)
#define I40E_VFPE_CQACK1 0x0000B000 /* Reset: VFR */
#define I40E_VFPE_CQACK1_PECQID_SHIFT 0
#define I40E_VFPE_CQACK1_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQACK1_PECQID_SHIFT)
#define I40E_VFPE_CQARM1 0x0000B400 /* Reset: VFR */
#define I40E_VFPE_CQARM1_PECQID_SHIFT 0
#define I40E_VFPE_CQARM1_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQARM1_PECQID_SHIFT)
#define I40E_VFPE_CQPDB1 0x0000BC00 /* Reset: VFR */
#define I40E_VFPE_CQPDB1_WQHEAD_SHIFT 0
#define I40E_VFPE_CQPDB1_WQHEAD_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPDB1_WQHEAD_SHIFT)
#define I40E_VFPE_CQPERRCODES1 0x00009C00 /* Reset: VFR */
#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT 0
#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT)
#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT 16
#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT)
#define I40E_VFPE_CQPTAIL1 0x0000A000 /* Reset: VFR */
#define I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT 0
#define I40E_VFPE_CQPTAIL1_WQTAIL_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT)
#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT 31
#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT)
#define I40E_VFPE_IPCONFIG01 0x00008C00 /* Reset: VFR */
#define I40E_VFPE_IPCONFIG01_PEIPID_SHIFT 0
#define I40E_VFPE_IPCONFIG01_PEIPID_MASK I40E_MASK(0xFFFF, I40E_VFPE_IPCONFIG01_PEIPID_SHIFT)
#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT 16
#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_MASK I40E_MASK(0x1, I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT)
#define I40E_VFPE_MRTEIDXMASK1 0x00009000 /* Reset: VFR */
#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT 0
#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_MASK I40E_MASK(0x1F, I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT)
#define I40E_VFPE_RCVUNEXPECTEDERROR1 0x00009400 /* Reset: VFR */
#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT 0
#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_MASK I40E_MASK(0xFFFFFF, I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT)
#define I40E_VFPE_TCPNOWTIMER1 0x0000A800 /* Reset: VFR */
#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT 0
#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT)
#define I40E_VFPE_WQEALLOC1 0x0000C000 /* Reset: VFR */
#define I40E_VFPE_WQEALLOC1_PEQPID_SHIFT 0
#define I40E_VFPE_WQEALLOC1_PEQPID_MASK I40E_MASK(0x3FFFF, I40E_VFPE_WQEALLOC1_PEQPID_SHIFT)
#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT 20
#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_MASK I40E_MASK(0xFFF, I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT)
#endif /* _I40E_REGISTER_H_ */

[file diff suppressed because it is too large]


@@ -0,0 +1,15 @@
# SPDX-License-Identifier: GPL-2.0
# Copyright(c) 2013 - 2018 Intel Corporation.
#
# Makefile for the Intel(R) Ethernet Adaptive Virtual Function (iavf)
# driver
#
#
ccflags-y += -I$(src)
subdir-ccflags-y += -I$(src)
obj-$(CONFIG_IAVF) += iavf.o
iavf-objs := i40evf_main.o i40evf_ethtool.o i40evf_virtchnl.o \
i40e_txrx.o i40e_common.o i40e_adminq.o i40evf_client.o


@@ -7,16 +7,6 @@
 #include "i40e_adminq.h"
 #include "i40e_prototype.h"
 
-/**
- * i40e_is_nvm_update_op - return true if this is an NVM update operation
- * @desc: API request descriptor
- **/
-static inline bool i40e_is_nvm_update_op(struct i40e_aq_desc *desc)
-{
-	return (desc->opcode == i40e_aqc_opc_nvm_erase) ||
-	       (desc->opcode == i40e_aqc_opc_nvm_update);
-}
-
 /**
  * i40e_adminq_init_regs - Initialize AdminQ registers
  * @hw: pointer to the hardware structure
@@ -569,9 +559,6 @@ i40e_status i40evf_shutdown_adminq(struct i40e_hw *hw)
 	i40e_shutdown_asq(hw);
 	i40e_shutdown_arq(hw);
 
-	if (hw->nvm_buff.va)
-		i40e_free_virt_mem(hw, &hw->nvm_buff);
-
 	return ret_code;
 }
 
@@ -951,17 +938,3 @@ clean_arq_element_err:
 
 	return ret_code;
 }
-
-void i40evf_resume_aq(struct i40e_hw *hw)
-{
-	/* Registers are reset after PF reset */
-	hw->aq.asq.next_to_use = 0;
-	hw->aq.asq.next_to_clean = 0;
-
-	i40e_config_asq_regs(hw);
-
-	hw->aq.arq.next_to_use = 0;
-	hw->aq.arq.next_to_clean = 0;
-
-	i40e_config_arq_regs(hw);
-}


@@ -0,0 +1,528 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2013 - 2018 Intel Corporation. */
#ifndef _I40E_ADMINQ_CMD_H_
#define _I40E_ADMINQ_CMD_H_
/* This header file defines the i40e Admin Queue commands and is shared between
* i40e Firmware and Software.
*
* This file needs to comply with the Linux Kernel coding style.
*/
#define I40E_FW_API_VERSION_MAJOR 0x0001
#define I40E_FW_API_VERSION_MINOR_X722 0x0005
#define I40E_FW_API_VERSION_MINOR_X710 0x0007
#define I40E_FW_MINOR_VERSION(_h) ((_h)->mac.type == I40E_MAC_XL710 ? \
I40E_FW_API_VERSION_MINOR_X710 : \
I40E_FW_API_VERSION_MINOR_X722)
/* API version 1.7 implements additional link and PHY-specific APIs */
#define I40E_MINOR_VER_GET_LINK_INFO_XL710 0x0007
struct i40e_aq_desc {
__le16 flags;
__le16 opcode;
__le16 datalen;
__le16 retval;
__le32 cookie_high;
__le32 cookie_low;
union {
struct {
__le32 param0;
__le32 param1;
__le32 param2;
__le32 param3;
} internal;
struct {
__le32 param0;
__le32 param1;
__le32 addr_high;
__le32 addr_low;
} external;
u8 raw[16];
} params;
};
/* Flags sub-structure
* |0 |1 |2 |3 |4 |5 |6 |7 |8 |9 |10 |11 |12 |13 |14 |15 |
* |DD |CMP|ERR|VFE| * * RESERVED * * |LB |RD |VFC|BUF|SI |EI |FE |
*/
/* command flags and offsets*/
#define I40E_AQ_FLAG_DD_SHIFT 0
#define I40E_AQ_FLAG_CMP_SHIFT 1
#define I40E_AQ_FLAG_ERR_SHIFT 2
#define I40E_AQ_FLAG_VFE_SHIFT 3
#define I40E_AQ_FLAG_LB_SHIFT 9
#define I40E_AQ_FLAG_RD_SHIFT 10
#define I40E_AQ_FLAG_VFC_SHIFT 11
#define I40E_AQ_FLAG_BUF_SHIFT 12
#define I40E_AQ_FLAG_SI_SHIFT 13
#define I40E_AQ_FLAG_EI_SHIFT 14
#define I40E_AQ_FLAG_FE_SHIFT 15
#define I40E_AQ_FLAG_DD BIT(I40E_AQ_FLAG_DD_SHIFT) /* 0x1 */
#define I40E_AQ_FLAG_CMP BIT(I40E_AQ_FLAG_CMP_SHIFT) /* 0x2 */
#define I40E_AQ_FLAG_ERR BIT(I40E_AQ_FLAG_ERR_SHIFT) /* 0x4 */
#define I40E_AQ_FLAG_VFE BIT(I40E_AQ_FLAG_VFE_SHIFT) /* 0x8 */
#define I40E_AQ_FLAG_LB BIT(I40E_AQ_FLAG_LB_SHIFT) /* 0x200 */
#define I40E_AQ_FLAG_RD BIT(I40E_AQ_FLAG_RD_SHIFT) /* 0x400 */
#define I40E_AQ_FLAG_VFC BIT(I40E_AQ_FLAG_VFC_SHIFT) /* 0x800 */
#define I40E_AQ_FLAG_BUF BIT(I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */
#define I40E_AQ_FLAG_SI BIT(I40E_AQ_FLAG_SI_SHIFT) /* 0x2000 */
#define I40E_AQ_FLAG_EI BIT(I40E_AQ_FLAG_EI_SHIFT) /* 0x4000 */
#define I40E_AQ_FLAG_FE BIT(I40E_AQ_FLAG_FE_SHIFT) /* 0x8000 */
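/* Illustrative sketch, not part of this diff: how callers typically combine
 * the flags above for an indirect command whose buffer the firmware must
 * read (BUF | RD == 0x1400 per the bit layout); compare the descriptor
 * setup in i40evf_aq_write_ddp() later in this merge.  'desc' and
 * 'buff_size' are assumed to exist in the caller.
 */
	desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD);
	if (buff_size > I40E_AQ_LARGE_BUF)
		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
	desc.datalen = cpu_to_le16(buff_size);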
/* error codes */
enum i40e_admin_queue_err {
I40E_AQ_RC_OK = 0, /* success */
I40E_AQ_RC_EPERM = 1, /* Operation not permitted */
I40E_AQ_RC_ENOENT = 2, /* No such element */
I40E_AQ_RC_ESRCH = 3, /* Bad opcode */
I40E_AQ_RC_EINTR = 4, /* operation interrupted */
I40E_AQ_RC_EIO = 5, /* I/O error */
I40E_AQ_RC_ENXIO = 6, /* No such resource */
I40E_AQ_RC_E2BIG = 7, /* Arg too long */
I40E_AQ_RC_EAGAIN = 8, /* Try again */
I40E_AQ_RC_ENOMEM = 9, /* Out of memory */
I40E_AQ_RC_EACCES = 10, /* Permission denied */
I40E_AQ_RC_EFAULT = 11, /* Bad address */
I40E_AQ_RC_EBUSY = 12, /* Device or resource busy */
I40E_AQ_RC_EEXIST = 13, /* object already exists */
I40E_AQ_RC_EINVAL = 14, /* Invalid argument */
I40E_AQ_RC_ENOTTY = 15, /* Not a typewriter */
I40E_AQ_RC_ENOSPC = 16, /* No space left or alloc failure */
I40E_AQ_RC_ENOSYS = 17, /* Function not implemented */
I40E_AQ_RC_ERANGE = 18, /* Parameter out of range */
I40E_AQ_RC_EFLUSHED = 19, /* Cmd flushed due to prev cmd error */
I40E_AQ_RC_BAD_ADDR = 20, /* Descriptor contains a bad pointer */
I40E_AQ_RC_EMODE = 21, /* Op not allowed in current dev mode */
I40E_AQ_RC_EFBIG = 22, /* File too large */
};
/* Admin Queue command opcodes */
enum i40e_admin_queue_opc {
/* aq commands */
i40e_aqc_opc_get_version = 0x0001,
i40e_aqc_opc_driver_version = 0x0002,
i40e_aqc_opc_queue_shutdown = 0x0003,
i40e_aqc_opc_set_pf_context = 0x0004,
/* resource ownership */
i40e_aqc_opc_request_resource = 0x0008,
i40e_aqc_opc_release_resource = 0x0009,
i40e_aqc_opc_list_func_capabilities = 0x000A,
i40e_aqc_opc_list_dev_capabilities = 0x000B,
/* Proxy commands */
i40e_aqc_opc_set_proxy_config = 0x0104,
i40e_aqc_opc_set_ns_proxy_table_entry = 0x0105,
/* LAA */
i40e_aqc_opc_mac_address_read = 0x0107,
i40e_aqc_opc_mac_address_write = 0x0108,
/* PXE */
i40e_aqc_opc_clear_pxe_mode = 0x0110,
/* WoL commands */
i40e_aqc_opc_set_wol_filter = 0x0120,
i40e_aqc_opc_get_wake_reason = 0x0121,
/* internal switch commands */
i40e_aqc_opc_get_switch_config = 0x0200,
i40e_aqc_opc_add_statistics = 0x0201,
i40e_aqc_opc_remove_statistics = 0x0202,
i40e_aqc_opc_set_port_parameters = 0x0203,
i40e_aqc_opc_get_switch_resource_alloc = 0x0204,
i40e_aqc_opc_set_switch_config = 0x0205,
i40e_aqc_opc_rx_ctl_reg_read = 0x0206,
i40e_aqc_opc_rx_ctl_reg_write = 0x0207,
i40e_aqc_opc_add_vsi = 0x0210,
i40e_aqc_opc_update_vsi_parameters = 0x0211,
i40e_aqc_opc_get_vsi_parameters = 0x0212,
i40e_aqc_opc_add_pv = 0x0220,
i40e_aqc_opc_update_pv_parameters = 0x0221,
i40e_aqc_opc_get_pv_parameters = 0x0222,
i40e_aqc_opc_add_veb = 0x0230,
i40e_aqc_opc_update_veb_parameters = 0x0231,
i40e_aqc_opc_get_veb_parameters = 0x0232,
i40e_aqc_opc_delete_element = 0x0243,
i40e_aqc_opc_add_macvlan = 0x0250,
i40e_aqc_opc_remove_macvlan = 0x0251,
i40e_aqc_opc_add_vlan = 0x0252,
i40e_aqc_opc_remove_vlan = 0x0253,
i40e_aqc_opc_set_vsi_promiscuous_modes = 0x0254,
i40e_aqc_opc_add_tag = 0x0255,
i40e_aqc_opc_remove_tag = 0x0256,
i40e_aqc_opc_add_multicast_etag = 0x0257,
i40e_aqc_opc_remove_multicast_etag = 0x0258,
i40e_aqc_opc_update_tag = 0x0259,
i40e_aqc_opc_add_control_packet_filter = 0x025A,
i40e_aqc_opc_remove_control_packet_filter = 0x025B,
i40e_aqc_opc_add_cloud_filters = 0x025C,
i40e_aqc_opc_remove_cloud_filters = 0x025D,
i40e_aqc_opc_clear_wol_switch_filters = 0x025E,
i40e_aqc_opc_add_mirror_rule = 0x0260,
i40e_aqc_opc_delete_mirror_rule = 0x0261,
/* Dynamic Device Personalization */
i40e_aqc_opc_write_personalization_profile = 0x0270,
i40e_aqc_opc_get_personalization_profile_list = 0x0271,
/* DCB commands */
i40e_aqc_opc_dcb_ignore_pfc = 0x0301,
i40e_aqc_opc_dcb_updated = 0x0302,
i40e_aqc_opc_set_dcb_parameters = 0x0303,
/* TX scheduler */
i40e_aqc_opc_configure_vsi_bw_limit = 0x0400,
i40e_aqc_opc_configure_vsi_ets_sla_bw_limit = 0x0406,
i40e_aqc_opc_configure_vsi_tc_bw = 0x0407,
i40e_aqc_opc_query_vsi_bw_config = 0x0408,
i40e_aqc_opc_query_vsi_ets_sla_config = 0x040A,
i40e_aqc_opc_configure_switching_comp_bw_limit = 0x0410,
i40e_aqc_opc_enable_switching_comp_ets = 0x0413,
i40e_aqc_opc_modify_switching_comp_ets = 0x0414,
i40e_aqc_opc_disable_switching_comp_ets = 0x0415,
i40e_aqc_opc_configure_switching_comp_ets_bw_limit = 0x0416,
i40e_aqc_opc_configure_switching_comp_bw_config = 0x0417,
i40e_aqc_opc_query_switching_comp_ets_config = 0x0418,
i40e_aqc_opc_query_port_ets_config = 0x0419,
i40e_aqc_opc_query_switching_comp_bw_config = 0x041A,
i40e_aqc_opc_suspend_port_tx = 0x041B,
i40e_aqc_opc_resume_port_tx = 0x041C,
i40e_aqc_opc_configure_partition_bw = 0x041D,
/* hmc */
i40e_aqc_opc_query_hmc_resource_profile = 0x0500,
i40e_aqc_opc_set_hmc_resource_profile = 0x0501,
/* phy commands*/
i40e_aqc_opc_get_phy_abilities = 0x0600,
i40e_aqc_opc_set_phy_config = 0x0601,
i40e_aqc_opc_set_mac_config = 0x0603,
i40e_aqc_opc_set_link_restart_an = 0x0605,
i40e_aqc_opc_get_link_status = 0x0607,
i40e_aqc_opc_set_phy_int_mask = 0x0613,
i40e_aqc_opc_get_local_advt_reg = 0x0614,
i40e_aqc_opc_set_local_advt_reg = 0x0615,
i40e_aqc_opc_get_partner_advt = 0x0616,
i40e_aqc_opc_set_lb_modes = 0x0618,
i40e_aqc_opc_get_phy_wol_caps = 0x0621,
i40e_aqc_opc_set_phy_debug = 0x0622,
i40e_aqc_opc_upload_ext_phy_fm = 0x0625,
i40e_aqc_opc_run_phy_activity = 0x0626,
i40e_aqc_opc_set_phy_register = 0x0628,
i40e_aqc_opc_get_phy_register = 0x0629,
/* NVM commands */
i40e_aqc_opc_nvm_read = 0x0701,
i40e_aqc_opc_nvm_erase = 0x0702,
i40e_aqc_opc_nvm_update = 0x0703,
i40e_aqc_opc_nvm_config_read = 0x0704,
i40e_aqc_opc_nvm_config_write = 0x0705,
i40e_aqc_opc_oem_post_update = 0x0720,
i40e_aqc_opc_thermal_sensor = 0x0721,
/* virtualization commands */
i40e_aqc_opc_send_msg_to_pf = 0x0801,
i40e_aqc_opc_send_msg_to_vf = 0x0802,
i40e_aqc_opc_send_msg_to_peer = 0x0803,
/* alternate structure */
i40e_aqc_opc_alternate_write = 0x0900,
i40e_aqc_opc_alternate_write_indirect = 0x0901,
i40e_aqc_opc_alternate_read = 0x0902,
i40e_aqc_opc_alternate_read_indirect = 0x0903,
i40e_aqc_opc_alternate_write_done = 0x0904,
i40e_aqc_opc_alternate_set_mode = 0x0905,
i40e_aqc_opc_alternate_clear_port = 0x0906,
/* LLDP commands */
i40e_aqc_opc_lldp_get_mib = 0x0A00,
i40e_aqc_opc_lldp_update_mib = 0x0A01,
i40e_aqc_opc_lldp_add_tlv = 0x0A02,
i40e_aqc_opc_lldp_update_tlv = 0x0A03,
i40e_aqc_opc_lldp_delete_tlv = 0x0A04,
i40e_aqc_opc_lldp_stop = 0x0A05,
i40e_aqc_opc_lldp_start = 0x0A06,
/* Tunnel commands */
i40e_aqc_opc_add_udp_tunnel = 0x0B00,
i40e_aqc_opc_del_udp_tunnel = 0x0B01,
i40e_aqc_opc_set_rss_key = 0x0B02,
i40e_aqc_opc_set_rss_lut = 0x0B03,
i40e_aqc_opc_get_rss_key = 0x0B04,
i40e_aqc_opc_get_rss_lut = 0x0B05,
/* Async Events */
i40e_aqc_opc_event_lan_overflow = 0x1001,
/* OEM commands */
i40e_aqc_opc_oem_parameter_change = 0xFE00,
i40e_aqc_opc_oem_device_status_change = 0xFE01,
i40e_aqc_opc_oem_ocsd_initialize = 0xFE02,
i40e_aqc_opc_oem_ocbb_initialize = 0xFE03,
/* debug commands */
i40e_aqc_opc_debug_read_reg = 0xFF03,
i40e_aqc_opc_debug_write_reg = 0xFF04,
i40e_aqc_opc_debug_modify_reg = 0xFF07,
i40e_aqc_opc_debug_dump_internals = 0xFF08,
};
/* command structures and indirect data structures */
/* Structure naming conventions:
* - no suffix for direct command descriptor structures
* - _data for indirect sent data
* - _resp for indirect return data (data which is both will use _data)
* - _completion for direct return data
* - _element_ for repeated elements (may also be _data or _resp)
*
* Command structures are expected to overlay the params.raw member of the basic
* descriptor, and as such cannot exceed 16 bytes in length.
*/
/* This macro is used to generate a compilation error if a structure
* is not exactly the correct length. It gives a divide by zero error if the
* structure is not of the correct size, otherwise it creates an enum that is
* never used.
*/
#define I40E_CHECK_STRUCT_LEN(n, X) enum i40e_static_assert_enum_##X \
{ i40e_static_assert_##X = (n)/((sizeof(struct X) == (n)) ? 1 : 0) }
/* This macro is used extensively to ensure that command structures are 16
* bytes in length as they have to map to the raw array of that size.
*/
#define I40E_CHECK_CMD_LENGTH(X) I40E_CHECK_STRUCT_LEN(16, X)
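/* Illustrative sketch, not part of this diff: the check macros above fail
 * the build for a wrongly sized structure because the divisor becomes zero
 * and the enum initializer stops being a valid constant expression.
 * 'i40e_aqc_bogus' is a hypothetical struct shown only for the failure mode.
 */
struct i40e_aqc_bogus {
	u8 raw[20];	/* 20 != 16, so the sizeof() check divides by zero */
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_bogus);	/* compile error, as intended */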
/* Queue Shutdown (direct 0x0003) */
struct i40e_aqc_queue_shutdown {
__le32 driver_unloading;
#define I40E_AQ_DRIVER_UNLOADING 0x1
u8 reserved[12];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_queue_shutdown);
struct i40e_aqc_vsi_properties_data {
/* first 96 byte are written by SW */
__le16 valid_sections;
#define I40E_AQ_VSI_PROP_SWITCH_VALID 0x0001
#define I40E_AQ_VSI_PROP_SECURITY_VALID 0x0002
#define I40E_AQ_VSI_PROP_VLAN_VALID 0x0004
#define I40E_AQ_VSI_PROP_CAS_PV_VALID 0x0008
#define I40E_AQ_VSI_PROP_INGRESS_UP_VALID 0x0010
#define I40E_AQ_VSI_PROP_EGRESS_UP_VALID 0x0020
#define I40E_AQ_VSI_PROP_QUEUE_MAP_VALID 0x0040
#define I40E_AQ_VSI_PROP_QUEUE_OPT_VALID 0x0080
#define I40E_AQ_VSI_PROP_OUTER_UP_VALID 0x0100
#define I40E_AQ_VSI_PROP_SCHED_VALID 0x0200
/* switch section */
__le16 switch_id; /* 12bit id combined with flags below */
#define I40E_AQ_VSI_SW_ID_SHIFT 0x0000
#define I40E_AQ_VSI_SW_ID_MASK (0xFFF << I40E_AQ_VSI_SW_ID_SHIFT)
#define I40E_AQ_VSI_SW_ID_FLAG_NOT_STAG 0x1000
#define I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB 0x2000
#define I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB 0x4000
u8 sw_reserved[2];
/* security section */
u8 sec_flags;
#define I40E_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD 0x01
#define I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK 0x02
#define I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK 0x04
u8 sec_reserved;
/* VLAN section */
__le16 pvid; /* VLANS include priority bits */
__le16 fcoe_pvid;
u8 port_vlan_flags;
#define I40E_AQ_VSI_PVLAN_MODE_SHIFT 0x00
#define I40E_AQ_VSI_PVLAN_MODE_MASK (0x03 << \
I40E_AQ_VSI_PVLAN_MODE_SHIFT)
#define I40E_AQ_VSI_PVLAN_MODE_TAGGED 0x01
#define I40E_AQ_VSI_PVLAN_MODE_UNTAGGED 0x02
#define I40E_AQ_VSI_PVLAN_MODE_ALL 0x03
#define I40E_AQ_VSI_PVLAN_INSERT_PVID 0x04
#define I40E_AQ_VSI_PVLAN_EMOD_SHIFT 0x03
#define I40E_AQ_VSI_PVLAN_EMOD_MASK (0x3 << \
I40E_AQ_VSI_PVLAN_EMOD_SHIFT)
#define I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH 0x0
#define I40E_AQ_VSI_PVLAN_EMOD_STR_UP 0x08
#define I40E_AQ_VSI_PVLAN_EMOD_STR 0x10
#define I40E_AQ_VSI_PVLAN_EMOD_NOTHING 0x18
u8 pvlan_reserved[3];
/* ingress egress up sections */
__le32 ingress_table; /* bitmap, 3 bits per up */
#define I40E_AQ_VSI_UP_TABLE_UP0_SHIFT 0
#define I40E_AQ_VSI_UP_TABLE_UP0_MASK (0x7 << \
I40E_AQ_VSI_UP_TABLE_UP0_SHIFT)
#define I40E_AQ_VSI_UP_TABLE_UP1_SHIFT 3
#define I40E_AQ_VSI_UP_TABLE_UP1_MASK (0x7 << \
I40E_AQ_VSI_UP_TABLE_UP1_SHIFT)
#define I40E_AQ_VSI_UP_TABLE_UP2_SHIFT 6
#define I40E_AQ_VSI_UP_TABLE_UP2_MASK (0x7 << \
I40E_AQ_VSI_UP_TABLE_UP2_SHIFT)
#define I40E_AQ_VSI_UP_TABLE_UP3_SHIFT 9
#define I40E_AQ_VSI_UP_TABLE_UP3_MASK (0x7 << \
I40E_AQ_VSI_UP_TABLE_UP3_SHIFT)
#define I40E_AQ_VSI_UP_TABLE_UP4_SHIFT 12
#define I40E_AQ_VSI_UP_TABLE_UP4_MASK (0x7 << \
I40E_AQ_VSI_UP_TABLE_UP4_SHIFT)
#define I40E_AQ_VSI_UP_TABLE_UP5_SHIFT 15
#define I40E_AQ_VSI_UP_TABLE_UP5_MASK (0x7 << \
I40E_AQ_VSI_UP_TABLE_UP5_SHIFT)
#define I40E_AQ_VSI_UP_TABLE_UP6_SHIFT 18
#define I40E_AQ_VSI_UP_TABLE_UP6_MASK (0x7 << \
I40E_AQ_VSI_UP_TABLE_UP6_SHIFT)
#define I40E_AQ_VSI_UP_TABLE_UP7_SHIFT 21
#define I40E_AQ_VSI_UP_TABLE_UP7_MASK (0x7 << \
I40E_AQ_VSI_UP_TABLE_UP7_SHIFT)
__le32 egress_table; /* same defines as for ingress table */
/* cascaded PV section */
__le16 cas_pv_tag;
u8 cas_pv_flags;
#define I40E_AQ_VSI_CAS_PV_TAGX_SHIFT 0x00
#define I40E_AQ_VSI_CAS_PV_TAGX_MASK (0x03 << \
I40E_AQ_VSI_CAS_PV_TAGX_SHIFT)
#define I40E_AQ_VSI_CAS_PV_TAGX_LEAVE 0x00
#define I40E_AQ_VSI_CAS_PV_TAGX_REMOVE 0x01
#define I40E_AQ_VSI_CAS_PV_TAGX_COPY 0x02
#define I40E_AQ_VSI_CAS_PV_INSERT_TAG 0x10
#define I40E_AQ_VSI_CAS_PV_ETAG_PRUNE 0x20
#define I40E_AQ_VSI_CAS_PV_ACCEPT_HOST_TAG 0x40
u8 cas_pv_reserved;
/* queue mapping section */
__le16 mapping_flags;
#define I40E_AQ_VSI_QUE_MAP_CONTIG 0x0
#define I40E_AQ_VSI_QUE_MAP_NONCONTIG 0x1
__le16 queue_mapping[16];
#define I40E_AQ_VSI_QUEUE_SHIFT 0x0
#define I40E_AQ_VSI_QUEUE_MASK (0x7FF << I40E_AQ_VSI_QUEUE_SHIFT)
__le16 tc_mapping[8];
#define I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT 0
#define I40E_AQ_VSI_TC_QUE_OFFSET_MASK (0x1FF << \
I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT)
#define I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT 9
#define I40E_AQ_VSI_TC_QUE_NUMBER_MASK (0x7 << \
I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
/* queueing option section */
u8 queueing_opt_flags;
#define I40E_AQ_VSI_QUE_OPT_MULTICAST_UDP_ENA 0x04
#define I40E_AQ_VSI_QUE_OPT_UNICAST_UDP_ENA 0x08
#define I40E_AQ_VSI_QUE_OPT_TCP_ENA 0x10
#define I40E_AQ_VSI_QUE_OPT_FCOE_ENA 0x20
#define I40E_AQ_VSI_QUE_OPT_RSS_LUT_PF 0x00
#define I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI 0x40
u8 queueing_opt_reserved[3];
/* scheduler section */
u8 up_enable_bits;
u8 sched_reserved;
/* outer up section */
__le32 outer_up_table; /* same structure and defines as ingress tbl */
u8 cmd_reserved[8];
/* last 32 bytes are written by FW */
__le16 qs_handle[8];
#define I40E_AQ_VSI_QS_HANDLE_INVALID 0xFFFF
__le16 stat_counter_idx;
__le16 sched_id;
u8 resp_reserved[12];
};
I40E_CHECK_STRUCT_LEN(128, i40e_aqc_vsi_properties_data);
/* Get VEB Parameters (direct 0x0232)
* uses i40e_aqc_switch_seid for the descriptor
*/
struct i40e_aqc_get_veb_parameters_completion {
__le16 seid;
__le16 switch_id;
__le16 veb_flags; /* only the first/last flags from 0x0230 is valid */
__le16 statistic_index;
__le16 vebs_used;
__le16 vebs_free;
u8 reserved[4];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_get_veb_parameters_completion);
#define I40E_LINK_SPEED_100MB_SHIFT 0x1
#define I40E_LINK_SPEED_1000MB_SHIFT 0x2
#define I40E_LINK_SPEED_10GB_SHIFT 0x3
#define I40E_LINK_SPEED_40GB_SHIFT 0x4
#define I40E_LINK_SPEED_20GB_SHIFT 0x5
#define I40E_LINK_SPEED_25GB_SHIFT 0x6
enum i40e_aq_link_speed {
I40E_LINK_SPEED_UNKNOWN = 0,
I40E_LINK_SPEED_100MB = BIT(I40E_LINK_SPEED_100MB_SHIFT),
I40E_LINK_SPEED_1GB = BIT(I40E_LINK_SPEED_1000MB_SHIFT),
I40E_LINK_SPEED_10GB = BIT(I40E_LINK_SPEED_10GB_SHIFT),
I40E_LINK_SPEED_40GB = BIT(I40E_LINK_SPEED_40GB_SHIFT),
I40E_LINK_SPEED_20GB = BIT(I40E_LINK_SPEED_20GB_SHIFT),
I40E_LINK_SPEED_25GB = BIT(I40E_LINK_SPEED_25GB_SHIFT),
};
/* Send to PF command (indirect 0x0801) id is only used by PF
* Send to VF command (indirect 0x0802) id is only used by PF
* Send to Peer PF command (indirect 0x0803)
*/
struct i40e_aqc_pf_vf_message {
__le32 id;
u8 reserved[4];
__le32 addr_high;
__le32 addr_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_pf_vf_message);
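/* Illustrative sketch, not part of this diff: how i40e_aq_send_msg_to_pf()
 * (declared in i40e_prototype.h above) uses this layout - the virtchnl
 * opcode and return value travel in the cookie words, any payload in the
 * attached buffer.  'v_opcode', 'v_retval', 'msg' and 'msglen' are the
 * caller's arguments.
 */
	i40evf_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_pf);
	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_SI);
	desc.cookie_high = cpu_to_le32(v_opcode);
	desc.cookie_low = cpu_to_le32(v_retval);
	if (msglen) {
		desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF |
						I40E_AQ_FLAG_RD));
		desc.datalen = cpu_to_le16(msglen);
	}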
struct i40e_aqc_get_set_rss_key {
#define I40E_AQC_SET_RSS_KEY_VSI_VALID BIT(15)
#define I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT 0
#define I40E_AQC_SET_RSS_KEY_VSI_ID_MASK (0x3FF << \
I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT)
__le16 vsi_id;
u8 reserved[6];
__le32 addr_high;
__le32 addr_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_get_set_rss_key);
struct i40e_aqc_get_set_rss_key_data {
u8 standard_rss_key[0x28];
u8 extended_hash_key[0xc];
};
I40E_CHECK_STRUCT_LEN(0x34, i40e_aqc_get_set_rss_key_data);
struct i40e_aqc_get_set_rss_lut {
#define I40E_AQC_SET_RSS_LUT_VSI_VALID BIT(15)
#define I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT 0
#define I40E_AQC_SET_RSS_LUT_VSI_ID_MASK (0x3FF << \
I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT)
__le16 vsi_id;
#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT 0
#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK \
BIT(I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT)
#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI 0
#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF 1
__le16 flags;
u8 reserved[4];
__le32 addr_high;
__le32 addr_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_get_set_rss_lut);
#endif /* _I40E_ADMINQ_CMD_H_ */


@@ -525,7 +525,6 @@ i40e_status i40evf_aq_set_rss_key(struct i40e_hw *hw,
return i40e_aq_get_set_rss_key(hw, vsi_id, key, true);
}
/* The i40evf_ptype_lookup table is used to convert from the 8-bit ptype in the
* hardware to a bit-field that can be used by SW to more easily determine the
* packet type.
@@ -891,135 +890,6 @@ struct i40e_rx_ptype_decoded i40evf_ptype_lookup[] = {
I40E_PTT_UNUSED_ENTRY(255)
};
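/* Illustrative sketch, not part of this diff: consuming the table above via
 * the decode_rx_desc_ptype() helper from i40e_prototype.h.  'rx_ptype'
 * stands in for the 8-bit value parsed from a real Rx descriptor.
 */
	struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(rx_ptype);

	if (decoded.known && decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) {
		/* e.g. choose checksum/RSS handling from decoded.inner_prot */
	}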
/**
* i40evf_aq_rx_ctl_read_register - use FW to read from an Rx control register
* @hw: pointer to the hw struct
* @reg_addr: register address
* @reg_val: ptr to register value
* @cmd_details: pointer to command details structure or NULL
*
* Use the firmware to read the Rx control register,
* especially useful if the Rx unit is under heavy pressure
**/
i40e_status i40evf_aq_rx_ctl_read_register(struct i40e_hw *hw,
u32 reg_addr, u32 *reg_val,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_rx_ctl_reg_read_write *cmd_resp =
(struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw;
i40e_status status;
if (!reg_val)
return I40E_ERR_PARAM;
i40evf_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_rx_ctl_reg_read);
cmd_resp->address = cpu_to_le32(reg_addr);
status = i40evf_asq_send_command(hw, &desc, NULL, 0, cmd_details);
if (status == 0)
*reg_val = le32_to_cpu(cmd_resp->value);
return status;
}
/**
* i40evf_read_rx_ctl - read from an Rx control register
* @hw: pointer to the hw struct
* @reg_addr: register address
**/
u32 i40evf_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr)
{
i40e_status status = 0;
bool use_register;
int retry = 5;
u32 val = 0;
use_register = (((hw->aq.api_maj_ver == 1) &&
(hw->aq.api_min_ver < 5)) ||
(hw->mac.type == I40E_MAC_X722));
if (!use_register) {
do_retry:
status = i40evf_aq_rx_ctl_read_register(hw, reg_addr,
&val, NULL);
if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) {
usleep_range(1000, 2000);
retry--;
goto do_retry;
}
}
/* if the AQ access failed, try the old-fashioned way */
if (status || use_register)
val = rd32(hw, reg_addr);
return val;
}
/**
* i40evf_aq_rx_ctl_write_register
* @hw: pointer to the hw struct
* @reg_addr: register address
* @reg_val: register value
* @cmd_details: pointer to command details structure or NULL
*
* Use the firmware to write to an Rx control register,
* especially useful if the Rx unit is under heavy pressure
**/
i40e_status i40evf_aq_rx_ctl_write_register(struct i40e_hw *hw,
u32 reg_addr, u32 reg_val,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_rx_ctl_reg_read_write *cmd =
(struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw;
i40e_status status;
i40evf_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_rx_ctl_reg_write);
cmd->address = cpu_to_le32(reg_addr);
cmd->value = cpu_to_le32(reg_val);
status = i40evf_asq_send_command(hw, &desc, NULL, 0, cmd_details);
return status;
}
/**
* i40evf_write_rx_ctl - write to an Rx control register
* @hw: pointer to the hw struct
* @reg_addr: register address
* @reg_val: register value
**/
void i40evf_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val)
{
i40e_status status = 0;
bool use_register;
int retry = 5;
use_register = (((hw->aq.api_maj_ver == 1) &&
(hw->aq.api_min_ver < 5)) ||
(hw->mac.type == I40E_MAC_X722));
if (!use_register) {
do_retry:
status = i40evf_aq_rx_ctl_write_register(hw, reg_addr,
reg_val, NULL);
if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) {
usleep_range(1000, 2000);
retry--;
goto do_retry;
}
}
/* if the AQ access failed, try the old-fashioned way */
if (status || use_register)
wr32(hw, reg_addr, reg_val);
}
/**
* i40e_aq_send_msg_to_pf
* @hw: pointer to the hardware structure
@@ -1110,211 +980,3 @@ i40e_status i40e_vf_reset(struct i40e_hw *hw)
return i40e_aq_send_msg_to_pf(hw, VIRTCHNL_OP_RESET_VF,
0, NULL, 0, NULL);
}
/**
* i40evf_aq_write_ddp - Write dynamic device personalization (ddp)
* @hw: pointer to the hw struct
* @buff: command buffer (size in bytes = buff_size)
* @buff_size: buffer size in bytes
* @track_id: package tracking id
* @error_offset: returns error offset
* @error_info: returns error information
* @cmd_details: pointer to command details structure or NULL
**/
enum
i40e_status_code i40evf_aq_write_ddp(struct i40e_hw *hw, void *buff,
u16 buff_size, u32 track_id,
u32 *error_offset, u32 *error_info,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_write_personalization_profile *cmd =
(struct i40e_aqc_write_personalization_profile *)
&desc.params.raw;
struct i40e_aqc_write_ddp_resp *resp;
i40e_status status;
i40evf_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_write_personalization_profile);
desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD);
if (buff_size > I40E_AQ_LARGE_BUF)
desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
desc.datalen = cpu_to_le16(buff_size);
cmd->profile_track_id = cpu_to_le32(track_id);
status = i40evf_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
if (!status) {
resp = (struct i40e_aqc_write_ddp_resp *)&desc.params.raw;
if (error_offset)
*error_offset = le32_to_cpu(resp->error_offset);
if (error_info)
*error_info = le32_to_cpu(resp->error_info);
}
return status;
}
/**
* i40evf_aq_get_ddp_list - Read dynamic device personalization (ddp)
* @hw: pointer to the hw struct
* @buff: command buffer (size in bytes = buff_size)
* @buff_size: buffer size in bytes
* @flags: AdminQ command flags
* @cmd_details: pointer to command details structure or NULL
**/
enum
i40e_status_code i40evf_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
u16 buff_size, u8 flags,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_get_applied_profiles *cmd =
(struct i40e_aqc_get_applied_profiles *)&desc.params.raw;
i40e_status status;
i40evf_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_get_personalization_profile_list);
desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
if (buff_size > I40E_AQ_LARGE_BUF)
desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
desc.datalen = cpu_to_le16(buff_size);
cmd->flags = flags;
status = i40evf_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
return status;
}
/**
* i40evf_find_segment_in_package
* @segment_type: the segment type to search for (i.e., SEGMENT_TYPE_I40E)
* @pkg_hdr: pointer to the package header to be searched
*
* This function searches a package file for a particular segment type. On
* success it returns a pointer to the segment header, otherwise it will
* return NULL.
**/
struct i40e_generic_seg_header *
i40evf_find_segment_in_package(u32 segment_type,
struct i40e_package_header *pkg_hdr)
{
struct i40e_generic_seg_header *segment;
u32 i;
/* Search all package segments for the requested segment type */
for (i = 0; i < pkg_hdr->segment_count; i++) {
segment =
(struct i40e_generic_seg_header *)((u8 *)pkg_hdr +
pkg_hdr->segment_offset[i]);
if (segment->type == segment_type)
return segment;
}
return NULL;
}
/**
* i40evf_write_profile
* @hw: pointer to the hardware structure
* @profile: pointer to the profile segment of the package to be downloaded
* @track_id: package tracking id
*
* Handles the download of a complete package.
*/
enum i40e_status_code
i40evf_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
u32 track_id)
{
i40e_status status = 0;
struct i40e_section_table *sec_tbl;
struct i40e_profile_section_header *sec = NULL;
u32 dev_cnt;
u32 vendor_dev_id;
u32 *nvm;
u32 section_size = 0;
u32 offset = 0, info = 0;
u32 i;
dev_cnt = profile->device_table_count;
for (i = 0; i < dev_cnt; i++) {
vendor_dev_id = profile->device_table[i].vendor_dev_id;
if ((vendor_dev_id >> 16) == PCI_VENDOR_ID_INTEL)
if (hw->device_id == (vendor_dev_id & 0xFFFF))
break;
}
if (i == dev_cnt) {
i40e_debug(hw, I40E_DEBUG_PACKAGE, "Device doesn't support DDP");
return I40E_ERR_DEVICE_NOT_SUPPORTED;
}
nvm = (u32 *)&profile->device_table[dev_cnt];
sec_tbl = (struct i40e_section_table *)&nvm[nvm[0] + 1];
for (i = 0; i < sec_tbl->section_count; i++) {
sec = (struct i40e_profile_section_header *)((u8 *)profile +
sec_tbl->section_offset[i]);
/* Skip 'AQ', 'note' and 'name' sections */
if (sec->section.type != SECTION_TYPE_MMIO)
continue;
section_size = sec->section.size +
sizeof(struct i40e_profile_section_header);
/* Write profile */
status = i40evf_aq_write_ddp(hw, (void *)sec, (u16)section_size,
track_id, &offset, &info, NULL);
if (status) {
i40e_debug(hw, I40E_DEBUG_PACKAGE,
"Failed to write profile: offset %d, info %d",
offset, info);
break;
}
}
return status;
}
/**
* i40evf_add_pinfo_to_list
* @hw: pointer to the hardware structure
* @profile: pointer to the profile segment of the package
* @profile_info_sec: buffer for information section
* @track_id: package tracking id
*
* Register a profile to the list of loaded profiles.
*/
enum i40e_status_code
i40evf_add_pinfo_to_list(struct i40e_hw *hw,
struct i40e_profile_segment *profile,
u8 *profile_info_sec, u32 track_id)
{
i40e_status status = 0;
struct i40e_profile_section_header *sec = NULL;
struct i40e_profile_info *pinfo;
u32 offset = 0, info = 0;
sec = (struct i40e_profile_section_header *)profile_info_sec;
sec->tbl_size = 1;
sec->data_end = sizeof(struct i40e_profile_section_header) +
sizeof(struct i40e_profile_info);
sec->section.type = SECTION_TYPE_INFO;
sec->section.offset = sizeof(struct i40e_profile_section_header);
sec->section.size = sizeof(struct i40e_profile_info);
pinfo = (struct i40e_profile_info *)(profile_info_sec +
sec->section.offset);
pinfo->track_id = track_id;
pinfo->version = profile->version;
pinfo->op = I40E_DDP_ADD_TRACKID;
memcpy(pinfo->name, profile->name, I40E_DDP_NAME_SIZE);
status = i40evf_aq_write_ddp(hw, (void *)sec, sec->data_end,
track_id, &offset, &info, NULL);
return status;
}
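
Per the hunk counts above, the Rx-control wrappers shown here (i40evf_read_rx_ctl()/i40evf_write_rx_ctl()) are apparently being removed from this file. Both followed one pattern worth noting: try the AdminQ op, retry on EAGAIN with a short sleep, and fall back to direct register access on old firmware or AQ failure. A loop-based restatement of that pattern (a sketch, not the driver's code; the function name is invented):

/* Sketch of the AQ-with-register-fallback pattern. */
u32 read_rx_ctl_sketch(struct i40e_hw *hw, u32 reg_addr)
{
	bool use_register = (hw->aq.api_maj_ver == 1 &&
			     hw->aq.api_min_ver < 5) ||
			    hw->mac.type == I40E_MAC_X722;
	i40e_status status = 0;
	int retry = 5;
	u32 val = 0;

	while (!use_register) {
		status = i40evf_aq_rx_ctl_read_register(hw, reg_addr,
							&val, NULL);
		/* firmware busy: back off briefly and retry a few times */
		if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry--) {
			usleep_range(1000, 2000);
			continue;
		}
		break;
	}

	/* if the AQ access failed (or is unavailable), hit the register */
	if (status || use_register)
		val = rd32(hw, reg_addr);

	return val;
}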


@@ -0,0 +1,71 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2013 - 2018 Intel Corporation. */
#ifndef _I40E_PROTOTYPE_H_
#define _I40E_PROTOTYPE_H_
#include "i40e_type.h"
#include "i40e_alloc.h"
#include <linux/avf/virtchnl.h>
/* Prototypes for shared code functions that are not in
* the standard function pointer structures. These are
* mostly because they are needed even before the init
* has happened and will assist in the early SW and FW
* setup.
*/
/* adminq functions */
i40e_status i40evf_init_adminq(struct i40e_hw *hw);
i40e_status i40evf_shutdown_adminq(struct i40e_hw *hw);
void i40e_adminq_init_ring_data(struct i40e_hw *hw);
i40e_status i40evf_clean_arq_element(struct i40e_hw *hw,
struct i40e_arq_event_info *e,
u16 *events_pending);
i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
struct i40e_aq_desc *desc,
void *buff, /* can be NULL */
u16 buff_size,
struct i40e_asq_cmd_details *cmd_details);
bool i40evf_asq_done(struct i40e_hw *hw);
/* debug function for adminq */
void i40evf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask,
void *desc, void *buffer, u16 buf_len);
void i40e_idle_aq(struct i40e_hw *hw);
void i40evf_resume_aq(struct i40e_hw *hw);
bool i40evf_check_asq_alive(struct i40e_hw *hw);
i40e_status i40evf_aq_queue_shutdown(struct i40e_hw *hw, bool unloading);
const char *i40evf_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err);
const char *i40evf_stat_str(struct i40e_hw *hw, i40e_status stat_err);
i40e_status i40evf_aq_get_rss_lut(struct i40e_hw *hw, u16 seid,
bool pf_lut, u8 *lut, u16 lut_size);
i40e_status i40evf_aq_set_rss_lut(struct i40e_hw *hw, u16 seid,
bool pf_lut, u8 *lut, u16 lut_size);
i40e_status i40evf_aq_get_rss_key(struct i40e_hw *hw,
u16 seid,
struct i40e_aqc_get_set_rss_key_data *key);
i40e_status i40evf_aq_set_rss_key(struct i40e_hw *hw,
u16 seid,
struct i40e_aqc_get_set_rss_key_data *key);
i40e_status i40e_set_mac_type(struct i40e_hw *hw);
extern struct i40e_rx_ptype_decoded i40evf_ptype_lookup[];
static inline struct i40e_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype)
{
return i40evf_ptype_lookup[ptype];
}
/* i40e_common for VF drivers*/
void i40e_vf_parse_hw_config(struct i40e_hw *hw,
struct virtchnl_vf_resource *msg);
i40e_status i40e_vf_reset(struct i40e_hw *hw);
i40e_status i40e_aq_send_msg_to_pf(struct i40e_hw *hw,
enum virtchnl_ops v_opcode,
i40e_status v_retval, u8 *msg, u16 msglen,
struct i40e_asq_cmd_details *cmd_details);
#endif /* _I40E_PROTOTYPE_H_ */
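
decode_rx_desc_ptype() above is a plain table lookup into i40evf_ptype_lookup[]. A hedged usage sketch (the wrapper function is invented; the decoded field names come from i40e_type.h):

/* Illustrative only: classify a ptype as inner TCP over IP. */
static bool rx_ptype_is_tcp(u8 ptype)
{
	struct i40e_rx_ptype_decoded d = decode_rx_desc_ptype(ptype);

	return d.known &&
	       d.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
	       d.inner_prot == I40E_RX_PTYPE_INNER_PROT_TCP;
}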


@@ -0,0 +1,68 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2013 - 2018 Intel Corporation. */
#ifndef _I40E_REGISTER_H_
#define _I40E_REGISTER_H_
#define I40E_VF_ARQBAH1 0x00006000 /* Reset: EMPR */
#define I40E_VF_ARQBAL1 0x00006C00 /* Reset: EMPR */
#define I40E_VF_ARQH1 0x00007400 /* Reset: EMPR */
#define I40E_VF_ARQH1_ARQH_SHIFT 0
#define I40E_VF_ARQH1_ARQH_MASK I40E_MASK(0x3FF, I40E_VF_ARQH1_ARQH_SHIFT)
#define I40E_VF_ARQLEN1 0x00008000 /* Reset: EMPR */
#define I40E_VF_ARQLEN1_ARQVFE_SHIFT 28
#define I40E_VF_ARQLEN1_ARQVFE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQVFE_SHIFT)
#define I40E_VF_ARQLEN1_ARQOVFL_SHIFT 29
#define I40E_VF_ARQLEN1_ARQOVFL_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQOVFL_SHIFT)
#define I40E_VF_ARQLEN1_ARQCRIT_SHIFT 30
#define I40E_VF_ARQLEN1_ARQCRIT_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQCRIT_SHIFT)
#define I40E_VF_ARQLEN1_ARQENABLE_SHIFT 31
#define I40E_VF_ARQLEN1_ARQENABLE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQENABLE_SHIFT)
#define I40E_VF_ARQT1 0x00007000 /* Reset: EMPR */
#define I40E_VF_ATQBAH1 0x00007800 /* Reset: EMPR */
#define I40E_VF_ATQBAL1 0x00007C00 /* Reset: EMPR */
#define I40E_VF_ATQH1 0x00006400 /* Reset: EMPR */
#define I40E_VF_ATQLEN1 0x00006800 /* Reset: EMPR */
#define I40E_VF_ATQLEN1_ATQVFE_SHIFT 28
#define I40E_VF_ATQLEN1_ATQVFE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQVFE_SHIFT)
#define I40E_VF_ATQLEN1_ATQOVFL_SHIFT 29
#define I40E_VF_ATQLEN1_ATQOVFL_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQOVFL_SHIFT)
#define I40E_VF_ATQLEN1_ATQCRIT_SHIFT 30
#define I40E_VF_ATQLEN1_ATQCRIT_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQCRIT_SHIFT)
#define I40E_VF_ATQLEN1_ATQENABLE_SHIFT 31
#define I40E_VF_ATQLEN1_ATQENABLE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQENABLE_SHIFT)
#define I40E_VF_ATQT1 0x00008400 /* Reset: EMPR */
#define I40E_VFGEN_RSTAT 0x00008800 /* Reset: VFR */
#define I40E_VFGEN_RSTAT_VFR_STATE_SHIFT 0
#define I40E_VFGEN_RSTAT_VFR_STATE_MASK I40E_MASK(0x3, I40E_VFGEN_RSTAT_VFR_STATE_SHIFT)
#define I40E_VFINT_DYN_CTL01 0x00005C00 /* Reset: VFR */
#define I40E_VFINT_DYN_CTL01_INTENA_SHIFT 0
#define I40E_VFINT_DYN_CTL01_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_INTENA_SHIFT)
#define I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT 3
#define I40E_VFINT_DYN_CTL01_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT)
#define I40E_VFINT_DYN_CTLN1(_INTVF) (0x00003800 + ((_INTVF) * 4)) /* _i=0...15 */ /* Reset: VFR */
#define I40E_VFINT_DYN_CTLN1_INTENA_SHIFT 0
#define I40E_VFINT_DYN_CTLN1_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_INTENA_SHIFT)
#define I40E_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT 2
#define I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT)
#define I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT 3
#define I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT)
#define I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT 5
#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT 24
#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT)
#define I40E_VFINT_ICR0_ENA1 0x00005000 /* Reset: CORER */
#define I40E_VFINT_ICR0_ENA1_ADMINQ_SHIFT 30
#define I40E_VFINT_ICR0_ENA1_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA1_ADMINQ_SHIFT)
#define I40E_VFINT_ICR0_ENA1_RSVD_SHIFT 31
#define I40E_VFINT_ICR01 0x00004800 /* Reset: CORER */
#define I40E_VFINT_ITRN1(_i, _INTVF) (0x00002800 + ((_i) * 64 + (_INTVF) * 4)) /* _i=0...2, _INTVF=0...15 */ /* Reset: VFR */
#define I40E_QRX_TAIL1(_Q) (0x00002000 + ((_Q) * 4)) /* _i=0...15 */ /* Reset: CORER */
#define I40E_QTX_TAIL1(_Q) (0x00000000 + ((_Q) * 4)) /* _i=0...15 */ /* Reset: PFR */
#define I40E_VFQF_HENA(_i) (0x0000C400 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */
#define I40E_VFQF_HKEY(_i) (0x0000CC00 + ((_i) * 4)) /* _i=0...12 */ /* Reset: CORER */
#define I40E_VFQF_HKEY_MAX_INDEX 12
#define I40E_VFQF_HLUT(_i) (0x0000D000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
#define I40E_VFQF_HLUT_MAX_INDEX 15
#define I40E_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT 30
#define I40E_VFINT_DYN_CTLN1_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT)
#endif /* _I40E_REGISTER_H_ */
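
All of these masks follow the I40E_MASK(value, shift) pattern, i.e. the mask is already shifted into position, so reading a field is mask-then-shift. A two-line sketch (illustrative, not from this diff):

/* Illustrative only: read and decode the VF ARQ head pointer. */
static u32 read_arq_head(struct i40e_hw *hw)
{
	u32 reg = rd32(hw, I40E_VF_ARQH1);

	return (reg & I40E_VF_ARQH1_ARQH_MASK) >> I40E_VF_ARQH1_ARQH_SHIFT;
}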


@@ -1062,7 +1062,7 @@ static inline void i40e_rx_hash(struct i40e_ring *ring,
cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
- if (ring->netdev->features & NETIF_F_RXHASH)
+ if (!(ring->netdev->features & NETIF_F_RXHASH))
return;
if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {


@@ -0,0 +1,719 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2013 - 2018 Intel Corporation. */
#ifndef _I40E_TYPE_H_
#define _I40E_TYPE_H_
#include "i40e_status.h"
#include "i40e_osdep.h"
#include "i40e_register.h"
#include "i40e_adminq.h"
#include "i40e_devids.h"
#define I40E_RXQ_CTX_DBUFF_SHIFT 7
/* I40E_MASK is a macro used on 32 bit registers */
#define I40E_MASK(mask, shift) ((u32)(mask) << (shift))
#define I40E_MAX_VSI_QP 16
#define I40E_MAX_VF_VSI 3
#define I40E_MAX_CHAINED_RX_BUFFERS 5
/* forward declaration */
struct i40e_hw;
typedef void (*I40E_ADMINQ_CALLBACK)(struct i40e_hw *, struct i40e_aq_desc *);
/* Data type manipulation macros. */
#define I40E_DESC_UNUSED(R) \
((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
(R)->next_to_clean - (R)->next_to_use - 1)
/* bitfields for Tx queue mapping in QTX_CTL */
#define I40E_QTX_CTL_VF_QUEUE 0x0
#define I40E_QTX_CTL_VM_QUEUE 0x1
#define I40E_QTX_CTL_PF_QUEUE 0x2
/* debug masks - set these bits in hw->debug_mask to control output */
enum i40e_debug_mask {
I40E_DEBUG_INIT = 0x00000001,
I40E_DEBUG_RELEASE = 0x00000002,
I40E_DEBUG_LINK = 0x00000010,
I40E_DEBUG_PHY = 0x00000020,
I40E_DEBUG_HMC = 0x00000040,
I40E_DEBUG_NVM = 0x00000080,
I40E_DEBUG_LAN = 0x00000100,
I40E_DEBUG_FLOW = 0x00000200,
I40E_DEBUG_DCB = 0x00000400,
I40E_DEBUG_DIAG = 0x00000800,
I40E_DEBUG_FD = 0x00001000,
I40E_DEBUG_PACKAGE = 0x00002000,
I40E_DEBUG_AQ_MESSAGE = 0x01000000,
I40E_DEBUG_AQ_DESCRIPTOR = 0x02000000,
I40E_DEBUG_AQ_DESC_BUFFER = 0x04000000,
I40E_DEBUG_AQ_COMMAND = 0x06000000,
I40E_DEBUG_AQ = 0x0F000000,
I40E_DEBUG_USER = 0xF0000000,
I40E_DEBUG_ALL = 0xFFFFFFFF
};
/* These are structs for managing the hardware information and the operations.
* The structures of function pointers are filled out at init time when we
* know for sure exactly which hardware we're working with. This gives us the
* flexibility of using the same main driver code but adapting to slightly
* different hardware needs as new parts are developed. For this architecture,
* the Firmware and AdminQ are intended to insulate the driver from most of the
* future changes, but these structures will also do part of the job.
*/
enum i40e_mac_type {
I40E_MAC_UNKNOWN = 0,
I40E_MAC_XL710,
I40E_MAC_VF,
I40E_MAC_X722,
I40E_MAC_X722_VF,
I40E_MAC_GENERIC,
};
enum i40e_vsi_type {
I40E_VSI_MAIN = 0,
I40E_VSI_VMDQ1 = 1,
I40E_VSI_VMDQ2 = 2,
I40E_VSI_CTRL = 3,
I40E_VSI_FCOE = 4,
I40E_VSI_MIRROR = 5,
I40E_VSI_SRIOV = 6,
I40E_VSI_FDIR = 7,
I40E_VSI_TYPE_UNKNOWN
};
enum i40e_queue_type {
I40E_QUEUE_TYPE_RX = 0,
I40E_QUEUE_TYPE_TX,
I40E_QUEUE_TYPE_PE_CEQ,
I40E_QUEUE_TYPE_UNKNOWN
};
#define I40E_HW_CAP_MAX_GPIO 30
/* Capabilities of a PF or a VF or the whole device */
struct i40e_hw_capabilities {
bool dcb;
bool fcoe;
u32 num_vsis;
u32 num_rx_qp;
u32 num_tx_qp;
u32 base_queue;
u32 num_msix_vectors_vf;
};
struct i40e_mac_info {
enum i40e_mac_type type;
u8 addr[ETH_ALEN];
u8 perm_addr[ETH_ALEN];
u8 san_addr[ETH_ALEN];
u16 max_fcoeq;
};
/* PCI bus types */
enum i40e_bus_type {
i40e_bus_type_unknown = 0,
i40e_bus_type_pci,
i40e_bus_type_pcix,
i40e_bus_type_pci_express,
i40e_bus_type_reserved
};
/* PCI bus speeds */
enum i40e_bus_speed {
i40e_bus_speed_unknown = 0,
i40e_bus_speed_33 = 33,
i40e_bus_speed_66 = 66,
i40e_bus_speed_100 = 100,
i40e_bus_speed_120 = 120,
i40e_bus_speed_133 = 133,
i40e_bus_speed_2500 = 2500,
i40e_bus_speed_5000 = 5000,
i40e_bus_speed_8000 = 8000,
i40e_bus_speed_reserved
};
/* PCI bus widths */
enum i40e_bus_width {
i40e_bus_width_unknown = 0,
i40e_bus_width_pcie_x1 = 1,
i40e_bus_width_pcie_x2 = 2,
i40e_bus_width_pcie_x4 = 4,
i40e_bus_width_pcie_x8 = 8,
i40e_bus_width_32 = 32,
i40e_bus_width_64 = 64,
i40e_bus_width_reserved
};
/* Bus parameters */
struct i40e_bus_info {
enum i40e_bus_speed speed;
enum i40e_bus_width width;
enum i40e_bus_type type;
u16 func;
u16 device;
u16 lan_id;
u16 bus_id;
};
#define I40E_MAX_TRAFFIC_CLASS 8
#define I40E_MAX_USER_PRIORITY 8
/* Port hardware description */
struct i40e_hw {
u8 __iomem *hw_addr;
void *back;
/* subsystem structs */
struct i40e_mac_info mac;
struct i40e_bus_info bus;
/* pci info */
u16 device_id;
u16 vendor_id;
u16 subsystem_device_id;
u16 subsystem_vendor_id;
u8 revision_id;
/* capabilities for entire device and PCI func */
struct i40e_hw_capabilities dev_caps;
/* Admin Queue info */
struct i40e_adminq_info aq;
/* debug mask */
u32 debug_mask;
char err_str[16];
};
static inline bool i40e_is_vf(struct i40e_hw *hw)
{
return (hw->mac.type == I40E_MAC_VF ||
hw->mac.type == I40E_MAC_X722_VF);
}
struct i40e_driver_version {
u8 major_version;
u8 minor_version;
u8 build_version;
u8 subbuild_version;
u8 driver_string[32];
};
/* RX Descriptors */
union i40e_16byte_rx_desc {
struct {
__le64 pkt_addr; /* Packet buffer address */
__le64 hdr_addr; /* Header buffer address */
} read;
struct {
struct {
struct {
union {
__le16 mirroring_status;
__le16 fcoe_ctx_id;
} mirr_fcoe;
__le16 l2tag1;
} lo_dword;
union {
__le32 rss; /* RSS Hash */
__le32 fd_id; /* Flow director filter id */
__le32 fcoe_param; /* FCoE DDP Context id */
} hi_dword;
} qword0;
struct {
/* ext status/error/pktype/length */
__le64 status_error_len;
} qword1;
} wb; /* writeback */
};
union i40e_32byte_rx_desc {
struct {
__le64 pkt_addr; /* Packet buffer address */
__le64 hdr_addr; /* Header buffer address */
/* bit 0 of hdr_buffer_addr is DD bit */
__le64 rsvd1;
__le64 rsvd2;
} read;
struct {
struct {
struct {
union {
__le16 mirroring_status;
__le16 fcoe_ctx_id;
} mirr_fcoe;
__le16 l2tag1;
} lo_dword;
union {
__le32 rss; /* RSS Hash */
__le32 fcoe_param; /* FCoE DDP Context id */
/* Flow director filter id in case of
* Programming status desc WB
*/
__le32 fd_id;
} hi_dword;
} qword0;
struct {
/* status/error/pktype/length */
__le64 status_error_len;
} qword1;
struct {
__le16 ext_status; /* extended status */
__le16 rsvd;
__le16 l2tag2_1;
__le16 l2tag2_2;
} qword2;
struct {
union {
__le32 flex_bytes_lo;
__le32 pe_status;
} lo_dword;
union {
__le32 flex_bytes_hi;
__le32 fd_id;
} hi_dword;
} qword3;
} wb; /* writeback */
};
enum i40e_rx_desc_status_bits {
/* Note: These are predefined bit offsets */
I40E_RX_DESC_STATUS_DD_SHIFT = 0,
I40E_RX_DESC_STATUS_EOF_SHIFT = 1,
I40E_RX_DESC_STATUS_L2TAG1P_SHIFT = 2,
I40E_RX_DESC_STATUS_L3L4P_SHIFT = 3,
I40E_RX_DESC_STATUS_CRCP_SHIFT = 4,
I40E_RX_DESC_STATUS_TSYNINDX_SHIFT = 5, /* 2 BITS */
I40E_RX_DESC_STATUS_TSYNVALID_SHIFT = 7,
/* Note: Bit 8 is reserved in X710 and XL710 */
I40E_RX_DESC_STATUS_EXT_UDP_0_SHIFT = 8,
I40E_RX_DESC_STATUS_UMBCAST_SHIFT = 9, /* 2 BITS */
I40E_RX_DESC_STATUS_FLM_SHIFT = 11,
I40E_RX_DESC_STATUS_FLTSTAT_SHIFT = 12, /* 2 BITS */
I40E_RX_DESC_STATUS_LPBK_SHIFT = 14,
I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT = 15,
I40E_RX_DESC_STATUS_RESERVED_SHIFT = 16, /* 2 BITS */
/* Note: For non-tunnel packets INT_UDP_0 is the right status for
* UDP header
*/
I40E_RX_DESC_STATUS_INT_UDP_0_SHIFT = 18,
I40E_RX_DESC_STATUS_LAST /* this entry must be last!!! */
};
#define I40E_RXD_QW1_STATUS_SHIFT 0
#define I40E_RXD_QW1_STATUS_MASK ((BIT(I40E_RX_DESC_STATUS_LAST) - 1) \
<< I40E_RXD_QW1_STATUS_SHIFT)
#define I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT I40E_RX_DESC_STATUS_TSYNINDX_SHIFT
#define I40E_RXD_QW1_STATUS_TSYNINDX_MASK (0x3UL << \
I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT)
#define I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT I40E_RX_DESC_STATUS_TSYNVALID_SHIFT
#define I40E_RXD_QW1_STATUS_TSYNVALID_MASK \
BIT_ULL(I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT)
enum i40e_rx_desc_fltstat_values {
I40E_RX_DESC_FLTSTAT_NO_DATA = 0,
I40E_RX_DESC_FLTSTAT_RSV_FD_ID = 1, /* 16byte desc? FD_ID : RSV */
I40E_RX_DESC_FLTSTAT_RSV = 2,
I40E_RX_DESC_FLTSTAT_RSS_HASH = 3,
};
#define I40E_RXD_QW1_ERROR_SHIFT 19
#define I40E_RXD_QW1_ERROR_MASK (0xFFUL << I40E_RXD_QW1_ERROR_SHIFT)
enum i40e_rx_desc_error_bits {
/* Note: These are predefined bit offsets */
I40E_RX_DESC_ERROR_RXE_SHIFT = 0,
I40E_RX_DESC_ERROR_RECIPE_SHIFT = 1,
I40E_RX_DESC_ERROR_HBO_SHIFT = 2,
I40E_RX_DESC_ERROR_L3L4E_SHIFT = 3, /* 3 BITS */
I40E_RX_DESC_ERROR_IPE_SHIFT = 3,
I40E_RX_DESC_ERROR_L4E_SHIFT = 4,
I40E_RX_DESC_ERROR_EIPE_SHIFT = 5,
I40E_RX_DESC_ERROR_OVERSIZE_SHIFT = 6,
I40E_RX_DESC_ERROR_PPRS_SHIFT = 7
};
enum i40e_rx_desc_error_l3l4e_fcoe_masks {
I40E_RX_DESC_ERROR_L3L4E_NONE = 0,
I40E_RX_DESC_ERROR_L3L4E_PROT = 1,
I40E_RX_DESC_ERROR_L3L4E_FC = 2,
I40E_RX_DESC_ERROR_L3L4E_DMAC_ERR = 3,
I40E_RX_DESC_ERROR_L3L4E_DMAC_WARN = 4
};
#define I40E_RXD_QW1_PTYPE_SHIFT 30
#define I40E_RXD_QW1_PTYPE_MASK (0xFFULL << I40E_RXD_QW1_PTYPE_SHIFT)
/* Packet type non-ip values */
enum i40e_rx_l2_ptype {
I40E_RX_PTYPE_L2_RESERVED = 0,
I40E_RX_PTYPE_L2_MAC_PAY2 = 1,
I40E_RX_PTYPE_L2_TIMESYNC_PAY2 = 2,
I40E_RX_PTYPE_L2_FIP_PAY2 = 3,
I40E_RX_PTYPE_L2_OUI_PAY2 = 4,
I40E_RX_PTYPE_L2_MACCNTRL_PAY2 = 5,
I40E_RX_PTYPE_L2_LLDP_PAY2 = 6,
I40E_RX_PTYPE_L2_ECP_PAY2 = 7,
I40E_RX_PTYPE_L2_EVB_PAY2 = 8,
I40E_RX_PTYPE_L2_QCN_PAY2 = 9,
I40E_RX_PTYPE_L2_EAPOL_PAY2 = 10,
I40E_RX_PTYPE_L2_ARP = 11,
I40E_RX_PTYPE_L2_FCOE_PAY3 = 12,
I40E_RX_PTYPE_L2_FCOE_FCDATA_PAY3 = 13,
I40E_RX_PTYPE_L2_FCOE_FCRDY_PAY3 = 14,
I40E_RX_PTYPE_L2_FCOE_FCRSP_PAY3 = 15,
I40E_RX_PTYPE_L2_FCOE_FCOTHER_PA = 16,
I40E_RX_PTYPE_L2_FCOE_VFT_PAY3 = 17,
I40E_RX_PTYPE_L2_FCOE_VFT_FCDATA = 18,
I40E_RX_PTYPE_L2_FCOE_VFT_FCRDY = 19,
I40E_RX_PTYPE_L2_FCOE_VFT_FCRSP = 20,
I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER = 21,
I40E_RX_PTYPE_GRENAT4_MAC_PAY3 = 58,
I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4 = 87,
I40E_RX_PTYPE_GRENAT6_MAC_PAY3 = 124,
I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4 = 153
};
struct i40e_rx_ptype_decoded {
u32 ptype:8;
u32 known:1;
u32 outer_ip:1;
u32 outer_ip_ver:1;
u32 outer_frag:1;
u32 tunnel_type:3;
u32 tunnel_end_prot:2;
u32 tunnel_end_frag:1;
u32 inner_prot:4;
u32 payload_layer:3;
};
enum i40e_rx_ptype_outer_ip {
I40E_RX_PTYPE_OUTER_L2 = 0,
I40E_RX_PTYPE_OUTER_IP = 1
};
enum i40e_rx_ptype_outer_ip_ver {
I40E_RX_PTYPE_OUTER_NONE = 0,
I40E_RX_PTYPE_OUTER_IPV4 = 0,
I40E_RX_PTYPE_OUTER_IPV6 = 1
};
enum i40e_rx_ptype_outer_fragmented {
I40E_RX_PTYPE_NOT_FRAG = 0,
I40E_RX_PTYPE_FRAG = 1
};
enum i40e_rx_ptype_tunnel_type {
I40E_RX_PTYPE_TUNNEL_NONE = 0,
I40E_RX_PTYPE_TUNNEL_IP_IP = 1,
I40E_RX_PTYPE_TUNNEL_IP_GRENAT = 2,
I40E_RX_PTYPE_TUNNEL_IP_GRENAT_MAC = 3,
I40E_RX_PTYPE_TUNNEL_IP_GRENAT_MAC_VLAN = 4,
};
enum i40e_rx_ptype_tunnel_end_prot {
I40E_RX_PTYPE_TUNNEL_END_NONE = 0,
I40E_RX_PTYPE_TUNNEL_END_IPV4 = 1,
I40E_RX_PTYPE_TUNNEL_END_IPV6 = 2,
};
enum i40e_rx_ptype_inner_prot {
I40E_RX_PTYPE_INNER_PROT_NONE = 0,
I40E_RX_PTYPE_INNER_PROT_UDP = 1,
I40E_RX_PTYPE_INNER_PROT_TCP = 2,
I40E_RX_PTYPE_INNER_PROT_SCTP = 3,
I40E_RX_PTYPE_INNER_PROT_ICMP = 4,
I40E_RX_PTYPE_INNER_PROT_TIMESYNC = 5
};
enum i40e_rx_ptype_payload_layer {
I40E_RX_PTYPE_PAYLOAD_LAYER_NONE = 0,
I40E_RX_PTYPE_PAYLOAD_LAYER_PAY2 = 1,
I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3 = 2,
I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4 = 3,
};
#define I40E_RXD_QW1_LENGTH_PBUF_SHIFT 38
#define I40E_RXD_QW1_LENGTH_PBUF_MASK (0x3FFFULL << \
I40E_RXD_QW1_LENGTH_PBUF_SHIFT)
#define I40E_RXD_QW1_LENGTH_HBUF_SHIFT 52
#define I40E_RXD_QW1_LENGTH_HBUF_MASK (0x7FFULL << \
I40E_RXD_QW1_LENGTH_HBUF_SHIFT)
#define I40E_RXD_QW1_LENGTH_SPH_SHIFT 63
#define I40E_RXD_QW1_LENGTH_SPH_MASK BIT_ULL(I40E_RXD_QW1_LENGTH_SPH_SHIFT)
enum i40e_rx_desc_ext_status_bits {
/* Note: These are predefined bit offsets */
I40E_RX_DESC_EXT_STATUS_L2TAG2P_SHIFT = 0,
I40E_RX_DESC_EXT_STATUS_L2TAG3P_SHIFT = 1,
I40E_RX_DESC_EXT_STATUS_FLEXBL_SHIFT = 2, /* 2 BITS */
I40E_RX_DESC_EXT_STATUS_FLEXBH_SHIFT = 4, /* 2 BITS */
I40E_RX_DESC_EXT_STATUS_FDLONGB_SHIFT = 9,
I40E_RX_DESC_EXT_STATUS_FCOELONGB_SHIFT = 10,
I40E_RX_DESC_EXT_STATUS_PELONGB_SHIFT = 11,
};
enum i40e_rx_desc_pe_status_bits {
/* Note: These are predefined bit offsets */
I40E_RX_DESC_PE_STATUS_QPID_SHIFT = 0, /* 18 BITS */
I40E_RX_DESC_PE_STATUS_L4PORT_SHIFT = 0, /* 16 BITS */
I40E_RX_DESC_PE_STATUS_IPINDEX_SHIFT = 16, /* 8 BITS */
I40E_RX_DESC_PE_STATUS_QPIDHIT_SHIFT = 24,
I40E_RX_DESC_PE_STATUS_APBVTHIT_SHIFT = 25,
I40E_RX_DESC_PE_STATUS_PORTV_SHIFT = 26,
I40E_RX_DESC_PE_STATUS_URG_SHIFT = 27,
I40E_RX_DESC_PE_STATUS_IPFRAG_SHIFT = 28,
I40E_RX_DESC_PE_STATUS_IPOPT_SHIFT = 29
};
#define I40E_RX_PROG_STATUS_DESC_LENGTH_SHIFT 38
#define I40E_RX_PROG_STATUS_DESC_LENGTH 0x2000000
#define I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT 2
#define I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK (0x7UL << \
I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT)
#define I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT 19
#define I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK (0x3FUL << \
I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT)
enum i40e_rx_prog_status_desc_status_bits {
/* Note: These are predefined bit offsets */
I40E_RX_PROG_STATUS_DESC_DD_SHIFT = 0,
I40E_RX_PROG_STATUS_DESC_PROG_ID_SHIFT = 2 /* 3 BITS */
};
enum i40e_rx_prog_status_desc_prog_id_masks {
I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS = 1,
I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_PROG_STATUS = 2,
I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_INVL_STATUS = 4,
};
enum i40e_rx_prog_status_desc_error_bits {
/* Note: These are predefined bit offsets */
I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT = 0,
I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT = 1,
I40E_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT = 2,
I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT = 3
};
/* TX Descriptor */
struct i40e_tx_desc {
__le64 buffer_addr; /* Address of descriptor's data buf */
__le64 cmd_type_offset_bsz;
};
#define I40E_TXD_QW1_DTYPE_SHIFT 0
#define I40E_TXD_QW1_DTYPE_MASK (0xFUL << I40E_TXD_QW1_DTYPE_SHIFT)
enum i40e_tx_desc_dtype_value {
I40E_TX_DESC_DTYPE_DATA = 0x0,
I40E_TX_DESC_DTYPE_NOP = 0x1, /* same as Context desc */
I40E_TX_DESC_DTYPE_CONTEXT = 0x1,
I40E_TX_DESC_DTYPE_FCOE_CTX = 0x2,
I40E_TX_DESC_DTYPE_FILTER_PROG = 0x8,
I40E_TX_DESC_DTYPE_DDP_CTX = 0x9,
I40E_TX_DESC_DTYPE_FLEX_DATA = 0xB,
I40E_TX_DESC_DTYPE_FLEX_CTX_1 = 0xC,
I40E_TX_DESC_DTYPE_FLEX_CTX_2 = 0xD,
I40E_TX_DESC_DTYPE_DESC_DONE = 0xF
};
#define I40E_TXD_QW1_CMD_SHIFT 4
#define I40E_TXD_QW1_CMD_MASK (0x3FFUL << I40E_TXD_QW1_CMD_SHIFT)
enum i40e_tx_desc_cmd_bits {
I40E_TX_DESC_CMD_EOP = 0x0001,
I40E_TX_DESC_CMD_RS = 0x0002,
I40E_TX_DESC_CMD_ICRC = 0x0004,
I40E_TX_DESC_CMD_IL2TAG1 = 0x0008,
I40E_TX_DESC_CMD_DUMMY = 0x0010,
I40E_TX_DESC_CMD_IIPT_NONIP = 0x0000, /* 2 BITS */
I40E_TX_DESC_CMD_IIPT_IPV6 = 0x0020, /* 2 BITS */
I40E_TX_DESC_CMD_IIPT_IPV4 = 0x0040, /* 2 BITS */
I40E_TX_DESC_CMD_IIPT_IPV4_CSUM = 0x0060, /* 2 BITS */
I40E_TX_DESC_CMD_FCOET = 0x0080,
I40E_TX_DESC_CMD_L4T_EOFT_UNK = 0x0000, /* 2 BITS */
I40E_TX_DESC_CMD_L4T_EOFT_TCP = 0x0100, /* 2 BITS */
I40E_TX_DESC_CMD_L4T_EOFT_SCTP = 0x0200, /* 2 BITS */
I40E_TX_DESC_CMD_L4T_EOFT_UDP = 0x0300, /* 2 BITS */
I40E_TX_DESC_CMD_L4T_EOFT_EOF_N = 0x0000, /* 2 BITS */
I40E_TX_DESC_CMD_L4T_EOFT_EOF_T = 0x0100, /* 2 BITS */
I40E_TX_DESC_CMD_L4T_EOFT_EOF_NI = 0x0200, /* 2 BITS */
I40E_TX_DESC_CMD_L4T_EOFT_EOF_A = 0x0300, /* 2 BITS */
};
#define I40E_TXD_QW1_OFFSET_SHIFT 16
#define I40E_TXD_QW1_OFFSET_MASK (0x3FFFFULL << \
I40E_TXD_QW1_OFFSET_SHIFT)
enum i40e_tx_desc_length_fields {
/* Note: These are predefined bit offsets */
I40E_TX_DESC_LENGTH_MACLEN_SHIFT = 0, /* 7 BITS */
I40E_TX_DESC_LENGTH_IPLEN_SHIFT = 7, /* 7 BITS */
I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT = 14 /* 4 BITS */
};
#define I40E_TXD_QW1_TX_BUF_SZ_SHIFT 34
#define I40E_TXD_QW1_TX_BUF_SZ_MASK (0x3FFFULL << \
I40E_TXD_QW1_TX_BUF_SZ_SHIFT)
#define I40E_TXD_QW1_L2TAG1_SHIFT 48
#define I40E_TXD_QW1_L2TAG1_MASK (0xFFFFULL << I40E_TXD_QW1_L2TAG1_SHIFT)
/* Context descriptors */
struct i40e_tx_context_desc {
__le32 tunneling_params;
__le16 l2tag2;
__le16 rsvd;
__le64 type_cmd_tso_mss;
};
#define I40E_TXD_CTX_QW1_CMD_SHIFT 4
#define I40E_TXD_CTX_QW1_CMD_MASK (0xFFFFUL << I40E_TXD_CTX_QW1_CMD_SHIFT)
enum i40e_tx_ctx_desc_cmd_bits {
I40E_TX_CTX_DESC_TSO = 0x01,
I40E_TX_CTX_DESC_TSYN = 0x02,
I40E_TX_CTX_DESC_IL2TAG2 = 0x04,
I40E_TX_CTX_DESC_IL2TAG2_IL2H = 0x08,
I40E_TX_CTX_DESC_SWTCH_NOTAG = 0x00,
I40E_TX_CTX_DESC_SWTCH_UPLINK = 0x10,
I40E_TX_CTX_DESC_SWTCH_LOCAL = 0x20,
I40E_TX_CTX_DESC_SWTCH_VSI = 0x30,
I40E_TX_CTX_DESC_SWPE = 0x40
};
#define I40E_TXD_CTX_QW1_TSO_LEN_SHIFT 30
#define I40E_TXD_CTX_QW1_TSO_LEN_MASK (0x3FFFFULL << \
I40E_TXD_CTX_QW1_TSO_LEN_SHIFT)
#define I40E_TXD_CTX_QW1_MSS_SHIFT 50
#define I40E_TXD_CTX_QW1_MSS_MASK (0x3FFFULL << \
I40E_TXD_CTX_QW1_MSS_SHIFT)
#define I40E_TXD_CTX_QW1_VSI_SHIFT 50
#define I40E_TXD_CTX_QW1_VSI_MASK (0x1FFULL << I40E_TXD_CTX_QW1_VSI_SHIFT)
#define I40E_TXD_CTX_QW0_EXT_IP_SHIFT 0
#define I40E_TXD_CTX_QW0_EXT_IP_MASK (0x3ULL << \
I40E_TXD_CTX_QW0_EXT_IP_SHIFT)
enum i40e_tx_ctx_desc_eipt_offload {
I40E_TX_CTX_EXT_IP_NONE = 0x0,
I40E_TX_CTX_EXT_IP_IPV6 = 0x1,
I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM = 0x2,
I40E_TX_CTX_EXT_IP_IPV4 = 0x3
};
#define I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT 2
#define I40E_TXD_CTX_QW0_EXT_IPLEN_MASK (0x3FULL << \
I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT)
#define I40E_TXD_CTX_QW0_NATT_SHIFT 9
#define I40E_TXD_CTX_QW0_NATT_MASK (0x3ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
#define I40E_TXD_CTX_UDP_TUNNELING BIT_ULL(I40E_TXD_CTX_QW0_NATT_SHIFT)
#define I40E_TXD_CTX_GRE_TUNNELING (0x2ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
#define I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT 11
#define I40E_TXD_CTX_QW0_EIP_NOINC_MASK \
BIT_ULL(I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT)
#define I40E_TXD_CTX_EIP_NOINC_IPID_CONST I40E_TXD_CTX_QW0_EIP_NOINC_MASK
#define I40E_TXD_CTX_QW0_NATLEN_SHIFT 12
#define I40E_TXD_CTX_QW0_NATLEN_MASK (0X7FULL << \
I40E_TXD_CTX_QW0_NATLEN_SHIFT)
#define I40E_TXD_CTX_QW0_DECTTL_SHIFT 19
#define I40E_TXD_CTX_QW0_DECTTL_MASK (0xFULL << \
I40E_TXD_CTX_QW0_DECTTL_SHIFT)
#define I40E_TXD_CTX_QW0_L4T_CS_SHIFT 23
#define I40E_TXD_CTX_QW0_L4T_CS_MASK BIT_ULL(I40E_TXD_CTX_QW0_L4T_CS_SHIFT)
/* Packet Classifier Types for filters */
enum i40e_filter_pctype {
/* Note: Values 0-28 are reserved for future use.
* Value 29, 30, 32 are not supported on XL710 and X710.
*/
I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP = 29,
I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP = 30,
I40E_FILTER_PCTYPE_NONF_IPV4_UDP = 31,
I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK = 32,
I40E_FILTER_PCTYPE_NONF_IPV4_TCP = 33,
I40E_FILTER_PCTYPE_NONF_IPV4_SCTP = 34,
I40E_FILTER_PCTYPE_NONF_IPV4_OTHER = 35,
I40E_FILTER_PCTYPE_FRAG_IPV4 = 36,
/* Note: Values 37-38 are reserved for future use.
* Value 39, 40, 42 are not supported on XL710 and X710.
*/
I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP = 39,
I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP = 40,
I40E_FILTER_PCTYPE_NONF_IPV6_UDP = 41,
I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK = 42,
I40E_FILTER_PCTYPE_NONF_IPV6_TCP = 43,
I40E_FILTER_PCTYPE_NONF_IPV6_SCTP = 44,
I40E_FILTER_PCTYPE_NONF_IPV6_OTHER = 45,
I40E_FILTER_PCTYPE_FRAG_IPV6 = 46,
/* Note: Value 47 is reserved for future use */
I40E_FILTER_PCTYPE_FCOE_OX = 48,
I40E_FILTER_PCTYPE_FCOE_RX = 49,
I40E_FILTER_PCTYPE_FCOE_OTHER = 50,
/* Note: Values 51-62 are reserved for future use */
I40E_FILTER_PCTYPE_L2_PAYLOAD = 63,
};
struct i40e_vsi_context {
u16 seid;
u16 uplink_seid;
u16 vsi_number;
u16 vsis_allocated;
u16 vsis_unallocated;
u16 flags;
u8 pf_num;
u8 vf_num;
u8 connection_type;
struct i40e_aqc_vsi_properties_data info;
};
struct i40e_veb_context {
u16 seid;
u16 uplink_seid;
u16 veb_number;
u16 vebs_allocated;
u16 vebs_unallocated;
u16 flags;
struct i40e_aqc_get_veb_parameters_completion info;
};
/* Statistics collected by each port, VSI, VEB, and S-channel */
struct i40e_eth_stats {
u64 rx_bytes; /* gorc */
u64 rx_unicast; /* uprc */
u64 rx_multicast; /* mprc */
u64 rx_broadcast; /* bprc */
u64 rx_discards; /* rdpc */
u64 rx_unknown_protocol; /* rupp */
u64 tx_bytes; /* gotc */
u64 tx_unicast; /* uptc */
u64 tx_multicast; /* mptc */
u64 tx_broadcast; /* bptc */
u64 tx_discards; /* tdpc */
u64 tx_errors; /* tepc */
};
#endif /* _I40E_TYPE_H_ */
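
The QW1 shift/mask pairs above all slice the same 64-bit writeback word. A condensed sketch of pulling the common fields apart (hypothetical standalone helper; 'qword' is status_error_len after le64_to_cpu()):

/* Illustrative decode of an Rx descriptor's writeback qword1. */
static void decode_rx_qword1(u64 qword)
{
	bool done = qword & BIT(I40E_RX_DESC_STATUS_DD_SHIFT);
	u32 error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
		    I40E_RXD_QW1_ERROR_SHIFT;
	u8 ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
		   I40E_RXD_QW1_PTYPE_SHIFT;
	u16 len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
		  I40E_RXD_QW1_LENGTH_PBUF_SHIFT;

	/* Only trust the other fields once the DD (done) bit is set. */
	(void)done; (void)error; (void)ptype; (void)len;
}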


@@ -17,20 +17,20 @@ static int i40evf_close(struct net_device *netdev);
char i40evf_driver_name[] = "i40evf";
static const char i40evf_driver_string[] =
- "Intel(R) 40-10 Gigabit Virtual Function Network Driver";
+ "Intel(R) Ethernet Adaptive Virtual Function Network Driver";
#define DRV_KERN "-k"
#define DRV_VERSION_MAJOR 3
#define DRV_VERSION_MINOR 2
- #define DRV_VERSION_BUILD 2
+ #define DRV_VERSION_BUILD 3
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) \
DRV_KERN
const char i40evf_driver_version[] = DRV_VERSION;
static const char i40evf_copyright[] =
- "Copyright (c) 2013 - 2015 Intel Corporation.";
+ "Copyright (c) 2013 - 2018 Intel Corporation.";
/* i40evf_pci_tbl - PCI Device ID Table
*
@@ -51,6 +51,7 @@ static const struct pci_device_id i40evf_pci_tbl[] = {
MODULE_DEVICE_TABLE(pci, i40evf_pci_tbl);
MODULE_ALIAS("i40evf");
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) XL710 X710 Virtual Function Network Driver");
MODULE_LICENSE("GPL");


@@ -1362,8 +1362,15 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
memcpy(adapter->vf_res, msg, min(msglen, len));
i40evf_validate_num_queues(adapter);
i40e_vf_parse_hw_config(&adapter->hw, adapter->vf_res);
- /* restore current mac address */
- ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
+ if (is_zero_ether_addr(adapter->hw.mac.addr)) {
+ /* restore current mac address */
+ ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
+ } else {
+ /* refresh current mac address if changed */
+ ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
+ ether_addr_copy(netdev->perm_addr,
+ adapter->hw.mac.addr);
+ }
i40evf_process_config(adapter);
}
break;


@@ -3700,9 +3700,7 @@ static void igb_remove(struct pci_dev *pdev)
igb_release_hw_control(adapter);
#ifdef CONFIG_PCI_IOV
- rtnl_lock();
igb_disable_sriov(pdev);
- rtnl_unlock();
#endif
unregister_netdev(netdev);


@@ -1070,7 +1070,7 @@ static int igbvf_request_msix(struct igbvf_adapter *adapter)
igbvf_intr_msix_rx, 0, adapter->rx_ring->name,
netdev);
if (err)
- goto out;
+ goto free_irq_tx;
adapter->rx_ring->itr_register = E1000_EITR(vector);
adapter->rx_ring->itr_val = adapter->current_itr;
@@ -1079,10 +1079,14 @@ static int igbvf_request_msix(struct igbvf_adapter *adapter)
err = request_irq(adapter->msix_entries[vector].vector,
igbvf_msix_other, 0, netdev->name, netdev);
if (err)
- goto out;
+ goto free_irq_rx;
igbvf_configure_msix(adapter);
return 0;
+ free_irq_rx:
+ free_irq(adapter->msix_entries[--vector].vector, netdev);
+ free_irq_tx:
+ free_irq(adapter->msix_entries[--vector].vector, netdev);
out:
return err;
}
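
The fix above replaces a bare 'goto out' with ordered unwind labels, so each vector that was successfully requested is freed exactly once, in reverse order. The general shape of that idiom, as a generic sketch (all resource helpers are hypothetical, not driver code):

/* Hypothetical resource helpers, declared only for the sketch. */
int acquire_a(void); int acquire_b(void); int acquire_c(void);
void release_a(void); void release_b(void);

int setup_three(void)
{
	int err;

	err = acquire_a();
	if (err)
		goto out;

	err = acquire_b();
	if (err)
		goto free_a;	/* only A was taken at this point */

	err = acquire_c();
	if (err)
		goto free_b;	/* free B, then A, in reverse order */

	return 0;

free_b:
	release_b();
free_a:
	release_a();
out:
	return err;
}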


@@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2009 - 2018 Intel Corporation. */
+ #include <linux/etherdevice.h>
#include "vf.h"
static s32 e1000_check_for_link_vf(struct e1000_hw *hw);
@@ -131,11 +133,16 @@ static s32 e1000_reset_hw_vf(struct e1000_hw *hw)
/* set our "perm_addr" based on info provided by PF */
ret_val = mbx->ops.read_posted(hw, msgbuf, 3);
if (!ret_val) {
- if (msgbuf[0] == (E1000_VF_RESET |
- E1000_VT_MSGTYPE_ACK))
+ switch (msgbuf[0]) {
+ case E1000_VF_RESET | E1000_VT_MSGTYPE_ACK:
memcpy(hw->mac.perm_addr, addr, ETH_ALEN);
- else
+ break;
+ case E1000_VF_RESET | E1000_VT_MSGTYPE_NACK:
+ eth_zero_addr(hw->mac.perm_addr);
+ break;
+ default:
ret_val = -E1000_ERR_MAC_INIT;
+ }
}
}


@@ -542,6 +542,20 @@ struct mvneta_rx_desc {
};
#endif
+ enum mvneta_tx_buf_type {
+ MVNETA_TYPE_SKB,
+ MVNETA_TYPE_XDP_TX,
+ MVNETA_TYPE_XDP_NDO,
+ };
+ struct mvneta_tx_buf {
+ enum mvneta_tx_buf_type type;
+ union {
+ struct xdp_frame *xdpf;
+ struct sk_buff *skb;
+ };
+ };
struct mvneta_tx_queue {
/* Number of this TX queue, in the range 0-7 */
u8 id;
@@ -557,8 +571,8 @@ struct mvneta_tx_queue {
int tx_stop_threshold;
int tx_wake_threshold;
- /* Array of transmitted skb */
- struct sk_buff **tx_skb;
+ /* Array of transmitted buffers */
+ struct mvneta_tx_buf *buf;
/* Index of last TX DMA descriptor that was inserted */
int txq_put_index;
@@ -1767,14 +1781,9 @@ static void mvneta_txq_bufs_free(struct mvneta_port *pp,
int i;
for (i = 0; i < num; i++) {
+ struct mvneta_tx_buf *buf = &txq->buf[txq->txq_get_index];
struct mvneta_tx_desc *tx_desc = txq->descs +
txq->txq_get_index;
- struct sk_buff *skb = txq->tx_skb[txq->txq_get_index];
- if (skb) {
- bytes_compl += skb->len;
- pkts_compl++;
- }
mvneta_txq_inc_get(txq);
@@ -1782,9 +1791,12 @@ static void mvneta_txq_bufs_free(struct mvneta_port *pp,
dma_unmap_single(pp->dev->dev.parent,
tx_desc->buf_phys_addr,
tx_desc->data_size, DMA_TO_DEVICE);
- if (!skb)
+ if (!buf->skb)
continue;
- dev_kfree_skb_any(skb);
+ bytes_compl += buf->skb->len;
+ pkts_compl++;
+ dev_kfree_skb_any(buf->skb);
}
netdev_tx_completed_queue(nq, pkts_compl, bytes_compl);
@@ -2238,16 +2250,19 @@ static inline void
mvneta_tso_put_hdr(struct sk_buff *skb,
struct mvneta_port *pp, struct mvneta_tx_queue *txq)
{
- struct mvneta_tx_desc *tx_desc;
int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
+ struct mvneta_tx_desc *tx_desc;
- txq->tx_skb[txq->txq_put_index] = NULL;
tx_desc = mvneta_txq_next_desc_get(txq);
tx_desc->data_size = hdr_len;
tx_desc->command = mvneta_skb_tx_csum(pp, skb);
tx_desc->command |= MVNETA_TXD_F_DESC;
tx_desc->buf_phys_addr = txq->tso_hdrs_phys +
txq->txq_put_index * TSO_HEADER_SIZE;
+ buf->type = MVNETA_TYPE_SKB;
+ buf->skb = NULL;
mvneta_txq_inc_put(txq);
}
@@ -2256,6 +2271,7 @@ mvneta_tso_put_data(struct net_device *dev, struct mvneta_tx_queue *txq,
struct sk_buff *skb, char *data, int size,
bool last_tcp, bool is_last)
{
+ struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
struct mvneta_tx_desc *tx_desc;
tx_desc = mvneta_txq_next_desc_get(txq);
@@ -2269,7 +2285,8 @@ mvneta_tso_put_data(struct net_device *dev, struct mvneta_tx_queue *txq,
}
tx_desc->command = 0;
- txq->tx_skb[txq->txq_put_index] = NULL;
+ buf->type = MVNETA_TYPE_SKB;
+ buf->skb = NULL;
if (last_tcp) {
/* last descriptor in the TCP packet */
@@ -2277,7 +2294,7 @@ mvneta_tso_put_data(struct net_device *dev, struct mvneta_tx_queue *txq,
/* last descriptor in SKB */
if (is_last)
- txq->tx_skb[txq->txq_put_index] = skb;
+ buf->skb = skb;
}
mvneta_txq_inc_put(txq);
return 0;
@@ -2362,6 +2379,7 @@ static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb,
int i, nr_frags = skb_shinfo(skb)->nr_frags;
for (i = 0; i < nr_frags; i++) {
+ struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
void *addr = page_address(frag->page.p) + frag->page_offset;
@@ -2381,12 +2399,13 @@ static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb,
if (i == nr_frags - 1) {
/* Last descriptor */
tx_desc->command = MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD;
- txq->tx_skb[txq->txq_put_index] = skb;
+ buf->skb = skb;
} else {
/* Descriptor in the middle: Not First, Not Last */
tx_desc->command = 0;
- txq->tx_skb[txq->txq_put_index] = NULL;
+ buf->skb = NULL;
}
+ buf->type = MVNETA_TYPE_SKB;
mvneta_txq_inc_put(txq);
}
@@ -2414,6 +2433,7 @@ static netdev_tx_t mvneta_tx(struct sk_buff *skb, struct net_device *dev)
struct mvneta_port *pp = netdev_priv(dev);
u16 txq_id = skb_get_queue_mapping(skb);
struct mvneta_tx_queue *txq = &pp->txqs[txq_id];
+ struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
struct mvneta_tx_desc *tx_desc;
int len = skb->len;
int frags = 0;
@@ -2446,16 +2466,17 @@ static netdev_tx_t mvneta_tx(struct sk_buff *skb, struct net_device *dev)
goto out;
}
+ buf->type = MVNETA_TYPE_SKB;
if (frags == 1) {
/* First and Last descriptor */
tx_cmd |= MVNETA_TXD_FLZ_DESC;
tx_desc->command = tx_cmd;
- txq->tx_skb[txq->txq_put_index] = skb;
+ buf->skb = skb;
mvneta_txq_inc_put(txq);
} else {
/* First but not Last */
tx_cmd |= MVNETA_TXD_F_DESC;
- txq->tx_skb[txq->txq_put_index] = NULL;
+ buf->skb = NULL;
mvneta_txq_inc_put(txq);
tx_desc->command = tx_cmd;
/* Continue with other skb fragments */
@@ -3000,9 +3021,8 @@ static int mvneta_txq_sw_init(struct mvneta_port *pp,
txq->last_desc = txq->size - 1;
- txq->tx_skb = kmalloc_array(txq->size, sizeof(*txq->tx_skb),
- GFP_KERNEL);
- if (!txq->tx_skb) {
+ txq->buf = kmalloc_array(txq->size, sizeof(*txq->buf), GFP_KERNEL);
+ if (!txq->buf) {
dma_free_coherent(pp->dev->dev.parent,
txq->size * MVNETA_DESC_ALIGNED_SIZE,
txq->descs, txq->descs_phys);
@@ -3014,7 +3034,7 @@ static int mvneta_txq_sw_init(struct mvneta_port *pp,
txq->size * TSO_HEADER_SIZE,
&txq->tso_hdrs_phys, GFP_KERNEL);
if (!txq->tso_hdrs) {
- kfree(txq->tx_skb);
+ kfree(txq->buf);
dma_free_coherent(pp->dev->dev.parent,
txq->size * MVNETA_DESC_ALIGNED_SIZE,
txq->descs, txq->descs_phys);
@@ -3069,7 +3089,7 @@ static void mvneta_txq_sw_deinit(struct mvneta_port *pp,
{
struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
- kfree(txq->tx_skb);
+ kfree(txq->buf);
if (txq->tso_hdrs)
dma_free_coherent(pp->dev->dev.parent,
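
With struct mvneta_tx_buf, the queue records what each descriptor carries (an skb or, later in the series, an XDP frame), so completion can branch on buf->type instead of a bare skb pointer. A minimal sketch of one completion step under that scheme (hypothetical helper, not the driver's code):

/* Sketch of per-buffer completion with the tagged mvneta_tx_buf. */
static void complete_one(struct mvneta_tx_buf *buf,
			 u32 *pkts, u32 *bytes)
{
	if (buf->type == MVNETA_TYPE_SKB && buf->skb) {
		*bytes += buf->skb->len;
		(*pkts)++;
		dev_kfree_skb_any(buf->skb);
		buf->skb = NULL;
	}
	/* XDP_TX/XDP_NDO frames would be returned via xdp_return_frame() */
}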


@@ -109,12 +109,14 @@ static int mlx5e_dcbnl_ieee_getets(struct net_device *netdev,
if (!MLX5_CAP_GEN(priv->mdev, ets))
return -EOPNOTSUPP;
- ets->ets_cap = mlx5_max_tc(priv->mdev) + 1;
- for (i = 0; i < ets->ets_cap; i++) {
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
err = mlx5_query_port_prio_tc(mdev, i, &ets->prio_tc[i]);
if (err)
return err;
}
+ ets->ets_cap = mlx5_max_tc(priv->mdev) + 1;
for (i = 0; i < ets->ets_cap; i++) {
err = mlx5_query_port_tc_group(mdev, i, &tc_group[i]);
if (err)
return err;


@@ -62,6 +62,8 @@ mlxfw_mfa2_tlv_next(const struct mlxfw_mfa2_file *mfa2_file,
if (tlv->type == MLXFW_MFA2_TLV_MULTI_PART) {
multi = mlxfw_mfa2_tlv_multi_get(mfa2_file, tlv);
+ if (!multi)
+ return NULL;
tlv_len = NLA_ALIGN(tlv_len + be16_to_cpu(multi->total_len));
}


@@ -255,7 +255,7 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
*/
laddr = dma_map_single(lp->device, skb->data, length, DMA_TO_DEVICE);
- if (!laddr) {
+ if (dma_mapping_error(lp->device, laddr)) {
pr_err_ratelimited("%s: failed to map tx DMA buffer.\n", dev->name);
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
@@ -473,7 +473,7 @@ static bool sonic_alloc_rb(struct net_device *dev, struct sonic_local *lp,
*new_addr = dma_map_single(lp->device, skb_put(*new_skb, SONIC_RBSIZE),
SONIC_RBSIZE, DMA_FROM_DEVICE);
- if (!*new_addr) {
+ if (dma_mapping_error(lp->device, *new_addr)) {
dev_kfree_skb(*new_skb);
*new_skb = NULL;
return false;
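
Both sonic hunks (and the gelic ones further down) fix the same bug class: a DMA address of 0 can be a perfectly valid mapping on some platforms, so success must be tested with dma_mapping_error(), never with '!addr'. The canonical shape, as a generic sketch (function and variable names invented):

/* Map a TX buffer and bail out safely if the mapping failed. */
static netdev_tx_t xmit_map_sketch(struct device *dev, struct sk_buff *skb)
{
	dma_addr_t addr = dma_map_single(dev, skb->data, skb->len,
					 DMA_TO_DEVICE);

	if (dma_mapping_error(dev, addr)) {
		dev_kfree_skb_any(skb);	/* drop; never hand addr to hw */
		return NETDEV_TX_OK;
	}
	/* ... program 'addr' into a descriptor, unmap on completion ... */
	return NETDEV_TX_OK;
}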


@@ -4403,6 +4403,9 @@ qed_iov_configure_min_tx_rate(struct qed_dev *cdev, int vfid, u32 rate)
}
vf = qed_iov_get_vf_info(QED_LEADING_HWFN(cdev), (u16)vfid, true);
+ if (!vf)
+ return -EINVAL;
vport_id = vf->vport_id;
return qed_configure_vport_wfq(cdev, vport_id, rate);
@@ -5142,7 +5145,7 @@ static void qed_iov_handle_trust_change(struct qed_hwfn *hwfn)
/* Validate that the VF has a configured vport */
vf = qed_iov_get_vf_info(hwfn, i, true);
- if (!vf->vport_instance)
+ if (!vf || !vf->vport_instance)
continue;
memset(&params, 0, sizeof(params));


@@ -629,7 +629,13 @@ int qlcnic_fw_create_ctx(struct qlcnic_adapter *dev)
int i, err, ring;
if (dev->flags & QLCNIC_NEED_FLR) {
- pci_reset_function(dev->pdev);
+ err = pci_reset_function(dev->pdev);
+ if (err) {
+ dev_err(&dev->pdev->dev,
+ "Adapter reset failed (%d). Please reboot\n",
+ err);
+ return err;
+ }
dev->flags &= ~QLCNIC_NEED_FLR;
}


@@ -752,9 +752,15 @@ static int emac_remove(struct platform_device *pdev)
struct net_device *netdev = dev_get_drvdata(&pdev->dev);
struct emac_adapter *adpt = netdev_priv(netdev);
+ netif_carrier_off(netdev);
+ netif_tx_disable(netdev);
unregister_netdev(netdev);
netif_napi_del(&adpt->rx_q.napi);
+ free_irq(adpt->irq.irq, &adpt->irq);
+ cancel_work_sync(&adpt->work_thread);
emac_clks_teardown(adpt);
put_device(&adpt->phydev->mdio.dev);


@@ -4505,7 +4505,7 @@ static int niu_alloc_channels(struct niu *np)
err = niu_rbr_fill(np, rp, GFP_KERNEL);
if (err)
- return err;
+ goto out_err;
}
tx_rings = kcalloc(num_tx_rings, sizeof(struct tx_ring_info),


@@ -330,15 +330,17 @@ static int gelic_card_init_chain(struct gelic_card *card,
/* set up the hardware pointers in each descriptor */
for (i = 0; i < no; i++, descr++) {
- gelic_descr_set_status(descr, GELIC_DESCR_DMA_NOT_IN_USE);
- descr->bus_addr =
- dma_map_single(ctodev(card), descr,
- GELIC_DESCR_SIZE,
- DMA_BIDIRECTIONAL);
+ dma_addr_t cpu_addr;
- if (!descr->bus_addr)
+ gelic_descr_set_status(descr, GELIC_DESCR_DMA_NOT_IN_USE);
+ cpu_addr = dma_map_single(ctodev(card), descr,
+ GELIC_DESCR_SIZE, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(ctodev(card), cpu_addr))
goto iommu_error;
+ descr->bus_addr = cpu_to_be32(cpu_addr);
descr->next = descr + 1;
descr->prev = descr - 1;
}
@@ -378,28 +380,30 @@ iommu_error:
*
* allocates a new rx skb, iommu-maps it and attaches it to the descriptor.
* Activate the descriptor state-wise
+ *
+ * Gelic RX sk_buffs must be aligned to GELIC_NET_RXBUF_ALIGN and the length
+ * must be a multiple of GELIC_NET_RXBUF_ALIGN.
*/
static int gelic_descr_prepare_rx(struct gelic_card *card,
struct gelic_descr *descr)
{
+ static const unsigned int rx_skb_size =
+ ALIGN(GELIC_NET_MAX_FRAME, GELIC_NET_RXBUF_ALIGN) +
+ GELIC_NET_RXBUF_ALIGN - 1;
+ dma_addr_t cpu_addr;
int offset;
- unsigned int bufsize;
if (gelic_descr_get_status(descr) != GELIC_DESCR_DMA_NOT_IN_USE)
dev_info(ctodev(card), "%s: ERROR status\n", __func__);
- /* we need to round up the buffer size to a multiple of 128 */
- bufsize = ALIGN(GELIC_NET_MAX_MTU, GELIC_NET_RXBUF_ALIGN);
- /* and we need to have it 128 byte aligned, therefore we allocate a
- * bit more */
- descr->skb = dev_alloc_skb(bufsize + GELIC_NET_RXBUF_ALIGN - 1);
+ descr->skb = netdev_alloc_skb(*card->netdev, rx_skb_size);
if (!descr->skb) {
descr->buf_addr = 0; /* tell DMAC don't touch memory */
dev_info(ctodev(card),
"%s:allocate skb failed !!\n", __func__);
return -ENOMEM;
}
- descr->buf_size = cpu_to_be32(bufsize);
+ descr->buf_size = cpu_to_be32(rx_skb_size);
descr->dmac_cmd_status = 0;
descr->result_size = 0;
descr->valid_size = 0;
@@ -410,11 +414,10 @@ static int gelic_descr_prepare_rx(struct gelic_card *card,
if (offset)
skb_reserve(descr->skb, GELIC_NET_RXBUF_ALIGN - offset);
/* io-mmu-map the skb */
- descr->buf_addr = cpu_to_be32(dma_map_single(ctodev(card),
- descr->skb->data,
- GELIC_NET_MAX_MTU,
- DMA_FROM_DEVICE));
- if (!descr->buf_addr) {
+ cpu_addr = dma_map_single(ctodev(card), descr->skb->data,
+ GELIC_NET_MAX_FRAME, DMA_FROM_DEVICE);
+ descr->buf_addr = cpu_to_be32(cpu_addr);
+ if (dma_mapping_error(ctodev(card), cpu_addr)) {
dev_kfree_skb_any(descr->skb);
descr->skb = NULL;
dev_info(ctodev(card),
@@ -794,7 +797,7 @@ static int gelic_descr_prepare_tx(struct gelic_card *card,
buf = dma_map_single(ctodev(card), skb->data, skb->len, DMA_TO_DEVICE);
- if (!buf) {
+ if (dma_mapping_error(ctodev(card), buf)) {
dev_err(ctodev(card),
"dma map 2 failed (%p, %i). Dropping packet\n",
skb->data, skb->len);
@@ -930,7 +933,7 @@ static void gelic_net_pass_skb_up(struct gelic_descr *descr,
data_error = be32_to_cpu(descr->data_error);
/* unmap skb buffer */
dma_unmap_single(ctodev(card), be32_to_cpu(descr->buf_addr),
- GELIC_NET_MAX_MTU,
+ GELIC_NET_MAX_FRAME,
DMA_FROM_DEVICE);
skb_put(skb, be32_to_cpu(descr->valid_size)?


@@ -32,8 +32,9 @@
#define GELIC_NET_RX_DESCRIPTORS 128 /* num of descriptors */
#define GELIC_NET_TX_DESCRIPTORS 128 /* num of descriptors */
- #define GELIC_NET_MAX_MTU VLAN_ETH_FRAME_LEN
- #define GELIC_NET_MIN_MTU VLAN_ETH_ZLEN
+ #define GELIC_NET_MAX_FRAME 2312
+ #define GELIC_NET_MAX_MTU 2294
+ #define GELIC_NET_MIN_MTU 64
#define GELIC_NET_RXBUF_ALIGN 128
#define GELIC_CARD_RX_CSUM_DEFAULT 1 /* hw chksum */
#define GELIC_NET_WATCHDOG_TIMEOUT 5*HZ
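
The new constants appear to keep the old relationship intact: GELIC_NET_MAX_FRAME (2312) = GELIC_NET_MAX_MTU (2294) + VLAN_ETH_HLEN (18), so the driver now sizes its DMA buffers by frame length rather than by MTU.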


@@ -503,6 +503,11 @@ static void
xirc2ps_detach(struct pcmcia_device *link)
{
struct net_device *dev = link->priv;
+ struct local_info *local = netdev_priv(dev);
+ netif_carrier_off(dev);
+ netif_tx_disable(dev);
+ cancel_work_sync(&local->tx_timeout_task);
dev_dbg(&link->dev, "detach\n");
