git.droids-corp.org - dpdk.git/commitdiff
net/ngbe: support VLAN offload and VLAN filter
author Jiawen Wu <jiawenwu@trustnetic.com>
Thu, 21 Oct 2021 09:50:05 +0000 (17:50 +0800)
committer Ferruh Yigit <ferruh.yigit@intel.com>
Fri, 29 Oct 2021 22:53:19 +0000 (00:53 +0200)
Support setting VLAN and QinQ offload, and filtering on a VLAN tag
identifier.
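
From an application, these offloads are driven through the generic ethdev
API. A minimal sketch (the helper name is illustrative; it assumes port_id
names an ngbe port that has already been configured and started):

    #include <rte_ethdev.h>

    /* Illustrative helper (not part of this patch): enable Rx VLAN
     * stripping and VLAN filtering, then admit only VLAN ID 100.
     */
    static int
    enable_vlan_offloads(uint16_t port_id)
    {
            int ret;

            /* VLAN offload types absent from the mask are switched off */
            ret = rte_eth_dev_set_vlan_offload(port_id,
                            RTE_ETH_VLAN_STRIP_OFFLOAD |
                            RTE_ETH_VLAN_FILTER_OFFLOAD);
            if (ret != 0)
                    return ret;

            /* Program one VFTA entry: accept packets tagged with VLAN 100 */
            return rte_eth_dev_vlan_filter(port_id, 100, 1);
    }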

Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
doc/guides/nics/features/ngbe.ini
doc/guides/nics/ngbe.rst
doc/guides/rel_notes/release_21_11.rst
drivers/net/ngbe/base/ngbe_dummy.h
drivers/net/ngbe/base/ngbe_hw.c
drivers/net/ngbe/base/ngbe_hw.h
drivers/net/ngbe/base/ngbe_type.h
drivers/net/ngbe/ngbe_ethdev.c
drivers/net/ngbe/ngbe_ethdev.h
drivers/net/ngbe/ngbe_rxtx.c
drivers/net/ngbe/ngbe_rxtx.h

index 053dff3a2fcbb37b5f0ee5cf6cab3225e26c1eea..b63a5f23c27024af7d841cca62b86675a23d33cb 100644 (file)
@@ -11,7 +11,10 @@ Queue start/stop     = Y
 Burst mode info      = Y
 Scattered Rx         = Y
 TSO                  = Y
+VLAN filter          = Y
 CRC offload          = Y
+VLAN offload         = Y
+QinQ offload         = Y
 L3 checksum offload  = Y
 L4 checksum offload  = Y
 Inner L3 checksum    = Y
index 702a4550418a402c4ae55cc7b7b75e4389b1ebc0..be70b0f51c948a4b76dd790b3d30b6af5834c895 100644 (file)
@@ -11,8 +11,10 @@ for Wangxun 1 Gigabit Ethernet NICs.
 Features
 --------
 
+- VLAN filtering
 - Packet type information
 - Checksum offload
+- VLAN/QinQ stripping and inserting
 - TSO offload
 - Jumbo frames
 - Link state information
index 43bb211f363f6ea8abf40c155b010a0cc9eecce2..cf95e67ac2d56a5b03108a658786d53d32b26843 100644 (file)
@@ -218,6 +218,7 @@ New Features
 * **Updated Wangxun ngbe driver.**
 
   * Added offloads and packet type on RxTx.
+  * Added VLAN filters.
 
 * **Updated Marvell cnxk crypto PMD.**
 
index 8863acef0d1c5caa4f2c52fb8b3401696ac5595a..fe0596887cbbb3ac5198d35c7a91b4cdd52fdbee 100644 (file)
@@ -118,6 +118,10 @@ static inline s32 ngbe_mac_init_rx_addrs_dummy(struct ngbe_hw *TUP0)
 {
        return NGBE_ERR_OPS_DUMMY;
 }
+static inline s32 ngbe_mac_clear_vfta_dummy(struct ngbe_hw *TUP0)
+{
+       return NGBE_ERR_OPS_DUMMY;
+}
 static inline s32 ngbe_mac_init_thermal_ssth_dummy(struct ngbe_hw *TUP0)
 {
        return NGBE_ERR_OPS_DUMMY;
@@ -192,6 +196,7 @@ static inline void ngbe_init_ops_dummy(struct ngbe_hw *hw)
        hw->mac.set_vmdq = ngbe_mac_set_vmdq_dummy;
        hw->mac.clear_vmdq = ngbe_mac_clear_vmdq_dummy;
        hw->mac.init_rx_addrs = ngbe_mac_init_rx_addrs_dummy;
+       hw->mac.clear_vfta = ngbe_mac_clear_vfta_dummy;
        hw->mac.init_thermal_sensor_thresh = ngbe_mac_init_thermal_ssth_dummy;
        hw->mac.check_overtemp = ngbe_mac_check_overtemp_dummy;
        hw->phy.identify = ngbe_phy_identify_dummy;
index 6b575fc67b7e1759e50115d700ac233e036721ae..bfd744fa6659ccdac9a1b54e79974588bc66a2d7 100644 (file)
@@ -19,6 +19,9 @@ s32 ngbe_start_hw(struct ngbe_hw *hw)
 {
        DEBUGFUNC("ngbe_start_hw");
 
+       /* Clear the VLAN filter table */
+       hw->mac.clear_vfta(hw);
+
        /* Clear adapter stopped flag */
        hw->adapter_stopped = false;
 
@@ -676,6 +679,30 @@ s32 ngbe_init_uta_tables(struct ngbe_hw *hw)
        return 0;
 }
 
+/**
+ *  ngbe_clear_vfta - Clear VLAN filter table
+ *  @hw: pointer to hardware structure
+ *
+ *  Clears the VLAN filter table, and the VMDq index associated with the filter
+ **/
+s32 ngbe_clear_vfta(struct ngbe_hw *hw)
+{
+       u32 offset;
+
+       DEBUGFUNC("ngbe_clear_vfta");
+
+       for (offset = 0; offset < hw->mac.vft_size; offset++)
+               wr32(hw, NGBE_VLANTBL(offset), 0);
+
+       for (offset = 0; offset < NGBE_NUM_POOL; offset++) {
+               wr32(hw, NGBE_PSRVLANIDX, offset);
+               wr32(hw, NGBE_PSRVLAN, 0);
+               wr32(hw, NGBE_PSRVLANPLM(0), 0);
+       }
+
+       return 0;
+}
+
 /**
  *  ngbe_check_mac_link_em - Determine link and speed status
  *  @hw: pointer to hardware structure
@@ -996,12 +1023,13 @@ s32 ngbe_init_ops_pf(struct ngbe_hw *hw)
 
        mac->disable_sec_rx_path = ngbe_disable_sec_rx_path;
        mac->enable_sec_rx_path = ngbe_enable_sec_rx_path;
-       /* RAR */
+       /* RAR, VLAN */
        mac->set_rar = ngbe_set_rar;
        mac->clear_rar = ngbe_clear_rar;
        mac->init_rx_addrs = ngbe_init_rx_addrs;
        mac->set_vmdq = ngbe_set_vmdq;
        mac->clear_vmdq = ngbe_clear_vmdq;
+       mac->clear_vfta = ngbe_clear_vfta;
 
        /* Link */
        mac->get_link_capabilities = ngbe_get_link_capabilities_em;
@@ -1017,6 +1045,7 @@ s32 ngbe_init_ops_pf(struct ngbe_hw *hw)
        rom->validate_checksum = ngbe_validate_eeprom_checksum_em;
 
        mac->mcft_size          = NGBE_EM_MC_TBL_SIZE;
+       mac->vft_size           = NGBE_EM_VFT_TBL_SIZE;
        mac->num_rar_entries    = NGBE_EM_RAR_ENTRIES;
        mac->max_rx_queues      = NGBE_EM_MAX_RX_QUEUES;
        mac->max_tx_queues      = NGBE_EM_MAX_TX_QUEUES;
index 17a0a03c887003b2bcda575599a5ed4734c4b40d..3f9eee84e9a4654a6b0ecfb810799d857cc9862a 100644 (file)
@@ -12,6 +12,7 @@
 #define NGBE_EM_MAX_RX_QUEUES 8
 #define NGBE_EM_RAR_ENTRIES   32
 #define NGBE_EM_MC_TBL_SIZE   32
+#define NGBE_EM_VFT_TBL_SIZE  128
 
 s32 ngbe_init_hw(struct ngbe_hw *hw);
 s32 ngbe_start_hw(struct ngbe_hw *hw);
@@ -44,6 +45,7 @@ void ngbe_release_swfw_sync(struct ngbe_hw *hw, u32 mask);
 s32 ngbe_set_vmdq(struct ngbe_hw *hw, u32 rar, u32 vmdq);
 s32 ngbe_clear_vmdq(struct ngbe_hw *hw, u32 rar, u32 vmdq);
 s32 ngbe_init_uta_tables(struct ngbe_hw *hw);
+s32 ngbe_clear_vfta(struct ngbe_hw *hw);
 
 s32 ngbe_init_thermal_sensor_thresh(struct ngbe_hw *hw);
 s32 ngbe_mac_check_overtemp(struct ngbe_hw *hw);
index 28540e4ba0facae82c020c3471f6dfc186abbc61..68f82e1efb830e257f0c68242c0faf5bd0c9e9f6 100644 (file)
@@ -9,6 +9,7 @@
 #define NGBE_LINK_UP_TIME      90 /* 9.0 Seconds */
 
 #define NGBE_FRAME_SIZE_DFT       (1522) /* Default frame size, +FCS */
+#define NGBE_NUM_POOL             (32)
 
 #define NGBE_ALIGN             128 /* as intel did */
 #define NGBE_ISB_SIZE          16
@@ -118,6 +119,7 @@ struct ngbe_mac_info {
        s32 (*set_vmdq)(struct ngbe_hw *hw, u32 rar, u32 vmdq);
        s32 (*clear_vmdq)(struct ngbe_hw *hw, u32 rar, u32 vmdq);
        s32 (*init_rx_addrs)(struct ngbe_hw *hw);
+       s32 (*clear_vfta)(struct ngbe_hw *hw);
 
        /* Manageability interface */
        s32 (*init_thermal_sensor_thresh)(struct ngbe_hw *hw);
@@ -128,6 +130,7 @@ struct ngbe_mac_info {
        u8 perm_addr[ETH_ADDR_LEN];
        s32 mc_filter_type;
        u32 mcft_size;
+       u32 vft_size;
        u32 num_rar_entries;
        u32 max_tx_queues;
        u32 max_rx_queues;
index b12851d30973080954093964ed999968cc99f2d3..46180f70506cbd3ed7e4da31bf8f8bf403e12dee 100644 (file)
@@ -17,6 +17,9 @@
 static int ngbe_dev_close(struct rte_eth_dev *dev);
 static int ngbe_dev_link_update(struct rte_eth_dev *dev,
                                int wait_to_complete);
+static void ngbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue);
+static void ngbe_vlan_hw_strip_disable(struct rte_eth_dev *dev,
+                                       uint16_t queue);
 
 static void ngbe_dev_link_status_print(struct rte_eth_dev *dev);
 static int ngbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
@@ -27,6 +30,24 @@ static void ngbe_dev_interrupt_handler(void *param);
 static void ngbe_dev_interrupt_delayed_handler(void *param);
 static void ngbe_configure_msix(struct rte_eth_dev *dev);
 
+#define NGBE_SET_HWSTRIP(h, q) do {\
+               uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
+               uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
+               (h)->bitmap[idx] |= 1 << bit;\
+       } while (0)
+
+#define NGBE_CLEAR_HWSTRIP(h, q) do {\
+               uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
+               uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
+               (h)->bitmap[idx] &= ~(1 << bit);\
+       } while (0)
+
+#define NGBE_GET_HWSTRIP(h, q, r) do {\
+               uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
+               uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
+               (r) = (h)->bitmap[idx] >> bit & 1;\
+       } while (0)
+
 /*
  * The set of PCI devices this driver supports
  */
@@ -129,6 +150,8 @@ eth_ngbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
 {
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
        struct ngbe_hw *hw = ngbe_dev_hw(eth_dev);
+       struct ngbe_vfta *shadow_vfta = NGBE_DEV_VFTA(eth_dev);
+       struct ngbe_hwstrip *hwstrip = NGBE_DEV_HWSTRIP(eth_dev);
        struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
        const struct rte_memzone *mz;
        uint32_t ctrl_ext;
@@ -242,6 +265,12 @@ eth_ngbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
                return -ENOMEM;
        }
 
+       /* initialize the vfta */
+       memset(shadow_vfta, 0, sizeof(*shadow_vfta));
+
+       /* initialize the hw strip bitmap */
+       memset(hwstrip, 0, sizeof(*hwstrip));
+
        ctrl_ext = rd32(hw, NGBE_PORTCTL);
        /* let hardware know driver is loaded */
        ctrl_ext |= NGBE_PORTCTL_DRVLOAD;
@@ -311,6 +340,362 @@ static struct rte_pci_driver rte_ngbe_pmd = {
        .remove = eth_ngbe_pci_remove,
 };
 
+static int
+ngbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
+{
+       struct ngbe_hw *hw = ngbe_dev_hw(dev);
+       struct ngbe_vfta *shadow_vfta = NGBE_DEV_VFTA(dev);
+       uint32_t vfta;
+       uint32_t vid_idx;
+       uint32_t vid_bit;
+
+       vid_idx = (uint32_t)((vlan_id >> 5) & 0x7F);
+       vid_bit = (uint32_t)(1 << (vlan_id & 0x1F));
+       vfta = rd32(hw, NGBE_VLANTBL(vid_idx));
+       if (on)
+               vfta |= vid_bit;
+       else
+               vfta &= ~vid_bit;
+       wr32(hw, NGBE_VLANTBL(vid_idx), vfta);
+
+       /* update local VFTA copy */
+       shadow_vfta->vfta[vid_idx] = vfta;
+
+       return 0;
+}
+
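The table addressing above follows from the VFTA layout: 4096 possible VLAN
IDs spread across 128 32-bit registers, so bits [11:5] of the VLAN ID select
the register and bits [4:0] select the bit within it. A standalone sketch of
the same arithmetic, for illustration only:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustration only: locate VLAN ID 100 in the VFTA bitmap. */
    int main(void)
    {
            uint16_t vlan_id = 100;
            uint32_t vid_idx = (vlan_id >> 5) & 0x7F;  /* register index: 3 */
            uint32_t vid_bit = 1u << (vlan_id & 0x1F); /* bit mask: 0x10 */

            printf("VLAN %u -> VLANTBL[%u], mask 0x%08x\n",
                   vlan_id, vid_idx, vid_bit);
            return 0;
    }
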
+static void
+ngbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
+{
+       struct ngbe_hw *hw = ngbe_dev_hw(dev);
+       struct ngbe_rx_queue *rxq;
+       bool restart;
+       uint32_t rxcfg, rxbal, rxbah;
+
+       if (on)
+               ngbe_vlan_hw_strip_enable(dev, queue);
+       else
+               ngbe_vlan_hw_strip_disable(dev, queue);
+
+       rxq = dev->data->rx_queues[queue];
+       rxbal = rd32(hw, NGBE_RXBAL(rxq->reg_idx));
+       rxbah = rd32(hw, NGBE_RXBAH(rxq->reg_idx));
+       rxcfg = rd32(hw, NGBE_RXCFG(rxq->reg_idx));
+       if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
+               restart = (rxcfg & NGBE_RXCFG_ENA) &&
+                       !(rxcfg & NGBE_RXCFG_VLAN);
+               rxcfg |= NGBE_RXCFG_VLAN;
+       } else {
+               restart = (rxcfg & NGBE_RXCFG_ENA) &&
+                       (rxcfg & NGBE_RXCFG_VLAN);
+               rxcfg &= ~NGBE_RXCFG_VLAN;
+       }
+       rxcfg &= ~NGBE_RXCFG_ENA;
+
+       if (restart) {
+               /* set vlan strip for ring */
+               ngbe_dev_rx_queue_stop(dev, queue);
+               wr32(hw, NGBE_RXBAL(rxq->reg_idx), rxbal);
+               wr32(hw, NGBE_RXBAH(rxq->reg_idx), rxbah);
+               wr32(hw, NGBE_RXCFG(rxq->reg_idx), rxcfg);
+               ngbe_dev_rx_queue_start(dev, queue);
+       }
+}
+
+static int
+ngbe_vlan_tpid_set(struct rte_eth_dev *dev,
+                   enum rte_vlan_type vlan_type,
+                   uint16_t tpid)
+{
+       struct ngbe_hw *hw = ngbe_dev_hw(dev);
+       int ret = 0;
+       uint32_t portctrl, vlan_ext, qinq;
+
+       portctrl = rd32(hw, NGBE_PORTCTL);
+
+       vlan_ext = (portctrl & NGBE_PORTCTL_VLANEXT);
+       qinq = vlan_ext && (portctrl & NGBE_PORTCTL_QINQ);
+       switch (vlan_type) {
+       case RTE_ETH_VLAN_TYPE_INNER:
+               if (vlan_ext) {
+                       wr32m(hw, NGBE_VLANCTL,
+                               NGBE_VLANCTL_TPID_MASK,
+                               NGBE_VLANCTL_TPID(tpid));
+                       wr32m(hw, NGBE_DMATXCTRL,
+                               NGBE_DMATXCTRL_TPID_MASK,
+                               NGBE_DMATXCTRL_TPID(tpid));
+               } else {
+                       ret = -ENOTSUP;
+                       PMD_DRV_LOG(ERR,
+                               "Inner type is not supported by single VLAN");
+               }
+
+               if (qinq) {
+                       wr32m(hw, NGBE_TAGTPID(0),
+                               NGBE_TAGTPID_LSB_MASK,
+                               NGBE_TAGTPID_LSB(tpid));
+               }
+               break;
+       case RTE_ETH_VLAN_TYPE_OUTER:
+               if (vlan_ext) {
+                       /* Only the high 16 bits are valid */
+                       wr32m(hw, NGBE_EXTAG,
+                               NGBE_EXTAG_VLAN_MASK,
+                               NGBE_EXTAG_VLAN(tpid));
+               } else {
+                       wr32m(hw, NGBE_VLANCTL,
+                               NGBE_VLANCTL_TPID_MASK,
+                               NGBE_VLANCTL_TPID(tpid));
+                       wr32m(hw, NGBE_DMATXCTRL,
+                               NGBE_DMATXCTRL_TPID_MASK,
+                               NGBE_DMATXCTRL_TPID(tpid));
+               }
+
+               if (qinq) {
+                       wr32m(hw, NGBE_TAGTPID(0),
+                               NGBE_TAGTPID_MSB_MASK,
+                               NGBE_TAGTPID_MSB(tpid));
+               }
+               break;
+       default:
+               PMD_DRV_LOG(ERR, "Unsupported VLAN type %d", vlan_type);
+               return -EINVAL;
+       }
+
+       return ret;
+}
+
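Applications reach this callback through rte_eth_dev_set_vlan_ether_type(),
registered below as .vlan_tpid_set. A sketch of selecting 802.1ad (0x88A8)
as the outer TPID (illustrative helper; the inner type additionally requires
VLAN extend mode, as the code above shows):

    #include <rte_ethdev.h>

    /* Sketch: choose 802.1ad as the outer tag protocol identifier. */
    static int
    set_outer_tpid_8021ad(uint16_t port_id)
    {
            return rte_eth_dev_set_vlan_ether_type(port_id,
                            RTE_ETH_VLAN_TYPE_OUTER, 0x88A8);
    }
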
+void
+ngbe_vlan_hw_filter_disable(struct rte_eth_dev *dev)
+{
+       struct ngbe_hw *hw = ngbe_dev_hw(dev);
+       uint32_t vlnctrl;
+
+       PMD_INIT_FUNC_TRACE();
+
+       /* Filter Table Disable */
+       vlnctrl = rd32(hw, NGBE_VLANCTL);
+       vlnctrl &= ~NGBE_VLANCTL_VFE;
+       wr32(hw, NGBE_VLANCTL, vlnctrl);
+}
+
+void
+ngbe_vlan_hw_filter_enable(struct rte_eth_dev *dev)
+{
+       struct ngbe_hw *hw = ngbe_dev_hw(dev);
+       struct ngbe_vfta *shadow_vfta = NGBE_DEV_VFTA(dev);
+       uint32_t vlnctrl;
+       uint16_t i;
+
+       PMD_INIT_FUNC_TRACE();
+
+       /* Filter Table Enable */
+       vlnctrl = rd32(hw, NGBE_VLANCTL);
+       vlnctrl &= ~NGBE_VLANCTL_CFIENA;
+       vlnctrl |= NGBE_VLANCTL_VFE;
+       wr32(hw, NGBE_VLANCTL, vlnctrl);
+
+       /* write whatever is in local vfta copy */
+       for (i = 0; i < NGBE_VFTA_SIZE; i++)
+               wr32(hw, NGBE_VLANTBL(i), shadow_vfta->vfta[i]);
+}
+
+void
+ngbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
+{
+       struct ngbe_hwstrip *hwstrip = NGBE_DEV_HWSTRIP(dev);
+       struct ngbe_rx_queue *rxq;
+
+       if (queue >= NGBE_MAX_RX_QUEUE_NUM)
+               return;
+
+       if (on)
+               NGBE_SET_HWSTRIP(hwstrip, queue);
+       else
+               NGBE_CLEAR_HWSTRIP(hwstrip, queue);
+
+       if (queue >= dev->data->nb_rx_queues)
+               return;
+
+       rxq = dev->data->rx_queues[queue];
+
+       if (on) {
+               rxq->vlan_flags = RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
+               rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
+       } else {
+               rxq->vlan_flags = RTE_MBUF_F_RX_VLAN;
+               rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
+       }
+}
+
+static void
+ngbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue)
+{
+       struct ngbe_hw *hw = ngbe_dev_hw(dev);
+       uint32_t ctrl;
+
+       PMD_INIT_FUNC_TRACE();
+
+       ctrl = rd32(hw, NGBE_RXCFG(queue));
+       ctrl &= ~NGBE_RXCFG_VLAN;
+       wr32(hw, NGBE_RXCFG(queue), ctrl);
+
+       /* record this setting for HW strip per queue */
+       ngbe_vlan_hw_strip_bitmap_set(dev, queue, 0);
+}
+
+static void
+ngbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue)
+{
+       struct ngbe_hw *hw = ngbe_dev_hw(dev);
+       uint32_t ctrl;
+
+       PMD_INIT_FUNC_TRACE();
+
+       ctrl = rd32(hw, NGBE_RXCFG(queue));
+       ctrl |= NGBE_RXCFG_VLAN;
+       wr32(hw, NGBE_RXCFG(queue), ctrl);
+
+       /* record this setting for HW strip per queue */
+       ngbe_vlan_hw_strip_bitmap_set(dev, queue, 1);
+}
+
+static void
+ngbe_vlan_hw_extend_disable(struct rte_eth_dev *dev)
+{
+       struct ngbe_hw *hw = ngbe_dev_hw(dev);
+       uint32_t ctrl;
+
+       PMD_INIT_FUNC_TRACE();
+
+       ctrl = rd32(hw, NGBE_PORTCTL);
+       ctrl &= ~NGBE_PORTCTL_VLANEXT;
+       ctrl &= ~NGBE_PORTCTL_QINQ;
+       wr32(hw, NGBE_PORTCTL, ctrl);
+}
+
+static void
+ngbe_vlan_hw_extend_enable(struct rte_eth_dev *dev)
+{
+       struct ngbe_hw *hw = ngbe_dev_hw(dev);
+       uint32_t ctrl;
+
+       PMD_INIT_FUNC_TRACE();
+
+       ctrl  = rd32(hw, NGBE_PORTCTL);
+       ctrl |= NGBE_PORTCTL_VLANEXT | NGBE_PORTCTL_QINQ;
+       wr32(hw, NGBE_PORTCTL, ctrl);
+}
+
+static void
+ngbe_qinq_hw_strip_disable(struct rte_eth_dev *dev)
+{
+       struct ngbe_hw *hw = ngbe_dev_hw(dev);
+       uint32_t ctrl;
+
+       PMD_INIT_FUNC_TRACE();
+
+       ctrl = rd32(hw, NGBE_PORTCTL);
+       ctrl &= ~NGBE_PORTCTL_QINQ;
+       wr32(hw, NGBE_PORTCTL, ctrl);
+}
+
+static void
+ngbe_qinq_hw_strip_enable(struct rte_eth_dev *dev)
+{
+       struct ngbe_hw *hw = ngbe_dev_hw(dev);
+       uint32_t ctrl;
+
+       PMD_INIT_FUNC_TRACE();
+
+       ctrl  = rd32(hw, NGBE_PORTCTL);
+       ctrl |= NGBE_PORTCTL_QINQ | NGBE_PORTCTL_VLANEXT;
+       wr32(hw, NGBE_PORTCTL, ctrl);
+}
+
+void
+ngbe_vlan_hw_strip_config(struct rte_eth_dev *dev)
+{
+       struct ngbe_rx_queue *rxq;
+       uint16_t i;
+
+       PMD_INIT_FUNC_TRACE();
+
+       for (i = 0; i < dev->data->nb_rx_queues; i++) {
+               rxq = dev->data->rx_queues[i];
+
+               if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
+                       ngbe_vlan_hw_strip_enable(dev, i);
+               else
+                       ngbe_vlan_hw_strip_disable(dev, i);
+       }
+}
+
+void
+ngbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, int mask)
+{
+       uint16_t i;
+       struct rte_eth_rxmode *rxmode;
+       struct ngbe_rx_queue *rxq;
+
+       if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+               rxmode = &dev->data->dev_conf.rxmode;
+               if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
+                       for (i = 0; i < dev->data->nb_rx_queues; i++) {
+                               rxq = dev->data->rx_queues[i];
+                               rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
+                       }
+               else
+                       for (i = 0; i < dev->data->nb_rx_queues; i++) {
+                               rxq = dev->data->rx_queues[i];
+                               rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
+                       }
+       }
+}
+
+static int
+ngbe_vlan_offload_config(struct rte_eth_dev *dev, int mask)
+{
+       struct rte_eth_rxmode *rxmode;
+       rxmode = &dev->data->dev_conf.rxmode;
+
+       if (mask & RTE_ETH_VLAN_STRIP_MASK)
+               ngbe_vlan_hw_strip_config(dev);
+
+       if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+               if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
+                       ngbe_vlan_hw_filter_enable(dev);
+               else
+                       ngbe_vlan_hw_filter_disable(dev);
+       }
+
+       if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
+               if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
+                       ngbe_vlan_hw_extend_enable(dev);
+               else
+                       ngbe_vlan_hw_extend_disable(dev);
+       }
+
+       if (mask & RTE_ETH_QINQ_STRIP_MASK) {
+               if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
+                       ngbe_qinq_hw_strip_enable(dev);
+               else
+                       ngbe_qinq_hw_strip_disable(dev);
+       }
+
+       return 0;
+}
+
+static int
+ngbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
+{
+       ngbe_config_vlan_strip_on_all_queues(dev, mask);
+
+       ngbe_vlan_offload_config(dev, mask);
+
+       return 0;
+}
+
 static int
 ngbe_dev_configure(struct rte_eth_dev *dev)
 {
@@ -363,6 +748,7 @@ ngbe_dev_start(struct rte_eth_dev *dev)
        bool link_up = false, negotiate = false;
        uint32_t speed = 0;
        uint32_t allowed_speeds = 0;
+       int mask = 0;
        int status;
        uint32_t *link_speeds;
 
@@ -418,6 +804,16 @@ ngbe_dev_start(struct rte_eth_dev *dev)
                goto error;
        }
 
+       mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
+               RTE_ETH_VLAN_EXTEND_MASK;
+       err = ngbe_vlan_offload_config(dev, mask);
+       if (err != 0) {
+               PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
+               goto error;
+       }
+
+       ngbe_configure_port(dev);
+
        err = ngbe_dev_rxtx_start(dev);
        if (err < 0) {
                PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
@@ -649,6 +1045,7 @@ ngbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
        dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
        dev_info->min_rx_bufsize = 1024;
        dev_info->max_rx_pktlen = 15872;
+       dev_info->rx_queue_offload_capa = ngbe_get_rx_queue_offloads(dev);
        dev_info->rx_offload_capa = (ngbe_get_rx_port_offloads(dev) |
                                     dev_info->rx_queue_offload_capa);
        dev_info->tx_queue_offload_capa = 0;
@@ -1188,6 +1585,10 @@ static const struct eth_dev_ops ngbe_eth_dev_ops = {
        .dev_reset                  = ngbe_dev_reset,
        .link_update                = ngbe_dev_link_update,
        .dev_supported_ptypes_get   = ngbe_dev_supported_ptypes_get,
+       .vlan_filter_set            = ngbe_vlan_filter_set,
+       .vlan_tpid_set              = ngbe_vlan_tpid_set,
+       .vlan_offload_set           = ngbe_vlan_offload_set,
+       .vlan_strip_queue_set       = ngbe_vlan_strip_queue_set,
        .rx_queue_start             = ngbe_dev_rx_queue_start,
        .rx_queue_stop              = ngbe_dev_rx_queue_stop,
        .tx_queue_start             = ngbe_dev_tx_queue_start,
index b270343a3eed104cced792a6f368c5179724df09..5ca093ab4c988eacdeac33ed1e58703e4581680d 100644 (file)
 #define NGBE_FLAG_MACSEC            ((uint32_t)(1 << 3))
 #define NGBE_FLAG_NEED_LINK_CONFIG  ((uint32_t)(1 << 4))
 
+#define NGBE_VFTA_SIZE 128
+#define NGBE_VLAN_TAG_SIZE 4
+/* Default value of max Rx queues */
+#define NGBE_MAX_RX_QUEUE_NUM  8
+
+#ifndef NBBY
+#define NBBY   8       /* number of bits in a byte */
+#endif
+#define NGBE_HWSTRIP_BITMAP_SIZE \
+       (NGBE_MAX_RX_QUEUE_NUM / (sizeof(uint32_t) * NBBY))
+
 #define NGBE_QUEUE_ITR_INTERVAL_DEFAULT        500 /* 500us */
 
 /* The overhead from MTU to max frame size. */
@@ -32,12 +43,22 @@ struct ngbe_interrupt {
        uint64_t mask_orig; /* save mask during delayed handler */
 };
 
+struct ngbe_vfta {
+       uint32_t vfta[NGBE_VFTA_SIZE];
+};
+
+struct ngbe_hwstrip {
+       uint32_t bitmap[NGBE_HWSTRIP_BITMAP_SIZE];
+};
+
 /*
  * Structure to store private data for each driver instance (for each port).
  */
 struct ngbe_adapter {
        struct ngbe_hw             hw;
        struct ngbe_interrupt      intr;
+       struct ngbe_vfta           shadow_vfta;
+       struct ngbe_hwstrip        hwstrip;
        bool                       rx_bulk_alloc_allowed;
 };
 
@@ -67,6 +88,12 @@ ngbe_dev_intr(struct rte_eth_dev *dev)
        return intr;
 }
 
+#define NGBE_DEV_VFTA(dev) \
+       (&((struct ngbe_adapter *)(dev)->data->dev_private)->shadow_vfta)
+
+#define NGBE_DEV_HWSTRIP(dev) \
+       (&((struct ngbe_adapter *)(dev)->data->dev_private)->hwstrip)
+
 /*
  * Rx/Tx function prototypes
  */
@@ -136,10 +163,21 @@ uint16_t ngbe_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 void ngbe_set_ivar_map(struct ngbe_hw *hw, int8_t direction,
                               uint8_t queue, uint8_t msix_vector);
 
+void ngbe_configure_port(struct rte_eth_dev *dev);
+
 int
 ngbe_dev_link_update_share(struct rte_eth_dev *dev,
                int wait_to_complete);
 
+/*
+ * misc function prototypes
+ */
+void ngbe_vlan_hw_filter_enable(struct rte_eth_dev *dev);
+
+void ngbe_vlan_hw_filter_disable(struct rte_eth_dev *dev);
+
+void ngbe_vlan_hw_strip_config(struct rte_eth_dev *dev);
+
 #define NGBE_LINK_DOWN_CHECK_TIMEOUT 4000 /* ms */
 #define NGBE_LINK_UP_CHECK_TIMEOUT   1000 /* ms */
 #define NGBE_VMDQ_NUM_UC_MAC         4096 /* Maximum nb. of UC MAC addr. */
@@ -158,5 +196,9 @@ ngbe_dev_link_update_share(struct rte_eth_dev *dev,
 #define NGBE_DEFAULT_TX_WTHRESH      0
 
 const uint32_t *ngbe_dev_supported_ptypes_get(struct rte_eth_dev *dev);
+void ngbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev,
+               uint16_t queue, bool on);
+void ngbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev,
+                                                 int mask);
 
 #endif /* _NGBE_ETHDEV_H_ */
index 48983fedbdc2345990ef24dc93e53a11d13e483d..3315428397ce9cbdabbc543c92b0a10abc5a1477 100644 (file)
@@ -22,6 +22,7 @@ static const u64 NGBE_TX_OFFLOAD_MASK = (RTE_MBUF_F_TX_IP_CKSUM |
                RTE_MBUF_F_TX_OUTER_IPV4 |
                RTE_MBUF_F_TX_IPV6 |
                RTE_MBUF_F_TX_IPV4 |
+               RTE_MBUF_F_TX_VLAN |
                RTE_MBUF_F_TX_L4_MASK |
                RTE_MBUF_F_TX_TCP_SEG |
                RTE_MBUF_F_TX_TUNNEL_MASK |
@@ -347,6 +348,11 @@ ngbe_set_xmit_ctx(struct ngbe_tx_queue *txq,
                vlan_macip_lens |= NGBE_TXD_MACLEN(tx_offload.l2_len);
        }
 
+       if (ol_flags & RTE_MBUF_F_TX_VLAN) {
+               tx_offload_mask.vlan_tci |= ~0;
+               vlan_macip_lens |= NGBE_TXD_VLAN(tx_offload.vlan_tci);
+       }
+
        txq->ctx_cache[ctx_idx].flags = ol_flags;
        txq->ctx_cache[ctx_idx].tx_offload.data[0] =
                tx_offload_mask.data[0] & tx_offload.data[0];
@@ -417,6 +423,8 @@ tx_desc_cksum_flags_to_olinfo(uint64_t ol_flags)
                        tmp |= NGBE_TXD_IPCS;
                tmp |= NGBE_TXD_L4CS;
        }
+       if (ol_flags & RTE_MBUF_F_TX_VLAN)
+               tmp |= NGBE_TXD_CC;
 
        return tmp;
 }
@@ -426,6 +434,8 @@ tx_desc_ol_flags_to_cmdtype(uint64_t ol_flags)
 {
        uint32_t cmdtype = 0;
 
+       if (ol_flags & RTE_MBUF_F_TX_VLAN)
+               cmdtype |= NGBE_TXD_VLE;
        if (ol_flags & RTE_MBUF_F_TX_TCP_SEG)
                cmdtype |= NGBE_TXD_TSE;
        return cmdtype;
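
On the transmit side these descriptor bits are driven per mbuf. A minimal
sketch of how a sender would request hardware VLAN insertion (illustrative
helper name):

    #include <rte_mbuf.h>

    /* Sketch: mark one outgoing mbuf for hardware VLAN insertion. The
     * RTE_MBUF_F_TX_VLAN flag makes tx_desc_ol_flags_to_cmdtype() emit
     * NGBE_TXD_VLE, and the TCI is carried via the context descriptor.
     */
    static void
    request_tx_vlan_insert(struct rte_mbuf *m, uint16_t tci)
    {
            m->ol_flags |= RTE_MBUF_F_TX_VLAN;
            m->vlan_tci = tci; /* VLAN ID plus PCP/DEI bits */
    }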
@@ -444,6 +454,8 @@ tx_desc_ol_flags_to_ptid(uint64_t oflags, uint32_t ptype)
 
        /* L2 level */
        ptype = RTE_PTYPE_L2_ETHER;
+       if (oflags & RTE_MBUF_F_TX_VLAN)
+               ptype |= RTE_PTYPE_L2_ETHER_VLAN;
 
        /* L3 level */
        if (oflags & (RTE_MBUF_F_TX_OUTER_IPV4 | RTE_MBUF_F_TX_OUTER_IP_CKSUM))
@@ -607,6 +619,7 @@ ngbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                        tx_offload.l2_len = tx_pkt->l2_len;
                        tx_offload.l3_len = tx_pkt->l3_len;
                        tx_offload.l4_len = tx_pkt->l4_len;
+                       tx_offload.vlan_tci = tx_pkt->vlan_tci;
                        tx_offload.tso_segsz = tx_pkt->tso_segsz;
                        tx_offload.outer_l2_len = tx_pkt->outer_l2_len;
                        tx_offload.outer_l3_len = tx_pkt->outer_l3_len;
@@ -885,6 +898,23 @@ ngbe_rxd_pkt_info_to_pkt_type(uint32_t pkt_info, uint16_t ptid_mask)
        return ngbe_decode_ptype(ptid);
 }
 
+static inline uint64_t
+rx_desc_status_to_pkt_flags(uint32_t rx_status, uint64_t vlan_flags)
+{
+       uint64_t pkt_flags;
+
+       /*
+        * Check only whether a VLAN is present.
+        * Do not check whether the L3/L4 Rx checksum was done by the NIC;
+        * that can be found from the rte_eth_rxmode.offloads flag.
+        */
+       pkt_flags = (rx_status & NGBE_RXD_STAT_VLAN &&
+                    vlan_flags & RTE_MBUF_F_RX_VLAN_STRIPPED)
+                   ? vlan_flags : 0;
+
+       return pkt_flags;
+}
+
 static inline uint64_t
 rx_desc_error_to_pkt_flags(uint32_t rx_status)
 {
@@ -969,9 +999,12 @@ ngbe_rx_scan_hw_ring(struct ngbe_rx_queue *rxq)
                                  rxq->crc_len;
                        mb->data_len = pkt_len;
                        mb->pkt_len = pkt_len;
+                       mb->vlan_tci = rte_le_to_cpu_16(rxdp[j].qw1.hi.tag);
 
                        /* convert descriptor fields to rte mbuf flags */
-                       pkt_flags = rx_desc_error_to_pkt_flags(s[j]);
+                       pkt_flags = rx_desc_status_to_pkt_flags(s[j],
+                                       rxq->vlan_flags);
+                       pkt_flags |= rx_desc_error_to_pkt_flags(s[j]);
                        mb->ol_flags = pkt_flags;
                        mb->packet_type =
                                ngbe_rxd_pkt_info_to_pkt_type(pkt_info[j],
@@ -1267,6 +1300,7 @@ ngbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                 *    - Rx port identifier.
                 * 2) integrate hardware offload data, if any:
                 *    - IP checksum flag,
+                *    - VLAN TCI, if any,
                 *    - error flags.
                 */
                pkt_len = (uint16_t)(rte_le_to_cpu_16(rxd.qw1.hi.len) -
@@ -1280,7 +1314,12 @@ ngbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                rxm->port = rxq->port_id;
 
                pkt_info = rte_le_to_cpu_32(rxd.qw0.dw0);
-               pkt_flags = rx_desc_error_to_pkt_flags(staterr);
+               /* Only valid if RTE_MBUF_F_RX_VLAN is set in pkt_flags */
+               rxm->vlan_tci = rte_le_to_cpu_16(rxd.qw1.hi.tag);
+
+               pkt_flags = rx_desc_status_to_pkt_flags(staterr,
+                                       rxq->vlan_flags);
+               pkt_flags |= rx_desc_error_to_pkt_flags(staterr);
                rxm->ol_flags = pkt_flags;
                rxm->packet_type = ngbe_rxd_pkt_info_to_pkt_type(pkt_info,
                                                       NGBE_PTID_MASK);
@@ -1325,6 +1364,7 @@ ngbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
  *    - RX port identifier
  *    - hardware offload data, if any:
  *      - IP checksum flag
+ *      - VLAN TCI, if any
  *      - error flags
  * @head HEAD of the packet cluster
  * @desc HW descriptor to get data from
@@ -1339,8 +1379,13 @@ ngbe_fill_cluster_head_buf(struct rte_mbuf *head, struct ngbe_rx_desc *desc,
 
        head->port = rxq->port_id;
 
+       /* The vlan_tci field is only valid when RTE_MBUF_F_RX_VLAN is
+        * set in the pkt_flags field.
+        */
+       head->vlan_tci = rte_le_to_cpu_16(desc->qw1.hi.tag);
        pkt_info = rte_le_to_cpu_32(desc->qw0.dw0);
-       pkt_flags = rx_desc_error_to_pkt_flags(staterr);
+       pkt_flags = rx_desc_status_to_pkt_flags(staterr, rxq->vlan_flags);
+       pkt_flags |= rx_desc_error_to_pkt_flags(staterr);
        head->ol_flags = pkt_flags;
        head->packet_type = ngbe_rxd_pkt_info_to_pkt_type(pkt_info,
                                                NGBE_PTID_MASK);
@@ -1739,10 +1784,10 @@ uint64_t
 ngbe_get_tx_port_offloads(struct rte_eth_dev *dev)
 {
        uint64_t tx_offload_capa;
-
-       RTE_SET_USED(dev);
+       struct ngbe_hw *hw = ngbe_dev_hw(dev);
 
        tx_offload_capa =
+               RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
                RTE_ETH_TX_OFFLOAD_IPV4_CKSUM  |
                RTE_ETH_TX_OFFLOAD_UDP_CKSUM   |
                RTE_ETH_TX_OFFLOAD_TCP_CKSUM   |
@@ -1755,6 +1800,9 @@ ngbe_get_tx_port_offloads(struct rte_eth_dev *dev)
                RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |
                RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
+       if (hw->is_pf)
+               tx_offload_capa |= RTE_ETH_TX_OFFLOAD_QINQ_INSERT;
+
        return tx_offload_capa;
 }
 
@@ -2025,16 +2073,28 @@ ngbe_reset_rx_queue(struct ngbe_adapter *adapter, struct ngbe_rx_queue *rxq)
 }
 
 uint64_t
-ngbe_get_rx_port_offloads(struct rte_eth_dev *dev __rte_unused)
+ngbe_get_rx_queue_offloads(struct rte_eth_dev *dev __rte_unused)
+{
+       return RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
+}
+
+uint64_t
+ngbe_get_rx_port_offloads(struct rte_eth_dev *dev)
 {
        uint64_t offloads;
+       struct ngbe_hw *hw = ngbe_dev_hw(dev);
 
        offloads = RTE_ETH_RX_OFFLOAD_IPV4_CKSUM  |
                   RTE_ETH_RX_OFFLOAD_UDP_CKSUM   |
                   RTE_ETH_RX_OFFLOAD_TCP_CKSUM   |
                   RTE_ETH_RX_OFFLOAD_KEEP_CRC    |
+                  RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
                   RTE_ETH_RX_OFFLOAD_SCATTER;
 
+       if (hw->is_pf)
+               offloads |= (RTE_ETH_RX_OFFLOAD_QINQ_STRIP |
+                            RTE_ETH_RX_OFFLOAD_VLAN_EXTEND);
+
        return offloads;
 }
 
@@ -2051,10 +2111,13 @@ ngbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
        struct ngbe_hw     *hw;
        uint16_t len;
        struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
+       uint64_t offloads;
 
        PMD_INIT_FUNC_TRACE();
        hw = ngbe_dev_hw(dev);
 
+       offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
+
        /* Free memory prior to re-allocation if needed... */
        if (dev->data->rx_queues[queue_idx] != NULL) {
                ngbe_rx_queue_release(dev->data->rx_queues[queue_idx]);
@@ -2079,6 +2142,7 @@ ngbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
                rxq->crc_len = 0;
        rxq->drop_en = rx_conf->rx_drop_en;
        rxq->rx_deferred_start = rx_conf->rx_deferred_start;
+       rxq->offloads = offloads;
 
        /*
         * Allocate Rx ring hardware descriptors. A memzone large enough to
@@ -2208,6 +2272,40 @@ ngbe_dev_free_queues(struct rte_eth_dev *dev)
        dev->data->nb_tx_queues = 0;
 }
 
+void ngbe_configure_port(struct rte_eth_dev *dev)
+{
+       struct ngbe_hw *hw = ngbe_dev_hw(dev);
+       int i = 0;
+       uint16_t tpids[8] = {RTE_ETHER_TYPE_VLAN, RTE_ETHER_TYPE_QINQ,
+                               0x9100, 0x9200,
+                               0x0000, 0x0000,
+                               0x0000, 0x0000};
+
+       PMD_INIT_FUNC_TRACE();
+
+       /* default outer vlan tpid */
+       wr32(hw, NGBE_EXTAG,
+               NGBE_EXTAG_ETAG(RTE_ETHER_TYPE_ETAG) |
+               NGBE_EXTAG_VLAN(RTE_ETHER_TYPE_QINQ));
+
+       /* default inner vlan tpid */
+       wr32m(hw, NGBE_VLANCTL,
+               NGBE_VLANCTL_TPID_MASK,
+               NGBE_VLANCTL_TPID(RTE_ETHER_TYPE_VLAN));
+       wr32m(hw, NGBE_DMATXCTRL,
+               NGBE_DMATXCTRL_TPID_MASK,
+               NGBE_DMATXCTRL_TPID(RTE_ETHER_TYPE_VLAN));
+
+       /* default vlan tpid filters */
+       for (i = 0; i < 8; i++) {
+               wr32m(hw, NGBE_TAGTPID(i / 2),
+                       (i % 2 ? NGBE_TAGTPID_MSB_MASK
+                              : NGBE_TAGTPID_LSB_MASK),
+                       (i % 2 ? NGBE_TAGTPID_MSB(tpids[i])
+                              : NGBE_TAGTPID_LSB(tpids[i])));
+       }
+}
+
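Each 32-bit TAGTPID register packs two 16-bit TPIDs, which is why the loop
above writes filter i into register i/2, upper half for odd i. An
illustrative packing helper (not driver code):

    #include <stdint.h>

    /* Pack an even/odd TPID pair the way one NGBE_TAGTPID register lays
     * them out: the odd-indexed TPID occupies the most significant half.
     */
    static uint32_t
    pack_tpid_pair(uint16_t even_tpid, uint16_t odd_tpid)
    {
            return ((uint32_t)odd_tpid << 16) | even_tpid;
    }
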
 static int
 ngbe_alloc_rx_queue_mbufs(struct ngbe_rx_queue *rxq)
 {
@@ -2370,6 +2468,12 @@ ngbe_dev_rx_init(struct rte_eth_dev *dev)
        wr32m(hw, NGBE_FRMSZ, NGBE_FRMSZ_MAX_MASK,
                NGBE_FRMSZ_MAX(dev->data->mtu + NGBE_ETH_OVERHEAD));
 
+       /*
+        * Assume no header split and no VLAN strip support
+        * on any Rx queue first.
+        */
+       rx_conf->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
+
        /* Setup Rx queues */
        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                rxq = dev->data->rx_queues[i];
@@ -2410,6 +2514,13 @@ ngbe_dev_rx_init(struct rte_eth_dev *dev)
                srrctl |= NGBE_RXCFG_PKTLEN(buf_size);
 
                wr32(hw, NGBE_RXCFG(rxq->reg_idx), srrctl);
+
+               /* Account for dual VLAN tags when checking the Rx buffer size */
+               if (dev->data->mtu + NGBE_ETH_OVERHEAD +
+                               2 * NGBE_VLAN_TAG_SIZE > buf_size)
+                       dev->data->scattered_rx = 1;
+               if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
+                       rx_conf->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
        }
 
        if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
index 6364214ff525860a6c8f321da29623ffdedd6e53..93a889118bc4d6a73d005084d7fe78fcf6714e05 100644 (file)
@@ -269,6 +269,9 @@ struct ngbe_rx_queue {
        uint8_t         crc_len;  /**< 0 if CRC stripped, 4 otherwise. */
        uint8_t         drop_en;  /**< If not 0, set SRRCTL.Drop_En */
        uint8_t         rx_deferred_start; /**< not in global dev start */
+       /** flags to set in mbuf when a vlan is detected */
+       uint64_t        vlan_flags;
+       uint64_t        offloads; /**< Rx offloads with RTE_ETH_RX_OFFLOAD_* */
        /** need to alloc dummy mbuf, for wraparound when scanning hw ring */
        struct rte_mbuf fake_mbuf;
        /** hold packets to return to application */
@@ -367,6 +370,7 @@ void ngbe_set_tx_function(struct rte_eth_dev *dev, struct ngbe_tx_queue *txq);
 void ngbe_set_rx_function(struct rte_eth_dev *dev);
 
 uint64_t ngbe_get_tx_port_offloads(struct rte_eth_dev *dev);
+uint64_t ngbe_get_rx_queue_offloads(struct rte_eth_dev *dev);
 uint64_t ngbe_get_rx_port_offloads(struct rte_eth_dev *dev);
 
 #endif /* _NGBE_RXTX_H_ */