drivers/net: advertise no support for keeping flow rules
author Dmitry Kozlyuk <dkozlyuk@nvidia.com>
Tue, 2 Nov 2021 17:01:32 +0000 (19:01 +0200)
committer Ferruh Yigit <ferruh.yigit@intel.com>
Tue, 2 Nov 2021 17:59:17 +0000 (18:59 +0100)
When the RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP capability bit is zero,
the specified behavior is the same as it was before
this bit was introduced. Explicitly reset it in all PMDs
supporting the rte_flow API in order to attract the attention
of maintainers, who should eventually decide whether to
advertise the new capability. It is already known that
mlx4 and mlx5 will not support this capability.
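
Each touched driver makes the same one-line change in its info-get
callback. A minimal sketch of the pattern follows; the function name
is illustrative, and each PMD edits its own dev_infos_get callback
(the per-driver hunks are in the diff below):

#include <rte_ethdev.h>

/*
 * Sketch of the per-PMD change: after reporting the capabilities the
 * device does support, explicitly clear the "keep flow rules" bit so
 * applications know rules are not kept across a device restart.
 */
static int
example_dev_infos_get(struct rte_eth_dev *dev __rte_unused,
		      struct rte_eth_dev_info *dev_info)
{
	dev_info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
			     RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
	dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
	return 0;
}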

No similar action is taken for RTE_ETH_DEV_CAPA_FLOW_SHARED_OBJECT_KEEP,
because no PMD except mlx5 supports indirect actions.
Any PMD that starts supporting them will have to consider
all relevant APIs anyway, including this capability.
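
On the application side, the capability can be queried to decide
whether flow rules must be re-created after a port restart. A hedged
sketch (not part of this patch; the helper name is illustrative):

#include <stdbool.h>
#include <rte_ethdev.h>

/*
 * Return true if the port advertises that flow rules survive a
 * stop/start cycle, false otherwise or on query failure.
 */
static bool
port_keeps_flow_rules(uint16_t port_id)
{
	struct rte_eth_dev_info info;

	if (rte_eth_dev_info_get(port_id, &info) != 0)
		return false;
	return (info.dev_capa & RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP) != 0;
}

If this returns false, the application should re-create its rules
after the next rte_eth_dev_start().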

Suggested-by: Ferruh Yigit <ferruh.yigit@intel.com>
Signed-off-by: Dmitry Kozlyuk <dkozlyuk@nvidia.com>
Acked-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
Acked-by: Somnath Kotur <somnath.kotur@broadcom.com>
Acked-by: Hyong Youb Kim <hyonkim@cisco.com>
Reviewed-by: Ferruh Yigit <ferruh.yigit@intel.com>
26 files changed:
drivers/net/bnxt/bnxt_ethdev.c
drivers/net/bnxt/bnxt_reps.c
drivers/net/cnxk/cnxk_ethdev_ops.c
drivers/net/cxgbe/cxgbe_ethdev.c
drivers/net/dpaa2/dpaa2_ethdev.c
drivers/net/e1000/em_ethdev.c
drivers/net/e1000/igb_ethdev.c
drivers/net/enic/enic_ethdev.c
drivers/net/failsafe/failsafe_ops.c
drivers/net/hinic/hinic_pmd_ethdev.c
drivers/net/hns3/hns3_ethdev.c
drivers/net/hns3/hns3_ethdev_vf.c
drivers/net/i40e/i40e_ethdev.c
drivers/net/i40e/i40e_vf_representor.c
drivers/net/iavf/iavf_ethdev.c
drivers/net/ice/ice_dcf_ethdev.c
drivers/net/igc/igc_ethdev.c
drivers/net/ipn3ke/ipn3ke_representor.c
drivers/net/mvpp2/mrvl_ethdev.c
drivers/net/octeontx2/otx2_ethdev_ops.c
drivers/net/qede/qede_ethdev.c
drivers/net/sfc/sfc_ethdev.c
drivers/net/softnic/rte_eth_softnic.c
drivers/net/tap/rte_eth_tap.c
drivers/net/txgbe/txgbe_ethdev.c
drivers/net/txgbe/txgbe_ethdev_vf.c

index c8dad8a..257e6b0 100644 (file)
@@ -1000,6 +1000,7 @@ static int bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
        dev_info->speed_capa = bnxt_get_speed_capabilities(bp);
        dev_info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
                             RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
+       dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
 
        dev_info->default_rxconf = (struct rte_eth_rxconf) {
                .rx_thresh = {
index 92beea3..19da24b 100644 (file)
@@ -546,6 +546,7 @@ int bnxt_rep_dev_info_get_op(struct rte_eth_dev *eth_dev,
        dev_info->max_tx_queues = max_rx_rings;
        dev_info->reta_size = bnxt_rss_hash_tbl_size(parent_bp);
        dev_info->hash_key_size = 40;
+       dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
 
        /* MTU specifics */
        dev_info->min_mtu = RTE_ETHER_MIN_MTU;
index 6746430..62306b6 100644 (file)
@@ -68,6 +68,7 @@ cnxk_nix_info_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *devinfo)
        devinfo->speed_capa = dev->speed_capa;
        devinfo->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
                            RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
+       devinfo->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
        return 0;
 }
 
index 4758321..e7ea761 100644 (file)
@@ -131,6 +131,8 @@ int cxgbe_dev_info_get(struct rte_eth_dev *eth_dev,
        device_info->max_vfs = adapter->params.arch.vfcount;
        device_info->max_vmdq_pools = 0; /* XXX: For now no support for VMDQ */
 
+       device_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
+
        device_info->rx_queue_offload_capa = 0UL;
        device_info->rx_offload_capa = CXGBE_RX_OFFLOADS;
 
index 73d17f7..a370643 100644 (file)
@@ -254,6 +254,7 @@ dpaa2_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
        dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G |
                        RTE_ETH_LINK_SPEED_2_5G |
                        RTE_ETH_LINK_SPEED_10G;
+       dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
 
        dev_info->max_hash_mac_addrs = 0;
        dev_info->max_vfs = 0;
index 18fea4e..31c4870 100644 (file)
@@ -1101,6 +1101,8 @@ eth_em_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
                        RTE_ETH_LINK_SPEED_100M_HD | RTE_ETH_LINK_SPEED_100M |
                        RTE_ETH_LINK_SPEED_1G;
 
+       dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
+
        /* Preferred queue parameters */
        dev_info->default_rxportconf.nb_queues = 1;
        dev_info->default_txportconf.nb_queues = 1;
index ff06575..d0e2bc9 100644 (file)
@@ -2168,6 +2168,7 @@ eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
        dev_info->tx_queue_offload_capa = igb_get_tx_queue_offloads_capa(dev);
        dev_info->tx_offload_capa = igb_get_tx_port_offloads_capa(dev) |
                                    dev_info->tx_queue_offload_capa;
+       dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
 
        switch (hw->mac.type) {
        case e1000_82575:
index c8bdaf1..163be09 100644 (file)
@@ -469,6 +469,7 @@ static int enicpmd_dev_info_get(struct rte_eth_dev *eth_dev,
        device_info->rx_offload_capa = enic->rx_offload_capa;
        device_info->tx_offload_capa = enic->tx_offload_capa;
        device_info->tx_queue_offload_capa = enic->tx_queue_offload_capa;
+       device_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
        device_info->default_rxconf = (struct rte_eth_rxconf) {
                .rx_free_thresh = ENIC_DEFAULT_RX_FREE_THRESH
        };
index 822883b..55e21d6 100644 (file)
@@ -1227,6 +1227,7 @@ fs_dev_infos_get(struct rte_eth_dev *dev,
        infos->dev_capa =
                RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
                RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
+       infos->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
 
        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) {
                struct rte_eth_dev_info sub_info;
index 9cabd3e..1853511 100644 (file)
@@ -751,6 +751,8 @@ hinic_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
                                RTE_ETH_TX_OFFLOAD_TCP_TSO |
                                RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
+       info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
+
        info->hash_key_size = HINIC_RSS_KEY_SIZE;
        info->reta_size = HINIC_RSS_INDIR_SIZE;
        info->flow_type_rss_offloads = HINIC_RSS_OFFLOAD_ALL;
index 56eca03..03447c8 100644 (file)
@@ -2598,6 +2598,7 @@ hns3_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info)
        if (hns3_dev_get_support(hw, INDEP_TXRX))
                info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
                                 RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
+       info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
 
        if (hns3_dev_get_support(hw, PTP))
                info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
index 675db44..4a0d73f 100644 (file)
@@ -699,6 +699,7 @@ hns3vf_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info)
        if (hns3_dev_get_support(hw, INDEP_TXRX))
                info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
                                 RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
+       info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
 
        info->rx_desc_lim = (struct rte_eth_desc_lim) {
                .nb_max = HNS3_MAX_RING_DESC,
index 62e374d..9ea5f30 100644 (file)
@@ -3750,6 +3750,7 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
        dev_info->dev_capa =
                RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
                RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
+       dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
 
        dev_info->hash_key_size = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
                                                sizeof(uint32_t);
index 663c46b..7f8e818 100644 (file)
@@ -35,6 +35,8 @@ i40e_vf_representor_dev_infos_get(struct rte_eth_dev *ethdev,
        /* get dev info for the vdev */
        dev_info->device = ethdev->device;
 
+       dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
+
        dev_info->max_rx_queues = ethdev->data->nb_rx_queues;
        dev_info->max_tx_queues = ethdev->data->nb_tx_queues;
 
index d2719d3..4677c69 100644 (file)
@@ -1056,6 +1056,7 @@ iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
        dev_info->reta_size = vf->vf_res->rss_lut_size;
        dev_info->flow_type_rss_offloads = IAVF_RSS_OFFLOAD_ALL;
        dev_info->max_mac_addrs = IAVF_NUM_MACADDR_MAX;
+       dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
        dev_info->rx_offload_capa =
                RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
                RTE_ETH_RX_OFFLOAD_QINQ_STRIP |
index 4d9484e..d1e6757 100644 (file)
@@ -663,6 +663,7 @@ ice_dcf_dev_info_get(struct rte_eth_dev *dev,
        dev_info->hash_key_size = hw->vf_res->rss_key_size;
        dev_info->reta_size = hw->vf_res->rss_lut_size;
        dev_info->flow_type_rss_offloads = ICE_RSS_OFFLOAD_ALL;
+       dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
 
        dev_info->rx_offload_capa =
                RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
index 8189ad4..3e2bf14 100644 (file)
@@ -1477,6 +1477,7 @@ eth_igc_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
        dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
        dev_info->max_rx_pktlen = MAX_RX_JUMBO_FRAME_SIZE;
        dev_info->max_mac_addrs = hw->mac.rar_entry_count;
+       dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
        dev_info->rx_offload_capa = IGC_RX_OFFLOAD_ALL;
        dev_info->tx_offload_capa = IGC_TX_OFFLOAD_ALL;
        dev_info->rx_queue_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
index 1708858..de325c7 100644 (file)
@@ -96,6 +96,7 @@ ipn3ke_rpst_dev_infos_get(struct rte_eth_dev *ethdev,
        dev_info->dev_capa =
                RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
                RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
+       dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
 
        dev_info->switch_info.name = ethdev->device->name;
        dev_info->switch_info.domain_id = rpst->switch_domain_id;
index 25f213b..9c7fe13 100644 (file)
@@ -1709,6 +1709,8 @@ mrvl_dev_infos_get(struct rte_eth_dev *dev,
 {
        struct mrvl_priv *priv = dev->data->dev_private;
 
+       info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
+
        info->speed_capa = RTE_ETH_LINK_SPEED_10M |
                           RTE_ETH_LINK_SPEED_100M |
                           RTE_ETH_LINK_SPEED_1G |
index d5caaa3..4878151 100644 (file)
@@ -583,6 +583,7 @@ otx2_nix_info_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *devinfo)
 
        devinfo->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
                                RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
+       devinfo->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
 
        return 0;
 }
index 8ca00e7..3e9aaee 100644 (file)
@@ -1367,6 +1367,7 @@ qede_dev_info_get(struct rte_eth_dev *eth_dev,
        dev_info->max_rx_pktlen = (uint32_t)ETH_TX_MAX_NON_LSO_PKT_LEN;
        dev_info->rx_desc_lim = qede_rx_desc_lim;
        dev_info->tx_desc_lim = qede_tx_desc_lim;
+       dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
 
        if (IS_PF(edev))
                dev_info->max_rx_queues = (uint16_t)RTE_MIN(
index 833d833..6b0a7e6 100644 (file)
@@ -186,6 +186,7 @@ sfc_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 
        dev_info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
                             RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
+       dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
 
        if (mae->status == SFC_MAE_STATUS_SUPPORTED ||
            mae->status == SFC_MAE_STATUS_ADMIN) {
index 3ef3381..8c098ca 100644 (file)
@@ -93,6 +93,7 @@ pmd_dev_infos_get(struct rte_eth_dev *dev __rte_unused,
        dev_info->max_rx_pktlen = UINT32_MAX;
        dev_info->max_rx_queues = UINT16_MAX;
        dev_info->max_tx_queues = UINT16_MAX;
+       dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
 
        return 0;
 }
index a9a7658..37ac18f 100644 (file)
@@ -1006,6 +1006,7 @@ tap_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
         * functions together and not in partial combinations
         */
        dev_info->flow_type_rss_offloads = ~TAP_RSS_HF_MASK;
+       dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
 
        return 0;
 }
index fde9914..5c31ba5 100644 (file)
@@ -2597,6 +2597,7 @@ txgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
        dev_info->max_vfs = pci_dev->max_vfs;
        dev_info->max_vmdq_pools = RTE_ETH_64_POOLS;
        dev_info->vmdq_queue_num = dev_info->max_rx_queues;
+       dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
        dev_info->rx_queue_offload_capa = txgbe_get_rx_queue_offloads(dev);
        dev_info->rx_offload_capa = (txgbe_get_rx_port_offloads(dev) |
                                     dev_info->rx_queue_offload_capa);
index 4dda55b..67ae69d 100644 (file)
@@ -487,6 +487,7 @@ txgbevf_dev_info_get(struct rte_eth_dev *dev,
        dev_info->max_hash_mac_addrs = TXGBE_VMDQ_NUM_UC_MAC;
        dev_info->max_vfs = pci_dev->max_vfs;
        dev_info->max_vmdq_pools = RTE_ETH_64_POOLS;
+       dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
        dev_info->rx_queue_offload_capa = txgbe_get_rx_queue_offloads(dev);
        dev_info->rx_offload_capa = (txgbe_get_rx_port_offloads(dev) |
                                     dev_info->rx_queue_offload_capa);