ethdev: query supported packet types
author     Jianfeng Tan <jianfeng.tan@intel.com>
           Mon, 14 Mar 2016 20:50:50 +0000 (04:50 +0800)
committer  Thomas Monjalon <thomas.monjalon@6wind.com>
           Fri, 25 Mar 2016 17:56:43 +0000 (18:56 +0100)
Add a new API rte_eth_dev_get_supported_ptypes to query which packet types
can be filled by a given device. The device should already be started, or its
PMD RX burst function already selected, since the supported packet types may
vary depending on the RX function.
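
As a usage illustration, here is a minimal sketch of a caller querying the L3
packet types reported for a started port. The helper name, mask choice and
array size below are illustrative only, and error handling is kept minimal:

    #include <stdio.h>
    #include <inttypes.h>
    #include <rte_common.h>
    #include <rte_mbuf.h>
    #include <rte_ethdev.h>

    /* Illustrative helper: print the L3 packet types a port can recognize. */
    static void
    print_l3_ptypes(uint8_t port_id)
    {
            uint32_t ptypes[16];
            int i, num;

            /* Call after rte_eth_dev_start(), once the RX burst function is set. */
            num = rte_eth_dev_get_supported_ptypes(port_id, RTE_PTYPE_L3_MASK,
                                                   ptypes, RTE_DIM(ptypes));
            if (num <= 0)
                    return; /* -ENODEV/-ENOTSUP, or nothing reported for this mask */

            /* num is the full count, so cap the loop at the array size as well. */
            for (i = 0; i < num && i < (int)RTE_DIM(ptypes); i++)
                    printf("port %u: ptype 0x%08" PRIx32 "\n",
                           (unsigned)port_id, ptypes[i]);
    }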

Signed-off-by: Jianfeng Tan <jianfeng.tan@intel.com>
Acked-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
Acked-by: Adrien Mazarguil <adrien.mazarguil@6wind.com>
23 files changed:
drivers/net/cxgbe/cxgbe_ethdev.c
drivers/net/e1000/igb_ethdev.c
drivers/net/enic/enic_ethdev.c
drivers/net/fm10k/fm10k_ethdev.c
drivers/net/fm10k/fm10k_rxtx.c
drivers/net/fm10k/fm10k_rxtx_vec.c
drivers/net/i40e/i40e_ethdev.c
drivers/net/i40e/i40e_ethdev_vf.c
drivers/net/i40e/i40e_rxtx.c
drivers/net/i40e/i40e_rxtx.h
drivers/net/ixgbe/ixgbe_ethdev.c
drivers/net/ixgbe/ixgbe_ethdev.h
drivers/net/ixgbe/ixgbe_rxtx.c
drivers/net/mlx4/mlx4.c
drivers/net/mlx5/mlx5.c
drivers/net/mlx5/mlx5.h
drivers/net/mlx5/mlx5_ethdev.c
drivers/net/mlx5/mlx5_rxtx.c
drivers/net/nfp/nfp_net.c
drivers/net/vmxnet3/vmxnet3_ethdev.c
lib/librte_ether/rte_ethdev.c
lib/librte_ether/rte_ethdev.h
lib/librte_ether/rte_ether_version.map

drivers/net/cxgbe/cxgbe_ethdev.c
index ecd8dc3..c4f4a34 100644
@@ -767,6 +767,20 @@ static int cxgbe_flow_ctrl_set(struct rte_eth_dev *eth_dev,
                             &pi->link_cfg);
 }
 
+static const uint32_t *
+cxgbe_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev)
+{
+       static const uint32_t ptypes[] = {
+               RTE_PTYPE_L3_IPV4,
+               RTE_PTYPE_L3_IPV6,
+               RTE_PTYPE_UNKNOWN
+       };
+
+       if (eth_dev->rx_pkt_burst == cxgbe_recv_pkts)
+               return ptypes;
+       return NULL;
+}
+
 static struct eth_dev_ops cxgbe_eth_dev_ops = {
        .dev_start              = cxgbe_dev_start,
        .dev_stop               = cxgbe_dev_stop,
@@ -777,6 +791,7 @@ static struct eth_dev_ops cxgbe_eth_dev_ops = {
        .allmulticast_disable   = cxgbe_dev_allmulticast_disable,
        .dev_configure          = cxgbe_dev_configure,
        .dev_infos_get          = cxgbe_dev_info_get,
+       .dev_supported_ptypes_get = cxgbe_dev_supported_ptypes_get,
        .link_update            = cxgbe_dev_link_update,
        .mtu_set                = cxgbe_dev_mtu_set,
        .tx_queue_setup         = cxgbe_dev_tx_queue_setup,
drivers/net/e1000/igb_ethdev.c
index e899449..bd0ae26 100644
@@ -104,6 +104,7 @@ static void eth_igb_stats_reset(struct rte_eth_dev *dev);
 static void eth_igb_xstats_reset(struct rte_eth_dev *dev);
 static void eth_igb_infos_get(struct rte_eth_dev *dev,
                              struct rte_eth_dev_info *dev_info);
+static const uint32_t *eth_igb_supported_ptypes_get(struct rte_eth_dev *dev);
 static void eth_igbvf_infos_get(struct rte_eth_dev *dev,
                                struct rte_eth_dev_info *dev_info);
 static int  eth_igb_flow_ctrl_get(struct rte_eth_dev *dev,
@@ -326,6 +327,7 @@ static const struct eth_dev_ops eth_igb_ops = {
        .stats_reset          = eth_igb_stats_reset,
        .xstats_reset         = eth_igb_xstats_reset,
        .dev_infos_get        = eth_igb_infos_get,
+       .dev_supported_ptypes_get = eth_igb_supported_ptypes_get,
        .mtu_set              = eth_igb_mtu_set,
        .vlan_filter_set      = eth_igb_vlan_filter_set,
        .vlan_tpid_set        = eth_igb_vlan_tpid_set,
@@ -387,6 +389,7 @@ static const struct eth_dev_ops igbvf_eth_dev_ops = {
        .xstats_reset         = eth_igbvf_stats_reset,
        .vlan_filter_set      = igbvf_vlan_filter_set,
        .dev_infos_get        = eth_igbvf_infos_get,
+       .dev_supported_ptypes_get = eth_igb_supported_ptypes_get,
        .rx_queue_setup       = eth_igb_rx_queue_setup,
        .rx_queue_release     = eth_igb_rx_queue_release,
        .tx_queue_setup       = eth_igb_tx_queue_setup,
@@ -1920,6 +1923,33 @@ eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
        dev_info->tx_desc_lim = tx_desc_lim;
 }
 
+static const uint32_t *
+eth_igb_supported_ptypes_get(struct rte_eth_dev *dev)
+{
+       static const uint32_t ptypes[] = {
+               /* refers to igb_rxd_pkt_info_to_pkt_type() */
+               RTE_PTYPE_L2_ETHER,
+               RTE_PTYPE_L3_IPV4,
+               RTE_PTYPE_L3_IPV4_EXT,
+               RTE_PTYPE_L3_IPV6,
+               RTE_PTYPE_L3_IPV6_EXT,
+               RTE_PTYPE_L4_TCP,
+               RTE_PTYPE_L4_UDP,
+               RTE_PTYPE_L4_SCTP,
+               RTE_PTYPE_TUNNEL_IP,
+               RTE_PTYPE_INNER_L3_IPV6,
+               RTE_PTYPE_INNER_L3_IPV6_EXT,
+               RTE_PTYPE_INNER_L4_TCP,
+               RTE_PTYPE_INNER_L4_UDP,
+               RTE_PTYPE_UNKNOWN
+       };
+
+       if (dev->rx_pkt_burst == eth_igb_recv_pkts ||
+           dev->rx_pkt_burst == eth_igb_recv_scattered_pkts)
+               return ptypes;
+       return NULL;
+}
+
 static void
 eth_igbvf_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 {
drivers/net/enic/enic_ethdev.c
index 6f2ada5..bab0f7d 100644
@@ -438,6 +438,19 @@ static void enicpmd_dev_info_get(struct rte_eth_dev *eth_dev,
        };
 }
 
+static const uint32_t *enicpmd_dev_supported_ptypes_get(struct rte_eth_dev *dev)
+{
+       static const uint32_t ptypes[] = {
+               RTE_PTYPE_L3_IPV4,
+               RTE_PTYPE_L3_IPV6,
+               RTE_PTYPE_UNKNOWN
+       };
+
+       if (dev->rx_pkt_burst == enic_recv_pkts)
+               return ptypes;
+       return NULL;
+}
+
 static void enicpmd_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
 {
        struct enic *enic = pmd_priv(eth_dev);
@@ -561,6 +574,7 @@ static const struct eth_dev_ops enicpmd_eth_dev_ops = {
        .stats_reset          = enicpmd_dev_stats_reset,
        .queue_stats_mapping_set = NULL,
        .dev_infos_get        = enicpmd_dev_info_get,
+       .dev_supported_ptypes_get = enicpmd_dev_supported_ptypes_get,
        .mtu_set              = NULL,
        .vlan_filter_set      = enicpmd_vlan_filter_set,
        .vlan_tpid_set        = NULL,
drivers/net/fm10k/fm10k_ethdev.c
index 4b07a8b..b510487 100644
@@ -1412,6 +1412,55 @@ fm10k_dev_infos_get(struct rte_eth_dev *dev,
        };
 }
 
+#ifdef RTE_LIBRTE_FM10K_RX_OLFLAGS_ENABLE
+static const uint32_t *
+fm10k_dev_supported_ptypes_get(struct rte_eth_dev *dev)
+{
+       if (dev->rx_pkt_burst == fm10k_recv_pkts ||
+           dev->rx_pkt_burst == fm10k_recv_scattered_pkts) {
+               static uint32_t ptypes[] = {
+                       /* refers to rx_desc_to_ol_flags() */
+                       RTE_PTYPE_L2_ETHER,
+                       RTE_PTYPE_L3_IPV4,
+                       RTE_PTYPE_L3_IPV4_EXT,
+                       RTE_PTYPE_L3_IPV6,
+                       RTE_PTYPE_L3_IPV6_EXT,
+                       RTE_PTYPE_L4_TCP,
+                       RTE_PTYPE_L4_UDP,
+                       RTE_PTYPE_UNKNOWN
+               };
+
+               return ptypes;
+       } else if (dev->rx_pkt_burst == fm10k_recv_pkts_vec ||
+                  dev->rx_pkt_burst == fm10k_recv_scattered_pkts_vec) {
+               static uint32_t ptypes_vec[] = {
+                       /* refers to fm10k_desc_to_pktype_v() */
+                       RTE_PTYPE_L3_IPV4,
+                       RTE_PTYPE_L3_IPV4_EXT,
+                       RTE_PTYPE_L3_IPV6,
+                       RTE_PTYPE_L3_IPV6_EXT,
+                       RTE_PTYPE_L4_TCP,
+                       RTE_PTYPE_L4_UDP,
+                       RTE_PTYPE_TUNNEL_GENEVE,
+                       RTE_PTYPE_TUNNEL_NVGRE,
+                       RTE_PTYPE_TUNNEL_VXLAN,
+                       RTE_PTYPE_TUNNEL_GRE,
+                       RTE_PTYPE_UNKNOWN
+               };
+
+               return ptypes_vec;
+       }
+
+       return NULL;
+}
+#else
+static const uint32_t *
+fm10k_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
+{
+       return NULL;
+}
+#endif
+
 static int
 fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
 {
@@ -2578,6 +2627,7 @@ static const struct eth_dev_ops fm10k_eth_dev_ops = {
        .xstats_reset           = fm10k_stats_reset,
        .link_update            = fm10k_link_update,
        .dev_infos_get          = fm10k_dev_infos_get,
+       .dev_supported_ptypes_get = fm10k_dev_supported_ptypes_get,
        .vlan_filter_set        = fm10k_vlan_filter_set,
        .vlan_offload_set       = fm10k_vlan_offload_set,
        .mac_addr_add           = fm10k_macaddr_add,
drivers/net/fm10k/fm10k_rxtx.c
index 66db5b6..81ed4e7 100644
@@ -65,6 +65,9 @@ static inline void dump_rxd(union fm10k_rx_desc *rxd)
 }
 #endif
 
+/* @note: When this function is changed, make corresponding change to
+ * fm10k_dev_supported_ptypes_get()
+ */
 static inline void
 rx_desc_to_ol_flags(struct rte_mbuf *m, const union fm10k_rx_desc *d)
 {
drivers/net/fm10k/fm10k_rxtx_vec.c
index 1c78725..f8efe8f 100644
@@ -149,6 +149,9 @@ fm10k_desc_to_olflags_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
        rx_pkts[3]->ol_flags = vol.e[3];
 }
 
+/* @note: When this function is changed, make corresponding change to
+ * fm10k_dev_supported_ptypes_get().
+ */
 static inline void
 fm10k_desc_to_pktype_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
 {
drivers/net/i40e/i40e_ethdev.c
index 6fdae57..e91e966 100644
@@ -461,6 +461,7 @@ static const struct eth_dev_ops i40e_eth_dev_ops = {
        .xstats_reset                 = i40e_dev_stats_reset,
        .queue_stats_mapping_set      = i40e_dev_queue_stats_mapping_set,
        .dev_infos_get                = i40e_dev_info_get,
+       .dev_supported_ptypes_get     = i40e_dev_supported_ptypes_get,
        .vlan_filter_set              = i40e_vlan_filter_set,
        .vlan_tpid_set                = i40e_vlan_tpid_set,
        .vlan_offload_set             = i40e_vlan_offload_set,
drivers/net/i40e/i40e_ethdev_vf.c
index 6b7b350..510191e 100644
@@ -201,6 +201,7 @@ static const struct eth_dev_ops i40evf_eth_dev_ops = {
        .xstats_reset         = i40evf_dev_xstats_reset,
        .dev_close            = i40evf_dev_close,
        .dev_infos_get        = i40evf_dev_info_get,
+       .dev_supported_ptypes_get = i40e_dev_supported_ptypes_get,
        .vlan_filter_set      = i40evf_vlan_filter_set,
        .vlan_offload_set     = i40evf_vlan_offload_set,
        .vlan_pvid_set        = i40evf_vlan_pvid_set,
drivers/net/i40e/i40e_rxtx.c
index 81cde6c..80847ab 100644
@@ -188,7 +188,10 @@ i40e_get_iee15888_flags(struct rte_mbuf *mb, uint64_t qword)
 }
 #endif
 
-/* For each value it means, datasheet of hardware can tell more details */
+/* For each value it means, datasheet of hardware can tell more details
+ *
+ * @note: fix i40e_dev_supported_ptypes_get() if any change here.
+ */
 static inline uint32_t
 i40e_rxd_pkt_type_mapping(uint8_t ptype)
 {
@@ -2087,6 +2090,47 @@ i40e_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
        return 0;
 }
 
+const uint32_t *
+i40e_dev_supported_ptypes_get(struct rte_eth_dev *dev)
+{
+       static const uint32_t ptypes[] = {
+               /* refers to i40e_rxd_pkt_type_mapping() */
+               RTE_PTYPE_L2_ETHER,
+               RTE_PTYPE_L2_ETHER_TIMESYNC,
+               RTE_PTYPE_L2_ETHER_LLDP,
+               RTE_PTYPE_L2_ETHER_ARP,
+               RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
+               RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
+               RTE_PTYPE_L4_FRAG,
+               RTE_PTYPE_L4_ICMP,
+               RTE_PTYPE_L4_NONFRAG,
+               RTE_PTYPE_L4_SCTP,
+               RTE_PTYPE_L4_TCP,
+               RTE_PTYPE_L4_UDP,
+               RTE_PTYPE_TUNNEL_GRENAT,
+               RTE_PTYPE_TUNNEL_IP,
+               RTE_PTYPE_INNER_L2_ETHER,
+               RTE_PTYPE_INNER_L2_ETHER_VLAN,
+               RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
+               RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
+               RTE_PTYPE_INNER_L4_FRAG,
+               RTE_PTYPE_INNER_L4_ICMP,
+               RTE_PTYPE_INNER_L4_NONFRAG,
+               RTE_PTYPE_INNER_L4_SCTP,
+               RTE_PTYPE_INNER_L4_TCP,
+               RTE_PTYPE_INNER_L4_UDP,
+               RTE_PTYPE_UNKNOWN
+       };
+
+       if (dev->rx_pkt_burst == i40e_recv_pkts ||
+#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
+           dev->rx_pkt_burst == i40e_recv_pkts_bulk_alloc ||
+#endif
+           dev->rx_pkt_burst == i40e_recv_scattered_pkts)
+               return ptypes;
+       return NULL;
+}
+
 int
 i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
                        uint16_t queue_idx,
drivers/net/i40e/i40e_rxtx.h
index 5c2f5c2..98179f0 100644
@@ -200,6 +200,7 @@ int i40e_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
 int i40e_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
 int i40e_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
 int i40e_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
+const uint32_t *i40e_dev_supported_ptypes_get(struct rte_eth_dev *dev);
 int i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
                            uint16_t queue_idx,
                            uint16_t nb_desc,
drivers/net/ixgbe/ixgbe_ethdev.c
index d4d883a..c03d53c 100644
@@ -185,6 +185,7 @@ static int ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
                                             uint8_t is_rx);
 static void ixgbe_dev_info_get(struct rte_eth_dev *dev,
                               struct rte_eth_dev_info *dev_info);
+static const uint32_t *ixgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev);
 static void ixgbevf_dev_info_get(struct rte_eth_dev *dev,
                                 struct rte_eth_dev_info *dev_info);
 static int ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
@@ -467,6 +468,7 @@ static const struct eth_dev_ops ixgbe_eth_dev_ops = {
        .xstats_reset         = ixgbe_dev_xstats_reset,
        .queue_stats_mapping_set = ixgbe_dev_queue_stats_mapping_set,
        .dev_infos_get        = ixgbe_dev_info_get,
+       .dev_supported_ptypes_get = ixgbe_dev_supported_ptypes_get,
        .mtu_set              = ixgbe_dev_mtu_set,
        .vlan_filter_set      = ixgbe_vlan_filter_set,
        .vlan_tpid_set        = ixgbe_vlan_tpid_set,
@@ -557,6 +559,7 @@ static const struct eth_dev_ops ixgbevf_eth_dev_ops = {
        .allmulticast_enable  = ixgbevf_dev_allmulticast_enable,
        .allmulticast_disable = ixgbevf_dev_allmulticast_disable,
        .dev_infos_get        = ixgbevf_dev_info_get,
+       .dev_supported_ptypes_get = ixgbe_dev_supported_ptypes_get,
        .mtu_set              = ixgbevf_dev_set_mtu,
        .vlan_filter_set      = ixgbevf_vlan_filter_set,
        .vlan_strip_queue_set = ixgbevf_vlan_strip_queue_set,
@@ -2930,6 +2933,41 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
        dev_info->flow_type_rss_offloads = IXGBE_RSS_OFFLOAD_ALL;
 }
 
+static const uint32_t *
+ixgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
+{
+       static const uint32_t ptypes[] = {
+               /* For non-vec functions,
+                * refers to ixgbe_rxd_pkt_info_to_pkt_type();
+                * for vec functions,
+                * refers to _recv_raw_pkts_vec().
+                */
+               RTE_PTYPE_L2_ETHER,
+               RTE_PTYPE_L3_IPV4,
+               RTE_PTYPE_L3_IPV4_EXT,
+               RTE_PTYPE_L3_IPV6,
+               RTE_PTYPE_L3_IPV6_EXT,
+               RTE_PTYPE_L4_SCTP,
+               RTE_PTYPE_L4_TCP,
+               RTE_PTYPE_L4_UDP,
+               RTE_PTYPE_TUNNEL_IP,
+               RTE_PTYPE_INNER_L3_IPV6,
+               RTE_PTYPE_INNER_L3_IPV6_EXT,
+               RTE_PTYPE_INNER_L4_TCP,
+               RTE_PTYPE_INNER_L4_UDP,
+               RTE_PTYPE_UNKNOWN
+       };
+
+       if (dev->rx_pkt_burst == ixgbe_recv_pkts ||
+           dev->rx_pkt_burst == ixgbe_recv_pkts_lro_single_alloc ||
+           dev->rx_pkt_burst == ixgbe_recv_pkts_lro_bulk_alloc ||
+           dev->rx_pkt_burst == ixgbe_recv_pkts_bulk_alloc ||
+           dev->rx_pkt_burst == ixgbe_recv_pkts_vec ||
+           dev->rx_pkt_burst == ixgbe_recv_scattered_pkts_vec)
+               return ptypes;
+       return NULL;
+}
+
 static void
 ixgbevf_dev_info_get(struct rte_eth_dev *dev,
                     struct rte_eth_dev_info *dev_info)
drivers/net/ixgbe/ixgbe_ethdev.h
index 5c3aa16..b75e795 100644
@@ -380,6 +380,9 @@ void ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev);
 uint16_t ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                uint16_t nb_pkts);
 
+uint16_t ixgbe_recv_pkts_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
+                                   uint16_t nb_pkts);
+
 uint16_t ixgbe_recv_pkts_lro_single_alloc(void *rx_queue,
                struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
 uint16_t ixgbe_recv_pkts_lro_bulk_alloc(void *rx_queue,
drivers/net/ixgbe/ixgbe_rxtx.c
index ff6ddb8..89c0eb9 100644
@@ -941,6 +941,8 @@ end_of_tx:
 #define IXGBE_PACKET_TYPE_MAX               0X80
 #define IXGBE_PACKET_TYPE_MASK              0X7F
 #define IXGBE_PACKET_TYPE_SHIFT             0X04
+
+/* @note: fix ixgbe_dev_supported_ptypes_get() if any change here. */
 static inline uint32_t
 ixgbe_rxd_pkt_info_to_pkt_type(uint16_t pkt_info)
 {
@@ -1298,7 +1300,7 @@ rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 }
 
 /* split requests into chunks of size RTE_PMD_IXGBE_RX_MAX_BURST */
-static uint16_t
+uint16_t
 ixgbe_recv_pkts_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
                           uint16_t nb_pkts)
 {
drivers/net/mlx4/mlx4.c
index cc4e9aa..65d93ce 100644
@@ -2876,6 +2876,8 @@ rxq_cleanup(struct rxq *rxq)
  * @param flags
  *   RX completion flags returned by poll_length_flags().
  *
+ * @note: fix mlx4_dev_supported_ptypes_get() if any change here.
+ *
  * @return
  *   Packet type for struct rte_mbuf.
  */
@@ -4304,6 +4306,24 @@ mlx4_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
        priv_unlock(priv);
 }
 
+static const uint32_t *
+mlx4_dev_supported_ptypes_get(struct rte_eth_dev *dev)
+{
+       static const uint32_t ptypes[] = {
+               /* refers to rxq_cq_to_pkt_type() */
+               RTE_PTYPE_L3_IPV4,
+               RTE_PTYPE_L3_IPV6,
+               RTE_PTYPE_INNER_L3_IPV4,
+               RTE_PTYPE_INNER_L3_IPV6,
+               RTE_PTYPE_UNKNOWN
+       };
+
+       if (dev->rx_pkt_burst == mlx4_rx_burst ||
+           dev->rx_pkt_burst == mlx4_rx_burst_sp)
+               return ptypes;
+       return NULL;
+}
+
 /**
  * DPDK callback to get device statistics.
  *
@@ -5041,6 +5061,7 @@ static const struct eth_dev_ops mlx4_dev_ops = {
        .stats_reset = mlx4_stats_reset,
        .queue_stats_mapping_set = NULL,
        .dev_infos_get = mlx4_dev_infos_get,
+       .dev_supported_ptypes_get = mlx4_dev_supported_ptypes_get,
        .vlan_filter_set = mlx4_vlan_filter_set,
        .vlan_tpid_set = NULL,
        .vlan_strip_queue_set = NULL,
drivers/net/mlx5/mlx5.c
index ad69ec2..8e2c909 100644
@@ -157,6 +157,7 @@ static const struct eth_dev_ops mlx5_dev_ops = {
        .stats_get = mlx5_stats_get,
        .stats_reset = mlx5_stats_reset,
        .dev_infos_get = mlx5_dev_infos_get,
+       .dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get,
        .vlan_filter_set = mlx5_vlan_filter_set,
        .rx_queue_setup = mlx5_rx_queue_setup,
        .tx_queue_setup = mlx5_tx_queue_setup,
drivers/net/mlx5/mlx5.h
index 43b24fb..8b9b96e 100644
@@ -158,6 +158,7 @@ int priv_get_mtu(struct priv *, uint16_t *);
 int priv_set_flags(struct priv *, unsigned int, unsigned int);
 int mlx5_dev_configure(struct rte_eth_dev *);
 void mlx5_dev_infos_get(struct rte_eth_dev *, struct rte_eth_dev_info *);
+const uint32_t *mlx5_dev_supported_ptypes_get(struct rte_eth_dev *dev);
 int mlx5_link_update(struct rte_eth_dev *, int);
 int mlx5_dev_set_mtu(struct rte_eth_dev *, uint16_t);
 int mlx5_dev_get_flow_ctrl(struct rte_eth_dev *, struct rte_eth_fc_conf *);
drivers/net/mlx5/mlx5_ethdev.c
index 6704382..fc8610a 100644
@@ -525,6 +525,25 @@ mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
        priv_unlock(priv);
 }
 
+const uint32_t *
+mlx5_dev_supported_ptypes_get(struct rte_eth_dev *dev)
+{
+       static const uint32_t ptypes[] = {
+               /* refers to rxq_cq_to_pkt_type() */
+               RTE_PTYPE_L3_IPV4,
+               RTE_PTYPE_L3_IPV6,
+               RTE_PTYPE_INNER_L3_IPV4,
+               RTE_PTYPE_INNER_L3_IPV6,
+               RTE_PTYPE_UNKNOWN
+
+       };
+
+       if (dev->rx_pkt_burst == mlx5_rx_burst ||
+           dev->rx_pkt_burst == mlx5_rx_burst_sp)
+               return ptypes;
+       return NULL;
+}
+
 /**
  * DPDK callback to retrieve physical link information (unlocked version).
  *
drivers/net/mlx5/mlx5_rxtx.c
index 4919189..0128139 100644
@@ -670,6 +670,8 @@ stop:
  * @param flags
  *   RX completion flags returned by poll_length_flags().
  *
+ * @note: fix mlx5_dev_supported_ptypes_get() if any change here.
+ *
  * @return
  *   Packet type for struct rte_mbuf.
  */
drivers/net/nfp/nfp_net.c
index 8810704..e856b2f 100644
@@ -1063,6 +1063,23 @@ nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
        dev_info->hash_key_size = NFP_NET_CFG_RSS_KEY_SZ;
 }
 
+static const uint32_t *
+nfp_net_supported_ptypes_get(struct rte_eth_dev *dev)
+{
+       static const uint32_t ptypes[] = {
+               /* refers to nfp_net_set_hash() */
+               RTE_PTYPE_INNER_L3_IPV4,
+               RTE_PTYPE_INNER_L3_IPV6,
+               RTE_PTYPE_INNER_L3_IPV6_EXT,
+               RTE_PTYPE_INNER_L4_MASK,
+               RTE_PTYPE_UNKNOWN
+       };
+
+       if (dev->rx_pkt_burst == nfp_net_recv_pkts)
+               return ptypes;
+       return NULL;
+}
+
 static uint32_t
 nfp_net_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_idx)
 {
@@ -2283,6 +2300,7 @@ static struct eth_dev_ops nfp_net_eth_dev_ops = {
        .stats_get              = nfp_net_stats_get,
        .stats_reset            = nfp_net_stats_reset,
        .dev_infos_get          = nfp_net_infos_get,
+       .dev_supported_ptypes_get = nfp_net_supported_ptypes_get,
        .mtu_set                = nfp_net_dev_mtu_set,
        .vlan_offload_set       = nfp_net_vlan_offload_set,
        .reta_update            = nfp_net_reta_update,
drivers/net/vmxnet3/vmxnet3_ethdev.c
index a5c9ba5..f2b6b92 100644
@@ -86,6 +86,8 @@ static void vmxnet3_dev_stats_get(struct rte_eth_dev *dev,
                                struct rte_eth_stats *stats);
 static void vmxnet3_dev_info_get(struct rte_eth_dev *dev,
                                struct rte_eth_dev_info *dev_info);
+static const uint32_t *
+vmxnet3_dev_supported_ptypes_get(struct rte_eth_dev *dev);
 static int vmxnet3_dev_vlan_filter_set(struct rte_eth_dev *dev,
                                       uint16_t vid, int on);
 static void vmxnet3_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask);
@@ -119,6 +121,7 @@ static const struct eth_dev_ops vmxnet3_eth_dev_ops = {
        .stats_get            = vmxnet3_dev_stats_get,
        .mac_addr_set         = vmxnet3_mac_addr_set,
        .dev_infos_get        = vmxnet3_dev_info_get,
+       .dev_supported_ptypes_get = vmxnet3_dev_supported_ptypes_get,
        .vlan_filter_set      = vmxnet3_dev_vlan_filter_set,
        .vlan_offload_set     = vmxnet3_dev_vlan_offload_set,
        .rx_queue_setup       = vmxnet3_dev_rx_queue_setup,
@@ -734,6 +737,20 @@ vmxnet3_dev_info_get(__attribute__((unused))struct rte_eth_dev *dev,
                DEV_TX_OFFLOAD_TCP_TSO;
 }
 
+static const uint32_t *
+vmxnet3_dev_supported_ptypes_get(struct rte_eth_dev *dev)
+{
+       static const uint32_t ptypes[] = {
+               RTE_PTYPE_L3_IPV4_EXT,
+               RTE_PTYPE_L3_IPV4,
+               RTE_PTYPE_UNKNOWN
+       };
+
+       if (dev->rx_pkt_burst == vmxnet3_recv_pkts)
+               return ptypes;
+       return NULL;
+}
+
 static void
 vmxnet3_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
 {
lib/librte_ether/rte_ethdev.c
index 8721a6b..c0d85d6 100644
@@ -1627,6 +1627,33 @@ rte_eth_dev_info_get(uint8_t port_id, struct rte_eth_dev_info *dev_info)
        dev_info->driver_name = dev->data->drv_name;
 }
 
+int
+rte_eth_dev_get_supported_ptypes(uint8_t port_id, uint32_t ptype_mask,
+                                uint32_t *ptypes, int num)
+{
+       int i, j;
+       struct rte_eth_dev *dev;
+       const uint32_t *all_ptypes;
+
+       RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+       dev = &rte_eth_devices[port_id];
+       RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_supported_ptypes_get,
+                               -ENOTSUP);
+       all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
+
+       if (!all_ptypes)
+               return 0;
+
+       for (i = 0, j = 0; all_ptypes[i] != RTE_PTYPE_UNKNOWN; ++i)
+               if (all_ptypes[i] & ptype_mask) {
+                       if (j < num)
+                               ptypes[j] = all_ptypes[i];
+                       j++;
+               }
+
+       return j;
+}
+
 void
 rte_eth_macaddr_get(uint8_t port_id, struct ether_addr *mac_addr)
 {
lib/librte_ether/rte_ethdev.h
index b5704e1..ef9d02b 100644
@@ -1050,6 +1050,9 @@ typedef void (*eth_dev_infos_get_t)(struct rte_eth_dev *dev,
                                    struct rte_eth_dev_info *dev_info);
 /**< @internal Get specific informations of an Ethernet device. */
 
+typedef const uint32_t *(*eth_dev_supported_ptypes_get_t)(struct rte_eth_dev *dev);
+/**< @internal Get supported ptypes of an Ethernet device. */
+
 typedef int (*eth_queue_start_t)(struct rte_eth_dev *dev,
                                    uint16_t queue_id);
 /**< @internal Start rx and tx of a queue of an Ethernet device. */
@@ -1387,6 +1390,8 @@ struct eth_dev_ops {
        eth_queue_stats_mapping_set_t queue_stats_mapping_set;
        /**< Configure per queue stat counter mapping. */
        eth_dev_infos_get_t        dev_infos_get; /**< Get device info. */
+       eth_dev_supported_ptypes_get_t dev_supported_ptypes_get;
+       /**< Get packet types supported and identified by the device. */
        mtu_set_t                  mtu_set; /**< Set MTU. */
        vlan_filter_set_t          vlan_filter_set;  /**< Filter VLAN Setup. */
        vlan_tpid_set_t            vlan_tpid_set;      /**< Outer VLAN TPID Setup. */
@@ -2315,6 +2320,29 @@ void rte_eth_macaddr_get(uint8_t port_id, struct ether_addr *mac_addr);
  */
 void rte_eth_dev_info_get(uint8_t port_id, struct rte_eth_dev_info *dev_info);
 
+/**
+ * Retrieve the supported packet types of an Ethernet device.
+ *
+ * @note
+ *   It is better to invoke this API after the device is started or its RX
+ *   burst function is selected, so that the reported ptypes are accurate.
+ * @param port_id
+ *   The port identifier of the Ethernet device.
+ * @param ptype_mask
+ *   A hint of the kinds of packet types the caller is interested in.
+ * @param ptypes
+ *   An array pointer to store the matching packet types, allocated by caller.
+ * @param num
+ *   Size of the array pointed to by param ptypes.
+ * @return
+ *   - (>0) Number of supported ptypes. If the number of types exceeds num,
+ *          only the first num entries are filled into the given array.
+ *   - (0 or -ENOTSUP) if the PMD does not report any of the specified ptypes.
+ *   - (-ENODEV) if *port_id* invalid.
+ */
+int rte_eth_dev_get_supported_ptypes(uint8_t port_id, uint32_t ptype_mask,
+                                    uint32_t *ptypes, int num);
+
 /**
  * Retrieve the MTU of an Ethernet device.
  *
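
Given the return value semantics documented above, a caller that prefers not to
guess the array size can pass num as 0 first: with the implementation added in
rte_ethdev.c, nothing is written in that case but the full count is still
returned. A sketch under that assumption (the helper name and the catch-all
UINT32_MAX mask are illustrative):

    #include <stdint.h>
    #include <stdlib.h>
    #include <rte_ethdev.h>

    /* Illustrative helper: return a malloc'd array of every reported ptype. */
    static uint32_t *
    fetch_supported_ptypes(uint8_t port_id, int *count)
    {
            uint32_t *ptypes;
            int num;

            /* First pass: num == 0, so only the count is obtained. */
            num = rte_eth_dev_get_supported_ptypes(port_id, UINT32_MAX, NULL, 0);
            if (num <= 0)
                    return NULL;

            ptypes = malloc(num * sizeof(*ptypes));
            if (ptypes == NULL)
                    return NULL;

            /* Second pass fills the array, assuming the RX function is unchanged. */
            *count = rte_eth_dev_get_supported_ptypes(port_id, UINT32_MAX,
                                                      ptypes, num);
            return ptypes;
    }
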
lib/librte_ether/rte_ether_version.map
index 5cb4d79..b1f4475 100644
@@ -119,6 +119,7 @@ DPDK_2.2 {
 DPDK_16.04 {
        global:
 
+       rte_eth_dev_get_supported_ptypes;
        rte_eth_dev_l2_tunnel_eth_type_conf;
        rte_eth_dev_l2_tunnel_offload_set;
        rte_eth_dev_set_vlan_ether_type;