&pi->link_cfg);
}
+static const uint32_t *
+cxgbe_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev)
+{
+ static const uint32_t ptypes[] = {
+ RTE_PTYPE_L3_IPV4,
+ RTE_PTYPE_L3_IPV6,
+ RTE_PTYPE_UNKNOWN
+ };
+
+ if (eth_dev->rx_pkt_burst == cxgbe_recv_pkts)
+ return ptypes;
+ return NULL;
+}
+
static struct eth_dev_ops cxgbe_eth_dev_ops = {
.dev_start = cxgbe_dev_start,
.dev_stop = cxgbe_dev_stop,
.allmulticast_disable = cxgbe_dev_allmulticast_disable,
.dev_configure = cxgbe_dev_configure,
.dev_infos_get = cxgbe_dev_info_get,
+ .dev_supported_ptypes_get = cxgbe_dev_supported_ptypes_get,
.link_update = cxgbe_dev_link_update,
.mtu_set = cxgbe_dev_mtu_set,
.tx_queue_setup = cxgbe_dev_tx_queue_setup,
static void eth_igb_xstats_reset(struct rte_eth_dev *dev);
static void eth_igb_infos_get(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info);
+static const uint32_t *eth_igb_supported_ptypes_get(struct rte_eth_dev *dev);
static void eth_igbvf_infos_get(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info);
static int eth_igb_flow_ctrl_get(struct rte_eth_dev *dev,
.stats_reset = eth_igb_stats_reset,
.xstats_reset = eth_igb_xstats_reset,
.dev_infos_get = eth_igb_infos_get,
+ .dev_supported_ptypes_get = eth_igb_supported_ptypes_get,
.mtu_set = eth_igb_mtu_set,
.vlan_filter_set = eth_igb_vlan_filter_set,
.vlan_tpid_set = eth_igb_vlan_tpid_set,
.xstats_reset = eth_igbvf_stats_reset,
.vlan_filter_set = igbvf_vlan_filter_set,
.dev_infos_get = eth_igbvf_infos_get,
+ .dev_supported_ptypes_get = eth_igb_supported_ptypes_get,
.rx_queue_setup = eth_igb_rx_queue_setup,
.rx_queue_release = eth_igb_rx_queue_release,
.tx_queue_setup = eth_igb_tx_queue_setup,
dev_info->tx_desc_lim = tx_desc_lim;
}
+static const uint32_t *
+eth_igb_supported_ptypes_get(struct rte_eth_dev *dev)
+{
+ static const uint32_t ptypes[] = {
+ /* refers to igb_rxd_pkt_info_to_pkt_type() */
+ RTE_PTYPE_L2_ETHER,
+ RTE_PTYPE_L3_IPV4,
+ RTE_PTYPE_L3_IPV4_EXT,
+ RTE_PTYPE_L3_IPV6,
+ RTE_PTYPE_L3_IPV6_EXT,
+ RTE_PTYPE_L4_TCP,
+ RTE_PTYPE_L4_UDP,
+ RTE_PTYPE_L4_SCTP,
+ RTE_PTYPE_TUNNEL_IP,
+ RTE_PTYPE_INNER_L3_IPV6,
+ RTE_PTYPE_INNER_L3_IPV6_EXT,
+ RTE_PTYPE_INNER_L4_TCP,
+ RTE_PTYPE_INNER_L4_UDP,
+ RTE_PTYPE_UNKNOWN
+ };
+
+ if (dev->rx_pkt_burst == eth_igb_recv_pkts ||
+ dev->rx_pkt_burst == eth_igb_recv_scattered_pkts)
+ return ptypes;
+ return NULL;
+}
+
static void
eth_igbvf_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
};
}
+static const uint32_t *enicpmd_dev_supported_ptypes_get(struct rte_eth_dev *dev)
+{
+ static const uint32_t ptypes[] = {
+ RTE_PTYPE_L3_IPV4,
+ RTE_PTYPE_L3_IPV6,
+ RTE_PTYPE_UNKNOWN
+ };
+
+ if (dev->rx_pkt_burst == enic_recv_pkts)
+ return ptypes;
+ return NULL;
+}
+
static void enicpmd_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
{
struct enic *enic = pmd_priv(eth_dev);
.stats_reset = enicpmd_dev_stats_reset,
.queue_stats_mapping_set = NULL,
.dev_infos_get = enicpmd_dev_info_get,
+ .dev_supported_ptypes_get = enicpmd_dev_supported_ptypes_get,
.mtu_set = NULL,
.vlan_filter_set = enicpmd_vlan_filter_set,
.vlan_tpid_set = NULL,
};
}
+#ifdef RTE_LIBRTE_FM10K_RX_OLFLAGS_ENABLE
+static const uint32_t *
+fm10k_dev_supported_ptypes_get(struct rte_eth_dev *dev)
+{
+ if (dev->rx_pkt_burst == fm10k_recv_pkts ||
+ dev->rx_pkt_burst == fm10k_recv_scattered_pkts) {
+ static const uint32_t ptypes[] = {
+ /* refers to rx_desc_to_ol_flags() */
+ RTE_PTYPE_L2_ETHER,
+ RTE_PTYPE_L3_IPV4,
+ RTE_PTYPE_L3_IPV4_EXT,
+ RTE_PTYPE_L3_IPV6,
+ RTE_PTYPE_L3_IPV6_EXT,
+ RTE_PTYPE_L4_TCP,
+ RTE_PTYPE_L4_UDP,
+ RTE_PTYPE_UNKNOWN
+ };
+
+ return ptypes;
+ } else if (dev->rx_pkt_burst == fm10k_recv_pkts_vec ||
+ dev->rx_pkt_burst == fm10k_recv_scattered_pkts_vec) {
+ static const uint32_t ptypes_vec[] = {
+ /* refers to fm10k_desc_to_pktype_v() */
+ RTE_PTYPE_L3_IPV4,
+ RTE_PTYPE_L3_IPV4_EXT,
+ RTE_PTYPE_L3_IPV6,
+ RTE_PTYPE_L3_IPV6_EXT,
+ RTE_PTYPE_L4_TCP,
+ RTE_PTYPE_L4_UDP,
+ RTE_PTYPE_TUNNEL_GENEVE,
+ RTE_PTYPE_TUNNEL_NVGRE,
+ RTE_PTYPE_TUNNEL_VXLAN,
+ RTE_PTYPE_TUNNEL_GRE,
+ RTE_PTYPE_UNKNOWN
+ };
+
+ return ptypes_vec;
+ }
+
+ return NULL;
+}
+#else
+static const uint32_t *
+fm10k_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
+{
+ return NULL;
+}
+#endif
+
static int
fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
.xstats_reset = fm10k_stats_reset,
.link_update = fm10k_link_update,
.dev_infos_get = fm10k_dev_infos_get,
+ .dev_supported_ptypes_get = fm10k_dev_supported_ptypes_get,
.vlan_filter_set = fm10k_vlan_filter_set,
.vlan_offload_set = fm10k_vlan_offload_set,
.mac_addr_add = fm10k_macaddr_add,
}
#endif
+/* @note: When this function is changed, make corresponding change to
+ * fm10k_dev_supported_ptypes_get().
+ */
static inline void
rx_desc_to_ol_flags(struct rte_mbuf *m, const union fm10k_rx_desc *d)
{
rx_pkts[3]->ol_flags = vol.e[3];
}
+/* @note: When this function is changed, make corresponding change to
+ * fm10k_dev_supported_ptypes_get().
+ */
static inline void
fm10k_desc_to_pktype_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
{
.xstats_reset = i40e_dev_stats_reset,
.queue_stats_mapping_set = i40e_dev_queue_stats_mapping_set,
.dev_infos_get = i40e_dev_info_get,
+ .dev_supported_ptypes_get = i40e_dev_supported_ptypes_get,
.vlan_filter_set = i40e_vlan_filter_set,
.vlan_tpid_set = i40e_vlan_tpid_set,
.vlan_offload_set = i40e_vlan_offload_set,
.xstats_reset = i40evf_dev_xstats_reset,
.dev_close = i40evf_dev_close,
.dev_infos_get = i40evf_dev_info_get,
+ .dev_supported_ptypes_get = i40e_dev_supported_ptypes_get,
.vlan_filter_set = i40evf_vlan_filter_set,
.vlan_offload_set = i40evf_vlan_offload_set,
.vlan_pvid_set = i40evf_vlan_pvid_set,
}
#endif
-/* For each value it means, datasheet of hardware can tell more details */
+/* The hardware datasheet gives more details on the meaning of each value.
+ *
+ * @note: fix i40e_dev_supported_ptypes_get() if any change here.
+ */
static inline uint32_t
i40e_rxd_pkt_type_mapping(uint8_t ptype)
{
return 0;
}
+const uint32_t *
+i40e_dev_supported_ptypes_get(struct rte_eth_dev *dev)
+{
+ static const uint32_t ptypes[] = {
+ /* refers to i40e_rxd_pkt_type_mapping() */
+ RTE_PTYPE_L2_ETHER,
+ RTE_PTYPE_L2_ETHER_TIMESYNC,
+ RTE_PTYPE_L2_ETHER_LLDP,
+ RTE_PTYPE_L2_ETHER_ARP,
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
+ RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
+ RTE_PTYPE_L4_FRAG,
+ RTE_PTYPE_L4_ICMP,
+ RTE_PTYPE_L4_NONFRAG,
+ RTE_PTYPE_L4_SCTP,
+ RTE_PTYPE_L4_TCP,
+ RTE_PTYPE_L4_UDP,
+ RTE_PTYPE_TUNNEL_GRENAT,
+ RTE_PTYPE_TUNNEL_IP,
+ RTE_PTYPE_INNER_L2_ETHER,
+ RTE_PTYPE_INNER_L2_ETHER_VLAN,
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
+ RTE_PTYPE_INNER_L4_FRAG,
+ RTE_PTYPE_INNER_L4_ICMP,
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ RTE_PTYPE_INNER_L4_SCTP,
+ RTE_PTYPE_INNER_L4_TCP,
+ RTE_PTYPE_INNER_L4_UDP,
+ RTE_PTYPE_UNKNOWN
+ };
+
+ if (dev->rx_pkt_burst == i40e_recv_pkts ||
+#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
+ dev->rx_pkt_burst == i40e_recv_pkts_bulk_alloc ||
+#endif
+ dev->rx_pkt_burst == i40e_recv_scattered_pkts)
+ return ptypes;
+ return NULL;
+}
+
int
i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
int i40e_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int i40e_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
int i40e_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
+const uint32_t *i40e_dev_supported_ptypes_get(struct rte_eth_dev *dev);
int i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
uint16_t nb_desc,
uint8_t is_rx);
static void ixgbe_dev_info_get(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info);
+static const uint32_t *ixgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev);
static void ixgbevf_dev_info_get(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info);
static int ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
.xstats_reset = ixgbe_dev_xstats_reset,
.queue_stats_mapping_set = ixgbe_dev_queue_stats_mapping_set,
.dev_infos_get = ixgbe_dev_info_get,
+ .dev_supported_ptypes_get = ixgbe_dev_supported_ptypes_get,
.mtu_set = ixgbe_dev_mtu_set,
.vlan_filter_set = ixgbe_vlan_filter_set,
.vlan_tpid_set = ixgbe_vlan_tpid_set,
.allmulticast_enable = ixgbevf_dev_allmulticast_enable,
.allmulticast_disable = ixgbevf_dev_allmulticast_disable,
.dev_infos_get = ixgbevf_dev_info_get,
+ .dev_supported_ptypes_get = ixgbe_dev_supported_ptypes_get,
.mtu_set = ixgbevf_dev_set_mtu,
.vlan_filter_set = ixgbevf_vlan_filter_set,
.vlan_strip_queue_set = ixgbevf_vlan_strip_queue_set,
dev_info->flow_type_rss_offloads = IXGBE_RSS_OFFLOAD_ALL;
}
+static const uint32_t *
+ixgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
+{
+ static const uint32_t ptypes[] = {
+ /* For non-vec functions,
+ * refers to ixgbe_rxd_pkt_info_to_pkt_type();
+ * for vec functions,
+ * refers to _recv_raw_pkts_vec().
+ */
+ RTE_PTYPE_L2_ETHER,
+ RTE_PTYPE_L3_IPV4,
+ RTE_PTYPE_L3_IPV4_EXT,
+ RTE_PTYPE_L3_IPV6,
+ RTE_PTYPE_L3_IPV6_EXT,
+ RTE_PTYPE_L4_SCTP,
+ RTE_PTYPE_L4_TCP,
+ RTE_PTYPE_L4_UDP,
+ RTE_PTYPE_TUNNEL_IP,
+ RTE_PTYPE_INNER_L3_IPV6,
+ RTE_PTYPE_INNER_L3_IPV6_EXT,
+ RTE_PTYPE_INNER_L4_TCP,
+ RTE_PTYPE_INNER_L4_UDP,
+ RTE_PTYPE_UNKNOWN
+ };
+
+ if (dev->rx_pkt_burst == ixgbe_recv_pkts ||
+ dev->rx_pkt_burst == ixgbe_recv_pkts_lro_single_alloc ||
+ dev->rx_pkt_burst == ixgbe_recv_pkts_lro_bulk_alloc ||
+ dev->rx_pkt_burst == ixgbe_recv_pkts_bulk_alloc ||
+ dev->rx_pkt_burst == ixgbe_recv_pkts_vec ||
+ dev->rx_pkt_burst == ixgbe_recv_scattered_pkts_vec)
+ return ptypes;
+ return NULL;
+}
+
static void
ixgbevf_dev_info_get(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info)
uint16_t ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
+uint16_t ixgbe_recv_pkts_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+
uint16_t ixgbe_recv_pkts_lro_single_alloc(void *rx_queue,
struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
uint16_t ixgbe_recv_pkts_lro_bulk_alloc(void *rx_queue,
#define IXGBE_PACKET_TYPE_MAX 0X80
#define IXGBE_PACKET_TYPE_MASK 0X7F
#define IXGBE_PACKET_TYPE_SHIFT 0X04
+
+/* @note: fix ixgbe_dev_supported_ptypes_get() if any change here. */
static inline uint32_t
ixgbe_rxd_pkt_info_to_pkt_type(uint16_t pkt_info)
{
}
/* split requests into chunks of size RTE_PMD_IXGBE_RX_MAX_BURST */
-static uint16_t
+uint16_t
ixgbe_recv_pkts_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts)
{
* @param flags
* RX completion flags returned by poll_length_flags().
*
+ * @note: fix mlx4_dev_supported_ptypes_get() if any change here.
+ *
* @return
* Packet type for struct rte_mbuf.
*/
priv_unlock(priv);
}
+static const uint32_t *
+mlx4_dev_supported_ptypes_get(struct rte_eth_dev *dev)
+{
+ static const uint32_t ptypes[] = {
+ /* refers to rxq_cq_to_pkt_type() */
+ RTE_PTYPE_L3_IPV4,
+ RTE_PTYPE_L3_IPV6,
+ RTE_PTYPE_INNER_L3_IPV4,
+ RTE_PTYPE_INNER_L3_IPV6,
+ RTE_PTYPE_UNKNOWN
+ };
+
+ if (dev->rx_pkt_burst == mlx4_rx_burst ||
+ dev->rx_pkt_burst == mlx4_rx_burst_sp)
+ return ptypes;
+ return NULL;
+}
+
/**
* DPDK callback to get device statistics.
*
.stats_reset = mlx4_stats_reset,
.queue_stats_mapping_set = NULL,
.dev_infos_get = mlx4_dev_infos_get,
+ .dev_supported_ptypes_get = mlx4_dev_supported_ptypes_get,
.vlan_filter_set = mlx4_vlan_filter_set,
.vlan_tpid_set = NULL,
.vlan_strip_queue_set = NULL,
.stats_get = mlx5_stats_get,
.stats_reset = mlx5_stats_reset,
.dev_infos_get = mlx5_dev_infos_get,
+ .dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get,
.vlan_filter_set = mlx5_vlan_filter_set,
.rx_queue_setup = mlx5_rx_queue_setup,
.tx_queue_setup = mlx5_tx_queue_setup,
int priv_set_flags(struct priv *, unsigned int, unsigned int);
int mlx5_dev_configure(struct rte_eth_dev *);
void mlx5_dev_infos_get(struct rte_eth_dev *, struct rte_eth_dev_info *);
+const uint32_t *mlx5_dev_supported_ptypes_get(struct rte_eth_dev *dev);
int mlx5_link_update(struct rte_eth_dev *, int);
int mlx5_dev_set_mtu(struct rte_eth_dev *, uint16_t);
int mlx5_dev_get_flow_ctrl(struct rte_eth_dev *, struct rte_eth_fc_conf *);
priv_unlock(priv);
}
+const uint32_t *
+mlx5_dev_supported_ptypes_get(struct rte_eth_dev *dev)
+{
+ static const uint32_t ptypes[] = {
+ /* refers to rxq_cq_to_pkt_type() */
+ RTE_PTYPE_L3_IPV4,
+ RTE_PTYPE_L3_IPV6,
+ RTE_PTYPE_INNER_L3_IPV4,
+ RTE_PTYPE_INNER_L3_IPV6,
+ RTE_PTYPE_UNKNOWN
+ };
+
+ if (dev->rx_pkt_burst == mlx5_rx_burst ||
+ dev->rx_pkt_burst == mlx5_rx_burst_sp)
+ return ptypes;
+ return NULL;
+}
+
/**
* DPDK callback to retrieve physical link information (unlocked version).
*
* @param flags
* RX completion flags returned by poll_length_flags().
*
+ * @note: fix mlx5_dev_supported_ptypes_get() if any change here.
+ *
* @return
* Packet type for struct rte_mbuf.
*/
dev_info->hash_key_size = NFP_NET_CFG_RSS_KEY_SZ;
}
+static const uint32_t *
+nfp_net_supported_ptypes_get(struct rte_eth_dev *dev)
+{
+ static const uint32_t ptypes[] = {
+ /* refers to nfp_net_set_hash() */
+ RTE_PTYPE_INNER_L3_IPV4,
+ RTE_PTYPE_INNER_L3_IPV6,
+ RTE_PTYPE_INNER_L3_IPV6_EXT,
+ RTE_PTYPE_INNER_L4_MASK,
+ RTE_PTYPE_UNKNOWN
+ };
+
+ if (dev->rx_pkt_burst == nfp_net_recv_pkts)
+ return ptypes;
+ return NULL;
+}
+
static uint32_t
nfp_net_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_idx)
{
.stats_get = nfp_net_stats_get,
.stats_reset = nfp_net_stats_reset,
.dev_infos_get = nfp_net_infos_get,
+ .dev_supported_ptypes_get = nfp_net_supported_ptypes_get,
.mtu_set = nfp_net_dev_mtu_set,
.vlan_offload_set = nfp_net_vlan_offload_set,
.reta_update = nfp_net_reta_update,
struct rte_eth_stats *stats);
static void vmxnet3_dev_info_get(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info);
+static const uint32_t *
+vmxnet3_dev_supported_ptypes_get(struct rte_eth_dev *dev);
static int vmxnet3_dev_vlan_filter_set(struct rte_eth_dev *dev,
uint16_t vid, int on);
static void vmxnet3_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask);
.stats_get = vmxnet3_dev_stats_get,
.mac_addr_set = vmxnet3_mac_addr_set,
.dev_infos_get = vmxnet3_dev_info_get,
+ .dev_supported_ptypes_get = vmxnet3_dev_supported_ptypes_get,
.vlan_filter_set = vmxnet3_dev_vlan_filter_set,
.vlan_offload_set = vmxnet3_dev_vlan_offload_set,
.rx_queue_setup = vmxnet3_dev_rx_queue_setup,
DEV_TX_OFFLOAD_TCP_TSO;
}
+static const uint32_t *
+vmxnet3_dev_supported_ptypes_get(struct rte_eth_dev *dev)
+{
+ static const uint32_t ptypes[] = {
+ RTE_PTYPE_L3_IPV4_EXT,
+ RTE_PTYPE_L3_IPV4,
+ RTE_PTYPE_UNKNOWN
+ };
+
+ if (dev->rx_pkt_burst == vmxnet3_recv_pkts)
+ return ptypes;
+ return NULL;
+}
+
static void
vmxnet3_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
{
dev_info->driver_name = dev->data->drv_name;
}
+int
+rte_eth_dev_get_supported_ptypes(uint8_t port_id, uint32_t ptype_mask,
+ uint32_t *ptypes, int num)
+{
+ int i, j;
+ struct rte_eth_dev *dev;
+ const uint32_t *all_ptypes;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ dev = &rte_eth_devices[port_id];
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_supported_ptypes_get,
+ -ENOTSUP);
+ all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
+
+ if (!all_ptypes)
+ return 0;
+
+ for (i = 0, j = 0; all_ptypes[i] != RTE_PTYPE_UNKNOWN; ++i)
+ if (all_ptypes[i] & ptype_mask) {
+ if (j < num)
+ ptypes[j] = all_ptypes[i];
+ j++;
+ }
+
+ return j;
+}
+
void
rte_eth_macaddr_get(uint8_t port_id, struct ether_addr *mac_addr)
{
struct rte_eth_dev_info *dev_info);
/**< @internal Get specific informations of an Ethernet device. */
+typedef const uint32_t *(*eth_dev_supported_ptypes_get_t)(struct rte_eth_dev *dev);
+/**< @internal Get supported ptypes of an Ethernet device. */
+
typedef int (*eth_queue_start_t)(struct rte_eth_dev *dev,
uint16_t queue_id);
/**< @internal Start rx and tx of a queue of an Ethernet device. */
eth_queue_stats_mapping_set_t queue_stats_mapping_set;
/**< Configure per queue stat counter mapping. */
eth_dev_infos_get_t dev_infos_get; /**< Get device info. */
+ eth_dev_supported_ptypes_get_t dev_supported_ptypes_get;
+ /**< Get packet types supported and identified by device. */
mtu_set_t mtu_set; /**< Set MTU. */
vlan_filter_set_t vlan_filter_set; /**< Filter VLAN Setup. */
vlan_tpid_set_t vlan_tpid_set; /**< Outer VLAN TPID Setup. */
*/
void rte_eth_dev_info_get(uint8_t port_id, struct rte_eth_dev_info *dev_info);
+/**
+ * Retrieve the supported packet types of an Ethernet device.
+ *
+ * @note
+ * It is best to invoke this API after the device has been started or its
+ * Rx burst function has been chosen, so that the reported ptypes are
+ * accurate.
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @param ptype_mask
+ * A hint of which packet types the caller is interested in.
+ * @param ptypes
+ * An array, allocated by the caller, in which the matching packet types
+ * are stored.
+ * @param num
+ * Size of the array pointed to by param ptypes.
+ * @return
+ * - (>0) Number of supported ptypes. If it exceeds param num, only the
+ * first num entries are filled in the given array.
+ * - (0 or -ENOTSUP) if the PMD does not report its supported ptypes.
+ * - (-ENODEV) if *port_id* is invalid.
+ */
+int rte_eth_dev_get_supported_ptypes(uint8_t port_id, uint32_t ptype_mask,
+ uint32_t *ptypes, int num);
+
/**
* Retrieve the MTU of an Ethernet device.
*
DPDK_16.04 {
global:
+ rte_eth_dev_get_supported_ptypes;
rte_eth_dev_l2_tunnel_eth_type_conf;
rte_eth_dev_l2_tunnel_offload_set;
rte_eth_dev_set_vlan_ether_type;