``rx_descriptor_done`` is used, status can be "DD bit is set" or "DD bit is
not set".
-* **[implements] eth_dev_ops**: ``rx_descriptor_status``.
+* **[implements] rte_eth_dev**: ``rx_descriptor_status``.
* **[related] API**: ``rte_eth_rx_descriptor_status()``.
-* **[implements] eth_dev_ops**: ``rx_descriptor_done``.
+* **[implements] rte_eth_dev**: ``rx_descriptor_done``.
* **[related] API**: ``rte_eth_rx_descriptor_done()``.
Supports checking the status of a Tx descriptor. Status can be "Full", "Done"
or "Unavailable".
-* **[implements] eth_dev_ops**: ``tx_descriptor_status``.
+* **[implements] rte_eth_dev**: ``tx_descriptor_status``.
* **[related] API**: ``rte_eth_tx_descriptor_status()``.
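For the APIs listed above, a minimal application-side usage sketch (the port,
queue and offset values are arbitrary placeholders, not taken from the patch)
could look as follows::

    uint16_t port_id = 0, queue_id = 0, offset = 0;
    int rx_status, tx_status;

    rx_status = rte_eth_rx_descriptor_status(port_id, queue_id, offset);
    if (rx_status == RTE_ETH_RX_DESC_DONE) {
            /* Descriptor holds a received packet not yet processed. */
    }

    tx_status = rte_eth_tx_descriptor_status(port_id, queue_id, offset);
    if (tx_status == RTE_ETH_TX_DESC_FULL) {
            /* Descriptor is still owned by hardware; transmit pending. */
    }
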
Also, make sure to start the actual text at the margin.
=======================================================
+* ``ethdev`` changes
+
+  * The following device operation function pointers have been moved
+    from ``struct eth_dev_ops`` to ``struct rte_eth_dev``
+    (see the driver-side sketch after this list):
+
+ * ``eth_rx_queue_count_t rx_queue_count;``
+ * ``eth_rx_descriptor_done_t rx_descriptor_done;``
+ * ``eth_rx_descriptor_status_t rx_descriptor_status;``
+ * ``eth_tx_descriptor_status_t tx_descriptor_status;``
+
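A minimal, non-authoritative sketch of the resulting driver pattern (the
``mydrv_*`` names are hypothetical and not from any in-tree PMD): the
queue-count and descriptor-status callbacks are no longer listed in the
driver's ``struct eth_dev_ops`` and are instead assigned directly on
``struct rte_eth_dev`` at init time, alongside the burst function pointers::

    static const struct eth_dev_ops mydrv_eth_dev_ops = {
            .dev_configure = mydrv_dev_configure,
            /* rx_queue_count, rx_descriptor_status and tx_descriptor_status
             * are no longer members of struct eth_dev_ops.
             */
    };

    static int
    mydrv_eth_dev_init(struct rte_eth_dev *eth_dev)
    {
            eth_dev->dev_ops = &mydrv_eth_dev_ops;
            /* Fast-path pointers now live on struct rte_eth_dev itself. */
            eth_dev->rx_queue_count = mydrv_rx_queue_count;
            eth_dev->rx_descriptor_status = mydrv_rx_descriptor_status;
            eth_dev->tx_descriptor_status = mydrv_tx_descriptor_status;
            eth_dev->rx_pkt_burst = mydrv_recv_pkts;
            eth_dev->tx_pkt_burst = mydrv_xmit_pkts;
            return 0;
    }
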
Known Issues
------------
.dev_infos_get = eth_ark_dev_info_get,
.rx_queue_setup = eth_ark_dev_rx_queue_setup,
- .rx_queue_count = eth_ark_dev_rx_queue_count,
.tx_queue_setup = eth_ark_tx_queue_setup,
.link_update = eth_ark_dev_link_update,
return -1;
dev->dev_ops = &ark_eth_dev_ops;
+ dev->rx_queue_count = eth_ark_dev_rx_queue_count;
dev->data->mac_addrs = rte_zmalloc("ark", RTE_ETHER_ADDR_LEN, 0);
if (!dev->data->mac_addrs) {
.rx_queue_intr_enable = atl_dev_rx_queue_intr_enable,
.rx_queue_intr_disable = atl_dev_rx_queue_intr_disable,
- .rx_queue_count = atl_rx_queue_count,
- .rx_descriptor_status = atl_dev_rx_descriptor_status,
- .tx_descriptor_status = atl_dev_tx_descriptor_status,
-
/* EEPROM */
.get_eeprom_length = atl_dev_get_eeprom_length,
.get_eeprom = atl_dev_get_eeprom,
PMD_INIT_FUNC_TRACE();
eth_dev->dev_ops = &atl_eth_dev_ops;
+
+ eth_dev->rx_queue_count = atl_rx_queue_count;
+ eth_dev->rx_descriptor_status = atl_dev_rx_descriptor_status;
+ eth_dev->tx_descriptor_status = atl_dev_tx_descriptor_status;
+
eth_dev->rx_pkt_burst = &atl_recv_pkts;
eth_dev->tx_pkt_burst = &atl_xmit_pkts;
eth_dev->tx_pkt_prepare = &atl_prep_pkts;
.rxq_info_get = axgbe_rxq_info_get,
.txq_info_get = axgbe_txq_info_get,
.dev_supported_ptypes_get = axgbe_dev_supported_ptypes_get,
- .rx_descriptor_status = axgbe_dev_rx_descriptor_status,
- .tx_descriptor_status = axgbe_dev_tx_descriptor_status,
.mtu_set = axgb_mtu_set,
};
eth_dev->dev_ops = &axgbe_eth_dev_ops;
+ eth_dev->rx_descriptor_status = axgbe_dev_rx_descriptor_status;
+ eth_dev->tx_descriptor_status = axgbe_dev_tx_descriptor_status;
+
/*
* For secondary processes, we don't initialise any further as primary
* has already done this work.
.dev_led_off = bnxt_dev_led_off_op,
.xstats_get_by_id = bnxt_dev_xstats_get_by_id_op,
.xstats_get_names_by_id = bnxt_dev_xstats_get_names_by_id_op,
- .rx_queue_count = bnxt_rx_queue_count_op,
- .rx_descriptor_status = bnxt_rx_descriptor_status_op,
- .tx_descriptor_status = bnxt_tx_descriptor_status_op,
.rx_queue_start = bnxt_rx_queue_start,
.rx_queue_stop = bnxt_rx_queue_stop,
.tx_queue_start = bnxt_tx_queue_start,
PMD_DRV_LOG(INFO, "%s\n", bnxt_version);
eth_dev->dev_ops = &bnxt_dev_ops;
+ eth_dev->rx_queue_count = bnxt_rx_queue_count_op;
+ eth_dev->rx_descriptor_status = bnxt_rx_descriptor_status_op;
+ eth_dev->tx_descriptor_status = bnxt_tx_descriptor_status_op;
eth_dev->rx_pkt_burst = &bnxt_recv_pkts;
eth_dev->tx_pkt_burst = &bnxt_xmit_pkts;
.tx_queue_setup = dpaa_eth_tx_queue_setup,
.rx_queue_release = dpaa_eth_rx_queue_release,
.tx_queue_release = dpaa_eth_tx_queue_release,
- .rx_queue_count = dpaa_dev_rx_queue_count,
.rx_burst_mode_get = dpaa_dev_rx_burst_mode_get,
.tx_burst_mode_get = dpaa_dev_tx_burst_mode_get,
.rxq_info_get = dpaa_rxq_info_get,
/* Populate ethdev structure */
eth_dev->dev_ops = &dpaa_devops;
+ eth_dev->rx_queue_count = dpaa_dev_rx_queue_count;
eth_dev->rx_pkt_burst = dpaa_eth_queue_rx;
eth_dev->tx_pkt_burst = dpaa_eth_tx_drop_all;
.tx_queue_release = dpaa2_dev_tx_queue_release,
.rx_burst_mode_get = dpaa2_dev_rx_burst_mode_get,
.tx_burst_mode_get = dpaa2_dev_tx_burst_mode_get,
- .rx_queue_count = dpaa2_dev_rx_queue_count,
.flow_ctrl_get = dpaa2_flow_ctrl_get,
.flow_ctrl_set = dpaa2_flow_ctrl_set,
.mac_addr_add = dpaa2_dev_add_mac_addr,
* plugged.
*/
eth_dev->dev_ops = &dpaa2_ethdev_ops;
+ eth_dev->rx_queue_count = dpaa2_dev_rx_queue_count;
if (dpaa2_get_devargs(dev->devargs, DRIVER_LOOPBACK_MODE))
eth_dev->rx_pkt_burst = dpaa2_dev_loopback_rx;
else if (dpaa2_get_devargs(dev->devargs,
.vlan_offload_set = eth_em_vlan_offload_set,
.rx_queue_setup = eth_em_rx_queue_setup,
.rx_queue_release = eth_em_rx_queue_release,
- .rx_queue_count = eth_em_rx_queue_count,
- .rx_descriptor_done = eth_em_rx_descriptor_done,
- .rx_descriptor_status = eth_em_rx_descriptor_status,
- .tx_descriptor_status = eth_em_tx_descriptor_status,
.tx_queue_setup = eth_em_tx_queue_setup,
.tx_queue_release = eth_em_tx_queue_release,
.rx_queue_intr_enable = eth_em_rx_queue_intr_enable,
E1000_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
eth_dev->dev_ops = &eth_em_ops;
+ eth_dev->rx_queue_count = eth_em_rx_queue_count;
+ eth_dev->rx_descriptor_done = eth_em_rx_descriptor_done;
+ eth_dev->rx_descriptor_status = eth_em_rx_descriptor_status;
+ eth_dev->tx_descriptor_status = eth_em_tx_descriptor_status;
eth_dev->rx_pkt_burst = (eth_rx_burst_t)&eth_em_recv_pkts;
eth_dev->tx_pkt_burst = (eth_tx_burst_t)&eth_em_xmit_pkts;
eth_dev->tx_pkt_prepare = (eth_tx_prep_t)&eth_em_prep_pkts;
.rx_queue_intr_enable = eth_igb_rx_queue_intr_enable,
.rx_queue_intr_disable = eth_igb_rx_queue_intr_disable,
.rx_queue_release = eth_igb_rx_queue_release,
- .rx_queue_count = eth_igb_rx_queue_count,
- .rx_descriptor_done = eth_igb_rx_descriptor_done,
- .rx_descriptor_status = eth_igb_rx_descriptor_status,
- .tx_descriptor_status = eth_igb_tx_descriptor_status,
.tx_queue_setup = eth_igb_tx_queue_setup,
.tx_queue_release = eth_igb_tx_queue_release,
.tx_done_cleanup = eth_igb_tx_done_cleanup,
.dev_supported_ptypes_get = eth_igb_supported_ptypes_get,
.rx_queue_setup = eth_igb_rx_queue_setup,
.rx_queue_release = eth_igb_rx_queue_release,
- .rx_descriptor_done = eth_igb_rx_descriptor_done,
- .rx_descriptor_status = eth_igb_rx_descriptor_status,
- .tx_descriptor_status = eth_igb_tx_descriptor_status,
.tx_queue_setup = eth_igb_tx_queue_setup,
.tx_queue_release = eth_igb_tx_queue_release,
.tx_done_cleanup = eth_igb_tx_done_cleanup,
uint32_t ctrl_ext;
eth_dev->dev_ops = &eth_igb_ops;
+ eth_dev->rx_queue_count = eth_igb_rx_queue_count;
+ eth_dev->rx_descriptor_done = eth_igb_rx_descriptor_done;
+ eth_dev->rx_descriptor_status = eth_igb_rx_descriptor_status;
+ eth_dev->tx_descriptor_status = eth_igb_tx_descriptor_status;
eth_dev->rx_pkt_burst = &eth_igb_recv_pkts;
eth_dev->tx_pkt_burst = &eth_igb_xmit_pkts;
eth_dev->tx_pkt_prepare = &eth_igb_prep_pkts;
PMD_INIT_FUNC_TRACE();
eth_dev->dev_ops = &igbvf_eth_dev_ops;
+ eth_dev->rx_descriptor_done = eth_igb_rx_descriptor_done;
+ eth_dev->rx_descriptor_status = eth_igb_rx_descriptor_status;
+ eth_dev->tx_descriptor_status = eth_igb_tx_descriptor_status;
eth_dev->rx_pkt_burst = &eth_igb_recv_pkts;
eth_dev->tx_pkt_burst = &eth_igb_xmit_pkts;
eth_dev->tx_pkt_prepare = &eth_igb_prep_pkts;
.tx_queue_stop = enicpmd_dev_tx_queue_stop,
.rx_queue_setup = enicpmd_dev_rx_queue_setup,
.rx_queue_release = enicpmd_dev_rx_queue_release,
- .rx_queue_count = enicpmd_dev_rx_queue_count,
- .rx_descriptor_done = NULL,
.tx_queue_setup = enicpmd_dev_tx_queue_setup,
.tx_queue_release = enicpmd_dev_tx_queue_release,
.rx_queue_intr_enable = enicpmd_dev_rx_queue_intr_enable,
ENICPMD_FUNC_TRACE();
eth_dev->dev_ops = &enicpmd_eth_dev_ops;
+ eth_dev->rx_queue_count = enicpmd_dev_rx_queue_count;
eth_dev->rx_pkt_burst = &enic_recv_pkts;
eth_dev->tx_pkt_burst = &enic_xmit_pkts;
eth_dev->tx_pkt_prepare = &enic_prep_pkts;
.rx_queue_release = fm10k_rx_queue_release,
.tx_queue_setup = fm10k_tx_queue_setup,
.tx_queue_release = fm10k_tx_queue_release,
- .rx_queue_count = fm10k_dev_rx_queue_count,
- .rx_descriptor_done = fm10k_dev_rx_descriptor_done,
- .rx_descriptor_status = fm10k_dev_rx_descriptor_status,
- .tx_descriptor_status = fm10k_dev_tx_descriptor_status,
.rx_queue_intr_enable = fm10k_dev_rx_queue_intr_enable,
.rx_queue_intr_disable = fm10k_dev_rx_queue_intr_disable,
.reta_update = fm10k_reta_update,
PMD_INIT_FUNC_TRACE();
dev->dev_ops = &fm10k_eth_dev_ops;
+ dev->rx_queue_count = fm10k_dev_rx_queue_count;
+ dev->rx_descriptor_done = fm10k_dev_rx_descriptor_done;
+ dev->rx_descriptor_status = fm10k_dev_rx_descriptor_status;
+ dev->tx_descriptor_status = fm10k_dev_tx_descriptor_status;
dev->rx_pkt_burst = &fm10k_recv_pkts;
dev->tx_pkt_burst = &fm10k_xmit_pkts;
dev->tx_pkt_prepare = &fm10k_prep_pkts;
.rx_queue_intr_enable = i40e_dev_rx_queue_intr_enable,
.rx_queue_intr_disable = i40e_dev_rx_queue_intr_disable,
.rx_queue_release = i40e_dev_rx_queue_release,
- .rx_queue_count = i40e_dev_rx_queue_count,
- .rx_descriptor_done = i40e_dev_rx_descriptor_done,
- .rx_descriptor_status = i40e_dev_rx_descriptor_status,
- .tx_descriptor_status = i40e_dev_tx_descriptor_status,
.tx_queue_setup = i40e_dev_tx_queue_setup,
.tx_queue_release = i40e_dev_tx_queue_release,
.dev_led_on = i40e_dev_led_on,
PMD_INIT_FUNC_TRACE();
dev->dev_ops = &i40e_eth_dev_ops;
+ dev->rx_queue_count = i40e_dev_rx_queue_count;
+ dev->rx_descriptor_done = i40e_dev_rx_descriptor_done;
+ dev->rx_descriptor_status = i40e_dev_rx_descriptor_status;
+ dev->tx_descriptor_status = i40e_dev_tx_descriptor_status;
dev->rx_pkt_burst = i40e_recv_pkts;
dev->tx_pkt_burst = i40e_xmit_pkts;
dev->tx_pkt_prepare = i40e_prep_pkts;
.rx_queue_release = i40e_dev_rx_queue_release,
.rx_queue_intr_enable = i40evf_dev_rx_queue_intr_enable,
.rx_queue_intr_disable = i40evf_dev_rx_queue_intr_disable,
- .rx_descriptor_done = i40e_dev_rx_descriptor_done,
- .rx_descriptor_status = i40e_dev_rx_descriptor_status,
- .tx_descriptor_status = i40e_dev_tx_descriptor_status,
.tx_queue_setup = i40e_dev_tx_queue_setup,
.tx_queue_release = i40e_dev_tx_queue_release,
- .rx_queue_count = i40e_dev_rx_queue_count,
.rxq_info_get = i40e_rxq_info_get,
.txq_info_get = i40e_txq_info_get,
.mac_addr_add = i40evf_add_mac_addr,
/* assign ops func pointer */
eth_dev->dev_ops = &i40evf_eth_dev_ops;
+ eth_dev->rx_queue_count = i40e_dev_rx_queue_count;
+ eth_dev->rx_descriptor_done = i40e_dev_rx_descriptor_done;
+ eth_dev->rx_descriptor_status = i40e_dev_rx_descriptor_status;
+ eth_dev->tx_descriptor_status = i40e_dev_tx_descriptor_status;
eth_dev->rx_pkt_burst = &i40e_recv_pkts;
eth_dev->tx_pkt_burst = &i40e_xmit_pkts;
.rss_hash_conf_get = iavf_dev_rss_hash_conf_get,
.rxq_info_get = iavf_dev_rxq_info_get,
.txq_info_get = iavf_dev_txq_info_get,
- .rx_queue_count = iavf_dev_rxq_count,
- .rx_descriptor_status = iavf_dev_rx_desc_status,
- .tx_descriptor_status = iavf_dev_tx_desc_status,
.mtu_set = iavf_dev_mtu_set,
.rx_queue_intr_enable = iavf_dev_rx_queue_intr_enable,
.rx_queue_intr_disable = iavf_dev_rx_queue_intr_disable,
/* assign ops func pointer */
eth_dev->dev_ops = &iavf_eth_dev_ops;
+ eth_dev->rx_queue_count = iavf_dev_rxq_count;
+ eth_dev->rx_descriptor_status = iavf_dev_rx_desc_status;
+ eth_dev->tx_descriptor_status = iavf_dev_tx_desc_status;
eth_dev->rx_pkt_burst = &iavf_recv_pkts;
eth_dev->tx_pkt_burst = &iavf_xmit_pkts;
eth_dev->tx_pkt_prepare = &iavf_prep_pkts;
.tx_burst_mode_get = ice_tx_burst_mode_get,
.get_eeprom_length = ice_get_eeprom_length,
.get_eeprom = ice_get_eeprom,
- .rx_queue_count = ice_rx_queue_count,
- .rx_descriptor_status = ice_rx_descriptor_status,
- .tx_descriptor_status = ice_tx_descriptor_status,
.stats_get = ice_stats_get,
.stats_reset = ice_stats_reset,
.xstats_get = ice_xstats_get,
int ret;
dev->dev_ops = &ice_eth_dev_ops;
+ dev->rx_queue_count = ice_rx_queue_count;
+ dev->rx_descriptor_status = ice_rx_descriptor_status;
+ dev->tx_descriptor_status = ice_tx_descriptor_status;
dev->rx_pkt_burst = ice_recv_pkts;
dev->tx_pkt_burst = ice_xmit_pkts;
dev->tx_pkt_prepare = ice_prep_pkts;
.rx_queue_setup = eth_igc_rx_queue_setup,
.rx_queue_release = eth_igc_rx_queue_release,
- .rx_queue_count = eth_igc_rx_queue_count,
- .rx_descriptor_done = eth_igc_rx_descriptor_done,
- .rx_descriptor_status = eth_igc_rx_descriptor_status,
- .tx_descriptor_status = eth_igc_tx_descriptor_status,
.tx_queue_setup = eth_igc_tx_queue_setup,
.tx_queue_release = eth_igc_tx_queue_release,
.tx_done_cleanup = eth_igc_tx_done_cleanup,
PMD_INIT_FUNC_TRACE();
dev->dev_ops = &eth_igc_ops;
+ dev->rx_descriptor_done = eth_igc_rx_descriptor_done;
+ dev->rx_queue_count = eth_igc_rx_queue_count;
+ dev->rx_descriptor_status = eth_igc_rx_descriptor_status;
+ dev->tx_descriptor_status = eth_igc_tx_descriptor_status;
/*
* for secondary processes, we don't initialize any further as primary
.rx_queue_intr_enable = ixgbe_dev_rx_queue_intr_enable,
.rx_queue_intr_disable = ixgbe_dev_rx_queue_intr_disable,
.rx_queue_release = ixgbe_dev_rx_queue_release,
- .rx_queue_count = ixgbe_dev_rx_queue_count,
- .rx_descriptor_done = ixgbe_dev_rx_descriptor_done,
- .rx_descriptor_status = ixgbe_dev_rx_descriptor_status,
- .tx_descriptor_status = ixgbe_dev_tx_descriptor_status,
.tx_queue_setup = ixgbe_dev_tx_queue_setup,
.tx_queue_release = ixgbe_dev_tx_queue_release,
.dev_led_on = ixgbe_dev_led_on,
.vlan_offload_set = ixgbevf_vlan_offload_set,
.rx_queue_setup = ixgbe_dev_rx_queue_setup,
.rx_queue_release = ixgbe_dev_rx_queue_release,
- .rx_descriptor_done = ixgbe_dev_rx_descriptor_done,
- .rx_descriptor_status = ixgbe_dev_rx_descriptor_status,
- .tx_descriptor_status = ixgbe_dev_tx_descriptor_status,
.tx_queue_setup = ixgbe_dev_tx_queue_setup,
.tx_queue_release = ixgbe_dev_tx_queue_release,
.rx_queue_intr_enable = ixgbevf_dev_rx_queue_intr_enable,
ixgbe_dev_macsec_setting_reset(eth_dev);
eth_dev->dev_ops = &ixgbe_eth_dev_ops;
+ eth_dev->rx_queue_count = ixgbe_dev_rx_queue_count;
+ eth_dev->rx_descriptor_done = ixgbe_dev_rx_descriptor_done;
+ eth_dev->rx_descriptor_status = ixgbe_dev_rx_descriptor_status;
+ eth_dev->tx_descriptor_status = ixgbe_dev_tx_descriptor_status;
eth_dev->rx_pkt_burst = &ixgbe_recv_pkts;
eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts;
eth_dev->tx_pkt_prepare = &ixgbe_prep_pkts;
PMD_INIT_FUNC_TRACE();
eth_dev->dev_ops = &ixgbevf_eth_dev_ops;
+ eth_dev->rx_descriptor_done = ixgbe_dev_rx_descriptor_done;
+ eth_dev->rx_descriptor_status = ixgbe_dev_rx_descriptor_status;
+ eth_dev->tx_descriptor_status = ixgbe_dev_tx_descriptor_status;
eth_dev->rx_pkt_burst = &ixgbe_recv_pkts;
eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts;
}
eth_dev->device = dpdk_dev;
eth_dev->dev_ops = &mlx5_os_dev_sec_ops;
+ eth_dev->rx_descriptor_status = mlx5_rx_descriptor_status;
+ eth_dev->tx_descriptor_status = mlx5_tx_descriptor_status;
err = mlx5_proc_priv_init(eth_dev);
if (err)
return NULL;
eth_dev->rx_pkt_burst = removed_rx_burst;
eth_dev->tx_pkt_burst = removed_tx_burst;
eth_dev->dev_ops = &mlx5_os_dev_ops;
+ eth_dev->rx_descriptor_status = mlx5_rx_descriptor_status;
+ eth_dev->tx_descriptor_status = mlx5_tx_descriptor_status;
+ eth_dev->rx_queue_count = mlx5_rx_queue_count;
/* Register MAC address. */
claim_zero(mlx5_mac_addr_add(eth_dev, &mac, 0, 0));
if (config->vf && config->vf_nl_en)
.rss_hash_update = mlx5_rss_hash_update,
.rss_hash_conf_get = mlx5_rss_hash_conf_get,
.filter_ctrl = mlx5_dev_filter_ctrl,
- .rx_descriptor_status = mlx5_rx_descriptor_status,
- .tx_descriptor_status = mlx5_tx_descriptor_status,
.rxq_info_get = mlx5_rxq_info_get,
.txq_info_get = mlx5_txq_info_get,
.rx_burst_mode_get = mlx5_rx_burst_mode_get,
.tx_burst_mode_get = mlx5_tx_burst_mode_get,
- .rx_queue_count = mlx5_rx_queue_count,
.rx_queue_intr_enable = mlx5_rx_intr_enable,
.rx_queue_intr_disable = mlx5_rx_intr_disable,
.is_removed = mlx5_is_removed,
.rx_queue_stop = mlx5_rx_queue_stop,
.tx_queue_start = mlx5_tx_queue_start,
.tx_queue_stop = mlx5_tx_queue_stop,
- .rx_descriptor_status = mlx5_rx_descriptor_status,
- .tx_descriptor_status = mlx5_tx_descriptor_status,
.rxq_info_get = mlx5_rxq_info_get,
.txq_info_get = mlx5_txq_info_get,
.rx_burst_mode_get = mlx5_rx_burst_mode_get,
.vlan_strip_queue_set = mlx5_vlan_strip_queue_set,
.vlan_offload_set = mlx5_vlan_offload_set,
.filter_ctrl = mlx5_dev_filter_ctrl,
- .rx_descriptor_status = mlx5_rx_descriptor_status,
- .tx_descriptor_status = mlx5_tx_descriptor_status,
.rxq_info_get = mlx5_rxq_info_get,
.txq_info_get = mlx5_txq_info_get,
.rx_burst_mode_get = mlx5_rx_burst_mode_get,
dev->dev_ops = &mlx5_os_dev_ops_isolate;
else
dev->dev_ops = &mlx5_os_dev_ops;
+
+ dev->rx_descriptor_status = mlx5_rx_descriptor_status;
+ dev->tx_descriptor_status = mlx5_tx_descriptor_status;
+
return 0;
}
.tx_queue_setup = hn_dev_tx_queue_setup,
.tx_queue_release = hn_dev_tx_queue_release,
.tx_done_cleanup = hn_dev_tx_done_cleanup,
- .tx_descriptor_status = hn_dev_tx_descriptor_status,
.rx_queue_setup = hn_dev_rx_queue_setup,
.rx_queue_release = hn_dev_rx_queue_release,
- .rx_queue_count = hn_dev_rx_queue_count,
- .rx_descriptor_status = hn_dev_rx_queue_status,
.link_update = hn_dev_link_update,
.stats_get = hn_dev_stats_get,
.stats_reset = hn_dev_stats_reset,
vmbus = container_of(device, struct rte_vmbus_device, device);
eth_dev->dev_ops = &hn_eth_dev_ops;
+ eth_dev->rx_queue_count = hn_dev_rx_queue_count;
+ eth_dev->rx_descriptor_status = hn_dev_rx_queue_status;
+ eth_dev->tx_descriptor_status = hn_dev_tx_descriptor_status;
eth_dev->tx_pkt_burst = &hn_xmit_pkts;
eth_dev->rx_pkt_burst = &hn_recv_pkts;
.rss_hash_conf_get = nfp_net_rss_hash_conf_get,
.rx_queue_setup = nfp_net_rx_queue_setup,
.rx_queue_release = nfp_net_rx_queue_release,
- .rx_queue_count = nfp_net_rx_queue_count,
.tx_queue_setup = nfp_net_tx_queue_setup,
.tx_queue_release = nfp_net_tx_queue_release,
.rx_queue_intr_enable = nfp_rx_queue_intr_enable,
}
eth_dev->dev_ops = &nfp_net_eth_dev_ops;
+ eth_dev->rx_queue_count = nfp_net_rx_queue_count;
eth_dev->rx_pkt_burst = &nfp_net_recv_pkts;
eth_dev->tx_pkt_burst = &nfp_net_xmit_pkts;
.txq_info_get = otx2_nix_txq_info_get,
.rx_burst_mode_get = otx2_rx_burst_mode_get,
.tx_burst_mode_get = otx2_tx_burst_mode_get,
- .rx_queue_count = otx2_nix_rx_queue_count,
- .rx_descriptor_done = otx2_nix_rx_descriptor_done,
- .rx_descriptor_status = otx2_nix_rx_descriptor_status,
- .tx_descriptor_status = otx2_nix_tx_descriptor_status,
.tx_done_cleanup = otx2_nix_tx_done_cleanup,
.set_queue_rate_limit = otx2_nix_tm_set_queue_rate_limit,
.pool_ops_supported = otx2_nix_pool_ops_supported,
int rc, max_entries;
eth_dev->dev_ops = &otx2_eth_dev_ops;
+ eth_dev->rx_descriptor_done = otx2_nix_rx_descriptor_done;
+ eth_dev->rx_queue_count = otx2_nix_rx_queue_count;
+ eth_dev->rx_descriptor_status = otx2_nix_rx_descriptor_status;
+ eth_dev->tx_descriptor_status = otx2_nix_tx_descriptor_status;
/* For secondary processes, the primary has done all the work */
if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
.dev_infos_get = qede_dev_info_get,
.rx_queue_setup = qede_rx_queue_setup,
.rx_queue_release = qede_rx_queue_release,
- .rx_descriptor_status = qede_rx_descriptor_status,
.tx_queue_setup = qede_tx_queue_setup,
.tx_queue_release = qede_tx_queue_release,
.dev_start = qede_dev_start,
.dev_infos_get = qede_dev_info_get,
.rx_queue_setup = qede_rx_queue_setup,
.rx_queue_release = qede_rx_queue_release,
- .rx_descriptor_status = qede_rx_descriptor_status,
.tx_queue_setup = qede_tx_queue_setup,
.tx_queue_release = qede_tx_queue_release,
.dev_start = qede_dev_start,
}
eth_dev->dev_ops = (is_vf) ? &qede_eth_vf_dev_ops : &qede_eth_dev_ops;
+ eth_dev->rx_descriptor_status = qede_rx_descriptor_status;
adapter->num_tx_queues = 0;
adapter->num_rx_queues = 0;
.tx_queue_stop = sfc_tx_queue_stop,
.rx_queue_setup = sfc_rx_queue_setup,
.rx_queue_release = sfc_rx_queue_release,
- .rx_queue_count = sfc_rx_queue_count,
- .rx_descriptor_done = sfc_rx_descriptor_done,
- .rx_descriptor_status = sfc_rx_descriptor_status,
- .tx_descriptor_status = sfc_tx_descriptor_status,
.rx_queue_intr_enable = sfc_rx_queue_intr_enable,
.rx_queue_intr_disable = sfc_rx_queue_intr_disable,
.tx_queue_setup = sfc_tx_queue_setup,
dev->tx_pkt_prepare = dp_tx->pkt_prepare;
dev->tx_pkt_burst = dp_tx->pkt_burst;
+ dev->rx_queue_count = sfc_rx_queue_count;
+ dev->rx_descriptor_done = sfc_rx_descriptor_done;
+ dev->rx_descriptor_status = sfc_rx_descriptor_status;
+ dev->tx_descriptor_status = sfc_tx_descriptor_status;
dev->dev_ops = &sfc_eth_dev_ops;
return 0;
static const struct eth_dev_ops sfc_eth_dev_secondary_ops = {
.dev_supported_ptypes_get = sfc_dev_supported_ptypes_get,
- .rx_queue_count = sfc_rx_queue_count,
- .rx_descriptor_done = sfc_rx_descriptor_done,
- .rx_descriptor_status = sfc_rx_descriptor_status,
- .tx_descriptor_status = sfc_tx_descriptor_status,
.reta_query = sfc_dev_rss_reta_query,
.rss_hash_conf_get = sfc_dev_rss_hash_conf_get,
.rxq_info_get = sfc_rx_queue_info_get,
dev->rx_pkt_burst = dp_rx->pkt_burst;
dev->tx_pkt_prepare = dp_tx->pkt_prepare;
dev->tx_pkt_burst = dp_tx->pkt_burst;
+ dev->rx_queue_count = sfc_rx_queue_count;
+ dev->rx_descriptor_done = sfc_rx_descriptor_done;
+ dev->rx_descriptor_status = sfc_rx_descriptor_status;
+ dev->tx_descriptor_status = sfc_tx_descriptor_status;
dev->dev_ops = &sfc_eth_dev_secondary_ops;
return 0;
.tx_queue_stop = nicvf_dev_tx_queue_stop,
.rx_queue_setup = nicvf_dev_rx_queue_setup,
.rx_queue_release = nicvf_dev_rx_queue_release,
- .rx_queue_count = nicvf_dev_rx_queue_count,
.tx_queue_setup = nicvf_dev_tx_queue_setup,
.tx_queue_release = nicvf_dev_tx_queue_release,
.dev_set_link_up = nicvf_dev_set_link_up,
PMD_INIT_FUNC_TRACE();
eth_dev->dev_ops = &nicvf_eth_dev_ops;
+ eth_dev->rx_queue_count = nicvf_dev_rx_queue_count;
/* For secondary processes, the primary has done all the work */
if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
.rx_queue_release = eth_queue_release,
.tx_queue_release = eth_queue_release,
.tx_done_cleanup = eth_tx_done_cleanup,
- .rx_queue_count = eth_rx_queue_count,
.link_update = eth_link_update,
.stats_get = eth_stats_get,
.stats_reset = eth_stats_reset,
data->all_multicast = 1;
eth_dev->dev_ops = &ops;
+ eth_dev->rx_queue_count = eth_rx_queue_count;
/* finally assign rx and tx ops */
eth_dev->rx_pkt_burst = eth_vhost_rx;
.rx_queue_intr_enable = virtio_dev_rx_queue_intr_enable,
.rx_queue_intr_disable = virtio_dev_rx_queue_intr_disable,
.rx_queue_release = virtio_dev_queue_release,
- .rx_descriptor_done = virtio_dev_rx_queue_done,
.tx_queue_setup = virtio_dev_tx_queue_setup,
.tx_queue_release = virtio_dev_queue_release,
/* collect stats per queue */
}
eth_dev->dev_ops = &virtio_eth_dev_ops;
+ eth_dev->rx_descriptor_done = virtio_dev_rx_queue_done;
if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
if (!hw->virtio_user_dev) {
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
dev = &rte_eth_devices[port_id];
- RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_count, -ENOTSUP);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->rx_queue_count, -ENOTSUP);
if (queue_id >= dev->data->nb_rx_queues)
return -EINVAL;
- return (int)(*dev->dev_ops->rx_queue_count)(dev, queue_id);
+ return (int)(*dev->rx_queue_count)(dev, queue_id);
}
/**
{
struct rte_eth_dev *dev = &rte_eth_devices[port_id];
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
- RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_descriptor_done, -ENOTSUP);
- return (*dev->dev_ops->rx_descriptor_done)( \
- dev->data->rx_queues[queue_id], offset);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->rx_descriptor_done, -ENOTSUP);
+ return (*dev->rx_descriptor_done)(dev->data->rx_queues[queue_id], offset);
}
#define RTE_ETH_RX_DESC_AVAIL 0 /**< Desc available for hw. */
if (queue_id >= dev->data->nb_rx_queues)
return -ENODEV;
#endif
- RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_descriptor_status, -ENOTSUP);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->rx_descriptor_status, -ENOTSUP);
rxq = dev->data->rx_queues[queue_id];
- return (*dev->dev_ops->rx_descriptor_status)(rxq, offset);
+ return (*dev->rx_descriptor_status)(rxq, offset);
}
#define RTE_ETH_TX_DESC_FULL 0 /**< Desc filled for hw, waiting xmit. */
if (queue_id >= dev->data->nb_tx_queues)
return -ENODEV;
#endif
- RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_descriptor_status, -ENOTSUP);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->tx_descriptor_status, -ENOTSUP);
txq = dev->data->tx_queues[queue_id];
- return (*dev->dev_ops->tx_descriptor_status)(txq, offset);
+ return (*dev->tx_descriptor_status)(txq, offset);
}
/**
eth_queue_stop_t tx_queue_stop; /**< Stop TX for a queue. */
eth_rx_queue_setup_t rx_queue_setup;/**< Set up device RX queue. */
eth_queue_release_t rx_queue_release; /**< Release RX queue. */
- eth_rx_queue_count_t rx_queue_count;
- /**< Get the number of used RX descriptors. */
- eth_rx_descriptor_done_t rx_descriptor_done; /**< Check rxd DD bit. */
- eth_rx_descriptor_status_t rx_descriptor_status;
- /**< Check the status of a Rx descriptor. */
- eth_tx_descriptor_status_t tx_descriptor_status;
- /**< Check the status of a Tx descriptor. */
/*
* Static inline functions use functions ABOVE this comment.
* New dev_ops functions should be added BELOW to avoid breaking ABI.
eth_rx_burst_t rx_pkt_burst; /**< Pointer to PMD receive function. */
eth_tx_burst_t tx_pkt_burst; /**< Pointer to PMD transmit function. */
eth_tx_prep_t tx_pkt_prepare; /**< Pointer to PMD transmit prepare function. */
+
+ eth_rx_queue_count_t rx_queue_count; /**< Get the number of used RX descriptors. */
+ eth_rx_descriptor_done_t rx_descriptor_done; /**< Check rxd DD bit. */
+ eth_rx_descriptor_status_t rx_descriptor_status; /**< Check the status of a Rx descriptor. */
+ eth_tx_descriptor_status_t tx_descriptor_status; /**< Check the status of a Tx descriptor. */
+
/**
* Next two fields are per-device data but *data is shared between
* primary and secondary processes and *process_private is per-process