struct rte_eth_dev_info *dev_info);
static int ice_link_update(struct rte_eth_dev *dev,
int wait_to_complete);
+static int ice_dev_set_link_up(struct rte_eth_dev *dev);
+static int ice_dev_set_link_down(struct rte_eth_dev *dev);
+
static int ice_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
static int ice_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static int ice_vlan_tpid_set(struct rte_eth_dev *dev,
uint16_t vlan_id,
int on);
static int ice_macaddr_set(struct rte_eth_dev *dev,
- struct ether_addr *mac_addr);
+ struct rte_ether_addr *mac_addr);
static int ice_macaddr_add(struct rte_eth_dev *dev,
- struct ether_addr *mac_addr,
+ struct rte_ether_addr *mac_addr,
__rte_unused uint32_t index,
uint32_t pool);
static void ice_macaddr_remove(struct rte_eth_dev *dev, uint32_t index);
.dev_stop = ice_dev_stop,
.dev_close = ice_dev_close,
.dev_reset = ice_dev_reset,
+ .dev_set_link_up = ice_dev_set_link_up,
+ .dev_set_link_down = ice_dev_set_link_down,
.rx_queue_start = ice_rx_queue_start,
.rx_queue_stop = ice_rx_queue_stop,
.tx_queue_start = ice_tx_queue_start,
{"rx_unicast_packets", offsetof(struct ice_eth_stats, rx_unicast)},
{"rx_multicast_packets", offsetof(struct ice_eth_stats, rx_multicast)},
{"rx_broadcast_packets", offsetof(struct ice_eth_stats, rx_broadcast)},
- {"rx_dropped", offsetof(struct ice_eth_stats, rx_discards)},
+ {"rx_dropped_packets", offsetof(struct ice_eth_stats, rx_discards)},
{"rx_unknown_protocol_packets", offsetof(struct ice_eth_stats,
rx_unknown_protocol)},
{"tx_unicast_packets", offsetof(struct ice_eth_stats, tx_unicast)},
{"tx_multicast_packets", offsetof(struct ice_eth_stats, tx_multicast)},
{"tx_broadcast_packets", offsetof(struct ice_eth_stats, tx_broadcast)},
- {"tx_dropped", offsetof(struct ice_eth_stats, tx_discards)},
+ {"tx_dropped_packets", offsetof(struct ice_eth_stats, tx_discards)},
};
#define ICE_NB_ETH_XSTATS (sizeof(ice_stats_strings) / \
{
struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- if (!is_unicast_ether_addr
- ((struct ether_addr *)hw->port_info[0].mac.lan_addr)) {
+ if (!rte_is_unicast_ether_addr
+ ((struct rte_ether_addr *)hw->port_info[0].mac.lan_addr)) {
PMD_INIT_LOG(ERR, "Invalid MAC address");
return -EINVAL;
}
- ether_addr_copy((struct ether_addr *)hw->port_info[0].mac.lan_addr,
- (struct ether_addr *)hw->port_info[0].mac.perm_addr);
+ rte_ether_addr_copy(
+ (struct rte_ether_addr *)hw->port_info[0].mac.lan_addr,
+ (struct rte_ether_addr *)hw->port_info[0].mac.perm_addr);
- dev->data->mac_addrs = rte_zmalloc(NULL, sizeof(struct ether_addr), 0);
+ dev->data->mac_addrs =
+ rte_zmalloc(NULL, sizeof(struct rte_ether_addr), 0);
if (!dev->data->mac_addrs) {
PMD_INIT_LOG(ERR,
"Failed to allocate memory to store mac address");
return -ENOMEM;
}
/* store it to dev data */
- ether_addr_copy((struct ether_addr *)hw->port_info[0].mac.perm_addr,
- &dev->data->mac_addrs[0]);
+ rte_ether_addr_copy(
+ (struct rte_ether_addr *)hw->port_info[0].mac.perm_addr,
+ &dev->data->mac_addrs[0]);
return 0;
}
/* Find out specific MAC filter */
static struct ice_mac_filter *
-ice_find_mac_filter(struct ice_vsi *vsi, struct ether_addr *macaddr)
+ice_find_mac_filter(struct ice_vsi *vsi, struct rte_ether_addr *macaddr)
{
struct ice_mac_filter *f;
TAILQ_FOREACH(f, &vsi->mac_list, next) {
- if (is_same_ether_addr(macaddr, &f->mac_info.mac_addr))
+ if (rte_is_same_ether_addr(macaddr, &f->mac_info.mac_addr))
return f;
}
}
static int
-ice_add_mac_filter(struct ice_vsi *vsi, struct ether_addr *mac_addr)
+ice_add_mac_filter(struct ice_vsi *vsi, struct rte_ether_addr *mac_addr)
{
struct ice_fltr_list_entry *m_list_itr = NULL;
struct ice_mac_filter *f;
}
static int
-ice_remove_mac_filter(struct ice_vsi *vsi, struct ether_addr *mac_addr)
+ice_remove_mac_filter(struct ice_vsi *vsi, struct rte_ether_addr *mac_addr)
{
struct ice_fltr_list_entry *m_list_itr = NULL;
struct ice_mac_filter *f;
struct ice_fltr_list_entry *v_list_itr = NULL;
struct ice_vlan_filter *f;
struct LIST_HEAD_TYPE list_head;
- struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
+ struct ice_hw *hw;
int ret = 0;
- if (!vsi || vlan_id > ETHER_MAX_VLAN_ID)
+ if (!vsi || vlan_id > RTE_ETHER_MAX_VLAN_ID)
return -EINVAL;
+ hw = ICE_VSI_TO_HW(vsi);
+
/* If it's added and configured, return. */
f = ice_find_vlan_filter(vsi, vlan_id);
if (f) {
struct ice_fltr_list_entry *v_list_itr = NULL;
struct ice_vlan_filter *f;
struct LIST_HEAD_TYPE list_head;
- struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
+ struct ice_hw *hw;
int ret = 0;
/**
* Vlan 0 is the generic filter for untagged packets
* and can't be removed.
*/
- if (!vsi || vlan_id == 0 || vlan_id > ETHER_MAX_VLAN_ID)
+ if (!vsi || vlan_id == 0 || vlan_id > RTE_ETHER_MAX_VLAN_ID)
return -EINVAL;
+ hw = ICE_VSI_TO_HW(vsi);
+
/* Can't find it, return an error */
f = ice_find_vlan_filter(vsi, vlan_id);
if (!f)
struct ice_vsi *vsi = NULL;
struct ice_vsi_ctx vsi_ctx;
int ret;
- struct ether_addr broadcast = {
+ struct rte_ether_addr broadcast = {
.addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff} };
- struct ether_addr mac_addr;
+ struct rte_ether_addr mac_addr;
uint16_t max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
uint8_t tc_bitmap = 0x1;
TAILQ_INIT(&vsi->mac_list);
TAILQ_INIT(&vsi->vlan_list);
+	/* Keep in sync with the ETH_RSS_RETA_SIZE_x maximum value definition */
+ pf->hash_lut_size = hw->func_caps.common_cap.rss_table_size >
+ ETH_RSS_RETA_SIZE_512 ? ETH_RSS_RETA_SIZE_512 :
+ hw->func_caps.common_cap.rss_table_size;
+ pf->flags |= ICE_FLAG_RSS_AQ_CAPABLE;
+
memset(&vsi_ctx, 0, sizeof(vsi_ctx));
/* base_queue in used in queue mapping of VSI add/update command.
* Suppose vsi->base_queue is 0 now, don't consider SRIOV, VMDQ
hw->port_info->mac.perm_addr,
ETH_ADDR_LEN);
- rte_memcpy(&mac_addr, &pf->dev_addr, ETHER_ADDR_LEN);
+ rte_memcpy(&mac_addr, &pf->dev_addr, RTE_ETHER_ADDR_LEN);
ret = ice_add_mac_filter(vsi, &mac_addr);
if (ret != ICE_SUCCESS)
PMD_INIT_LOG(ERR, "Failed to add dflt MAC filter");
- rte_memcpy(&mac_addr, &broadcast, ETHER_ADDR_LEN);
+ rte_memcpy(&mac_addr, &broadcast, RTE_ETHER_ADDR_LEN);
ret = ice_add_mac_filter(vsi, &mac_addr);
if (ret != ICE_SUCCESS)
PMD_INIT_LOG(ERR, "Failed to add MAC filter");
return err;
}
+/* Read the PF's first assigned Rx queue index from hardware.
+ *
+ * Reads the PFLAN_RX_QALLOC register; when the valid bit is set, the
+ * first-queue field is stored in pf->base_queue. Otherwise pf->base_queue
+ * is left unchanged and a warning is logged.
+ */
+static void
+ice_base_queue_get(struct ice_pf *pf)
+{
+	uint32_t reg;
+	struct ice_hw *hw = ICE_PF_TO_HW(pf);
+
+	reg = ICE_READ_REG(hw, PFLAN_RX_QALLOC);
+	if (reg & PFLAN_RX_QALLOC_VALID_M) {
+		pf->base_queue = reg & PFLAN_RX_QALLOC_FIRSTQ_M;
+	} else {
+		PMD_INIT_LOG(WARNING, "Failed to get Rx base queue"
+			     " index");
+	}
+}
+
static int
ice_dev_init(struct rte_eth_dev *dev)
{
/* Disable double vlan by default */
ice_vsi_config_double_vlan(vsi, FALSE);
- ret = ice_aq_stop_lldp(hw, TRUE, NULL);
+ ret = ice_aq_stop_lldp(hw, TRUE, FALSE, NULL);
if (ret != ICE_SUCCESS)
PMD_INIT_LOG(DEBUG, "lldp has already stopped\n");
/* enable uio intr after callback register */
rte_intr_enable(intr_handle);
+ /* get base queue pairs index in the device */
+ ice_base_queue_get(pf);
+
return 0;
err_pf_setup:
ice_res_pool_destroy(&pf->msix_pool);
err_msix_pool_init:
rte_free(dev->data->mac_addrs);
+ dev->data->mac_addrs = NULL;
err_init_mac:
ice_sched_cleanup_all(hw);
rte_free(hw->port_info);
/* Clear all queues and release mbufs */
ice_clear_queues(dev);
+ ice_dev_set_link_down(dev);
+
/* Clean datapath event and queue/vec mapping */
rte_intr_efd_disable(intr_handle);
if (intr_handle->intr_vec) {
struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	/* Stopping the device brings the link down, which triggers a link
+	 * event; disable irq0 first so that deallocation of resources such
+	 * as port_info cannot crash the interrupt service thread.
+	 */
+ ice_pf_disable_irq0(hw);
+
ice_dev_stop(dev);
/* release all queue resource */
ice_release_vsi(pf->main_vsi);
ice_sched_cleanup_all(hw);
rte_free(hw->port_info);
+ hw->port_info = NULL;
ice_shutdown_all_ctrlq(hw);
}
/* disable uio intr before callback unregister */
rte_intr_disable(intr_handle);
- /* register callback func to eal lib */
+ /* unregister callback func from eal lib */
rte_intr_callback_unregister(intr_handle,
ice_interrupt_handler, dev);
rss_conf = &dev->data->dev_conf.rx_adv_conf.rss_conf;
nb_q = dev->data->nb_rx_queues;
vsi->rss_key_size = ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE;
- vsi->rss_lut_size = hw->func_caps.common_cap.rss_table_size;
+ vsi->rss_lut_size = pf->hash_lut_size;
if (is_safe_mode) {
PMD_DRV_LOG(WARNING, "RSS is not supported in safe mode\n");
if (ret != ICE_SUCCESS)
PMD_DRV_LOG(WARNING, "Fail to set phy mask");
+ ice_dev_set_link_up(dev);
+
/* Call get_link_info aq commond to enable/disable LSE */
ice_link_update(dev, 0);
dev_info->max_tx_queues = vsi->nb_qps;
dev_info->max_mac_addrs = vsi->max_macaddrs;
dev_info->max_vfs = pci_dev->max_vfs;
+ dev_info->max_mtu = dev_info->max_rx_pktlen - ICE_ETH_OVERHEAD;
+ dev_info->min_mtu = RTE_ETHER_MIN_MTU;
dev_info->rx_offload_capa =
DEV_RX_OFFLOAD_VLAN_STRIP |
DEV_TX_OFFLOAD_UDP_CKSUM |
DEV_TX_OFFLOAD_TCP_CKSUM |
DEV_TX_OFFLOAD_SCTP_CKSUM |
- DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+ DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_OUTER_UDP_CKSUM;
dev_info->flow_type_rss_offloads |= ICE_RSS_OFFLOAD_ALL;
}
dev_info->rx_queue_offload_capa = 0;
dev_info->tx_queue_offload_capa = 0;
- dev_info->reta_size = hw->func_caps.common_cap.rss_table_size;
+ dev_info->reta_size = pf->hash_lut_size;
dev_info->hash_key_size = (VSIQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
dev_info->default_rxconf = (struct rte_eth_rxconf) {
}
static int
-ice_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
+ice_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_REPEAT_TIME 10 /* 1s (10 * 100ms) in total */
return 0;
}
+/* Force the physical link state by getting the current PHY capabilities from
+ * hardware and setting the PHY config based on the determined capabilities. If
+ * link changes, link event will be triggered because both the Enable Automatic
+ * Link Update and LESM Enable bits are set when setting the PHY capabilities.
+ *
+ * Returns ICE_SUCCESS on success, ICE_ERR_PARAM on a NULL hw/port_info,
+ * ICE_ERR_NO_MEMORY if the caps buffer cannot be allocated, or the status
+ * of the failing admin queue command.
+ */
+static enum ice_status
+ice_force_phys_link_state(struct ice_hw *hw, bool link_up)
+{
+	struct ice_aqc_set_phy_cfg_data cfg = { 0 };
+	struct ice_aqc_get_phy_caps_data *pcaps;
+	struct ice_port_info *pi;
+	enum ice_status status;
+
+	if (!hw || !hw->port_info)
+		return ICE_ERR_PARAM;
+
+	pi = hw->port_info;
+
+	pcaps = (struct ice_aqc_get_phy_caps_data *)
+		ice_malloc(hw, sizeof(*pcaps));
+	if (!pcaps)
+		return ICE_ERR_NO_MEMORY;
+
+	/* Query the currently configured (software) PHY capabilities */
+	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
+				     NULL);
+	if (status)
+		goto out;
+
+	/* No change in link */
+	if (link_up == !!(pcaps->caps & ICE_AQC_PHY_EN_LINK) &&
+	    link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP))
+		goto out;
+
+	/* Build the new PHY config from the reported capabilities so that
+	 * only the link-enable bit changes.
+	 */
+	cfg.phy_type_low = pcaps->phy_type_low;
+	cfg.phy_type_high = pcaps->phy_type_high;
+	cfg.caps = pcaps->caps | ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
+	cfg.low_power_ctrl = pcaps->low_power_ctrl;
+	cfg.eee_cap = pcaps->eee_cap;
+	cfg.eeer_value = pcaps->eeer_value;
+	cfg.link_fec_opt = pcaps->link_fec_options;
+	if (link_up)
+		cfg.caps |= ICE_AQ_PHY_ENA_LINK;
+	else
+		cfg.caps &= ~ICE_AQ_PHY_ENA_LINK;
+
+	status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL);
+
+out:
+	ice_free(hw, pcaps);
+	return status;
+}
+
+/* .dev_set_link_up ops callback: force the physical link up. */
+static int
+ice_dev_set_link_up(struct rte_eth_dev *dev)
+{
+	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	return ice_force_phys_link_state(hw, true);
+}
+
+/* .dev_set_link_down ops callback: force the physical link down. */
+static int
+ice_dev_set_link_down(struct rte_eth_dev *dev)
+{
+	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	return ice_force_phys_link_state(hw, false);
+}
+
static int
ice_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
uint32_t frame_size = mtu + ICE_ETH_OVERHEAD;
/* check if mtu is within the allowed range */
- if (mtu < ETHER_MIN_MTU || frame_size > ICE_FRAME_SIZE_MAX)
+ if (mtu < RTE_ETHER_MIN_MTU || frame_size > ICE_FRAME_SIZE_MAX)
return -EINVAL;
/* mtu setting is forbidden if port is start */
return -EBUSY;
}
- if (frame_size > ETHER_MAX_LEN)
+ if (frame_size > RTE_ETHER_MAX_LEN)
dev_data->dev_conf.rxmode.offloads |=
DEV_RX_OFFLOAD_JUMBO_FRAME;
else
}
static int ice_macaddr_set(struct rte_eth_dev *dev,
- struct ether_addr *mac_addr)
+ struct rte_ether_addr *mac_addr)
{
struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
uint8_t flags = 0;
int ret;
- if (!is_valid_assigned_ether_addr(mac_addr)) {
+ if (!rte_is_valid_assigned_ether_addr(mac_addr)) {
PMD_DRV_LOG(ERR, "Tried to set invalid MAC address.");
return -EINVAL;
}
TAILQ_FOREACH(f, &vsi->mac_list, next) {
- if (is_same_ether_addr(&pf->dev_addr, &f->mac_info.mac_addr))
+ if (rte_is_same_ether_addr(&pf->dev_addr, &f->mac_info.mac_addr))
break;
}
/* Add a MAC address, and update filters */
static int
ice_macaddr_add(struct rte_eth_dev *dev,
- struct ether_addr *mac_addr,
+ struct rte_ether_addr *mac_addr,
__rte_unused uint32_t index,
__rte_unused uint32_t pool)
{
struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct ice_vsi *vsi = pf->main_vsi;
struct rte_eth_dev_data *data = dev->data;
- struct ether_addr *macaddr;
+ struct rte_ether_addr *macaddr;
int ret;
macaddr = &data->mac_addrs[index];
reg_id = 3;
else
reg_id = 5;
- break;
+ break;
case ETH_VLAN_TYPE_INNER:
if (qinq) {
reg_id = 5;
static int
ice_set_rss_lut(struct ice_vsi *vsi, uint8_t *lut, uint16_t lut_size)
{
- struct ice_pf *pf = ICE_VSI_TO_PF(vsi);
- struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
+ struct ice_pf *pf;
+ struct ice_hw *hw;
int ret;
if (!vsi || !lut)
return -EINVAL;
+ pf = ICE_VSI_TO_PF(vsi);
+ hw = ICE_VSI_TO_HW(vsi);
+
if (pf->flags & ICE_FLAG_RSS_AQ_CAPABLE) {
ret = ice_aq_set_rss_lut(hw, vsi->idx, TRUE,
lut, lut_size);
uint16_t reta_size)
{
struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
- struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- uint16_t i, lut_size = hw->func_caps.common_cap.rss_table_size;
+ uint16_t i, lut_size = pf->hash_lut_size;
uint16_t idx, shift;
uint8_t *lut;
int ret;
- if (reta_size != lut_size ||
- reta_size > ETH_RSS_RETA_SIZE_512) {
+ if (reta_size != ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128 &&
+ reta_size != ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512 &&
+ reta_size != ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K) {
PMD_DRV_LOG(ERR,
"The size of hash lookup table configured (%d)"
"doesn't match the number hardware can "
- "supported (%d)",
- reta_size, lut_size);
+ "supported (128, 512, 2048)",
+ reta_size);
return -EINVAL;
}
- lut = rte_zmalloc(NULL, reta_size, 0);
+	/* The current LUT size MUST be used when reading the RSS lookup
+	 * table, otherwise it will fail with -100 error code.
+	 */
+ lut = rte_zmalloc(NULL, RTE_MAX(reta_size, lut_size), 0);
if (!lut) {
PMD_DRV_LOG(ERR, "No memory can be allocated");
return -ENOMEM;
}
- ret = ice_get_rss_lut(pf->main_vsi, lut, reta_size);
+ ret = ice_get_rss_lut(pf->main_vsi, lut, lut_size);
if (ret)
goto out;
lut[i] = reta_conf[idx].reta[shift];
}
ret = ice_set_rss_lut(pf->main_vsi, lut, reta_size);
+ if (ret == 0 && lut_size != reta_size) {
+ PMD_DRV_LOG(INFO,
+ "The size of hash lookup table is changed from (%d) to (%d)",
+ lut_size, reta_size);
+ pf->hash_lut_size = reta_size;
+ }
out:
rte_free(lut);
uint16_t reta_size)
{
struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
- struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- uint16_t i, lut_size = hw->func_caps.common_cap.rss_table_size;
+ uint16_t i, lut_size = pf->hash_lut_size;
uint16_t idx, shift;
uint8_t *lut;
int ret;
- if (reta_size != lut_size ||
- reta_size > ETH_RSS_RETA_SIZE_512) {
+ if (reta_size != lut_size) {
PMD_DRV_LOG(ERR,
"The size of hash lookup table configured (%d)"
"doesn't match the number hardware can "
ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX;
status = ice_set_vsi_promisc(hw, vsi->idx, pmask, 0);
- if (status != ICE_SUCCESS)
+ if (status == ICE_ERR_ALREADY_EXISTS)
+ PMD_DRV_LOG(DEBUG, "Promisc mode has already been enabled");
+ else if (status != ICE_SUCCESS)
PMD_DRV_LOG(ERR, "Failed to enable promisc, err=%d", status);
}
last_word = (eeprom->offset + eeprom->length - 1) >> 1;
nwords = last_word - first_word + 1;
- if (first_word > hw->nvm.sr_words ||
- last_word > hw->nvm.sr_words) {
+ if (first_word >= hw->nvm.sr_words ||
+ last_word >= hw->nvm.sr_words) {
PMD_DRV_LOG(ERR, "Requested EEPROM bytes out of range.");
return -EINVAL;
}
&nes->rx_broadcast);
/* exclude CRC bytes */
nes->rx_bytes -= (nes->rx_unicast + nes->rx_multicast +
- nes->rx_broadcast) * ETHER_CRC_LEN;
+ nes->rx_broadcast) * RTE_ETHER_CRC_LEN;
ice_stat_update_32(hw, GLV_RDPC(idx), vsi->offset_loaded,
&oes->rx_discards, &nes->rx_discards);
&ns->eth.rx_discards);
/* Workaround: CRC size should not be included in byte statistics,
- * so subtract ETHER_CRC_LEN from the byte counter for each rx packet.
+ * so subtract RTE_ETHER_CRC_LEN from the byte counter for each rx
+ * packet.
*/
ns->eth.rx_bytes -= (ns->eth.rx_unicast + ns->eth.rx_multicast +
- ns->eth.rx_broadcast) * ETHER_CRC_LEN;
+ ns->eth.rx_broadcast) * RTE_ETHER_CRC_LEN;
/* GLPRT_REPC not supported */
/* GLPRT_RMPC not supported */
pf->offset_loaded, &os->eth.tx_broadcast,
&ns->eth.tx_broadcast);
ns->eth.tx_bytes -= (ns->eth.tx_unicast + ns->eth.tx_multicast +
- ns->eth.tx_broadcast) * ETHER_CRC_LEN;
+ ns->eth.tx_broadcast) * RTE_ETHER_CRC_LEN;
/* GLPRT_TEPC not supported */
/* call read registers - updates values, now write them to struct */
ice_read_stats_registers(pf, hw);
- stats->ipackets = ns->eth.rx_unicast +
- ns->eth.rx_multicast +
- ns->eth.rx_broadcast -
- ns->eth.rx_discards -
+ stats->ipackets = pf->main_vsi->eth_stats.rx_unicast +
+ pf->main_vsi->eth_stats.rx_multicast +
+ pf->main_vsi->eth_stats.rx_broadcast -
pf->main_vsi->eth_stats.rx_discards;
stats->opackets = ns->eth.tx_unicast +
ns->eth.tx_multicast +
ns->eth.tx_broadcast;
- stats->ibytes = ns->eth.rx_bytes;
+ stats->ibytes = pf->main_vsi->eth_stats.rx_bytes;
stats->obytes = ns->eth.tx_bytes;
stats->oerrors = ns->eth.tx_errors +
pf->main_vsi->eth_stats.tx_errors;