goto err_out;
/* Alloc RSS context only if RSS mode is enabled */
- if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS) {
+ if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS) {
int j, nr_ctxs = bnxt_rss_ctxts(bp);
/* RSS table size in Thor is 512.
* setting is not available at this time, it will not be
* configured correctly in the CFA.
*/
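/* For reference, a minimal sketch of the context-count math, assuming a
 * Thor RSS context serves 64 RETA entries (the BNXT_RSS_ENTRIES_PER_CTX_P5
 * value); names are illustrative:
 *
 *   nr_ctxs = (rx_nr_rings + 64 - 1) / 64;
 *
 * so the full 512-entry table is covered by at most eight contexts.
 */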
- if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+ if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
vnic->vlan_strip = true;
else
vnic->vlan_strip = false;
bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);
rc = bnxt_hwrm_vnic_tpa_cfg(bp, vnic,
- (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) ?
+ (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) ?
true : false);
if (rc)
goto err_out;
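/* Application-side sketch (assumed, illustrative identifiers): the TPA/LRO
 * decision above is driven by the standard offload flag at configure time.
 *
 *   struct rte_eth_conf conf = { 0 };
 *
 *   conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_TCP_LRO;
 *   rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
 */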
unsigned int i, j;
int rc;
- if (bp->eth_dev->data->mtu > RTE_ETHER_MTU) {
- bp->eth_dev->data->dev_conf.rxmode.offloads |=
- DEV_RX_OFFLOAD_JUMBO_FRAME;
+ if (bp->eth_dev->data->mtu > RTE_ETHER_MTU)
bp->flags |= BNXT_FLAG_JUMBO;
- } else {
- bp->eth_dev->data->dev_conf.rxmode.offloads &=
- ~DEV_RX_OFFLOAD_JUMBO_FRAME;
+ else
bp->flags &= ~BNXT_FLAG_JUMBO;
- }
/* THOR does not support ring groups.
* But we will use the array to save RSS context IDs.
}
}
+ for (j = 0; j < bp->tx_nr_rings; j++) {
+ struct bnxt_tx_queue *txq = bp->tx_queues[j];
+
+ if (!txq->tx_deferred_start) {
+ bp->eth_dev->data->tx_queue_state[j] =
+ RTE_ETH_QUEUE_STATE_STARTED;
+ txq->tx_started = true;
+ }
+ }
+
+ for (j = 0; j < bp->rx_nr_rings; j++) {
+ struct bnxt_rx_queue *rxq = bp->rx_queues[j];
+
+ if (!rxq->rx_deferred_start) {
+ bp->eth_dev->data->rx_queue_state[j] =
+ RTE_ETH_QUEUE_STATE_STARTED;
+ rxq->rx_started = true;
+ }
+ }
+
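/* Queues flagged deferred_start are left stopped by the loops above; a
 * minimal sketch of how an application starts them later (port_id and
 * queue_id are illustrative):
 *
 *   rte_eth_dev_rx_queue_start(port_id, queue_id);
 *   rte_eth_dev_tx_queue_start(port_id, queue_id);
 */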
rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, &bp->vnic_info[0], 0, NULL);
if (rc) {
PMD_DRV_LOG(ERR,
link_speed = bp->link_info->support_pam4_speeds;
if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100MB)
- speed_capa |= ETH_LINK_SPEED_100M;
+ speed_capa |= RTE_ETH_LINK_SPEED_100M;
if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100MBHD)
- speed_capa |= ETH_LINK_SPEED_100M_HD;
+ speed_capa |= RTE_ETH_LINK_SPEED_100M_HD;
if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_1GB)
- speed_capa |= ETH_LINK_SPEED_1G;
+ speed_capa |= RTE_ETH_LINK_SPEED_1G;
if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_2_5GB)
- speed_capa |= ETH_LINK_SPEED_2_5G;
+ speed_capa |= RTE_ETH_LINK_SPEED_2_5G;
if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_10GB)
- speed_capa |= ETH_LINK_SPEED_10G;
+ speed_capa |= RTE_ETH_LINK_SPEED_10G;
if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_20GB)
- speed_capa |= ETH_LINK_SPEED_20G;
+ speed_capa |= RTE_ETH_LINK_SPEED_20G;
if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_25GB)
- speed_capa |= ETH_LINK_SPEED_25G;
+ speed_capa |= RTE_ETH_LINK_SPEED_25G;
if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_40GB)
- speed_capa |= ETH_LINK_SPEED_40G;
+ speed_capa |= RTE_ETH_LINK_SPEED_40G;
if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_50GB)
- speed_capa |= ETH_LINK_SPEED_50G;
+ speed_capa |= RTE_ETH_LINK_SPEED_50G;
if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100GB)
- speed_capa |= ETH_LINK_SPEED_100G;
+ speed_capa |= RTE_ETH_LINK_SPEED_100G;
if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_50G)
- speed_capa |= ETH_LINK_SPEED_50G;
+ speed_capa |= RTE_ETH_LINK_SPEED_50G;
if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_100G)
- speed_capa |= ETH_LINK_SPEED_100G;
+ speed_capa |= RTE_ETH_LINK_SPEED_100G;
if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_200G)
- speed_capa |= ETH_LINK_SPEED_200G;
+ speed_capa |= RTE_ETH_LINK_SPEED_200G;
if (bp->link_info->auto_mode ==
HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE)
- speed_capa |= ETH_LINK_SPEED_FIXED;
+ speed_capa |= RTE_ETH_LINK_SPEED_FIXED;
return speed_capa;
}
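/* Usage sketch (assumed application-side code): the mask built above is
 * what rte_eth_dev_info_get() reports in dev_info.speed_capa.
 *
 *   struct rte_eth_dev_info info;
 *
 *   if (rte_eth_dev_info_get(port_id, &info) == 0 &&
 *       (info.speed_capa & RTE_ETH_LINK_SPEED_100G))
 *           printf("port %u supports 100G\n", port_id);
 */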
dev_info->rx_offload_capa = BNXT_DEV_RX_OFFLOAD_SUPPORT;
if (bp->flags & BNXT_FLAG_PTP_SUPPORTED)
- dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TIMESTAMP;
- dev_info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+ dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
+ if (bp->vnic_cap_flags & BNXT_VNIC_CAP_VLAN_RX_STRIP)
+ dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
+ dev_info->tx_queue_offload_capa = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
dev_info->tx_offload_capa = BNXT_DEV_TX_OFFLOAD_SUPPORT |
dev_info->tx_queue_offload_capa;
+ if (bp->fw_cap & BNXT_FW_CAP_VLAN_TX_INSERT)
+ dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
dev_info->flow_type_rss_offloads = BNXT_ETH_RSS_SUPPORT;
dev_info->speed_capa = bnxt_get_speed_capabilities(bp);
*/
/* VMDq resources */
- vpool = 64; /* ETH_64_POOLS */
- vrxq = 128; /* ETH_VMDQ_DCB_NUM_QUEUES */
+ vpool = 64; /* RTE_ETH_64_POOLS */
+ vrxq = 128; /* RTE_ETH_VMDQ_DCB_NUM_QUEUES */
for (i = 0; i < 4; vpool >>= 1, i++) {
if (max_vnics > vpool) {
for (j = 0; j < 5; vrxq >>= 1, j++) {
(uint32_t)(eth_dev->data->nb_rx_queues) > bp->max_ring_grps)
goto resource_error;
- if (!(eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) &&
+ if (!(eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS) &&
bp->max_vnics < eth_dev->data->nb_rx_queues)
goto resource_error;
bp->rx_cp_nr_rings = bp->rx_nr_rings;
bp->tx_cp_nr_rings = bp->tx_nr_rings;
- if (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
- rx_offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+ if (eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+ rx_offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
eth_dev->data->dev_conf.rxmode.offloads = rx_offloads;
- if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
- eth_dev->data->mtu =
- eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
- RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN - VLAN_TAG_SIZE *
- BNXT_NUM_VLANS;
- bnxt_mtu_set_op(eth_dev, eth_dev->data->mtu);
- }
+ bnxt_mtu_set_op(eth_dev, eth_dev->data->mtu);
+
return 0;
resource_error:
PMD_DRV_LOG(INFO, "Port %d Link Up - speed %u Mbps - %s\n",
eth_dev->data->port_id,
(uint32_t)link->link_speed,
- (link->link_duplex == ETH_LINK_FULL_DUPLEX) ?
+ (link->link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ?
("full-duplex") : ("half-duplex\n"));
else
PMD_DRV_LOG(INFO, "Port %d Link Down\n",
*/
static int bnxt_scattered_rx(struct rte_eth_dev *eth_dev)
{
+ uint32_t overhead = BNXT_MAX_PKT_LEN - BNXT_MAX_MTU;
uint16_t buf_size;
int i;
- if (eth_dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER)
+ if (eth_dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
return 1;
- if (eth_dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO)
+ if (eth_dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)
return 1;
for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
RTE_PKTMBUF_HEADROOM);
- if (eth_dev->data->dev_conf.rxmode.max_rx_pkt_len > buf_size)
+ if (eth_dev->data->mtu + overhead > buf_size)
return 1;
}
return 0;
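/* The overhead term is the fixed L2 framing that must fit in the buffer on
 * top of an MTU-sized payload; with the usual constants and two VLAN tags
 * (the same assumption the removed max_rx_pkt_len math used):
 *
 *   overhead = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN +
 *              VLAN_TAG_SIZE * BNXT_NUM_VLANS
 *            = 14 + 4 + 4 * 2 = 26 bytes
 *
 * Deriving it as BNXT_MAX_PKT_LEN - BNXT_MAX_MTU keeps the value in sync
 * with the device limits instead of hard-coding it.
 */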
* a limited subset have been enabled.
*/
if (eth_dev->data->dev_conf.rxmode.offloads &
- ~(DEV_RX_OFFLOAD_VLAN_STRIP |
- DEV_RX_OFFLOAD_KEEP_CRC |
- DEV_RX_OFFLOAD_JUMBO_FRAME |
- DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_RX_OFFLOAD_OUTER_UDP_CKSUM |
- DEV_RX_OFFLOAD_RSS_HASH |
- DEV_RX_OFFLOAD_VLAN_FILTER))
+ ~(RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+ RTE_ETH_RX_OFFLOAD_KEEP_CRC |
+ RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+ RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_RSS_HASH |
+ RTE_ETH_RX_OFFLOAD_VLAN_FILTER))
goto use_scalar_rx;
#if defined(RTE_ARCH_X86) && defined(CC_AVX2_SUPPORT)
* or tx offloads.
*/
if (eth_dev->data->scattered_rx ||
- (offloads & ~DEV_TX_OFFLOAD_MBUF_FAST_FREE) ||
+ (offloads & ~RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) ||
BNXT_TRUFLOW_EN(bp))
goto use_scalar_tx;
bnxt_link_update_op(eth_dev, 1);
- if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
- vlan_mask |= ETH_VLAN_FILTER_MASK;
- if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
- vlan_mask |= ETH_VLAN_STRIP_MASK;
+ if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
+ vlan_mask |= RTE_ETH_VLAN_FILTER_MASK;
+ if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
+ vlan_mask |= RTE_ETH_VLAN_STRIP_MASK;
rc = bnxt_vlan_offload_set_op(eth_dev, vlan_mask);
if (rc)
goto error;
/* Retrieve link info from hardware */
rc = bnxt_get_hwrm_link_config(bp, &new);
if (rc) {
- new.link_speed = ETH_LINK_SPEED_100M;
- new.link_duplex = ETH_LINK_FULL_DUPLEX;
+ new.link_speed = RTE_ETH_LINK_SPEED_100M;
+ new.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
PMD_DRV_LOG(ERR,
"Failed to retrieve link rc = 0x%x!\n", rc);
goto out;
if (new.link_status != eth_dev->data->dev_link.link_status ||
new.link_speed != eth_dev->data->dev_link.link_speed) {
rte_eth_linkstatus_set(eth_dev, &new);
-
- rte_eth_dev_callback_process(eth_dev,
- RTE_ETH_EVENT_INTR_LSC,
- NULL);
-
bnxt_print_link_info(eth_dev);
}
if (!vnic->rss_table)
return -EINVAL;
- if (!(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG))
+ if (!(dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG))
return -EINVAL;
if (reta_size != tbl_size) {
for (i = 0; i < reta_size; i++) {
struct bnxt_rx_queue *rxq;
- idx = i / RTE_RETA_GROUP_SIZE;
- sft = i % RTE_RETA_GROUP_SIZE;
+ idx = i / RTE_ETH_RETA_GROUP_SIZE;
+ sft = i % RTE_ETH_RETA_GROUP_SIZE;
if (!(reta_conf[idx].mask & (1ULL << sft)))
continue;
}
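/* Application-side sketch (illustrative) of the encoding the loop above
 * decodes: table entry i lives in group i / RTE_ETH_RETA_GROUP_SIZE at
 * slot i % RTE_ETH_RETA_GROUP_SIZE.
 *
 *   struct rte_eth_rss_reta_entry64 reta[512 / RTE_ETH_RETA_GROUP_SIZE];
 *
 *   memset(reta, 0, sizeof(reta));
 *   reta[i / RTE_ETH_RETA_GROUP_SIZE].mask |=
 *           1ULL << (i % RTE_ETH_RETA_GROUP_SIZE);
 *   reta[i / RTE_ETH_RETA_GROUP_SIZE].reta[i % RTE_ETH_RETA_GROUP_SIZE] = qid;
 *   rte_eth_dev_rss_reta_update(port_id, reta, 512);
 */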
for (idx = 0, i = 0; i < reta_size; i++) {
- idx = i / RTE_RETA_GROUP_SIZE;
- sft = i % RTE_RETA_GROUP_SIZE;
+ idx = i / RTE_ETH_RETA_GROUP_SIZE;
+ sft = i % RTE_ETH_RETA_GROUP_SIZE;
if (reta_conf[idx].mask & (1ULL << sft)) {
uint16_t qid;
* If RSS enablement were different than dev_configure,
* then return -EINVAL
*/
- if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
+ if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
if (!rss_conf->rss_hf)
PMD_DRV_LOG(ERR, "Hash type NONE\n");
} else {
vnic->hash_type = bnxt_rte_to_hwrm_hash_types(rss_conf->rss_hf);
vnic->hash_mode =
bnxt_rte_to_hwrm_hash_level(bp, rss_conf->rss_hf,
- ETH_RSS_LEVEL(rss_conf->rss_hf));
+ RTE_ETH_RSS_LEVEL(rss_conf->rss_hf));
/*
* If hashkey is not specified, use the previously configured
hash_types = vnic->hash_type;
rss_conf->rss_hf = 0;
if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4) {
- rss_conf->rss_hf |= ETH_RSS_IPV4;
+ rss_conf->rss_hf |= RTE_ETH_RSS_IPV4;
hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4;
}
if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4) {
- rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+ rss_conf->rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
hash_types &=
~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4;
}
if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4) {
- rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
+ rss_conf->rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
hash_types &=
~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4;
}
if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6) {
- rss_conf->rss_hf |= ETH_RSS_IPV6;
+ rss_conf->rss_hf |= RTE_ETH_RSS_IPV6;
hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6;
}
if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6) {
- rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
+ rss_conf->rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP;
hash_types &=
~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6;
}
if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6) {
- rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
+ rss_conf->rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_UDP;
hash_types &=
~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;
}
fc_conf->autoneg = 1;
switch (bp->link_info->pause) {
case 0:
- fc_conf->mode = RTE_FC_NONE;
+ fc_conf->mode = RTE_ETH_FC_NONE;
break;
case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX:
- fc_conf->mode = RTE_FC_TX_PAUSE;
+ fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
break;
case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX:
- fc_conf->mode = RTE_FC_RX_PAUSE;
+ fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
break;
case (HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX |
HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX):
- fc_conf->mode = RTE_FC_FULL;
+ fc_conf->mode = RTE_ETH_FC_FULL;
break;
}
return 0;
}
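/* Caller-side sketch (assumed): the pause-bit mapping above is what
 * rte_eth_dev_flow_ctrl_get() reports; RTE_ETH_FC_FULL means both Rx and
 * Tx pause are active.
 *
 *   struct rte_eth_fc_conf fc;
 *
 *   if (rte_eth_dev_flow_ctrl_get(port_id, &fc) == 0)
 *           full = (fc.mode == RTE_ETH_FC_FULL);
 */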
switch (fc_conf->mode) {
- case RTE_FC_NONE:
+ case RTE_ETH_FC_NONE:
bp->link_info->auto_pause = 0;
bp->link_info->force_pause = 0;
break;
- case RTE_FC_RX_PAUSE:
+ case RTE_ETH_FC_RX_PAUSE:
if (fc_conf->autoneg) {
bp->link_info->auto_pause =
HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX;
HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX;
}
break;
- case RTE_FC_TX_PAUSE:
+ case RTE_ETH_FC_TX_PAUSE:
if (fc_conf->autoneg) {
bp->link_info->auto_pause =
HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX;
HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX;
}
break;
- case RTE_FC_FULL:
+ case RTE_ETH_FC_FULL:
if (fc_conf->autoneg) {
bp->link_info->auto_pause =
HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX |
return rc;
switch (udp_tunnel->prot_type) {
- case RTE_TUNNEL_TYPE_VXLAN:
+ case RTE_ETH_TUNNEL_TYPE_VXLAN:
if (bp->vxlan_port_cnt) {
PMD_DRV_LOG(ERR, "Tunnel Port %d already programmed\n",
udp_tunnel->udp_port);
}
tunnel_type =
HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN;
- bp->vxlan_port_cnt++;
break;
- case RTE_TUNNEL_TYPE_GENEVE:
+ case RTE_ETH_TUNNEL_TYPE_GENEVE:
if (bp->geneve_port_cnt) {
PMD_DRV_LOG(ERR, "Tunnel Port %d already programmed\n",
udp_tunnel->udp_port);
}
tunnel_type =
HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE;
- bp->geneve_port_cnt++;
break;
default:
PMD_DRV_LOG(ERR, "Tunnel type is not supported\n");
}
rc = bnxt_hwrm_tunnel_dst_port_alloc(bp, udp_tunnel->udp_port,
tunnel_type);
+
+ if (rc != 0)
+ return rc;
+
+ if (tunnel_type ==
+ HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN)
+ bp->vxlan_port_cnt++;
+
+ if (tunnel_type ==
+ HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE)
+ bp->geneve_port_cnt++;
+
return rc;
}
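/* Minimal caller-side sketch (illustrative values): this op backs
 * rte_eth_dev_udp_tunnel_port_add(), e.g. for the IANA VXLAN port.
 *
 *   struct rte_eth_udp_tunnel tunnel = {
 *           .udp_port = 4789,
 *           .prot_type = RTE_ETH_TUNNEL_TYPE_VXLAN,
 *   };
 *
 *   rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);
 */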
return rc;
switch (udp_tunnel->prot_type) {
- case RTE_TUNNEL_TYPE_VXLAN:
+ case RTE_ETH_TUNNEL_TYPE_VXLAN:
if (!bp->vxlan_port_cnt) {
PMD_DRV_LOG(ERR, "No Tunnel port configured yet\n");
return -EINVAL;
HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN;
port = bp->vxlan_fw_dst_port_id;
break;
- case RTE_TUNNEL_TYPE_GENEVE:
+ case RTE_ETH_TUNNEL_TYPE_GENEVE:
if (!bp->geneve_port_cnt) {
PMD_DRV_LOG(ERR, "No Tunnel port configured yet\n");
return -EINVAL;
int rc;
vnic = BNXT_GET_DEFAULT_VNIC(bp);
- if (!(rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)) {
+ if (!(rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)) {
/* Remove any VLAN filters programmed */
for (i = 0; i < RTE_ETHER_MAX_VLAN_ID; i++)
bnxt_del_vlan_filter(bp, i);
bnxt_add_vlan_filter(bp, 0);
}
PMD_DRV_LOG(DEBUG, "VLAN Filtering: %d\n",
- !!(rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER));
+ !!(rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER));
return 0;
}
/* Destroy vnic filters and vnic */
if (bp->eth_dev->data->dev_conf.rxmode.offloads &
- DEV_RX_OFFLOAD_VLAN_FILTER) {
+ RTE_ETH_RX_OFFLOAD_VLAN_FILTER) {
for (i = 0; i < RTE_ETHER_MAX_VLAN_ID; i++)
bnxt_del_vlan_filter(bp, i);
}
return rc;
if (bp->eth_dev->data->dev_conf.rxmode.offloads &
- DEV_RX_OFFLOAD_VLAN_FILTER) {
+ RTE_ETH_RX_OFFLOAD_VLAN_FILTER) {
rc = bnxt_add_vlan_filter(bp, 0);
if (rc)
return rc;
return rc;
PMD_DRV_LOG(DEBUG, "VLAN Strip Offload: %d\n",
- !!(rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP));
+ !!(rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP));
return rc;
}
if (!dev->data->dev_started)
return 0;
- if (mask & ETH_VLAN_FILTER_MASK) {
+ if (mask & RTE_ETH_VLAN_FILTER_MASK) {
/* Enable or disable VLAN filtering */
rc = bnxt_config_vlan_hw_filter(bp, rx_offloads);
if (rc)
return rc;
}
- if (mask & ETH_VLAN_STRIP_MASK) {
+ if (mask & RTE_ETH_VLAN_STRIP_MASK) {
/* Enable or disable VLAN stripping */
rc = bnxt_config_vlan_hw_stripping(bp, rx_offloads);
if (rc)
return rc;
}
- if (mask & ETH_VLAN_EXTEND_MASK) {
- if (rx_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
+ if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
+ if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
PMD_DRV_LOG(DEBUG, "Extend VLAN supported\n");
else
PMD_DRV_LOG(INFO, "Extend VLAN unsupported\n");
{
struct bnxt *bp = dev->data->dev_private;
int qinq = dev->data->dev_conf.rxmode.offloads &
- DEV_RX_OFFLOAD_VLAN_EXTEND;
+ RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
- if (vlan_type != ETH_VLAN_TYPE_INNER &&
- vlan_type != ETH_VLAN_TYPE_OUTER) {
+ if (vlan_type != RTE_ETH_VLAN_TYPE_INNER &&
+ vlan_type != RTE_ETH_VLAN_TYPE_OUTER) {
PMD_DRV_LOG(ERR,
"Unsupported vlan type.");
return -EINVAL;
return -EINVAL;
}
- if (vlan_type == ETH_VLAN_TYPE_OUTER) {
+ if (vlan_type == RTE_ETH_VLAN_TYPE_OUTER) {
switch (tpid) {
case RTE_ETHER_TYPE_QINQ:
bp->outer_tpid_bd =
}
bp->outer_tpid_bd |= tpid;
PMD_DRV_LOG(INFO, "outer_tpid_bd = %x\n", bp->outer_tpid_bd);
- } else if (vlan_type == ETH_VLAN_TYPE_INNER) {
+ } else if (vlan_type == RTE_ETH_VLAN_TYPE_INNER) {
PMD_DRV_LOG(ERR,
"Can accelerate only outer vlan in QinQ\n");
return -EINVAL;
bnxt_del_dflt_mac_filter(bp, vnic);
memcpy(bp->mac_addr, addr, RTE_ETHER_ADDR_LEN);
- if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
+ if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) {
/* This filter will allow only untagged packets */
rc = bnxt_add_vlan_filter(bp, 0);
} else {
int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu)
{
+ uint32_t overhead = BNXT_MAX_PKT_LEN - BNXT_MAX_MTU;
struct bnxt *bp = eth_dev->data->dev_private;
uint32_t new_pkt_size;
- uint32_t rc = 0;
+ uint32_t rc;
uint32_t i;
rc = is_bnxt_in_error(bp);
if (!eth_dev->data->nb_rx_queues)
return rc;
- new_pkt_size = new_mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN +
- VLAN_TAG_SIZE * BNXT_NUM_VLANS;
+ new_pkt_size = new_mtu + overhead;
/*
* Disallow any MTU change that would require scattered receive support
return -EINVAL;
}
- if (new_mtu > RTE_ETHER_MTU) {
+ if (new_mtu > RTE_ETHER_MTU)
bp->flags |= BNXT_FLAG_JUMBO;
- bp->eth_dev->data->dev_conf.rxmode.offloads |=
- DEV_RX_OFFLOAD_JUMBO_FRAME;
- } else {
- bp->eth_dev->data->dev_conf.rxmode.offloads &=
- ~DEV_RX_OFFLOAD_JUMBO_FRAME;
+ else
bp->flags &= ~BNXT_FLAG_JUMBO;
- }
/* Is there a change in mtu setting? */
- if (eth_dev->data->dev_conf.rxmode.max_rx_pkt_len == new_pkt_size)
+ if (eth_dev->data->mtu == new_mtu)
return rc;
for (i = 0; i < bp->nr_vnics; i++) {
}
}
- if (!rc)
- eth_dev->data->dev_conf.rxmode.max_rx_pkt_len = new_pkt_size;
+ if (bnxt_hwrm_config_host_mtu(bp))
+ PMD_DRV_LOG(WARNING, "Failed to configure host MTU\n");
PMD_DRV_LOG(INFO, "New MTU is %d\n", new_mtu);
}
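/* Caller-side sketch (assumed): with max_rx_pkt_len removed, the MTU is
 * the single size knob and the frame size is derived from it, e.g. for an
 * illustrative jumbo setting:
 *
 *   rte_eth_dev_set_mtu(port_id, 9000);
 */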
static uint32_t
-bnxt_rx_queue_count_op(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+bnxt_rx_queue_count_op(void *rx_queue)
{
- struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct bnxt *bp;
struct bnxt_cp_ring_info *cpr;
- uint32_t desc = 0, raw_cons;
+ uint32_t desc = 0, raw_cons, cp_ring_size;
struct bnxt_rx_queue *rxq;
struct rx_pkt_cmpl *rxcmp;
int rc;
+ rxq = rx_queue;
+ bp = rxq->bp;
+
rc = is_bnxt_in_error(bp);
if (rc)
return rc;
- rxq = dev->data->rx_queues[rx_queue_id];
cpr = rxq->cp_ring;
raw_cons = cpr->cp_raw_cons;
+ cp_ring_size = cpr->cp_ring_struct->ring_size;
while (1) {
uint32_t agg_cnt, cons, cmpl_type;
cons = RING_CMP(cpr->cp_ring_struct, raw_cons);
rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
- if (!CMP_VALID(rxcmp, raw_cons, cpr->cp_ring_struct))
+ if (!bnxt_cpr_cmp_valid(rxcmp, raw_cons, cp_ring_size))
break;
cmpl_type = CMP_TYPE(rxcmp);
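/* A sketch of the helper used above, assuming the usual Broadcom
 * completion-ring convention: the producer toggles the valid bit on each
 * pass over the ring, so an entry is fresh when the bit matches the parity
 * of raw_cons relative to the ring size. The real helper also issues an
 * acquire barrier before the entry is read; names are illustrative.
 *
 *   static inline bool
 *   cmp_valid_sketch(const struct cmpl_base *c, uint32_t raw_cons,
 *                    uint32_t ring_size)
 *   {
 *           bool expect = !(raw_cons & ring_size);
 *
 *           return !!(rte_le_to_cpu_32(c->info3_v) & CMPL_BASE_V) == expect;
 *   }
 */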
struct bnxt_rx_queue *rxq = rx_queue;
struct bnxt_cp_ring_info *cpr;
struct bnxt_rx_ring_info *rxr;
- uint32_t desc, raw_cons;
+ uint32_t desc, raw_cons, cp_ring_size;
struct bnxt *bp = rxq->bp;
struct rx_pkt_cmpl *rxcmp;
int rc;
rxr = rxq->rx_ring;
cpr = rxq->cp_ring;
+ cp_ring_size = cpr->cp_ring_struct->ring_size;
/*
* For the vector receive case, the completion at the requested
cons = RING_CMP(cpr->cp_ring_struct, raw_cons);
rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
- if (CMP_VALID(rxcmp, raw_cons, cpr->cp_ring_struct))
+ if (bnxt_cpr_cmp_valid(rxcmp, raw_cons, cp_ring_size))
return RTE_ETH_RX_DESC_DONE;
/* Check whether rx desc has an mbuf attached. */
cons = RING_CMP(cpr->cp_ring_struct, raw_cons);
rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
- if (!CMP_VALID(rxcmp, raw_cons, cpr->cp_ring_struct))
+ if (!bnxt_cpr_cmp_valid(rxcmp, raw_cons, cp_ring_size))
break;
cmpl_type = CMP_TYPE(rxcmp);
struct bnxt_tx_queue *txq = (struct bnxt_tx_queue *)tx_queue;
struct bnxt_cp_ring_info *cpr = txq->cp_ring;
uint32_t ring_mask, raw_cons, nb_tx_pkts = 0;
- struct bnxt_ring *cp_ring_struct;
struct cmpl_base *cp_desc_ring;
int rc;
raw_cons = cpr->cp_raw_cons;
cp_desc_ring = cpr->cp_desc_ring;
- cp_ring_struct = cpr->cp_ring_struct;
ring_mask = cpr->cp_ring_struct->ring_mask;
/* Check to see if hw has posted a completion for the descriptor. */
cons = RING_CMPL(ring_mask, raw_cons);
txcmp = (struct tx_cmpl *)&cp_desc_ring[cons];
- if (!CMP_VALID(txcmp, raw_cons, cp_ring_struct))
+ if (!bnxt_cpr_cmp_valid(txcmp, raw_cons, ring_mask + 1))
break;
if (CMP_TYPE(txcmp) == TX_CMPL_TYPE_TX_L2)
int rc = 0;
if (info->flags & BNXT_FLAG_ERROR_RECOVERY_HOST) {
- /* Reset through master function driver */
+ /* Reset through primary function driver */
for (i = 0; i < info->reg_array_cnt; i++)
bnxt_write_fw_reset_reg(bp, i);
/* Wait for time specified by FW after triggering reset */
- rte_delay_ms(info->master_func_wait_period_after_reset);
+ rte_delay_ms(info->primary_func_wait_period_after_reset);
} else if (info->flags & BNXT_FLAG_ERROR_RECOVERY_CO_CPU) {
/* Reset with the help of Kong processor */
rc = bnxt_hwrm_fw_reset(bp);
struct bnxt_error_recovery_info *info = bp->recovery_info;
int rc = 0;
- /* Only Master function can do FW reset */
- if (bnxt_is_master_func(bp) &&
+ /* Only Primary function can do FW reset */
+ if (bnxt_is_primary_func(bp) &&
bnxt_is_recovery_enabled(bp)) {
rc = bnxt_fw_reset_all(bp);
if (rc) {
* advertised by FW in HWRM_ERROR_RECOVERY_QCFG.
* When the driver detects heartbeat stop or change in reset_counter,
* it has to trigger a reset to recover from the error condition.
- * A “master PF” is the function who will have the privilege to
- * initiate the chimp reset. The master PF will be elected by the
+ * A "primary function" is the function that has the privilege to
+ * initiate the chimp reset. The primary function will be elected by the
* firmware and will be notified through async message.
*/
static void bnxt_check_fw_health(void *arg)
PMD_DRV_LOG(ERR, "Detected FW dead condition\n");
- if (bnxt_is_master_func(bp))
- wait_msec = info->master_func_wait_period;
+ if (bnxt_is_primary_func(bp))
+ wait_msec = info->primary_func_wait_period;
else
wait_msec = info->normal_func_wait_period;
PMD_DRV_LOG(INFO, "VF MAC address not assigned by Host PF\n");
bnxt_eth_hw_addr_random(bp->mac_addr);
PMD_DRV_LOG(INFO,
- "Assign random MAC:%02X:%02X:%02X:%02X:%02X:%02X\n",
+ "Assign random MAC:" RTE_ETHER_ADDR_PRT_FMT "\n",
bp->mac_addr[0], bp->mac_addr[1], bp->mac_addr[2],
bp->mac_addr[3], bp->mac_addr[4], bp->mac_addr[5]);
bp->pf->vf_req_buf = NULL;
}
- rc = bnxt_hwrm_func_driver_unregister(bp, 0);
+ rc = bnxt_hwrm_func_driver_unregister(bp);
bp->flags &= ~BNXT_FLAG_REGISTERED;
bnxt_free_ctx_mem(bp);
if (!reconfig_dev) {
RTE_LOG_REGISTER_SUFFIX(bnxt_logtype_driver, driver, NOTICE);
RTE_PMD_REGISTER_PCI(net_bnxt, bnxt_rte_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_bnxt, bnxt_pci_id_map);
-RTE_PMD_REGISTER_KMOD_DEP(net_bnxt, "* igb_uio | uio_pci_generic | vfio-pci");
+