dev_info->rx_offload_capa = BNXT_DEV_RX_OFFLOAD_SUPPORT;
if (bp->flags & BNXT_FLAG_PTP_SUPPORTED)
dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TIMESTAMP;
- dev_info->tx_offload_capa = BNXT_DEV_TX_OFFLOAD_SUPPORT;
+ dev_info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+ dev_info->tx_offload_capa = BNXT_DEV_TX_OFFLOAD_SUPPORT |
+ dev_info->tx_queue_offload_capa;
dev_info->flow_type_rss_offloads = BNXT_ETH_RSS_SUPPORT;
dev_info->speed_capa = bnxt_get_speed_capabilities(bp);
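/*
 * Note: DEV_TX_OFFLOAD_MBUF_FAST_FREE is advertised per queue as well as
 * per port; ethdev requires every queue-level offload to also appear in
 * the port-level tx_offload_capa, which is why it is OR-ed in above.
 */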
{
#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
#ifndef RTE_LIBRTE_IEEE1588
+ uint64_t offloads = eth_dev->data->dev_conf.txmode.offloads;
struct bnxt *bp = eth_dev->data->dev_private;
/*
 * Vector mode transmit can be enabled only if not using scatter rx
 * or tx offloads.
 */
if (!eth_dev->data->scattered_rx &&
- !eth_dev->data->dev_conf.txmode.offloads &&
+ !(offloads & ~DEV_TX_OFFLOAD_MBUF_FAST_FREE) &&
!BNXT_TRUFLOW_EN(bp)) {
PMD_DRV_LOG(INFO, "Using vector mode transmit for port %d\n",
eth_dev->data->port_id);
"Port %d scatter: %d tx offload: %" PRIX64 "\n",
eth_dev->data->port_id,
eth_dev->data->scattered_rx,
- eth_dev->data->dev_conf.txmode.offloads);
+ offloads);
#endif
#endif
return bnxt_xmit_pkts;
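/*
 * The offload mask test above admits mbuf fast free as the only Tx
 * offload compatible with the vector path: with
 * offloads == DEV_TX_OFFLOAD_MBUF_FAST_FREE,
 * (offloads & ~DEV_TX_OFFLOAD_MBUF_FAST_FREE) evaluates to 0 and the
 * vector transmit routine is still selected; any other offload bit
 * forces the scalar bnxt_xmit_pkts() fallback.
 */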
eth_dev->data->scattered_rx = bnxt_scattered_rx(eth_dev);
eth_dev->data->dev_started = 1;
- bnxt_link_update(eth_dev, 1, ETH_LINK_UP);
+ bnxt_link_update_op(eth_dev, 1);
if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
vlan_mask |= ETH_VLAN_FILTER_MASK;
}
/* Unload the driver, release resources */
-static void bnxt_dev_stop_op(struct rte_eth_dev *eth_dev)
+static int bnxt_dev_stop_op(struct rte_eth_dev *eth_dev)
{
struct bnxt *bp = eth_dev->data->dev_private;
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct rte_eth_link link;
+ int ret;
eth_dev->data->dev_started = 0;
eth_dev->data->scattered_rx = 0;
rte_intr_disable(intr_handle);
/* Stop the child representors for this device */
- bnxt_rep_stop_all(bp);
+ ret = bnxt_rep_stop_all(bp);
+ if (ret != 0)
+ return ret;
/* delete the bnxt ULP port details */
bnxt_ulp_port_deinit(bp);
bnxt_cancel_fw_health_check(bp);
/* Do not bring link down during reset recovery */
- if (!is_bnxt_in_error(bp))
+ if (!is_bnxt_in_error(bp)) {
bnxt_dev_set_link_down_op(eth_dev);
-
- /* Wait for link to be reset and the async notification to process.
- * During reset recovery, there is no need to wait and
- * VF/NPAR functions do not have privilege to change PHY config.
- */
- if (!is_bnxt_in_error(bp) && BNXT_SINGLE_PF(bp))
- bnxt_link_update(eth_dev, 1, ETH_LINK_DOWN);
+ /* Wait for link to be reset */
+ if (BNXT_SINGLE_PF(bp))
+ rte_delay_ms(500);
+ /* clear the recorded link status */
+ memset(&link, 0, sizeof(link));
+ rte_eth_linkstatus_set(eth_dev, &link);
+ }
/* Clean queue intr-vector mapping */
rte_intr_efd_disable(intr_handle);
/* All filters are deleted on a port stop. */
if (BNXT_FLOW_XSTATS_EN(bp))
bp->flow_stat->flow_count = 0;
+
+ return 0;
}
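/*
 * With dev_stop returning int, a failure from bnxt_rep_stop_all() now
 * reaches the application through rte_eth_dev_stop(). A minimal
 * caller-side sketch (assuming a DPDK 20.11+ application):
 *
 *     int ret = rte_eth_dev_stop(port_id);
 *     if (ret != 0)
 *         printf("Failed to stop port %u: %s\n",
 *                port_id, rte_strerror(-ret));
 */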
static int bnxt_dev_close_op(struct rte_eth_dev *eth_dev)
{
struct bnxt *bp = eth_dev->data->dev_private;
+ int ret = 0;
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return 0;
bnxt_cancel_fc_thread(bp);
if (eth_dev->data->dev_started)
- bnxt_dev_stop_op(eth_dev);
+ ret = bnxt_dev_stop_op(eth_dev);
bnxt_free_switch_domain(bp);
bnxt_free_pf_info(bp);
bnxt_free_parent_info(bp);
- eth_dev->dev_ops = NULL;
- eth_dev->rx_pkt_burst = NULL;
- eth_dev->tx_pkt_burst = NULL;
-
rte_memzone_free((const struct rte_memzone *)bp->tx_mem_zone);
bp->tx_mem_zone = NULL;
rte_memzone_free((const struct rte_memzone *)bp->rx_mem_zone);
bp->rx_mem_zone = NULL;
rte_free(bp->grp_info);
bp->grp_info = NULL;
- return 0;
+ return ret;
}
static void bnxt_mac_addr_remove_op(struct rte_eth_dev *eth_dev,
return rc;
}
-int bnxt_link_update(struct rte_eth_dev *eth_dev, int wait_to_complete,
- bool exp_link_status)
+int bnxt_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_complete)
{
int rc = 0;
struct bnxt *bp = eth_dev->data->dev_private;
struct rte_eth_link new;
- int cnt = exp_link_status ? BNXT_LINK_UP_WAIT_CNT :
- BNXT_LINK_DOWN_WAIT_CNT;
+ int cnt = wait_to_complete ? BNXT_MAX_LINK_WAIT_CNT :
+ BNXT_MIN_LINK_WAIT_CNT;
rc = is_bnxt_in_error(bp);
if (rc)
goto out;
}
- if (!wait_to_complete || new.link_status == exp_link_status)
+ if (!wait_to_complete || new.link_status)
break;
rte_delay_ms(BNXT_LINK_WAIT_INTERVAL);
} while (cnt--);
+ /* Only a single-function PF can bring the PHY down.
+ * When the port is stopped, report link down for VF/MH/NPAR functions.
+ */
+ if (!BNXT_SINGLE_PF(bp) && !eth_dev->data->dev_started)
+ memset(&new, 0, sizeof(new));
+
out:
/* Timed out or success */
if (new.link_status != eth_dev->data->dev_link.link_status ||
return rc;
}
-int bnxt_link_update_op(struct rte_eth_dev *eth_dev,
- int wait_to_complete)
-{
- return bnxt_link_update(eth_dev, wait_to_complete, ETH_LINK_UP);
-}
-
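/*
 * bnxt_link_update_op() is wired to the ethdev .link_update callback,
 * so wait_to_complete comes straight from the public API. A usage
 * sketch (assuming standard rte_ethdev calls):
 *
 *     struct rte_eth_link link;
 *     rte_eth_link_get(port_id, &link);        // wait_to_complete == 1
 *     rte_eth_link_get_nowait(port_id, &link); // wait_to_complete == 0
 */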
static int bnxt_promiscuous_enable_op(struct rte_eth_dev *eth_dev)
{
struct bnxt *bp = eth_dev->data->dev_private;
/* Update the default RSS VNIC(s) */
vnic = BNXT_GET_DEFAULT_VNIC(bp);
vnic->hash_type = bnxt_rte_to_hwrm_hash_types(rss_conf->rss_hf);
+ vnic->hash_mode =
+ bnxt_rte_to_hwrm_hash_level(bp, rss_conf->rss_hf,
+ ETH_RSS_LEVEL(rss_conf->rss_hf));
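/*
 * ETH_RSS_LEVEL() extracts the tunnel hash level encoded in rss_hf,
 * letting applications request inner- or outer-header hashing for
 * tunneled traffic. A configuration sketch (assuming the generic
 * ethdev RSS flags):
 *
 *     rss_conf.rss_hf = ETH_RSS_IP | ETH_RSS_LEVEL_INNERMOST;
 *     rte_eth_dev_rss_hash_update(port_id, &rss_conf);
 */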
/*
* If hashkey is not specified, use the previously configured
hash_types &=
~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;
}
+
+ rss_conf->rss_hf |=
+ bnxt_hwrm_to_rte_rss_level(bp, vnic->hash_mode);
+
if (hash_types) {
PMD_DRV_LOG(ERR,
"Unknown RSS config from firmware (%08x), RSS disabled",
}
rc = bnxt_hwrm_tunnel_dst_port_free(bp, port, tunnel_type);
- if (!rc) {
- if (tunnel_type ==
- HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN)
- bp->vxlan_port = 0;
- if (tunnel_type ==
- HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE)
- bp->geneve_port = 0;
- }
return rc;
}
qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
qinfo->conf.tx_rs_thresh = 0;
qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
- qinfo->conf.offloads = dev->data->dev_conf.txmode.offloads;
+ qinfo->conf.offloads = txq->offloads;
}
static const struct {
if (!BNXT_PF(bp))
return;
-#define ALLOW_FUNC(x) \
- { \
- uint32_t arg = (x); \
- bp->pf->vf_req_fwd[((arg) >> 5)] &= \
- ~rte_cpu_to_le_32(1 << ((arg) & 0x1f)); \
- }
-
- /* Forward all requests if firmware is new enough */
- if (((bp->fw_ver >= ((20 << 24) | (6 << 16) | (100 << 8))) &&
- (bp->fw_ver < ((20 << 24) | (7 << 16)))) ||
- ((bp->fw_ver >= ((20 << 24) | (8 << 16))))) {
- memset(bp->pf->vf_req_fwd, 0xff, sizeof(bp->pf->vf_req_fwd));
- } else {
- PMD_DRV_LOG(WARNING,
- "Firmware too old for VF mailbox functionality\n");
- memset(bp->pf->vf_req_fwd, 0, sizeof(bp->pf->vf_req_fwd));
- }
+ memset(bp->pf->vf_req_fwd, 0, sizeof(bp->pf->vf_req_fwd));
- /*
- * The following are used for driver cleanup. If we disallow these,
- * VF drivers can't clean up cleanly.
- */
- ALLOW_FUNC(HWRM_FUNC_DRV_UNRGTR);
- ALLOW_FUNC(HWRM_VNIC_FREE);
- ALLOW_FUNC(HWRM_RING_FREE);
- ALLOW_FUNC(HWRM_RING_GRP_FREE);
- ALLOW_FUNC(HWRM_VNIC_RSS_COS_LB_CTX_FREE);
- ALLOW_FUNC(HWRM_CFA_L2_FILTER_FREE);
- ALLOW_FUNC(HWRM_STAT_CTX_FREE);
- ALLOW_FUNC(HWRM_PORT_PHY_QCFG);
- ALLOW_FUNC(HWRM_VNIC_TPA_CFG);
+ if (!(bp->fw_cap & BNXT_FW_CAP_LINK_ADMIN))
+ BNXT_HWRM_CMD_TO_FORWARD(HWRM_PORT_PHY_QCFG);
+ BNXT_HWRM_CMD_TO_FORWARD(HWRM_FUNC_CFG);
+ BNXT_HWRM_CMD_TO_FORWARD(HWRM_FUNC_VF_CFG);
+ BNXT_HWRM_CMD_TO_FORWARD(HWRM_CFA_L2_FILTER_ALLOC);
+ BNXT_HWRM_CMD_TO_FORWARD(HWRM_OEM_CMD);
}
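/*
 * BNXT_HWRM_CMD_TO_FORWARD() sets the bit for one HWRM command in the
 * vf_req_fwd bitmap, so only the listed VF requests are forwarded to
 * the PF driver for inspection. A sketch of the helper (the bnxt.h
 * definition is assumed to be the usual bitmap set):
 *
 *     #define BNXT_HWRM_CMD_TO_FORWARD(cmd) \
 *         (bp->pf->vf_req_fwd[(cmd) / 32] |= (1 << ((cmd) % 32)))
 */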
uint16_t
return 0;
rte_eth_copy_pci_info(eth_dev, pci_dev);
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
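/*
 * RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS asks the ethdev layer to derive the
 * per-queue basic xstats (q_ipackets, q_opackets, ...) from the queue
 * counters in rte_eth_stats, so the driver no longer exposes them
 * itself.
 */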
bp = eth_dev->data->dev_private;
bnxt_free_int(bp);
bnxt_free_mem(bp, reconfig_dev);
+
bnxt_hwrm_func_buf_unrgtr(bp);
+ rte_free(bp->pf->vf_req_buf);
+
rc = bnxt_hwrm_func_driver_unregister(bp, 0);
bp->flags &= ~BNXT_FLAG_REGISTERED;
bnxt_free_ctx_mem(bp);