dev_info->hash_key_size = 40;
max_vnics = bp->max_vnics;
+ /* MTU specifics */
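+ /*
+ * Exporting min_mtu/max_mtu lets the generic rte_eth_dev_set_mtu()
+ * range-check requested MTUs, which is why the driver-local bounds
+ * check is removed from bnxt_mtu_set_op() below.
+ */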
+ dev_info->min_mtu = RTE_ETHER_MIN_MTU;
+ dev_info->max_mtu = BNXT_MAX_MTU;
+
/* Fast path specifics */
dev_info->min_rx_bufsize = 1;
- dev_info->max_rx_pktlen = BNXT_MAX_MTU + RTE_ETHER_HDR_LEN +
- RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE * 2;
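+ /*
+ * BNXT_MAX_PKT_LEN is assumed to fold the open-coded sum above
+ * (BNXT_MAX_MTU + Ethernet header + CRC + two VLAN tags) into a
+ * single macro shared across the driver.
+ */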
+ dev_info->max_rx_pktlen = BNXT_MAX_PKT_LEN;
dev_info->rx_offload_capa = BNXT_DEV_RX_OFFLOAD_SUPPORT;
if (bp->flags & BNXT_FLAG_PTP_SUPPORTED)
bnxt_receive_function(__rte_unused struct rte_eth_dev *eth_dev)
{
#ifdef RTE_ARCH_X86
+#ifndef RTE_LIBRTE_IEEE1588
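+ /*
+ * The vector RX path does not deliver PTP timestamps, so it is
+ * compiled out whenever IEEE 1588 support is built in.
+ */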
/*
* Vector mode receive can be enabled only if scatter rx is not
* in use and rx offloads are limited to VLAN stripping and
eth_dev->data->port_id,
eth_dev->data->scattered_rx,
eth_dev->data->dev_conf.rxmode.offloads);
+#endif
#endif
return bnxt_recv_pkts;
}
bnxt_transmit_function(__rte_unused struct rte_eth_dev *eth_dev)
{
#ifdef RTE_ARCH_X86
+#ifndef RTE_LIBRTE_IEEE1588
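+ /* As with RX: vector TX is compiled out when IEEE 1588 is built in. */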
/*
* Vector mode transmit can be enabled only if not using scatter rx
* or tx offloads.
eth_dev->data->port_id,
eth_dev->data->scattered_rx,
eth_dev->data->dev_conf.txmode.offloads);
+#endif
#endif
return bnxt_xmit_pkts;
}
bp->rx_cp_nr_rings, RTE_ETHDEV_QUEUE_STAT_CNTRS);
}
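+ /*
+ * Enable interrupts before bnxt_hwrm_if_change() so async events
+ * raised while the IF change (and a possible hot firmware reset)
+ * completes are serviced; the later enable call is dropped below.
+ */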
+ bnxt_enable_int(bp);
rc = bnxt_hwrm_if_change(bp, 1);
if (!rc) {
if (bp->flags & BNXT_FLAG_IF_CHANGE_HOT_FW_RESET_DONE) {
eth_dev->rx_pkt_burst = bnxt_receive_function(eth_dev);
eth_dev->tx_pkt_burst = bnxt_transmit_function(eth_dev);
- bnxt_enable_int(bp);
bp->flags |= BNXT_FLAG_INIT_DONE;
eth_dev->data->dev_started = 1;
bp->dev_stopped = 0;
/* TBD: STOP HW queues DMA */
eth_dev->data->dev_link.link_status = 0;
}
- bnxt_set_hwrm_link_config(bp, false);
+ bnxt_dev_set_link_down_op(eth_dev);
+ /* Wait for the link to be reset and the async notification to be processed. */
+ rte_delay_ms(BNXT_LINK_WAIT_INTERVAL * 2);
/* Clean queue intr-vector mapping */
rte_intr_efd_disable(intr_handle);
bnxt_hwrm_port_clr_stats(bp);
bnxt_free_tx_mbufs(bp);
bnxt_free_rx_mbufs(bp);
+ /* Process any remaining notifications in default completion queue */
+ bnxt_int_handler(eth_dev);
bnxt_shutdown_nic(bp);
bnxt_hwrm_if_change(bp, 0);
bp->dev_stopped = 1;
/* Timed out or success */
if (new.link_status != eth_dev->data->dev_link.link_status ||
new.link_speed != eth_dev->data->dev_link.link_speed) {
- memcpy(&eth_dev->data->dev_link, &new,
- sizeof(struct rte_eth_link));
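+ /*
+ * rte_eth_linkstatus_set() updates the link struct atomically,
+ * avoiding the torn reads and writes a plain memcpy() permits.
+ */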
+ rte_eth_linkstatus_set(eth_dev, &new);
_rte_eth_dev_callback_process(eth_dev,
RTE_ETH_EVENT_INTR_LSC,
qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
qinfo->conf.rx_drop_en = 0;
- qinfo->conf.rx_deferred_start = 0;
+ qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
}
static void
static int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu)
{
struct bnxt *bp = eth_dev->data->dev_private;
- struct rte_eth_dev_info dev_info;
uint32_t new_pkt_size;
uint32_t rc = 0;
uint32_t i;
new_pkt_size = new_mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN +
VLAN_TAG_SIZE * BNXT_NUM_VLANS;
- rc = bnxt_dev_info_get_op(eth_dev, &dev_info);
- if (rc != 0) {
- PMD_DRV_LOG(ERR, "Error during getting ethernet device info\n");
- return rc;
- }
-
- if (new_mtu < RTE_ETHER_MIN_MTU || new_mtu > BNXT_MAX_MTU) {
- PMD_DRV_LOG(ERR, "MTU requested must be within (%d, %d)\n",
- RTE_ETHER_MIN_MTU, BNXT_MAX_MTU);
- return -EINVAL;
- }
-
#ifdef RTE_ARCH_X86
/*
* If vector-mode tx/rx is active, disallow any MTU change that would
eth_dev->data->dev_conf.rxmode.max_rx_pkt_len = new_pkt_size;
- eth_dev->data->mtu = new_mtu;
- PMD_DRV_LOG(INFO, "New MTU is %d\n", eth_dev->data->mtu);
-
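+ /*
+ * eth_dev->data->mtu is written by rte_eth_dev_set_mtu() once this
+ * op returns success, so new_mtu must be used directly below.
+ */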
for (i = 0; i < bp->nr_vnics; i++) {
struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
uint16_t size = 0;
- vnic->mru = bp->eth_dev->data->mtu + RTE_ETHER_HDR_LEN +
- RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE * 2;
+ vnic->mru = new_mtu + RTE_ETHER_HDR_LEN +
+ RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE * 2;
rc = bnxt_hwrm_vnic_cfg(bp, vnic);
if (rc)
break;
}
}
+ PMD_DRV_LOG(INFO, "New MTU is %d\n", new_mtu);
+
return rc;
}
static int
bnxt_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
{
- uint64_t ns, systime_cycles;
struct bnxt *bp = dev->data->dev_private;
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ uint64_t ns, systime_cycles = 0;
+ int rc = 0;
if (!ptp)
return 0;
- systime_cycles = bnxt_cc_read(bp);
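+ /*
+ * Thor-based (BCM57500) controllers expose the PHC through the
+ * HWRM port_ts_query command instead of mapped cycle-counter
+ * registers, hence the Thor-specific branches here and in the
+ * timestamp read paths below.
+ */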
+ if (BNXT_CHIP_THOR(bp))
+ rc = bnxt_hwrm_port_ts_query(bp, BNXT_PTP_FLAGS_CURRENT_TIME,
+ &systime_cycles);
+ else
+ systime_cycles = bnxt_cc_read(bp);
+
ns = rte_timecounter_update(&ptp->tc, systime_cycles);
*ts = rte_ns_to_timespec(ns);
- return 0;
+ return rc;
}
static int
bnxt_timesync_enable(struct rte_eth_dev *dev)
struct bnxt *bp = dev->data->dev_private;
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
uint32_t shift = 0;
+ int rc;
if (!ptp)
return 0;
ptp->tx_tstamp_en = 1;
ptp->rxctl = BNXT_PTP_MSG_EVENTS;
- if (!bnxt_hwrm_ptp_cfg(bp))
- bnxt_map_ptp_regs(bp);
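+ /* A failed HWRM PTP config now fails enable instead of being ignored. */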
+ rc = bnxt_hwrm_ptp_cfg(bp);
+ if (rc)
+ return rc;
memset(&ptp->tc, 0, sizeof(struct rte_timecounter));
memset(&ptp->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
ptp->tx_tstamp_tc.cc_shift = shift;
ptp->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
+ if (!BNXT_CHIP_THOR(bp))
+ bnxt_map_ptp_regs(bp);
+
return 0;
}
bnxt_hwrm_ptp_cfg(bp);
- bnxt_unmap_ptp_regs(bp);
+ if (!BNXT_CHIP_THOR(bp))
+ bnxt_unmap_ptp_regs(bp);
return 0;
}
if (!ptp)
return 0;
- bnxt_get_rx_ts(bp, &rx_tstamp_cycles);
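+ /*
+ * On Thor the RX timestamp is cached in ptp->rx_timestamp (captured
+ * elsewhere in the RX path) rather than read from timestamp FIFO
+ * registers.
+ */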
+ if (BNXT_CHIP_THOR(bp))
+ rx_tstamp_cycles = ptp->rx_timestamp;
+ else
+ bnxt_get_rx_ts(bp, &rx_tstamp_cycles);
+
ns = rte_timecounter_update(&ptp->rx_tstamp_tc, rx_tstamp_cycles);
*timestamp = rte_ns_to_timespec(ns);
return 0;
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
uint64_t tx_tstamp_cycles = 0;
uint64_t ns;
+ int rc = 0;
if (!ptp)
return 0;
- bnxt_get_tx_ts(bp, &tx_tstamp_cycles);
+ if (BNXT_CHIP_THOR(bp))
+ rc = bnxt_hwrm_port_ts_query(bp, BNXT_PTP_FLAGS_PATH_TX,
+ &tx_tstamp_cycles);
+ else
+ rc = bnxt_get_tx_ts(bp, &tx_tstamp_cycles);
+
ns = rte_timecounter_update(&ptp->tx_tstamp_tc, tx_tstamp_cycles);
*timestamp = rte_ns_to_timespec(ns);
- return 0;
+ return rc;
}
static int
}
}
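+ /*
+ * Free the PTP configuration on teardown and clear the pointer so
+ * subsequent "if (!ptp)" checks bail out safely.
+ */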
+ rte_free(bp->ptp_cfg);
+ bp->ptp_cfg = NULL;
return rc;
}