X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fbnxt%2Fbnxt_ethdev.c;h=c953979cca2a45e24981e95f85bf86c32ba888a9;hb=d24610f7bfdaa4eb12ca1aed04d60a73b8ea7745;hp=fbd00d1c7235bfa4c8aaeb019b918e5ecaf846da;hpb=1db688997f2323b35bd64f1b47c7a0d7171339ee;p=dpdk.git

diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c
index fbd00d1c72..c953979cca 100644
--- a/drivers/net/bnxt/bnxt_ethdev.c
+++ b/drivers/net/bnxt/bnxt_ethdev.c
@@ -151,6 +151,7 @@ static const struct rte_pci_id bnxt_pci_id_map[] = {
 				     DEV_TX_OFFLOAD_GRE_TNL_TSO | \
 				     DEV_TX_OFFLOAD_IPIP_TNL_TSO | \
 				     DEV_TX_OFFLOAD_GENEVE_TNL_TSO | \
+				     DEV_TX_OFFLOAD_QINQ_INSERT | \
 				     DEV_TX_OFFLOAD_MULTI_SEGS)

 #define BNXT_DEV_RX_OFFLOAD_SUPPORT (DEV_RX_OFFLOAD_VLAN_FILTER | \
@@ -161,6 +162,7 @@ static const struct rte_pci_id bnxt_pci_id_map[] = {
 				     DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | \
 				     DEV_RX_OFFLOAD_JUMBO_FRAME | \
 				     DEV_RX_OFFLOAD_KEEP_CRC | \
+				     DEV_RX_OFFLOAD_VLAN_EXTEND | \
 				     DEV_RX_OFFLOAD_TCP_LRO)

 static int bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask);
@@ -187,7 +189,7 @@ int is_bnxt_in_error(struct bnxt *bp)
  * High level utility functions
  */

-static uint16_t bnxt_rss_ctxts(const struct bnxt *bp)
+uint16_t bnxt_rss_ctxts(const struct bnxt *bp)
 {
 	if (!BNXT_CHIP_THOR(bp))
 		return 1;
@@ -316,17 +318,10 @@ static int bnxt_init_chip(struct bnxt *bp)
 	for (i = 0; i < bp->nr_vnics; i++) {
 		struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
 		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
-		uint32_t size = sizeof(*vnic->fw_grp_ids) * bp->max_ring_grps;

-		vnic->fw_grp_ids = rte_zmalloc("vnic_fw_grp_ids", size, 0);
-		if (!vnic->fw_grp_ids) {
-			PMD_DRV_LOG(ERR,
-				    "Failed to alloc %d bytes for group ids\n",
-				    size);
-			rc = -ENOMEM;
+		rc = bnxt_vnic_grp_alloc(bp, vnic);
+		if (rc)
 			goto err_out;
-		}
-		memset(vnic->fw_grp_ids, -1, size);

 		PMD_DRV_LOG(DEBUG, "vnic[%d] = %p vnic->fw_grp_ids = %p\n",
 			    i, vnic, vnic->fw_grp_ids);
@@ -382,7 +377,7 @@ static int bnxt_init_chip(struct bnxt *bp)
 			goto err_out;
 		}

-		for (j = 0; j < bp->rx_nr_rings; j++) {
+		for (j = 0; j < bp->rx_num_qs_per_vnic; j++) {
 			rxq = bp->eth_dev->data->rx_queues[j];

 			PMD_DRV_LOG(DEBUG,
@@ -521,6 +516,7 @@ static int bnxt_init_nic(struct bnxt *bp)
 static int bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
 				struct rte_eth_dev_info *dev_info)
 {
+	struct rte_pci_device *pdev = RTE_DEV_TO_PCI(eth_dev->device);
 	struct bnxt *bp = eth_dev->data->dev_private;
 	uint16_t max_vnics, i, j, vpool, vrxq;
 	unsigned int max_rx_rings;
@@ -536,7 +532,8 @@ static int bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,

 	/* PF/VF specifics */
 	if (BNXT_PF(bp))
-		dev_info->max_vfs = bp->pdev->max_vfs;
+		dev_info->max_vfs = pdev->max_vfs;
+
 	max_rx_rings = RTE_MIN(bp->max_rx_rings, bp->max_stat_ctx);
 	/* For the sake of symmetry, max_rx_queues = max_tx_queues */
 	dev_info->max_rx_queues = max_rx_rings;
@@ -545,10 +542,13 @@ static int bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
 	dev_info->hash_key_size = 40;
 	max_vnics = bp->max_vnics;

+	/* MTU specifics */
+	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
+	dev_info->max_mtu = BNXT_MAX_MTU;
+
 	/* Fast path specifics */
 	dev_info->min_rx_bufsize = 1;
-	dev_info->max_rx_pktlen = BNXT_MAX_MTU + RTE_ETHER_HDR_LEN +
-				  RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE * 2;
+	dev_info->max_rx_pktlen = BNXT_MAX_PKT_LEN;

 	dev_info->rx_offload_capa = BNXT_DEV_RX_OFFLOAD_SUPPORT;
 	if (bp->flags & BNXT_FLAG_PTP_SUPPORTED)
@@ -740,6 +740,7 @@ static eth_rx_burst_t
 bnxt_receive_function(__rte_unused struct rte_eth_dev *eth_dev)
 {
 #ifdef RTE_ARCH_X86
+#ifndef RTE_LIBRTE_IEEE1588
 	/*
 	 * Vector mode receive can be enabled only if scatter rx is not
 	 * in use and rx offloads are limited to VLAN stripping and
@@ -766,6 +767,7 @@ bnxt_receive_function(__rte_unused struct rte_eth_dev *eth_dev)
 		    eth_dev->data->port_id,
 		    eth_dev->data->scattered_rx,
 		    eth_dev->data->dev_conf.rxmode.offloads);
+#endif
 #endif
 	return bnxt_recv_pkts;
 }
@@ -774,6 +776,7 @@ static eth_tx_burst_t
 bnxt_transmit_function(__rte_unused struct rte_eth_dev *eth_dev)
 {
 #ifdef RTE_ARCH_X86
+#ifndef RTE_LIBRTE_IEEE1588
 	/*
 	 * Vector mode transmit can be enabled only if not using scatter rx
 	 * or tx offloads.
@@ -791,6 +794,7 @@ bnxt_transmit_function(__rte_unused struct rte_eth_dev *eth_dev)
 		    eth_dev->data->port_id,
 		    eth_dev->data->scattered_rx,
 		    eth_dev->data->dev_conf.txmode.offloads);
+#endif
 #endif
 	return bnxt_xmit_pkts;
 }
@@ -827,6 +831,7 @@ static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev)
 			bp->rx_cp_nr_rings, RTE_ETHDEV_QUEUE_STAT_CNTRS);
 	}

+	bnxt_enable_int(bp);
 	rc = bnxt_hwrm_if_change(bp, 1);
 	if (!rc) {
 		if (bp->flags & BNXT_FLAG_IF_CHANGE_HOT_FW_RESET_DONE) {
@@ -855,7 +860,6 @@ static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev)
 	eth_dev->rx_pkt_burst = bnxt_receive_function(eth_dev);
 	eth_dev->tx_pkt_burst = bnxt_transmit_function(eth_dev);

-	bnxt_enable_int(bp);
 	bp->flags |= BNXT_FLAG_INIT_DONE;
 	eth_dev->data->dev_started = 1;
 	bp->dev_stopped = 0;
@@ -919,7 +923,9 @@ static void bnxt_dev_stop_op(struct rte_eth_dev *eth_dev)
 		/* TBD: STOP HW queues DMA */
 		eth_dev->data->dev_link.link_status = 0;
 	}
-	bnxt_set_hwrm_link_config(bp, false);
+	bnxt_dev_set_link_down_op(eth_dev);
+	/* Wait for link to be reset and the async notification to process. */
+	rte_delay_ms(BNXT_LINK_WAIT_INTERVAL * 2);

 	/* Clean queue intr-vector mapping */
 	rte_intr_efd_disable(intr_handle);
@@ -931,6 +937,8 @@ static void bnxt_dev_stop_op(struct rte_eth_dev *eth_dev)
 	bnxt_hwrm_port_clr_stats(bp);
 	bnxt_free_tx_mbufs(bp);
 	bnxt_free_rx_mbufs(bp);
+	/* Process any remaining notifications in default completion queue */
+	bnxt_int_handler(eth_dev);
 	bnxt_shutdown_nic(bp);
 	bnxt_hwrm_if_change(bp, 0);
 	bp->dev_stopped = 1;
@@ -1077,8 +1085,7 @@ out:
 	/* Timed out or success */
 	if (new.link_status != eth_dev->data->dev_link.link_status ||
 	    new.link_speed != eth_dev->data->dev_link.link_speed) {
-		memcpy(&eth_dev->data->dev_link, &new,
-		       sizeof(struct rte_eth_link));
+		rte_eth_linkstatus_set(eth_dev, &new);

 		_rte_eth_dev_callback_process(eth_dev,
 					      RTE_ETH_EVENT_INTR_LSC,
@@ -1104,7 +1111,7 @@ static int bnxt_promiscuous_enable_op(struct rte_eth_dev *eth_dev)
 	if (bp->vnic_info == NULL)
 		return 0;

-	vnic = &bp->vnic_info[0];
+	vnic = BNXT_GET_DEFAULT_VNIC(bp);

 	old_flags = vnic->flags;
 	vnic->flags |= BNXT_VNIC_INFO_PROMISC;
@@ -1129,7 +1136,7 @@ static int bnxt_promiscuous_disable_op(struct rte_eth_dev *eth_dev)
 	if (bp->vnic_info == NULL)
 		return 0;

-	vnic = &bp->vnic_info[0];
+	vnic = BNXT_GET_DEFAULT_VNIC(bp);

 	old_flags = vnic->flags;
 	vnic->flags &= ~BNXT_VNIC_INFO_PROMISC;
@@ -1154,7 +1161,7 @@ static int bnxt_allmulticast_enable_op(struct rte_eth_dev *eth_dev)
 	if (bp->vnic_info == NULL)
 		return 0;

-	vnic = &bp->vnic_info[0];
+	vnic = BNXT_GET_DEFAULT_VNIC(bp);

 	old_flags = vnic->flags;
 	vnic->flags |= BNXT_VNIC_INFO_ALLMULTI;
@@ -1179,7 +1186,7 @@ static int bnxt_allmulticast_disable_op(struct rte_eth_dev *eth_dev)
 	if (bp->vnic_info == NULL)
 		return 0;

-	vnic = &bp->vnic_info[0];
+	vnic = BNXT_GET_DEFAULT_VNIC(bp);

 	old_flags = vnic->flags;
 	vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI;
@@ -1339,8 +1346,6 @@ static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev,
 	struct bnxt *bp = eth_dev->data->dev_private;
 	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
 	struct bnxt_vnic_info *vnic;
-	uint16_t hash_type = 0;
-	unsigned int i;
 	int rc;

 	rc = is_bnxt_in_error(bp);
@@ -1362,35 +1367,20 @@ static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev,
 	bp->flags |= BNXT_FLAG_UPDATE_HASH;
 	memcpy(&bp->rss_conf, rss_conf, sizeof(*rss_conf));

-	if (rss_conf->rss_hf & ETH_RSS_IPV4)
-		hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4;
-	if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
-		hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4;
-	if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
-		hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4;
-	if (rss_conf->rss_hf & ETH_RSS_IPV6)
-		hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6;
-	if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
-		hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6;
-	if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
-		hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;
-
-	/* Update the RSS VNIC(s) */
-	for (i = 0; i < bp->nr_vnics; i++) {
-		vnic = &bp->vnic_info[i];
-		vnic->hash_type = hash_type;
+	/* Update the default RSS VNIC(s) */
+	vnic = &bp->vnic_info[0];
+	vnic->hash_type = bnxt_rte_to_hwrm_hash_types(rss_conf->rss_hf);

-		/*
-		 * Use the supplied key if the key length is
-		 * acceptable and the rss_key is not NULL
-		 */
-		if (rss_conf->rss_key &&
-		    rss_conf->rss_key_len <= HW_HASH_KEY_SIZE)
-			memcpy(vnic->rss_hash_key, rss_conf->rss_key,
-			       rss_conf->rss_key_len);
+	/*
+	 * Use the supplied key if the key length is
+	 * acceptable and the rss_key is not NULL
+	 */
+	if (rss_conf->rss_key && rss_conf->rss_key_len <= HW_HASH_KEY_SIZE)
+		memcpy(vnic->rss_hash_key,
+		       rss_conf->rss_key,
+		       rss_conf->rss_key_len);

-		bnxt_hwrm_vnic_rss_cfg(bp, vnic);
-	}
+	bnxt_hwrm_vnic_rss_cfg(bp, vnic);

 	return 0;
 }
@@ -1819,15 +1809,77 @@ bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask)
 				!!(rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP));
 	}

-	if (mask & ETH_VLAN_EXTEND_MASK)
-		PMD_DRV_LOG(ERR, "Extend VLAN Not supported\n");
+	if (mask & ETH_VLAN_EXTEND_MASK) {
+		if (rx_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
+			PMD_DRV_LOG(DEBUG, "Extend VLAN supported\n");
+		else
+			PMD_DRV_LOG(INFO, "Extend VLAN unsupported\n");
+	}
+
+	return 0;
+}
+
+static int
+bnxt_vlan_tpid_set_op(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type,
+		      uint16_t tpid)
+{
+	struct bnxt *bp = dev->data->dev_private;
+	int qinq = dev->data->dev_conf.rxmode.offloads &
+		   DEV_RX_OFFLOAD_VLAN_EXTEND;
+
+	if (vlan_type != ETH_VLAN_TYPE_INNER &&
+	    vlan_type != ETH_VLAN_TYPE_OUTER) {
+		PMD_DRV_LOG(ERR,
+			    "Unsupported vlan type.");
+		return -EINVAL;
+	}
+	if (!qinq) {
+		PMD_DRV_LOG(ERR,
+			    "QinQ not enabled. Needs to be ON as we can "
+			    "accelerate only outer vlan\n");
+		return -EINVAL;
+	}
+
+	if (vlan_type == ETH_VLAN_TYPE_OUTER) {
+		switch (tpid) {
+		case RTE_ETHER_TYPE_QINQ:
+			bp->outer_tpid_bd =
+				TX_BD_LONG_CFA_META_VLAN_TPID_TPID88A8;
+			break;
+		case RTE_ETHER_TYPE_VLAN:
+			bp->outer_tpid_bd =
+				TX_BD_LONG_CFA_META_VLAN_TPID_TPID8100;
+			break;
+		case 0x9100:
+			bp->outer_tpid_bd =
+				TX_BD_LONG_CFA_META_VLAN_TPID_TPID9100;
+			break;
+		case 0x9200:
+			bp->outer_tpid_bd =
+				TX_BD_LONG_CFA_META_VLAN_TPID_TPID9200;
+			break;
+		case 0x9300:
+			bp->outer_tpid_bd =
+				TX_BD_LONG_CFA_META_VLAN_TPID_TPID9300;
+			break;
+		default:
+			PMD_DRV_LOG(ERR, "Invalid TPID: %x\n", tpid);
+			return -EINVAL;
+		}
+		bp->outer_tpid_bd |= tpid;
+		PMD_DRV_LOG(INFO, "outer_tpid_bd = %x\n", bp->outer_tpid_bd);
+	} else if (vlan_type == ETH_VLAN_TYPE_INNER) {
+		PMD_DRV_LOG(ERR,
+			    "Can accelerate only outer vlan in QinQ\n");
+		return -EINVAL;
+	}

 	return 0;
 }

 static int
 bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev,
-			     struct rte_ether_addr *addr)
+				 struct rte_ether_addr *addr)
 {
 	struct bnxt *bp = dev->data->dev_private;
 	/* Default Filter is tied to VNIC 0 */
@@ -1884,7 +1936,7 @@ bnxt_dev_set_mc_addr_list_op(struct rte_eth_dev *eth_dev,
 	if (rc)
 		return rc;

-	vnic = &bp->vnic_info[0];
+	vnic = BNXT_GET_DEFAULT_VNIC(bp);

 	if (nb_mc_addr > BNXT_MAX_MC_ADDRS) {
 		vnic->flags |= BNXT_VNIC_INFO_ALLMULTI;
@@ -1938,7 +1990,7 @@ bnxt_rxq_info_get_op(struct rte_eth_dev *dev, uint16_t queue_id,

 	qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
 	qinfo->conf.rx_drop_en = 0;
-	qinfo->conf.rx_deferred_start = 0;
+	qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
 }

 static void
@@ -1963,7 +2015,6 @@ bnxt_txq_info_get_op(struct rte_eth_dev *dev, uint16_t queue_id,
 static int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu)
 {
 	struct bnxt *bp = eth_dev->data->dev_private;
-	struct rte_eth_dev_info dev_info;
 	uint32_t new_pkt_size;
 	uint32_t rc = 0;
 	uint32_t i;
@@ -1975,18 +2026,6 @@ static int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu)
 	new_pkt_size = new_mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN +
 		       VLAN_TAG_SIZE * BNXT_NUM_VLANS;

-	rc = bnxt_dev_info_get_op(eth_dev, &dev_info);
-	if (rc != 0) {
-		PMD_DRV_LOG(ERR, "Error during getting ethernet device info\n");
-		return rc;
-	}
-
-	if (new_mtu < RTE_ETHER_MIN_MTU || new_mtu > BNXT_MAX_MTU) {
-		PMD_DRV_LOG(ERR, "MTU requested must be within (%d, %d)\n",
-			    RTE_ETHER_MIN_MTU, BNXT_MAX_MTU);
-		return -EINVAL;
-	}
-
 #ifdef RTE_ARCH_X86
 	/*
 	 * If vector-mode tx/rx is active, disallow any MTU change that would
@@ -2016,15 +2055,12 @@ static int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu)

 	eth_dev->data->dev_conf.rxmode.max_rx_pkt_len = new_pkt_size;

-	eth_dev->data->mtu = new_mtu;
-	PMD_DRV_LOG(INFO, "New MTU is %d\n", eth_dev->data->mtu);
-
 	for (i = 0; i < bp->nr_vnics; i++) {
 		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
 		uint16_t size = 0;

-		vnic->mru = bp->eth_dev->data->mtu + RTE_ETHER_HDR_LEN +
-			    RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE * 2;
+		vnic->mru = new_mtu + RTE_ETHER_HDR_LEN +
+			    RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE * 2;
 		rc = bnxt_hwrm_vnic_cfg(bp, vnic);
 		if (rc)
 			break;
@@ -2039,6 +2075,8 @@ static int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu)
 		}
 	}

+	PMD_DRV_LOG(INFO, "New MTU is %d\n", new_mtu);
+
 	return rc;
 }

@@ -2100,9 +2138,6 @@ bnxt_rx_queue_count_op(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 	struct bnxt_cp_ring_info *cpr;
 	struct bnxt_rx_queue *rxq;
 	struct rx_pkt_cmpl *rxcmp;
-	uint16_t cmp_type;
-	uint8_t cmp = 1;
-	bool valid;
 	int rc;

 	rc = is_bnxt_in_error(bp);
@@ -2111,33 +2146,19 @@ bnxt_rx_queue_count_op(struct rte_eth_dev *dev, uint16_t rx_queue_id)

 	rxq = dev->data->rx_queues[rx_queue_id];
 	cpr = rxq->cp_ring;
-	valid = cpr->valid;
+	raw_cons = cpr->cp_raw_cons;

-	while (raw_cons < rxq->nb_rx_desc) {
+	while (1) {
 		cons = RING_CMP(cpr->cp_ring_struct, raw_cons);
+		rte_prefetch0(&cpr->cp_desc_ring[cons]);
 		rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];

-		if (!CMPL_VALID(rxcmp, valid))
-			goto nothing_to_do;
-		valid = FLIP_VALID(cons, cpr->cp_ring_struct->ring_mask, valid);
-		cmp_type = CMP_TYPE(rxcmp);
-		if (cmp_type == RX_TPA_END_CMPL_TYPE_RX_TPA_END) {
-			cmp = (rte_le_to_cpu_32(
-					((struct rx_tpa_end_cmpl *)
-					 (rxcmp))->agg_bufs_v1) &
-				RX_TPA_END_CMPL_AGG_BUFS_MASK) >>
-				RX_TPA_END_CMPL_AGG_BUFS_SFT;
-			desc++;
-		} else if (cmp_type == 0x11) {
-			desc++;
-			cmp = (rxcmp->agg_bufs_v1 &
-				 RX_PKT_CMPL_AGG_BUFS_MASK) >>
-				RX_PKT_CMPL_AGG_BUFS_SFT;
+		if (!CMP_VALID(rxcmp, raw_cons, cpr->cp_ring_struct)) {
+			break;
 		} else {
-			cmp = 1;
+			raw_cons++;
+			desc++;
 		}
-nothing_to_do:
-		raw_cons += cmp ? cmp : 2;
 	}

 	return desc;
@@ -3223,18 +3244,24 @@ bnxt_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
 static int
 bnxt_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
 {
-	uint64_t ns, systime_cycles;
 	struct bnxt *bp = dev->data->dev_private;
 	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+	uint64_t ns, systime_cycles = 0;
+	int rc = 0;

 	if (!ptp)
 		return 0;

-	systime_cycles = bnxt_cc_read(bp);
+	if (BNXT_CHIP_THOR(bp))
+		rc = bnxt_hwrm_port_ts_query(bp, BNXT_PTP_FLAGS_CURRENT_TIME,
+					     &systime_cycles);
+	else
+		systime_cycles = bnxt_cc_read(bp);
+
 	ns = rte_timecounter_update(&ptp->tc, systime_cycles);
 	*ts = rte_ns_to_timespec(ns);

-	return 0;
+	return rc;
 }

 static int
@@ -3242,6 +3269,7 @@ bnxt_timesync_enable(struct rte_eth_dev *dev)
 	struct bnxt *bp = dev->data->dev_private;
 	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
 	uint32_t shift = 0;
+	int rc;

 	if (!ptp)
 		return 0;
@@ -3250,8 +3278,9 @@ bnxt_timesync_enable(struct rte_eth_dev *dev)
 	ptp->tx_tstamp_en = 1;
 	ptp->rxctl = BNXT_PTP_MSG_EVENTS;

-	if (!bnxt_hwrm_ptp_cfg(bp))
-		bnxt_map_ptp_regs(bp);
+	rc = bnxt_hwrm_ptp_cfg(bp);
+	if (rc)
+		return rc;

 	memset(&ptp->tc, 0, sizeof(struct rte_timecounter));
 	memset(&ptp->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
@@ -3269,6 +3298,9 @@ bnxt_timesync_enable(struct rte_eth_dev *dev)
 	ptp->tx_tstamp_tc.cc_shift = shift;
 	ptp->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;

+	if (!BNXT_CHIP_THOR(bp))
+		bnxt_map_ptp_regs(bp);
+
 	return 0;
 }

@@ -3287,7 +3319,8 @@ bnxt_timesync_disable(struct rte_eth_dev *dev)

 	bnxt_hwrm_ptp_cfg(bp);

-	bnxt_unmap_ptp_regs(bp);
+	if (!BNXT_CHIP_THOR(bp))
+		bnxt_unmap_ptp_regs(bp);

 	return 0;
 }
@@ -3305,7 +3338,11 @@ bnxt_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
 	if (!ptp)
 		return 0;

-	bnxt_get_rx_ts(bp, &rx_tstamp_cycles);
+	if (BNXT_CHIP_THOR(bp))
+		rx_tstamp_cycles = ptp->rx_timestamp;
+	else
+		bnxt_get_rx_ts(bp, &rx_tstamp_cycles);
+
 	ns = rte_timecounter_update(&ptp->rx_tstamp_tc, rx_tstamp_cycles);
 	*timestamp = rte_ns_to_timespec(ns);
 	return 0;
@@ -3319,15 +3356,21 @@ bnxt_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
 	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
 	uint64_t tx_tstamp_cycles = 0;
 	uint64_t ns;
+	int rc = 0;

 	if (!ptp)
 		return 0;

-	bnxt_get_tx_ts(bp, &tx_tstamp_cycles);
+	if (BNXT_CHIP_THOR(bp))
+		rc = bnxt_hwrm_port_ts_query(bp, BNXT_PTP_FLAGS_PATH_TX,
+					     &tx_tstamp_cycles);
+	else
+		rc = bnxt_get_tx_ts(bp, &tx_tstamp_cycles);
+
 	ns = rte_timecounter_update(&ptp->tx_tstamp_tc, tx_tstamp_cycles);
 	*timestamp = rte_ns_to_timespec(ns);

-	return 0;
+	return rc;
 }

 static int
@@ -3531,6 +3574,7 @@ static const struct eth_dev_ops bnxt_dev_ops = {
 	.udp_tunnel_port_del = bnxt_udp_tunnel_port_del_op,
 	.vlan_filter_set = bnxt_vlan_filter_set_op,
 	.vlan_offload_set = bnxt_vlan_offload_set_op,
+	.vlan_tpid_set = bnxt_vlan_tpid_set_op,
 	.vlan_pvid_set = bnxt_vlan_pvid_set_op,
 	.mtu_set = bnxt_mtu_set_op,
 	.mac_addr_set = bnxt_set_default_mac_addr_op,
@@ -3976,10 +4020,9 @@ static int bnxt_alloc_ctx_mem_blk(__rte_unused struct bnxt *bp,
 	memset(mz->addr, 0, mz->len);
 	mz_phys_addr = mz->iova;
 	if ((unsigned long)mz->addr == mz_phys_addr) {
-		PMD_DRV_LOG(WARNING,
-			    "Memzone physical address same as virtual.\n");
-		PMD_DRV_LOG(WARNING,
-			    "Using rte_mem_virt2iova()\n");
+		PMD_DRV_LOG(DEBUG,
+			    "physical address same as virtual\n");
+		PMD_DRV_LOG(DEBUG, "Using rte_mem_virt2iova()\n");
 		mz_phys_addr = rte_mem_virt2iova(mz->addr);
 		if (mz_phys_addr == RTE_BAD_IOVA) {
 			PMD_DRV_LOG(ERR,
@@ -4012,10 +4055,9 @@ static int bnxt_alloc_ctx_mem_blk(__rte_unused struct bnxt *bp,
 	memset(mz->addr, 0, mz->len);
 	mz_phys_addr = mz->iova;
 	if ((unsigned long)mz->addr == mz_phys_addr) {
-		PMD_DRV_LOG(WARNING,
+		PMD_DRV_LOG(DEBUG,
 			    "Memzone physical address same as virtual.\n");
-		PMD_DRV_LOG(WARNING,
-			    "Using rte_mem_virt2iova()\n");
+		PMD_DRV_LOG(DEBUG, "Using rte_mem_virt2iova()\n");
 		for (sz = 0; sz < mem_size; sz += BNXT_PAGE_SIZE)
 			rte_mem_lock_page(((char *)mz->addr) + sz);
 		mz_phys_addr = rte_mem_virt2iova(mz->addr);
@@ -4203,9 +4245,9 @@ static int bnxt_alloc_stats_mem(struct bnxt *bp)
 	memset(mz->addr, 0, mz->len);
 	mz_phys_addr = mz->iova;
 	if ((unsigned long)mz->addr == mz_phys_addr) {
-		PMD_DRV_LOG(WARNING,
+		PMD_DRV_LOG(DEBUG,
 			    "Memzone physical address same as virtual.\n");
-		PMD_DRV_LOG(WARNING,
+		PMD_DRV_LOG(DEBUG,
 			    "Using rte_mem_virt2iova()\n");
 		mz_phys_addr = rte_mem_virt2iova(mz->addr);
 		if (mz_phys_addr == RTE_BAD_IOVA) {
@@ -4241,10 +4283,9 @@ static int bnxt_alloc_stats_mem(struct bnxt *bp)
 	memset(mz->addr, 0, mz->len);
 	mz_phys_addr = mz->iova;
 	if ((unsigned long)mz->addr == mz_phys_addr) {
-		PMD_DRV_LOG(WARNING,
+		PMD_DRV_LOG(DEBUG,
 			    "Memzone physical address same as virtual\n");
-		PMD_DRV_LOG(WARNING,
-			    "Using rte_mem_virt2iova()\n");
+		PMD_DRV_LOG(DEBUG, "Using rte_mem_virt2iova()\n");
 		mz_phys_addr = rte_mem_virt2iova(mz->addr);
 		if (mz_phys_addr == RTE_BAD_IOVA) {
 			PMD_DRV_LOG(ERR,
@@ -4490,12 +4531,6 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
 	if (version_printed++ == 0)
 		PMD_DRV_LOG(INFO, "%s\n", bnxt_version);

-	rte_eth_copy_pci_info(eth_dev, pci_dev);
-
-	bp = eth_dev->data->dev_private;
-
-	bp->dev_stopped = 1;
-
 	eth_dev->dev_ops = &bnxt_dev_ops;
 	eth_dev->rx_pkt_burst = &bnxt_recv_pkts;
 	eth_dev->tx_pkt_burst = &bnxt_xmit_pkts;
@@ -4507,6 +4542,12 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
 		return 0;

+	rte_eth_copy_pci_info(eth_dev, pci_dev);
+
+	bp = eth_dev->data->dev_private;
+
+	bp->dev_stopped = 1;
+
 	if (bnxt_vf_pciid(pci_dev->id.device_id))
 		bp->flags |= BNXT_FLAG_VF;

@@ -4561,7 +4602,6 @@ bnxt_uninit_resources(struct bnxt *bp, bool reconfig_dev)
 {
 	int rc;

-	bnxt_disable_int(bp);
 	bnxt_free_int(bp);
 	bnxt_free_mem(bp, reconfig_dev);
 	bnxt_hwrm_func_buf_unrgtr(bp);
@@ -4577,6 +4617,8 @@ bnxt_uninit_resources(struct bnxt *bp, bool reconfig_dev)
 		}
 	}

+	rte_free(bp->ptp_cfg);
+	bp->ptp_cfg = NULL;
 	return rc;
 }