/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2014-2018 Broadcom
+ * Copyright(c) 2014-2021 Broadcom
* All rights reserved.
*/
rte_bitmap_set(rxr->ag_bitmap, ag_cons);
}
+ last->next = NULL;
bnxt_prod_ag_mbuf(rxq);
return 0;
}
ip6 = i & (RX_PKT_CMPL_FLAGS2_IP_TYPE >> 7);
tun = i & (RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC >> 2);
- type = (i & 0x38) << 9;
+ type = (i & 0x78) << 9;
if (!tun && !ip6)
l3 = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
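Why widening the mask matters: shifted left by 9, the index bits must line up with the completion record's 4-bit ITYPE field in flags_type. A minimal standalone check of that arithmetic (the 0xf000/0x7000 field positions follow from the RX_PKT_CMPL_FLAGS_ITYPE_MASK definition; this is an illustration, not driver code):

#include <assert.h>

int main(void)
{
	/* 0x78 << 9 covers all four ITYPE bits (15:12)... */
	assert((0x78 << 9) == 0xf000);
	/* ...while the old 0x38 << 9 silently dropped the top bit. */
	assert((0x38 << 9) == 0x7000);
	return 0;
}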
pt[i] |= PKT_RX_IP_CKSUM_BAD;
if (i & (RX_PKT_CMPL_ERRORS_T_IP_CS_ERROR >> 4))
- pt[i] |= PKT_RX_EIP_CKSUM_BAD;
+ pt[i] |= PKT_RX_OUTER_IP_CKSUM_BAD;
if (i & (RX_PKT_CMPL_ERRORS_L4_CS_ERROR >> 4))
pt[i] |= PKT_RX_L4_CKSUM_BAD;
ol_flags |= PKT_RX_RSS_HASH;
}
+#ifdef RTE_LIBRTE_IEEE1588
+ if (unlikely((flags_type & RX_PKT_CMPL_FLAGS_MASK) ==
+ RX_PKT_CMPL_FLAGS_ITYPE_PTP_W_TIMESTAMP))
+ ol_flags |= PKT_RX_IEEE1588_PTP | PKT_RX_IEEE1588_TMST;
+#endif
+
mbuf->ol_flags = ol_flags;
}
static void
bnxt_get_rx_ts_p5(struct bnxt *bp, uint32_t rx_ts_cmpl)
{
- uint64_t systime_cycles = 0;
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ uint64_t last_hwrm_time;
+ uint64_t pkt_time = 0;
- if (!BNXT_CHIP_P5(bp))
+ if (!BNXT_CHIP_P5(bp) || !ptp)
return;
/* On Thor, Rx timestamps are provided directly in the
 * Rx completion records to the driver. Only 32 bits of
 * the timestamp are present in the completion. The driver
 * combines the upper 16 bits of the last 48 bit timer value
 * queried from the HWRM response with the lower 32 bits in the
 * Rx completion to produce the 48 bit timestamp for the Rx packet
 */
- bnxt_hwrm_port_ts_query(bp, BNXT_PTP_FLAGS_CURRENT_TIME,
- &systime_cycles);
- bp->ptp_cfg->rx_timestamp = (systime_cycles & 0xFFFF00000000);
- bp->ptp_cfg->rx_timestamp |= rx_ts_cmpl;
+ last_hwrm_time = ptp->current_time;
+ pkt_time = (last_hwrm_time & BNXT_PTP_CURRENT_TIME_MASK) | rx_ts_cmpl;
+ if (rx_ts_cmpl < (uint32_t)last_hwrm_time) {
+ /* timer has rolled over */
+ pkt_time += (1ULL << 32);
+ }
+ ptp->rx_timestamp = pkt_time;
}
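For reference, a minimal standalone sketch of the splice-and-carry logic above. It assumes BNXT_PTP_CURRENT_TIME_MASK keeps bits 47:32 of the 48 bit timer (0xFFFF00000000); real code should use the driver's definition:

#include <stdint.h>

static uint64_t splice_rx_ts(uint64_t last_hwrm_time, uint32_t rx_ts_cmpl)
{
	/* Upper 16 bits from the last HWRM timer sample, lower 32 bits
	 * from the Rx completion record.
	 */
	uint64_t pkt_time = (last_hwrm_time & 0xFFFF00000000ULL) | rx_ts_cmpl;

	/* The packet arrived after the timer sample, so a smaller low
	 * word means the 32-bit counter wrapped in between; carry one
	 * rollover into the upper bits.
	 */
	if (rx_ts_cmpl < (uint32_t)last_hwrm_time)
		pkt_time += (1ULL << 32);

	return pkt_time;
}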
#endif
goto next_rx;
}
- agg_buf = (rxcmp->agg_bufs_v1 & RX_PKT_CMPL_AGG_BUFS_MASK)
- >> RX_PKT_CMPL_AGG_BUFS_SFT;
+ agg_buf = BNXT_RX_L2_AGG_BUFS(rxcmp);
if (agg_buf && !bnxt_agg_bufs_valid(cpr, agg_buf, tmp_raw_cons))
return -EBUSY;
#ifdef RTE_LIBRTE_IEEE1588
if (unlikely((rte_le_to_cpu_16(rxcmp->flags_type) &
RX_PKT_CMPL_FLAGS_MASK) ==
- RX_PKT_CMPL_FLAGS_ITYPE_PTP_W_TIMESTAMP)) {
- mbuf->ol_flags |= PKT_RX_IEEE1588_PTP | PKT_RX_IEEE1588_TMST;
+ RX_PKT_CMPL_FLAGS_ITYPE_PTP_W_TIMESTAMP))
bnxt_get_rx_ts_p5(rxq->bp, rxcmp1->reorder);
- }
#endif
if (cmp_type == CMPL_BASE_TYPE_RX_L2_V2) {
cpr->cp_ring_struct->ring_mask,
cpr->valid);
- if ((CMP_TYPE(rxcmp) >= CMPL_BASE_TYPE_RX_TPA_START_V2) &&
+ if (CMP_TYPE(rxcmp) == CMPL_BASE_TYPE_HWRM_DONE) {
+ PMD_DRV_LOG(ERR, "Rx flush done\n");
+ } else if ((CMP_TYPE(rxcmp) >= CMPL_BASE_TYPE_RX_TPA_START_V2) &&
(CMP_TYPE(rxcmp) <= RX_TPA_V2_ABUF_CMPL_TYPE_RX_TPA_AGG)) {
rc = bnxt_rx_pkt(&rx_pkts[nb_rx_pkts], rxq, &raw_cons);
if (!rc)
raw_cons = NEXT_RAW_CMP(raw_cons);
if (nb_rx_pkts == nb_pkts || nb_rep_rx_pkts == nb_pkts || evt)
break;
- /* Post some Rx buf early in case of larger burst processing */
- if (nb_rx_pkts == BNXT_RX_POST_THRESH)
- bnxt_db_write(&rxr->rx_db, rxr->rx_raw_prod);
}
cpr->cp_raw_cons = raw_cons;
int bnxt_init_rx_ring_struct(struct bnxt_rx_queue *rxq, unsigned int socket_id)
{
- struct rte_eth_dev *eth_dev = rxq->bp->eth_dev;
- struct rte_eth_rxmode *rxmode;
struct bnxt_cp_ring_info *cpr;
struct bnxt_rx_ring_info *rxr;
struct bnxt_ring *ring;
- bool use_agg_ring;
rxq->rx_buf_size = BNXT_MAX_PKT_LEN + sizeof(struct rte_mbuf);
return -ENOMEM;
cpr->cp_ring_struct = ring;
- rxmode = &eth_dev->data->dev_conf.rxmode;
- use_agg_ring = (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER) ||
- (rxmode->offloads & DEV_RX_OFFLOAD_TCP_LRO) ||
- (rxmode->max_rx_pkt_len >
- (uint32_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
- RTE_PKTMBUF_HEADROOM));
-
/* Allocate two completion slots per entry in desc ring. */
ring->ring_size = rxr->rx_ring_struct->ring_size * 2;
-
- /* Allocate additional slots if aggregation ring is in use. */
- if (use_agg_ring)
- ring->ring_size *= AGG_RING_SIZE_FACTOR;
+ ring->ring_size *= AGG_RING_SIZE_FACTOR;
ring->ring_size = rte_align32pow2(ring->ring_size);
ring->ring_mask = ring->ring_size - 1;
return 0;
}
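Each Rx packet completion spans two 16-byte records (base and hi), hence the two slots per descriptor, and the aggregation factor is now applied unconditionally, matching the removal of the use_agg_ring check above. A worked sizing sketch, with a hypothetical 256-entry Rx ring and a factor of 2 standing in for AGG_RING_SIZE_FACTOR (illustrative values, not the driver's definitions):

#include <stdint.h>

/* Stand-in for rte_align32pow2(): round up to the next power of 2. */
static uint32_t align32pow2(uint32_t x)
{
	x--;
	x |= x >> 1; x |= x >> 2; x |= x >> 4; x |= x >> 8; x |= x >> 16;
	return x + 1;
}

int main(void)
{
	uint32_t rx_entries = 256;	/* hypothetical Rx ring size */
	uint32_t agg_factor = 2;	/* hypothetical AGG_RING_SIZE_FACTOR */
	uint32_t size = rx_entries * 2;	/* two completion slots per entry */

	size *= agg_factor;		/* headroom for agg completions */
	size = align32pow2(size);	/* -> 1024 */
	uint32_t mask = size - 1;	/* -> 1023 == ring_mask */
	(void)mask;
	return 0;
}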
+
+/* Sweep the Rx completion queue till HWRM_DONE for ring flush is received.
+ * The mbufs will not be freed in this call.
+ * They will be freed during ring free as a part of mem cleanup.
+ */
+int bnxt_flush_rx_cmp(struct bnxt_cp_ring_info *cpr)
+{
+ struct bnxt_ring *cp_ring_struct = cpr->cp_ring_struct;
+ uint32_t ring_mask = cp_ring_struct->ring_mask;
+ uint32_t raw_cons = cpr->cp_raw_cons;
+ struct rx_pkt_cmpl *rxcmp;
+ uint32_t nb_rx = 0;
+ uint32_t cons;
+
+ do {
+ cons = RING_CMP(cpr->cp_ring_struct, raw_cons);
+ rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
+
+ if (CMP_TYPE(rxcmp) == CMPL_BASE_TYPE_HWRM_DONE)
+ return 1;
+
+ raw_cons = NEXT_RAW_CMP(raw_cons);
+ nb_rx++;
+ } while (nb_rx < ring_mask);
+
+ cpr->cp_raw_cons = raw_cons;
+
+ /* Ring the completion queue doorbell. */
+ bnxt_db_cq(cpr);
+
+ return 0;
+}
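A hypothetical caller sketch showing the intended polling pattern; the function name, retry budget, and delay are illustrative, and this patch does not show the real call site:

/* Poll until the HWRM_DONE completion acknowledging the ring flush
 * shows up, or give up after an illustrative retry budget.
 */
static int wait_for_rx_flush_done(struct bnxt_cp_ring_info *cpr)
{
	int retries = 100;		/* hypothetical budget */

	while (retries--) {
		if (bnxt_flush_rx_cmp(cpr))
			return 0;	/* HWRM_DONE seen */
		rte_delay_ms(1);	/* rte_cycles.h */
	}
	return -ETIMEDOUT;		/* errno.h */
}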