diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c
index 5d7ab4f047..7a2220daa4 100644
--- a/drivers/net/ice/ice_rxtx.c
+++ b/drivers/net/ice/ice_rxtx.c
@@ -270,6 +270,7 @@ ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
 	struct rte_eth_rxmode *rxmode = &dev_data->dev_conf.rxmode;
 	uint32_t rxdid = ICE_RXDID_COMMS_OVS;
 	uint32_t regval;
+	struct ice_adapter *ad = rxq->vsi->adapter;
 
 	/* Set buffer size as the head split is disabled. */
 	buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
@@ -302,6 +303,18 @@ ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
 		}
 	}
 
+	if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
+		/* Register mbuf field and flag for Rx timestamp */
+		err = rte_mbuf_dyn_rx_timestamp_register(
+				&ice_timestamp_dynfield_offset,
+				&ice_timestamp_dynflag);
+		if (err) {
+			PMD_DRV_LOG(ERR,
+				"Cannot register mbuf field/flag for timestamp");
+			return -EINVAL;
+		}
+	}
+
 	memset(&rx_ctx, 0, sizeof(rx_ctx));
 
 	rx_ctx.base = rxq->rx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;
@@ -354,6 +367,9 @@ ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
 	regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
 		QRXFLXP_CNTXT_RXDID_PRIO_M;
 
+	if (ad->ptp_ena || rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP)
+		regval |= QRXFLXP_CNTXT_TS_M;
+
 	ICE_WRITE_REG(hw, QRXFLXP_CNTXT(rxq->reg_idx), regval);
 
 	err = ice_clear_rxq_ctx(hw, rxq->reg_idx);
@@ -689,6 +705,7 @@ ice_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 	tx_ctx.tso_ena = 1; /* tso enable */
 	tx_ctx.tso_qnum = txq->reg_idx; /* index for tso state structure */
 	tx_ctx.legacy_int = 1; /* Legacy or Advanced Host Interface */
+	tx_ctx.tsyn_ena = 1;
 
 	ice_set_ctx(hw, (uint8_t *)&tx_ctx, txq_elem->txqs[0].txq_ctx,
 		    ice_tlan_ctx_info);
@@ -1135,6 +1152,7 @@ ice_rx_queue_setup(struct rte_eth_dev *dev,
 		return -ENOMEM;
 	}
 
+	rxq->mz = rz;
 	/* Zero all the descriptors in the ring. */
 	memset(rz->addr, 0, ring_size);
 
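/*
 * Illustrative application-side sketch, not part of the patch: how an app
 * consumes the Rx timestamp that the hunks above make the PMD write into
 * the mbuf dynamic field.  rte_mbuf_dyn_rx_timestamp_register() returns the
 * same offset/flag pair the driver registered, so no layout is hard-coded.
 * The app_* names are illustrative; the port is assumed to be configured
 * with DEV_RX_OFFLOAD_TIMESTAMP.
 */
#include <rte_mbuf.h>
#include <rte_mbuf_dyn.h>

static int app_ts_offset = -1; /* dynamic field offset, filled at init */
static uint64_t app_ts_flag;   /* set in ol_flags when a stamp is valid */

static int
app_ts_init(void)
{
	/* Safe to call even after the PMD registered the same field. */
	return rte_mbuf_dyn_rx_timestamp_register(&app_ts_offset,
						  &app_ts_flag);
}

static uint64_t
app_rx_timestamp(struct rte_mbuf *m)
{
	if (app_ts_offset < 0 || !(m->ol_flags & app_ts_flag))
		return 0; /* no timestamp on this mbuf */
	return *RTE_MBUF_DYNFIELD(m, app_ts_offset, rte_mbuf_timestamp_t *);
}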
@@ -1190,6 +1208,7 @@ ice_rx_queue_release(void *rxq)
 
 	q->rx_rel_mbufs(q);
 	rte_free(q->sw_ring);
+	rte_memzone_free(q->mz);
 	rte_free(q);
 }
 
@@ -1336,6 +1355,7 @@ ice_tx_queue_setup(struct rte_eth_dev *dev,
 		return -ENOMEM;
 	}
 
+	txq->mz = tz;
 	txq->nb_tx_desc = nb_desc;
 	txq->tx_rs_thresh = tx_rs_thresh;
 	txq->tx_free_thresh = tx_free_thresh;
@@ -1374,6 +1394,18 @@ ice_tx_queue_setup(struct rte_eth_dev *dev,
 	return 0;
 }
 
+void
+ice_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
+{
+	ice_rx_queue_release(dev->data->rx_queues[qid]);
+}
+
+void
+ice_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
+{
+	ice_tx_queue_release(dev->data->tx_queues[qid]);
+}
+
 void
 ice_tx_queue_release(void *txq)
 {
@@ -1386,6 +1418,7 @@ ice_tx_queue_release(void *txq)
 
 	q->tx_rel_mbufs(q);
 	rte_free(q->sw_ring);
+	rte_memzone_free(q->mz);
 	rte_free(q);
 }
 
@@ -1546,6 +1579,10 @@ ice_rx_scan_hw_ring(struct ice_rx_queue *rxq)
 	int32_t i, j, nb_rx = 0;
 	uint64_t pkt_flags = 0;
 	uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
+	struct ice_vsi *vsi = rxq->vsi;
+	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
+	uint64_t ts_ns;
+	struct ice_adapter *ad = rxq->vsi->adapter;
 
 	rxdp = &rxq->rx_ring[rxq->rx_tail];
 	rxep = &rxq->sw_ring[rxq->rx_tail];
@@ -1589,6 +1626,25 @@ ice_rx_scan_hw_ring(struct ice_rx_queue *rxq)
 			ice_rxd_to_vlan_tci(mb, &rxdp[j]);
 			rxq->rxd_to_pkt_fields(rxq, mb, &rxdp[j]);
 
+			if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
+				ts_ns = ice_tstamp_convert_32b_64b(hw,
+					rte_le_to_cpu_32(rxdp[j].wb.flex_ts.ts_high));
+				if (ice_timestamp_dynflag > 0) {
+					*RTE_MBUF_DYNFIELD(mb,
+						ice_timestamp_dynfield_offset,
+						rte_mbuf_timestamp_t *) = ts_ns;
+					mb->ol_flags |= ice_timestamp_dynflag;
+				}
+			}
+
+			if (ad->ptp_ena && ((mb->packet_type &
+					RTE_PTYPE_L2_MASK) == RTE_PTYPE_L2_ETHER_TIMESYNC)) {
+				rxq->time_high =
+					rte_le_to_cpu_32(rxdp[j].wb.flex_ts.ts_high);
+				mb->timesync = rxq->queue_id;
+				pkt_flags |= PKT_RX_IEEE1588_PTP;
+			}
+
 			mb->ol_flags |= pkt_flags;
 		}
 
@@ -1772,6 +1828,10 @@ ice_recv_scattered_pkts(void *rx_queue,
 	uint64_t dma_addr;
 	uint64_t pkt_flags;
 	uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
+	struct ice_vsi *vsi = rxq->vsi;
+	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
+	uint64_t ts_ns;
+	struct ice_adapter *ad = rxq->vsi->adapter;
 
 	while (nb_rx < nb_pkts) {
 		rxdp = &rx_ring[rx_id];
@@ -1882,6 +1942,26 @@ ice_recv_scattered_pkts(void *rx_queue,
 		ice_rxd_to_vlan_tci(first_seg, &rxd);
 		rxq->rxd_to_pkt_fields(rxq, first_seg, &rxd);
 		pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
+
+		if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
+			ts_ns = ice_tstamp_convert_32b_64b(hw,
+				rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high));
+			if (ice_timestamp_dynflag > 0) {
+				*RTE_MBUF_DYNFIELD(first_seg,
+					ice_timestamp_dynfield_offset,
+					rte_mbuf_timestamp_t *) = ts_ns;
+				first_seg->ol_flags |= ice_timestamp_dynflag;
+			}
+		}
+
+		if (ad->ptp_ena && ((first_seg->packet_type & RTE_PTYPE_L2_MASK)
+		    == RTE_PTYPE_L2_ETHER_TIMESYNC)) {
+			rxq->time_high =
+				rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high);
+			first_seg->timesync = rxq->queue_id;
+			pkt_flags |= PKT_RX_IEEE1588_PTP;
+		}
+
 		first_seg->ol_flags |= pkt_flags;
 		/* Prefetch data of first segment, if configured to do so. */
 		rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr,
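/*
 * Illustrative consumer of the PTP Rx path added above (a sketch modeled on
 * examples/ptpclient, not part of the patch; app_handle_ptp is a
 * hypothetical helper).  The PMD stores the Rx queue id in mbuf->timesync
 * and sets PKT_RX_IEEE1588_PTP; the application passes that index back to
 * the ethdev timesync API to fetch the latched hardware timestamp.  Assumes
 * rte_eth_timesync_enable() was called at setup.
 */
#include <stdio.h>
#include <rte_ethdev.h>
#include <rte_mbuf.h>

static void
app_handle_ptp(uint16_t port_id, struct rte_mbuf *m)
{
	struct timespec ts;

	if (!(m->ol_flags & PKT_RX_IEEE1588_PTP))
		return; /* not a recognized PTP frame */

	if (rte_eth_timesync_read_rx_timestamp(port_id, &ts,
						m->timesync) == 0)
		printf("PTP Rx timestamp: %ld.%09ld\n",
		       (long)ts.tv_sec, ts.tv_nsec);
}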
@@ -2080,7 +2160,6 @@ ice_free_queues(struct rte_eth_dev *dev)
 			continue;
 		ice_rx_queue_release(dev->data->rx_queues[i]);
 		dev->data->rx_queues[i] = NULL;
-		rte_eth_dma_zone_free(dev, "rx_ring", i);
 	}
 	dev->data->nb_rx_queues = 0;
 
@@ -2089,7 +2168,6 @@ ice_free_queues(struct rte_eth_dev *dev)
 			continue;
 		ice_tx_queue_release(dev->data->tx_queues[i]);
 		dev->data->tx_queues[i] = NULL;
-		rte_eth_dma_zone_free(dev, "tx_ring", i);
 	}
 	dev->data->nb_tx_queues = 0;
 }
@@ -2136,6 +2214,7 @@ ice_fdir_setup_tx_resources(struct ice_pf *pf)
 		return -ENOMEM;
 	}
 
+	txq->mz = tz;
 	txq->nb_tx_desc = ICE_FDIR_NUM_TX_DESC;
 	txq->queue_id = ICE_FDIR_QUEUE_ID;
 	txq->reg_idx = pf->fdir.fdir_vsi->base_queue;
@@ -2194,6 +2273,7 @@ ice_fdir_setup_rx_resources(struct ice_pf *pf)
 		return -ENOMEM;
 	}
 
+	rxq->mz = rz;
 	rxq->nb_rx_desc = ICE_FDIR_NUM_RX_DESC;
 	rxq->queue_id = ICE_FDIR_QUEUE_ID;
 	rxq->reg_idx = pf->fdir.fdir_vsi->base_queue;
@@ -2237,6 +2317,10 @@ ice_recv_pkts(void *rx_queue,
 	uint64_t dma_addr;
 	uint64_t pkt_flags;
 	uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
+	struct ice_vsi *vsi = rxq->vsi;
+	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
+	uint64_t ts_ns;
+	struct ice_adapter *ad = rxq->vsi->adapter;
 
 	while (nb_rx < nb_pkts) {
 		rxdp = &rx_ring[rx_id];
@@ -2288,6 +2372,26 @@ ice_recv_pkts(void *rx_queue,
 		ice_rxd_to_vlan_tci(rxm, &rxd);
 		rxq->rxd_to_pkt_fields(rxq, rxm, &rxd);
 		pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
+
+		if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
+			ts_ns = ice_tstamp_convert_32b_64b(hw,
+				rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high));
+			if (ice_timestamp_dynflag > 0) {
+				*RTE_MBUF_DYNFIELD(rxm,
+					ice_timestamp_dynfield_offset,
+					rte_mbuf_timestamp_t *) = ts_ns;
+				rxm->ol_flags |= ice_timestamp_dynflag;
+			}
+		}
+
+		if (ad->ptp_ena && ((rxm->packet_type & RTE_PTYPE_L2_MASK) ==
+		    RTE_PTYPE_L2_ETHER_TIMESYNC)) {
+			rxq->time_high =
+				rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high);
+			rxm->timesync = rxq->queue_id;
+			pkt_flags |= PKT_RX_IEEE1588_PTP;
+		}
+
 		rxm->ol_flags |= pkt_flags;
 		/* copy old mbuf to rx_pkts */
 		rx_pkts[nb_rx++] = rxm;
@@ -2499,7 +2603,8 @@ ice_calc_context_desc(uint64_t flags)
 	static uint64_t mask = PKT_TX_TCP_SEG |
 		PKT_TX_QINQ |
 		PKT_TX_OUTER_IP_CKSUM |
-		PKT_TX_TUNNEL_MASK;
+		PKT_TX_TUNNEL_MASK |
+		PKT_TX_IEEE1588_TMST;
 
 	return (flags & mask) ? 1 : 0;
 }
@@ -2667,6 +2772,10 @@ ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 			if (ol_flags & PKT_TX_TCP_SEG)
 				cd_type_cmd_tso_mss |=
 					ice_set_tso_ctx(tx_pkt, tx_offload);
+			else if (ol_flags & PKT_TX_IEEE1588_TMST)
+				cd_type_cmd_tso_mss |=
+					((uint64_t)ICE_TX_CTX_DESC_TSYN <<
+					ICE_TXD_CTX_QW1_CMD_S);
 
 			ctx_txd->tunneling_params =
 				rte_cpu_to_le_32(cd_tunneling_params);
@@ -3068,6 +3177,8 @@ ice_set_rx_function(struct rte_eth_dev *dev)
 		ad->rx_use_avx512 = false;
 		ad->rx_use_avx2 = false;
 		rx_check_ret = ice_rx_vec_dev_check(dev);
+		if (ad->ptp_ena)
+			rx_check_ret = -1;
 		if (rx_check_ret >= 0 && ad->rx_bulk_alloc_allowed &&
 		    rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
 			ad->rx_vec_allowed = true;
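/*
 * Tx-side counterpart, an illustrative sketch of what the Tx hunks above
 * enable (not part of the patch; app_send_ptp is a hypothetical helper).
 * Setting PKT_TX_IEEE1588_TMST makes ice_calc_context_desc() request a
 * context descriptor and ice_xmit_pkts() fill it with ICE_TX_CTX_DESC_TSYN,
 * so the hardware latches the transmit time, which the application then
 * polls through the ethdev timesync API.
 */
#include <rte_cycles.h>
#include <rte_ethdev.h>
#include <rte_mbuf.h>

static int
app_send_ptp(uint16_t port_id, uint16_t queue_id, struct rte_mbuf *m)
{
	struct timespec ts;
	int retries = 100;

	m->ol_flags |= PKT_TX_IEEE1588_TMST;
	if (rte_eth_tx_burst(port_id, queue_id, &m, 1) != 1)
		return -1;

	/* The stamp is latched after the frame hits the wire; poll for it. */
	while (rte_eth_timesync_read_tx_timestamp(port_id, &ts) != 0) {
		if (--retries == 0)
			return -1;
		rte_delay_us(10);
	}
	/* ts now holds the device transmit time of the marked packet. */
	return 0;
}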