struct ice_rlan_ctx rx_ctx;
enum ice_status err;
uint16_t buf_size;
- struct rte_eth_rxmode *rxmode = &dev_data->dev_conf.rxmode;
uint32_t rxdid = ICE_RXDID_COMMS_OVS;
uint32_t regval;
+ struct ice_adapter *ad = rxq->vsi->adapter;
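+ /* Worst-case frame length implied by the configured MTU; this
+  * replaces the removed rxmode.max_rx_pkt_len.
+  */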
+ uint32_t frame_size = dev_data->mtu + ICE_ETH_OVERHEAD;
/* Set buffer size as the head split is disabled. */
buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
RTE_PKTMBUF_HEADROOM);
rxq->rx_hdr_len = 0;
rxq->rx_buf_len = RTE_ALIGN(buf_size, (1 << ICE_RLAN_CTX_DBUF_S));
- rxq->max_pkt_len = RTE_MIN((uint32_t)
- ICE_SUPPORT_CHAIN_NUM * rxq->rx_buf_len,
- dev_data->dev_conf.rxmode.max_rx_pkt_len);
-
- if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
- if (rxq->max_pkt_len <= ICE_ETH_MAX_LEN ||
- rxq->max_pkt_len > ICE_FRAME_SIZE_MAX) {
- PMD_DRV_LOG(ERR, "maximum packet length must "
- "be larger than %u and smaller than %u,"
- "as jumbo frame is enabled",
- (uint32_t)ICE_ETH_MAX_LEN,
- (uint32_t)ICE_FRAME_SIZE_MAX);
- return -EINVAL;
- }
- } else {
- if (rxq->max_pkt_len < RTE_ETHER_MIN_LEN ||
- rxq->max_pkt_len > ICE_ETH_MAX_LEN) {
- PMD_DRV_LOG(ERR, "maximum packet length must be "
- "larger than %u and smaller than %u, "
- "as jumbo frame is disabled",
- (uint32_t)RTE_ETHER_MIN_LEN,
- (uint32_t)ICE_ETH_MAX_LEN);
+ rxq->max_pkt_len =
+ RTE_MIN((uint32_t)ICE_SUPPORT_CHAIN_NUM * rxq->rx_buf_len,
+ frame_size);
+
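+ /* Jumbo support no longer hinges on an offload flag: one range
+  * check accepts anything from a minimal Ethernet frame up to the
+  * hardware limit ICE_FRAME_SIZE_MAX.
+  */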
+ if (rxq->max_pkt_len <= RTE_ETHER_MIN_LEN ||
+ rxq->max_pkt_len > ICE_FRAME_SIZE_MAX) {
+ PMD_DRV_LOG(ERR, "maximum packet length must "
+ "be larger than %u and smaller than %u",
+ (uint32_t)RTE_ETHER_MIN_LEN,
+ (uint32_t)ICE_FRAME_SIZE_MAX);
+ return -EINVAL;
+ }
+
+ if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
+ /* Register mbuf field and flag for Rx timestamp */
+ err = rte_mbuf_dyn_rx_timestamp_register(
+ &ice_timestamp_dynfield_offset,
+ &ice_timestamp_dynflag);
+ if (err) {
+ PMD_DRV_LOG(ERR,
+ "Cannot register mbuf field/flag for timestamp");
return -EINVAL;
}
}
regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
QRXFLXP_CNTXT_RXDID_PRIO_M;
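+ /* Have the hardware write the Rx timestamp into the flex
+  * descriptor whenever PTP or the timestamp offload is enabled
+  * on this queue.
+  */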
+ if (ad->ptp_ena || rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP)
+ regval |= QRXFLXP_CNTXT_TS_M;
+
ICE_WRITE_REG(hw, QRXFLXP_CNTXT(rxq->reg_idx), regval);
err = ice_clear_rxq_ctx(hw, rxq->reg_idx);
if (err) {
PMD_DRV_LOG(ERR, "Failed to clear Lan Rx queue (%u) context",
rxq->queue_id);
return -EINVAL;
}
- buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
- RTE_PKTMBUF_HEADROOM);
-
/* Check if scattered RX needs to be used. */
- if (rxq->max_pkt_len > buf_size)
+ if (frame_size > buf_size)
dev_data->scattered_rx = 1;
rxq->qrx_tail = hw->hw_addr + QRX_TAIL(rxq->reg_idx);
tx_ctx.tso_ena = 1; /* tso enable */
tx_ctx.tso_qnum = txq->reg_idx; /* index for tso state structure */
tx_ctx.legacy_int = 1; /* Legacy or Advanced Host Interface */
+ tx_ctx.tsyn_ena = 1; /* enable Tx timestamping for PTP */
ice_set_ctx(hw, (uint8_t *)&tx_ctx, txq_elem->txqs[0].txq_ctx,
ice_tlan_ctx_info);
return -ENOMEM;
}
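+ /* Keep the ring memzone handle so the release path can free it
+  * with rte_memzone_free() instead of leaking it.
+  */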
+ rxq->mz = rz;
/* Zero all the descriptors in the ring. */
memset(rz->addr, 0, ring_size);
q->rx_rel_mbufs(q);
rte_free(q->sw_ring);
+ rte_memzone_free(q->mz);
rte_free(q);
}
return -ENOMEM;
}
+ txq->mz = tz;
txq->nb_tx_desc = nb_desc;
txq->tx_rs_thresh = tx_rs_thresh;
txq->tx_free_thresh = tx_free_thresh;
return 0;
}
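+/*
+ * Thin wrappers for the updated ethdev queue-release callbacks,
+ * which now receive the device and a queue index instead of the
+ * queue pointer itself.
+ */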
+void
+ice_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
+{
+ ice_rx_queue_release(dev->data->rx_queues[qid]);
+}
+
+void
+ice_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
+{
+ ice_tx_queue_release(dev->data->tx_queues[qid]);
+}
+
void
ice_tx_queue_release(void *txq)
{
q->tx_rel_mbufs(q);
rte_free(q->sw_ring);
+ rte_memzone_free(q->mz);
rte_free(q);
}
}
uint32_t
-ice_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+ice_rx_queue_count(void *rx_queue)
{
#define ICE_RXQ_SCAN_INTERVAL 4
volatile union ice_rx_flex_desc *rxdp;
struct ice_rx_queue *rxq;
uint16_t desc = 0;
- rxq = dev->data->rx_queues[rx_queue_id];
+ rxq = rx_queue;
rxdp = &rxq->rx_ring[rxq->rx_tail];
while ((desc < rxq->nb_rx_desc) &&
rte_le_to_cpu_16(rxdp->wb.status_error0) &
int32_t i, j, nb_rx = 0;
uint64_t pkt_flags = 0;
uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
-
+#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
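+ /* The flex_ts field exists only in the 32-byte descriptor
+  * layout, so timestamp handling is compiled out for 16-byte
+  * descriptors.
+  */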
+ struct ice_vsi *vsi = rxq->vsi;
+ struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
+ uint64_t ts_ns;
+ struct ice_adapter *ad = rxq->vsi->adapter;
+#endif
rxdp = &rxq->rx_ring[rxq->rx_tail];
rxep = &rxq->sw_ring[rxq->rx_tail];
rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)];
ice_rxd_to_vlan_tci(mb, &rxdp[j]);
rxq->rxd_to_pkt_fields(rxq, mb, &rxdp[j]);
+#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
+ if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
+ ts_ns = ice_tstamp_convert_32b_64b(hw,
+ rte_le_to_cpu_32(rxdp[j].wb.flex_ts.ts_high));
+ if (ice_timestamp_dynflag > 0) {
+ *RTE_MBUF_DYNFIELD(mb,
+ ice_timestamp_dynfield_offset,
+ rte_mbuf_timestamp_t *) = ts_ns;
+ mb->ol_flags |= ice_timestamp_dynflag;
+ }
+ }
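+ /* For PTP event frames, keep the raw Rx timestamp and the
+  * receiving queue id around for the ethdev timesync read path.
+  */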
+ if (ad->ptp_ena && ((mb->packet_type &
+ RTE_PTYPE_L2_MASK) == RTE_PTYPE_L2_ETHER_TIMESYNC)) {
+ rxq->time_high =
+ rte_le_to_cpu_32(rxdp[j].wb.flex_ts.ts_high);
+ mb->timesync = rxq->queue_id;
+ pkt_flags |= PKT_RX_IEEE1588_PTP;
+ }
+#endif
mb->ol_flags |= pkt_flags;
}
uint64_t dma_addr;
uint64_t pkt_flags;
uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
-
+#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
+ struct ice_vsi *vsi = rxq->vsi;
+ struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
+ uint64_t ts_ns;
+ struct ice_adapter *ad = rxq->vsi->adapter;
+#endif
while (nb_rx < nb_pkts) {
rxdp = &rx_ring[rx_id];
rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
ice_rxd_to_vlan_tci(first_seg, &rxd);
rxq->rxd_to_pkt_fields(rxq, first_seg, &rxd);
pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
+#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
+ if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
+ ts_ns = ice_tstamp_convert_32b_64b(hw,
+ rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high));
+ if (ice_timestamp_dynflag > 0) {
+ *RTE_MBUF_DYNFIELD(first_seg,
+ ice_timestamp_dynfield_offset,
+ rte_mbuf_timestamp_t *) = ts_ns;
+ first_seg->ol_flags |= ice_timestamp_dynflag;
+ }
+ }
+
+ if (ad->ptp_ena && ((first_seg->packet_type & RTE_PTYPE_L2_MASK)
+ == RTE_PTYPE_L2_ETHER_TIMESYNC)) {
+ rxq->time_high =
+ rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high);
+ first_seg->timesync = rxq->queue_id;
+ pkt_flags |= PKT_RX_IEEE1588_PTP;
+ }
+#endif
first_seg->ol_flags |= pkt_flags;
/* Prefetch data of first segment, if configured to do so. */
rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr,
continue;
ice_rx_queue_release(dev->data->rx_queues[i]);
dev->data->rx_queues[i] = NULL;
- rte_eth_dma_zone_free(dev, "rx_ring", i);
}
dev->data->nb_rx_queues = 0;
continue;
ice_tx_queue_release(dev->data->tx_queues[i]);
dev->data->tx_queues[i] = NULL;
- rte_eth_dma_zone_free(dev, "tx_ring", i);
}
dev->data->nb_tx_queues = 0;
}
return -ENOMEM;
}
+ txq->mz = tz;
txq->nb_tx_desc = ICE_FDIR_NUM_TX_DESC;
txq->queue_id = ICE_FDIR_QUEUE_ID;
txq->reg_idx = pf->fdir.fdir_vsi->base_queue;
return -ENOMEM;
}
+ rxq->mz = rz;
rxq->nb_rx_desc = ICE_FDIR_NUM_RX_DESC;
rxq->queue_id = ICE_FDIR_QUEUE_ID;
rxq->reg_idx = pf->fdir.fdir_vsi->base_queue;
uint64_t dma_addr;
uint64_t pkt_flags;
uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
-
+#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
+ struct ice_vsi *vsi = rxq->vsi;
+ struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
+ uint64_t ts_ns;
+ struct ice_adapter *ad = rxq->vsi->adapter;
+#endif
while (nb_rx < nb_pkts) {
rxdp = &rx_ring[rx_id];
rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
ice_rxd_to_vlan_tci(rxm, &rxd);
rxq->rxd_to_pkt_fields(rxq, rxm, &rxd);
pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
+#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
+ if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
+ ts_ns = ice_tstamp_convert_32b_64b(hw,
+ rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high));
+ if (ice_timestamp_dynflag > 0) {
+ *RTE_MBUF_DYNFIELD(rxm,
+ ice_timestamp_dynfield_offset,
+ rte_mbuf_timestamp_t *) = ts_ns;
+ rxm->ol_flags |= ice_timestamp_dynflag;
+ }
+ }
+
+ if (ad->ptp_ena && ((rxm->packet_type & RTE_PTYPE_L2_MASK) ==
+ RTE_PTYPE_L2_ETHER_TIMESYNC)) {
+ rxq->time_high =
+ rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high);
+ rxm->timesync = rxq->queue_id;
+ pkt_flags |= PKT_RX_IEEE1588_PTP;
+ }
+#endif
rxm->ol_flags |= pkt_flags;
/* copy old mbuf to rx_pkts */
rx_pkts[nb_rx++] = rxm;
static uint64_t mask = PKT_TX_TCP_SEG |
PKT_TX_QINQ |
PKT_TX_OUTER_IP_CKSUM |
- PKT_TX_TUNNEL_MASK;
+ PKT_TX_TUNNEL_MASK |
+ PKT_TX_IEEE1588_TMST; /* Tx timestamp also needs a context descriptor */
return (flags & mask) ? 1 : 0;
}
if (ol_flags & PKT_TX_TCP_SEG)
cd_type_cmd_tso_mss |=
ice_set_tso_ctx(tx_pkt, tx_offload);
+ else if (ol_flags & PKT_TX_IEEE1588_TMST)
+ /* request a Tx timestamp via the TSYN context command */
+ cd_type_cmd_tso_mss |=
+ ((uint64_t)ICE_TX_CTX_DESC_TSYN <<
+ ICE_TXD_CTX_QW1_CMD_S);
ctx_txd->tunneling_params =
rte_cpu_to_le_32(cd_tunneling_params);
ad->rx_use_avx512 = false;
ad->rx_use_avx2 = false;
rx_check_ret = ice_rx_vec_dev_check(dev);
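+ /* The vector Rx paths do not extract timestamp or timesync
+  * fields, so force the scalar path when PTP is enabled.
+  */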
+ if (ad->ptp_ena)
+ rx_check_ret = -1;
if (rx_check_ret >= 0 && ad->rx_bulk_alloc_allowed &&
rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
ad->rx_vec_allowed = true;