struct ice_rlan_ctx rx_ctx;
enum ice_status err;
uint16_t buf_size;
- struct rte_eth_rxmode *rxmode = &dev_data->dev_conf.rxmode;
uint32_t rxdid = ICE_RXDID_COMMS_OVS;
uint32_t regval;
struct ice_adapter *ad = rxq->vsi->adapter;
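+ /* Total Rx frame length derived from the configured MTU plus L2 overhead. */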
+ uint32_t frame_size = dev_data->mtu + ICE_ETH_OVERHEAD;
/* Set buffer size as the head split is disabled. */
buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
RTE_PKTMBUF_HEADROOM);
rxq->rx_hdr_len = 0;
rxq->rx_buf_len = RTE_ALIGN(buf_size, (1 << ICE_RLAN_CTX_DBUF_S));
- rxq->max_pkt_len = RTE_MIN((uint32_t)
- ICE_SUPPORT_CHAIN_NUM * rxq->rx_buf_len,
- dev_data->dev_conf.rxmode.max_rx_pkt_len);
-
- if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
- if (rxq->max_pkt_len <= ICE_ETH_MAX_LEN ||
- rxq->max_pkt_len > ICE_FRAME_SIZE_MAX) {
- PMD_DRV_LOG(ERR, "maximum packet length must "
- "be larger than %u and smaller than %u,"
- "as jumbo frame is enabled",
- (uint32_t)ICE_ETH_MAX_LEN,
- (uint32_t)ICE_FRAME_SIZE_MAX);
- return -EINVAL;
- }
- } else {
- if (rxq->max_pkt_len < RTE_ETHER_MIN_LEN ||
- rxq->max_pkt_len > ICE_ETH_MAX_LEN) {
- PMD_DRV_LOG(ERR, "maximum packet length must be "
- "larger than %u and smaller than %u, "
- "as jumbo frame is disabled",
- (uint32_t)RTE_ETHER_MIN_LEN,
- (uint32_t)ICE_ETH_MAX_LEN);
- return -EINVAL;
- }
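+ /* Cap the maximum packet length at the smaller of the chained-mbuf capacity and the MTU-derived frame size. */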
+ rxq->max_pkt_len =
+ RTE_MIN((uint32_t)ICE_SUPPORT_CHAIN_NUM * rxq->rx_buf_len,
+ frame_size);
+
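+ /* Reject lengths at or below the Ethernet minimum or above the hardware maximum. */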
+ if (rxq->max_pkt_len <= RTE_ETHER_MIN_LEN ||
+ rxq->max_pkt_len > ICE_FRAME_SIZE_MAX) {
+ PMD_DRV_LOG(ERR, "maximum packet length must "
+ "be larger than %u and smaller than %u",
+ (uint32_t)RTE_ETHER_MIN_LEN,
+ (uint32_t)ICE_FRAME_SIZE_MAX);
+ return -EINVAL;
}
if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
return -EINVAL;
}
- buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
- RTE_PKTMBUF_HEADROOM);
-
/* Check if scattered RX needs to be used. */
- if (rxq->max_pkt_len > buf_size)
+ if (frame_size > buf_size)
dev_data->scattered_rx = 1;
rxq->qrx_tail = hw->hw_addr + QRX_TAIL(rxq->reg_idx);
}
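+/* Report the number of used Rx descriptors; the callback now receives the Rx queue pointer directly. */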
uint32_t
-ice_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+ice_rx_queue_count(void *rx_queue)
{
#define ICE_RXQ_SCAN_INTERVAL 4
volatile union ice_rx_flex_desc *rxdp;
struct ice_rx_queue *rxq;
uint16_t desc = 0;
- rxq = dev->data->rx_queues[rx_queue_id];
+ rxq = rx_queue;
rxdp = &rxq->rx_ring[rxq->rx_tail];
while ((desc < rxq->nb_rx_desc) &&
rte_le_to_cpu_16(rxdp->wb.status_error0) &
int32_t i, j, nb_rx = 0;
uint64_t pkt_flags = 0;
uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
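+ /* The flex_ts timestamp fields used below exist only in the 32-byte Rx descriptor, so skip this when 16-byte descriptors are compiled in. */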
+#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
struct ice_vsi *vsi = rxq->vsi;
struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
uint64_t ts_ns;
struct ice_adapter *ad = rxq->vsi->adapter;
-
+#endif
rxdp = &rxq->rx_ring[rxq->rx_tail];
rxep = &rxq->sw_ring[rxq->rx_tail];
rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)];
ice_rxd_to_vlan_tci(mb, &rxdp[j]);
rxq->rxd_to_pkt_fields(rxq, mb, &rxdp[j]);
-
+#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
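+ /* Convert the 32-bit hardware timestamp into a 64-bit nanosecond value. */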
ts_ns = ice_tstamp_convert_32b_64b(hw,
rte_le_to_cpu_32(rxdp[j].wb.flex_ts.ts_high));
mb->timesync = rxq->queue_id;
pkt_flags |= PKT_RX_IEEE1588_PTP;
}
-
+#endif
mb->ol_flags |= pkt_flags;
}
uint64_t dma_addr;
uint64_t pkt_flags;
uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
+#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
struct ice_vsi *vsi = rxq->vsi;
struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
uint64_t ts_ns;
struct ice_adapter *ad = rxq->vsi->adapter;
-
+#endif
while (nb_rx < nb_pkts) {
rxdp = &rx_ring[rx_id];
rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
ice_rxd_to_vlan_tci(first_seg, &rxd);
rxq->rxd_to_pkt_fields(rxq, first_seg, &rxd);
pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
-
+#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
ts_ns = ice_tstamp_convert_32b_64b(hw,
rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high));
first_seg->timesync = rxq->queue_id;
pkt_flags |= PKT_RX_IEEE1588_PTP;
}
-
+#endif
first_seg->ol_flags |= pkt_flags;
/* Prefetch data of first segment, if configured to do so. */
rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr,
uint64_t dma_addr;
uint64_t pkt_flags;
uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
+#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
struct ice_vsi *vsi = rxq->vsi;
struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
uint64_t ts_ns;
struct ice_adapter *ad = rxq->vsi->adapter;
-
+#endif
while (nb_rx < nb_pkts) {
rxdp = &rx_ring[rx_id];
rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
ice_rxd_to_vlan_tci(rxm, &rxd);
rxq->rxd_to_pkt_fields(rxq, rxm, &rxd);
pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
-
+#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
ts_ns = ice_tstamp_convert_32b_64b(hw,
rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high));
rxm->timesync = rxq->queue_id;
pkt_flags |= PKT_RX_IEEE1588_PTP;
}
-
+#endif
rxm->ol_flags |= pkt_flags;
/* copy old mbuf to rx_pkts */
rx_pkts[nb_rx++] = rxm;