* Copyright(c) 2018 Intel Corporation
*/
-#include <rte_ethdev_driver.h>
+#include <ethdev_driver.h>
#include <rte_net.h>
+#include <rte_vect.h>
#include "rte_pmd_ice.h"
#include "ice_rxtx.h"
+#include "ice_rxtx_vec_common.h"
-#define ICE_TX_CKSUM_OFFLOAD_MASK ( \
- PKT_TX_IP_CKSUM | \
- PKT_TX_L4_MASK | \
- PKT_TX_TCP_SEG | \
- PKT_TX_OUTER_IP_CKSUM)
+#define ICE_TX_CKSUM_OFFLOAD_MASK (RTE_MBUF_F_TX_IP_CKSUM | \
+ RTE_MBUF_F_TX_L4_MASK | \
+ RTE_MBUF_F_TX_TCP_SEG | \
+ RTE_MBUF_F_TX_OUTER_IP_CKSUM)
/* Offset of mbuf dynamic field for protocol extraction data */
int rte_net_ice_dynfield_proto_xtr_metadata_offs = -1;
uint64_t rte_net_ice_dynflag_proto_xtr_ipv6_mask;
uint64_t rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask;
uint64_t rte_net_ice_dynflag_proto_xtr_tcp_mask;
+uint64_t rte_net_ice_dynflag_proto_xtr_ip_offset_mask;
-static inline uint64_t
-ice_rxdid_to_proto_xtr_ol_flag(uint8_t rxdid)
-{
- static uint64_t *ol_flag_map[] = {
- [ICE_RXDID_COMMS_AUX_VLAN] =
- &rte_net_ice_dynflag_proto_xtr_vlan_mask,
- [ICE_RXDID_COMMS_AUX_IPV4] =
- &rte_net_ice_dynflag_proto_xtr_ipv4_mask,
- [ICE_RXDID_COMMS_AUX_IPV6] =
- &rte_net_ice_dynflag_proto_xtr_ipv6_mask,
- [ICE_RXDID_COMMS_AUX_IPV6_FLOW] =
- &rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask,
- [ICE_RXDID_COMMS_AUX_TCP] =
- &rte_net_ice_dynflag_proto_xtr_tcp_mask,
- };
- uint64_t *ol_flag;
+static int
+ice_monitor_callback(const uint64_t value,
+ const uint64_t arg[RTE_POWER_MONITOR_OPAQUE_SZ] __rte_unused)
+{
+ const uint64_t m = rte_cpu_to_le_16(1 << ICE_RX_FLEX_DESC_STATUS0_DD_S);
+ /*
+ * We expect the DD bit to be set if this descriptor has already been
+ * written back.
+ */
+ return (value & m) == m ? -1 : 0;
+}
- ol_flag = rxdid < RTE_DIM(ol_flag_map) ? ol_flag_map[rxdid] : NULL;
+int
+ice_get_monitor_addr(void *rx_queue, struct rte_power_monitor_cond *pmc)
+{
+ volatile union ice_rx_flex_desc *rxdp;
+ struct ice_rx_queue *rxq = rx_queue;
+ uint16_t desc;
+
+ desc = rxq->rx_tail;
+ rxdp = &rxq->rx_ring[desc];
+ /* watch for changes in status bit */
+ pmc->addr = &rxdp->wb.status_error0;
- return ol_flag != NULL ? *ol_flag : 0ULL;
+ /* comparison callback */
+ pmc->fn = ice_monitor_callback;
+
+ /* register is 16-bit */
+ pmc->size = sizeof(uint16_t);
+
+ return 0;
}
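+
+/*
+ * Illustrative usage sketch (not part of this driver): applications reach
+ * the handler above through the generic ethdev power-management API.
+ * port_id, queue_id and timeout_cycles are placeholders.
+ *
+ *	struct rte_power_monitor_cond pmc;
+ *
+ *	if (rte_eth_get_monitor_addr(port_id, queue_id, &pmc) == 0)
+ *		rte_power_monitor(&pmc, rte_rdtsc() + timeout_cycles);
+ */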
+
static inline uint8_t
ice_proto_xtr_type_to_rxdid(uint8_t xtr_type)
{
static uint8_t rxdid_map[] = {
- [PROTO_XTR_NONE] = ICE_RXDID_COMMS_GENERIC,
+ [PROTO_XTR_NONE] = ICE_RXDID_COMMS_OVS,
[PROTO_XTR_VLAN] = ICE_RXDID_COMMS_AUX_VLAN,
[PROTO_XTR_IPV4] = ICE_RXDID_COMMS_AUX_IPV4,
[PROTO_XTR_IPV6] = ICE_RXDID_COMMS_AUX_IPV6,
[PROTO_XTR_IPV6_FLOW] = ICE_RXDID_COMMS_AUX_IPV6_FLOW,
[PROTO_XTR_TCP] = ICE_RXDID_COMMS_AUX_TCP,
+ [PROTO_XTR_IP_OFFSET] = ICE_RXDID_COMMS_AUX_IP_OFFSET,
};
return xtr_type < RTE_DIM(rxdid_map) ?
- rxdid_map[xtr_type] : ICE_RXDID_COMMS_GENERIC;
+ rxdid_map[xtr_type] : ICE_RXDID_COMMS_OVS;
+}
+
+static inline void
+ice_rxd_to_pkt_fields_by_comms_generic(__rte_unused struct ice_rx_queue *rxq,
+ struct rte_mbuf *mb,
+ volatile union ice_rx_flex_desc *rxdp)
+{
+ volatile struct ice_32b_rx_flex_desc_comms *desc =
+ (volatile struct ice_32b_rx_flex_desc_comms *)rxdp;
+ uint16_t stat_err = rte_le_to_cpu_16(desc->status_error0);
+
+ if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
+ mb->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
+ mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
+ }
+
+#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
+ if (desc->flow_id != 0xFFFFFFFF) {
+ mb->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
+ mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
+ }
+#endif
+}
+
+static inline void
+ice_rxd_to_pkt_fields_by_comms_ovs(__rte_unused struct ice_rx_queue *rxq,
+ struct rte_mbuf *mb,
+ volatile union ice_rx_flex_desc *rxdp)
+{
+ volatile struct ice_32b_rx_flex_desc_comms_ovs *desc =
+ (volatile struct ice_32b_rx_flex_desc_comms_ovs *)rxdp;
+#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
+ uint16_t stat_err;
+#endif
+
+ if (desc->flow_id != 0xFFFFFFFF) {
+ mb->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
+ mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
+ }
+
+#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
+ stat_err = rte_le_to_cpu_16(desc->status_error0);
+ if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
+ mb->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
+ mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
+ }
+#endif
+}
+
+static inline void
+ice_rxd_to_pkt_fields_by_comms_aux_v1(struct ice_rx_queue *rxq,
+ struct rte_mbuf *mb,
+ volatile union ice_rx_flex_desc *rxdp)
+{
+ volatile struct ice_32b_rx_flex_desc_comms *desc =
+ (volatile struct ice_32b_rx_flex_desc_comms *)rxdp;
+ uint16_t stat_err;
+
+ stat_err = rte_le_to_cpu_16(desc->status_error0);
+ if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
+ mb->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
+ mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
+ }
+
+#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
+ if (desc->flow_id != 0xFFFFFFFF) {
+ mb->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
+ mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
+ }
+
+ if (rxq->xtr_ol_flag) {
+ uint32_t metadata = 0;
+
+ stat_err = rte_le_to_cpu_16(desc->status_error1);
+
+ if (stat_err & (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S))
+ metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux0);
+
+ if (stat_err & (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S))
+ metadata |=
+ rte_le_to_cpu_16(desc->flex_ts.flex.aux1) << 16;
+
+ if (metadata) {
+ mb->ol_flags |= rxq->xtr_ol_flag;
+
+ *RTE_NET_ICE_DYNF_PROTO_XTR_METADATA(mb) = metadata;
+ }
+ }
+#endif
+}
+
+static inline void
+ice_rxd_to_pkt_fields_by_comms_aux_v2(struct ice_rx_queue *rxq,
+ struct rte_mbuf *mb,
+ volatile union ice_rx_flex_desc *rxdp)
+{
+ volatile struct ice_32b_rx_flex_desc_comms *desc =
+ (volatile struct ice_32b_rx_flex_desc_comms *)rxdp;
+ uint16_t stat_err;
+
+ stat_err = rte_le_to_cpu_16(desc->status_error0);
+ if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
+ mb->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
+ mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
+ }
+
+#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
+ if (desc->flow_id != 0xFFFFFFFF) {
+ mb->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
+ mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
+ }
+
+ if (rxq->xtr_ol_flag) {
+ uint32_t metadata = 0;
+
+ if (desc->flex_ts.flex.aux0 != 0xFFFF)
+ metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux0);
+ else if (desc->flex_ts.flex.aux1 != 0xFFFF)
+ metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux1);
+
+ if (metadata) {
+ mb->ol_flags |= rxq->xtr_ol_flag;
+
+ *RTE_NET_ICE_DYNF_PROTO_XTR_METADATA(mb) = metadata;
+ }
+ }
+#endif
+}
+
+static const ice_rxd_to_pkt_fields_t rxd_to_pkt_fields_ops[] = {
+ [ICE_RXDID_COMMS_AUX_VLAN] = ice_rxd_to_pkt_fields_by_comms_aux_v1,
+ [ICE_RXDID_COMMS_AUX_IPV4] = ice_rxd_to_pkt_fields_by_comms_aux_v1,
+ [ICE_RXDID_COMMS_AUX_IPV6] = ice_rxd_to_pkt_fields_by_comms_aux_v1,
+ [ICE_RXDID_COMMS_AUX_IPV6_FLOW] = ice_rxd_to_pkt_fields_by_comms_aux_v1,
+ [ICE_RXDID_COMMS_AUX_TCP] = ice_rxd_to_pkt_fields_by_comms_aux_v1,
+ [ICE_RXDID_COMMS_AUX_IP_OFFSET] = ice_rxd_to_pkt_fields_by_comms_aux_v2,
+ [ICE_RXDID_COMMS_GENERIC] = ice_rxd_to_pkt_fields_by_comms_generic,
+ [ICE_RXDID_COMMS_OVS] = ice_rxd_to_pkt_fields_by_comms_ovs,
+};
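+
+/*
+ * Note: rxq->rxdid indexes this table on the Rx hot path; it is set by
+ * ice_select_rxd_to_pkt_fields_handler() below, which also caches the
+ * matching protocol-extraction ol_flag in rxq->xtr_ol_flag.
+ */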
+
+void
+ice_select_rxd_to_pkt_fields_handler(struct ice_rx_queue *rxq, uint32_t rxdid)
+{
+ rxq->rxdid = rxdid;
+
+ switch (rxdid) {
+ case ICE_RXDID_COMMS_AUX_VLAN:
+ rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_vlan_mask;
+ break;
+
+ case ICE_RXDID_COMMS_AUX_IPV4:
+ rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ipv4_mask;
+ break;
+
+ case ICE_RXDID_COMMS_AUX_IPV6:
+ rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ipv6_mask;
+ break;
+
+ case ICE_RXDID_COMMS_AUX_IPV6_FLOW:
+ rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask;
+ break;
+
+ case ICE_RXDID_COMMS_AUX_TCP:
+ rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_tcp_mask;
+ break;
+
+ case ICE_RXDID_COMMS_AUX_IP_OFFSET:
+ rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ip_offset_mask;
+ break;
+
+ case ICE_RXDID_COMMS_GENERIC:
+ /* fallthrough */
+ case ICE_RXDID_COMMS_OVS:
+ break;
+
+ default:
+ /* keep this in sync with the RXDID used for PROTO_XTR_NONE */
+ rxq->rxdid = ICE_RXDID_COMMS_OVS;
+ break;
+ }
+
+ if (!rte_net_ice_dynf_proto_xtr_metadata_avail())
+ rxq->xtr_ol_flag = 0;
}
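+
+/*
+ * App-side sketch (assumes protocol extraction was enabled via the
+ * "proto_xtr" devarg so the dynamic field/flags are registered; helpers
+ * come from rte_pmd_ice.h):
+ *
+ *	if (rte_net_ice_dynf_proto_xtr_metadata_avail() &&
+ *	    (mb->ol_flags & rte_net_ice_dynflag_proto_xtr_tcp_mask))
+ *		meta = rte_net_ice_dynf_proto_xtr_metadata_get(mb);
+ */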
static enum ice_status
{
struct ice_vsi *vsi = rxq->vsi;
struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
- struct rte_eth_dev *dev = ICE_VSI_TO_ETH_DEV(rxq->vsi);
+ struct ice_pf *pf = ICE_VSI_TO_PF(vsi);
+ struct rte_eth_dev_data *dev_data = rxq->vsi->adapter->pf.dev_data;
struct ice_rlan_ctx rx_ctx;
enum ice_status err;
- uint16_t buf_size, len;
- struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
- uint32_t rxdid = ICE_RXDID_COMMS_GENERIC;
+ uint16_t buf_size;
+ uint32_t rxdid = ICE_RXDID_COMMS_OVS;
uint32_t regval;
+ struct ice_adapter *ad = rxq->vsi->adapter;
+ uint32_t frame_size = dev_data->mtu + ICE_ETH_OVERHEAD;
/* Set buffer size as header split is disabled. */
buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
RTE_PKTMBUF_HEADROOM);
rxq->rx_hdr_len = 0;
rxq->rx_buf_len = RTE_ALIGN(buf_size, (1 << ICE_RLAN_CTX_DBUF_S));
- len = ICE_SUPPORT_CHAIN_NUM * rxq->rx_buf_len;
- rxq->max_pkt_len = RTE_MIN(len,
- dev->data->dev_conf.rxmode.max_rx_pkt_len);
-
- if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
- if (rxq->max_pkt_len <= RTE_ETHER_MAX_LEN ||
- rxq->max_pkt_len > ICE_FRAME_SIZE_MAX) {
- PMD_DRV_LOG(ERR, "maximum packet length must "
- "be larger than %u and smaller than %u,"
- "as jumbo frame is enabled",
- (uint32_t)RTE_ETHER_MAX_LEN,
- (uint32_t)ICE_FRAME_SIZE_MAX);
- return -EINVAL;
- }
- } else {
- if (rxq->max_pkt_len < RTE_ETHER_MIN_LEN ||
- rxq->max_pkt_len > RTE_ETHER_MAX_LEN) {
- PMD_DRV_LOG(ERR, "maximum packet length must be "
- "larger than %u and smaller than %u, "
- "as jumbo frame is disabled",
- (uint32_t)RTE_ETHER_MIN_LEN,
- (uint32_t)RTE_ETHER_MAX_LEN);
+ rxq->max_pkt_len =
+ RTE_MIN((uint32_t)ICE_SUPPORT_CHAIN_NUM * rxq->rx_buf_len,
+ frame_size);
+
+ if (rxq->max_pkt_len <= RTE_ETHER_MIN_LEN ||
+ rxq->max_pkt_len > ICE_FRAME_SIZE_MAX) {
+ PMD_DRV_LOG(ERR, "maximum packet length must "
+ "be larger than %u and smaller than %u",
+ (uint32_t)RTE_ETHER_MIN_LEN,
+ (uint32_t)ICE_FRAME_SIZE_MAX);
+ return -EINVAL;
+ }
+
+ if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
+ /* Register mbuf field and flag for Rx timestamp */
+ err = rte_mbuf_dyn_rx_timestamp_register(
+ &ice_timestamp_dynfield_offset,
+ &ice_timestamp_dynflag);
+ if (err) {
+ PMD_DRV_LOG(ERR,
+ "Cannot register mbuf field/flag for timestamp");
return -EINVAL;
}
}
PMD_DRV_LOG(DEBUG, "Port (%u) - Rx queue (%u) is set with RXDID : %u",
rxq->port_id, rxq->queue_id, rxdid);
+ if (!(pf->supported_rxdid & BIT(rxdid))) {
+ PMD_DRV_LOG(ERR, "currently package doesn't support RXDID (%u)",
+ rxdid);
+ return -EINVAL;
+ }
+
+ ice_select_rxd_to_pkt_fields_handler(rxq, rxdid);
+
/* Enable Flexible Descriptors in the queue context, which
* allows this driver to select a specific receive descriptor format
*/
regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
QRXFLXP_CNTXT_RXDID_PRIO_M;
+ if (ad->ptp_ena || rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
+ regval |= QRXFLXP_CNTXT_TS_M;
+
ICE_WRITE_REG(hw, QRXFLXP_CNTXT(rxq->reg_idx), regval);
err = ice_clear_rxq_ctx(hw, rxq->reg_idx);
return -EINVAL;
}
- buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
- RTE_PKTMBUF_HEADROOM);
-
/* Check if scattered RX needs to be used. */
- if (rxq->max_pkt_len > buf_size)
- dev->data->scattered_rx = 1;
+ if (frame_size > buf_size)
+ dev_data->scattered_rx = 1;
rxq->qrx_tail = hw->hw_addr + QRX_TAIL(rxq->reg_idx);
rxq->sw_ring[i].mbuf = NULL;
}
}
-#ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
- if (rxq->rx_nb_avail == 0)
- return;
- for (i = 0; i < rxq->rx_nb_avail; i++) {
- struct rte_mbuf *mbuf;
-
- mbuf = rxq->rx_stage[rxq->rx_next_avail + i];
- rte_pktmbuf_free_seg(mbuf);
- }
- rxq->rx_nb_avail = 0;
-#endif /* RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC */
-}
+ if (rxq->rx_nb_avail == 0)
+ return;
+ for (i = 0; i < rxq->rx_nb_avail; i++)
+ rte_pktmbuf_free_seg(rxq->rx_stage[rxq->rx_next_avail + i]);
-static void
-ice_rx_queue_release_mbufs(struct ice_rx_queue *rxq)
-{
- rxq->rx_rel_mbufs(rxq);
+ rxq->rx_nb_avail = 0;
}
/* turn on or off rx queue
}
static inline int
-#ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
ice_check_rx_burst_bulk_alloc_preconditions(struct ice_rx_queue *rxq)
-#else
-ice_check_rx_burst_bulk_alloc_preconditions
- (__rte_unused struct ice_rx_queue *rxq)
-#endif
{
int ret = 0;
-#ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
if (!(rxq->rx_free_thresh >= ICE_RX_MAX_BURST)) {
PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
"rxq->rx_free_thresh=%d, "
rxq->nb_rx_desc, rxq->rx_free_thresh);
ret = -EINVAL;
}
-#else
- ret = -EINVAL;
-#endif
return ret;
}
return;
}
-#ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
- if (ice_check_rx_burst_bulk_alloc_preconditions(rxq) == 0)
- len = (uint16_t)(rxq->nb_rx_desc + ICE_RX_MAX_BURST);
- else
-#endif /* RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC */
- len = rxq->nb_rx_desc;
+ len = (uint16_t)(rxq->nb_rx_desc + ICE_RX_MAX_BURST);
for (i = 0; i < len * sizeof(union ice_rx_flex_desc); i++)
((volatile char *)rxq->rx_ring)[i] = 0;
-#ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
for (i = 0; i < ICE_RX_MAX_BURST; ++i)
rxq->sw_ring[rxq->nb_rx_desc + i].mbuf = &rxq->fake_mbuf;
rxq->rx_nb_avail = 0;
rxq->rx_next_avail = 0;
rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
-#endif /* RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC */
rxq->rx_tail = 0;
rxq->nb_rx_hold = 0;
/* Init the RX tail register. */
ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
- err = ice_switch_rx_queue(hw, rxq->reg_idx, TRUE);
+ err = ice_switch_rx_queue(hw, rxq->reg_idx, true);
if (err) {
PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
rx_queue_id);
- ice_rx_queue_release_mbufs(rxq);
+ rxq->rx_rel_mbufs(rxq);
ice_reset_rx_queue(rxq);
return -EINVAL;
}
if (rx_queue_id < dev->data->nb_rx_queues) {
rxq = dev->data->rx_queues[rx_queue_id];
- err = ice_switch_rx_queue(hw, rxq->reg_idx, FALSE);
+ err = ice_switch_rx_queue(hw, rxq->reg_idx, false);
if (err) {
PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
rx_queue_id);
return -EINVAL;
}
- ice_rx_queue_release_mbufs(rxq);
+ rxq->rx_rel_mbufs(rxq);
ice_reset_rx_queue(rxq);
dev->data->rx_queue_state[rx_queue_id] =
RTE_ETH_QUEUE_STATE_STOPPED;
int err;
struct ice_vsi *vsi;
struct ice_hw *hw;
- struct ice_aqc_add_tx_qgrp txq_elem;
+ struct ice_aqc_add_tx_qgrp *txq_elem;
struct ice_tlan_ctx tx_ctx;
+ int buf_len;
PMD_INIT_FUNC_TRACE();
return -EINVAL;
}
+ buf_len = ice_struct_size(txq_elem, txqs, 1);
+ txq_elem = ice_malloc(hw, buf_len);
+ if (!txq_elem)
+ return -ENOMEM;
+
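+ /*
+ * buf_len covers the flexible txqs[] member: roughly
+ * sizeof(*txq_elem) + 1 * sizeof(txq_elem->txqs[0]) (a sketch of what
+ * ice_struct_size() expands to, not a verbatim expansion).
+ */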
vsi = txq->vsi;
hw = ICE_VSI_TO_HW(vsi);
- memset(&txq_elem, 0, sizeof(txq_elem));
memset(&tx_ctx, 0, sizeof(tx_ctx));
- txq_elem.num_txqs = 1;
- txq_elem.txqs[0].txq_id = rte_cpu_to_le_16(txq->reg_idx);
+ txq_elem->num_txqs = 1;
+ txq_elem->txqs[0].txq_id = rte_cpu_to_le_16(txq->reg_idx);
tx_ctx.base = txq->tx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;
tx_ctx.qlen = txq->nb_tx_desc;
tx_ctx.tso_ena = 1; /* tso enable */
tx_ctx.tso_qnum = txq->reg_idx; /* index for tso state structure */
tx_ctx.legacy_int = 1; /* Legacy or Advanced Host Interface */
+ tx_ctx.tsyn_ena = 1;
- ice_set_ctx((uint8_t *)&tx_ctx, txq_elem.txqs[0].txq_ctx,
+ ice_set_ctx(hw, (uint8_t *)&tx_ctx, txq_elem->txqs[0].txq_ctx,
ice_tlan_ctx_info);
txq->qtx_tail = hw->hw_addr + QTX_COMM_DBELL(txq->reg_idx);
/* FIXME: we assume TC is always 0 here */
err = ice_ena_vsi_txq(hw->port_info, vsi->idx, 0, tx_queue_id, 1,
- &txq_elem, sizeof(txq_elem), NULL);
+ txq_elem, buf_len, NULL);
if (err) {
PMD_DRV_LOG(ERR, "Failed to add lan txq");
+ rte_free(txq_elem);
return -EIO;
}
/* store the schedule node id */
- txq->q_teid = txq_elem.txqs[0].q_teid;
+ txq->q_teid = txq_elem->txqs[0].q_teid;
dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+
+ rte_free(txq_elem);
return 0;
}
rx_ctx.hbuf = rxq->rx_hdr_len >> ICE_RLAN_CTX_HBUF_S;
rx_ctx.dtype = 0; /* No Header Split mode */
rx_ctx.dsize = 1; /* 32B descriptors */
- rx_ctx.rxmax = RTE_ETHER_MAX_LEN;
+ rx_ctx.rxmax = ICE_ETH_MAX_LEN;
/* TPH: Transaction Layer Packet (TLP) processing hints */
rx_ctx.tphrdesc_ena = 1;
rx_ctx.tphwdesc_ena = 1;
/* Init the RX tail register. */
ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
- err = ice_switch_rx_queue(hw, rxq->reg_idx, TRUE);
+ err = ice_switch_rx_queue(hw, rxq->reg_idx, true);
if (err) {
PMD_DRV_LOG(ERR, "Failed to switch FDIR RX queue %u on",
rx_queue_id);
int err;
struct ice_vsi *vsi;
struct ice_hw *hw;
- struct ice_aqc_add_tx_qgrp txq_elem;
+ struct ice_aqc_add_tx_qgrp *txq_elem;
struct ice_tlan_ctx tx_ctx;
+ int buf_len;
PMD_INIT_FUNC_TRACE();
return -EINVAL;
}
+ buf_len = ice_struct_size(txq_elem, txqs, 1);
+ txq_elem = ice_malloc(hw, buf_len);
+ if (!txq_elem)
+ return -ENOMEM;
+
vsi = txq->vsi;
hw = ICE_VSI_TO_HW(vsi);
- memset(&txq_elem, 0, sizeof(txq_elem));
memset(&tx_ctx, 0, sizeof(tx_ctx));
- txq_elem.num_txqs = 1;
- txq_elem.txqs[0].txq_id = rte_cpu_to_le_16(txq->reg_idx);
+ txq_elem->num_txqs = 1;
+ txq_elem->txqs[0].txq_id = rte_cpu_to_le_16(txq->reg_idx);
tx_ctx.base = txq->tx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;
tx_ctx.qlen = txq->nb_tx_desc;
tx_ctx.tso_qnum = txq->reg_idx; /* index for tso state structure */
tx_ctx.legacy_int = 1; /* Legacy or Advanced Host Interface */
- ice_set_ctx((uint8_t *)&tx_ctx, txq_elem.txqs[0].txq_ctx,
+ ice_set_ctx(hw, (uint8_t *)&tx_ctx, txq_elem->txqs[0].txq_ctx,
ice_tlan_ctx_info);
txq->qtx_tail = hw->hw_addr + QTX_COMM_DBELL(txq->reg_idx);
/* FIXME: we assume TC is always 0 here */
err = ice_ena_vsi_txq(hw->port_info, vsi->idx, 0, tx_queue_id, 1,
- &txq_elem, sizeof(txq_elem), NULL);
+ txq_elem, buf_len, NULL);
if (err) {
PMD_DRV_LOG(ERR, "Failed to add FDIR txq");
+ rte_free(txq_elem);
return -EIO;
}
/* store the schedule node id */
- txq->q_teid = txq_elem.txqs[0].q_teid;
+ txq->q_teid = txq_elem->txqs[0].q_teid;
+ rte_free(txq_elem);
return 0;
}
}
}
}
-static void
-ice_tx_queue_release_mbufs(struct ice_tx_queue *txq)
-{
- txq->tx_rel_mbufs(txq);
-}
static void
ice_reset_tx_queue(struct ice_tx_queue *txq)
return -EINVAL;
}
- ice_tx_queue_release_mbufs(txq);
+ txq->tx_rel_mbufs(txq);
ice_reset_tx_queue(txq);
dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
rxq = pf->fdir.rxq;
- err = ice_switch_rx_queue(hw, rxq->reg_idx, FALSE);
+ err = ice_switch_rx_queue(hw, rxq->reg_idx, false);
if (err) {
PMD_DRV_LOG(ERR, "Failed to switch FDIR RX queue %u off",
rx_queue_id);
return -EINVAL;
}
- ice_rx_queue_release_mbufs(rxq);
+ rxq->rx_rel_mbufs(rxq);
return 0;
}
return -EINVAL;
}
- ice_tx_queue_release_mbufs(txq);
+ txq->tx_rel_mbufs(txq);
return 0;
}
uint32_t ring_size;
uint16_t len;
int use_def_burst_func = 1;
+ uint64_t offloads;
if (nb_desc % ICE_ALIGN_RING_DESC != 0 ||
nb_desc > ICE_MAX_RING_DESC ||
return -EINVAL;
}
+ offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
+
/* Free memory if needed */
if (dev->data->rx_queues[queue_idx]) {
ice_rx_queue_release(dev->data->rx_queues[queue_idx]);
rxq->nb_rx_desc = nb_desc;
rxq->rx_free_thresh = rx_conf->rx_free_thresh;
rxq->queue_id = queue_idx;
+ rxq->offloads = offloads;
rxq->reg_idx = vsi->base_queue + queue_idx;
rxq->port_id = dev->data->port_id;
- if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+ if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
rxq->crc_len = RTE_ETHER_CRC_LEN;
else
rxq->crc_len = 0;
/* Allocate the maximum number of RX ring hardware descriptors. */
len = ICE_MAX_RING_DESC;
-#ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
/**
* Allocating a little more memory because vectorized/bulk_alloc Rx
* functions don't check boundaries each time.
*/
len += ICE_RX_MAX_BURST;
-#endif
/* Size the ring for the maximum number of descriptors. */
ring_size = sizeof(union ice_rx_flex_desc) * len;
return -ENOMEM;
}
+ rxq->mz = rz;
/* Zero all the descriptors in the ring. */
memset(rz->addr, 0, ring_size);
rxq->rx_ring_dma = rz->iova;
rxq->rx_ring = rz->addr;
-#ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
+ /* always reserve more for bulk alloc */
len = (uint16_t)(nb_desc + ICE_RX_MAX_BURST);
-#else
- len = nb_desc;
-#endif
/* Allocate the software ring. */
rxq->sw_ring = rte_zmalloc_socket(NULL,
}
ice_reset_rx_queue(rxq);
- rxq->q_set = TRUE;
+ rxq->q_set = true;
dev->data->rx_queues[queue_idx] = rxq;
rxq->rx_rel_mbufs = _ice_rx_queue_release_mbufs;
use_def_burst_func = ice_check_rx_burst_bulk_alloc_preconditions(rxq);
if (!use_def_burst_func) {
-#ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
"satisfied. Rx Burst Bulk Alloc function will be "
"used on port=%d, queue=%d.",
rxq->port_id, rxq->queue_id);
-#endif /* RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC */
} else {
PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
- "not satisfied, Scattered Rx is requested, "
- "or RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC is "
- "not enabled on port=%d, queue=%d.",
+ "not satisfied, Scattered Rx is requested. "
+ "on port=%d, queue=%d.",
rxq->port_id, rxq->queue_id);
ad->rx_bulk_alloc_allowed = false;
}
return;
}
- ice_rx_queue_release_mbufs(q);
+ q->rx_rel_mbufs(q);
rte_free(q->sw_ring);
+ rte_memzone_free(q->mz);
rte_free(q);
}
return -ENOMEM;
}
+ txq->mz = tz;
txq->nb_tx_desc = nb_desc;
txq->tx_rs_thresh = tx_rs_thresh;
txq->tx_free_thresh = tx_free_thresh;
}
ice_reset_tx_queue(txq);
- txq->q_set = TRUE;
+ txq->q_set = true;
dev->data->tx_queues[queue_idx] = txq;
txq->tx_rel_mbufs = _ice_tx_queue_release_mbufs;
ice_set_tx_function_flag(dev, txq);
return 0;
}
+void
+ice_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
+{
+ ice_rx_queue_release(dev->data->rx_queues[qid]);
+}
+
+void
+ice_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
+{
+ ice_tx_queue_release(dev->data->tx_queues[qid]);
+}
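+
+/*
+ * Note: the two wrappers above adapt the driver's pointer-based release
+ * helpers to the ethdev queue-release ops, which pass (dev, queue_id)
+ * rather than a queue pointer.
+ */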
+
void
ice_tx_queue_release(void *txq)
{
return;
}
- ice_tx_queue_release_mbufs(q);
+ q->tx_rel_mbufs(q);
rte_free(q->sw_ring);
+ rte_memzone_free(q->mz);
rte_free(q);
}
}
uint32_t
-ice_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+ice_rx_queue_count(void *rx_queue)
{
#define ICE_RXQ_SCAN_INTERVAL 4
volatile union ice_rx_flex_desc *rxdp;
struct ice_rx_queue *rxq;
uint16_t desc = 0;
- rxq = dev->data->rx_queues[rx_queue_id];
+ rxq = rx_queue;
rxdp = &rxq->rx_ring[rxq->rx_tail];
while ((desc < rxq->nb_rx_desc) &&
rte_le_to_cpu_16(rxdp->wb.status_error0) &
return 0;
if (likely(!(stat_err0 & ICE_RX_FLEX_ERR0_BITS))) {
- flags |= (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD);
+ flags |= (RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD);
return flags;
}
if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S)))
- flags |= PKT_RX_IP_CKSUM_BAD;
+ flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
else
- flags |= PKT_RX_IP_CKSUM_GOOD;
+ flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S)))
- flags |= PKT_RX_L4_CKSUM_BAD;
+ flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
else
- flags |= PKT_RX_L4_CKSUM_GOOD;
+ flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S)))
- flags |= PKT_RX_EIP_CKSUM_BAD;
+ flags |= RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD;
+
+ if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S)))
+ flags |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD;
+ else
+ flags |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD;
return flags;
}
{
if (rte_le_to_cpu_16(rxdp->wb.status_error0) &
(1 << ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S)) {
- mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
+ mb->ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
mb->vlan_tci =
rte_le_to_cpu_16(rxdp->wb.l2tag1);
PMD_RX_LOG(DEBUG, "Descriptor l2tag1: %u",
#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
if (rte_le_to_cpu_16(rxdp->wb.status_error1) &
(1 << ICE_RX_FLEX_DESC_STATUS1_L2TAG2P_S)) {
- mb->ol_flags |= PKT_RX_QINQ_STRIPPED | PKT_RX_QINQ |
- PKT_RX_VLAN_STRIPPED | PKT_RX_VLAN;
+ mb->ol_flags |= RTE_MBUF_F_RX_QINQ_STRIPPED | RTE_MBUF_F_RX_QINQ |
+ RTE_MBUF_F_RX_VLAN_STRIPPED | RTE_MBUF_F_RX_VLAN;
mb->vlan_tci_outer = mb->vlan_tci;
mb->vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd);
PMD_RX_LOG(DEBUG, "Descriptor l2tag2_1: %u, l2tag2_2: %u",
mb->vlan_tci, mb->vlan_tci_outer);
}
-#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
-#define ICE_RX_PROTO_XTR_VALID \
- ((1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S) | \
- (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S))
-
-static void
-ice_rxd_to_proto_xtr(struct rte_mbuf *mb,
- volatile struct ice_32b_rx_flex_desc_comms *desc)
-{
- uint16_t stat_err = rte_le_to_cpu_16(desc->status_error1);
- uint32_t metadata;
- uint64_t ol_flag;
-
- if (unlikely(!(stat_err & ICE_RX_PROTO_XTR_VALID)))
- return;
-
- ol_flag = ice_rxdid_to_proto_xtr_ol_flag(desc->rxdid);
- if (unlikely(!ol_flag))
- return;
-
- mb->ol_flags |= ol_flag;
-
- metadata = stat_err & (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S) ?
- rte_le_to_cpu_16(desc->flex_ts.flex.aux0) : 0;
-
- if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S)))
- metadata |= rte_le_to_cpu_16(desc->flex_ts.flex.aux1) << 16;
-
- *RTE_NET_ICE_DYNF_PROTO_XTR_METADATA(mb) = metadata;
-}
-#endif
-
-static inline void
-ice_rxd_to_pkt_fields(struct rte_mbuf *mb,
- volatile union ice_rx_flex_desc *rxdp)
-{
- volatile struct ice_32b_rx_flex_desc_comms *desc =
- (volatile struct ice_32b_rx_flex_desc_comms *)rxdp;
- uint16_t stat_err;
-
- stat_err = rte_le_to_cpu_16(desc->status_error0);
- if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
- mb->ol_flags |= PKT_RX_RSS_HASH;
- mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
- }
-
-#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
- if (desc->flow_id != 0xFFFFFFFF) {
- mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
- mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
- }
-
- if (unlikely(rte_net_ice_dynf_proto_xtr_metadata_avail()))
- ice_rxd_to_proto_xtr(mb, desc);
-#endif
-}
-
-#ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
#define ICE_LOOK_AHEAD 8
#if (ICE_LOOK_AHEAD != 8)
#error "PMD ICE: ICE_LOOK_AHEAD must be 8\n"
int32_t i, j, nb_rx = 0;
uint64_t pkt_flags = 0;
uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
-
+#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
+ struct ice_vsi *vsi = rxq->vsi;
+ struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
+ uint64_t ts_ns;
+ struct ice_adapter *ad = rxq->vsi->adapter;
+#endif
rxdp = &rxq->rx_ring[rxq->rx_tail];
rxep = &rxq->sw_ring[rxq->rx_tail];
mb->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)];
ice_rxd_to_vlan_tci(mb, &rxdp[j]);
- ice_rxd_to_pkt_fields(mb, &rxdp[j]);
+ rxd_to_pkt_fields_ops[rxq->rxdid](rxq, mb, &rxdp[j]);
+#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
+ if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
+ ts_ns = ice_tstamp_convert_32b_64b(hw,
+ rte_le_to_cpu_32(rxdp[j].wb.flex_ts.ts_high));
+ if (ice_timestamp_dynflag > 0) {
+ *RTE_MBUF_DYNFIELD(mb,
+ ice_timestamp_dynfield_offset,
+ rte_mbuf_timestamp_t *) = ts_ns;
+ mb->ol_flags |= ice_timestamp_dynflag;
+ }
+ }
+ if (ad->ptp_ena && ((mb->packet_type &
+ RTE_PTYPE_L2_MASK) == RTE_PTYPE_L2_ETHER_TIMESYNC)) {
+ rxq->time_high =
+ rte_le_to_cpu_32(rxdp[j].wb.flex_ts.ts_high);
+ mb->timesync = rxq->queue_id;
+ pkt_flags |= RTE_MBUF_F_RX_IEEE1588_PTP;
+ }
+#endif
mb->ol_flags |= pkt_flags;
}
{
struct ice_rx_queue *rxq = (struct ice_rx_queue *)rx_queue;
uint16_t nb_rx = 0;
- struct rte_eth_dev *dev;
if (!nb_pkts)
return 0;
if (ice_rx_alloc_bufs(rxq) != 0) {
uint16_t i, j;
- dev = ICE_VSI_TO_ETH_DEV(rxq->vsi);
- dev->data->rx_mbuf_alloc_failed +=
+ rxq->vsi->adapter->pf.dev_data->rx_mbuf_alloc_failed +=
rxq->rx_free_thresh;
PMD_RX_LOG(DEBUG, "Rx mbuf alloc failed for "
"port_id=%u, queue_id=%u",
return nb_rx;
}
-#else
-static uint16_t
-ice_recv_pkts_bulk_alloc(void __rte_unused *rx_queue,
- struct rte_mbuf __rte_unused **rx_pkts,
- uint16_t __rte_unused nb_pkts)
-{
- return 0;
-}
-#endif /* RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC */
static uint16_t
ice_recv_scattered_pkts(void *rx_queue,
uint64_t dma_addr;
uint64_t pkt_flags;
uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
- struct rte_eth_dev *dev;
-
+#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
+ struct ice_vsi *vsi = rxq->vsi;
+ struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
+ uint64_t ts_ns;
+ struct ice_adapter *ad = rxq->vsi->adapter;
+#endif
while (nb_rx < nb_pkts) {
rxdp = &rx_ring[rx_id];
rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
/* allocate mbuf */
nmb = rte_mbuf_raw_alloc(rxq->mp);
if (unlikely(!nmb)) {
- dev = ICE_VSI_TO_ETH_DEV(rxq->vsi);
- dev->data->rx_mbuf_alloc_failed++;
+ rxq->vsi->adapter->pf.dev_data->rx_mbuf_alloc_failed++;
break;
}
rxd = *rxdp; /* copy descriptor in ring to temp variable*/
first_seg->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
ice_rxd_to_vlan_tci(first_seg, &rxd);
- ice_rxd_to_pkt_fields(first_seg, &rxd);
+ rxd_to_pkt_fields_ops[rxq->rxdid](rxq, first_seg, &rxd);
pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
+#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
+ if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
+ ts_ns = ice_tstamp_convert_32b_64b(hw,
+ rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high));
+ if (ice_timestamp_dynflag > 0) {
+ *RTE_MBUF_DYNFIELD(first_seg,
+ ice_timestamp_dynfield_offset,
+ rte_mbuf_timestamp_t *) = ts_ns;
+ first_seg->ol_flags |= ice_timestamp_dynflag;
+ }
+ }
+
+ if (ad->ptp_ena && ((first_seg->packet_type & RTE_PTYPE_L2_MASK)
+ == RTE_PTYPE_L2_ETHER_TIMESYNC)) {
+ rxq->time_high =
+ rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high);
+ first_seg->timesync = rxq->queue_id;
+ pkt_flags |= RTE_MBUF_F_RX_IEEE1588_PTP;
+ }
+#endif
first_seg->ol_flags |= pkt_flags;
/* Prefetch data of first segment, if configured to do so. */
rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr,
rx_id = (uint16_t)(rx_id == 0 ?
(rxq->nb_rx_desc - 1) : (rx_id - 1));
/* write TAIL register */
- ICE_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
+ ICE_PCI_REG_WC_WRITE(rxq->qrx_tail, rx_id);
nb_hold = 0;
}
rxq->nb_rx_hold = nb_hold;
ptypes = ptypes_os;
if (dev->rx_pkt_burst == ice_recv_pkts ||
-#ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
dev->rx_pkt_burst == ice_recv_pkts_bulk_alloc ||
-#endif
dev->rx_pkt_burst == ice_recv_scattered_pkts)
return ptypes;
#ifdef RTE_ARCH_X86
if (dev->rx_pkt_burst == ice_recv_pkts_vec ||
dev->rx_pkt_burst == ice_recv_scattered_pkts_vec ||
+#ifdef CC_AVX512_SUPPORT
+ dev->rx_pkt_burst == ice_recv_pkts_vec_avx512 ||
+ dev->rx_pkt_burst == ice_recv_pkts_vec_avx512_offload ||
+ dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx512 ||
+ dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx512_offload ||
+#endif
dev->rx_pkt_burst == ice_recv_pkts_vec_avx2 ||
- dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx2)
+ dev->rx_pkt_burst == ice_recv_pkts_vec_avx2_offload ||
+ dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx2 ||
+ dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx2_offload)
return ptypes;
#endif
return RTE_ETH_TX_DESC_FULL;
}
-void
-ice_clear_queues(struct rte_eth_dev *dev)
-{
- uint16_t i;
-
- PMD_INIT_FUNC_TRACE();
-
- for (i = 0; i < dev->data->nb_tx_queues; i++) {
- ice_tx_queue_release_mbufs(dev->data->tx_queues[i]);
- ice_reset_tx_queue(dev->data->tx_queues[i]);
- }
-
- for (i = 0; i < dev->data->nb_rx_queues; i++) {
- ice_rx_queue_release_mbufs(dev->data->rx_queues[i]);
- ice_reset_rx_queue(dev->data->rx_queues[i]);
- }
-}
-
void
ice_free_queues(struct rte_eth_dev *dev)
{
return -EINVAL;
}
- dev = pf->adapter->eth_dev;
+ dev = &rte_eth_devices[pf->adapter->pf.dev_data->port_id];
/* Allocate the TX queue data structure. */
txq = rte_zmalloc_socket("ice fdir tx queue",
return -ENOMEM;
}
+ txq->mz = tz;
txq->nb_tx_desc = ICE_FDIR_NUM_TX_DESC;
txq->queue_id = ICE_FDIR_QUEUE_ID;
txq->reg_idx = pf->fdir.fdir_vsi->base_queue;
* don't need to allocate software ring and reset for the fdir
* program queue; just mark the queue as configured.
*/
- txq->q_set = TRUE;
+ txq->q_set = true;
pf->fdir.txq = txq;
txq->tx_rel_mbufs = _ice_tx_queue_release_mbufs;
return -EINVAL;
}
- dev = pf->adapter->eth_dev;
+ dev = &rte_eth_devices[pf->adapter->pf.dev_data->port_id];
/* Allocate the RX queue data structure. */
rxq = rte_zmalloc_socket("ice fdir rx queue",
return -ENOMEM;
}
+ rxq->mz = rz;
rxq->nb_rx_desc = ICE_FDIR_NUM_RX_DESC;
rxq->queue_id = ICE_FDIR_QUEUE_ID;
rxq->reg_idx = pf->fdir.fdir_vsi->base_queue;
* Don't need to allocate software ring and reset for the fdir
* rx queue; just mark the queue as configured.
*/
- rxq->q_set = TRUE;
+ rxq->q_set = true;
pf->fdir.rxq = rxq;
rxq->rx_rel_mbufs = _ice_rx_queue_release_mbufs;
uint64_t dma_addr;
uint64_t pkt_flags;
uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
- struct rte_eth_dev *dev;
-
+#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
+ struct ice_vsi *vsi = rxq->vsi;
+ struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
+ uint64_t ts_ns;
+ struct ice_adapter *ad = rxq->vsi->adapter;
+#endif
while (nb_rx < nb_pkts) {
rxdp = &rx_ring[rx_id];
rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
/* allocate mbuf */
nmb = rte_mbuf_raw_alloc(rxq->mp);
if (unlikely(!nmb)) {
- dev = ICE_VSI_TO_ETH_DEV(rxq->vsi);
- dev->data->rx_mbuf_alloc_failed++;
+ rxq->vsi->adapter->pf.dev_data->rx_mbuf_alloc_failed++;
break;
}
rxd = *rxdp; /* copy descriptor in ring to temp variable*/
rxm->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
ice_rxd_to_vlan_tci(rxm, &rxd);
- ice_rxd_to_pkt_fields(rxm, &rxd);
+ rxd_to_pkt_fields_ops[rxq->rxdid](rxq, rxm, &rxd);
pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
+#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
+ if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
+ ts_ns = ice_tstamp_convert_32b_64b(hw,
+ rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high));
+ if (ice_timestamp_dynflag > 0) {
+ *RTE_MBUF_DYNFIELD(rxm,
+ ice_timestamp_dynfield_offset,
+ rte_mbuf_timestamp_t *) = ts_ns;
+ rxm->ol_flags |= ice_timestamp_dynflag;
+ }
+ }
+
+ if (ad->ptp_ena && ((rxm->packet_type & RTE_PTYPE_L2_MASK) ==
+ RTE_PTYPE_L2_ETHER_TIMESYNC)) {
+ rxq->time_high =
+ rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high);
+ rxm->timesync = rxq->queue_id;
+ pkt_flags |= RTE_MBUF_F_RX_IEEE1588_PTP;
+ }
+#endif
rxm->ol_flags |= pkt_flags;
/* copy old mbuf to rx_pkts */
rx_pkts[nb_rx++] = rxm;
rx_id = (uint16_t)(rx_id == 0 ?
(rxq->nb_rx_desc - 1) : (rx_id - 1));
/* write TAIL register */
- ICE_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
+ ICE_PCI_REG_WC_WRITE(rxq->qrx_tail, rx_id);
nb_hold = 0;
}
rxq->nb_rx_hold = nb_hold;
uint32_t *cd_tunneling)
{
/* EIPT: External (outer) IP header type */
- if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
+ if (ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM)
*cd_tunneling |= ICE_TX_CTX_EIPT_IPV4;
- else if (ol_flags & PKT_TX_OUTER_IPV4)
+ else if (ol_flags & RTE_MBUF_F_TX_OUTER_IPV4)
*cd_tunneling |= ICE_TX_CTX_EIPT_IPV4_NO_CSUM;
- else if (ol_flags & PKT_TX_OUTER_IPV6)
+ else if (ol_flags & RTE_MBUF_F_TX_OUTER_IPV6)
*cd_tunneling |= ICE_TX_CTX_EIPT_IPV6;
/* EIPLEN: External (outer) IP header length, in DWords */
ICE_TXD_CTX_QW0_EIPLEN_S;
/* L4TUNT: L4 Tunneling Type */
- switch (ol_flags & PKT_TX_TUNNEL_MASK) {
- case PKT_TX_TUNNEL_IPIP:
+ switch (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) {
+ case RTE_MBUF_F_TX_TUNNEL_IPIP:
/* for non UDP / GRE tunneling, set to 00b */
break;
- case PKT_TX_TUNNEL_VXLAN:
- case PKT_TX_TUNNEL_GTP:
- case PKT_TX_TUNNEL_GENEVE:
+ case RTE_MBUF_F_TX_TUNNEL_VXLAN:
+ case RTE_MBUF_F_TX_TUNNEL_GTP:
+ case RTE_MBUF_F_TX_TUNNEL_GENEVE:
*cd_tunneling |= ICE_TXD_CTX_UDP_TUNNELING;
break;
- case PKT_TX_TUNNEL_GRE:
+ case RTE_MBUF_F_TX_TUNNEL_GRE:
*cd_tunneling |= ICE_TXD_CTX_GRE_TUNNELING;
break;
default:
*cd_tunneling |= (tx_offload.l2_len >> 1) <<
ICE_TXD_CTX_QW0_NATLEN_S;
- if ((ol_flags & PKT_TX_OUTER_UDP_CKSUM) &&
- (ol_flags & PKT_TX_OUTER_IP_CKSUM) &&
+ /**
+ * Calculate the tunneling UDP checksum.
+ * Shall be set only if L4TUNT = 01b and EIPT is not zero
+ */
+ if (!(*cd_tunneling & ICE_TX_CTX_EIPT_NONE) &&
(*cd_tunneling & ICE_TXD_CTX_UDP_TUNNELING))
*cd_tunneling |= ICE_TXD_CTX_QW0_L4T_CS_M;
}
union ice_tx_offload tx_offload)
{
/* Set MACLEN */
- if (ol_flags & PKT_TX_TUNNEL_MASK)
+ if (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)
*td_offset |= (tx_offload.outer_l2_len >> 1)
<< ICE_TX_DESC_LEN_MACLEN_S;
else
<< ICE_TX_DESC_LEN_MACLEN_S;
/* Enable L3 checksum offloads */
- if (ol_flags & PKT_TX_IP_CKSUM) {
+ if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
*td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM;
*td_offset |= (tx_offload.l3_len >> 2) <<
ICE_TX_DESC_LEN_IPLEN_S;
- } else if (ol_flags & PKT_TX_IPV4) {
+ } else if (ol_flags & RTE_MBUF_F_TX_IPV4) {
*td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;
*td_offset |= (tx_offload.l3_len >> 2) <<
ICE_TX_DESC_LEN_IPLEN_S;
- } else if (ol_flags & PKT_TX_IPV6) {
+ } else if (ol_flags & RTE_MBUF_F_TX_IPV6) {
*td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
*td_offset |= (tx_offload.l3_len >> 2) <<
ICE_TX_DESC_LEN_IPLEN_S;
}
- if (ol_flags & PKT_TX_TCP_SEG) {
+ if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
*td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
*td_offset |= (tx_offload.l4_len >> 2) <<
ICE_TX_DESC_LEN_L4_LEN_S;
}
/* Enable L4 checksum offloads */
- switch (ol_flags & PKT_TX_L4_MASK) {
- case PKT_TX_TCP_CKSUM:
+ switch (ol_flags & RTE_MBUF_F_TX_L4_MASK) {
+ case RTE_MBUF_F_TX_TCP_CKSUM:
*td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
*td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
ICE_TX_DESC_LEN_L4_LEN_S;
break;
- case PKT_TX_SCTP_CKSUM:
+ case RTE_MBUF_F_TX_SCTP_CKSUM:
*td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP;
*td_offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
ICE_TX_DESC_LEN_L4_LEN_S;
break;
- case PKT_TX_UDP_CKSUM:
+ case RTE_MBUF_F_TX_UDP_CKSUM:
*td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP;
*td_offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
ICE_TX_DESC_LEN_L4_LEN_S;
desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
if (!(txd[desc_to_clean_to].cmd_type_offset_bsz &
rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE))) {
- PMD_TX_FREE_LOG(DEBUG, "TX descriptor %4u is not done "
- "(port=%d queue=%d) value=0x%"PRIx64"\n",
- desc_to_clean_to,
- txq->port_id, txq->queue_id,
- txd[desc_to_clean_to].cmd_type_offset_bsz);
+ PMD_TX_LOG(DEBUG, "TX descriptor %4u is not done "
+ "(port=%d queue=%d) value=0x%"PRIx64"\n",
+ desc_to_clean_to,
+ txq->port_id, txq->queue_id,
+ txd[desc_to_clean_to].cmd_type_offset_bsz);
/* Failed to clean any descriptors */
return -1;
}
static inline uint16_t
ice_calc_context_desc(uint64_t flags)
{
- static uint64_t mask = PKT_TX_TCP_SEG |
- PKT_TX_QINQ |
- PKT_TX_OUTER_IP_CKSUM |
- PKT_TX_TUNNEL_MASK;
+ static uint64_t mask = RTE_MBUF_F_TX_TCP_SEG |
+ RTE_MBUF_F_TX_QINQ |
+ RTE_MBUF_F_TX_OUTER_IP_CKSUM |
+ RTE_MBUF_F_TX_TUNNEL_MASK |
+ RTE_MBUF_F_TX_IEEE1588_TMST;
return (flags & mask) ? 1 : 0;
}
}
hdr_len = tx_offload.l2_len + tx_offload.l3_len + tx_offload.l4_len;
- hdr_len += (mbuf->ol_flags & PKT_TX_TUNNEL_MASK) ?
+ hdr_len += (mbuf->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) ?
tx_offload.outer_l2_len + tx_offload.outer_l3_len : 0;
cd_cmd = ICE_TX_CTX_DESC_TSO;
return ctx_desc;
}
+/* HW requires that TX buffer size ranges from 1B up to (16K-1)B. */
+#define ICE_MAX_DATA_PER_TXD \
+ (ICE_TXD_QW1_TX_BUF_SZ_M >> ICE_TXD_QW1_TX_BUF_SZ_S)
+/* Calculate the number of TX descriptors needed for each pkt */
+static inline uint16_t
+ice_calc_pkt_desc(struct rte_mbuf *tx_pkt)
+{
+ struct rte_mbuf *txd = tx_pkt;
+ uint16_t count = 0;
+
+ while (txd != NULL) {
+ count += DIV_ROUND_UP(txd->data_len, ICE_MAX_DATA_PER_TXD);
+ txd = txd->next;
+ }
+
+ return count;
+}
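+
+/*
+ * Worked example (illustrative): with ICE_MAX_DATA_PER_TXD == 16383, a
+ * TSO chain of two segments with data_len 16000 and 20000 needs
+ * DIV_ROUND_UP(16000, 16383) + DIV_ROUND_UP(20000, 16383) = 1 + 2 = 3
+ * data descriptors, while nb_segs alone would suggest only 2.
+ */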
+
uint16_t
ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
uint32_t td_offset = 0;
uint32_t td_tag = 0;
uint16_t tx_last;
+ uint16_t slen;
uint64_t buf_dma_addr;
uint64_t ol_flags;
union ice_tx_offload tx_offload = {0};
/* Check if the descriptor ring needs to be cleaned. */
if (txq->nb_tx_free < txq->tx_free_thresh)
- ice_xmit_cleanup(txq);
+ (void)ice_xmit_cleanup(txq);
for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
tx_pkt = *tx_pkts++;
td_cmd = 0;
+ td_tag = 0;
+ td_offset = 0;
ol_flags = tx_pkt->ol_flags;
tx_offload.l2_len = tx_pkt->l2_len;
tx_offload.l3_len = tx_pkt->l3_len;
/* The number of descriptors that must be allocated for
* a packet equals the number of segments of that packet
* plus one context descriptor, if needed.
+ * Recalculate the needed Tx descriptors when TSO is enabled,
+ * in case an mbuf's data size exceeds the max data size the
+ * HW allows per Tx descriptor.
*/
- nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);
+ if (ol_flags & RTE_MBUF_F_TX_TCP_SEG)
+ nb_used = (uint16_t)(ice_calc_pkt_desc(tx_pkt) +
+ nb_ctx);
+ else
+ nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);
tx_last = (uint16_t)(tx_id + nb_used - 1);
/* Circular ring */
}
/* Descriptor based VLAN insertion */
- if (ol_flags & (PKT_TX_VLAN | PKT_TX_QINQ)) {
+ if (ol_flags & (RTE_MBUF_F_TX_VLAN | RTE_MBUF_F_TX_QINQ)) {
td_cmd |= ICE_TX_DESC_CMD_IL2TAG1;
td_tag = tx_pkt->vlan_tci;
}
/* Fill in tunneling parameters if necessary */
cd_tunneling_params = 0;
- if (ol_flags & PKT_TX_TUNNEL_MASK)
+ if (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)
ice_parse_tunneling_params(ol_flags, tx_offload,
&cd_tunneling_params);
/* Enable checksum offloading */
- if (ol_flags & ICE_TX_CKSUM_OFFLOAD_MASK) {
+ if (ol_flags & ICE_TX_CKSUM_OFFLOAD_MASK)
ice_txd_enable_checksum(ol_flags, &td_cmd,
&td_offset, tx_offload);
- }
if (nb_ctx) {
/* Setup TX context descriptor if required */
txe->mbuf = NULL;
}
- if (ol_flags & PKT_TX_TCP_SEG)
+ if (ol_flags & RTE_MBUF_F_TX_TCP_SEG)
cd_type_cmd_tso_mss |=
ice_set_tso_ctx(tx_pkt, tx_offload);
+ else if (ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST)
+ cd_type_cmd_tso_mss |=
+ ((uint64_t)ICE_TX_CTX_DESC_TSYN <<
+ ICE_TXD_CTX_QW1_CMD_S);
ctx_txd->tunneling_params =
rte_cpu_to_le_32(cd_tunneling_params);
/* TX context descriptor based double VLAN insert */
- if (ol_flags & PKT_TX_QINQ) {
+ if (ol_flags & RTE_MBUF_F_TX_QINQ) {
cd_l2tag2 = tx_pkt->vlan_tci_outer;
cd_type_cmd_tso_mss |=
((uint64_t)ICE_TX_CTX_DESC_IL2TAG2 <<
txe->mbuf = m_seg;
/* Setup TX Descriptor */
+ slen = m_seg->data_len;
buf_dma_addr = rte_mbuf_data_iova(m_seg);
+
+ while ((ol_flags & RTE_MBUF_F_TX_TCP_SEG) &&
+ unlikely(slen > ICE_MAX_DATA_PER_TXD)) {
+ txd->buf_addr = rte_cpu_to_le_64(buf_dma_addr);
+ txd->cmd_type_offset_bsz =
+ rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DATA |
+ ((uint64_t)td_cmd << ICE_TXD_QW1_CMD_S) |
+ ((uint64_t)td_offset << ICE_TXD_QW1_OFFSET_S) |
+ ((uint64_t)ICE_MAX_DATA_PER_TXD <<
+ ICE_TXD_QW1_TX_BUF_SZ_S) |
+ ((uint64_t)td_tag << ICE_TXD_QW1_L2TAG1_S));
+
+ buf_dma_addr += ICE_MAX_DATA_PER_TXD;
+ slen -= ICE_MAX_DATA_PER_TXD;
+
+ txe->last_id = tx_last;
+ tx_id = txe->next_id;
+ txe = txn;
+ txd = &tx_ring[tx_id];
+ txn = &sw_ring[txe->next_id];
+ }
+
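+ /* Write the final (or only) slice of this segment. */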
txd->buf_addr = rte_cpu_to_le_64(buf_dma_addr);
txd->cmd_type_offset_bsz =
rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DATA |
- ((uint64_t)td_cmd << ICE_TXD_QW1_CMD_S) |
+ ((uint64_t)td_cmd << ICE_TXD_QW1_CMD_S) |
((uint64_t)td_offset << ICE_TXD_QW1_OFFSET_S) |
- ((uint64_t)m_seg->data_len <<
- ICE_TXD_QW1_TX_BUF_SZ_S) |
- ((uint64_t)td_tag << ICE_TXD_QW1_L2TAG1_S));
+ ((uint64_t)slen << ICE_TXD_QW1_TX_BUF_SZ_S) |
+ ((uint64_t)td_tag << ICE_TXD_QW1_L2TAG1_S));
txe->last_id = tx_last;
tx_id = txe->next_id;
/* set RS bit on the last descriptor of one packet */
if (txq->nb_tx_used >= txq->tx_rs_thresh) {
- PMD_TX_FREE_LOG(DEBUG,
- "Setting RS bit on TXD id="
- "%4u (port=%d queue=%d)",
- tx_last, txq->port_id, txq->queue_id);
+ PMD_TX_LOG(DEBUG,
+ "Setting RS bit on TXD id="
+ "%4u (port=%d queue=%d)",
+ tx_last, txq->port_id, txq->queue_id);
td_cmd |= ICE_TX_DESC_CMD_RS;
return nb_tx;
}
-static inline int __attribute__((always_inline))
+static __rte_always_inline int
ice_tx_free_bufs(struct ice_tx_queue *txq)
{
struct ice_tx_entry *txep;
for (i = 0; i < txq->tx_rs_thresh; i++)
rte_prefetch0((txep + i)->mbuf);
- if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) {
+ if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) {
for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
rte_mempool_put(txep->mbuf->pool, txep->mbuf);
txep->mbuf = NULL;
return txq->tx_rs_thresh;
}
+static int
+ice_tx_done_cleanup_full(struct ice_tx_queue *txq,
+ uint32_t free_cnt)
+{
+ struct ice_tx_entry *swr_ring = txq->sw_ring;
+ uint16_t i, tx_last, tx_id;
+ uint16_t nb_tx_free_last;
+ uint16_t nb_tx_to_clean;
+ uint32_t pkt_cnt;
+
+ /* Start freeing mbufs from the entry after tx_tail */
+ tx_last = txq->tx_tail;
+ tx_id = swr_ring[tx_last].next_id;
+
+ if (txq->nb_tx_free == 0 && ice_xmit_cleanup(txq))
+ return 0;
+
+ nb_tx_to_clean = txq->nb_tx_free;
+ nb_tx_free_last = txq->nb_tx_free;
+ if (!free_cnt)
+ free_cnt = txq->nb_tx_desc;
+
+ /* Loop through swr_ring to count the number of
+ * freeable mbufs and packets.
+ */
+ for (pkt_cnt = 0; pkt_cnt < free_cnt; ) {
+ for (i = 0; i < nb_tx_to_clean &&
+ pkt_cnt < free_cnt &&
+ tx_id != tx_last; i++) {
+ if (swr_ring[tx_id].mbuf != NULL) {
+ rte_pktmbuf_free_seg(swr_ring[tx_id].mbuf);
+ swr_ring[tx_id].mbuf = NULL;
+
+ /*
+ * last segment in the packet,
+ * increment packet count
+ */
+ pkt_cnt += (swr_ring[tx_id].last_id == tx_id);
+ }
+
+ tx_id = swr_ring[tx_id].next_id;
+ }
+
+ if (txq->tx_rs_thresh > txq->nb_tx_desc -
+ txq->nb_tx_free || tx_id == tx_last)
+ break;
+
+ if (pkt_cnt < free_cnt) {
+ if (ice_xmit_cleanup(txq))
+ break;
+
+ nb_tx_to_clean = txq->nb_tx_free - nb_tx_free_last;
+ nb_tx_free_last = txq->nb_tx_free;
+ }
+ }
+
+ return (int)pkt_cnt;
+}
+
+#ifdef RTE_ARCH_X86
+static int
+ice_tx_done_cleanup_vec(struct ice_tx_queue *txq __rte_unused,
+ uint32_t free_cnt __rte_unused)
+{
+ return -ENOTSUP;
+}
+#endif
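+
+/*
+ * The vector Tx paths recycle mbufs in bulk on their own schedule, so
+ * selective done-cleanup is not supported there (hence -ENOTSUP above).
+ */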
+
+static int
+ice_tx_done_cleanup_simple(struct ice_tx_queue *txq,
+ uint32_t free_cnt)
+{
+ int i, n, cnt;
+
+ if (free_cnt == 0 || free_cnt > txq->nb_tx_desc)
+ free_cnt = txq->nb_tx_desc;
+
+ cnt = free_cnt - free_cnt % txq->tx_rs_thresh;
+
+ for (i = 0; i < cnt; i += n) {
+ if (txq->nb_tx_desc - txq->nb_tx_free < txq->tx_rs_thresh)
+ break;
+
+ n = ice_tx_free_bufs(txq);
+
+ if (n == 0)
+ break;
+ }
+
+ return i;
+}
+
+int
+ice_tx_done_cleanup(void *txq, uint32_t free_cnt)
+{
+ struct ice_tx_queue *q = (struct ice_tx_queue *)txq;
+ struct rte_eth_dev *dev = &rte_eth_devices[q->port_id];
+ struct ice_adapter *ad =
+ ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+
+#ifdef RTE_ARCH_X86
+ if (ad->tx_vec_allowed)
+ return ice_tx_done_cleanup_vec(q, free_cnt);
+#endif
+ if (ad->tx_simple_allowed)
+ return ice_tx_done_cleanup_simple(q, free_cnt);
+ else
+ return ice_tx_done_cleanup_full(q, free_cnt);
+}
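+
+/*
+ * Applications reach this through the generic hook, e.g. (sketch):
+ *
+ *	int n = rte_eth_tx_done_cleanup(port_id, queue_id, 0);
+ *
+ * where free_cnt == 0 means "free as many as possible", capped at
+ * nb_tx_desc in the handlers above.
+ */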
+
/* Populate 4 descriptors with data from 4 mbufs */
static inline void
tx4(volatile struct ice_tx_desc *txdp, struct rte_mbuf **pkts)
txq->tx_tail = 0;
/* Update the tx tail register */
- ICE_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
+ ICE_PCI_REG_WC_WRITE(txq->qtx_tail, txq->tx_tail);
return nb_pkts;
}
return nb_tx;
}
-void __attribute__((cold))
+void __rte_cold
ice_set_rx_function(struct rte_eth_dev *dev)
{
PMD_INIT_FUNC_TRACE();
#ifdef RTE_ARCH_X86
struct ice_rx_queue *rxq;
int i;
- bool use_avx2 = false;
+ int rx_check_ret = -1;
if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
- if (!ice_rx_vec_dev_check(dev) && ad->rx_bulk_alloc_allowed) {
+ ad->rx_use_avx512 = false;
+ ad->rx_use_avx2 = false;
+ rx_check_ret = ice_rx_vec_dev_check(dev);
+ if (ad->ptp_ena)
+ rx_check_ret = -1;
+ if (rx_check_ret >= 0 && ad->rx_bulk_alloc_allowed &&
+ rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
ad->rx_vec_allowed = true;
for (i = 0; i < dev->data->nb_rx_queues; i++) {
rxq = dev->data->rx_queues[i];
}
}
- if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
- rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1)
- use_avx2 = true;
+ if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_512 &&
+ rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1 &&
+ rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512BW) == 1)
+#ifdef CC_AVX512_SUPPORT
+ ad->rx_use_avx512 = true;
+#else
+ PMD_DRV_LOG(NOTICE,
+ "AVX512 is not supported in build env");
+#endif
+ if (!ad->rx_use_avx512 &&
+ (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
+ rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) &&
+ rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
+ ad->rx_use_avx2 = true;
} else {
ad->rx_vec_allowed = false;
if (ad->rx_vec_allowed) {
if (dev->data->scattered_rx) {
- PMD_DRV_LOG(DEBUG,
- "Using %sVector Scattered Rx (port %d).",
- use_avx2 ? "avx2 " : "",
+ if (ad->rx_use_avx512) {
+#ifdef CC_AVX512_SUPPORT
+ if (rx_check_ret == ICE_VECTOR_OFFLOAD_PATH) {
+ PMD_DRV_LOG(NOTICE,
+ "Using AVX512 OFFLOAD Vector Scattered Rx (port %d).",
+ dev->data->port_id);
+ dev->rx_pkt_burst =
+ ice_recv_scattered_pkts_vec_avx512_offload;
+ } else {
+ PMD_DRV_LOG(NOTICE,
+ "Using AVX512 Vector Scattered Rx (port %d).",
+ dev->data->port_id);
+ dev->rx_pkt_burst =
+ ice_recv_scattered_pkts_vec_avx512;
+ }
+#endif
+ } else if (ad->rx_use_avx2) {
+ if (rx_check_ret == ICE_VECTOR_OFFLOAD_PATH) {
+ PMD_DRV_LOG(NOTICE,
+ "Using AVX2 OFFLOAD Vector Scattered Rx (port %d).",
+ dev->data->port_id);
+ dev->rx_pkt_burst =
+ ice_recv_scattered_pkts_vec_avx2_offload;
+ } else {
+ PMD_DRV_LOG(NOTICE,
+ "Using AVX2 Vector Scattered Rx (port %d).",
+ dev->data->port_id);
+ dev->rx_pkt_burst =
+ ice_recv_scattered_pkts_vec_avx2;
+ }
+ } else {
+ PMD_DRV_LOG(DEBUG,
+ "Using Vector Scattered Rx (port %d).",
dev->data->port_id);
- dev->rx_pkt_burst = use_avx2 ?
- ice_recv_scattered_pkts_vec_avx2 :
- ice_recv_scattered_pkts_vec;
+ dev->rx_pkt_burst = ice_recv_scattered_pkts_vec;
+ }
} else {
- PMD_DRV_LOG(DEBUG, "Using %sVector Rx (port %d).",
- use_avx2 ? "avx2 " : "",
+ if (ad->rx_use_avx512) {
+#ifdef CC_AVX512_SUPPORT
+ if (rx_check_ret == ICE_VECTOR_OFFLOAD_PATH) {
+ PMD_DRV_LOG(NOTICE,
+ "Using AVX512 OFFLOAD Vector Rx (port %d).",
+ dev->data->port_id);
+ dev->rx_pkt_burst =
+ ice_recv_pkts_vec_avx512_offload;
+ } else {
+ PMD_DRV_LOG(NOTICE,
+ "Using AVX512 Vector Rx (port %d).",
+ dev->data->port_id);
+ dev->rx_pkt_burst =
+ ice_recv_pkts_vec_avx512;
+ }
+#endif
+ } else if (ad->rx_use_avx2) {
+ if (rx_check_ret == ICE_VECTOR_OFFLOAD_PATH) {
+ PMD_DRV_LOG(NOTICE,
+ "Using AVX2 OFFLOAD Vector Rx (port %d).",
+ dev->data->port_id);
+ dev->rx_pkt_burst =
+ ice_recv_pkts_vec_avx2_offload;
+ } else {
+ PMD_DRV_LOG(NOTICE,
+ "Using AVX2 Vector Rx (port %d).",
+ dev->data->port_id);
+ dev->rx_pkt_burst =
+ ice_recv_pkts_vec_avx2;
+ }
+ } else {
+ PMD_DRV_LOG(DEBUG,
+ "Using Vector Rx (port %d).",
dev->data->port_id);
- dev->rx_pkt_burst = use_avx2 ?
- ice_recv_pkts_vec_avx2 :
- ice_recv_pkts_vec;
+ dev->rx_pkt_burst = ice_recv_pkts_vec;
+ }
}
return;
}
{ ice_recv_pkts_bulk_alloc, "Scalar Bulk Alloc" },
{ ice_recv_pkts, "Scalar" },
#ifdef RTE_ARCH_X86
+#ifdef CC_AVX512_SUPPORT
+ { ice_recv_scattered_pkts_vec_avx512, "Vector AVX512 Scattered" },
+ { ice_recv_scattered_pkts_vec_avx512_offload, "Offload Vector AVX512 Scattered" },
+ { ice_recv_pkts_vec_avx512, "Vector AVX512" },
+ { ice_recv_pkts_vec_avx512_offload, "Offload Vector AVX512" },
+#endif
{ ice_recv_scattered_pkts_vec_avx2, "Vector AVX2 Scattered" },
+ { ice_recv_scattered_pkts_vec_avx2_offload, "Offload Vector AVX2 Scattered" },
{ ice_recv_pkts_vec_avx2, "Vector AVX2" },
+ { ice_recv_pkts_vec_avx2_offload, "Offload Vector AVX2" },
{ ice_recv_scattered_pkts_vec, "Vector SSE Scattered" },
{ ice_recv_pkts_vec, "Vector SSE" },
#endif
return ret;
}
-void __attribute__((cold))
+void __rte_cold
ice_set_tx_function_flag(struct rte_eth_dev *dev, struct ice_tx_queue *txq)
{
struct ice_adapter *ad =
/* Use a simple Tx queue if possible (only fast free is allowed) */
ad->tx_simple_allowed =
(txq->offloads ==
- (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) &&
+ (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) &&
txq->tx_rs_thresh >= ICE_TX_MAX_BURST);
if (ad->tx_simple_allowed)
m = tx_pkts[i];
ol_flags = m->ol_flags;
- if (ol_flags & PKT_TX_TCP_SEG &&
+ if (ol_flags & RTE_MBUF_F_TX_TCP_SEG &&
(m->tso_segsz < ICE_MIN_TSO_MSS ||
m->tso_segsz > ICE_MAX_TSO_MSS ||
m->pkt_len > ICE_MAX_TSO_FRAME_SIZE)) {
return i;
}
-#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+#ifdef RTE_ETHDEV_DEBUG_TX
ret = rte_validate_tx_offload(m);
if (ret != 0) {
rte_errno = -ret;
return i;
}
-void __attribute__((cold))
+void __rte_cold
ice_set_tx_function(struct rte_eth_dev *dev)
{
struct ice_adapter *ad =
#ifdef RTE_ARCH_X86
struct ice_tx_queue *txq;
int i;
- bool use_avx2 = false;
+ int tx_check_ret = -1;
if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
- if (!ice_tx_vec_dev_check(dev)) {
+ ad->tx_use_avx2 = false;
+ ad->tx_use_avx512 = false;
+ tx_check_ret = ice_tx_vec_dev_check(dev);
+ if (tx_check_ret >= 0 &&
+ rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
ad->tx_vec_allowed = true;
- for (i = 0; i < dev->data->nb_tx_queues; i++) {
- txq = dev->data->tx_queues[i];
- if (txq && ice_txq_vec_setup(txq)) {
- ad->tx_vec_allowed = false;
- break;
+
+ if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_512 &&
+ rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1 &&
+ rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512BW) == 1)
+#ifdef CC_AVX512_SUPPORT
+ ad->tx_use_avx512 = true;
+#else
+ PMD_DRV_LOG(NOTICE,
+ "AVX512 is not supported in build env");
+#endif
+ if (!ad->tx_use_avx512 &&
+ (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
+ rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) &&
+ rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
+ ad->tx_use_avx2 = true;
+
+ if (!ad->tx_use_avx2 && !ad->tx_use_avx512 &&
+ tx_check_ret == ICE_VECTOR_OFFLOAD_PATH)
+ ad->tx_vec_allowed = false;
+
+ if (ad->tx_vec_allowed) {
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txq = dev->data->tx_queues[i];
+ if (txq && ice_txq_vec_setup(txq)) {
+ ad->tx_vec_allowed = false;
+ break;
+ }
}
}
-
- if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
- rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1)
- use_avx2 = true;
-
} else {
ad->tx_vec_allowed = false;
}
}
if (ad->tx_vec_allowed) {
- PMD_DRV_LOG(DEBUG, "Using %sVector Tx (port %d).",
- use_avx2 ? "avx2 " : "",
- dev->data->port_id);
- dev->tx_pkt_burst = use_avx2 ?
- ice_xmit_pkts_vec_avx2 :
- ice_xmit_pkts_vec;
dev->tx_pkt_prepare = NULL;
+ if (ad->tx_use_avx512) {
+#ifdef CC_AVX512_SUPPORT
+ if (tx_check_ret == ICE_VECTOR_OFFLOAD_PATH) {
+ PMD_DRV_LOG(NOTICE,
+ "Using AVX512 OFFLOAD Vector Tx (port %d).",
+ dev->data->port_id);
+ dev->tx_pkt_burst =
+ ice_xmit_pkts_vec_avx512_offload;
+ dev->tx_pkt_prepare = ice_prep_pkts;
+ } else {
+ PMD_DRV_LOG(NOTICE,
+ "Using AVX512 Vector Tx (port %d).",
+ dev->data->port_id);
+ dev->tx_pkt_burst = ice_xmit_pkts_vec_avx512;
+ }
+#endif
+ } else {
+ if (tx_check_ret == ICE_VECTOR_OFFLOAD_PATH) {
+ PMD_DRV_LOG(NOTICE,
+ "Using AVX2 OFFLOAD Vector Tx (port %d).",
+ dev->data->port_id);
+ dev->tx_pkt_burst =
+ ice_xmit_pkts_vec_avx2_offload;
+ dev->tx_pkt_prepare = ice_prep_pkts;
+ } else {
+ PMD_DRV_LOG(DEBUG, "Using %sVector Tx (port %d).",
+ ad->tx_use_avx2 ? "avx2 " : "",
+ dev->data->port_id);
+ dev->tx_pkt_burst = ad->tx_use_avx2 ?
+ ice_xmit_pkts_vec_avx2 :
+ ice_xmit_pkts_vec;
+ }
+ }
return;
}
{ ice_xmit_pkts_simple, "Scalar Simple" },
{ ice_xmit_pkts, "Scalar" },
#ifdef RTE_ARCH_X86
+#ifdef CC_AVX512_SUPPORT
+ { ice_xmit_pkts_vec_avx512, "Vector AVX512" },
+ { ice_xmit_pkts_vec_avx512_offload, "Offload Vector AVX512" },
+#endif
{ ice_xmit_pkts_vec_avx2, "Vector AVX2" },
{ ice_xmit_pkts_vec, "Vector SSE" },
#endif
RTE_PTYPE_L4_TCP,
[93] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_L4_SCTP,
- [94] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ [94] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_L4_ICMP,
/* IPv6 --> IPv4 */
RTE_PTYPE_TUNNEL_GTPU |
RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_INNER_L4_ICMP,
+
+ /* IPv4 --> UDP ECPRI */
+ [372] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_UDP,
+ [373] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_UDP,
+ [374] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_UDP,
+ [375] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_UDP,
+ [376] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_UDP,
+ [377] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_UDP,
+ [378] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_UDP,
+ [379] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_UDP,
+ [380] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_UDP,
+ [381] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_UDP,
+
+ /* IPV6 --> UDP ECPRI */
+ [382] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_UDP,
+ [383] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_UDP,
+ [384] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_UDP,
+ [385] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_UDP,
+ [386] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_UDP,
+ [387] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_UDP,
+ [388] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_UDP,
+ [389] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_UDP,
+ [390] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_UDP,
+ [391] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_UDP,
/* All others reserved */
};
return type_table[ptype];
}
-void __attribute__((cold))
+void __rte_cold
ice_set_default_ptype_table(struct rte_eth_dev *dev)
{
struct ice_adapter *ad =