uint64_t rte_net_ice_dynflag_proto_xtr_ipv6_mask;
uint64_t rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask;
uint64_t rte_net_ice_dynflag_proto_xtr_tcp_mask;
+uint64_t rte_net_ice_dynflag_proto_xtr_ip_offset_mask;
static inline uint64_t
-ice_rxdid_to_proto_xtr_ol_flag(uint8_t rxdid)
-{
- static uint64_t *ol_flag_map[] = {
- [ICE_RXDID_COMMS_AUX_VLAN] =
- &rte_net_ice_dynflag_proto_xtr_vlan_mask,
- [ICE_RXDID_COMMS_AUX_IPV4] =
- &rte_net_ice_dynflag_proto_xtr_ipv4_mask,
- [ICE_RXDID_COMMS_AUX_IPV6] =
- &rte_net_ice_dynflag_proto_xtr_ipv6_mask,
- [ICE_RXDID_COMMS_AUX_IPV6_FLOW] =
- &rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask,
- [ICE_RXDID_COMMS_AUX_TCP] =
- &rte_net_ice_dynflag_proto_xtr_tcp_mask,
+ice_rxdid_to_proto_xtr_ol_flag(uint8_t rxdid, bool *chk_valid)
+{
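+	/* Per-RXDID entry: the dynamic mbuf flag to set in ol_flags and
+	 * whether the STATUS1 XTRMD valid bits must be checked before the
+	 * extracted metadata is used.
+	 */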
+ static struct {
+ uint64_t *ol_flag;
+ bool chk_valid;
+ } ol_flag_map[] = {
+ [ICE_RXDID_COMMS_AUX_VLAN] = {
+ &rte_net_ice_dynflag_proto_xtr_vlan_mask, true },
+ [ICE_RXDID_COMMS_AUX_IPV4] = {
+ &rte_net_ice_dynflag_proto_xtr_ipv4_mask, true },
+ [ICE_RXDID_COMMS_AUX_IPV6] = {
+ &rte_net_ice_dynflag_proto_xtr_ipv6_mask, true },
+ [ICE_RXDID_COMMS_AUX_IPV6_FLOW] = {
+ &rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask, true },
+ [ICE_RXDID_COMMS_AUX_TCP] = {
+ &rte_net_ice_dynflag_proto_xtr_tcp_mask, true },
+ [ICE_RXDID_COMMS_AUX_IP_OFFSET] = {
+ &rte_net_ice_dynflag_proto_xtr_ip_offset_mask, false },
};
uint64_t *ol_flag;
- ol_flag = rxdid < RTE_DIM(ol_flag_map) ? ol_flag_map[rxdid] : NULL;
+ if (rxdid < RTE_DIM(ol_flag_map)) {
+ ol_flag = ol_flag_map[rxdid].ol_flag;
+ if (!ol_flag)
+ return 0ULL;
- return ol_flag != NULL ? *ol_flag : 0ULL;
+ *chk_valid = ol_flag_map[rxdid].chk_valid;
+ return *ol_flag;
+ }
+
+ return 0ULL;
}
static inline uint8_t
[PROTO_XTR_IPV6] = ICE_RXDID_COMMS_AUX_IPV6,
[PROTO_XTR_IPV6_FLOW] = ICE_RXDID_COMMS_AUX_IPV6_FLOW,
[PROTO_XTR_TCP] = ICE_RXDID_COMMS_AUX_TCP,
+ [PROTO_XTR_IP_OFFSET] = ICE_RXDID_COMMS_AUX_IP_OFFSET,
};
return xtr_type < RTE_DIM(rxdid_map) ?
rxq->sw_ring[i].mbuf = NULL;
}
}
-#ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
- if (rxq->rx_nb_avail == 0)
- return;
- for (i = 0; i < rxq->rx_nb_avail; i++) {
- struct rte_mbuf *mbuf;
-
- mbuf = rxq->rx_stage[rxq->rx_next_avail + i];
- rte_pktmbuf_free_seg(mbuf);
- }
- rxq->rx_nb_avail = 0;
-#endif /* RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC */
-}
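+	/* Also free any mbufs still held in the bulk-allocation stage. */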
+ if (rxq->rx_nb_avail == 0)
+ return;
+ for (i = 0; i < rxq->rx_nb_avail; i++)
+ rte_pktmbuf_free_seg(rxq->rx_stage[rxq->rx_next_avail + i]);
-static void
-ice_rx_queue_release_mbufs(struct ice_rx_queue *rxq)
-{
- rxq->rx_rel_mbufs(rxq);
+ rxq->rx_nb_avail = 0;
}
/* turn on or off rx queue
}
static inline int
-#ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
ice_check_rx_burst_bulk_alloc_preconditions(struct ice_rx_queue *rxq)
-#else
-ice_check_rx_burst_bulk_alloc_preconditions
- (__rte_unused struct ice_rx_queue *rxq)
-#endif
{
int ret = 0;
-#ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
if (!(rxq->rx_free_thresh >= ICE_RX_MAX_BURST)) {
PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
"rxq->rx_free_thresh=%d, "
rxq->nb_rx_desc, rxq->rx_free_thresh);
ret = -EINVAL;
}
-#else
- ret = -EINVAL;
-#endif
return ret;
}
return;
}
-#ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
- if (ice_check_rx_burst_bulk_alloc_preconditions(rxq) == 0)
- len = (uint16_t)(rxq->nb_rx_desc + ICE_RX_MAX_BURST);
- else
-#endif /* RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC */
- len = rxq->nb_rx_desc;
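+	/* Clear the ring, including the extra entries kept for bulk allocation. */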
+ len = (uint16_t)(rxq->nb_rx_desc + ICE_RX_MAX_BURST);
for (i = 0; i < len * sizeof(union ice_rx_flex_desc); i++)
((volatile char *)rxq->rx_ring)[i] = 0;
-#ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
for (i = 0; i < ICE_RX_MAX_BURST; ++i)
rxq->sw_ring[rxq->nb_rx_desc + i].mbuf = &rxq->fake_mbuf;
rxq->rx_nb_avail = 0;
rxq->rx_next_avail = 0;
rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
-#endif /* RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC */
rxq->rx_tail = 0;
rxq->nb_rx_hold = 0;
PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
rx_queue_id);
- ice_rx_queue_release_mbufs(rxq);
+ rxq->rx_rel_mbufs(rxq);
ice_reset_rx_queue(rxq);
return -EINVAL;
}
rx_queue_id);
return -EINVAL;
}
- ice_rx_queue_release_mbufs(rxq);
+ rxq->rx_rel_mbufs(rxq);
ice_reset_rx_queue(rxq);
dev->data->rx_queue_state[rx_queue_id] =
RTE_ETH_QUEUE_STATE_STOPPED;
tx_ctx.tso_qnum = txq->reg_idx; /* index for tso state structure */
tx_ctx.legacy_int = 1; /* Legacy or Advanced Host Interface */
- ice_set_ctx((uint8_t *)&tx_ctx, txq_elem.txqs[0].txq_ctx,
+ ice_set_ctx(hw, (uint8_t *)&tx_ctx, txq_elem.txqs[0].txq_ctx,
ice_tlan_ctx_info);
txq->qtx_tail = hw->hw_addr + QTX_COMM_DBELL(txq->reg_idx);
tx_ctx.tso_qnum = txq->reg_idx; /* index for tso state structure */
tx_ctx.legacy_int = 1; /* Legacy or Advanced Host Interface */
- ice_set_ctx((uint8_t *)&tx_ctx, txq_elem.txqs[0].txq_ctx,
+ ice_set_ctx(hw, (uint8_t *)&tx_ctx, txq_elem.txqs[0].txq_ctx,
ice_tlan_ctx_info);
txq->qtx_tail = hw->hw_addr + QTX_COMM_DBELL(txq->reg_idx);
}
}
}
-static void
-ice_tx_queue_release_mbufs(struct ice_tx_queue *txq)
-{
- txq->tx_rel_mbufs(txq);
-}
static void
ice_reset_tx_queue(struct ice_tx_queue *txq)
return -EINVAL;
}
- ice_tx_queue_release_mbufs(txq);
+ txq->tx_rel_mbufs(txq);
ice_reset_tx_queue(txq);
dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
rx_queue_id);
return -EINVAL;
}
- ice_rx_queue_release_mbufs(rxq);
+ rxq->rx_rel_mbufs(rxq);
return 0;
}
return -EINVAL;
}
- ice_tx_queue_release_mbufs(txq);
+ txq->tx_rel_mbufs(txq);
return 0;
}
	/* Allocate the maximum number of RX ring hardware descriptors. */
len = ICE_MAX_RING_DESC;
-#ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
/**
* Allocating a little more memory because vectorized/bulk_alloc Rx
	 * functions don't check boundaries each time.
*/
len += ICE_RX_MAX_BURST;
-#endif
	/* Allocate the maximum number of RX ring hardware descriptors. */
ring_size = sizeof(union ice_rx_flex_desc) * len;
rxq->rx_ring_dma = rz->iova;
rxq->rx_ring = rz->addr;
-#ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
+ /* always reserve more for bulk alloc */
len = (uint16_t)(nb_desc + ICE_RX_MAX_BURST);
-#else
- len = nb_desc;
-#endif
/* Allocate the software ring. */
rxq->sw_ring = rte_zmalloc_socket(NULL,
use_def_burst_func = ice_check_rx_burst_bulk_alloc_preconditions(rxq);
if (!use_def_burst_func) {
-#ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
"satisfied. Rx Burst Bulk Alloc function will be "
"used on port=%d, queue=%d.",
rxq->port_id, rxq->queue_id);
-#endif /* RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC */
} else {
PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
- "not satisfied, Scattered Rx is requested, "
- "or RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC is "
- "not enabled on port=%d, queue=%d.",
+			     "not satisfied or Scattered Rx is requested "
+			     "on port=%d, queue=%d.",
rxq->port_id, rxq->queue_id);
ad->rx_bulk_alloc_allowed = false;
}
return;
}
- ice_rx_queue_release_mbufs(q);
+ q->rx_rel_mbufs(q);
rte_free(q->sw_ring);
rte_free(q);
}
return;
}
- ice_tx_queue_release_mbufs(q);
+ q->tx_rel_mbufs(q);
rte_free(q->sw_ring);
rte_free(q);
}
volatile struct ice_32b_rx_flex_desc_comms *desc)
{
uint16_t stat_err = rte_le_to_cpu_16(desc->status_error1);
- uint32_t metadata;
+ uint32_t metadata = 0;
uint64_t ol_flag;
+ bool chk_valid;
- if (unlikely(!(stat_err & ICE_RX_PROTO_XTR_VALID)))
- return;
-
- ol_flag = ice_rxdid_to_proto_xtr_ol_flag(desc->rxdid);
+ ol_flag = ice_rxdid_to_proto_xtr_ol_flag(desc->rxdid, &chk_valid);
if (unlikely(!ol_flag))
return;
- mb->ol_flags |= ol_flag;
+ if (chk_valid) {
+ if (stat_err & (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S))
+ metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux0);
- metadata = stat_err & (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S) ?
- rte_le_to_cpu_16(desc->flex_ts.flex.aux0) : 0;
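+		/* aux1, when valid, carries the upper 16 bits of the metadata. */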
+ if (stat_err & (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S))
+ metadata |=
+ rte_le_to_cpu_16(desc->flex_ts.flex.aux1) << 16;
+ } else {
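+		/* No validity bits for this RXDID: 0xFFFF in aux0/aux1 means
+		 * the metadata was not extracted for this packet.
+		 */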
+ if (rte_le_to_cpu_16(desc->flex_ts.flex.aux0) != 0xFFFF)
+ metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux0);
+ else if (rte_le_to_cpu_16(desc->flex_ts.flex.aux1) != 0xFFFF)
+ metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux1);
+ }
+
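+	/* A zero value means nothing was extracted for this packet. */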
+ if (!metadata)
+ return;
- if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S)))
- metadata |= rte_le_to_cpu_16(desc->flex_ts.flex.aux1) << 16;
+ mb->ol_flags |= ol_flag;
*RTE_NET_ICE_DYNF_PROTO_XTR_METADATA(mb) = metadata;
}
#endif
}
-#ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
#define ICE_LOOK_AHEAD 8
#if (ICE_LOOK_AHEAD != 8)
#error "PMD ICE: ICE_LOOK_AHEAD must be 8\n"
return nb_rx;
}
-#else
-static uint16_t
-ice_recv_pkts_bulk_alloc(void __rte_unused *rx_queue,
- struct rte_mbuf __rte_unused **rx_pkts,
- uint16_t __rte_unused nb_pkts)
-{
- return 0;
-}
-#endif /* RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC */
static uint16_t
ice_recv_scattered_pkts(void *rx_queue,
ptypes = ptypes_os;
if (dev->rx_pkt_burst == ice_recv_pkts ||
-#ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
dev->rx_pkt_burst == ice_recv_pkts_bulk_alloc ||
-#endif
dev->rx_pkt_burst == ice_recv_scattered_pkts)
return ptypes;
return RTE_ETH_TX_DESC_FULL;
}
-void
-ice_clear_queues(struct rte_eth_dev *dev)
-{
- uint16_t i;
-
- PMD_INIT_FUNC_TRACE();
-
- for (i = 0; i < dev->data->nb_tx_queues; i++) {
- ice_tx_queue_release_mbufs(dev->data->tx_queues[i]);
- ice_reset_tx_queue(dev->data->tx_queues[i]);
- }
-
- for (i = 0; i < dev->data->nb_rx_queues; i++) {
- ice_rx_queue_release_mbufs(dev->data->rx_queues[i]);
- ice_reset_rx_queue(dev->data->rx_queues[i]);
- }
-}
-
void
ice_free_queues(struct rte_eth_dev *dev)
{
continue;
ice_rx_queue_release(dev->data->rx_queues[i]);
dev->data->rx_queues[i] = NULL;
+ rte_eth_dma_zone_free(dev, "rx_ring", i);
}
dev->data->nb_rx_queues = 0;
continue;
ice_tx_queue_release(dev->data->tx_queues[i]);
dev->data->tx_queues[i] = NULL;
+ rte_eth_dma_zone_free(dev, "tx_ring", i);
}
dev->data->nb_tx_queues = 0;
}
/* Check if the descriptor ring needs to be cleaned. */
if (txq->nb_tx_free < txq->tx_free_thresh)
- ice_xmit_cleanup(txq);
+ (void)ice_xmit_cleanup(txq);
for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
tx_pkt = *tx_pkts++;
td_cmd = 0;
+ td_tag = 0;
+ td_offset = 0;
ol_flags = tx_pkt->ol_flags;
tx_offload.l2_len = tx_pkt->l2_len;
tx_offload.l3_len = tx_pkt->l3_len;
&cd_tunneling_params);
/* Enable checksum offloading */
- if (ol_flags & ICE_TX_CKSUM_OFFLOAD_MASK) {
+ if (ol_flags & ICE_TX_CKSUM_OFFLOAD_MASK)
ice_txd_enable_checksum(ol_flags, &td_cmd,
&td_offset, tx_offload);
- }
if (nb_ctx) {
/* Setup TX context descriptor if required */
return nb_tx;
}
-static inline int __attribute__((always_inline))
+static __rte_always_inline int
ice_tx_free_bufs(struct ice_tx_queue *txq)
{
struct ice_tx_entry *txep;
return nb_tx;
}
-void __attribute__((cold))
+void __rte_cold
ice_set_rx_function(struct rte_eth_dev *dev)
{
PMD_INIT_FUNC_TRACE();
return ret;
}
-void __attribute__((cold))
+void __rte_cold
ice_set_tx_function_flag(struct rte_eth_dev *dev, struct ice_tx_queue *txq)
{
struct ice_adapter *ad =
return i;
}
-void __attribute__((cold))
+void __rte_cold
ice_set_tx_function(struct rte_eth_dev *dev)
{
struct ice_adapter *ad =
return type_table[ptype];
}
-void __attribute__((cold))
+void __rte_cold
ice_set_default_ptype_table(struct rte_eth_dev *dev)
{
struct ice_adapter *ad =