/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Intel Corporation
 */

#ifndef _ICE_RXTX_VEC_COMMON_H_
#define _ICE_RXTX_VEC_COMMON_H_

#include "ice_rxtx.h"

#ifndef __INTEL_COMPILER
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif

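/**
 * Reassemble scattered mbufs into complete packets after a vector RX burst.
 * Segments flagged in @split_flags are chained onto the packet currently
 * being built (rxq->pkt_first_seg/pkt_last_seg), CRC bytes are stripped
 * from the tail segment, and finished packets are compacted back into
 * @rx_bufs. Returns the number of complete packets placed in @rx_bufs.
 */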
static inline uint16_t
ice_rx_reassemble_packets(struct ice_rx_queue *rxq, struct rte_mbuf **rx_bufs,
			  uint16_t nb_bufs, uint8_t *split_flags)
{
	struct rte_mbuf *pkts[ICE_VPMD_RX_BURST] = {0}; /*finished pkts*/
	struct rte_mbuf *start = rxq->pkt_first_seg;
	struct rte_mbuf *end = rxq->pkt_last_seg;
	unsigned int pkt_idx, buf_idx;

	for (buf_idx = 0, pkt_idx = 0; buf_idx < nb_bufs; buf_idx++) {
		if (end) {
			/* processing a split packet */
			end->next = rx_bufs[buf_idx];
			rx_bufs[buf_idx]->data_len += rxq->crc_len;

			start->nb_segs++;
			start->pkt_len += rx_bufs[buf_idx]->data_len;
			end = end->next;

			if (!split_flags[buf_idx]) {
				/* it's the last packet of the set */
				start->hash = end->hash;
				start->vlan_tci = end->vlan_tci;
				start->ol_flags = end->ol_flags;
				/* we need to strip crc for the whole packet */
				start->pkt_len -= rxq->crc_len;
				if (end->data_len > rxq->crc_len) {
					end->data_len -= rxq->crc_len;
				} else {
					/* free up last mbuf */
					struct rte_mbuf *secondlast = start;

					start->nb_segs--;
					while (secondlast->next != end)
						secondlast = secondlast->next;
					secondlast->data_len -= (rxq->crc_len -
							end->data_len);
					secondlast->next = NULL;
					rte_pktmbuf_free_seg(end);
				}
				pkts[pkt_idx++] = start;
				start = NULL;
				end = NULL;
			}
		} else {
			/* not processing a split packet */
			if (!split_flags[buf_idx]) {
				/* not a split packet, save and skip */
				pkts[pkt_idx++] = rx_bufs[buf_idx];
				continue;
			}
			start = rx_bufs[buf_idx];
			end = start;
			rx_bufs[buf_idx]->data_len += rxq->crc_len;
			rx_bufs[buf_idx]->pkt_len += rxq->crc_len;
		}
	}

	/* save the partial packet for next time */
	rxq->pkt_first_seg = start;
	rxq->pkt_last_seg = end;
	rte_memcpy(rx_bufs, pkts, pkt_idx * (sizeof(*pkts)));
	return pkt_idx;
}

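/**
 * Free the mbufs tied to the tx_rs_thresh descriptors that the hardware has
 * finished with (threshold descriptor reports done). Mbufs from the same
 * mempool are returned in bulk; the queue's free-count and next-DD
 * bookkeeping are then advanced.
 */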
static __rte_always_inline int
ice_tx_free_bufs_vec(struct ice_tx_queue *txq)
{
	struct ice_tx_entry *txep;
	uint32_t n, i;
	int nb_free = 0;
	struct rte_mbuf *m, *free[ICE_TX_MAX_FREE_BUF_SZ];

	/* check DD bits on threshold descriptor */
	if ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
			rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M)) !=
			rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE))
		return 0;

	n = txq->tx_rs_thresh;

	/* first buffer to free from S/W ring is at index
	 * tx_next_dd - (tx_rs_thresh - 1)
	 */
	txep = &txq->sw_ring[txq->tx_next_dd - (n - 1)];
	m = rte_pktmbuf_prefree_seg(txep[0].mbuf);
	if (likely(m)) {
		free[0] = m;
		nb_free = 1;
		for (i = 1; i < n; i++) {
			m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
			if (likely(m)) {
				if (likely(m->pool == free[0]->pool)) {
					free[nb_free++] = m;
				} else {
					rte_mempool_put_bulk(free[0]->pool,
							(void **)free, nb_free);
					free[0] = m;
					nb_free = 1;
				}
			}
		}
		rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
	} else {
		for (i = 1; i < n; i++) {
			m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
			if (m)
				rte_mempool_put(m->pool, m);
		}
	}

	/* buffers were freed, update counters */
	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
	txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
	if (txq->tx_next_dd >= txq->nb_tx_desc)
		txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);

	return txq->tx_rs_thresh;
}

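/* Record the transmitted mbufs in the software ring for later cleanup. */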
static __rte_always_inline void
ice_tx_backlog_entry(struct ice_tx_entry *txep,
		     struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	int i;

	for (i = 0; i < (int)nb_pkts; ++i)
		txep[i].mbuf = tx_pkts[i];
}

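/**
 * Release all mbufs still held in the RX software ring. Only the range that
 * has not yet been handed back for rearming (rx_tail .. rxrearm_start) can
 * hold valid mbufs, unless nothing has been rearmed yet, in which case the
 * whole ring is walked.
 */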
static inline void
_ice_rx_queue_release_mbufs_vec(struct ice_rx_queue *rxq)
{
	const unsigned int mask = rxq->nb_rx_desc - 1;
	unsigned int i;

	if (unlikely(!rxq->sw_ring)) {
		PMD_DRV_LOG(DEBUG, "sw_ring is NULL");
		return;
	}

	if (rxq->rxrearm_nb >= rxq->nb_rx_desc)
		return;

	/* free all mbufs that are valid in the ring */
	if (rxq->rxrearm_nb == 0) {
		for (i = 0; i < rxq->nb_rx_desc; i++) {
			if (rxq->sw_ring[i].mbuf)
				rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
		}
	} else {
		for (i = rxq->rx_tail;
		     i != rxq->rxrearm_start;
		     i = (i + 1) & mask) {
			if (rxq->sw_ring[i].mbuf)
				rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
		}
	}

	rxq->rxrearm_nb = rxq->nb_rx_desc;

	/* set all entries to NULL */
	memset(rxq->sw_ring, 0, sizeof(rxq->sw_ring[0]) * rxq->nb_rx_desc);
}

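/**
 * Release the mbufs still referenced by the TX software ring. The vector TX
 * path does not NULL sw_ring entries when it frees mbufs, so only the slots
 * between the last cleaned descriptor and tx_tail are walked. The AVX512
 * path uses the ice_vec_tx_entry layout and is handled separately.
 */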
static inline void
_ice_tx_queue_release_mbufs_vec(struct ice_tx_queue *txq)
{
	uint16_t i;

	if (unlikely(!txq || !txq->sw_ring)) {
		PMD_DRV_LOG(DEBUG, "Pointer to txq or sw_ring is NULL");
		return;
	}

	/**
	 * vPMD tx will not set sw_ring's mbuf to NULL after free,
	 * so need to free remains more carefully.
	 */
	i = txq->tx_next_dd - txq->tx_rs_thresh + 1;

#ifdef __AVX512VL__
	struct rte_eth_dev *dev = &rte_eth_devices[txq->vsi->adapter->pf.dev_data->port_id];

	if (dev->tx_pkt_burst == ice_xmit_pkts_vec_avx512 ||
	    dev->tx_pkt_burst == ice_xmit_pkts_vec_avx512_offload) {
		struct ice_vec_tx_entry *swr = (void *)txq->sw_ring;

		if (txq->tx_tail < i) {
			for (; i < txq->nb_tx_desc; i++) {
				rte_pktmbuf_free_seg(swr[i].mbuf);
				swr[i].mbuf = NULL;
			}
			i = 0;
		}
		for (; i < txq->tx_tail; i++) {
			rte_pktmbuf_free_seg(swr[i].mbuf);
			swr[i].mbuf = NULL;
		}
	} else
#endif
	{
		if (txq->tx_tail < i) {
			for (; i < txq->nb_tx_desc; i++) {
				rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
				txq->sw_ring[i].mbuf = NULL;
			}
			i = 0;
		}
		for (; i < txq->tx_tail; i++) {
			rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
			txq->sw_ring[i].mbuf = NULL;
		}
	}
}

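/**
 * Build the 64-bit mbuf initializer used by the vector RX path to rearm
 * descriptors: a template mbuf with the default data offset, the queue's
 * port id, a refcount of 1 and nb_segs of 1, captured from its rearm_data
 * word.
 */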
static inline int
ice_rxq_vec_setup_default(struct ice_rx_queue *rxq)
{
	uintptr_t p;
	struct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */

	mb_def.nb_segs = 1;
	mb_def.data_off = RTE_PKTMBUF_HEADROOM;
	mb_def.port = rxq->port_id;
	rte_mbuf_refcnt_set(&mb_def, 1);

	/* prevent compiler reordering: rearm_data covers previous fields */
	rte_compiler_barrier();
	p = (uintptr_t)&mb_def.rearm_data;
	rxq->mbuf_initializer = *(uint64_t *)p;
	return 0;
}

#define ICE_TX_NO_VECTOR_FLAGS (			\
		RTE_ETH_TX_OFFLOAD_MULTI_SEGS |		\
		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |	\
		RTE_ETH_TX_OFFLOAD_TCP_TSO |		\
		RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM)

#define ICE_TX_VECTOR_OFFLOAD (				\
		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |	\
		RTE_ETH_TX_OFFLOAD_QINQ_INSERT |	\
		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |		\
		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |		\
		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |		\
		RTE_ETH_TX_OFFLOAD_TCP_CKSUM)

#define ICE_RX_VECTOR_OFFLOAD (				\
		RTE_ETH_RX_OFFLOAD_CHECKSUM |		\
		RTE_ETH_RX_OFFLOAD_SCTP_CKSUM |		\
		RTE_ETH_RX_OFFLOAD_VLAN |		\
		RTE_ETH_RX_OFFLOAD_RSS_HASH)

#define ICE_VECTOR_PATH			0
#define ICE_VECTOR_OFFLOAD_PATH		1

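/**
 * Check whether an RX queue can use the vector path. Returns -1 if not,
 * ICE_VECTOR_OFFLOAD_PATH if offloads requiring the offload-aware vector
 * routines are enabled, and ICE_VECTOR_PATH otherwise.
 */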
static inline int
ice_rx_vec_queue_default(struct ice_rx_queue *rxq)
{
	if (!rxq)
		return -1;
	if (!rte_is_power_of_2(rxq->nb_rx_desc))
		return -1;
	if (rxq->rx_free_thresh < ICE_VPMD_RX_BURST)
		return -1;
	if (rxq->nb_rx_desc % rxq->rx_free_thresh)
		return -1;
	if (rxq->proto_xtr != PROTO_XTR_NONE)
		return -1;
	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
		return -1;

	if (rxq->offloads & ICE_RX_VECTOR_OFFLOAD)
		return ICE_VECTOR_OFFLOAD_PATH;

	return ICE_VECTOR_PATH;
}

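/**
 * Check whether a TX queue can use the vector path, mirroring the RX check:
 * -1 means no vector support, otherwise the plain or offload vector path.
 */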
static inline int
ice_tx_vec_queue_default(struct ice_tx_queue *txq)
{
	if (!txq)
		return -1;
	if (txq->tx_rs_thresh < ICE_VPMD_TX_BURST ||
	    txq->tx_rs_thresh > ICE_TX_MAX_FREE_BUF_SZ)
		return -1;
	if (txq->offloads & ICE_TX_NO_VECTOR_FLAGS)
		return -1;

	if (txq->offloads & ICE_TX_VECTOR_OFFLOAD)
		return ICE_VECTOR_OFFLOAD_PATH;

	return ICE_VECTOR_PATH;
}

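/**
 * Device-level RX check: every RX queue must support the vector path.
 * Returns -1 if any queue cannot, ICE_VECTOR_OFFLOAD_PATH if any queue
 * needs the offload path, otherwise ICE_VECTOR_PATH.
 */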
static inline int
ice_rx_vec_dev_check_default(struct rte_eth_dev *dev)
{
	struct ice_rx_queue *rxq;
	int i, ret, result = 0;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		ret = ice_rx_vec_queue_default(rxq);
		if (ret < 0)
			return -1;
		if (ret == ICE_VECTOR_OFFLOAD_PATH)
			result = ICE_VECTOR_OFFLOAD_PATH;
	}

	return result;
}

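/* Device-level TX check: same policy as the RX check above, over all TX queues. */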
static inline int
ice_tx_vec_dev_check_default(struct rte_eth_dev *dev)
{
	struct ice_tx_queue *txq;
	int i, ret, result = 0;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		ret = ice_tx_vec_queue_default(txq);
		if (ret < 0)
			return -1;
		if (ret == ICE_VECTOR_OFFLOAD_PATH)
			result = ICE_VECTOR_OFFLOAD_PATH;
	}

	return result;
}

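/**
 * Translate mbuf offload flags into the command and offset fields of the
 * high quadword of a TX descriptor: MACLEN/IPLEN/L4LEN offsets, L3/L4
 * checksum commands and L2TAG1 (VLAN) insertion.
 */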
static inline void
ice_txd_enable_offload(struct rte_mbuf *tx_pkt,
		       uint64_t *txd_hi)
{
	uint64_t ol_flags = tx_pkt->ol_flags;
	uint32_t td_cmd = 0;
	uint32_t td_offset = 0;

	/* Tx Checksum Offload */
	/* set MACLEN */
	td_offset |= (tx_pkt->l2_len >> 1) <<
		     ICE_TX_DESC_LEN_MACLEN_S;

	/* Enable L3 checksum offload */
	if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
		td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM;
		td_offset |= (tx_pkt->l3_len >> 2) <<
			     ICE_TX_DESC_LEN_IPLEN_S;
	} else if (ol_flags & RTE_MBUF_F_TX_IPV4) {
		td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;
		td_offset |= (tx_pkt->l3_len >> 2) <<
			     ICE_TX_DESC_LEN_IPLEN_S;
	} else if (ol_flags & RTE_MBUF_F_TX_IPV6) {
		td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
		td_offset |= (tx_pkt->l3_len >> 2) <<
			     ICE_TX_DESC_LEN_IPLEN_S;
	}

	/* Enable L4 checksum offloads */
	switch (ol_flags & RTE_MBUF_F_TX_L4_MASK) {
	case RTE_MBUF_F_TX_TCP_CKSUM:
		td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
		td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
			     ICE_TX_DESC_LEN_L4_LEN_S;
		break;
	case RTE_MBUF_F_TX_SCTP_CKSUM:
		td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP;
		td_offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
			     ICE_TX_DESC_LEN_L4_LEN_S;
		break;
	case RTE_MBUF_F_TX_UDP_CKSUM:
		td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP;
		td_offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
			     ICE_TX_DESC_LEN_L4_LEN_S;
		break;
	default:
		break;
	}

	*txd_hi |= ((uint64_t)td_offset) << ICE_TXD_QW1_OFFSET_S;

	/* Tx VLAN/QINQ insertion Offload */
	if (ol_flags & (RTE_MBUF_F_TX_VLAN | RTE_MBUF_F_TX_QINQ)) {
		td_cmd |= ICE_TX_DESC_CMD_IL2TAG1;
		*txd_hi |= ((uint64_t)tx_pkt->vlan_tci <<
			    ICE_TXD_QW1_L2TAG1_S);
	}

	*txd_hi |= ((uint64_t)td_cmd) << ICE_TXD_QW1_CMD_S;
}

#endif /* _ICE_RXTX_VEC_COMMON_H_ */