/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Intel Corporation
 */
5 #ifndef _ICE_RXTX_VEC_COMMON_H_
6 #define _ICE_RXTX_VEC_COMMON_H_
10 static inline uint16_t
11 ice_rx_reassemble_packets(struct ice_rx_queue *rxq, struct rte_mbuf **rx_bufs,
12 uint16_t nb_bufs, uint8_t *split_flags)
14 struct rte_mbuf *pkts[ICE_VPMD_RX_BURST] = {0}; /*finished pkts*/
15 struct rte_mbuf *start = rxq->pkt_first_seg;
16 struct rte_mbuf *end = rxq->pkt_last_seg;
17 unsigned int pkt_idx, buf_idx;
19 for (buf_idx = 0, pkt_idx = 0; buf_idx < nb_bufs; buf_idx++) {
21 /* processing a split packet */
22 end->next = rx_bufs[buf_idx];
23 rx_bufs[buf_idx]->data_len += rxq->crc_len;
26 start->pkt_len += rx_bufs[buf_idx]->data_len;
29 if (!split_flags[buf_idx]) {
30 /* it's the last packet of the set */
31 start->hash = end->hash;
32 start->ol_flags = end->ol_flags;
33 /* we need to strip crc for the whole packet */
34 start->pkt_len -= rxq->crc_len;
35 if (end->data_len > rxq->crc_len) {
36 end->data_len -= rxq->crc_len;
38 /* free up last mbuf */
39 struct rte_mbuf *secondlast = start;
42 while (secondlast->next != end)
43 secondlast = secondlast->next;
44 secondlast->data_len -= (rxq->crc_len -
46 secondlast->next = NULL;
47 rte_pktmbuf_free_seg(end);
49 pkts[pkt_idx++] = start;
54 /* not processing a split packet */
55 if (!split_flags[buf_idx]) {
56 /* not a split packet, save and skip */
57 pkts[pkt_idx++] = rx_bufs[buf_idx];
60 start = rx_bufs[buf_idx];
62 rx_bufs[buf_idx]->data_len += rxq->crc_len;
63 rx_bufs[buf_idx]->pkt_len += rxq->crc_len;
67 /* save the partial packet for next time */
68 rxq->pkt_first_seg = start;
69 rxq->pkt_last_seg = end;
70 rte_memcpy(rx_bufs, pkts, pkt_idx * (sizeof(*pkts)));
75 _ice_rx_queue_release_mbufs_vec(struct ice_rx_queue *rxq)
77 const unsigned int mask = rxq->nb_rx_desc - 1;
80 if (unlikely(!rxq->sw_ring)) {
81 PMD_DRV_LOG(DEBUG, "sw_ring is NULL");
85 if (rxq->rxrearm_nb >= rxq->nb_rx_desc)
88 /* free all mbufs that are valid in the ring */
89 if (rxq->rxrearm_nb == 0) {
90 for (i = 0; i < rxq->nb_rx_desc; i++) {
91 if (rxq->sw_ring[i].mbuf)
92 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
95 for (i = rxq->rx_tail;
96 i != rxq->rxrearm_start;
98 if (rxq->sw_ring[i].mbuf)
99 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
103 rxq->rxrearm_nb = rxq->nb_rx_desc;
105 /* set all entries to NULL */
106 memset(rxq->sw_ring, 0, sizeof(rxq->sw_ring[0]) * rxq->nb_rx_desc);
110 ice_rxq_vec_setup_default(struct ice_rx_queue *rxq)
113 struct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */
116 mb_def.data_off = RTE_PKTMBUF_HEADROOM;
117 mb_def.port = rxq->port_id;
118 rte_mbuf_refcnt_set(&mb_def, 1);
120 /* prevent compiler reordering: rearm_data covers previous fields */
121 rte_compiler_barrier();
122 p = (uintptr_t)&mb_def.rearm_data;
123 rxq->mbuf_initializer = *(uint64_t *)p;
128 ice_rx_vec_queue_default(struct ice_rx_queue *rxq)
133 if (!rte_is_power_of_2(rxq->nb_rx_desc))
136 if (rxq->rx_free_thresh < ICE_VPMD_RX_BURST)
139 if (rxq->nb_rx_desc % rxq->rx_free_thresh)
146 ice_rx_vec_dev_check_default(struct rte_eth_dev *dev)
149 struct ice_rx_queue *rxq;
151 for (i = 0; i < dev->data->nb_rx_queues; i++) {
152 rxq = dev->data->rx_queues[i];
153 if (ice_rx_vec_queue_default(rxq))