/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef _IXGBE_RXTX_VEC_COMMON_H_
#define _IXGBE_RXTX_VEC_COMMON_H_

#include <stdint.h>
#include <string.h>
#include <rte_ethdev.h>

#include "ixgbe_ethdev.h"
#include "ixgbe_rxtx.h"
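
/*
 * Merge a burst of mbufs carrying segments of scattered packets back into
 * complete multi-segment packets. rxq->pkt_first_seg/pkt_last_seg carry any
 * packet left unfinished by the previous burst; split_flags[i] is non-zero
 * while more segments of the same packet follow. Completed packets are
 * compacted to the front of rx_bufs and their count is returned.
 */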
static inline uint16_t
reassemble_packets(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_bufs,
		uint16_t nb_bufs, uint8_t *split_flags)
{
	struct rte_mbuf *pkts[nb_bufs]; /* finished pkts */
	struct rte_mbuf *start = rxq->pkt_first_seg;
	struct rte_mbuf *end = rxq->pkt_last_seg;
	unsigned int pkt_idx, buf_idx;

	for (buf_idx = 0, pkt_idx = 0; buf_idx < nb_bufs; buf_idx++) {
		if (end != NULL) {
			/* processing a split packet */
			end->next = rx_bufs[buf_idx];
			rx_bufs[buf_idx]->data_len += rxq->crc_len;

			start->nb_segs++;
			start->pkt_len += rx_bufs[buf_idx]->data_len;
			end = end->next;
			if (!split_flags[buf_idx]) {
				/* it's the last packet of the set */
				start->hash = end->hash;
				start->ol_flags = end->ol_flags;
				/* we need to strip CRC for the whole packet */
				start->pkt_len -= rxq->crc_len;
				if (end->data_len > rxq->crc_len)
					end->data_len -= rxq->crc_len;
				else {
					/* free up last mbuf */
					struct rte_mbuf *secondlast = start;

					start->nb_segs--;
					while (secondlast->next != end)
						secondlast = secondlast->next;
					secondlast->data_len -= (rxq->crc_len -
							end->data_len);
					secondlast->next = NULL;
					rte_pktmbuf_free_seg(end);
				}
				pkts[pkt_idx++] = start;
				start = end = NULL;
			}
		} else {
			/* not processing a split packet */
			if (!split_flags[buf_idx]) {
				/* not a split packet, save and skip */
				pkts[pkt_idx++] = rx_bufs[buf_idx];
				continue;
			}
			end = start = rx_bufs[buf_idx];
			rx_bufs[buf_idx]->data_len += rxq->crc_len;
			rx_bufs[buf_idx]->pkt_len += rxq->crc_len;
		}
	}
	/* save the partial packet for next time */
	rxq->pkt_first_seg = start;
	rxq->pkt_last_seg = end;
	memcpy(rx_bufs, pkts, pkt_idx * (sizeof(*pkts)));
	return pkt_idx;
}
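
/*
 * Bulk-free transmitted mbufs once the hardware has written back the DD
 * (descriptor done) bit on the threshold descriptor. Consecutive mbufs from
 * the same mempool are returned with one rte_mempool_put_bulk() call; a pool
 * change flushes the pending batch first. Returns the number of descriptors
 * freed (tx_rs_thresh), or 0 if the hardware is not done yet. In the vector
 * Tx burst this is typically called once nb_tx_free drops below
 * tx_free_thresh.
 */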
static inline int __attribute__((always_inline))
ixgbe_tx_free_bufs(struct ixgbe_tx_queue *txq)
{
	struct ixgbe_tx_entry_v *txep;
	uint32_t status;
	uint32_t n;
	uint32_t i;
	int nb_free = 0;
	struct rte_mbuf *m, *free[RTE_IXGBE_TX_MAX_FREE_BUF_SZ];

	/* check DD bit on threshold descriptor */
	status = txq->tx_ring[txq->tx_next_dd].wb.status;
	if (!(status & IXGBE_ADVTXD_STAT_DD))
		return 0;

	n = txq->tx_rs_thresh;
	/*
	 * first buffer to free from S/W ring is at index
	 * tx_next_dd - (tx_rs_thresh-1)
	 */
	txep = &txq->sw_ring_v[txq->tx_next_dd - (n - 1)];
	m = __rte_pktmbuf_prefree_seg(txep[0].mbuf);
	if (likely(m != NULL)) {
		free[0] = m;
		nb_free = 1;
		for (i = 1; i < n; i++) {
			m = __rte_pktmbuf_prefree_seg(txep[i].mbuf);
			if (likely(m != NULL)) {
				if (likely(m->pool == free[0]->pool)) {
					free[nb_free++] = m;
				} else {
					rte_mempool_put_bulk(free[0]->pool,
							(void *)free, nb_free);
					free[0] = m;
					nb_free = 1;
				}
			}
		}
		rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
	} else {
		for (i = 1; i < n; i++) {
			m = __rte_pktmbuf_prefree_seg(txep[i].mbuf);
			if (m != NULL)
				rte_mempool_put(m->pool, m);
		}
	}
	/* buffers were freed, update counters */
	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
	txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
	if (txq->tx_next_dd >= txq->nb_tx_desc)
		txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);

	return txq->tx_rs_thresh;
}
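
/* Store a burst of mbuf pointers into the S/W ring backlog entries. */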
static inline void __attribute__((always_inline))
tx_backlog_entry(struct ixgbe_tx_entry_v *txep,
		struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	int i;

	for (i = 0; i < (int)nb_pkts; ++i)
		txep[i].mbuf = tx_pkts[i];
}
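
/*
 * Free all mbufs still held by the Tx S/W ring, walking from the oldest
 * in-flight entry up to tx_tail, then clear every ring entry.
 */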
static inline void
_ixgbe_tx_queue_release_mbufs_vec(struct ixgbe_tx_queue *txq)
{
	unsigned int i;
	struct ixgbe_tx_entry_v *txe;
	const uint16_t max_desc = (uint16_t)(txq->nb_tx_desc - 1);

	if (txq->sw_ring == NULL || txq->nb_tx_free == max_desc)
		return;

	/* release the used mbufs in sw_ring */
	for (i = txq->tx_next_dd - (txq->tx_rs_thresh - 1);
	     i != txq->tx_tail;
	     i = (i + 1) & max_desc) {
		txe = &txq->sw_ring_v[i];
		rte_pktmbuf_free_seg(txe->mbuf);
	}
	txq->nb_tx_free = max_desc;

	/* reset tx_entry */
	for (i = 0; i < txq->nb_tx_desc; i++) {
		txe = &txq->sw_ring_v[i];
		txe->mbuf = NULL;
	}
}
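
/*
 * Free all mbufs still held by the Rx S/W ring, from rx_tail up to the
 * first descriptor awaiting rearm, then zero the whole S/W ring.
 */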
static inline void
_ixgbe_rx_queue_release_mbufs_vec(struct ixgbe_rx_queue *rxq)
{
	const unsigned int mask = rxq->nb_rx_desc - 1;
	unsigned int i;

	if (rxq->sw_ring == NULL || rxq->rxrearm_nb >= rxq->nb_rx_desc)
		return;

	/* free all mbufs that are valid in the ring */
	for (i = rxq->rx_tail; i != rxq->rxrearm_start; i = (i + 1) & mask)
		rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
	rxq->rxrearm_nb = rxq->nb_rx_desc;

	/* set all entries to NULL */
	memset(rxq->sw_ring, 0, sizeof(rxq->sw_ring[0]) * rxq->nb_rx_desc);
}
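
/*
 * Release the Tx S/W ring. Note the -1: ixgbe_txq_vec_setup_default()
 * advanced sw_ring_v by one entry, so free from the original base.
 */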
static inline void
_ixgbe_tx_free_swring_vec(struct ixgbe_tx_queue *txq)
{
	if (txq == NULL)
		return;

	if (txq->sw_ring != NULL) {
		rte_free(txq->sw_ring_v - 1);
		txq->sw_ring_v = NULL;
	}
}
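
/*
 * Reset a Tx queue to its post-init state: zero the H/W ring, mark every
 * descriptor done (DD) so the free routine sees it as reclaimable, and
 * reset the queue's counters and context cache.
 */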
static inline void
_ixgbe_reset_tx_queue_vec(struct ixgbe_tx_queue *txq)
{
	static const union ixgbe_adv_tx_desc zeroed_desc = { { 0 } };
	struct ixgbe_tx_entry_v *txe = txq->sw_ring_v;
	uint16_t i;

	/* Zero out HW ring memory */
	for (i = 0; i < txq->nb_tx_desc; i++)
		txq->tx_ring[i] = zeroed_desc;

	/* Initialize SW ring entries */
	for (i = 0; i < txq->nb_tx_desc; i++) {
		volatile union ixgbe_adv_tx_desc *txd = &txq->tx_ring[i];

		txd->wb.status = IXGBE_TXD_STAT_DD;
		txe[i].mbuf = NULL;
	}

	txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
	txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);

	txq->tx_tail = 0;
	txq->nb_tx_used = 0;
	/*
	 * Always allow 1 descriptor to be un-allocated to avoid
	 * a H/W race condition
	 */
	txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);
	txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);
	txq->ctx_curr = 0;
	memset((void *)&txq->ctx_cache, 0,
		IXGBE_CTX_NUM * sizeof(struct ixgbe_advctx_info));
}
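
/*
 * Build the 64-bit mbuf initializer for the vector Rx path: the template
 * mbuf's rearm_data region (data_off, refcnt, nb_segs, port) is captured
 * as one word so each rearmed mbuf can be initialized with a single
 * 8-byte store.
 */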
static inline int
ixgbe_rxq_vec_setup_default(struct ixgbe_rx_queue *rxq)
{
	uintptr_t p;
	struct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */

	mb_def.nb_segs = 1;
	mb_def.data_off = RTE_PKTMBUF_HEADROOM;
	mb_def.port = rxq->port_id;
	rte_mbuf_refcnt_set(&mb_def, 1);

	/* prevent compiler reordering: rearm_data covers previous fields */
	rte_compiler_barrier();
	p = (uintptr_t)&mb_def.rearm_data;
	rxq->mbuf_initializer = *(uint64_t *)p;
	return 0;
}
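
/*
 * Attach the vector Tx ops and skip the first S/W ring entry, which is
 * kept as overflow room (see _ixgbe_tx_free_swring_vec()).
 */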
static inline int
ixgbe_txq_vec_setup_default(struct ixgbe_tx_queue *txq,
			const struct ixgbe_txq_ops *txq_ops)
{
	if (txq->sw_ring_v == NULL)
		return -1;

	/* leave the first one for overflow */
	txq->sw_ring_v = txq->sw_ring_v + 1;
	txq->ops = txq_ops;

	return 0;
}
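
/*
 * Check whether the device Rx configuration is compatible with the vector
 * Rx path; returns -1 when a requested feature (VLAN offloads without
 * ol_flags, fdir, checksum, header split) rules it out, 0 otherwise.
 */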
static inline int
ixgbe_rx_vec_dev_conf_condition_check_default(struct rte_eth_dev *dev)
{
#ifndef RTE_LIBRTE_IEEE1588
	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
	struct rte_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;

#ifndef RTE_IXGBE_RX_OLFLAGS_ENABLE
	/* without rx ol_flags, no VP flag report */
	if (rxmode->hw_vlan_strip != 0 ||
	    rxmode->hw_vlan_extend != 0)
		return -1;
#endif

	/* no fdir support */
	if (fconf->mode != RTE_FDIR_MODE_NONE)
		return -1;

	/*
	 * - no csum error report support
	 * - no header split support
	 */
	if (rxmode->hw_ip_checksum == 1 ||
	    rxmode->header_split == 1)
		return -1;

	return 0;
#else
	RTE_SET_USED(dev);
	return -1;
#endif
}

#endif /* _IXGBE_RXTX_VEC_COMMON_H_ */