/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdint.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>

#include "ixgbe_ethdev.h"
#include "ixgbe_rxtx.h"

#include <tmmintrin.h>

#ifndef __INTEL_COMPILER
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif
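
/*
 * Refill the RX descriptor ring: take RTE_IXGBE_RXQ_REARM_THRESH fresh
 * mbufs from the mempool into the software ring, reset their headers from
 * the queue's 64-bit template, then write each buffer's DMA address (plus
 * headroom) back into the corresponding hardware descriptors.
 */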
static inline void
ixgbe_rxq_rearm(struct igb_rx_queue *rxq)
{
	int i;
	uint16_t rx_id;
	volatile union ixgbe_adv_rx_desc *rxdp;
	struct igb_rx_entry *rxep = &rxq->sw_ring[rxq->rxrearm_start];
	struct rte_mbuf *mb0, *mb1;
	__m128i hdr_room = _mm_set_epi64x(RTE_PKTMBUF_HEADROOM,
			RTE_PKTMBUF_HEADROOM);

	/* Pull 'n' more MBUFs into the software ring */
	if (rte_mempool_get_bulk(rxq->mb_pool,
				 (void *)rxep,
				 RTE_IXGBE_RXQ_REARM_THRESH) < 0)
		return;

	rxdp = rxq->rx_ring + rxq->rxrearm_start;

	/* Initialize the mbufs in vector, process 2 mbufs in one loop */
	for (i = 0; i < RTE_IXGBE_RXQ_REARM_THRESH; i += 2, rxep += 2) {
		__m128i dma_addr0, dma_addr1;
		__m128i vaddr0, vaddr1;

		mb0 = rxep[0].mbuf;
		mb1 = rxep[1].mbuf;

		/* flush mbuf with pkt template */
		mb0->rearm_data[0] = rxq->mbuf_initializer;
		mb1->rearm_data[0] = rxq->mbuf_initializer;

		/* load buf_addr(lo 64bit) and buf_physaddr(hi 64bit) */
		vaddr0 = _mm_loadu_si128((__m128i *)&(mb0->buf_addr));
		vaddr1 = _mm_loadu_si128((__m128i *)&(mb1->buf_addr));

		/* convert pa to dma_addr hdr/data */
		dma_addr0 = _mm_unpackhi_epi64(vaddr0, vaddr0);
		dma_addr1 = _mm_unpackhi_epi64(vaddr1, vaddr1);
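
		/*
		 * Note: buf_physaddr occupies the upper 64 bits of the
		 * 16-byte load above, so unpackhi_epi64(v, v) broadcasts
		 * each mbuf's physical address into both qword lanes; the
		 * 16-byte descriptor store below then fills the descriptor's
		 * packet-buffer and header-buffer address fields with the
		 * same DMA address.
		 */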

		/* add headroom to pa values */
		dma_addr0 = _mm_add_epi64(dma_addr0, hdr_room);
		dma_addr1 = _mm_add_epi64(dma_addr1, hdr_room);

		/* flush desc with pa dma_addr */
		_mm_store_si128((__m128i *)&rxdp++->read, dma_addr0);
		_mm_store_si128((__m128i *)&rxdp++->read, dma_addr1);
	}

	rxq->rxrearm_start += RTE_IXGBE_RXQ_REARM_THRESH;
	if (rxq->rxrearm_start >= rxq->nb_rx_desc)
		rxq->rxrearm_start = 0;

	rxq->rxrearm_nb -= RTE_IXGBE_RXQ_REARM_THRESH;

	rx_id = (uint16_t)((rxq->rxrearm_start == 0) ?
			(rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1));

	/* Update the tail pointer on the NIC */
	IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
}

/*
 * Handling the offload flags (olflags) field takes computation
 * time when receiving packets. Therefore we provide a flag to disable
 * the processing of the olflags field when they are not needed. This
 * gives improved performance, at the cost of losing the offload info
 * in the received packet.
 */
#ifdef RTE_IXGBE_RX_OLFLAGS_ENABLE
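
/*
 * OLFLAGS_MASK below keeps only the offload-flag bits this path can
 * report; OLFLAGS_MASK_V replicates that 16-bit mask into all four lanes
 * of a 64-bit value, so the flags of four packets can be extracted with a
 * single AND.
 */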
#define OLFLAGS_MASK ((uint16_t)(PKT_RX_VLAN_PKT | PKT_RX_IPV4_HDR | \
				 PKT_RX_IPV4_HDR_EXT | PKT_RX_IPV6_HDR | \
				 PKT_RX_IPV6_HDR_EXT))
#define OLFLAGS_MASK_V (((uint64_t)OLFLAGS_MASK << 48) | \
			((uint64_t)OLFLAGS_MASK << 32) | \
			((uint64_t)OLFLAGS_MASK << 16) | \
			((uint64_t)OLFLAGS_MASK))
#define PTYPE_SHIFT (1)
#define VTAG_SHIFT (3)

static inline void
desc_to_olflags_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
{
	__m128i ptype0, ptype1, vtag0, vtag1;
	union {
		uint16_t e[4];
		uint64_t dword;
	} vol;

	/* interleave low 16-bit words: packet-type bits of all 4 descs */
	ptype0 = _mm_unpacklo_epi16(descs[0], descs[1]);
	ptype1 = _mm_unpacklo_epi16(descs[2], descs[3]);
	/* interleave high 16-bit words: vlan/status bits of all 4 descs */
	vtag0 = _mm_unpackhi_epi16(descs[0], descs[1]);
	vtag1 = _mm_unpackhi_epi16(descs[2], descs[3]);

	/* merge so that each packet contributes one 16-bit lane */
	ptype1 = _mm_unpacklo_epi32(ptype0, ptype1);
	vtag1 = _mm_unpacklo_epi32(vtag0, vtag1);

	/* align the hardware bits with the PKT_RX_* flag positions */
	ptype1 = _mm_slli_epi16(ptype1, PTYPE_SHIFT);
	vtag1 = _mm_srli_epi16(vtag1, VTAG_SHIFT);

	/* combine, then keep only the recognized offload-flag bits */
	ptype1 = _mm_or_si128(ptype1, vtag1);
	vol.dword = _mm_cvtsi128_si64(ptype1) & OLFLAGS_MASK_V;

	rx_pkts[0]->ol_flags = vol.e[0];
	rx_pkts[1]->ol_flags = vol.e[1];
	rx_pkts[2]->ol_flags = vol.e[2];
	rx_pkts[3]->ol_flags = vol.e[3];
}
#else
#define desc_to_olflags_v(desc, rx_pkts) do {} while (0)
#endif

/*
 * vPMD receive routine: only accepts bursts of exactly
 * RTE_IXGBE_VPMD_RX_BURST packets per call.
 *
 * Notice:
 * - if nb_pkts < RTE_IXGBE_VPMD_RX_BURST, no packets are returned
 * - if nb_pkts > RTE_IXGBE_VPMD_RX_BURST, only RTE_IXGBE_VPMD_RX_BURST
 *   DD bits are scanned
 * - ol_flags for RSS and checksum errors are not supported
 */
uint16_t
ixgbe_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
		uint16_t nb_pkts)
{
	volatile union ixgbe_adv_rx_desc *rxdp;
	struct igb_rx_queue *rxq = rx_queue;
	struct igb_rx_entry *sw_ring;
	uint16_t nb_pkts_recd;
	int pos;
	uint64_t var;
	__m128i shuf_msk;
	__m128i dd_check;
	__m128i crc_adjust = _mm_set_epi16(
				0, 0, 0, 0,    /* ignore non-length fields */
				0,             /* ignore high-16bits of pkt_len */
				-rxq->crc_len, /* sub crc on pkt_len */
				-rxq->crc_len, /* sub crc on data_len */
				0              /* ignore pkt_type field */
			);

	if (unlikely(nb_pkts < RTE_IXGBE_VPMD_RX_BURST))
		return 0;

	/* Just the act of getting into the function from the application is
	 * going to cost about 7 cycles */
	rxdp = rxq->rx_ring + rxq->rx_tail;

	_mm_prefetch((const void *)rxdp, _MM_HINT_T0);

	/* See if we need to rearm the RX queue - gives the prefetch a bit
	 * of time to act */
	if (rxq->rxrearm_nb > RTE_IXGBE_RXQ_REARM_THRESH)
		ixgbe_rxq_rearm(rxq);

	/* Before we start moving massive data around, check to see if
	 * there is actually a packet available */
	if (!(rxdp->wb.upper.status_error &
			rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
		return 0;

	/* 4 packets DD mask */
	dd_check = _mm_set_epi64x(0x0000000100000001LL, 0x0000000100000001LL);

	/* mask to shuffle from desc. to mbuf */
	shuf_msk = _mm_set_epi8(
		7, 6, 5, 4,  /* octet 4~7, 32bits rss */
		0xFF, 0xFF,  /* skip high 16 bits vlan_macip, zero out */
		15, 14,      /* octet 14~15, low 16 bits vlan_macip */
		0xFF, 0xFF,  /* skip high 16 bits pkt_len, zero out */
		13, 12,      /* octet 12~13, low 16 bits pkt_len */
		13, 12,      /* octet 12~13, 16 bits data_len */
		0xFF, 0xFF   /* skip pkt_type field */
		);
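
	/*
	 * Note: _mm_set_epi8() lists bytes from most-significant to
	 * least-significant, so the mask above reads from the end of the
	 * 16-byte rx_descriptor_fields1 region back towards pkt_type;
	 * a 0xFF selector makes PSHUFB write a zero byte.
	 */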

	/* Cache is empty -> need to scan the buffer rings, but first move
	 * the next 'n' mbufs into the cache */
	sw_ring = &rxq->sw_ring[rxq->rx_tail];

	/*
	 * A. load 4 packet descriptors per iteration
	 * B. copy 4 mbuf pointers from sw_ring to rx_pkts
	 * C. count the number of DD bits among the 4 packets
	 * D. fill info. from desc to mbuf
	 */
	for (pos = 0, nb_pkts_recd = 0; pos < RTE_IXGBE_VPMD_RX_BURST;
			pos += RTE_IXGBE_DESCS_PER_LOOP,
			rxdp += RTE_IXGBE_DESCS_PER_LOOP) {
		__m128i descs[RTE_IXGBE_DESCS_PER_LOOP];
		__m128i pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4;
		__m128i zero, staterr, sterr_tmp1, sterr_tmp2;
		__m128i mbp1, mbp2; /* two mbuf pointers in one XMM reg. */

		/* B.1 load 2 mbuf pointers */
		mbp1 = _mm_loadu_si128((__m128i *)&sw_ring[pos]);

		/* Read desc statuses backwards to avoid race condition */
		/* A.1 load 4 pkts desc */
		descs[3] = _mm_loadu_si128((__m128i *)(rxdp + 3));

		/* B.2 copy 2 mbuf pointers into rx_pkts */
		_mm_storeu_si128((__m128i *)&rx_pkts[pos], mbp1);

		/* B.1 load 2 more mbuf pointers */
		mbp2 = _mm_loadu_si128((__m128i *)&sw_ring[pos+2]);

		descs[2] = _mm_loadu_si128((__m128i *)(rxdp + 2));
		/* A.1 load remaining 2 pkts desc */
		descs[1] = _mm_loadu_si128((__m128i *)(rxdp + 1));
		descs[0] = _mm_loadu_si128((__m128i *)(rxdp));

		/* B.2 copy 2 mbuf pointers into rx_pkts */
		_mm_storeu_si128((__m128i *)&rx_pkts[pos+2], mbp2);

		/* avoid compiler reorder optimization */
		rte_compiler_barrier();
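
		/*
		 * Note: hardware sets the DD bits in ascending ring order,
		 * so loading the descriptors in descending order (desc 3
		 * first, desc 0 last) guarantees that a DD bit seen set in
		 * any snapshot implies every earlier descriptor, read
		 * afterwards, is also complete; the DD bits counted below
		 * therefore always form a contiguous run from slot 0.
		 */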

		/* D.1 pkt 3,4 convert format from desc to pktmbuf */
		pkt_mb4 = _mm_shuffle_epi8(descs[3], shuf_msk);
		pkt_mb3 = _mm_shuffle_epi8(descs[2], shuf_msk);

		/* C.1 4=>2 filter staterr info only */
		sterr_tmp2 = _mm_unpackhi_epi32(descs[3], descs[2]);
		/* C.1 4=>2 filter staterr info only */
		sterr_tmp1 = _mm_unpackhi_epi32(descs[1], descs[0]);

		/* set ol_flags with packet type and vlan tag */
		desc_to_olflags_v(descs, &rx_pkts[pos]);

		/* D.2 pkt 3,4 set in_port/nb_seg and remove crc */
		pkt_mb4 = _mm_add_epi16(pkt_mb4, crc_adjust);
		pkt_mb3 = _mm_add_epi16(pkt_mb3, crc_adjust);

		/* D.1 pkt 1,2 convert format from desc to pktmbuf */
		pkt_mb2 = _mm_shuffle_epi8(descs[1], shuf_msk);
		pkt_mb1 = _mm_shuffle_epi8(descs[0], shuf_msk);

		/* C.2 get 4 pkts staterr value */
		zero = _mm_xor_si128(dd_check, dd_check);
		staterr = _mm_unpacklo_epi32(sterr_tmp1, sterr_tmp2);

		/* D.3 copy final 3,4 data to rx_pkts */
		_mm_storeu_si128((void *)&rx_pkts[pos+3]->rx_descriptor_fields1,
				pkt_mb4);
		_mm_storeu_si128((void *)&rx_pkts[pos+2]->rx_descriptor_fields1,
				pkt_mb3);

		/* D.2 pkt 1,2 set in_port/nb_seg and remove crc */
		pkt_mb2 = _mm_add_epi16(pkt_mb2, crc_adjust);
		pkt_mb1 = _mm_add_epi16(pkt_mb1, crc_adjust);

		/* C.3 mask the DD bits and pack them into 16-bit lanes */
		staterr = _mm_and_si128(staterr, dd_check);
		staterr = _mm_packs_epi32(staterr, zero);

		/* D.3 copy final 1,2 data to rx_pkts */
		_mm_storeu_si128((void *)&rx_pkts[pos+1]->rx_descriptor_fields1,
				pkt_mb2);
		_mm_storeu_si128((void *)&rx_pkts[pos]->rx_descriptor_fields1,
				pkt_mb1);

		/* C.4 calc available number of desc */
		var = __builtin_popcountll(_mm_cvtsi128_si64(staterr));
		nb_pkts_recd += var;
		if (likely(var != RTE_IXGBE_DESCS_PER_LOOP))
			break;
	}

	/* Update our internal tail pointer */
	rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_pkts_recd);
	rxq->rx_tail = (uint16_t)(rxq->rx_tail & (rxq->nb_rx_desc - 1));
	rxq->rxrearm_nb = (uint16_t)(rxq->rxrearm_nb + nb_pkts_recd);

	return nb_pkts_recd;
}

static inline void
vtx1(volatile union ixgbe_adv_tx_desc *txdp,
		struct rte_mbuf *pkt, uint64_t flags)
{
	__m128i descriptor = _mm_set_epi64x((uint64_t)pkt->pkt_len << 46 |
			flags | pkt->data_len,
			pkt->buf_physaddr + pkt->data_off);
	_mm_store_si128((__m128i *)&txdp->read, descriptor);
}
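
/*
 * Note: in the advanced TX descriptor, the second qword holds cmd_type_len
 * in its low 32 bits and olinfo_status in its high 32 bits; PAYLEN starts
 * at bit 14 of olinfo_status, i.e. bit 46 of the qword, which is why
 * pkt_len is shifted by 46 above while flags and data_len fill the lower
 * command/length bits.
 */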

static inline void
vtx(volatile union ixgbe_adv_tx_desc *txdp,
		struct rte_mbuf **pkt, uint16_t nb_pkts, uint64_t flags)
{
	int i;

	for (i = 0; i < nb_pkts; ++i, ++txdp, ++pkt)
		vtx1(txdp, *pkt, flags);
}
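
/*
 * Free the mbufs attached to the tx_rs_thresh descriptors that hardware
 * has finished with. Runs of mbufs from the same mempool are batched into
 * one rte_mempool_put_bulk() call; a change of pool flushes the batch and
 * starts a new one.
 */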
static inline int __attribute__((always_inline))
ixgbe_tx_free_bufs(struct igb_tx_queue *txq)
{
	struct igb_tx_entry_v *txep;
	uint32_t status;
	uint32_t n;
	uint32_t i;
	int nb_free = 0;
	struct rte_mbuf *m, *free[RTE_IXGBE_TX_MAX_FREE_BUF_SZ];

	/* check DD bit on threshold descriptor */
	status = txq->tx_ring[txq->tx_next_dd].wb.status;
	if (!(status & IXGBE_ADVTXD_STAT_DD))
		return 0;

	n = txq->tx_rs_thresh;

	/*
	 * first buffer to free from S/W ring is at index
	 * tx_next_dd - (tx_rs_thresh - 1)
	 */
	txep = &((struct igb_tx_entry_v *)txq->sw_ring)[txq->tx_next_dd -
			(n - 1)];
#ifdef RTE_MBUF_REFCNT
	m = __rte_pktmbuf_prefree_seg(txep[0].mbuf);
#else
	m = txep[0].mbuf;
#endif
	if (likely(m != NULL)) {
		free[0] = m;
		nb_free = 1;
		for (i = 1; i < n; i++) {
#ifdef RTE_MBUF_REFCNT
			m = __rte_pktmbuf_prefree_seg(txep[i].mbuf);
#else
			m = txep[i].mbuf;
#endif
			if (likely(m != NULL)) {
				if (likely(m->pool == free[0]->pool))
					free[nb_free++] = m;
				else {
					rte_mempool_put_bulk(free[0]->pool,
							(void *)free, nb_free);
					free[0] = m;
					nb_free = 1;
				}
			}
		}
		rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
	} else {
		for (i = 1; i < n; i++) {
			m = __rte_pktmbuf_prefree_seg(txep[i].mbuf);
			if (m != NULL)
				rte_mempool_put(m->pool, m);
		}
	}

	/* buffers were freed, update counters */
	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
	txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
	if (txq->tx_next_dd >= txq->nb_tx_desc)
		txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);

	return txq->tx_rs_thresh;
}

static inline void __attribute__((always_inline))
tx_backlog_entry(struct igb_tx_entry_v *txep,
		 struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	int i;

	for (i = 0; i < (int)nb_pkts; ++i)
		txep[i].mbuf = tx_pkts[i];
}
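
/*
 * vPMD transmit routine: when the burst wraps past the end of the ring,
 * the descriptors are written in two runs, with the RS bit requested on
 * the last descriptor before the wrap; otherwise RS is requested every
 * tx_rs_thresh descriptors so completed buffers can be reclaimed in
 * batches by ixgbe_tx_free_bufs().
 */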
uint16_t
ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
		uint16_t nb_pkts)
{
	struct igb_tx_queue *txq = (struct igb_tx_queue *)tx_queue;
	volatile union ixgbe_adv_tx_desc *txdp;
	struct igb_tx_entry_v *txep;
	uint16_t n, nb_commit, tx_id;
	uint64_t flags = DCMD_DTYP_FLAGS;
	uint64_t rs = IXGBE_ADVTXD_DCMD_RS | DCMD_DTYP_FLAGS;
	int i;

	if (unlikely(nb_pkts > RTE_IXGBE_VPMD_TX_BURST))
		nb_pkts = RTE_IXGBE_VPMD_TX_BURST;

	if (txq->nb_tx_free < txq->tx_free_thresh)
		ixgbe_tx_free_bufs(txq);

	nb_commit = nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
	if (unlikely(nb_pkts == 0))
		return 0;

	tx_id = txq->tx_tail;
	txdp = &txq->tx_ring[tx_id];
	txep = &((struct igb_tx_entry_v *)txq->sw_ring)[tx_id];

	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);

	n = (uint16_t)(txq->nb_tx_desc - tx_id);
	if (nb_commit >= n) {

		tx_backlog_entry(txep, tx_pkts, n);

		for (i = 0; i < n - 1; ++i, ++tx_pkts, ++txdp)
			vtx1(txdp, *tx_pkts, flags);

		/* request RS on the last descriptor before the wrap */
		vtx1(txdp, *tx_pkts++, rs);

		nb_commit = (uint16_t)(nb_commit - n);

		tx_id = 0;
		txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);

		/* wrap around: continue at the start of the ring */
		txdp = &(txq->tx_ring[tx_id]);
		txep = &(((struct igb_tx_entry_v *)txq->sw_ring)[tx_id]);
	}

	tx_backlog_entry(txep, tx_pkts, nb_commit);

	vtx(txdp, tx_pkts, nb_commit, flags);

	tx_id = (uint16_t)(tx_id + nb_commit);
	if (tx_id > txq->tx_next_rs) {
		txq->tx_ring[txq->tx_next_rs].read.cmd_type_len |=
			rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS);
		txq->tx_next_rs = (uint16_t)(txq->tx_next_rs +
			txq->tx_rs_thresh);
	}

	txq->tx_tail = tx_id;

	IXGBE_PCI_REG_WRITE(txq->tdt_reg_addr, txq->tx_tail);

	return nb_pkts;
}

static void
ixgbe_tx_queue_release_mbufs(struct igb_tx_queue *txq)
{
	unsigned i;
	struct igb_tx_entry_v *txe;
	uint16_t nb_free, max_desc;

	if (txq->sw_ring != NULL) {
		/* release the used mbufs in sw_ring */
		nb_free = txq->nb_tx_free;
		max_desc = (uint16_t)(txq->nb_tx_desc - 1);
		for (i = txq->tx_next_dd - (txq->tx_rs_thresh - 1);
		     nb_free < max_desc && i != txq->tx_tail;
		     i = (i + 1) & max_desc) {
			txe = (struct igb_tx_entry_v *)&txq->sw_ring[i];
			if (txe->mbuf != NULL)
				rte_pktmbuf_free_seg(txe->mbuf);
		}
		/* reset the tx_entry list */
		for (i = 0; i < txq->nb_tx_desc; i++) {
			txe = (struct igb_tx_entry_v *)&txq->sw_ring[i];
			txe->mbuf = NULL;
		}
	}
}

static void
ixgbe_tx_free_swring(struct igb_tx_queue *txq)
{
	if (txq == NULL)
		return;

	if (txq->sw_ring != NULL) {
		rte_free((struct igb_rx_entry *)txq->sw_ring - 1);
		txq->sw_ring = NULL;
	}
}

static void
ixgbe_reset_tx_queue(struct igb_tx_queue *txq)
{
	static const union ixgbe_adv_tx_desc zeroed_desc = { .read = {
			.buffer_addr = 0 } };
	struct igb_tx_entry_v *txe = (struct igb_tx_entry_v *)txq->sw_ring;
	uint16_t i;

	/* Zero out HW ring memory */
	for (i = 0; i < txq->nb_tx_desc; i++)
		txq->tx_ring[i] = zeroed_desc;

	/* Initialize SW ring entries */
	for (i = 0; i < txq->nb_tx_desc; i++) {
		volatile union ixgbe_adv_tx_desc *txd = &txq->tx_ring[i];

		txd->wb.status = IXGBE_TXD_STAT_DD;
		txe[i].mbuf = NULL;
	}

	txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
	txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);

	txq->tx_tail = 0;
	txq->nb_tx_used = 0;

	/*
	 * Always allow 1 descriptor to be un-allocated to avoid
	 * a H/W race condition
	 */
	txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);
	txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);
	txq->ctx_curr = 0;
	memset((void *)&txq->ctx_cache, 0,
		IXGBE_CTX_NUM * sizeof(struct ixgbe_advctx_info));
}

static struct ixgbe_txq_ops vec_txq_ops = {
	.release_mbufs = ixgbe_tx_queue_release_mbufs,
	.free_swring = ixgbe_tx_free_swring,
	.reset = ixgbe_reset_tx_queue,
};

int
ixgbe_rxq_vec_setup(struct igb_rx_queue *rxq)
{
	static struct rte_mbuf mb_def = {
		.nb_segs = 1,
		.data_off = RTE_PKTMBUF_HEADROOM,
#ifdef RTE_MBUF_REFCNT
		.refcnt = 1,
#endif
	};

	mb_def.buf_len = rxq->mb_pool->elt_size - sizeof(struct rte_mbuf);
	mb_def.port = rxq->port_id;
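
	/*
	 * Note: the 64 bits beginning at the rearm_data marker capture the
	 * template fields set up above, so ixgbe_rxq_rearm() can
	 * reinitialize each mbuf header with a single 64-bit store.
	 */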
	rxq->mbuf_initializer = *((uint64_t *)&mb_def.rearm_data);

	return 0;
}

int ixgbe_txq_vec_setup(struct igb_tx_queue *txq)
{
	if (txq->sw_ring == NULL)
		return -1;

	/* leave the first one for overflow */
	txq->sw_ring = (struct igb_tx_entry *)
		((struct igb_tx_entry_v *)txq->sw_ring + 1);
	txq->ops = &vec_txq_ops;

	return 0;
}
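
/*
 * Check whether the current device configuration allows the vector RX
 * path: returns 0 if the vPMD can be used, a negative value otherwise.
 */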
int ixgbe_rx_vec_condition_check(struct rte_eth_dev *dev)
{
#ifndef RTE_LIBRTE_IEEE1588
	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
	struct rte_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;

#ifndef RTE_IXGBE_RX_OLFLAGS_ENABLE
	/* without rx ol_flags, the VLAN-present flag cannot be reported */
	if (rxmode->hw_vlan_strip != 0 ||
	    rxmode->hw_vlan_extend != 0)
		return -1;
#endif

	/* no fdir support */
	if (fconf->mode != RTE_FDIR_MODE_NONE)
		return -1;

	/*
	 * - no csum error report support
	 * - no header split support
	 */
	if (rxmode->hw_ip_checksum == 1 ||
	    rxmode->header_split == 1)
		return -1;

	return 0;
#else
	RTE_SET_USED(dev);
	return -1;
#endif
}