*/
#include <stdint.h>
-#include <rte_ethdev_driver.h>
+#include <ethdev_driver.h>
#include <rte_malloc.h>
+#include <rte_vect.h>
#include "base/i40e_prototype.h"
#include "base/i40e_type.h"
#include "i40e_rxtx.h"
#include "i40e_rxtx_vec_common.h"
-#include <arm_neon.h>
#pragma GCC diagnostic ignored "-Wcast-qual"
rx_id = (uint16_t)((rxq->rxrearm_start == 0) ?
(rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1));
- rte_cio_wmb();
+ rte_io_wmb();
/* Update the tail pointer on the NIC */
I40E_PCI_REG_WRITE_RELAXED(rxq->qrx_tail, rx_id);
}
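The rte_cio_* barriers were folded into the plain rte_io_* barriers in DPDK 20.11, so rte_io_wmb() is the drop-in replacement here: it orders the descriptor-ring stores ahead of the relaxed MMIO tail write. A minimal sketch of the idiom, assuming a hypothetical ring and mapped tail register (not the driver's exact code):

        #include <stdint.h>
        #include <rte_byteorder.h>
        #include <rte_io.h>

        static inline void
        rearm_doorbell(volatile uint64_t *ring, uint16_t idx,
                       uint64_t dma_addr, volatile void *tail_reg)
        {
                ring[idx] = rte_cpu_to_le_64(dma_addr); /* publish desc */
                rte_io_wmb();           /* ring stores before tail write */
                rte_write32_relaxed(rte_cpu_to_le_32(idx), tail_reg);
        }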
0x1c03804, 0x1c03804, 0x1c03804, 0x1c03804};
const uint32x4_t cksum_mask = {
- PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
- PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
- PKT_RX_EIP_CKSUM_BAD,
- PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
- PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
- PKT_RX_EIP_CKSUM_BAD,
- PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
- PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
- PKT_RX_EIP_CKSUM_BAD,
- PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
- PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
- PKT_RX_EIP_CKSUM_BAD};
+ RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD |
+ RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
+ RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD,
+ RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD |
+ RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
+ RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD,
+ RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD |
+ RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
+ RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD,
+ RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD |
+ RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
+ RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD};
/* map rss and vlan type to rss hash and vlan flag */
const uint8x16_t vlan_flags = {
0, 0, 0, 0,
- PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED, 0, 0, 0,
+ RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0};
const uint8x16_t rss_flags = {
- 0, PKT_RX_FDIR, 0, 0,
- 0, 0, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH | PKT_RX_FDIR,
+ 0, RTE_MBUF_F_RX_FDIR, 0, 0,
+ 0, 0, RTE_MBUF_F_RX_RSS_HASH, RTE_MBUF_F_RX_RSS_HASH | RTE_MBUF_F_RX_FDIR,
0, 0, 0, 0,
0, 0, 0, 0};
const uint8x16_t l3_l4e_flags = {
- (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD) >> 1,
- PKT_RX_IP_CKSUM_BAD >> 1,
- (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD) >> 1,
- (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_IP_CKSUM_GOOD | PKT_RX_EIP_CKSUM_BAD) >> 1,
- (PKT_RX_EIP_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_IP_CKSUM_GOOD | PKT_RX_EIP_CKSUM_BAD |
- PKT_RX_L4_CKSUM_BAD) >> 1,
- (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
- PKT_RX_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD) >> 1,
+ RTE_MBUF_F_RX_IP_CKSUM_BAD >> 1,
+ (RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
+ RTE_MBUF_F_RX_L4_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
+ RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
0, 0, 0, 0, 0, 0, 0, 0};
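The l3_l4e table entries are stored pre-shifted right by one because vqtbl1q_u8() indexes 8-bit lanes, while RTE_MBUF_F_RX_L4_CKSUM_GOOD is bit 8 and would not fit; after the lookup the result is shifted back left and masked with cksum_mask. A sketch of that recovery step, assuming a hypothetical index vector idx holding the per-packet L3L4E codes:

        #include <arm_neon.h>

        static inline uint32x4_t
        decode_l3_l4e(uint8x16_t l3_l4e_flags, uint32x4_t cksum_mask,
                      uint32x4_t idx)
        {
                uint32x4_t f = vreinterpretq_u32_u8(
                        vqtbl1q_u8(l3_l4e_flags, vreinterpretq_u8_u32(idx)));
                f = vshlq_n_u32(f, 1);           /* undo the >> 1 in table */
                return vandq_u32(f, cksum_mask); /* drop non-checksum bits */
        }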
vlan0 = vzipq_u32(vreinterpretq_u32_u64(descs[0]),
#define I40E_UINT16_BIT (CHAR_BIT * sizeof(uint16_t))
static inline void
-desc_to_ptype_v(uint64x2_t descs[4], struct rte_mbuf **__restrict rx_pkts,
- uint32_t *__restrict ptype_tbl)
+desc_to_ptype_v(uint64x2_t descs[4], struct rte_mbuf **__rte_restrict rx_pkts,
+ uint32_t *__rte_restrict ptype_tbl)
{
int i;
uint8_t ptype;
}
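For reference, a sketch of the loop body elided from this hunk (illustrative, reconstructed from the i40e descriptor layout, not part of the patch): the 8-bit PTYPE field occupies bits 30..37 of descriptor qword1, so a 64-bit right shift by 30 leaves it in byte 8 of the vector, ready to index ptype_tbl:

        for (i = 0; i < RTE_I40E_DESCS_PER_LOOP; i++) {
                ptype = vgetq_lane_u8(vreinterpretq_u8_u64(
                                vshrq_n_u64(descs[i], 30)), 8);
                rx_pkts[i]->packet_type = ptype_tbl[ptype];
        }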

- /*
+/**
+ * vPMD raw receive routine that only accepts nb_pkts >= RTE_I40E_DESCS_PER_LOOP
+ *
* Notice:
* - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
- * - nb_pkts > RTE_I40E_VPMD_RX_BURST, only scan RTE_I40E_VPMD_RX_BURST
- * numbers of DD bits
+ * - nb_pkts is floor-aligned down to a multiple of RTE_I40E_DESCS_PER_LOOP
*/
static inline uint16_t
-_recv_raw_pkts_vec(struct i40e_rx_queue *__restrict rxq, struct rte_mbuf
- **__restrict rx_pkts, uint16_t nb_pkts, uint8_t *split_packet)
+_recv_raw_pkts_vec(struct i40e_rx_queue *__rte_restrict rxq,
+ struct rte_mbuf **__rte_restrict rx_pkts,
+ uint16_t nb_pkts, uint8_t *split_packet)
{
volatile union i40e_rx_desc *rxdp;
struct i40e_rx_entry *sw_ring;
0, 0, 0 /* ignore non-length fields */
};
- /* nb_pkts shall be less equal than RTE_I40E_MAX_RX_BURST */
- nb_pkts = RTE_MIN(nb_pkts, RTE_I40E_MAX_RX_BURST);
-
/* nb_pkts has to be floor-aligned to RTE_I40E_DESCS_PER_LOOP */
nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_I40E_DESCS_PER_LOOP);
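With the RTE_I40E_MAX_RX_BURST clamp removed, the only normalization left is the floor alignment, which rounds the request down to a whole number of vector iterations; assuming RTE_I40E_DESCS_PER_LOOP is 4, for example:

        /* illustrative only: 37 requested descriptors -> 36 processed */
        uint16_t nb = RTE_ALIGN_FLOOR(37, RTE_I40E_DESCS_PER_LOOP);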
int32x4_t len_shl = {0, 0, 0, PKTLEN_SHIFT};
- /* B.1 load 1 mbuf point */
- mbp1 = vld1q_u64((uint64_t *)&sw_ring[pos]);
- /* Read desc statuses backwards to avoid race condition */
- /* A.1 load 4 pkts desc */
+ /* A.1 load desc[3-0] */
descs[3] = vld1q_u64((uint64_t *)(rxdp + 3));
-
- /* B.2 copy 2 mbuf point into rx_pkts */
- vst1q_u64((uint64_t *)&rx_pkts[pos], mbp1);
-
- /* B.1 load 1 mbuf point */
- mbp2 = vld1q_u64((uint64_t *)&sw_ring[pos + 2]);
-
descs[2] = vld1q_u64((uint64_t *)(rxdp + 2));
- /* B.1 load 2 mbuf point */
descs[1] = vld1q_u64((uint64_t *)(rxdp + 1));
descs[0] = vld1q_u64((uint64_t *)(rxdp));
- /* B.2 copy 2 mbuf point into rx_pkts */
+ /* Use acquire fence to order loads of descriptor qwords */
+ rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+ /* A.2 reload qword0 to make it ordered after qword1 load */
+ descs[3] = vld1q_lane_u64((uint64_t *)(rxdp + 3), descs[3], 0);
+ descs[2] = vld1q_lane_u64((uint64_t *)(rxdp + 2), descs[2], 0);
+ descs[1] = vld1q_lane_u64((uint64_t *)(rxdp + 1), descs[1], 0);
+ descs[0] = vld1q_lane_u64((uint64_t *)(rxdp), descs[0], 0);
+
+ /* B.1 load 4 mbuf pointers */
+ mbp1 = vld1q_u64((uint64_t *)&sw_ring[pos]);
+ mbp2 = vld1q_u64((uint64_t *)&sw_ring[pos + 2]);
+
+ /* B.2 copy 4 mbuf pointers into rx_pkts */
+ vst1q_u64((uint64_t *)&rx_pkts[pos], mbp1);
vst1q_u64((uint64_t *)&rx_pkts[pos + 2], mbp2);
if (split_packet) {
rte_mbuf_prefetch_part2(rx_pkts[pos + 3]);
}
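The load sequence above is the usual idiom for weakly ordered descriptor reads: qword1 of each descriptor (which carries the DD bit) is read first, descs[3] down to descs[0], and the acquire fence then keeps the qword0 reloads from being reordered ahead of those reads, so no field can be older than the DD bit already observed. A single-descriptor sketch of the idiom, with a hypothetical desc pointer:

        #include <arm_neon.h>
        #include <rte_atomic.h>

        static inline uint64x2_t
        load_desc_ordered(const uint64_t *desc)
        {
                uint64x2_t d = vld1q_u64(desc);     /* qword0 + qword1 */
                rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
                /* reload qword0 so it cannot predate the qword1 just seen */
                return vld1q_lane_u64(desc, d, 0);
        }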
- /* pkt 3,4 shift the pktlen field to be 16-bit aligned*/
+ /* shift the pktlen field of all pkts to be 16-bit aligned */
uint32x4_t len3 = vshlq_u32(vreinterpretq_u32_u64(descs[3]),
len_shl);
- descs[3] = vreinterpretq_u64_u32(len3);
+ descs[3] = vreinterpretq_u64_u16(vsetq_lane_u16
+ (vgetq_lane_u16(vreinterpretq_u16_u32(len3), 7),
+ vreinterpretq_u16_u64(descs[3]),
+ 7));
uint32x4_t len2 = vshlq_u32(vreinterpretq_u32_u64(descs[2]),
len_shl);
- descs[2] = vreinterpretq_u64_u32(len2);
+ descs[2] = vreinterpretq_u64_u16(vsetq_lane_u16
+ (vgetq_lane_u16(vreinterpretq_u16_u32(len2), 7),
+ vreinterpretq_u16_u64(descs[2]),
+ 7));
+ uint32x4_t len1 = vshlq_u32(vreinterpretq_u32_u64(descs[1]),
+ len_shl);
+ descs[1] = vreinterpretq_u64_u16(vsetq_lane_u16
+ (vgetq_lane_u16(vreinterpretq_u16_u32(len1), 7),
+ vreinterpretq_u16_u64(descs[1]),
+ 7));
+ uint32x4_t len0 = vshlq_u32(vreinterpretq_u32_u64(descs[0]),
+ len_shl);
+ descs[0] = vreinterpretq_u64_u16(vsetq_lane_u16
+ (vgetq_lane_u16(vreinterpretq_u16_u32(len0), 7),
+ vreinterpretq_u16_u64(descs[0]),
+ 7));
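The longer form above replaces the old whole-word writeback: previously the shifted 32-bit word was stored back into the descriptor in full, clobbering neighboring bits of qword1 that the later ptype/olflags parsing still reads; the vgetq_lane_u16/vsetq_lane_u16 pair splices only the now-aligned 16-bit pktlen (u16 lane 7) back in. The same step for a single descriptor, factored out as a hypothetical helper:

        #include <arm_neon.h>

        static inline uint64x2_t
        align_pktlen(uint64x2_t desc, int32x4_t len_shl)
        {
                uint32x4_t len = vshlq_u32(vreinterpretq_u32_u64(desc),
                                           len_shl);
                /* write back only u16 lane 7, keeping all other fields */
                return vreinterpretq_u64_u16(vsetq_lane_u16(
                                vgetq_lane_u16(vreinterpretq_u16_u32(len), 7),
                                vreinterpretq_u16_u64(desc), 7));
        }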
- /* D.1 pkt 3,4 convert format from desc to pktmbuf */
+ /* D.1 convert all pkts from desc to pktmbuf format */
pkt_mb4 = vqtbl1q_u8(vreinterpretq_u8_u64(descs[3]), shuf_msk);
pkt_mb3 = vqtbl1q_u8(vreinterpretq_u8_u64(descs[2]), shuf_msk);
+ pkt_mb2 = vqtbl1q_u8(vreinterpretq_u8_u64(descs[1]), shuf_msk);
+ pkt_mb1 = vqtbl1q_u8(vreinterpretq_u8_u64(descs[0]), shuf_msk);
+
+ /* D.2 set in_port/nb_seg and remove crc for all pkts */
+ tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb4), crc_adjust);
+ pkt_mb4 = vreinterpretq_u8_u16(tmp);
+ tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb3), crc_adjust);
+ pkt_mb3 = vreinterpretq_u8_u16(tmp);
+ tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb2), crc_adjust);
+ pkt_mb2 = vreinterpretq_u8_u16(tmp);
+ tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb1), crc_adjust);
+ pkt_mb1 = vreinterpretq_u8_u16(tmp);
+
+ /* D.3 copy final data to rx_pkts */
+ vst1q_u8((void *)&rx_pkts[pos + 3]->rx_descriptor_fields1,
+ pkt_mb4);
+ vst1q_u8((void *)&rx_pkts[pos + 2]->rx_descriptor_fields1,
+ pkt_mb3);
+ vst1q_u8((void *)&rx_pkts[pos + 1]->rx_descriptor_fields1,
+ pkt_mb2);
+ vst1q_u8((void *)&rx_pkts[pos]->rx_descriptor_fields1,
+ pkt_mb1);
+
+ desc_to_ptype_v(descs, &rx_pkts[pos], ptype_tbl);
+
+ desc_to_olflags_v(rxq, descs, &rx_pkts[pos]);
+
+ if (likely(pos + RTE_I40E_DESCS_PER_LOOP < nb_pkts)) {
+ rte_prefetch_non_temporal(rxdp + RTE_I40E_DESCS_PER_LOOP);
+ }
/* C.1 4=>2 filter staterr info only */
sterr_tmp2 = vzipq_u16(vreinterpretq_u16_u64(descs[1]),
vreinterpretq_u16_u64(descs[3]));
- /* C.1 4=>2 filter staterr info only */
sterr_tmp1 = vzipq_u16(vreinterpretq_u16_u64(descs[0]),
vreinterpretq_u16_u64(descs[2]));
staterr = vzipq_u16(sterr_tmp1.val[1],
sterr_tmp2.val[1]).val[0];
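Lane bookkeeping for the two-level zip, which explains the u64 lane-0 extract used for stat further down: vzipq_u16(descs[0], descs[2]).val[1] interleaves the qword1 halves as [d0.4 d2.4 d0.5 d2.5 ...], and zipping the two temporaries again leaves each descriptor's 16-bit status word (u16 lane 4, which carries the DD bit) in staterr lanes 0..3:

        /* after the zips, u64 lane 0 of staterr packs the four status
         * words, one DD bit every 16 bits, ready for the scalar extract
         */
        uint64_t dd_words = vgetq_lane_u64(vreinterpretq_u64_u16(staterr), 0);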
- desc_to_olflags_v(rxq, descs, &rx_pkts[pos]);
-
- /* D.2 pkt 3,4 set in_port/nb_seg and remove crc */
- tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb4), crc_adjust);
- pkt_mb4 = vreinterpretq_u8_u16(tmp);
- tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb3), crc_adjust);
- pkt_mb3 = vreinterpretq_u8_u16(tmp);
-
- /* pkt 1,2 shift the pktlen field to be 16-bit aligned*/
- uint32x4_t len1 = vshlq_u32(vreinterpretq_u32_u64(descs[1]),
- len_shl);
- descs[1] = vreinterpretq_u64_u32(len1);
- uint32x4_t len0 = vshlq_u32(vreinterpretq_u32_u64(descs[0]),
- len_shl);
- descs[0] = vreinterpretq_u64_u32(len0);
-
- /* D.1 pkt 1,2 convert format from desc to pktmbuf */
- pkt_mb2 = vqtbl1q_u8(vreinterpretq_u8_u64(descs[1]), shuf_msk);
- pkt_mb1 = vqtbl1q_u8(vreinterpretq_u8_u64(descs[0]), shuf_msk);
-
- /* D.3 copy final 3,4 data to rx_pkts */
- vst1q_u8((void *)&rx_pkts[pos + 3]->rx_descriptor_fields1,
- pkt_mb4);
- vst1q_u8((void *)&rx_pkts[pos + 2]->rx_descriptor_fields1,
- pkt_mb3);
-
- /* D.2 pkt 1,2 set in_port/nb_seg and remove crc */
- tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb2), crc_adjust);
- pkt_mb2 = vreinterpretq_u8_u16(tmp);
- tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb1), crc_adjust);
- pkt_mb1 = vreinterpretq_u8_u16(tmp);
-
/* C* extract and record EOP bit */
if (split_packet) {
uint8x16_t eop_shuf_mask = {
eop_bits = vmvnq_u8(vreinterpretq_u8_u16(staterr));
eop_bits = vandq_u8(eop_bits, eop_check);
/* the staterr values are not in order, as the count
- * count of dd bits doesn't care. However, for end of
+ * of DD bits does not depend on it. However, for end of
* packet tracking, we do care, so shuffle. This also
* compresses the 32-bit values to 8-bit
*/
I40E_UINT16_BIT - 1));
stat = ~vgetq_lane_u64(vreinterpretq_u64_u16(staterr), 0);
- rte_prefetch_non_temporal(rxdp + RTE_I40E_DESCS_PER_LOOP);
-
- /* D.3 copy final 1,2 data to rx_pkts */
- vst1q_u8((void *)&rx_pkts[pos + 1]->rx_descriptor_fields1,
- pkt_mb2);
- vst1q_u8((void *)&rx_pkts[pos]->rx_descriptor_fields1,
- pkt_mb1);
- desc_to_ptype_v(descs, &rx_pkts[pos], ptype_tbl);
 /* C.4 calc available number of desc */
if (unlikely(stat == 0)) {
nb_pkts_recd += RTE_I40E_DESCS_PER_LOOP;
* numbers of DD bits
*/
uint16_t
-i40e_recv_pkts_vec(void *__restrict rx_queue,
- struct rte_mbuf **__restrict rx_pkts, uint16_t nb_pkts)
+i40e_recv_pkts_vec(void *__rte_restrict rx_queue,
+ struct rte_mbuf **__rte_restrict rx_pkts, uint16_t nb_pkts)
{
return _recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL);
}
- /* vPMD receive routine that reassembles scattered packets
+/**
+ * vPMD receive routine that reassembles a single burst of 32 scattered packets
+ *
* Notice:
* - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
- * - nb_pkts > RTE_I40E_VPMD_RX_BURST, only scan RTE_I40E_VPMD_RX_BURST
- * numbers of DD bits
*/
-uint16_t
-i40e_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
- uint16_t nb_pkts)
+static uint16_t
+i40e_recv_scattered_burst_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
{
struct i40e_rx_queue *rxq = rx_queue;
&split_flags[i]);
}
+/**
+ * vPMD receive routine that reassembles scattered packets.
+ */
+uint16_t
+i40e_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ uint16_t retval = 0;
+
+ while (nb_pkts > RTE_I40E_VPMD_RX_BURST) {
+ uint16_t burst;
+
+ burst = i40e_recv_scattered_burst_vec(rx_queue,
+ rx_pkts + retval,
+ RTE_I40E_VPMD_RX_BURST);
+ retval += burst;
+ nb_pkts -= burst;
+ if (burst < RTE_I40E_VPMD_RX_BURST)
+ return retval;
+ }
+
+ return retval + i40e_recv_scattered_burst_vec(rx_queue,
+ rx_pkts + retval,
+ nb_pkts);
+}
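The wrapper removes the old 32-packet ceiling on the scattered vector path by looping over fixed-size bursts; callers simply see a normal Rx burst function. A usage sketch through the ethdev API (port/queue ids and burst size illustrative):

        #include <rte_ethdev.h>
        #include <rte_mbuf.h>

        static void
        poll_port(uint16_t port_id, uint16_t queue_id)
        {
                struct rte_mbuf *pkts[128];
                uint16_t i, n;

                /* the driver splits this 128-packet request into 32-packet
                 * vector bursts internally; a short count means the ring
                 * drained
                 */
                n = rte_eth_rx_burst(port_id, queue_id, pkts, 128);
                for (i = 0; i < n; i++)
                        rte_pktmbuf_free(pkts[i]);
        }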
+
static inline void
vtx1(volatile struct i40e_tx_desc *txdp,
struct rte_mbuf *pkt, uint64_t flags)
}
uint16_t
-i40e_xmit_fixed_burst_vec(void *__restrict tx_queue,
- struct rte_mbuf **__restrict tx_pkts, uint16_t nb_pkts)
+i40e_xmit_fixed_burst_vec(void *__rte_restrict tx_queue,
+ struct rte_mbuf **__rte_restrict tx_pkts, uint16_t nb_pkts)
{
struct i40e_tx_queue *txq = (struct i40e_tx_queue *)tx_queue;
volatile struct i40e_tx_desc *txdp;
txq->tx_tail = tx_id;
- rte_cio_wmb();
+ rte_io_wmb();
I40E_PCI_REG_WRITE_RELAXED(txq->qtx_tail, tx_id);
return nb_pkts;