diff --git a/drivers/net/thunderx/nicvf_rxtx.c b/drivers/net/thunderx/nicvf_rxtx.c
index 4980dab79d..d0ea95f1d1 100644
--- a/drivers/net/thunderx/nicvf_rxtx.c
+++ b/drivers/net/thunderx/nicvf_rxtx.c
@@ -25,7 +25,7 @@
 #include "nicvf_rxtx.h"
 #include "nicvf_logs.h"
 
-static inline void __hot
+static inline void __rte_hot
 fill_sq_desc_header(union sq_entry_t *entry, struct rte_mbuf *pkt)
 {
 	/* Local variable sqe to avoid read from sq desc memory*/
@@ -61,7 +61,15 @@ fill_sq_desc_header(union sq_entry_t *entry, struct rte_mbuf *pkt)
 	entry->buff[0] = sqe.buff[0];
 }
 
-void __hot
+static inline void __rte_hot
+fill_sq_desc_header_zero_w1(union sq_entry_t *entry,
+				struct rte_mbuf *pkt)
+{
+	fill_sq_desc_header(entry, pkt);
+	entry->buff[1] = 0ULL;
+}
+
+void __rte_hot
 nicvf_single_pool_free_xmited_buffers(struct nicvf_txq *sq)
 {
 	int j = 0;
@@ -84,7 +92,7 @@ nicvf_single_pool_free_xmited_buffers(struct nicvf_txq *sq)
 	NICVF_TX_ASSERT(sq->xmit_bufs >= 0);
 }
 
-void __hot
+void __rte_hot
 nicvf_multi_pool_free_xmited_buffers(struct nicvf_txq *sq)
 {
 	uint32_t n = 0;
@@ -107,7 +115,7 @@ nicvf_multi_pool_free_xmited_buffers(struct nicvf_txq *sq)
 	NICVF_TX_ASSERT(sq->xmit_bufs >= 0);
 }
 
-static inline uint32_t __hot
+static inline uint32_t __rte_hot
 nicvf_free_tx_desc(struct nicvf_txq *sq)
 {
 	return ((sq->head - sq->tail - 1) & sq->qlen_mask);
@@ -116,7 +124,7 @@ nicvf_free_tx_desc(struct nicvf_txq *sq)
 /* Send Header + Packet */
 #define TX_DESC_PER_PKT 2
 
-static inline uint32_t __hot
+static inline uint32_t __rte_hot
 nicvf_free_xmitted_buffers(struct nicvf_txq *sq, struct rte_mbuf **tx_pkts,
 			   uint16_t nb_pkts)
 {
@@ -134,7 +142,7 @@ nicvf_free_xmitted_buffers(struct nicvf_txq *sq, struct rte_mbuf **tx_pkts,
 	return free_desc;
 }
 
-uint16_t __hot
+uint16_t __rte_hot
 nicvf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 {
 	int i;
@@ -173,7 +181,7 @@ nicvf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 	return i;
 }
 
-uint16_t __hot
+uint16_t __rte_hot
 nicvf_xmit_pkts_multiseg(void *tx_queue, struct rte_mbuf **tx_pkts,
 			 uint16_t nb_pkts)
 {
@@ -204,7 +212,7 @@ nicvf_xmit_pkts_multiseg(void *tx_queue, struct rte_mbuf **tx_pkts,
 		used_bufs += nb_segs;
 
 		txbuffs[tail] = NULL;
-		fill_sq_desc_header(desc_ptr + tail, pkt);
+		fill_sq_desc_header_zero_w1(desc_ptr + tail, pkt);
 		tail = (tail + 1) & qlen_mask;
 
 		txbuffs[tail] = pkt;
@@ -325,13 +333,13 @@ static const uint32_t ptype_table[16][16] __rte_cache_aligned = {
 	[L3_OTHER][L4_NVGRE] = RTE_PTYPE_TUNNEL_NVGRE,
 };
 
-static inline uint32_t __hot
+static inline uint32_t __rte_hot
 nicvf_rx_classify_pkt(cqe_rx_word0_t cqe_rx_w0)
 {
 	return ptype_table[cqe_rx_w0.l3_type][cqe_rx_w0.l4_type];
 }
 
-static inline uint64_t __hot
+static inline uint64_t __rte_hot
 nicvf_set_olflags(const cqe_rx_word0_t cqe_rx_w0)
 {
 	static const uint64_t flag_table[3] __rte_cache_aligned = {
@@ -345,7 +353,7 @@ nicvf_set_olflags(const cqe_rx_word0_t cqe_rx_w0)
 	return flag_table[idx];
 }
 
-static inline int __hot
+static inline int __rte_hot
 nicvf_fill_rbdr(struct nicvf_rxq *rxq, int to_fill)
 {
 	int i;
@@ -377,15 +385,14 @@ nicvf_fill_rbdr(struct nicvf_rxq *rxq, int to_fill)
 		ltail++;
 	}
 
-	while (__atomic_load_n(&rbdr->tail, __ATOMIC_RELAXED) != next_tail)
-		rte_pause();
+	rte_wait_until_equal_32(&rbdr->tail, next_tail, __ATOMIC_RELAXED);
 
 	__atomic_store_n(&rbdr->tail, ltail, __ATOMIC_RELEASE);
 	nicvf_addr_write(door, to_fill);
 	return to_fill;
 }
 
-static inline int32_t __hot
+static inline int32_t __rte_hot
 nicvf_rx_pkts_to_process(struct nicvf_rxq *rxq, uint16_t nb_pkts,
 			 int32_t available_space)
 {
@@ -396,7 +403,7 @@ nicvf_rx_pkts_to_process(struct nicvf_rxq *rxq, uint16_t nb_pkts,
 	return RTE_MIN(nb_pkts, available_space);
 }
 
-static inline void __hot
+static inline void __rte_hot
 nicvf_rx_offload(cqe_rx_word0_t cqe_rx_w0, cqe_rx_word2_t cqe_rx_w2,
 		 struct rte_mbuf *pkt)
 {
@@ -445,6 +452,14 @@ nicvf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts,
 		pkt->ol_flags = 0;
 		if (flag & NICVF_RX_OFFLOAD_CKSUM)
 			pkt->ol_flags = nicvf_set_olflags(cqe_rx_w0);
+		if (flag & NICVF_RX_OFFLOAD_VLAN_STRIP) {
+			if (unlikely(cqe_rx_w0.vlan_stripped)) {
+				pkt->ol_flags |= PKT_RX_VLAN
+					| PKT_RX_VLAN_STRIPPED;
+				pkt->vlan_tci =
+					rte_cpu_to_be_16(cqe_rx_w2.vlan_tci);
+			}
+		}
 		pkt->data_len = cqe_rx_w3.rb0_sz;
 		pkt->pkt_len = cqe_rx_w3.rb0_sz;
 		pkt->packet_type = nicvf_rx_classify_pkt(cqe_rx_w0);
@@ -469,7 +484,7 @@ nicvf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts,
 	return to_process;
 }
 
-uint16_t __hot
+uint16_t __rte_hot
 nicvf_recv_pkts_no_offload(void *rx_queue, struct rte_mbuf **rx_pkts,
 		uint16_t nb_pkts)
 {
@@ -477,7 +492,7 @@ nicvf_recv_pkts_no_offload(void *rx_queue, struct rte_mbuf **rx_pkts,
 			NICVF_RX_OFFLOAD_NONE);
 }
 
-uint16_t __hot
+uint16_t __rte_hot
 nicvf_recv_pkts_cksum(void *rx_queue, struct rte_mbuf **rx_pkts,
 		uint16_t nb_pkts)
 {
@@ -485,7 +500,23 @@ nicvf_recv_pkts_cksum(void *rx_queue, struct rte_mbuf **rx_pkts,
 			NICVF_RX_OFFLOAD_CKSUM);
 }
 
-static __rte_always_inline uint16_t __hot
+uint16_t __rte_hot
+nicvf_recv_pkts_vlan_strip(void *rx_queue, struct rte_mbuf **rx_pkts,
+		uint16_t nb_pkts)
+{
+	return nicvf_recv_pkts(rx_queue, rx_pkts, nb_pkts,
+			NICVF_RX_OFFLOAD_NONE | NICVF_RX_OFFLOAD_VLAN_STRIP);
+}
+
+uint16_t __rte_hot
+nicvf_recv_pkts_cksum_vlan_strip(void *rx_queue, struct rte_mbuf **rx_pkts,
+		uint16_t nb_pkts)
+{
+	return nicvf_recv_pkts(rx_queue, rx_pkts, nb_pkts,
+			NICVF_RX_OFFLOAD_CKSUM | NICVF_RX_OFFLOAD_VLAN_STRIP);
+}
+
+static __rte_always_inline uint16_t __rte_hot
 nicvf_process_cq_mseg_entry(struct cqe_rx_t *cqe_rx,
 			uint64_t mbuf_phys_off,
 			struct rte_mbuf **rx_pkt, uint8_t rbptr_offset,
@@ -516,6 +547,13 @@ nicvf_process_cq_mseg_entry(struct cqe_rx_t *cqe_rx,
 	pkt->ol_flags = 0;
 	if (flag & NICVF_RX_OFFLOAD_CKSUM)
 		pkt->ol_flags = nicvf_set_olflags(cqe_rx_w0);
+	if (flag & NICVF_RX_OFFLOAD_VLAN_STRIP) {
+		if (unlikely(cqe_rx_w0.vlan_stripped)) {
+			pkt->ol_flags |= PKT_RX_VLAN
+				| PKT_RX_VLAN_STRIPPED;
+			pkt->vlan_tci = rte_cpu_to_be_16(cqe_rx_w2.vlan_tci);
+		}
+	}
 	nicvf_rx_offload(cqe_rx_w0, cqe_rx_w2, pkt);
 
 	*rx_pkt = pkt;
@@ -534,7 +572,7 @@ nicvf_process_cq_mseg_entry(struct cqe_rx_t *cqe_rx,
 	return nb_segs;
 }
 
-static __rte_always_inline uint16_t __hot
+static __rte_always_inline uint16_t __rte_hot
 nicvf_recv_pkts_multiseg(void *rx_queue, struct rte_mbuf **rx_pkts,
 			 uint16_t nb_pkts, const uint32_t flag)
 {
@@ -578,7 +616,7 @@ nicvf_recv_pkts_multiseg(void *rx_queue, struct rte_mbuf **rx_pkts,
 	return to_process;
 }
 
-uint16_t __hot
+uint16_t __rte_hot
 nicvf_recv_pkts_multiseg_no_offload(void *rx_queue, struct rte_mbuf **rx_pkts,
 				    uint16_t nb_pkts)
 {
@@ -586,7 +624,7 @@ nicvf_recv_pkts_multiseg_no_offload(void *rx_queue, struct rte_mbuf **rx_pkts,
 				  NICVF_RX_OFFLOAD_NONE);
 }
 
-uint16_t __hot
+uint16_t __rte_hot
 nicvf_recv_pkts_multiseg_cksum(void *rx_queue, struct rte_mbuf **rx_pkts,
 			       uint16_t nb_pkts)
 {
@@ -594,6 +632,22 @@ nicvf_recv_pkts_multiseg_cksum(void *rx_queue, struct rte_mbuf **rx_pkts,
 				  NICVF_RX_OFFLOAD_CKSUM);
 }
 
+uint16_t __rte_hot
+nicvf_recv_pkts_multiseg_vlan_strip(void *rx_queue, struct rte_mbuf **rx_pkts,
+		uint16_t nb_pkts)
+{
+	return nicvf_recv_pkts_multiseg(rx_queue, rx_pkts, nb_pkts,
+			NICVF_RX_OFFLOAD_NONE | NICVF_RX_OFFLOAD_VLAN_STRIP);
+}
+
+uint16_t __rte_hot
+nicvf_recv_pkts_multiseg_cksum_vlan_strip(void *rx_queue,
+		struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+	return nicvf_recv_pkts_multiseg(rx_queue, rx_pkts, nb_pkts,
+			NICVF_RX_OFFLOAD_CKSUM | NICVF_RX_OFFLOAD_VLAN_STRIP);
+}
+
 uint32_t
 nicvf_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_idx)
 {
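
A note on the change that dominates this diff: every __hot annotation is renamed to __rte_hot, moving the attribute macro into DPDK's reserved rte_ namespace. To my knowledge __rte_hot is provided by rte_common.h; a paraphrased sketch of what it expands to on GCC/Clang:

/*
 * Paraphrased sketch of the rte_common.h definition: mark a function as a
 * hot spot so the compiler optimizes it more aggressively and may group it
 * with other hot functions in the text section.
 */
#define __rte_hot __attribute__((hot))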
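
The one behavioral change on the RX refill path is in nicvf_fill_rbdr(): the open-coded polling loop over rbdr->tail is replaced by rte_wait_until_equal_32() from rte_pause.h. Below is a minimal, self-contained sketch of that pattern; the tail variable and the helper names are illustrative, only the two APIs themselves come from the diff.

#include <stdint.h>
#include <rte_pause.h>	/* rte_pause(), rte_wait_until_equal_32() */

static uint32_t tail;	/* stand-in for rbdr->tail */

/* Old pattern: hand-rolled spin loop with a relaxed atomic load. */
static void
wait_for_tail_old(uint32_t next_tail)
{
	while (__atomic_load_n(&tail, __ATOMIC_RELAXED) != next_tail)
		rte_pause();
}

/*
 * New pattern: a single call that spins until *addr == expected using the
 * requested memory order. On arm64 builds with RTE_ARM_USE_WFE enabled,
 * this can park the core on WFE instead of busy-polling.
 */
static void
wait_for_tail_new(uint32_t next_tail)
{
	rte_wait_until_equal_32(&tail, next_tail, __ATOMIC_RELAXED);
}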
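
The new *_vlan_strip RX handlers set PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED and store the TCI in the mbuf's vlan_tci field whenever the CQE reports a stripped tag. A hedged sketch of how an application-side RX loop might consume that metadata; the port/queue ids and burst size are illustrative, and the flag names are the pre-21.11 ones this diff uses.

#include <stdio.h>
#include <rte_ethdev.h>
#include <rte_mbuf.h>

#define BURST_SZ 32	/* illustrative burst size */

static void
rx_loop_once(uint16_t port_id, uint16_t queue_id)
{
	struct rte_mbuf *pkts[BURST_SZ];
	uint16_t nb, i;

	nb = rte_eth_rx_burst(port_id, queue_id, pkts, BURST_SZ);
	for (i = 0; i < nb; i++) {
		struct rte_mbuf *m = pkts[i];

		/*
		 * When the PMD stripped the tag (as the handlers above do),
		 * the VLAN header is gone from the packet data and the TCI
		 * lives in m->vlan_tci.
		 */
		if (m->ol_flags & PKT_RX_VLAN_STRIPPED)
			printf("port %u: stripped VLAN, tci=0x%04x\n",
			       (unsigned int)port_id,
			       (unsigned int)m->vlan_tci);

		rte_pktmbuf_free(m);
	}
}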