mbuf: add namespace to offload flags
diff --git a/drivers/net/thunderx/nicvf_rxtx.c b/drivers/net/thunderx/nicvf_rxtx.c
index 247c356..defa551 100644
--- a/drivers/net/thunderx/nicvf_rxtx.c
+++ b/drivers/net/thunderx/nicvf_rxtx.c
@@ -13,7 +13,7 @@
 #include <rte_common.h>
 #include <rte_cycles.h>
 #include <rte_errno.h>
-#include <rte_ethdev_driver.h>
+#include <ethdev_driver.h>
 #include <rte_ether.h>
 #include <rte_log.h>
 #include <rte_mbuf.h>
@@ -25,7 +25,7 @@
 #include "nicvf_rxtx.h"
 #include "nicvf_logs.h"
 
-static inline void __hot
+static inline void __rte_hot
 fill_sq_desc_header(union sq_entry_t *entry, struct rte_mbuf *pkt)
 {
        /* Local variable sqe to avoid read from sq desc memory*/
@@ -42,10 +42,10 @@ fill_sq_desc_header(union sq_entry_t *entry, struct rte_mbuf *pkt)
        ol_flags = pkt->ol_flags & NICVF_TX_OFFLOAD_MASK;
        if (unlikely(ol_flags)) {
                /* L4 cksum */
-               uint64_t l4_flags = ol_flags & PKT_TX_L4_MASK;
-               if (l4_flags == PKT_TX_TCP_CKSUM)
+               uint64_t l4_flags = ol_flags & RTE_MBUF_F_TX_L4_MASK;
+               if (l4_flags == RTE_MBUF_F_TX_TCP_CKSUM)
                        sqe.hdr.csum_l4 = SEND_L4_CSUM_TCP;
-               else if (l4_flags == PKT_TX_UDP_CKSUM)
+               else if (l4_flags == RTE_MBUF_F_TX_UDP_CKSUM)
                        sqe.hdr.csum_l4 = SEND_L4_CSUM_UDP;
                else
                        sqe.hdr.csum_l4 = SEND_L4_CSUM_DISABLE;
@@ -54,14 +54,22 @@ fill_sq_desc_header(union sq_entry_t *entry, struct rte_mbuf *pkt)
                sqe.hdr.l4_offset = pkt->l3_len + pkt->l2_len;
 
                /* L3 cksum */
-               if (ol_flags & PKT_TX_IP_CKSUM)
+               if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
                        sqe.hdr.csum_l3 = 1;
        }
 
        entry->buff[0] = sqe.buff[0];
 }
 
-void __hot
+static inline void __rte_hot
+fill_sq_desc_header_zero_w1(union sq_entry_t *entry,
+                               struct rte_mbuf *pkt)
+{
+       fill_sq_desc_header(entry, pkt);
+       entry->buff[1] = 0ULL;
+}
+
+void __rte_hot
 nicvf_single_pool_free_xmited_buffers(struct nicvf_txq *sq)
 {
        int j = 0;
@@ -84,7 +92,7 @@ nicvf_single_pool_free_xmited_buffers(struct nicvf_txq *sq)
        NICVF_TX_ASSERT(sq->xmit_bufs >= 0);
 }
 
-void __hot
+void __rte_hot
 nicvf_multi_pool_free_xmited_buffers(struct nicvf_txq *sq)
 {
        uint32_t n = 0;
@@ -107,7 +115,7 @@ nicvf_multi_pool_free_xmited_buffers(struct nicvf_txq *sq)
        NICVF_TX_ASSERT(sq->xmit_bufs >= 0);
 }
 
-static inline uint32_t __hot
+static inline uint32_t __rte_hot
 nicvf_free_tx_desc(struct nicvf_txq *sq)
 {
        return ((sq->head - sq->tail - 1) & sq->qlen_mask);
@@ -116,7 +124,7 @@ nicvf_free_tx_desc(struct nicvf_txq *sq)
 /* Send Header + Packet */
 #define TX_DESC_PER_PKT 2
 
-static inline uint32_t __hot
+static inline uint32_t __rte_hot
 nicvf_free_xmitted_buffers(struct nicvf_txq *sq, struct rte_mbuf **tx_pkts,
                            uint16_t nb_pkts)
 {
@@ -134,7 +142,7 @@ nicvf_free_xmitted_buffers(struct nicvf_txq *sq, struct rte_mbuf **tx_pkts,
        return free_desc;
 }
 
-uint16_t __hot
+uint16_t __rte_hot
 nicvf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 {
        int i;
@@ -173,7 +181,7 @@ nicvf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
        return i;
 }
 
-uint16_t __hot
+uint16_t __rte_hot
 nicvf_xmit_pkts_multiseg(void *tx_queue, struct rte_mbuf **tx_pkts,
                         uint16_t nb_pkts)
 {
@@ -204,7 +212,7 @@ nicvf_xmit_pkts_multiseg(void *tx_queue, struct rte_mbuf **tx_pkts,
                used_bufs += nb_segs;
 
                txbuffs[tail] = NULL;
-               fill_sq_desc_header(desc_ptr + tail, pkt);
+               fill_sq_desc_header_zero_w1(desc_ptr + tail, pkt);
                tail = (tail + 1) & qlen_mask;
 
                txbuffs[tail] = pkt;
@@ -325,19 +333,19 @@ static const uint32_t ptype_table[16][16] __rte_cache_aligned = {
        [L3_OTHER][L4_NVGRE] = RTE_PTYPE_TUNNEL_NVGRE,
 };
 
-static inline uint32_t __hot
+static inline uint32_t __rte_hot
 nicvf_rx_classify_pkt(cqe_rx_word0_t cqe_rx_w0)
 {
        return ptype_table[cqe_rx_w0.l3_type][cqe_rx_w0.l4_type];
 }
 
-static inline uint64_t __hot
+static inline uint64_t __rte_hot
 nicvf_set_olflags(const cqe_rx_word0_t cqe_rx_w0)
 {
        static const uint64_t flag_table[3] __rte_cache_aligned = {
-               PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD,
-               PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_UNKNOWN,
-               PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD,
+               RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD,
+               RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN,
+               RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD,
        };
 
        const uint8_t idx = (cqe_rx_w0.err_opcode == CQE_RX_ERR_L4_CHK) << 1 |
@@ -345,7 +353,7 @@ nicvf_set_olflags(const cqe_rx_word0_t cqe_rx_w0)
        return flag_table[idx];
 }
 
-static inline int __hot
+static inline int __rte_hot
 nicvf_fill_rbdr(struct nicvf_rxq *rxq, int to_fill)
 {
        int i;
@@ -377,15 +385,14 @@ nicvf_fill_rbdr(struct nicvf_rxq *rxq, int to_fill)
                ltail++;
        }
 
-       while (__atomic_load_n(&rbdr->tail, __ATOMIC_RELAXED) != next_tail)
-               rte_pause();
+       rte_wait_until_equal_32(&rbdr->tail, next_tail, __ATOMIC_RELAXED);
 
        __atomic_store_n(&rbdr->tail, ltail, __ATOMIC_RELEASE);
        nicvf_addr_write(door, to_fill);
        return to_fill;
 }
 
-static inline int32_t __hot
+static inline int32_t __rte_hot
 nicvf_rx_pkts_to_process(struct nicvf_rxq *rxq, uint16_t nb_pkts,
                         int32_t available_space)
 {
@@ -396,13 +403,13 @@ nicvf_rx_pkts_to_process(struct nicvf_rxq *rxq, uint16_t nb_pkts,
        return RTE_MIN(nb_pkts, available_space);
 }
 
-static inline void __hot
+static inline void __rte_hot
 nicvf_rx_offload(cqe_rx_word0_t cqe_rx_w0, cqe_rx_word2_t cqe_rx_w2,
                 struct rte_mbuf *pkt)
 {
        if (likely(cqe_rx_w0.rss_alg)) {
                pkt->hash.rss = cqe_rx_w2.rss_tag;
-               pkt->ol_flags |= PKT_RX_RSS_HASH;
+               pkt->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
 
        }
 }
@@ -447,8 +454,8 @@ nicvf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts,
                        pkt->ol_flags = nicvf_set_olflags(cqe_rx_w0);
                if (flag & NICVF_RX_OFFLOAD_VLAN_STRIP) {
                        if (unlikely(cqe_rx_w0.vlan_stripped)) {
-                               pkt->ol_flags |= PKT_RX_VLAN
-                                                       | PKT_RX_VLAN_STRIPPED;
+                               pkt->ol_flags |= RTE_MBUF_F_RX_VLAN
+                                                       | RTE_MBUF_F_RX_VLAN_STRIPPED;
                                pkt->vlan_tci =
                                        rte_cpu_to_be_16(cqe_rx_w2.vlan_tci);
                        }
@@ -477,7 +484,7 @@ nicvf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts,
        return to_process;
 }
 
-uint16_t __hot
+uint16_t __rte_hot
 nicvf_recv_pkts_no_offload(void *rx_queue, struct rte_mbuf **rx_pkts,
                uint16_t nb_pkts)
 {
@@ -485,7 +492,7 @@ nicvf_recv_pkts_no_offload(void *rx_queue, struct rte_mbuf **rx_pkts,
                        NICVF_RX_OFFLOAD_NONE);
 }
 
-uint16_t __hot
+uint16_t __rte_hot
 nicvf_recv_pkts_cksum(void *rx_queue, struct rte_mbuf **rx_pkts,
                uint16_t nb_pkts)
 {
@@ -493,7 +500,7 @@ nicvf_recv_pkts_cksum(void *rx_queue, struct rte_mbuf **rx_pkts,
                        NICVF_RX_OFFLOAD_CKSUM);
 }
 
-uint16_t __hot
+uint16_t __rte_hot
 nicvf_recv_pkts_vlan_strip(void *rx_queue, struct rte_mbuf **rx_pkts,
                uint16_t nb_pkts)
 {
@@ -501,7 +508,7 @@ nicvf_recv_pkts_vlan_strip(void *rx_queue, struct rte_mbuf **rx_pkts,
                        NICVF_RX_OFFLOAD_NONE | NICVF_RX_OFFLOAD_VLAN_STRIP);
 }
 
-uint16_t __hot
+uint16_t __rte_hot
 nicvf_recv_pkts_cksum_vlan_strip(void *rx_queue, struct rte_mbuf **rx_pkts,
                uint16_t nb_pkts)
 {
@@ -509,7 +516,7 @@ nicvf_recv_pkts_cksum_vlan_strip(void *rx_queue, struct rte_mbuf **rx_pkts,
                        NICVF_RX_OFFLOAD_CKSUM | NICVF_RX_OFFLOAD_VLAN_STRIP);
 }
 
-static __rte_always_inline uint16_t __hot
+static __rte_always_inline uint16_t __rte_hot
 nicvf_process_cq_mseg_entry(struct cqe_rx_t *cqe_rx,
                        uint64_t mbuf_phys_off,
                        struct rte_mbuf **rx_pkt, uint8_t rbptr_offset,
@@ -542,8 +549,8 @@ nicvf_process_cq_mseg_entry(struct cqe_rx_t *cqe_rx,
                pkt->ol_flags = nicvf_set_olflags(cqe_rx_w0);
        if (flag & NICVF_RX_OFFLOAD_VLAN_STRIP) {
                if (unlikely(cqe_rx_w0.vlan_stripped)) {
-                       pkt->ol_flags |= PKT_RX_VLAN
-                               | PKT_RX_VLAN_STRIPPED;
+                       pkt->ol_flags |= RTE_MBUF_F_RX_VLAN
+                               | RTE_MBUF_F_RX_VLAN_STRIPPED;
                        pkt->vlan_tci = rte_cpu_to_be_16(cqe_rx_w2.vlan_tci);
                }
        }
@@ -565,7 +572,7 @@ nicvf_process_cq_mseg_entry(struct cqe_rx_t *cqe_rx,
        return nb_segs;
 }
 
-static __rte_always_inline uint16_t __hot
+static __rte_always_inline uint16_t __rte_hot
 nicvf_recv_pkts_multiseg(void *rx_queue, struct rte_mbuf **rx_pkts,
                         uint16_t nb_pkts, const uint32_t flag)
 {
@@ -609,7 +616,7 @@ nicvf_recv_pkts_multiseg(void *rx_queue, struct rte_mbuf **rx_pkts,
        return to_process;
 }
 
-uint16_t __hot
+uint16_t __rte_hot
 nicvf_recv_pkts_multiseg_no_offload(void *rx_queue, struct rte_mbuf **rx_pkts,
                uint16_t nb_pkts)
 {
@@ -617,7 +624,7 @@ nicvf_recv_pkts_multiseg_no_offload(void *rx_queue, struct rte_mbuf **rx_pkts,
                        NICVF_RX_OFFLOAD_NONE);
 }
 
-uint16_t __hot
+uint16_t __rte_hot
 nicvf_recv_pkts_multiseg_cksum(void *rx_queue, struct rte_mbuf **rx_pkts,
                uint16_t nb_pkts)
 {
@@ -625,7 +632,7 @@ nicvf_recv_pkts_multiseg_cksum(void *rx_queue, struct rte_mbuf **rx_pkts,
                        NICVF_RX_OFFLOAD_CKSUM);
 }
 
-uint16_t __hot
+uint16_t __rte_hot
 nicvf_recv_pkts_multiseg_vlan_strip(void *rx_queue, struct rte_mbuf **rx_pkts,
                uint16_t nb_pkts)
 {
@@ -633,7 +640,7 @@ nicvf_recv_pkts_multiseg_vlan_strip(void *rx_queue, struct rte_mbuf **rx_pkts,
                        NICVF_RX_OFFLOAD_NONE | NICVF_RX_OFFLOAD_VLAN_STRIP);
 }
 
-uint16_t __hot
+uint16_t __rte_hot
 nicvf_recv_pkts_multiseg_cksum_vlan_strip(void *rx_queue,
                struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 {
@@ -642,11 +649,11 @@ nicvf_recv_pkts_multiseg_cksum_vlan_strip(void *rx_queue,
 }
 
 uint32_t
-nicvf_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_idx)
+nicvf_dev_rx_queue_count(void *rx_queue)
 {
        struct nicvf_rxq *rxq;
 
-       rxq = dev->data->rx_queues[queue_idx];
+       rxq = rx_queue;
        return nicvf_addr_read(rxq->cq_status) & NICVF_CQ_CQE_COUNT_MASK;
 }
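
Not part of the patch: a minimal application-side sketch of how the renamed mbuf offload
flags are consumed around this PMD's datapath, i.e. the TX flags that
fill_sq_desc_header() translates into csum_l3/SEND_L4_CSUM_TCP and the RX flags that
nicvf_set_olflags() reports. The port/queue ids, header lengths and helper names below
are illustrative assumptions, not code from this driver.

#include <rte_ethdev.h>
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_mbuf.h>

/* Request IPv4 + TCP checksum offload on one packet and transmit it.
 * l2_len/l3_len tell the PMD where the headers start; the flags were
 * formerly PKT_TX_IPV4 | PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM.
 */
static uint16_t
tx_one_with_cksum(uint16_t port_id, uint16_t queue_id, struct rte_mbuf *pkt)
{
	pkt->l2_len = sizeof(struct rte_ether_hdr);
	pkt->l3_len = sizeof(struct rte_ipv4_hdr);
	pkt->ol_flags |= RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM |
			 RTE_MBUF_F_TX_TCP_CKSUM;
	return rte_eth_tx_burst(port_id, queue_id, &pkt, 1);
}

/* Check the RX checksum status flags (formerly PKT_RX_IP_CKSUM_* and
 * PKT_RX_L4_CKSUM_*) set on a received mbuf.
 */
static int
rx_cksum_ok(const struct rte_mbuf *pkt)
{
	return (pkt->ol_flags & RTE_MBUF_F_RX_IP_CKSUM_MASK) ==
			RTE_MBUF_F_RX_IP_CKSUM_GOOD &&
	       (pkt->ol_flags & RTE_MBUF_F_RX_L4_CKSUM_MASK) ==
			RTE_MBUF_F_RX_L4_CKSUM_GOOD;
}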