#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_errno.h>
-#include <rte_ethdev_driver.h>
+#include <ethdev_driver.h>
#include <rte_ether.h>
#include <rte_log.h>
#include <rte_mbuf.h>
#include "nicvf_rxtx.h"
#include "nicvf_logs.h"
-static inline void __hot
+static inline void __rte_hot
fill_sq_desc_header(union sq_entry_t *entry, struct rte_mbuf *pkt)
{
	/* Local variable sqe to avoid a read from SQ descriptor memory */
	union sq_entry_t sqe;
	uint64_t ol_flags;

	sqe.buff[0] = 0;
	ol_flags = pkt->ol_flags & NICVF_TX_OFFLOAD_MASK;
if (unlikely(ol_flags)) {
/* L4 cksum */
- uint64_t l4_flags = ol_flags & PKT_TX_L4_MASK;
- if (l4_flags == PKT_TX_TCP_CKSUM)
+ uint64_t l4_flags = ol_flags & RTE_MBUF_F_TX_L4_MASK;
+ if (l4_flags == RTE_MBUF_F_TX_TCP_CKSUM)
sqe.hdr.csum_l4 = SEND_L4_CSUM_TCP;
- else if (l4_flags == PKT_TX_UDP_CKSUM)
+ else if (l4_flags == RTE_MBUF_F_TX_UDP_CKSUM)
sqe.hdr.csum_l4 = SEND_L4_CSUM_UDP;
else
sqe.hdr.csum_l4 = SEND_L4_CSUM_DISABLE;
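+		/* The L4 header starts right after the L2 and L3 headers */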
sqe.hdr.l4_offset = pkt->l3_len + pkt->l2_len;
/* L3 cksum */
- if (ol_flags & PKT_TX_IP_CKSUM)
+ if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
sqe.hdr.csum_l3 = 1;
}
entry->buff[0] = sqe.buff[0];
}
-void __hot
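+/* Variant of fill_sq_desc_header() that also clears SQE word 1, so a
+ * recycled descriptor slot cannot carry stale data from an earlier packet.
+ */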
+static inline void __rte_hot
+fill_sq_desc_header_zero_w1(union sq_entry_t *entry,
+ struct rte_mbuf *pkt)
+{
+ fill_sq_desc_header(entry, pkt);
+ entry->buff[1] = 0ULL;
+}
+
+void __rte_hot
nicvf_single_pool_free_xmited_buffers(struct nicvf_txq *sq)
{
int j = 0;
NICVF_TX_ASSERT(sq->xmit_bufs >= 0);
}
-void __hot
+void __rte_hot
nicvf_multi_pool_free_xmited_buffers(struct nicvf_txq *sq)
{
uint32_t n = 0;
NICVF_TX_ASSERT(sq->xmit_bufs >= 0);
}
-static inline uint32_t __hot
+static inline uint32_t __rte_hot
nicvf_free_tx_desc(struct nicvf_txq *sq)
{
	return ((sq->head - sq->tail - 1) & sq->qlen_mask);
}

/* Each packet needs two SQ entries: a send header plus one gather (packet) descriptor */
#define TX_DESC_PER_PKT 2
-static inline uint32_t __hot
+static inline uint32_t __rte_hot
nicvf_free_xmitted_buffers(struct nicvf_txq *sq, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
{
return free_desc;
}
-uint16_t __hot
+uint16_t __rte_hot
nicvf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
int i;
return i;
}
-uint16_t __hot
+uint16_t __rte_hot
nicvf_xmit_pkts_multiseg(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
{
used_bufs += nb_segs;
txbuffs[tail] = NULL;
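+			/* Header slot comes first; the zero_w1 variant also wipes SQE word 1 */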
- fill_sq_desc_header(desc_ptr + tail, pkt);
+ fill_sq_desc_header_zero_w1(desc_ptr + tail, pkt);
tail = (tail + 1) & qlen_mask;
txbuffs[tail] = pkt;
[L3_OTHER][L4_NVGRE] = RTE_PTYPE_TUNNEL_NVGRE,
};
-static inline uint32_t __hot
+static inline uint32_t __rte_hot
nicvf_rx_classify_pkt(cqe_rx_word0_t cqe_rx_w0)
{
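+	/* Map the hardware L3/L4 type fields to rte_mbuf packet types */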
return ptype_table[cqe_rx_w0.l3_type][cqe_rx_w0.l4_type];
}
-static inline uint64_t __hot
+static inline uint64_t __rte_hot
nicvf_set_olflags(const cqe_rx_word0_t cqe_rx_w0)
{
static const uint64_t flag_table[3] __rte_cache_aligned = {
- PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD,
- PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_UNKNOWN,
- PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD,
+ RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD,
+ RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN,
+ RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD,
};
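+	/* idx: 0 = no error, 1 = bad IP checksum, 2 = bad L4 checksum */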
	const uint8_t idx = (cqe_rx_w0.err_opcode == CQE_RX_ERR_L4_CHK) << 1 |
			(cqe_rx_w0.err_opcode == CQE_RX_ERR_IP_CHK);
return flag_table[idx];
}
-static inline int __hot
+static inline int __rte_hot
nicvf_fill_rbdr(struct nicvf_rxq *rxq, int to_fill)
{
int i;
ltail++;
}
- while (__atomic_load_n(&rbdr->tail, __ATOMIC_RELAXED) != next_tail)
- rte_pause();
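+	/* Spin until rbdr->tail reaches our reserved next_tail, then publish ltail */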
+ rte_wait_until_equal_32(&rbdr->tail, next_tail, __ATOMIC_RELAXED);
__atomic_store_n(&rbdr->tail, ltail, __ATOMIC_RELEASE);
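+	/* Ring the RBDR doorbell with the number of buffers added */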
nicvf_addr_write(door, to_fill);
return to_fill;
}
-static inline int32_t __hot
+static inline int32_t __rte_hot
nicvf_rx_pkts_to_process(struct nicvf_rxq *rxq, uint16_t nb_pkts,
int32_t available_space)
{
return RTE_MIN(nb_pkts, available_space);
}
-static inline void __hot
+static inline void __rte_hot
nicvf_rx_offload(cqe_rx_word0_t cqe_rx_w0, cqe_rx_word2_t cqe_rx_w2,
struct rte_mbuf *pkt)
{
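+	/* Propagate the hardware RSS hash only when an RSS algorithm was applied */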
if (likely(cqe_rx_w0.rss_alg)) {
pkt->hash.rss = cqe_rx_w2.rss_tag;
- pkt->ol_flags |= PKT_RX_RSS_HASH;
+ pkt->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
}
}
pkt->ol_flags = nicvf_set_olflags(cqe_rx_w0);
if (flag & NICVF_RX_OFFLOAD_VLAN_STRIP) {
if (unlikely(cqe_rx_w0.vlan_stripped)) {
- pkt->ol_flags |= PKT_RX_VLAN
- | PKT_RX_VLAN_STRIPPED;
+ pkt->ol_flags |= RTE_MBUF_F_RX_VLAN
+ | RTE_MBUF_F_RX_VLAN_STRIPPED;
pkt->vlan_tci =
rte_cpu_to_be_16(cqe_rx_w2.vlan_tci);
}
return to_process;
}
-uint16_t __hot
+uint16_t __rte_hot
nicvf_recv_pkts_no_offload(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts)
{
NICVF_RX_OFFLOAD_NONE);
}
-uint16_t __hot
+uint16_t __rte_hot
nicvf_recv_pkts_cksum(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts)
{
NICVF_RX_OFFLOAD_CKSUM);
}
-uint16_t __hot
+uint16_t __rte_hot
nicvf_recv_pkts_vlan_strip(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts)
{
NICVF_RX_OFFLOAD_NONE | NICVF_RX_OFFLOAD_VLAN_STRIP);
}
-uint16_t __hot
+uint16_t __rte_hot
nicvf_recv_pkts_cksum_vlan_strip(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts)
{
NICVF_RX_OFFLOAD_CKSUM | NICVF_RX_OFFLOAD_VLAN_STRIP);
}
-static __rte_always_inline uint16_t __hot
+static __rte_always_inline uint16_t __rte_hot
nicvf_process_cq_mseg_entry(struct cqe_rx_t *cqe_rx,
uint64_t mbuf_phys_off,
struct rte_mbuf **rx_pkt, uint8_t rbptr_offset,
pkt->ol_flags = nicvf_set_olflags(cqe_rx_w0);
if (flag & NICVF_RX_OFFLOAD_VLAN_STRIP) {
if (unlikely(cqe_rx_w0.vlan_stripped)) {
- pkt->ol_flags |= PKT_RX_VLAN
- | PKT_RX_VLAN_STRIPPED;
+ pkt->ol_flags |= RTE_MBUF_F_RX_VLAN
+ | RTE_MBUF_F_RX_VLAN_STRIPPED;
pkt->vlan_tci = rte_cpu_to_be_16(cqe_rx_w2.vlan_tci);
}
}
return nb_segs;
}
-static __rte_always_inline uint16_t __hot
+static __rte_always_inline uint16_t __rte_hot
nicvf_recv_pkts_multiseg(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts, const uint32_t flag)
{
return to_process;
}
-uint16_t __hot
+uint16_t __rte_hot
nicvf_recv_pkts_multiseg_no_offload(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts)
{
NICVF_RX_OFFLOAD_NONE);
}
-uint16_t __hot
+uint16_t __rte_hot
nicvf_recv_pkts_multiseg_cksum(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts)
{
NICVF_RX_OFFLOAD_CKSUM);
}
-uint16_t __hot
+uint16_t __rte_hot
nicvf_recv_pkts_multiseg_vlan_strip(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts)
{
NICVF_RX_OFFLOAD_NONE | NICVF_RX_OFFLOAD_VLAN_STRIP);
}
-uint16_t __hot
+uint16_t __rte_hot
nicvf_recv_pkts_multiseg_cksum_vlan_strip(void *rx_queue,
struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
}
uint32_t
-nicvf_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_idx)
+nicvf_dev_rx_queue_count(void *rx_queue)
{
struct nicvf_rxq *rxq;
- rxq = dev->data->rx_queues[queue_idx];
+ rxq = rx_queue;
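+	/* The CQE count is reported in the low bits of the CQ status register */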
return nicvf_addr_read(rxq->cq_status) & NICVF_CQ_CQE_COUNT_MASK;
}