/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016 Cavium, Inc
 */

#ifndef __THUNDERX_NICVF_RXTX_H__
#define __THUNDERX_NICVF_RXTX_H__

#include <rte_byteorder.h>
#include <rte_ethdev_driver.h>

#define NICVF_RX_OFFLOAD_NONE 0x1
#define NICVF_RX_OFFLOAD_CKSUM 0x2
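
/* mbuf ol_flags handled by the Tx checksum-offload path. */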
#define NICVF_TX_OFFLOAD_MASK (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK)

#define __hot	__attribute__((hot))
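
/*
 * The helpers below are endian-specific. On big-endian hosts
 * nicvf_frag_num() reverses each aligned group of four fragment indexes
 * (0,1,2,3 -> 3,2,1,0; 4,5,6,7 -> 7,6,5,4) to compensate for the byte
 * order of the descriptor words, and fill_sq_desc_gather() composes the
 * entry via its bit-field view before storing it to descriptor memory.
 */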
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
static inline uint16_t __attribute__((const))
nicvf_frag_num(uint16_t i)
{
	return (i & ~3) + 3 - (i & 3);
}
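
/* Fill one send-queue gather sub-descriptor with the type, length and
 * IOVA of a single mbuf segment.
 */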
static inline void __hot
fill_sq_desc_gather(union sq_entry_t *entry, struct rte_mbuf *pkt)
{
	/* Build the entry in a local variable to avoid read-modify-write
	 * accesses to the SQ descriptor memory.
	 */
	union sq_entry_t sqe;

	/* Fill the SQ gather entry */
	sqe.buff[0] = 0; sqe.buff[1] = 0;
	sqe.gather.subdesc_type = SQ_DESC_TYPE_GATHER;
	sqe.gather.ld_type = NIC_SEND_LD_TYPE_E_LDT;
	sqe.gather.size = pkt->data_len;
	sqe.gather.addr = rte_mbuf_data_iova(pkt);

	entry->buff[0] = sqe.buff[0];
	entry->buff[1] = sqe.buff[1];
}

#else
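
/* Little-endian hosts match the descriptor byte order, so fragment
 * indexes need no remapping.
 */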
static inline uint16_t __attribute__((const))
nicvf_frag_num(uint16_t i)
{
	return i;
}
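
/* On little endian the gather sub-descriptor can be built with two plain
 * 64-bit stores: subdesc type, load type and segment length packed into
 * buff[0], the segment IOVA in buff[1].
 */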
static inline void __hot
fill_sq_desc_gather(union sq_entry_t *entry, struct rte_mbuf *pkt)
{
	entry->buff[0] = (uint64_t)SQ_DESC_TYPE_GATHER << 60 |
			 (uint64_t)NIC_SEND_LD_TYPE_E_LDT << 58 |
			 pkt->data_len;
	entry->buff[1] = rte_mbuf_data_iova(pkt);
}
#endif
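
/* Re-arm an mbuf from the precomputed initializer: one 64-bit store
 * rewrites data_off, refcnt, nb_segs and port at once; apad adds the
 * receive alignment padding to the data offset.
 */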
static inline void
nicvf_mbuff_init_update(struct rte_mbuf *pkt, const uint64_t mbuf_init,
			uint16_t apad)
{
	union mbuf_initializer init = {.value = mbuf_init};
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
	init.fields.data_off += apad;
#else
	/* data_off is the lowest field, so adding to the packed value works */
	init.value += apad;
#endif
	*(uint64_t *)(&pkt->rearm_data) = init.value;
}
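
/* Same as nicvf_mbuff_init_update(), but also sets nb_segs on the head
 * mbuf of a multi-segment chain.
 */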
static inline void
nicvf_mbuff_init_mseg_update(struct rte_mbuf *pkt, const uint64_t mbuf_init,
			uint16_t apad, uint16_t nb_segs)
{
	union mbuf_initializer init = {.value = mbuf_init};
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
	init.fields.data_off += apad;
#else
	init.value += apad;
#endif
	init.fields.nb_segs = nb_segs;
	*(uint64_t *)(&pkt->rearm_data) = init.value;
}
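
/* Burst functions implemented in nicvf_rxtx.c; the ethdev layer picks the
 * no-offload/cksum and single/multi-segment variants to match the
 * configured offloads and scatter mode.
 */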
uint32_t nicvf_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_idx);
uint32_t nicvf_dev_rbdr_refill(struct rte_eth_dev *dev, uint16_t queue_idx);

uint16_t nicvf_recv_pkts_no_offload(void *rxq, struct rte_mbuf **rx_pkts,
		uint16_t pkts);
uint16_t nicvf_recv_pkts_cksum(void *rxq, struct rte_mbuf **rx_pkts,
		uint16_t pkts);

uint16_t nicvf_recv_pkts_multiseg_no_offload(void *rx_queue,
		struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
uint16_t nicvf_recv_pkts_multiseg_cksum(void *rx_queue,
		struct rte_mbuf **rx_pkts, uint16_t nb_pkts);

uint16_t nicvf_xmit_pkts(void *txq, struct rte_mbuf **tx_pkts, uint16_t pkts);
uint16_t nicvf_xmit_pkts_multiseg(void *txq, struct rte_mbuf **tx_pkts,
		uint16_t pkts);

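/* Return transmitted mbufs to their pools; the single-pool variant can
 * free in bulk because all mbufs come from one mempool.
 */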
void nicvf_single_pool_free_xmited_buffers(struct nicvf_txq *sq);
void nicvf_multi_pool_free_xmited_buffers(struct nicvf_txq *sq);

#endif /* __THUNDERX_NICVF_RXTX_H__ */