/*
 *   BSD LICENSE
 *
 *   Copyright (C) Cavium, Inc. 2016.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Cavium, Inc nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef __THUNDERX_NICVF_RXTX_H__
#define __THUNDERX_NICVF_RXTX_H__

#include <rte_byteorder.h>
#include <rte_ethdev.h>

#define NICVF_TX_OFFLOAD_MASK (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK)

#ifndef __hot
#define __hot __attribute__((hot))
#endif
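
/*
 * nicvf_frag_num() maps a buffer index to the slot used for that fragment.
 * On big endian the expression (i & ~3) + 3 - (i & 3) reverses the position
 * within each aligned group of four indices (0,1,2,3 -> 3,2,1,0), which
 * presumably matches the order in which the hardware presents fragments;
 * on little endian it is the identity.
 */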
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
static inline uint16_t __attribute__((const))
nicvf_frag_num(uint16_t i)
{
	return (i & ~3) + 3 - (i & 3);
}
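
/*
 * Build one 16-byte send-queue gather subdescriptor for a single mbuf
 * segment: subdescriptor type, load type, segment length and the DMA
 * address of the segment data.
 */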
static inline void __hot
fill_sq_desc_gather(union sq_entry_t *entry, struct rte_mbuf *pkt)
{
	/* Local variable sqe to avoid read from sq desc memory */
	union sq_entry_t sqe;

	/* Fill the SQ gather entry */
	sqe.buff[0] = 0; sqe.buff[1] = 0;
	sqe.gather.subdesc_type = SQ_DESC_TYPE_GATHER;
	sqe.gather.ld_type = NIC_SEND_LD_TYPE_E_LDT;
	sqe.gather.size = pkt->data_len;
	sqe.gather.addr = rte_mbuf_data_dma_addr(pkt);

	entry->buff[0] = sqe.buff[0];
	entry->buff[1] = sqe.buff[1];
}

#else

static inline uint16_t __attribute__((const))
nicvf_frag_num(uint16_t i)
{
	return i;
}
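
/*
 * Little-endian variant: pack the same gather subdescriptor by hand,
 * placing the subdescriptor type in bits 63:60, the load type in bits
 * 59:58 and the segment length in the low bits of the first word; the
 * second word carries the DMA address of the mbuf data.
 */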
static inline void __hot
fill_sq_desc_gather(union sq_entry_t *entry, struct rte_mbuf *pkt)
{
	entry->buff[0] = (uint64_t)SQ_DESC_TYPE_GATHER << 60 |
			 (uint64_t)NIC_SEND_LD_TYPE_E_LDT << 58 |
			 pkt->data_len;
	entry->buff[1] = rte_mbuf_data_dma_addr(pkt);
}
#endif
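
/*
 * Re-initialize the fixed part of an mbuf (the rearm_data region holding
 * data_off, refcnt, nb_segs and port) with a single 64-bit store from a
 * precomputed template. 'apad' is the extra alignment padding applied in
 * front of the packet, so it is folded into data_off; on little endian,
 * adding it to the raw 64-bit value has the same effect since data_off
 * presumably occupies the lowest bytes of the template.
 */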
static inline void
nicvf_mbuff_init_update(struct rte_mbuf *pkt, const uint64_t mbuf_init,
			uint16_t apad)
{
	union mbuf_initializer init = {.value = mbuf_init};
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
	init.fields.data_off += apad;
#else
	init.value += apad;
#endif
	*(uint64_t *)(&pkt->rearm_data) = init.value;
}
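
/*
 * Multi-segment variant: in addition to the padding adjustment above,
 * patch nb_segs in the template so the first mbuf of a chained packet
 * carries the correct segment count.
 */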
static inline void
nicvf_mbuff_init_mseg_update(struct rte_mbuf *pkt, const uint64_t mbuf_init,
			uint16_t apad, uint16_t nb_segs)
{
	union mbuf_initializer init = {.value = mbuf_init};
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
	init.fields.data_off += apad;
#else
	init.value += apad;
#endif
	init.fields.nb_segs = nb_segs;
	*(uint64_t *)(&pkt->rearm_data) = init.value;
}
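
/*
 * Rx/Tx burst handlers and queue housekeeping routines implemented in the
 * corresponding .c file. The driver is expected to hook the single-segment
 * or multi-segment recv/xmit variants into the ethdev's rx_pkt_burst and
 * tx_pkt_burst pointers depending on scatter and offload configuration.
 */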
uint32_t nicvf_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_idx);
uint32_t nicvf_dev_rbdr_refill(struct rte_eth_dev *dev, uint16_t queue_idx);

uint16_t nicvf_recv_pkts(void *rxq, struct rte_mbuf **rx_pkts, uint16_t pkts);
uint16_t nicvf_recv_pkts_multiseg(void *rx_queue, struct rte_mbuf **rx_pkts,
				  uint16_t nb_pkts);

uint16_t nicvf_xmit_pkts(void *txq, struct rte_mbuf **tx_pkts, uint16_t pkts);
uint16_t nicvf_xmit_pkts_multiseg(void *txq, struct rte_mbuf **tx_pkts,
				  uint16_t pkts);

void nicvf_single_pool_free_xmited_buffers(struct nicvf_txq *sq);
void nicvf_multi_pool_free_xmited_buffers(struct nicvf_txq *sq);

#endif /* __THUNDERX_NICVF_RXTX_H__ */