drivers: use SPDX tag for Cavium copyright files
[dpdk.git] / drivers / net / thunderx / nicvf_ethdev.h
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2016 Cavium, Inc
3  */
4
5 #ifndef __THUNDERX_NICVF_ETHDEV_H__
6 #define __THUNDERX_NICVF_ETHDEV_H__
7
8 #include <rte_ethdev.h>
9
/* PMD identification and device register width. */
#define THUNDERX_NICVF_PMD_VERSION      "2.0"
#define THUNDERX_REG_BYTES              8

/* Link state polling period and duplex encodings reported to ethdev. */
#define NICVF_INTR_POLL_INTERVAL_MS     50
#define NICVF_HALF_DUPLEX               0x00
#define NICVF_FULL_DUPLEX               0x01
#define NICVF_UNKNOWN_DUPLEX            0xff

/* RSS hash types supported by pass1 silicon (no tunnel inner-hash). */
#define NICVF_RSS_OFFLOAD_PASS1 ( \
	ETH_RSS_PORT | \
	ETH_RSS_IPV4 | \
	ETH_RSS_NONFRAG_IPV4_TCP | \
	ETH_RSS_NONFRAG_IPV4_UDP | \
	ETH_RSS_IPV6 | \
	ETH_RSS_NONFRAG_IPV6_TCP | \
	ETH_RSS_NONFRAG_IPV6_UDP)

/* Additional tunnel RSS hash types (later silicon revisions). */
#define NICVF_RSS_OFFLOAD_TUNNEL ( \
	ETH_RSS_VXLAN | \
	ETH_RSS_GENEVE | \
	ETH_RSS_NVGRE)

/* Queue free-threshold defaults and upper bounds (descriptor counts). */
#define NICVF_DEFAULT_RX_FREE_THRESH    224
#define NICVF_DEFAULT_TX_FREE_THRESH    224
#define NICVF_TX_FREE_MPOOL_THRESH      16
#define NICVF_MAX_RX_FREE_THRESH        1024
#define NICVF_MAX_TX_FREE_THRESH        1024

#define VLAN_TAG_SIZE                   4	/* 802.3ac tag */
39
40 static inline struct nicvf *
41 nicvf_pmd_priv(struct rte_eth_dev *eth_dev)
42 {
43         return eth_dev->data->dev_private;
44 }
45
46 static inline uint64_t
47 nicvf_mempool_phy_offset(struct rte_mempool *mp)
48 {
49         struct rte_mempool_memhdr *hdr;
50
51         hdr = STAILQ_FIRST(&mp->mem_list);
52         assert(hdr != NULL);
53         return (uint64_t)((uintptr_t)hdr->addr - hdr->iova);
54 }
55
56 static inline uint16_t
57 nicvf_mbuff_meta_length(struct rte_mbuf *mbuf)
58 {
59         return (uint16_t)((uintptr_t)mbuf->buf_addr - (uintptr_t)mbuf);
60 }
61
62 static inline uint16_t
63 nicvf_netdev_qidx(struct nicvf *nic, uint8_t local_qidx)
64 {
65         uint16_t global_qidx = local_qidx;
66
67         if (nic->sqs_mode)
68                 global_qidx += ((nic->sqs_id + 1) * MAX_CMP_QUEUES_PER_QS);
69
70         return global_qidx;
71 }
72
73 /*
74  * Simple phy2virt functions assuming mbufs are in a single huge page
75  * V = P + offset
76  * P = V - offset
77  */
78 static inline uintptr_t
79 nicvf_mbuff_phy2virt(rte_iova_t phy, uint64_t mbuf_phys_off)
80 {
81         return (uintptr_t)(phy + mbuf_phys_off);
82 }
83
84 static inline uintptr_t
85 nicvf_mbuff_virt2phy(uintptr_t virt, uint64_t mbuf_phys_off)
86 {
87         return (rte_iova_t)(virt - mbuf_phys_off);
88 }
89
90 static inline void
91 nicvf_tx_range(struct rte_eth_dev *dev, struct nicvf *nic, uint16_t *tx_start,
92                uint16_t *tx_end)
93 {
94         uint16_t tmp;
95
96         *tx_start = RTE_ALIGN_FLOOR(nicvf_netdev_qidx(nic, 0),
97                                     MAX_SND_QUEUES_PER_QS);
98         tmp = RTE_ALIGN_CEIL(nicvf_netdev_qidx(nic, 0) + 1,
99                              MAX_SND_QUEUES_PER_QS) - 1;
100         *tx_end = dev->data->nb_tx_queues ?
101                 RTE_MIN(tmp, dev->data->nb_tx_queues - 1) : 0;
102 }
103
104 static inline void
105 nicvf_rx_range(struct rte_eth_dev *dev, struct nicvf *nic, uint16_t *rx_start,
106                uint16_t *rx_end)
107 {
108         uint16_t tmp;
109
110         *rx_start = RTE_ALIGN_FLOOR(nicvf_netdev_qidx(nic, 0),
111                                     MAX_RCV_QUEUES_PER_QS);
112         tmp = RTE_ALIGN_CEIL(nicvf_netdev_qidx(nic, 0) + 1,
113                              MAX_RCV_QUEUES_PER_QS) - 1;
114         *rx_end = dev->data->nb_rx_queues ?
115                 RTE_MIN(tmp, dev->data->nb_rx_queues - 1) : 0;
116 }
117
118 #endif /* __THUNDERX_NICVF_ETHDEV_H__  */