drivers/net/dpaa2/dpaa2_ethdev.h
/* SPDX-License-Identifier: BSD-3-Clause
 *
 *   Copyright (c) 2015-2016 Freescale Semiconductor, Inc. All rights reserved.
 *   Copyright 2016-2020 NXP
 *
 */

#ifndef _DPAA2_ETHDEV_H
#define _DPAA2_ETHDEV_H

#include <rte_event_eth_rx_adapter.h>
#include <rte_pmd_dpaa2.h>

#include <dpaa2_hw_pvt.h>

#include <mc/fsl_dpni.h>
#include <mc/fsl_mc_sys.h>

#define DPAA2_MIN_RX_BUF_SIZE 512
#define DPAA2_MAX_RX_PKT_LEN  10240 /* WRIOP support */

#define MAX_TCS                 DPNI_MAX_TC
#define MAX_RX_QUEUES           128
#define MAX_TX_QUEUES           16
#define MAX_DPNI                8

#define DPAA2_RX_DEFAULT_NBDESC 512

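/* Maximum frame length corresponding to the default MTU: RTE_ETHER_MTU of
 * payload plus Ethernet header, CRC and one VLAN tag.
 */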
#define DPAA2_ETH_MAX_LEN (RTE_ETHER_MTU + \
			   RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + \
			   VLAN_TAG_SIZE)

/* Default TC to be used for congestion, distribution etc. configuration. */
#define DPAA2_DEF_TC            0

/* Threshold for a Tx queue to *Enter* Congestion state. */
#define CONG_ENTER_TX_THRESHOLD   512

/* Threshold for a Tx queue to *Exit* Congestion state. */
#define CONG_EXIT_TX_THRESHOLD    480

#define CONG_RETRY_COUNT 18000

/* Rx queue tail drop threshold,
 * currently set to 64 KB of packet data per queue.
 */
#define CONG_THRESHOLD_RX_BYTES_Q  (64 * 1024)
#define CONG_RX_OAL     128

/* Size of the input SMMU-mapped memory required by MC */
#define DIST_PARAM_IOVA_SIZE 256

/* Enable TX congestion control support;
 * default is disabled.
 */
#define DPAA2_TX_CGR_OFF        0x01

/* Disable RX tail drop; default is enabled. */
#define DPAA2_RX_TAILDROP_OFF   0x04

#define DPAA2_RSS_OFFLOAD_ALL ( \
	ETH_RSS_L2_PAYLOAD | \
	ETH_RSS_IP | \
	ETH_RSS_UDP | \
	ETH_RSS_TCP | \
	ETH_RSS_SCTP | \
	ETH_RSS_MPLS)

/* LX2 FRC Parsed values (Little Endian) */
#define DPAA2_PKT_TYPE_ETHER            0x0060
#define DPAA2_PKT_TYPE_IPV4             0x0000
#define DPAA2_PKT_TYPE_IPV6             0x0020
#define DPAA2_PKT_TYPE_IPV4_EXT \
			(0x0001 | DPAA2_PKT_TYPE_IPV4)
#define DPAA2_PKT_TYPE_IPV6_EXT \
			(0x0001 | DPAA2_PKT_TYPE_IPV6)
#define DPAA2_PKT_TYPE_IPV4_TCP \
			(0x000e | DPAA2_PKT_TYPE_IPV4)
#define DPAA2_PKT_TYPE_IPV6_TCP \
			(0x000e | DPAA2_PKT_TYPE_IPV6)
#define DPAA2_PKT_TYPE_IPV4_UDP \
			(0x0010 | DPAA2_PKT_TYPE_IPV4)
#define DPAA2_PKT_TYPE_IPV6_UDP \
			(0x0010 | DPAA2_PKT_TYPE_IPV6)
#define DPAA2_PKT_TYPE_IPV4_SCTP	\
			(0x000f | DPAA2_PKT_TYPE_IPV4)
#define DPAA2_PKT_TYPE_IPV6_SCTP	\
			(0x000f | DPAA2_PKT_TYPE_IPV6)
#define DPAA2_PKT_TYPE_IPV4_ICMP \
			(0x0003 | DPAA2_PKT_TYPE_IPV4_EXT)
#define DPAA2_PKT_TYPE_IPV6_ICMP \
			(0x0003 | DPAA2_PKT_TYPE_IPV6_EXT)
#define DPAA2_PKT_TYPE_VLAN_1           0x0160
#define DPAA2_PKT_TYPE_VLAN_2           0x0260
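
/* Note: the FRC values above are matched against the frame annotation in the
 * Rx fast path to derive the mbuf packet_type; VLAN_1/VLAN_2 distinguish
 * single- and double-tagged frames.
 */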

/* Enable timestamp in mbuf */
extern bool dpaa2_enable_ts[];
extern uint64_t dpaa2_timestamp_rx_dynflag;
extern int dpaa2_timestamp_dynfield_offset;
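/* Usage sketch (illustrative only, not part of this driver's API): the Rx
 * timestamp stored in the mbuf dynamic field can be read roughly as
 *
 *   if (mbuf->ol_flags & dpaa2_timestamp_rx_dynflag)
 *       ts = *RTE_MBUF_DYNFIELD(mbuf, dpaa2_timestamp_dynfield_offset,
 *                               rte_mbuf_timestamp_t *);
 *
 * where RTE_MBUF_DYNFIELD() and rte_mbuf_timestamp_t come from rte_mbuf_dyn.h.
 */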

#define DPAA2_QOS_TABLE_RECONFIGURE     1
#define DPAA2_FS_TABLE_RECONFIGURE      2

#define DPAA2_QOS_TABLE_IPADDR_EXTRACT 4
#define DPAA2_FS_TABLE_IPADDR_EXTRACT 8

#define DPAA2_FLOW_MAX_KEY_SIZE         16

/* Externally defined */
extern const struct rte_flow_ops dpaa2_flow_ops;
extern enum rte_filter_type dpaa2_filter_type;

#define IP_ADDRESS_OFFSET_INVALID (-1)

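/* Key layout derived from a dpkg extract profile; used when programming the
 * QoS and flow-steering (FS) classification tables.
 */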
struct dpaa2_key_info {
	uint8_t key_offset[DPKG_MAX_NUM_OF_EXTRACTS];
	uint8_t key_size[DPKG_MAX_NUM_OF_EXTRACTS];
	/* Special handling for IP address offsets. */
	int ipv4_src_offset;
	int ipv4_dst_offset;
	int ipv6_src_offset;
	int ipv6_dst_offset;
	uint8_t key_total_size;
};

struct dpaa2_key_extract {
	struct dpkg_profile_cfg dpkg;
	struct dpaa2_key_info key_info;
};

struct extract_s {
	struct dpaa2_key_extract qos_key_extract;
	struct dpaa2_key_extract tc_key_extract[MAX_TCS];
	uint64_t qos_extract_param;
	uint64_t tc_extract_param[MAX_TCS];
};

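/* Per-port private data, pointed to by eth_dev->data->dev_private. */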
struct dpaa2_dev_priv {
	void *hw;
	int32_t hw_id;
	int32_t qdid;
	uint16_t token;
	uint8_t nb_tx_queues;
	uint8_t nb_rx_queues;
	uint32_t options;
	void *rx_vq[MAX_RX_QUEUES];
	void *tx_vq[MAX_TX_QUEUES];
	struct dpaa2_bp_list *bp_list; /**< Attached buffer pool list */
	void *tx_conf_vq[MAX_TX_QUEUES];
	uint8_t tx_conf_en;
	uint8_t max_mac_filters;
	uint8_t max_vlan_filters;
	uint8_t num_rx_tc;
	uint16_t qos_entries;
	uint16_t fs_entries;
	uint8_t dist_queues;
	uint8_t flags; /* dpaa2 config flags */
	uint8_t en_ordered;
	uint8_t en_loose_ordered;
	uint8_t max_cgs;
	uint8_t cgid_in_use[MAX_RX_QUEUES];

	struct extract_s extract;

	uint16_t ss_offset;
	uint64_t ss_iova;
	uint64_t ss_param_iova;
	/* Timestamp of the last packet received on the device. */
	uint64_t rx_timestamp;
	/* Timestamp of the last Tx confirmation packet received on the device. */
	uint64_t tx_timestamp;
	/* Pointer to the next tx_conf queue to be processed; it corresponds to
	 * the last packet transmitted.
	 */
	struct dpaa2_queue *next_tx_conf_queue;

	struct rte_eth_dev *eth_dev; /**< Pointer back to holding ethdev */

	LIST_HEAD(, rte_flow) flows; /**< Configured flow rule handles. */
};

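/* Translate an ETH_RSS_* hash-field bitmask (req_dist_set) into a DPKG
 * extract profile that the MC firmware can use for traffic distribution.
 */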
int dpaa2_distset_to_dpkg_profile_cfg(uint64_t req_dist_set,
				      struct dpkg_profile_cfg *kg_cfg);

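/* Set up or remove RSS-based flow distribution for a traffic class on the
 * given ethdev, based on the requested hash fields.
 */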
int dpaa2_setup_flow_dist(struct rte_eth_dev *eth_dev,
		uint64_t req_dist_set, int tc_index);

int dpaa2_remove_flow_dist(struct rte_eth_dev *eth_dev,
			   uint8_t tc_index);

int dpaa2_attach_bp_list(struct dpaa2_dev_priv *priv, void *blist);

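/* Internal hooks used by the dpaa2 event device: attach/detach an ethdev Rx
 * queue to/from a DPCON channel so received packets are delivered as events.
 */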
__rte_internal
int dpaa2_eth_eventq_attach(const struct rte_eth_dev *dev,
		int eth_rx_queue_id,
		struct dpaa2_dpcon_dev *dpcon,
		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf);

__rte_internal
int dpaa2_eth_eventq_detach(const struct rte_eth_dev *dev,
		int eth_rx_queue_id);

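/* Rx/Tx burst functions and the event-processing callbacks installed on the
 * fast path of this PMD.
 */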
uint16_t dpaa2_dev_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts);

uint16_t dpaa2_dev_loopback_rx(void *queue, struct rte_mbuf **bufs,
				uint16_t nb_pkts);

uint16_t dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs,
			       uint16_t nb_pkts);
void dpaa2_dev_process_parallel_event(struct qbman_swp *swp,
				      const struct qbman_fd *fd,
				      const struct qbman_result *dq,
				      struct dpaa2_queue *rxq,
				      struct rte_event *ev);
void dpaa2_dev_process_atomic_event(struct qbman_swp *swp,
				    const struct qbman_fd *fd,
				    const struct qbman_result *dq,
				    struct dpaa2_queue *rxq,
				    struct rte_event *ev);
void dpaa2_dev_process_ordered_event(struct qbman_swp *swp,
				     const struct qbman_fd *fd,
				     const struct qbman_result *dq,
				     struct dpaa2_queue *rxq,
				     struct rte_event *ev);
uint16_t dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts);
uint16_t dpaa2_dev_tx_ordered(void *queue, struct rte_mbuf **bufs,
			      uint16_t nb_pkts);
uint16_t dummy_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts);
void dpaa2_dev_free_eqresp_buf(uint16_t eqresp_ci);
void dpaa2_flow_clean(struct rte_eth_dev *dev);
uint16_t dpaa2_dev_tx_conf(void *queue)  __rte_unused;

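/* IEEE 1588 timesync callbacks exposed through this PMD's eth_dev_ops. */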
int dpaa2_timesync_enable(struct rte_eth_dev *dev);
int dpaa2_timesync_disable(struct rte_eth_dev *dev);
int dpaa2_timesync_read_time(struct rte_eth_dev *dev,
					struct timespec *timestamp);
int dpaa2_timesync_write_time(struct rte_eth_dev *dev,
					const struct timespec *timestamp);
int dpaa2_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);
int dpaa2_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
						struct timespec *timestamp,
						uint32_t flags __rte_unused);
int dpaa2_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
					  struct timespec *timestamp);
#endif /* _DPAA2_ETHDEV_H */