/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2015-2016 Freescale Semiconductor, Inc. All rights reserved.
 * Copyright 2016-2021 NXP
 */
8 #ifndef _DPAA2_ETHDEV_H
9 #define _DPAA2_ETHDEV_H
11 #include <rte_event_eth_rx_adapter.h>
12 #include <rte_pmd_dpaa2.h>
14 #include <dpaa2_hw_pvt.h>
17 #include <mc/fsl_dpni.h>
18 #include <mc/fsl_mc_sys.h>
/* Minimum Rx buffer size (bytes). */
#define DPAA2_MIN_RX_BUF_SIZE 512
/* Maximum Rx frame length (bytes). */
#define DPAA2_MAX_RX_PKT_LEN 10240 /* WRIOP support */
#define NET_DPAA2_PMD_DRIVER_NAME net_dpaa2

/* Maximum traffic classes, taken from the DPNI definition. */
#define MAX_TCS DPNI_MAX_TC
#define MAX_RX_QUEUES 128
#define MAX_TX_QUEUES 16
#define DPAA2_MAX_CHANNELS 16

/* Default number of Rx ring descriptors. */
#define DPAA2_RX_DEFAULT_NBDESC 512

/* Maximum frame length for the default MTU.
 * NOTE(review): the tail of this macro is not visible in this view —
 * confirm the remaining term(s) against the full source.
 */
#define DPAA2_ETH_MAX_LEN (RTE_ETHER_MTU + \
	RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + \
/* Default TC to be used for congestion, distribution etc. configuration. */
#define DPAA2_DEF_TC 0

/* Threshold for a Tx queue to *Enter* Congestion state (frames). */
#define CONG_ENTER_TX_THRESHOLD 512

/* Threshold for a queue to *Exit* Congestion state (frames). */
#define CONG_EXIT_TX_THRESHOLD 480

/* NOTE(review): presumably the retry bound while a Tx queue stays
 * congested — confirm against the Tx path in dpaa2_rxtx.c.
 */
#define CONG_RETRY_COUNT 18000

/* RX queue tail drop threshold
 * currently considering 64 KB packets
 */
#define CONG_THRESHOLD_RX_BYTES_Q (64 * 1024)
/* NOTE(review): presumably the Rx Overhead Accounting Length (bytes)
 * added on top of the byte threshold — confirm with the DPNI API.
 */
#define CONG_RX_OAL 128

/* Size of the input SMMU mapped memory required by MC */
#define DIST_PARAM_IOVA_SIZE 256
/* Enable TX Congestion control support. */
#define DPAA2_TX_CGR_OFF 0x01

/* Disable RX tail drop, default is enable */
#define DPAA2_RX_TAILDROP_OFF 0x04
/* Tx confirmation enabled.
 * NOTE(review): 0x06 sets bits 0x02 and 0x04; bit 0x04 collides with
 * DPAA2_RX_TAILDROP_OFF when these values are OR'd/tested in the same
 * flags bitmask — confirm this value is intentional.
 */
#define DPAA2_TX_CONF_ENABLE 0x06
/* RSS hash types supported for Rx flow distribution.
 * NOTE(review): the remaining RSS type terms and the closing
 * parenthesis of this macro are not visible in this view.
 */
#define DPAA2_RSS_OFFLOAD_ALL ( \
	RTE_ETH_RSS_L2_PAYLOAD | \
	RTE_ETH_RSS_C_VLAN | \
	RTE_ETH_RSS_S_VLAN | \
/* LX2 FRC Parsed values (Little Endian) */
#define DPAA2_PKT_TYPE_ETHER 0x0060
/* L3 base values. */
#define DPAA2_PKT_TYPE_IPV4 0x0000
#define DPAA2_PKT_TYPE_IPV6 0x0020
/* Bit 0x0001 OR'd onto the L3 base value marks IP extension/option
 * headers.
 */
#define DPAA2_PKT_TYPE_IPV4_EXT \
	(0x0001 | DPAA2_PKT_TYPE_IPV4)
#define DPAA2_PKT_TYPE_IPV6_EXT \
	(0x0001 | DPAA2_PKT_TYPE_IPV6)
/* L4 protocol codes OR'd onto the L3 base value. */
#define DPAA2_PKT_TYPE_IPV4_TCP \
	(0x000e | DPAA2_PKT_TYPE_IPV4)
#define DPAA2_PKT_TYPE_IPV6_TCP \
	(0x000e | DPAA2_PKT_TYPE_IPV6)
#define DPAA2_PKT_TYPE_IPV4_UDP \
	(0x0010 | DPAA2_PKT_TYPE_IPV4)
#define DPAA2_PKT_TYPE_IPV6_UDP \
	(0x0010 | DPAA2_PKT_TYPE_IPV6)
#define DPAA2_PKT_TYPE_IPV4_SCTP \
	(0x000f | DPAA2_PKT_TYPE_IPV4)
#define DPAA2_PKT_TYPE_IPV6_SCTP \
	(0x000f | DPAA2_PKT_TYPE_IPV6)
/* ICMP is encoded relative to the EXT (extension header) value. */
#define DPAA2_PKT_TYPE_IPV4_ICMP \
	(0x0003 | DPAA2_PKT_TYPE_IPV4_EXT)
#define DPAA2_PKT_TYPE_IPV6_ICMP \
	(0x0003 | DPAA2_PKT_TYPE_IPV6_EXT)
/* VLAN-tagged Ethernet frames (presumably one and two tags). */
#define DPAA2_PKT_TYPE_VLAN_1 0x0160
#define DPAA2_PKT_TYPE_VLAN_2 0x0260
/* Enable timestamp in mbuf. */
extern bool dpaa2_enable_ts[];
/* Dynamic mbuf flag/field used for Rx timestamping. */
extern uint64_t dpaa2_timestamp_rx_dynflag;
extern int dpaa2_timestamp_dynfield_offset;

/* Flags requesting reconfiguration/IP-address extraction for the
 * QoS and flow-steering classification tables.
 */
#define DPAA2_QOS_TABLE_RECONFIGURE 1
#define DPAA2_FS_TABLE_RECONFIGURE 2
#define DPAA2_QOS_TABLE_IPADDR_EXTRACT 4
#define DPAA2_FS_TABLE_IPADDR_EXTRACT 8

/* Maximum size (bytes) of a single flow classification key. */
#define DPAA2_FLOW_MAX_KEY_SIZE 16

/* Externally defined */
extern const struct rte_flow_ops dpaa2_flow_ops;
extern const struct rte_tm_ops dpaa2_tm_ops;
extern bool dpaa2_enable_err_queue;

/* Sentinel: no valid IP address offset within the extract key. */
#define IP_ADDRESS_OFFSET_INVALID (-1)
/* Layout of an extracted classification key. */
struct dpaa2_key_info {
	uint8_t key_offset[DPKG_MAX_NUM_OF_EXTRACTS]; /* offset of each extract */
	uint8_t key_size[DPKG_MAX_NUM_OF_EXTRACTS];   /* size of each extract */
	/* Special for IP address.
	 * NOTE(review): IP-address-specific fields appear to be elided
	 * from this view — see the full source.
	 */
	uint8_t key_total_size; /* total key size in bytes */
/* Pairs a DPKG extract profile with the resulting key layout. */
struct dpaa2_key_extract {
	struct dpkg_profile_cfg dpkg; /* extract profile handed to MC */
	struct dpaa2_key_info key_info; /* layout of the produced key */
	/* Key extract used for the QoS (TC selection) table. */
	struct dpaa2_key_extract qos_key_extract;
	/* Per-TC key extracts for the flow-steering tables. */
	struct dpaa2_key_extract tc_key_extract[MAX_TCS];
	/* NOTE(review): presumably IOVAs of prepared extract parameters
	 * passed to MC — confirm against the table setup code.
	 */
	uint64_t qos_extract_param;
	uint64_t tc_extract_param[MAX_TCS];
/* Per-device private data for a DPAA2 ethdev. */
struct dpaa2_dev_priv {
	uint8_t nb_tx_queues; /* number of configured Tx queues */
	uint8_t nb_rx_queues; /* number of configured Rx queues */
	void *rx_vq[MAX_RX_QUEUES]; /* Rx queue handles */
	void *tx_vq[MAX_TX_QUEUES]; /* Tx queue handles */
	struct dpaa2_bp_list *bp_list; /**< Attached buffer pool list */
	/* Tx confirmation queue handles (per Tx queue, per channel). */
	void *tx_conf_vq[MAX_TX_QUEUES * DPAA2_MAX_CHANNELS];
	uint8_t flags; /* dpaa2 config flags (DPAA2_TX_CGR_OFF etc.) */
	uint8_t max_mac_filters; /* MAC filter table capacity */
	uint8_t max_vlan_filters; /* VLAN filter table capacity */
	uint16_t qos_entries; /* number of QoS table entries */
	uint8_t num_channels;
	uint8_t en_loose_ordered; /* loose ordering enabled flag */
	/* Per-queue congestion group ID usage markers. */
	uint8_t cgid_in_use[MAX_RX_QUEUES];
	struct extract_s extract; /* QoS/FS key extraction state */
	/* NOTE(review): presumably the IOVA of soft-sequence parameters
	 * shared with MC — confirm against dpaa2_ethdev.c usage.
	 */
	uint64_t ss_param_iova;
	/* Stores timestamp of last received packet on dev. */
	uint64_t rx_timestamp;
	/* Stores timestamp of last received Tx confirmation packet on dev. */
	uint64_t tx_timestamp;
	/* Stores pointer to next tx_conf queue that should be processed;
	 * it corresponds to last packet transmitted.
	 */
	struct dpaa2_queue *next_tx_conf_queue;
	struct rte_eth_dev *eth_dev; /**< Pointer back to holding ethdev */
	uint8_t channel_inuse;
	LIST_HEAD(, rte_flow) flows; /**< Configured flow rule handles. */
	LIST_HEAD(nodes, dpaa2_tm_node) nodes; /* traffic-manager nodes */
	LIST_HEAD(shaper_profiles, dpaa2_tm_shaper_profile) shaper_profiles;
/* Translate a requested distribution bitmask into a DPKG profile. */
int dpaa2_distset_to_dpkg_profile_cfg(uint64_t req_dist_set,
	struct dpkg_profile_cfg *kg_cfg);

/* Configure Rx traffic distribution for a traffic class. */
int dpaa2_setup_flow_dist(struct rte_eth_dev *eth_dev,
	uint64_t req_dist_set, int tc_index);

/* Remove a configured Rx traffic distribution.
 * NOTE(review): trailing parameter(s) of this prototype are not
 * visible in this view.
 */
int dpaa2_remove_flow_dist(struct rte_eth_dev *eth_dev,

/* Attach a buffer pool list to this device. */
int dpaa2_attach_bp_list(struct dpaa2_dev_priv *priv, void *blist);

/* Attach/detach an Rx queue to/from the event Rx adapter.
 * NOTE(review): a middle parameter of the attach prototype appears to
 * be elided from this view.
 */
int dpaa2_eth_eventq_attach(const struct rte_eth_dev *dev,
	struct dpaa2_dpcon_dev *dpcon,
	const struct rte_event_eth_rx_adapter_queue_conf *queue_conf);

int dpaa2_eth_eventq_detach(const struct rte_eth_dev *dev,
	int eth_rx_queue_id);
/* Rx burst functions. */
uint16_t dpaa2_dev_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts);

/* NOTE(review): the trailing parameter(s) of the two prototypes below
 * are not visible in this view.
 */
uint16_t dpaa2_dev_loopback_rx(void *queue, struct rte_mbuf **bufs,
uint16_t dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs,

/* Event-dequeue callbacks for parallel/atomic/ordered scheduling. */
void dpaa2_dev_process_parallel_event(struct qbman_swp *swp,
	const struct qbman_fd *fd,
	const struct qbman_result *dq,
	struct dpaa2_queue *rxq,
	struct rte_event *ev);
void dpaa2_dev_process_atomic_event(struct qbman_swp *swp,
	const struct qbman_fd *fd,
	const struct qbman_result *dq,
	struct dpaa2_queue *rxq,
	struct rte_event *ev);
void dpaa2_dev_process_ordered_event(struct qbman_swp *swp,
	const struct qbman_fd *fd,
	const struct qbman_result *dq,
	struct dpaa2_queue *rxq,
	struct rte_event *ev);

/* Tx burst functions.
 * NOTE(review): the trailing parameter(s) of dpaa2_dev_tx_ordered are
 * not visible in this view.
 */
uint16_t dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts);
uint16_t dpaa2_dev_tx_ordered(void *queue, struct rte_mbuf **bufs,
uint16_t dpaa2_dev_tx_multi_txq_ordered(void **queue,
	struct rte_mbuf **bufs, uint16_t nb_pkts);

/* Dummy Tx burst stub. */
uint16_t dummy_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts);
/* Free an enqueue-response buffer identified by its consumer index. */
void dpaa2_dev_free_eqresp_buf(uint16_t eqresp_ci);
/* Release flow rules configured on the device. */
void dpaa2_flow_clean(struct rte_eth_dev *dev);
/* Process the Tx confirmation queue. */
uint16_t dpaa2_dev_tx_conf(void *queue) __rte_unused;
/* Non-zero when the given ethdev is a DPAA2 device. */
int dpaa2_dev_is_dpaa2(struct rte_eth_dev *dev);

/* IEEE 1588 timesync operations. */
int dpaa2_timesync_enable(struct rte_eth_dev *dev);
int dpaa2_timesync_disable(struct rte_eth_dev *dev);
int dpaa2_timesync_read_time(struct rte_eth_dev *dev,
	struct timespec *timestamp);
int dpaa2_timesync_write_time(struct rte_eth_dev *dev,
	const struct timespec *timestamp);
int dpaa2_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);
int dpaa2_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
	struct timespec *timestamp,
	uint32_t flags __rte_unused);
int dpaa2_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
	struct timespec *timestamp);
270 #endif /* _DPAA2_ETHDEV_H */