1 /* SPDX-License-Identifier: BSD-3-Clause
3 * Copyright (c) 2015-2016 Freescale Semiconductor, Inc. All rights reserved.
4 * Copyright 2016-2021 NXP
 */
8 #ifndef _DPAA2_ETHDEV_H
9 #define _DPAA2_ETHDEV_H
11 #include <rte_event_eth_rx_adapter.h>
12 #include <rte_pmd_dpaa2.h>
14 #include <dpaa2_hw_pvt.h>
17 #include <mc/fsl_dpni.h>
18 #include <mc/fsl_mc_sys.h>
20 #define DPAA2_MIN_RX_BUF_SIZE 512
21 #define DPAA2_MAX_RX_PKT_LEN 10240 /*WRIOP support*/
22 #define NET_DPAA2_PMD_DRIVER_NAME net_dpaa2
24 #define MAX_TCS DPNI_MAX_TC
25 #define MAX_RX_QUEUES 128
26 #define MAX_TX_QUEUES 16
29 #define DPAA2_RX_DEFAULT_NBDESC 512
31 #define DPAA2_ETH_MAX_LEN (RTE_ETHER_MTU + \
32 RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + \
35 /* Default TC to be used for congestion, distribution etc. configuration. */
36 #define DPAA2_DEF_TC 0
38 /* Threshold for a Tx queue to *Enter* Congestion state. */
40 #define CONG_ENTER_TX_THRESHOLD 512
42 /* Threshold for a queue to *Exit* Congestion state. */
44 #define CONG_EXIT_TX_THRESHOLD 480
46 #define CONG_RETRY_COUNT 18000
48 /* RX queue tail drop threshold
49 * currently considering 64 KB packets
 */
51 #define CONG_THRESHOLD_RX_BYTES_Q (64 * 1024)
52 #define CONG_RX_OAL 128
54 /* Size of the input SMMU mapped memory required by MC */
55 #define DIST_PARAM_IOVA_SIZE 256
57 /* Enable TX Congestion control support */
60 #define DPAA2_TX_CGR_OFF 0x01
62 /* Disable RX tail drop; enabled by default */
63 #define DPAA2_RX_TAILDROP_OFF 0x04
64 /* Tx confirmation enabled */
65 #define DPAA2_TX_CONF_ENABLE 0x08
67 #define DPAA2_RSS_OFFLOAD_ALL ( \
68 ETH_RSS_L2_PAYLOAD | \
75 /* LX2 FRC Parsed values (Little Endian) */
76 #define DPAA2_PKT_TYPE_ETHER 0x0060
77 #define DPAA2_PKT_TYPE_IPV4 0x0000
78 #define DPAA2_PKT_TYPE_IPV6 0x0020
79 #define DPAA2_PKT_TYPE_IPV4_EXT \
80 (0x0001 | DPAA2_PKT_TYPE_IPV4)
81 #define DPAA2_PKT_TYPE_IPV6_EXT \
82 (0x0001 | DPAA2_PKT_TYPE_IPV6)
83 #define DPAA2_PKT_TYPE_IPV4_TCP \
84 (0x000e | DPAA2_PKT_TYPE_IPV4)
85 #define DPAA2_PKT_TYPE_IPV6_TCP \
86 (0x000e | DPAA2_PKT_TYPE_IPV6)
87 #define DPAA2_PKT_TYPE_IPV4_UDP \
88 (0x0010 | DPAA2_PKT_TYPE_IPV4)
89 #define DPAA2_PKT_TYPE_IPV6_UDP \
90 (0x0010 | DPAA2_PKT_TYPE_IPV6)
91 #define DPAA2_PKT_TYPE_IPV4_SCTP \
92 (0x000f | DPAA2_PKT_TYPE_IPV4)
93 #define DPAA2_PKT_TYPE_IPV6_SCTP \
94 (0x000f | DPAA2_PKT_TYPE_IPV6)
95 #define DPAA2_PKT_TYPE_IPV4_ICMP \
96 (0x0003 | DPAA2_PKT_TYPE_IPV4_EXT)
97 #define DPAA2_PKT_TYPE_IPV6_ICMP \
98 (0x0003 | DPAA2_PKT_TYPE_IPV6_EXT)
99 #define DPAA2_PKT_TYPE_VLAN_1 0x0160
100 #define DPAA2_PKT_TYPE_VLAN_2 0x0260
102 /* Enable timestamp in mbuf */
103 extern bool dpaa2_enable_ts[];
104 extern uint64_t dpaa2_timestamp_rx_dynflag;
105 extern int dpaa2_timestamp_dynfield_offset;
107 #define DPAA2_QOS_TABLE_RECONFIGURE 1
108 #define DPAA2_FS_TABLE_RECONFIGURE 2
110 #define DPAA2_QOS_TABLE_IPADDR_EXTRACT 4
111 #define DPAA2_FS_TABLE_IPADDR_EXTRACT 8
113 #define DPAA2_FLOW_MAX_KEY_SIZE 16
115 /* Externally defined */
116 extern const struct rte_flow_ops dpaa2_flow_ops;
118 extern const struct rte_tm_ops dpaa2_tm_ops;
120 extern bool dpaa2_enable_err_queue;
122 #define IP_ADDRESS_OFFSET_INVALID (-1)
124 struct dpaa2_key_info {
125 uint8_t key_offset[DPKG_MAX_NUM_OF_EXTRACTS];
126 uint8_t key_size[DPKG_MAX_NUM_OF_EXTRACTS];
127 /* Special for IP address. */
132 uint8_t key_total_size;
135 struct dpaa2_key_extract {
136 struct dpkg_profile_cfg dpkg;
137 struct dpaa2_key_info key_info;
141 struct dpaa2_key_extract qos_key_extract;
142 struct dpaa2_key_extract tc_key_extract[MAX_TCS];
143 uint64_t qos_extract_param;
144 uint64_t tc_extract_param[MAX_TCS];
147 struct dpaa2_dev_priv {
152 uint8_t nb_tx_queues;
153 uint8_t nb_rx_queues;
155 void *rx_vq[MAX_RX_QUEUES];
156 void *tx_vq[MAX_TX_QUEUES];
157 struct dpaa2_bp_list *bp_list; /**<Attached buffer pool list */
158 void *tx_conf_vq[MAX_TX_QUEUES];
160 uint8_t flags; /*dpaa2 config flags */
161 uint8_t max_mac_filters;
162 uint8_t max_vlan_filters;
164 uint16_t qos_entries;
168 uint8_t en_loose_ordered;
170 uint8_t cgid_in_use[MAX_RX_QUEUES];
172 struct extract_s extract;
176 uint64_t ss_param_iova;
177 /* Stores timestamp of last packet received on dev */
178 uint64_t rx_timestamp;
179 /* Stores timestamp of last Tx confirmation packet received on dev */
180 uint64_t tx_timestamp;
181 /* stores pointer to next tx_conf queue that should be processed,
182 * it corresponds to last packet transmitted
 */
184 struct dpaa2_queue *next_tx_conf_queue;
186 struct rte_eth_dev *eth_dev; /**< Pointer back to holding ethdev */
188 LIST_HEAD(, rte_flow) flows; /**< Configured flow rule handles. */
189 LIST_HEAD(nodes, dpaa2_tm_node) nodes;
190 LIST_HEAD(shaper_profiles, dpaa2_tm_shaper_profile) shaper_profiles;
193 int dpaa2_distset_to_dpkg_profile_cfg(uint64_t req_dist_set,
194 struct dpkg_profile_cfg *kg_cfg);
196 int dpaa2_setup_flow_dist(struct rte_eth_dev *eth_dev,
197 uint64_t req_dist_set, int tc_index);
199 int dpaa2_remove_flow_dist(struct rte_eth_dev *eth_dev,
202 int dpaa2_attach_bp_list(struct dpaa2_dev_priv *priv, void *blist);
205 int dpaa2_eth_eventq_attach(const struct rte_eth_dev *dev,
207 struct dpaa2_dpcon_dev *dpcon,
208 const struct rte_event_eth_rx_adapter_queue_conf *queue_conf);
211 int dpaa2_eth_eventq_detach(const struct rte_eth_dev *dev,
212 int eth_rx_queue_id);
214 uint16_t dpaa2_dev_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts);
216 uint16_t dpaa2_dev_loopback_rx(void *queue, struct rte_mbuf **bufs,
219 uint16_t dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs,
221 void dpaa2_dev_process_parallel_event(struct qbman_swp *swp,
222 const struct qbman_fd *fd,
223 const struct qbman_result *dq,
224 struct dpaa2_queue *rxq,
225 struct rte_event *ev);
226 void dpaa2_dev_process_atomic_event(struct qbman_swp *swp,
227 const struct qbman_fd *fd,
228 const struct qbman_result *dq,
229 struct dpaa2_queue *rxq,
230 struct rte_event *ev);
231 void dpaa2_dev_process_ordered_event(struct qbman_swp *swp,
232 const struct qbman_fd *fd,
233 const struct qbman_result *dq,
234 struct dpaa2_queue *rxq,
235 struct rte_event *ev);
236 uint16_t dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts);
237 uint16_t dpaa2_dev_tx_ordered(void *queue, struct rte_mbuf **bufs,
239 uint16_t dummy_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts);
240 void dpaa2_dev_free_eqresp_buf(uint16_t eqresp_ci);
241 void dpaa2_flow_clean(struct rte_eth_dev *dev);
242 uint16_t dpaa2_dev_tx_conf(void *queue) __rte_unused;
243 int dpaa2_dev_is_dpaa2(struct rte_eth_dev *dev);
245 int dpaa2_timesync_enable(struct rte_eth_dev *dev);
246 int dpaa2_timesync_disable(struct rte_eth_dev *dev);
247 int dpaa2_timesync_read_time(struct rte_eth_dev *dev,
248 struct timespec *timestamp);
249 int dpaa2_timesync_write_time(struct rte_eth_dev *dev,
250 const struct timespec *timestamp);
251 int dpaa2_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);
252 int dpaa2_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
253 struct timespec *timestamp,
254 uint32_t flags __rte_unused);
255 int dpaa2_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
256 struct timespec *timestamp);
257 #endif /* _DPAA2_ETHDEV_H */