1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright (c) 2016 - 2018 Cavium Inc.
7 #ifndef __ECORE_L2_API_H__
8 #define __ECORE_L2_API_H__
10 #include "ecore_status.h"
11 #include "ecore_sp_api.h"
12 #include "ecore_int_api.h"
14 #ifndef __EXTRACT__LINUX__
18 ECORE_RSS_IPV4_TCP = 0x4,
19 ECORE_RSS_IPV6_TCP = 0x8,
20 ECORE_RSS_IPV4_UDP = 0x10,
21 ECORE_RSS_IPV6_UDP = 0x20,
24 /* Should be the same as ETH_RSS_IND_TABLE_ENTRIES_NUM */
25 #define ECORE_RSS_IND_TABLE_SIZE 128
26 #define ECORE_RSS_KEY_SIZE 10 /* size in 32b chunks */
29 struct ecore_queue_start_common_params {
30 /* Should always be relative to entity sending this. */
34 /* Relative, but relevant only for PFs */
37 struct ecore_sb_info *p_sb;
41 struct ecore_rxq_start_ret_params {
42 void OSAL_IOMEM *p_prod;
46 struct ecore_txq_start_ret_params {
47 void OSAL_IOMEM *p_doorbell;
51 struct ecore_rss_params {
55 u8 update_rss_capabilities;
56 u8 update_rss_ind_table;
59 u8 rss_table_size_log; /* The table size is 2 ^ rss_table_size_log */
61 /* Indirection table consist of rx queue handles */
62 void *rss_ind_table[ECORE_RSS_IND_TABLE_SIZE];
63 u32 rss_key[ECORE_RSS_KEY_SIZE];
66 struct ecore_sge_tpa_params {
67 u8 max_buffers_per_cqe;
72 u8 tpa_ipv4_tunn_en_flg;
73 u8 tpa_ipv6_tunn_en_flg;
75 u8 update_tpa_param_flg;
77 u8 tpa_hdr_data_split_flg;
78 u8 tpa_gro_consistent_flg;
81 u16 tpa_min_size_to_start;
82 u16 tpa_min_size_to_cont;
85 enum ecore_filter_opcode {
89 ECORE_FILTER_REPLACE, /* Delete all MACs and add new one instead */
90 ECORE_FILTER_FLUSH, /* Removes all filters */
93 enum ecore_filter_ucast_type {
96 ECORE_FILTER_MAC_VLAN,
97 ECORE_FILTER_INNER_MAC,
98 ECORE_FILTER_INNER_VLAN,
99 ECORE_FILTER_INNER_PAIR,
100 ECORE_FILTER_INNER_MAC_VNI_PAIR,
101 ECORE_FILTER_MAC_VNI_PAIR,
103 ECORE_FILTER_UNUSED, /* @DPDK */
106 struct ecore_filter_ucast {
107 enum ecore_filter_opcode opcode;
108 enum ecore_filter_ucast_type type;
112 u8 vport_to_remove_from;
113 unsigned char mac[ETH_ALEN];
119 struct ecore_filter_mcast {
120 /* MOVE is not supported for multicast */
121 enum ecore_filter_opcode opcode;
123 u8 vport_to_remove_from;
125 #define ECORE_MAX_MC_ADDRS 64
126 unsigned char mac[ECORE_MAX_MC_ADDRS][ETH_ALEN];
129 struct ecore_filter_accept_flags {
130 u8 update_rx_mode_config;
131 u8 update_tx_mode_config;
134 #define ECORE_ACCEPT_NONE 0x01
135 #define ECORE_ACCEPT_UCAST_MATCHED 0x02
136 #define ECORE_ACCEPT_UCAST_UNMATCHED 0x04
137 #define ECORE_ACCEPT_MCAST_MATCHED 0x08
138 #define ECORE_ACCEPT_MCAST_UNMATCHED 0x10
139 #define ECORE_ACCEPT_BCAST 0x20
142 enum ecore_filter_config_mode {
143 ECORE_FILTER_CONFIG_MODE_DISABLE,
144 ECORE_FILTER_CONFIG_MODE_5_TUPLE,
145 ECORE_FILTER_CONFIG_MODE_L4_PORT,
146 ECORE_FILTER_CONFIG_MODE_IP_DEST,
147 ECORE_FILTER_CONFIG_MODE_TUNN_TYPE,
148 ECORE_FILTER_CONFIG_MODE_IP_SRC,
151 struct ecore_arfs_config_params {
156 enum ecore_filter_config_mode mode;
159 /* Add / remove / move / remove-all unicast MAC-VLAN filters.
160 * FW will assert in the following cases, so driver should take care...:
161 * 1. Adding a filter to a full table.
162 * 2. Adding a filter which already exists on that vport.
163 * 3. Removing a filter which doesn't exist.
167 ecore_filter_ucast_cmd(struct ecore_dev *p_dev,
168 struct ecore_filter_ucast *p_filter_cmd,
169 enum spq_mode comp_mode,
170 struct ecore_spq_comp_cb *p_comp_data);
172 /* Add / remove / move multicast MAC filters. */
174 ecore_filter_mcast_cmd(struct ecore_dev *p_dev,
175 struct ecore_filter_mcast *p_filter_cmd,
176 enum spq_mode comp_mode,
177 struct ecore_spq_comp_cb *p_comp_data);
179 /* Set "accept" filters */
181 ecore_filter_accept_cmd(
182 struct ecore_dev *p_dev,
184 struct ecore_filter_accept_flags accept_flags,
185 u8 update_accept_any_vlan,
187 enum spq_mode comp_mode,
188 struct ecore_spq_comp_cb *p_comp_data);
191 * @brief ecore_eth_rx_queue_start - RX Queue Start Ramrod
193 * This ramrod initializes an RX Queue for a VPort. An Assert is generated if
194 * the VPort ID is not currently initialized.
 * @param p_params Inputs; Relative for PF [SB being an exception]
199 * @param bd_max_bytes Maximum bytes that can be placed on a BD
200 * @param bd_chain_phys_addr Physical address of BDs for receive.
201 * @param cqe_pbl_addr Physical address of the CQE PBL Table.
202 * @param cqe_pbl_size Size of the CQE PBL Table
203 * @param p_ret_params Pointed struct to be filled with outputs.
205 * @return enum _ecore_status_t
208 ecore_eth_rx_queue_start(struct ecore_hwfn *p_hwfn,
210 struct ecore_queue_start_common_params *p_params,
212 dma_addr_t bd_chain_phys_addr,
213 dma_addr_t cqe_pbl_addr,
215 struct ecore_rxq_start_ret_params *p_ret_params);
218 * @brief ecore_eth_rx_queue_stop - This ramrod closes an Rx queue
221 * @param p_rxq Handler of queue to close
222 * @param eq_completion_only If True completion will be on
223 * EQe, if False completion will be
224 * on EQe if p_hwfn opaque
225 * different from the RXQ opaque
227 * @param cqe_completion If True completion will be
229 * @return enum _ecore_status_t
232 ecore_eth_rx_queue_stop(struct ecore_hwfn *p_hwfn,
234 bool eq_completion_only,
235 bool cqe_completion);
238 * @brief - TX Queue Start Ramrod
240 * This ramrod initializes a TX Queue for a VPort. An Assert is generated if
241 * the VPort is not currently initialized.
246 * @param tc traffic class to use with this L2 txq
247 * @param pbl_addr address of the pbl array
248 * @param pbl_size number of entries in pbl
249 * @param p_ret_params Pointer to fill the return parameters in.
251 * @return enum _ecore_status_t
254 ecore_eth_tx_queue_start(struct ecore_hwfn *p_hwfn,
256 struct ecore_queue_start_common_params *p_params,
260 struct ecore_txq_start_ret_params *p_ret_params);
263 * @brief ecore_eth_tx_queue_stop - closes a Tx queue
266 * @param p_txq - handle to Tx queue needed to be closed
268 * @return enum _ecore_status_t
270 enum _ecore_status_t ecore_eth_tx_queue_stop(struct ecore_hwfn *p_hwfn,
273 enum ecore_tpa_mode {
280 struct ecore_sp_vport_start_params {
281 enum ecore_tpa_mode tpa_mode;
282 bool remove_inner_vlan; /* Inner VLAN removal is enabled */
283 bool tx_switching; /* Vport supports tx-switching */
284 bool handle_ptp_pkts; /* Handle PTP packets */
285 bool only_untagged; /* Untagged pkt control */
286 bool drop_ttl0; /* Drop packets with TTL = 0 */
287 u8 max_buffers_per_cqe;
290 u8 vport_id; /* VPORT ID */
291 u16 mtu; /* VPORT MTU */
292 bool zero_placement_offset;
296 /* Strict behavior on transmission errors */
297 bool b_err_illegal_vlan_mode;
298 bool b_err_illegal_inband_mode;
299 bool b_err_vlan_insert_with_inband;
300 bool b_err_small_pkt;
302 bool b_err_anti_spoof;
303 bool b_err_ctrl_frame;
307 * @brief ecore_sp_vport_start -
 * This ramrod initializes a VPort. An Assert is generated if the Function ID
310 * of the VPort is not enabled.
313 * @param p_params VPORT start params
315 * @return enum _ecore_status_t
318 ecore_sp_vport_start(struct ecore_hwfn *p_hwfn,
319 struct ecore_sp_vport_start_params *p_params);
321 struct ecore_sp_vport_update_params {
324 u8 update_vport_active_rx_flg;
325 u8 vport_active_rx_flg;
326 u8 update_vport_active_tx_flg;
327 u8 vport_active_tx_flg;
328 u8 update_inner_vlan_removal_flg;
329 u8 inner_vlan_removal_flg;
330 u8 silent_vlan_removal_flg;
331 u8 update_default_vlan_enable_flg;
332 u8 default_vlan_enable_flg;
333 u8 update_default_vlan_flg;
335 u8 update_tx_switching_flg;
337 u8 update_approx_mcast_flg;
338 u8 update_anti_spoofing_en_flg;
340 u8 update_accept_any_vlan_flg;
343 struct ecore_rss_params *rss_params;
344 struct ecore_filter_accept_flags accept_flags;
345 struct ecore_sge_tpa_params *sge_tpa_params;
346 /* MTU change - notice this requires the vport to be disabled.
347 * If non-zero, value would be used.
353 * @brief ecore_sp_vport_update -
355 * This ramrod updates the parameters of the VPort. Every field can be updated
356 * independently, according to flags.
358 * This ramrod is also used to set the VPort state to active after creation.
359 * An Assert is generated if the VPort does not contain an RX queue.
364 * @return enum _ecore_status_t
367 ecore_sp_vport_update(struct ecore_hwfn *p_hwfn,
368 struct ecore_sp_vport_update_params *p_params,
369 enum spq_mode comp_mode,
370 struct ecore_spq_comp_cb *p_comp_data);
372 * @brief ecore_sp_vport_stop -
374 * This ramrod closes a VPort after all its RX and TX queues are terminated.
375 * An Assert is generated if any queues are left open.
379 * @param vport_id VPort ID
381 * @return enum _ecore_status_t
383 enum _ecore_status_t ecore_sp_vport_stop(struct ecore_hwfn *p_hwfn,
388 ecore_sp_eth_filter_ucast(struct ecore_hwfn *p_hwfn,
390 struct ecore_filter_ucast *p_filter_cmd,
391 enum spq_mode comp_mode,
392 struct ecore_spq_comp_cb *p_comp_data);
395 * @brief ecore_sp_rx_eth_queues_update -
397 * This ramrod updates an RX queue. It is used for setting the active state
398 * of the queue and updating the TPA and SGE parameters.
400 * @note Final phase API.
403 * @param pp_rxq_handlers An array of queue handlers to be updated.
404 * @param num_rxqs number of queues to update.
405 * @param complete_cqe_flg Post completion to the CQE Ring if set
406 * @param complete_event_flg Post completion to the Event Ring if set
410 * @return enum _ecore_status_t
414 ecore_sp_eth_rx_queues_update(struct ecore_hwfn *p_hwfn,
415 void **pp_rxq_handlers,
418 u8 complete_event_flg,
419 enum spq_mode comp_mode,
420 struct ecore_spq_comp_cb *p_comp_data);
/**
 * @brief __ecore_get_vport_stats - Read HW statistics for one hwfn into @stats.
 *
 * @param p_hwfn
 * @param p_ptt            PTT window already acquired by the caller
 * @param stats            output structure to fill
 * @param statistics_bin   statistics bin to query
 *                         NOTE(review): bin assignment is not visible in this
 *                         header -- confirm against the implementation.
 * @param b_get_port_stats if true, port-level statistics are collected as well
 */
void __ecore_get_vport_stats(struct ecore_hwfn *p_hwfn,
			     struct ecore_ptt *p_ptt,
			     struct ecore_eth_stats *stats,
			     u16 statistics_bin, bool b_get_port_stats);
/**
 * @brief ecore_get_vport_stats - Fill @stats with the device's vport
 *        statistics. NOTE(review): presumably aggregates over all hwfns of
 *        @p_dev -- confirm in the implementation.
 *
 * @param p_dev
 * @param stats  output structure to fill
 */
void ecore_get_vport_stats(struct ecore_dev *p_dev,
			   struct ecore_eth_stats *stats);
430 void ecore_reset_vport_stats(struct ecore_dev *p_dev);
433 *@brief ecore_arfs_mode_configure -
 *Enable or disable rfs mode. It must accept at least one of tcp or udp true
 *and at least one of ipv4 or ipv6 true to enable rfs mode.
440 *@param p_cfg_params arfs mode configuration parameters.
443 void ecore_arfs_mode_configure(struct ecore_hwfn *p_hwfn,
444 struct ecore_ptt *p_ptt,
445 struct ecore_arfs_config_params *p_cfg_params);
448 * @brief - ecore_configure_rfs_ntuple_filter
450 * This ramrod should be used to add or remove arfs hw filter
 * @param p_cb Used for ECORE_SPQ_MODE_CB, where client would initialize
 *             it with cookie and callback function address, if not
 *             using this mode then client must pass NULL.
 * @param p_addr p_addr is an actual packet header that needs to be
 *               filtered. It has to be mapped with IO to read prior to
 *               calling this, [contains 4 tuples - src ip, dest ip,
 *               src port, dest port].
 * @param length length of p_addr header up to past the transport header.
 * @param qid receive packet will be directed to this queue.
 * @param b_is_add flag to add or remove filter.
467 ecore_configure_rfs_ntuple_filter(struct ecore_hwfn *p_hwfn,
468 struct ecore_spq_comp_cb *p_cb,
469 dma_addr_t p_addr, u16 length,
470 u16 qid, u8 vport_id,