/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <inttypes.h>
#include <assert.h>

#include <rte_eal.h>
#include <rte_string_fns.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_alarm.h>
#include <rte_dev.h>
#include <rte_eth_ctrl.h>
#include <rte_tailq.h>
#include <rte_hash_crc.h>

#include "i40e_logs.h"
#include "base/i40e_prototype.h"
#include "base/i40e_adminq_cmd.h"
#include "base/i40e_type.h"
#include "base/i40e_register.h"
#include "base/i40e_dcb.h"
#include "base/i40e_diag.h"
#include "i40e_ethdev.h"
#include "i40e_rxtx.h"
#include "i40e_pf.h"
#include "i40e_regs.h"
#include "rte_pmd_i40e.h"

#define ETH_I40E_FLOATING_VEB_ARG	"enable_floating_veb"
#define ETH_I40E_FLOATING_VEB_LIST_ARG	"floating_veb_list"

#define I40E_CLEAR_PXE_WAIT_MS     200

/* Maximum number of capability elements */
#define I40E_MAX_CAP_ELE_NUM       128

/* Wait count and interval */
#define I40E_CHK_Q_ENA_COUNT       1000
#define I40E_CHK_Q_ENA_INTERVAL_US 1000

/* Maximum number of VSIs */
#define I40E_MAX_NUM_VSIS          (384UL)

#define I40E_PRE_TX_Q_CFG_WAIT_US  10 /* 10 us */

/* Flow control default timer */
#define I40E_DEFAULT_PAUSE_TIME 0xFFFFU

/* Flow control enable fwd bit */
#define I40E_PRTMAC_FWD_CTRL    0x00000001

/* Receive Packet Buffer size */
#define I40E_RXPBSIZE (968 * 1024)

/* Kilobytes shift */
#define I40E_KILOSHIFT 10

/* Flow control default high water */
#define I40E_DEFAULT_HIGH_WATER (0xF2000 >> I40E_KILOSHIFT)

/* Flow control default low water */
#define I40E_DEFAULT_LOW_WATER  (0xF2000 >> I40E_KILOSHIFT)
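
/*
 * Note: both watermarks default to 0xF2000 bytes >> I40E_KILOSHIFT, i.e.
 * 968 KB, which is the entire Rx packet buffer (I40E_RXPBSIZE) expressed
 * in kilobytes.
 */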

/* Receive Average Packet Size in Bytes */
#define I40E_PACKET_AVERAGE_SIZE 128

/* Mask of PF interrupt causes */
#define I40E_PFINT_ICR0_ENA_MASK ( \
		I40E_PFINT_ICR0_ENA_ECC_ERR_MASK | \
		I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK | \
		I40E_PFINT_ICR0_ENA_GRST_MASK | \
		I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK | \
		I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK | \
		I40E_PFINT_ICR0_ENA_HMC_ERR_MASK | \
		I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK | \
		I40E_PFINT_ICR0_ENA_VFLR_MASK | \
		I40E_PFINT_ICR0_ENA_ADMINQ_MASK)

#define I40E_FLOW_TYPES ( \
	(1UL << RTE_ETH_FLOW_FRAG_IPV4) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) | \
	(1UL << RTE_ETH_FLOW_FRAG_IPV6) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV6_TCP) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV6_UDP) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER) | \
	(1UL << RTE_ETH_FLOW_L2_PAYLOAD))
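
/*
 * Bitmap of the rte_eth flow types this PMD recognizes; it is consulted
 * when validating RSS hash and flow director input-set configuration.
 */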

/* Additional timesync values. */
#define I40E_PTP_40GB_INCVAL     0x0199999999ULL
#define I40E_PTP_10GB_INCVAL     0x0333333333ULL
#define I40E_PTP_1GB_INCVAL      0x2000000000ULL
#define I40E_PRTTSYN_TSYNENA     0x80000000
#define I40E_PRTTSYN_TSYNTYPE    0x0e000000
#define I40E_CYCLECOUNTER_MASK   0xffffffffffffffffULL
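
/*
 * The INCVAL constants above are the per-tick increments loaded into the
 * PTP cycle counter: the faster the link clock, the smaller the increment,
 * so timestamps advance at the same real-time rate at every link speed.
 */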

/**
 * Below are values for writing un-exposed registers suggested
 * by silicon experts
 */
/* Destination MAC address */
#define I40E_REG_INSET_L2_DMAC                   0xE000000000000000ULL
/* Source MAC address */
#define I40E_REG_INSET_L2_SMAC                   0x1C00000000000000ULL
/* Outer (S-Tag) VLAN tag in the outer L2 header */
#define I40E_REG_INSET_L2_OUTER_VLAN             0x0000000004000000ULL
/* Inner (C-Tag) or single VLAN tag in the outer L2 header */
#define I40E_REG_INSET_L2_INNER_VLAN             0x0080000000000000ULL
/* Single VLAN tag in the inner L2 header */
#define I40E_REG_INSET_TUNNEL_VLAN               0x0100000000000000ULL
/* Source IPv4 address */
#define I40E_REG_INSET_L3_SRC_IP4                0x0001800000000000ULL
/* Destination IPv4 address */
#define I40E_REG_INSET_L3_DST_IP4                0x0000001800000000ULL
/* Source IPv4 address for X722 */
#define I40E_X722_REG_INSET_L3_SRC_IP4           0x0006000000000000ULL
/* Destination IPv4 address for X722 */
#define I40E_X722_REG_INSET_L3_DST_IP4           0x0000060000000000ULL
/* IPv4 Protocol for X722 */
#define I40E_X722_REG_INSET_L3_IP4_PROTO         0x0010000000000000ULL
/* IPv4 Time to Live for X722 */
#define I40E_X722_REG_INSET_L3_IP4_TTL           0x0010000000000000ULL
/* IPv4 Type of Service (TOS) */
#define I40E_REG_INSET_L3_IP4_TOS                0x0040000000000000ULL
/* IPv4 Protocol */
#define I40E_REG_INSET_L3_IP4_PROTO              0x0004000000000000ULL
/* IPv4 Time to Live */
#define I40E_REG_INSET_L3_IP4_TTL                0x0004000000000000ULL
/* Source IPv6 address */
#define I40E_REG_INSET_L3_SRC_IP6                0x0007F80000000000ULL
/* Destination IPv6 address */
#define I40E_REG_INSET_L3_DST_IP6                0x000007F800000000ULL
/* IPv6 Traffic Class (TC) */
#define I40E_REG_INSET_L3_IP6_TC                 0x0040000000000000ULL
/* IPv6 Next Header */
#define I40E_REG_INSET_L3_IP6_NEXT_HDR           0x0008000000000000ULL
/* IPv6 Hop Limit */
#define I40E_REG_INSET_L3_IP6_HOP_LIMIT          0x0008000000000000ULL
/* Source L4 port */
#define I40E_REG_INSET_L4_SRC_PORT               0x0000000400000000ULL
/* Destination L4 port */
#define I40E_REG_INSET_L4_DST_PORT               0x0000000200000000ULL
/* SCTP verification tag */
#define I40E_REG_INSET_L4_SCTP_VERIFICATION_TAG  0x0000000180000000ULL
/* Inner destination MAC address (MAC-in-UDP/MAC-in-GRE) */
#define I40E_REG_INSET_TUNNEL_L2_INNER_DST_MAC   0x0000000001C00000ULL
/* Source port of tunneling UDP */
#define I40E_REG_INSET_TUNNEL_L4_UDP_SRC_PORT    0x0000000000200000ULL
/* Destination port of tunneling UDP */
#define I40E_REG_INSET_TUNNEL_L4_UDP_DST_PORT    0x0000000000100000ULL
/* UDP Tunneling ID, NVGRE/GRE key */
#define I40E_REG_INSET_TUNNEL_ID                 0x00000000000C0000ULL
/* Last ether type */
#define I40E_REG_INSET_LAST_ETHER_TYPE           0x0000000000004000ULL
/* Tunneling outer destination IPv4 address */
#define I40E_REG_INSET_TUNNEL_L3_DST_IP4         0x00000000000000C0ULL
/* Tunneling outer destination IPv6 address */
#define I40E_REG_INSET_TUNNEL_L3_DST_IP6         0x0000000000003FC0ULL
/* 1st word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD1        0x0000000000002000ULL
/* 2nd word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD2        0x0000000000001000ULL
/* 3rd word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD3        0x0000000000000800ULL
/* 4th word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD4        0x0000000000000400ULL
/* 5th word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD5        0x0000000000000200ULL
/* 6th word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD6        0x0000000000000100ULL
/* 7th word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD7        0x0000000000000080ULL
/* 8th word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD8        0x0000000000000040ULL
/* all 8 words flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORDS        0x0000000000003FC0ULL
#define I40E_REG_INSET_MASK_DEFAULT              0x0000000000000000ULL

#define I40E_TRANSLATE_INSET 0
#define I40E_TRANSLATE_REG   1

#define I40E_INSET_IPV4_TOS_MASK        0x0009FF00UL
#define I40E_INSET_IPv4_TTL_MASK        0x000D00FFUL
#define I40E_INSET_IPV4_PROTO_MASK      0x000DFF00UL
#define I40E_INSET_IPV6_TC_MASK         0x0009F00FUL
#define I40E_INSET_IPV6_HOP_LIMIT_MASK  0x000CFF00UL
#define I40E_INSET_IPV6_NEXT_HDR_MASK   0x000C00FFUL

/* PCI offset for querying capability */
#define PCI_DEV_CAP_REG            0xA4
/* PCI offset for enabling/disabling Extended Tag */
#define PCI_DEV_CTRL_REG           0xA8
/* Bit mask of Extended Tag capability */
#define PCI_DEV_CAP_EXT_TAG_MASK   0x20
/* Bit shift of Extended Tag enable/disable */
#define PCI_DEV_CTRL_EXT_TAG_SHIFT 8
/* Bit mask of Extended Tag enable/disable */
#define PCI_DEV_CTRL_EXT_TAG_MASK  (1 << PCI_DEV_CTRL_EXT_TAG_SHIFT)
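
/*
 * The offsets above are accessed through PCI configuration space so the
 * driver can turn on PCIe Extended Tag, which allows more outstanding
 * requests and thus better throughput on some platforms.
 */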

static int eth_i40e_dev_init(struct rte_eth_dev *eth_dev);
static int eth_i40e_dev_uninit(struct rte_eth_dev *eth_dev);
static int i40e_dev_configure(struct rte_eth_dev *dev);
static int i40e_dev_start(struct rte_eth_dev *dev);
static void i40e_dev_stop(struct rte_eth_dev *dev);
static void i40e_dev_close(struct rte_eth_dev *dev);
static int i40e_dev_reset(struct rte_eth_dev *dev);
static void i40e_dev_promiscuous_enable(struct rte_eth_dev *dev);
static void i40e_dev_promiscuous_disable(struct rte_eth_dev *dev);
static void i40e_dev_allmulticast_enable(struct rte_eth_dev *dev);
static void i40e_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int i40e_dev_set_link_up(struct rte_eth_dev *dev);
static int i40e_dev_set_link_down(struct rte_eth_dev *dev);
static int i40e_dev_stats_get(struct rte_eth_dev *dev,
			      struct rte_eth_stats *stats);
static int i40e_dev_xstats_get(struct rte_eth_dev *dev,
			       struct rte_eth_xstat *xstats, unsigned n);
static int i40e_dev_xstats_get_names(struct rte_eth_dev *dev,
				     struct rte_eth_xstat_name *xstats_names,
				     unsigned limit);
static void i40e_dev_stats_reset(struct rte_eth_dev *dev);
static int i40e_dev_queue_stats_mapping_set(struct rte_eth_dev *dev,
					    uint16_t queue_id,
					    uint8_t stat_idx,
					    uint8_t is_rx);
static int i40e_fw_version_get(struct rte_eth_dev *dev,
			       char *fw_version, size_t fw_size);
static void i40e_dev_info_get(struct rte_eth_dev *dev,
			      struct rte_eth_dev_info *dev_info);
static int i40e_vlan_filter_set(struct rte_eth_dev *dev,
				uint16_t vlan_id, int on);
static int i40e_vlan_tpid_set(struct rte_eth_dev *dev,
			      enum rte_vlan_type vlan_type,
			      uint16_t tpid);
static int i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static void i40e_vlan_strip_queue_set(struct rte_eth_dev *dev,
				      uint16_t queue, int on);
static int i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on);
static int i40e_dev_led_on(struct rte_eth_dev *dev);
static int i40e_dev_led_off(struct rte_eth_dev *dev);
static int i40e_flow_ctrl_get(struct rte_eth_dev *dev,
			      struct rte_eth_fc_conf *fc_conf);
static int i40e_flow_ctrl_set(struct rte_eth_dev *dev,
			      struct rte_eth_fc_conf *fc_conf);
static int i40e_priority_flow_ctrl_set(struct rte_eth_dev *dev,
				       struct rte_eth_pfc_conf *pfc_conf);
static int i40e_macaddr_add(struct rte_eth_dev *dev,
			    struct ether_addr *mac_addr,
			    uint32_t index, uint32_t pool);
static void i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index);
static int i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
				    struct rte_eth_rss_reta_entry64 *reta_conf,
				    uint16_t reta_size);
static int i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
				   struct rte_eth_rss_reta_entry64 *reta_conf,
				   uint16_t reta_size);
static int i40e_get_cap(struct i40e_hw *hw);
static int i40e_pf_parameter_init(struct rte_eth_dev *dev);
static int i40e_pf_setup(struct i40e_pf *pf);
static int i40e_dev_rxtx_init(struct i40e_pf *pf);
static int i40e_vmdq_setup(struct rte_eth_dev *dev);
static int i40e_dcb_setup(struct rte_eth_dev *dev);
static void i40e_stat_update_32(struct i40e_hw *hw, uint32_t reg,
		bool offset_loaded, uint64_t *offset, uint64_t *stat);
static void i40e_stat_update_48(struct i40e_hw *hw,
				uint32_t hireg,
				uint32_t loreg,
				bool offset_loaded,
				uint64_t *offset,
				uint64_t *stat);
static void i40e_pf_config_irq0(struct i40e_hw *hw, bool no_queue);
static void i40e_dev_interrupt_handler(void *param);
static int i40e_res_pool_init(struct i40e_res_pool_info *pool,
			      uint32_t base, uint32_t num);
static void i40e_res_pool_destroy(struct i40e_res_pool_info *pool);
static int i40e_res_pool_free(struct i40e_res_pool_info *pool,
			      uint32_t base);
static int i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
			       uint16_t num);
static int i40e_dev_init_vlan(struct rte_eth_dev *dev);
static int i40e_veb_release(struct i40e_veb *veb);
static struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf,
				       struct i40e_vsi *vsi);
static int i40e_pf_config_mq_rx(struct i40e_pf *pf);
static int i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on);
static inline int i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
					     struct i40e_macvlan_filter *mv_f,
					     int num, struct ether_addr *addr);
static int i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi);
static int i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
				    struct rte_eth_rss_conf *rss_conf);
static int i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
				      struct rte_eth_rss_conf *rss_conf);
static int i40e_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
					struct rte_eth_udp_tunnel *udp_tunnel);
static int i40e_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
					struct rte_eth_udp_tunnel *udp_tunnel);
static void i40e_filter_input_set_init(struct i40e_pf *pf);
static int i40e_ethertype_filter_handle(struct rte_eth_dev *dev,
					enum rte_filter_op filter_op,
					void *arg);
static int i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
				enum rte_filter_type filter_type,
				enum rte_filter_op filter_op,
				void *arg);
static int i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
				 struct rte_eth_dcb_info *dcb_info);
static int i40e_dev_sync_phy_type(struct i40e_hw *hw);
static void i40e_configure_registers(struct i40e_hw *hw);
static void i40e_hw_init(struct rte_eth_dev *dev);
static int i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi);
static enum i40e_status_code i40e_aq_del_mirror_rule(struct i40e_hw *hw,
						     uint16_t seid,
						     uint16_t rule_type,
						     uint16_t *entries,
						     uint16_t count,
						     uint16_t rule_id);
static int i40e_mirror_rule_set(struct rte_eth_dev *dev,
				struct rte_eth_mirror_conf *mirror_conf,
				uint8_t sw_id, uint8_t on);
static int i40e_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t sw_id);

static int i40e_timesync_enable(struct rte_eth_dev *dev);
static int i40e_timesync_disable(struct rte_eth_dev *dev);
static int i40e_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
					   struct timespec *timestamp,
					   uint32_t flags);
static int i40e_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
					   struct timespec *timestamp);
static void i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw);

static int i40e_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);

static int i40e_timesync_read_time(struct rte_eth_dev *dev,
				   struct timespec *timestamp);
static int i40e_timesync_write_time(struct rte_eth_dev *dev,
				    const struct timespec *timestamp);

static int i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
					 uint16_t queue_id);
static int i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
					  uint16_t queue_id);

static int i40e_get_regs(struct rte_eth_dev *dev,
			 struct rte_dev_reg_info *regs);

static int i40e_get_eeprom_length(struct rte_eth_dev *dev);

static int i40e_get_eeprom(struct rte_eth_dev *dev,
			   struct rte_dev_eeprom_info *eeprom);

static void i40e_set_default_mac_addr(struct rte_eth_dev *dev,
				      struct ether_addr *mac_addr);

static int i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);

static int i40e_ethertype_filter_convert(
	const struct rte_eth_ethertype_filter *input,
	struct i40e_ethertype_filter *filter);
static int i40e_sw_ethertype_filter_insert(struct i40e_pf *pf,
				struct i40e_ethertype_filter *filter);

static int i40e_tunnel_filter_convert(
	struct i40e_aqc_add_rm_cloud_filt_elem_ext *cld_filter,
	struct i40e_tunnel_filter *tunnel_filter);
static int i40e_sw_tunnel_filter_insert(struct i40e_pf *pf,
				struct i40e_tunnel_filter *tunnel_filter);
static int i40e_cloud_filter_qinq_create(struct i40e_pf *pf);

static void i40e_ethertype_filter_restore(struct i40e_pf *pf);
static void i40e_tunnel_filter_restore(struct i40e_pf *pf);
static void i40e_filter_restore(struct i40e_pf *pf);
static void i40e_notify_all_vfs_link_status(struct rte_eth_dev *dev);

int i40e_logtype_init;
int i40e_logtype_driver;

static const struct rte_pci_id pci_id_i40e_map[] = {
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QEMU) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_B) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_C) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_A) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_20G_KR2) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_20G_KR2_A) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T4) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_B) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_SFP28) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_A0) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_X722) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_X722) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_X722) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_1G_BASE_T_X722) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_X722) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_I_X722) },
	{ .vendor_id = 0, /* sentinel */ },
};

static const struct eth_dev_ops i40e_eth_dev_ops = {
	.dev_configure                = i40e_dev_configure,
	.dev_start                    = i40e_dev_start,
	.dev_stop                     = i40e_dev_stop,
	.dev_close                    = i40e_dev_close,
	.dev_reset                    = i40e_dev_reset,
	.promiscuous_enable           = i40e_dev_promiscuous_enable,
	.promiscuous_disable          = i40e_dev_promiscuous_disable,
	.allmulticast_enable          = i40e_dev_allmulticast_enable,
	.allmulticast_disable         = i40e_dev_allmulticast_disable,
	.dev_set_link_up              = i40e_dev_set_link_up,
	.dev_set_link_down            = i40e_dev_set_link_down,
	.link_update                  = i40e_dev_link_update,
	.stats_get                    = i40e_dev_stats_get,
	.xstats_get                   = i40e_dev_xstats_get,
	.xstats_get_names             = i40e_dev_xstats_get_names,
	.stats_reset                  = i40e_dev_stats_reset,
	.xstats_reset                 = i40e_dev_stats_reset,
	.queue_stats_mapping_set      = i40e_dev_queue_stats_mapping_set,
	.fw_version_get               = i40e_fw_version_get,
	.dev_infos_get                = i40e_dev_info_get,
	.dev_supported_ptypes_get     = i40e_dev_supported_ptypes_get,
	.vlan_filter_set              = i40e_vlan_filter_set,
	.vlan_tpid_set                = i40e_vlan_tpid_set,
	.vlan_offload_set             = i40e_vlan_offload_set,
	.vlan_strip_queue_set         = i40e_vlan_strip_queue_set,
	.vlan_pvid_set                = i40e_vlan_pvid_set,
	.rx_queue_start               = i40e_dev_rx_queue_start,
	.rx_queue_stop                = i40e_dev_rx_queue_stop,
	.tx_queue_start               = i40e_dev_tx_queue_start,
	.tx_queue_stop                = i40e_dev_tx_queue_stop,
	.rx_queue_setup               = i40e_dev_rx_queue_setup,
	.rx_queue_intr_enable         = i40e_dev_rx_queue_intr_enable,
	.rx_queue_intr_disable        = i40e_dev_rx_queue_intr_disable,
	.rx_queue_release             = i40e_dev_rx_queue_release,
	.rx_queue_count               = i40e_dev_rx_queue_count,
	.rx_descriptor_done           = i40e_dev_rx_descriptor_done,
	.rx_descriptor_status         = i40e_dev_rx_descriptor_status,
	.tx_descriptor_status         = i40e_dev_tx_descriptor_status,
	.tx_queue_setup               = i40e_dev_tx_queue_setup,
	.tx_queue_release             = i40e_dev_tx_queue_release,
	.dev_led_on                   = i40e_dev_led_on,
	.dev_led_off                  = i40e_dev_led_off,
	.flow_ctrl_get                = i40e_flow_ctrl_get,
	.flow_ctrl_set                = i40e_flow_ctrl_set,
	.priority_flow_ctrl_set       = i40e_priority_flow_ctrl_set,
	.mac_addr_add                 = i40e_macaddr_add,
	.mac_addr_remove              = i40e_macaddr_remove,
	.reta_update                  = i40e_dev_rss_reta_update,
	.reta_query                   = i40e_dev_rss_reta_query,
	.rss_hash_update              = i40e_dev_rss_hash_update,
	.rss_hash_conf_get            = i40e_dev_rss_hash_conf_get,
	.udp_tunnel_port_add          = i40e_dev_udp_tunnel_port_add,
	.udp_tunnel_port_del          = i40e_dev_udp_tunnel_port_del,
	.filter_ctrl                  = i40e_dev_filter_ctrl,
	.rxq_info_get                 = i40e_rxq_info_get,
	.txq_info_get                 = i40e_txq_info_get,
	.mirror_rule_set              = i40e_mirror_rule_set,
	.mirror_rule_reset            = i40e_mirror_rule_reset,
	.timesync_enable              = i40e_timesync_enable,
	.timesync_disable             = i40e_timesync_disable,
	.timesync_read_rx_timestamp   = i40e_timesync_read_rx_timestamp,
	.timesync_read_tx_timestamp   = i40e_timesync_read_tx_timestamp,
	.get_dcb_info                 = i40e_dev_get_dcb_info,
	.timesync_adjust_time         = i40e_timesync_adjust_time,
	.timesync_read_time           = i40e_timesync_read_time,
	.timesync_write_time          = i40e_timesync_write_time,
	.get_reg                      = i40e_get_regs,
	.get_eeprom_length            = i40e_get_eeprom_length,
	.get_eeprom                   = i40e_get_eeprom,
	.mac_addr_set                 = i40e_set_default_mac_addr,
	.mtu_set                      = i40e_dev_mtu_set,
	.tm_ops_get                   = i40e_tm_ops_get,
};

/* Store statistics names and their offsets in the stats structure */
struct rte_i40e_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned offset;
};

static const struct rte_i40e_xstats_name_off rte_i40e_stats_strings[] = {
	{"rx_unicast_packets", offsetof(struct i40e_eth_stats, rx_unicast)},
	{"rx_multicast_packets", offsetof(struct i40e_eth_stats, rx_multicast)},
	{"rx_broadcast_packets", offsetof(struct i40e_eth_stats, rx_broadcast)},
	{"rx_dropped", offsetof(struct i40e_eth_stats, rx_discards)},
	{"rx_unknown_protocol_packets", offsetof(struct i40e_eth_stats,
		rx_unknown_protocol)},
	{"tx_unicast_packets", offsetof(struct i40e_eth_stats, tx_unicast)},
	{"tx_multicast_packets", offsetof(struct i40e_eth_stats, tx_multicast)},
	{"tx_broadcast_packets", offsetof(struct i40e_eth_stats, tx_broadcast)},
	{"tx_dropped", offsetof(struct i40e_eth_stats, tx_discards)},
};

#define I40E_NB_ETH_XSTATS (sizeof(rte_i40e_stats_strings) / \
		sizeof(rte_i40e_stats_strings[0]))
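
/*
 * The xstats tables drive a generic copy loop: for each entry, the
 * reporting code adds the recorded offset to the base of the matching
 * stats structure and reads one uint64_t counter, so adding a statistic
 * only requires adding a table row.
 */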

static const struct rte_i40e_xstats_name_off rte_i40e_hw_port_strings[] = {
	{"tx_link_down_dropped", offsetof(struct i40e_hw_port_stats,
		tx_dropped_link_down)},
	{"rx_crc_errors", offsetof(struct i40e_hw_port_stats, crc_errors)},
	{"rx_illegal_byte_errors", offsetof(struct i40e_hw_port_stats,
		illegal_bytes)},
	{"rx_error_bytes", offsetof(struct i40e_hw_port_stats, error_bytes)},
	{"mac_local_errors", offsetof(struct i40e_hw_port_stats,
		mac_local_faults)},
	{"mac_remote_errors", offsetof(struct i40e_hw_port_stats,
		mac_remote_faults)},
	{"rx_length_errors", offsetof(struct i40e_hw_port_stats,
		rx_length_errors)},
	{"tx_xon_packets", offsetof(struct i40e_hw_port_stats, link_xon_tx)},
	{"rx_xon_packets", offsetof(struct i40e_hw_port_stats, link_xon_rx)},
	{"tx_xoff_packets", offsetof(struct i40e_hw_port_stats, link_xoff_tx)},
	{"rx_xoff_packets", offsetof(struct i40e_hw_port_stats, link_xoff_rx)},
	{"rx_size_64_packets", offsetof(struct i40e_hw_port_stats, rx_size_64)},
	{"rx_size_65_to_127_packets", offsetof(struct i40e_hw_port_stats,
		rx_size_127)},
	{"rx_size_128_to_255_packets", offsetof(struct i40e_hw_port_stats,
		rx_size_255)},
	{"rx_size_256_to_511_packets", offsetof(struct i40e_hw_port_stats,
		rx_size_511)},
	{"rx_size_512_to_1023_packets", offsetof(struct i40e_hw_port_stats,
		rx_size_1023)},
	{"rx_size_1024_to_1522_packets", offsetof(struct i40e_hw_port_stats,
		rx_size_1522)},
	{"rx_size_1523_to_max_packets", offsetof(struct i40e_hw_port_stats,
		rx_size_big)},
	{"rx_undersized_errors", offsetof(struct i40e_hw_port_stats,
		rx_undersize)},
	{"rx_oversize_errors", offsetof(struct i40e_hw_port_stats,
		rx_oversize)},
	{"rx_mac_short_dropped", offsetof(struct i40e_hw_port_stats,
		mac_short_packet_dropped)},
	{"rx_fragmented_errors", offsetof(struct i40e_hw_port_stats,
		rx_fragments)},
	{"rx_jabber_errors", offsetof(struct i40e_hw_port_stats, rx_jabber)},
	{"tx_size_64_packets", offsetof(struct i40e_hw_port_stats, tx_size_64)},
	{"tx_size_65_to_127_packets", offsetof(struct i40e_hw_port_stats,
		tx_size_127)},
	{"tx_size_128_to_255_packets", offsetof(struct i40e_hw_port_stats,
		tx_size_255)},
	{"tx_size_256_to_511_packets", offsetof(struct i40e_hw_port_stats,
		tx_size_511)},
	{"tx_size_512_to_1023_packets", offsetof(struct i40e_hw_port_stats,
		tx_size_1023)},
	{"tx_size_1024_to_1522_packets", offsetof(struct i40e_hw_port_stats,
		tx_size_1522)},
	{"tx_size_1523_to_max_packets", offsetof(struct i40e_hw_port_stats,
		tx_size_big)},
	{"rx_flow_director_atr_match_packets",
		offsetof(struct i40e_hw_port_stats, fd_atr_match)},
	{"rx_flow_director_sb_match_packets",
		offsetof(struct i40e_hw_port_stats, fd_sb_match)},
	{"tx_low_power_idle_status", offsetof(struct i40e_hw_port_stats,
		tx_lpi_status)},
	{"rx_low_power_idle_status", offsetof(struct i40e_hw_port_stats,
		rx_lpi_status)},
	{"tx_low_power_idle_count", offsetof(struct i40e_hw_port_stats,
		tx_lpi_count)},
	{"rx_low_power_idle_count", offsetof(struct i40e_hw_port_stats,
		rx_lpi_count)},
};

#define I40E_NB_HW_PORT_XSTATS (sizeof(rte_i40e_hw_port_strings) / \
		sizeof(rte_i40e_hw_port_strings[0]))

static const struct rte_i40e_xstats_name_off rte_i40e_rxq_prio_strings[] = {
	{"xon_packets", offsetof(struct i40e_hw_port_stats,
		priority_xon_rx)},
	{"xoff_packets", offsetof(struct i40e_hw_port_stats,
		priority_xoff_rx)},
};

#define I40E_NB_RXQ_PRIO_XSTATS (sizeof(rte_i40e_rxq_prio_strings) / \
		sizeof(rte_i40e_rxq_prio_strings[0]))

static const struct rte_i40e_xstats_name_off rte_i40e_txq_prio_strings[] = {
	{"xon_packets", offsetof(struct i40e_hw_port_stats,
		priority_xon_tx)},
	{"xoff_packets", offsetof(struct i40e_hw_port_stats,
		priority_xoff_tx)},
	{"xon_to_xoff_packets", offsetof(struct i40e_hw_port_stats,
		priority_xon_2_xoff)},
};

#define I40E_NB_TXQ_PRIO_XSTATS (sizeof(rte_i40e_txq_prio_strings) / \
		sizeof(rte_i40e_txq_prio_strings[0]))

static int eth_i40e_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
			      struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct i40e_adapter), eth_i40e_dev_init);
}

static int eth_i40e_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, eth_i40e_dev_uninit);
}

static struct rte_pci_driver rte_i40e_pmd = {
	.id_table = pci_id_i40e_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
		     RTE_PCI_DRV_IOVA_AS_VA,
	.probe = eth_i40e_pci_probe,
	.remove = eth_i40e_pci_remove,
};
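
/*
 * Atomically consume the 8-byte rte_eth_link by treating it as a single
 * 64-bit word: rte_atomic64_cmpset() ensures readers never observe a
 * half-updated link status.
 */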
static inline int
rte_i40e_dev_atomic_read_link_status(struct rte_eth_dev *dev,
				     struct rte_eth_link *link)
{
	struct rte_eth_link *dst = link;
	struct rte_eth_link *src = &(dev->data->dev_link);

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
				*(uint64_t *)src) == 0)
		return -1;

	return 0;
}
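
/* Mirror image of the read helper: publish the new link status into
 * dev->data->dev_link with the same 64-bit compare-and-set.
 */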
static inline int
rte_i40e_dev_atomic_write_link_status(struct rte_eth_dev *dev,
				      struct rte_eth_link *link)
{
	struct rte_eth_link *dst = &(dev->data->dev_link);
	struct rte_eth_link *src = link;

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
				*(uint64_t *)src) == 0)
		return -1;

	return 0;
}

RTE_PMD_REGISTER_PCI(net_i40e, rte_i40e_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_i40e, pci_id_i40e_map);
RTE_PMD_REGISTER_KMOD_DEP(net_i40e, "* igb_uio | uio_pci_generic | vfio-pci");

#ifndef I40E_GLQF_ORT
#define I40E_GLQF_ORT(_i)    (0x00268900 + ((_i) * 4))
#endif

#ifndef I40E_GLQF_PIT
#define I40E_GLQF_PIT(_i)    (0x00268C80 + ((_i) * 4))
#endif

#ifndef I40E_GLQF_L3_MAP
#define I40E_GLQF_L3_MAP(_i) (0x0026C700 + ((_i) * 4))
#endif

static inline void i40e_GLQF_reg_init(struct i40e_hw *hw)
{
	/*
	 * Initialize registers for parsing packet type of QinQ
	 * This should be removed from code once proper
	 * configuration API is added to avoid configuration conflicts
	 * between ports of the same device.
	 */
	I40E_WRITE_REG(hw, I40E_GLQF_ORT(40), 0x00000029);
	I40E_WRITE_REG(hw, I40E_GLQF_PIT(9), 0x00009420);
}

#define I40E_FLOW_CONTROL_ETHERTYPE  0x8808

/*
 * Add an ethertype filter to drop all flow control frames transmitted
 * from VSIs.
 */
static void
i40e_add_tx_flow_control_drop_filter(struct i40e_pf *pf)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	uint16_t flags = I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC |
			I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP |
			I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX;
	int ret;

	ret = i40e_aq_add_rem_control_packet_filter(hw, NULL,
				I40E_FLOW_CONTROL_ETHERTYPE, flags,
				pf->main_vsi_seid, 0,
				TRUE, NULL, NULL);
	if (ret)
		PMD_INIT_LOG(ERR,
			"Failed to add filter to drop flow control frames from VSIs.");
}

static int
floating_veb_list_handler(__rte_unused const char *key,
			  const char *floating_veb_value,
			  void *opaque)
{
	int idx = 0;
	unsigned int count = 0;
	char *end = NULL;
	int min, max;
	bool *vf_floating_veb = opaque;

	while (isblank(*floating_veb_value))
		floating_veb_value++;

	/* Reset floating VEB configuration for VFs */
	for (idx = 0; idx < I40E_MAX_VF; idx++)
		vf_floating_veb[idx] = false;

	min = I40E_MAX_VF;
	do {
		while (isblank(*floating_veb_value))
			floating_veb_value++;
		if (*floating_veb_value == '\0')
			return -1;
		errno = 0;
		idx = strtoul(floating_veb_value, &end, 10);
		if (errno || end == NULL)
			return -1;
		while (isblank(*end))
			end++;
		if (*end == '-') {
			min = idx;
		} else if ((*end == ';') || (*end == '\0')) {
			max = idx;
			if (min == I40E_MAX_VF)
				min = idx;
			if (max >= I40E_MAX_VF)
				max = I40E_MAX_VF - 1;
			for (idx = min; idx <= max; idx++) {
				vf_floating_veb[idx] = true;
				count++;
			}
			min = I40E_MAX_VF;
		} else {
			return -1;
		}
		floating_veb_value = end + 1;
	} while (*end != '\0');

	if (count == 0)
		return -1;

	return 0;
}
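
/*
 * The list accepted by the handler above is a semicolon-separated set of
 * VF indices or dash ranges; for example, devargs such as
 * "enable_floating_veb=1,floating_veb_list=0;3-5" place VFs 0, 3, 4 and 5
 * behind the floating VEB.
 */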

static void
config_vf_floating_veb(struct rte_devargs *devargs,
		       uint16_t floating_veb,
		       bool *vf_floating_veb)
{
	struct rte_kvargs *kvlist;
	int i;
	const char *floating_veb_list = ETH_I40E_FLOATING_VEB_LIST_ARG;

	if (!floating_veb)
		return;
	/* All the VFs attach to the floating VEB by default
	 * when the floating VEB is enabled.
	 */
	for (i = 0; i < I40E_MAX_VF; i++)
		vf_floating_veb[i] = true;

	if (devargs == NULL)
		return;

	kvlist = rte_kvargs_parse(devargs->args, NULL);
	if (kvlist == NULL)
		return;

	if (!rte_kvargs_count(kvlist, floating_veb_list)) {
		rte_kvargs_free(kvlist);
		return;
	}
	/* When the floating_veb_list parameter exists, all the VFs first
	 * attach to the legacy VEB and are then moved to the floating VEB
	 * according to the floating_veb_list.
	 */
	if (rte_kvargs_process(kvlist, floating_veb_list,
			       floating_veb_list_handler,
			       vf_floating_veb) < 0) {
		rte_kvargs_free(kvlist);
		return;
	}
	rte_kvargs_free(kvlist);
}

static int
i40e_check_floating_handler(__rte_unused const char *key,
			    const char *value,
			    __rte_unused void *opaque)
{
	if (strcmp(value, "1"))
		return -1;

	return 0;
}

static int
is_floating_veb_supported(struct rte_devargs *devargs)
{
	struct rte_kvargs *kvlist;
	const char *floating_veb_key = ETH_I40E_FLOATING_VEB_ARG;

	if (devargs == NULL)
		return 0;

	kvlist = rte_kvargs_parse(devargs->args, NULL);
	if (kvlist == NULL)
		return 0;

	if (!rte_kvargs_count(kvlist, floating_veb_key)) {
		rte_kvargs_free(kvlist);
		return 0;
	}
	/* Floating VEB is enabled when there's key-value:
	 * enable_floating_veb=1
	 */
	if (rte_kvargs_process(kvlist, floating_veb_key,
			       i40e_check_floating_handler, NULL) < 0) {
		rte_kvargs_free(kvlist);
		return 0;
	}
	rte_kvargs_free(kvlist);

	return 1;
}

static void
config_floating_veb(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	memset(pf->floating_veb_list, 0, sizeof(pf->floating_veb_list));

	if (hw->aq.fw_maj_ver >= FLOATING_VEB_SUPPORTED_FW_MAJ) {
		pf->floating_veb =
			is_floating_veb_supported(pci_dev->device.devargs);
		config_vf_floating_veb(pci_dev->device.devargs,
				       pf->floating_veb,
				       pf->floating_veb_list);
	} else {
		pf->floating_veb = false;
	}
}

#define I40E_L2_TAGS_S_TAG_SHIFT 1
#define I40E_L2_TAGS_S_TAG_MASK I40E_MASK(0x1, I40E_L2_TAGS_S_TAG_SHIFT)
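
/*
 * Each filter family below (ethertype, tunnel, flow director) keeps its
 * software state as a TAILQ of rules plus a CRC32-keyed rte_hash table,
 * so rules can be looked up in O(1) by their input fields.
 */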
static int
i40e_init_ethtype_filter_list(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
	char ethertype_hash_name[RTE_HASH_NAMESIZE];
	int ret;

	struct rte_hash_parameters ethertype_hash_params = {
		.name = ethertype_hash_name,
		.entries = I40E_MAX_ETHERTYPE_FILTER_NUM,
		.key_len = sizeof(struct i40e_ethertype_filter_input),
		.hash_func = rte_hash_crc,
		.hash_func_init_val = 0,
		.socket_id = rte_socket_id(),
	};

	/* Initialize ethertype filter rule list and hash */
	TAILQ_INIT(&ethertype_rule->ethertype_list);
	snprintf(ethertype_hash_name, RTE_HASH_NAMESIZE,
		 "ethertype_%s", dev->device->name);
	ethertype_rule->hash_table = rte_hash_create(&ethertype_hash_params);
	if (!ethertype_rule->hash_table) {
		PMD_INIT_LOG(ERR, "Failed to create ethertype hash table!");
		return -EINVAL;
	}
	ethertype_rule->hash_map = rte_zmalloc("i40e_ethertype_hash_map",
				       sizeof(struct i40e_ethertype_filter *) *
				       I40E_MAX_ETHERTYPE_FILTER_NUM,
				       0);
	if (!ethertype_rule->hash_map) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory for ethertype hash map!");
		ret = -ENOMEM;
		goto err_ethertype_hash_map_alloc;
	}

	return 0;

err_ethertype_hash_map_alloc:
	rte_hash_free(ethertype_rule->hash_table);

	return ret;
}

static int
i40e_init_tunnel_filter_list(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
	char tunnel_hash_name[RTE_HASH_NAMESIZE];
	int ret;

	struct rte_hash_parameters tunnel_hash_params = {
		.name = tunnel_hash_name,
		.entries = I40E_MAX_TUNNEL_FILTER_NUM,
		.key_len = sizeof(struct i40e_tunnel_filter_input),
		.hash_func = rte_hash_crc,
		.hash_func_init_val = 0,
		.socket_id = rte_socket_id(),
	};

	/* Initialize tunnel filter rule list and hash */
	TAILQ_INIT(&tunnel_rule->tunnel_list);
	snprintf(tunnel_hash_name, RTE_HASH_NAMESIZE,
		 "tunnel_%s", dev->device->name);
	tunnel_rule->hash_table = rte_hash_create(&tunnel_hash_params);
	if (!tunnel_rule->hash_table) {
		PMD_INIT_LOG(ERR, "Failed to create tunnel hash table!");
		return -EINVAL;
	}
	tunnel_rule->hash_map = rte_zmalloc("i40e_tunnel_hash_map",
					    sizeof(struct i40e_tunnel_filter *) *
					    I40E_MAX_TUNNEL_FILTER_NUM,
					    0);
	if (!tunnel_rule->hash_map) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory for tunnel hash map!");
		ret = -ENOMEM;
		goto err_tunnel_hash_map_alloc;
	}

	return 0;

err_tunnel_hash_map_alloc:
	rte_hash_free(tunnel_rule->hash_table);

	return ret;
}

static int
i40e_init_fdir_filter_list(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_fdir_info *fdir_info = &pf->fdir;
	char fdir_hash_name[RTE_HASH_NAMESIZE];
	int ret;

	struct rte_hash_parameters fdir_hash_params = {
		.name = fdir_hash_name,
		.entries = I40E_MAX_FDIR_FILTER_NUM,
		.key_len = sizeof(struct i40e_fdir_input),
		.hash_func = rte_hash_crc,
		.hash_func_init_val = 0,
		.socket_id = rte_socket_id(),
	};

	/* Initialize flow director filter rule list and hash */
	TAILQ_INIT(&fdir_info->fdir_list);
	snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
		 "fdir_%s", dev->device->name);
	fdir_info->hash_table = rte_hash_create(&fdir_hash_params);
	if (!fdir_info->hash_table) {
		PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
		return -EINVAL;
	}
	fdir_info->hash_map = rte_zmalloc("i40e_fdir_hash_map",
					  sizeof(struct i40e_fdir_filter *) *
					  I40E_MAX_FDIR_FILTER_NUM,
					  0);
	if (!fdir_info->hash_map) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory for fdir hash map!");
		ret = -ENOMEM;
		goto err_fdir_hash_map_alloc;
	}

	return 0;

err_fdir_hash_map_alloc:
	rte_hash_free(fdir_info->hash_table);

	return ret;
}

static void
i40e_init_customized_info(struct i40e_pf *pf)
{
	int i;

	/* Initialize customized pctype */
	for (i = I40E_CUSTOMIZED_GTPC; i < I40E_CUSTOMIZED_MAX; i++) {
		pf->customized_pctype[i].index = i;
		pf->customized_pctype[i].pctype = I40E_FILTER_PCTYPE_INVALID;
		pf->customized_pctype[i].valid = false;
	}

	pf->gtp_support = false;
}

static int
i40e_init_queue_region_conf(struct rte_eth_dev *dev)
{
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_queue_regions *info = &pf->queue_region;
	uint16_t i;

	for (i = 0; i < I40E_PFQF_HREGION_MAX_INDEX; i++)
		i40e_write_rx_ctl(hw, I40E_PFQF_HREGION(i), 0);

	memset(info, 0, sizeof(struct i40e_queue_regions));

	return 0;
}

static int
eth_i40e_dev_init(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev;
	struct rte_intr_handle *intr_handle;
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_vsi *vsi;
	int ret;
	uint32_t len;
	uint8_t aq_fail = 0;

	PMD_INIT_FUNC_TRACE();

	dev->dev_ops = &i40e_eth_dev_ops;
	dev->rx_pkt_burst = i40e_recv_pkts;
	dev->tx_pkt_burst = i40e_xmit_pkts;
	dev->tx_pkt_prepare = i40e_prep_pkts;

	/* for secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check we don't need a different
	 * RX and TX function.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		i40e_set_rx_function(dev);
		i40e_set_tx_function(dev);
		return 0;
	}
	i40e_set_default_ptype_table(dev);
	pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	intr_handle = &pci_dev->intr_handle;

	rte_eth_copy_pci_info(dev, pci_dev);

	pf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	pf->adapter->eth_dev = dev;
	pf->dev_data = dev->data;

	hw->back = I40E_PF_TO_ADAPTER(pf);
	hw->hw_addr = (uint8_t *)(pci_dev->mem_resource[0].addr);
	if (!hw->hw_addr) {
		PMD_INIT_LOG(ERR,
			"Hardware is not available, as address is NULL");
		return -ENODEV;
	}

	hw->vendor_id = pci_dev->id.vendor_id;
	hw->device_id = pci_dev->id.device_id;
	hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
	hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
	hw->bus.device = pci_dev->addr.devid;
	hw->bus.func = pci_dev->addr.function;
	hw->adapter_stopped = 0;

	/* Make sure all is clean before doing PF reset */
	i40e_clear_hw(hw);

	/* Initialize the hardware */
	i40e_hw_init(dev);

	/* Reset here to make sure all is clean for each PF */
	ret = i40e_pf_reset(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to reset pf: %d", ret);
		return ret;
	}

	/* Initialize the shared code (base driver) */
	ret = i40e_init_shared_code(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to init shared code (base driver): %d", ret);
		return ret;
	}

	i40e_set_default_pctype_table(dev);

	/*
	 * To work around the NVM issue, initialize registers
	 * for packet type of QinQ by software.
	 * It should be removed once issues are fixed in NVM.
	 */
	i40e_GLQF_reg_init(hw);

	/* Initialize the input set for filters (hash and fd) to default value */
	i40e_filter_input_set_init(pf);

	/* Initialize the parameters for adminq */
	i40e_init_adminq_parameter(hw);
	ret = i40e_init_adminq(hw);
	if (ret != I40E_SUCCESS) {
		PMD_INIT_LOG(ERR, "Failed to init adminq: %d", ret);
		return -EIO;
	}
	PMD_INIT_LOG(INFO, "FW %d.%d API %d.%d NVM %02d.%02d.%02d eetrack %04x",
		     hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
		     hw->aq.api_maj_ver, hw->aq.api_min_ver,
		     ((hw->nvm.version >> 12) & 0xf),
		     ((hw->nvm.version >> 4) & 0xff),
		     (hw->nvm.version & 0xf), hw->nvm.eetrack);

	/* initialise the L3_MAP register */
	ret = i40e_aq_debug_write_register(hw, I40E_GLQF_L3_MAP(40),
					   0x00000028, NULL);
	if (ret)
		PMD_INIT_LOG(ERR, "Failed to write L3 MAP register %d", ret);

	/* Need the special FW version to support floating VEB */
	config_floating_veb(dev);
	/* Clear PXE mode */
	i40e_clear_pxe_mode(hw);
	i40e_dev_sync_phy_type(hw);

	/*
	 * On X710, performance number is far from the expectation on recent
	 * firmware versions. The fix for this issue may not be integrated in
	 * the following firmware version. So the workaround in software driver
	 * is needed. It needs to modify the initial values of 3 internal only
	 * registers. Note that the workaround can be removed when it is fixed
	 * in firmware in the future.
	 */
	i40e_configure_registers(hw);

	/* Get hw capabilities */
	ret = i40e_get_cap(hw);
	if (ret != I40E_SUCCESS) {
		PMD_INIT_LOG(ERR, "Failed to get capabilities: %d", ret);
		goto err_get_capabilities;
	}

	/* Initialize parameters for PF */
	ret = i40e_pf_parameter_init(dev);
	if (ret != 0) {
		PMD_INIT_LOG(ERR, "Failed to do parameter init: %d", ret);
		goto err_parameter_init;
	}

	/* Initialize the queue management */
	ret = i40e_res_pool_init(&pf->qp_pool, 0, hw->func_caps.num_tx_qp);
	if (ret < 0) {
		PMD_INIT_LOG(ERR, "Failed to init queue pool");
		goto err_qp_pool_init;
	}
	ret = i40e_res_pool_init(&pf->msix_pool, 1,
				 hw->func_caps.num_msix_vectors - 1);
	if (ret < 0) {
		PMD_INIT_LOG(ERR, "Failed to init MSIX pool");
		goto err_msix_pool_init;
	}

	/* Initialize lan hmc */
	ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
				hw->func_caps.num_rx_qp, 0, 0);
	if (ret != I40E_SUCCESS) {
		PMD_INIT_LOG(ERR, "Failed to init lan hmc: %d", ret);
		goto err_init_lan_hmc;
	}

	/* Configure lan hmc */
	ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
	if (ret != I40E_SUCCESS) {
		PMD_INIT_LOG(ERR, "Failed to configure lan hmc: %d", ret);
		goto err_configure_lan_hmc;
	}

	/* Get and check the mac address */
	i40e_get_mac_addr(hw, hw->mac.addr);
	if (i40e_validate_mac_addr(hw->mac.addr) != I40E_SUCCESS) {
		PMD_INIT_LOG(ERR, "mac address is not valid");
		ret = -EIO;
		goto err_get_mac_addr;
	}
	/* Copy the permanent MAC address */
	ether_addr_copy((struct ether_addr *)hw->mac.addr,
			(struct ether_addr *)hw->mac.perm_addr);

	/* Disable flow control */
	hw->fc.requested_mode = I40E_FC_NONE;
	i40e_set_fc(hw, &aq_fail, TRUE);

	/* Set the global registers with default ether type value */
	ret = i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER, ETHER_TYPE_VLAN);
	if (ret != I40E_SUCCESS) {
		PMD_INIT_LOG(ERR,
			     "Failed to set the default outer VLAN ether type");
		goto err_setup_pf_switch;
	}

	/* PF setup, which includes VSI setup */
	ret = i40e_pf_setup(pf);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to setup pf switch: %d", ret);
		goto err_setup_pf_switch;
	}

	/* reset all stats of the device, including pf and main vsi */
	i40e_dev_stats_reset(dev);

	vsi = pf->main_vsi;

	/* Disable double vlan by default */
	i40e_vsi_config_double_vlan(vsi, FALSE);

	/* Disable S-TAG identification when floating_veb is disabled */
	if (!pf->floating_veb) {
		ret = I40E_READ_REG(hw, I40E_PRT_L2TAGSEN);
		if (ret & I40E_L2_TAGS_S_TAG_MASK) {
			ret &= ~I40E_L2_TAGS_S_TAG_MASK;
			I40E_WRITE_REG(hw, I40E_PRT_L2TAGSEN, ret);
		}
	}

	if (!vsi->max_macaddrs)
		len = ETHER_ADDR_LEN;
	else
		len = ETHER_ADDR_LEN * vsi->max_macaddrs;

	/* Should be after VSI initialized */
	dev->data->mac_addrs = rte_zmalloc("i40e", len, 0);
	if (!dev->data->mac_addrs) {
		PMD_INIT_LOG(ERR,
			"Failed to allocate memory for storing mac address");
		goto err_mac_alloc;
	}
	ether_addr_copy((struct ether_addr *)hw->mac.perm_addr,
			&dev->data->mac_addrs[0]);

	/* Init dcb to sw mode by default */
	ret = i40e_dcb_init_configure(dev, TRUE);
	if (ret != I40E_SUCCESS) {
		PMD_INIT_LOG(INFO, "Failed to init dcb.");
		pf->flags &= ~I40E_FLAG_DCB;
	}
	/* Update HW struct after DCB configuration */
	i40e_get_cap(hw);

	/* initialize pf host driver to setup SRIOV resource if applicable */
	i40e_pf_host_init(dev);

	/* register callback func to eal lib */
	rte_intr_callback_register(intr_handle,
				   i40e_dev_interrupt_handler, dev);

	/* configure and enable device interrupt */
	i40e_pf_config_irq0(hw, TRUE);
	i40e_pf_enable_irq0(hw);

	/* enable uio intr after callback register */
	rte_intr_enable(intr_handle);

	/* By default disable flexible payload in global configuration */
	i40e_flex_payload_reg_set_default(hw);

	/*
	 * Add an ethertype filter to drop all flow control frames transmitted
	 * from VSIs. By doing so, we stop VF from sending out PAUSE or PFC
	 * frames.
	 */
	i40e_add_tx_flow_control_drop_filter(pf);

	/* Set the max frame size to 0x2600 by default,
	 * in case other drivers changed the default value.
	 */
	i40e_aq_set_mac_config(hw, I40E_FRAME_SIZE_MAX, TRUE, 0, NULL);

	/* initialize mirror rule list */
	TAILQ_INIT(&pf->mirror_list);

	/* initialize Traffic Manager configuration */
	i40e_tm_conf_init(dev);

	/* Initialize customized information */
	i40e_init_customized_info(pf);

	ret = i40e_init_ethtype_filter_list(dev);
	if (ret < 0)
		goto err_init_ethtype_filter_list;
	ret = i40e_init_tunnel_filter_list(dev);
	if (ret < 0)
		goto err_init_tunnel_filter_list;
	ret = i40e_init_fdir_filter_list(dev);
	if (ret < 0)
		goto err_init_fdir_filter_list;

	/* initialize queue region configuration */
	i40e_init_queue_region_conf(dev);

	/* initialize rss configuration from rte_flow */
	memset(&pf->rss_info, 0,
		sizeof(struct i40e_rte_flow_rss_conf));

	return 0;

err_init_fdir_filter_list:
	rte_free(pf->tunnel.hash_table);
	rte_free(pf->tunnel.hash_map);
err_init_tunnel_filter_list:
	rte_free(pf->ethertype.hash_table);
	rte_free(pf->ethertype.hash_map);
err_init_ethtype_filter_list:
	rte_free(dev->data->mac_addrs);
err_mac_alloc:
	i40e_vsi_release(pf->main_vsi);
err_setup_pf_switch:
	i40e_fdir_teardown(pf);
err_get_mac_addr:
err_configure_lan_hmc:
	(void)i40e_shutdown_lan_hmc(hw);
err_init_lan_hmc:
	i40e_res_pool_destroy(&pf->msix_pool);
err_msix_pool_init:
	i40e_res_pool_destroy(&pf->qp_pool);
err_qp_pool_init:
err_parameter_init:
err_get_capabilities:
	(void)i40e_shutdown_adminq(hw);

	return ret;
}

static void
i40e_rm_ethtype_filter_list(struct i40e_pf *pf)
{
	struct i40e_ethertype_filter *p_ethertype;
	struct i40e_ethertype_rule *ethertype_rule;

	ethertype_rule = &pf->ethertype;
	/* Remove all ethertype filter rules and hash */
	if (ethertype_rule->hash_map)
		rte_free(ethertype_rule->hash_map);
	if (ethertype_rule->hash_table)
		rte_hash_free(ethertype_rule->hash_table);

	while ((p_ethertype = TAILQ_FIRST(&ethertype_rule->ethertype_list))) {
		TAILQ_REMOVE(&ethertype_rule->ethertype_list,
			     p_ethertype, rules);
		rte_free(p_ethertype);
	}
}

static void
i40e_rm_tunnel_filter_list(struct i40e_pf *pf)
{
	struct i40e_tunnel_filter *p_tunnel;
	struct i40e_tunnel_rule *tunnel_rule;

	tunnel_rule = &pf->tunnel;
	/* Remove all tunnel filter rules and hash */
	if (tunnel_rule->hash_map)
		rte_free(tunnel_rule->hash_map);
	if (tunnel_rule->hash_table)
		rte_hash_free(tunnel_rule->hash_table);

	while ((p_tunnel = TAILQ_FIRST(&tunnel_rule->tunnel_list))) {
		TAILQ_REMOVE(&tunnel_rule->tunnel_list, p_tunnel, rules);
		rte_free(p_tunnel);
	}
}

static void
i40e_rm_fdir_filter_list(struct i40e_pf *pf)
{
	struct i40e_fdir_filter *p_fdir;
	struct i40e_fdir_info *fdir_info;

	fdir_info = &pf->fdir;
	/* Remove all flow director rules and hash */
	if (fdir_info->hash_map)
		rte_free(fdir_info->hash_map);
	if (fdir_info->hash_table)
		rte_hash_free(fdir_info->hash_table);

	while ((p_fdir = TAILQ_FIRST(&fdir_info->fdir_list))) {
		TAILQ_REMOVE(&fdir_info->fdir_list, p_fdir, rules);
		rte_free(p_fdir);
	}
}

void i40e_flex_payload_reg_set_default(struct i40e_hw *hw)
{
	/*
	 * Disable by default flexible payload
	 * for corresponding L2/L3/L4 layers.
	 */
	I40E_WRITE_REG(hw, I40E_GLQF_ORT(33), 0x00000000);
	I40E_WRITE_REG(hw, I40E_GLQF_ORT(34), 0x00000000);
	I40E_WRITE_REG(hw, I40E_GLQF_ORT(35), 0x00000000);
}

static int
eth_i40e_dev_uninit(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf;
	struct rte_pci_device *pci_dev;
	struct rte_intr_handle *intr_handle;
	struct i40e_hw *hw;
	struct i40e_filter_control_settings settings;
	struct rte_flow *p_flow;
	int ret;
	uint8_t aq_fail = 0;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	intr_handle = &pci_dev->intr_handle;

	if (hw->adapter_stopped == 0)
		i40e_dev_close(dev);

	dev->dev_ops = NULL;
	dev->rx_pkt_burst = NULL;
	dev->tx_pkt_burst = NULL;

	/* Clear PXE mode */
	i40e_clear_pxe_mode(hw);

	/* Unconfigure filter control */
	memset(&settings, 0, sizeof(settings));
	ret = i40e_set_filter_control(hw, &settings);
	if (ret)
		PMD_INIT_LOG(WARNING, "setup_pf_filter_control failed: %d",
			     ret);

	/* Disable flow control */
	hw->fc.requested_mode = I40E_FC_NONE;
	i40e_set_fc(hw, &aq_fail, TRUE);

	/* uninitialize pf host driver */
	i40e_pf_host_uninit(dev);

	rte_free(dev->data->mac_addrs);
	dev->data->mac_addrs = NULL;

	/* disable uio intr before callback unregister */
	rte_intr_disable(intr_handle);

	/* unregister callback func from eal lib */
	rte_intr_callback_unregister(intr_handle,
				     i40e_dev_interrupt_handler, dev);

	i40e_rm_ethtype_filter_list(pf);
	i40e_rm_tunnel_filter_list(pf);
	i40e_rm_fdir_filter_list(pf);

	/* Remove all flows */
	while ((p_flow = TAILQ_FIRST(&pf->flow_list))) {
		TAILQ_REMOVE(&pf->flow_list, p_flow, node);
		rte_free(p_flow);
	}

	/* Remove all Traffic Manager configuration */
	i40e_tm_conf_uninit(dev);

	return 0;
}

static int
i40e_dev_configure(struct rte_eth_dev *dev)
{
	struct i40e_adapter *ad =
		I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	enum rte_eth_rx_mq_mode mq_mode = dev->data->dev_conf.rxmode.mq_mode;
	int i, ret;

	ret = i40e_dev_sync_phy_type(hw);
	if (ret)
		return ret;

	/* Initialize to TRUE. If any of the Rx queues doesn't meet the
	 * bulk allocation or vector Rx preconditions we will reset it.
	 */
	ad->rx_bulk_alloc_allowed = true;
	ad->rx_vec_allowed = true;
	ad->tx_simple_allowed = true;
	ad->tx_vec_allowed = true;

	if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_PERFECT) {
		ret = i40e_fdir_setup(pf);
		if (ret != I40E_SUCCESS) {
			PMD_DRV_LOG(ERR, "Failed to setup flow director.");
			return -ENOTSUP;
		}
		ret = i40e_fdir_configure(dev);
		if (ret < 0) {
			PMD_DRV_LOG(ERR, "failed to configure fdir.");
			goto err;
		}
	} else
		i40e_fdir_teardown(pf);

	ret = i40e_dev_init_vlan(dev);
	if (ret < 0)
		goto err;

	/*
	 * Needs to move VMDQ setting out of i40e_pf_config_mq_rx() as VMDQ and
	 * RSS setting have different requirements.
	 * General PMD driver call sequence are NIC init, configure,
	 * rx/tx_queue_setup and dev_start. In rx/tx_queue_setup() function, it
	 * will try to lookup the VSI that specific queue belongs to if VMDQ
	 * applicable. So, VMDQ setting has to be done before
	 * rx/tx_queue_setup(). This function is good to place vmdq_setup.
	 * For RSS setting, it will try to calculate actual configured RX queue
	 * number, which will be available after rx_queue_setup(). dev_start()
	 * function is good to place RSS setup.
	 */
	if (mq_mode & ETH_MQ_RX_VMDQ_FLAG) {
		ret = i40e_vmdq_setup(dev);
		if (ret)
			goto err;
	}

	if (mq_mode & ETH_MQ_RX_DCB_FLAG) {
		ret = i40e_dcb_setup(dev);
		if (ret) {
			PMD_DRV_LOG(ERR, "failed to configure DCB.");
			goto err_dcb;
		}
	}

	TAILQ_INIT(&pf->flow_list);

	return 0;

err_dcb:
	/* need to release vmdq resource if exists */
	for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
		i40e_vsi_release(pf->vmdq[i].vsi);
		pf->vmdq[i].vsi = NULL;
	}
	rte_free(pf->vmdq);
	pf->vmdq = NULL;
err:
	/* need to release fdir resource if exists */
	i40e_fdir_teardown(pf);
	return ret;
}

static void
i40e_vsi_queues_unbind_intr(struct i40e_vsi *vsi)
{
	struct rte_eth_dev *dev = vsi->adapter->eth_dev;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
	uint16_t msix_vect = vsi->msix_intr;
	uint16_t i;

	for (i = 0; i < vsi->nb_qps; i++) {
		I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);
		I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), 0);
		rte_wmb();
	}

	if (vsi->type != I40E_VSI_SRIOV) {
		if (!rte_intr_allow_others(intr_handle)) {
			I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
				       I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK);
			I40E_WRITE_REG(hw,
				       I40E_PFINT_ITR0(I40E_ITR_INDEX_DEFAULT),
				       0);
		} else {
			I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1),
				       I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK);
			I40E_WRITE_REG(hw,
				       I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
						       msix_vect - 1), 0);
		}
	} else {
		uint32_t reg;

		reg = (hw->func_caps.num_msix_vectors_vf - 1) *
			vsi->user_param + (msix_vect - 1);

		I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg),
			       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
	}
	I40E_WRITE_FLUSH(hw);
}
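
/*
 * Chain nb_queue Rx queues starting at base_queue onto one MSI-X vector:
 * each QINT_RQCTL entry points at the next queue in the chain, and the
 * head of the chain is written to the link-list (LNKLST) register of the
 * vector.
 */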
static void
__vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t msix_vect,
		       int base_queue, int nb_queue,
		       uint16_t itr_idx)
{
	int i;
	uint32_t val;
	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);

	/* Bind all RX queues to allocated MSIX interrupt */
	for (i = 0; i < nb_queue; i++) {
		val = (msix_vect << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
			itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT |
			((base_queue + i + 1) <<
			 I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
			(0 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
			I40E_QINT_RQCTL_CAUSE_ENA_MASK;

		if (i == nb_queue - 1)
			val |= I40E_QINT_RQCTL_NEXTQ_INDX_MASK;
		I40E_WRITE_REG(hw, I40E_QINT_RQCTL(base_queue + i), val);
	}

	/* Write first RX queue to Link list register as the head element */
	if (vsi->type != I40E_VSI_SRIOV) {
		uint16_t interval =
			i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL, 1);

		if (msix_vect == I40E_MISC_VEC_ID) {
			I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
				       (base_queue <<
					I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
				       (0x0 <<
					I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
			I40E_WRITE_REG(hw,
				       I40E_PFINT_ITR0(I40E_ITR_INDEX_DEFAULT),
				       interval);
		} else {
			I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1),
				       (base_queue <<
					I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
				       (0x0 <<
					I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
			I40E_WRITE_REG(hw,
				       I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
						       msix_vect - 1),
				       interval);
		}
	} else {
		uint32_t reg;

		if (msix_vect == I40E_MISC_VEC_ID) {
			I40E_WRITE_REG(hw,
				       I40E_VPINT_LNKLST0(vsi->user_param),
				       (base_queue <<
					I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
				       (0x0 <<
					I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
		} else {
			/* num_msix_vectors_vf includes IRQ0, so subtract 1 */
			reg = (hw->func_caps.num_msix_vectors_vf - 1) *
				vsi->user_param + (msix_vect - 1);

			I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg),
				       (base_queue <<
					I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
				       (0x0 <<
					I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
		}
	}

	I40E_WRITE_FLUSH(hw);
}

static void
i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t itr_idx)
{
	struct rte_eth_dev *dev = vsi->adapter->eth_dev;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
	uint16_t msix_vect = vsi->msix_intr;
	uint16_t nb_msix = RTE_MIN(vsi->nb_msix, intr_handle->nb_efd);
	uint16_t queue_idx = 0;
	int record = 0;
	uint32_t val;
	int i;

	for (i = 0; i < vsi->nb_qps; i++) {
		I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);
		I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), 0);
	}

	/* INTENA flag is not auto-cleared for interrupt */
	val = I40E_READ_REG(hw, I40E_GLINT_CTL);
	val |= I40E_GLINT_CTL_DIS_AUTOMASK_PF0_MASK |
	       I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK |
	       I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK;
	I40E_WRITE_REG(hw, I40E_GLINT_CTL, val);

	/* VF bind interrupt */
	if (vsi->type == I40E_VSI_SRIOV) {
		__vsi_queues_bind_intr(vsi, msix_vect,
				       vsi->base_queue, vsi->nb_qps,
				       itr_idx);
		return;
	}

	/* PF & VMDq bind interrupt */
	if (rte_intr_dp_is_en(intr_handle)) {
		if (vsi->type == I40E_VSI_MAIN) {
			queue_idx = 0;
			record = 1;
		} else if (vsi->type == I40E_VSI_VMDQ2) {
			struct i40e_vsi *main_vsi =
				I40E_DEV_PRIVATE_TO_MAIN_VSI(vsi->adapter);
			queue_idx = vsi->base_queue - main_vsi->nb_qps;
			record = 1;
		}
	}

	for (i = 0; i < vsi->nb_used_qps; i++) {
		if (nb_msix <= 1) {
			if (!rte_intr_allow_others(intr_handle))
				/* allow to share MISC_VEC_ID */
				msix_vect = I40E_MISC_VEC_ID;

			/* not enough MSI-X vectors; map all queues to one */
			__vsi_queues_bind_intr(vsi, msix_vect,
					       vsi->base_queue + i,
					       vsi->nb_used_qps - i,
					       itr_idx);
			for (; !!record && i < vsi->nb_used_qps; i++)
				intr_handle->intr_vec[queue_idx + i] =
					msix_vect;
			break;
		}
		/* 1:1 queue/msix_vect mapping */
		__vsi_queues_bind_intr(vsi, msix_vect,
				       vsi->base_queue + i, 1,
				       itr_idx);
		if (!!record)
			intr_handle->intr_vec[queue_idx + i] = msix_vect;

		msix_vect++;
		nb_msix--;
	}
}

static void
i40e_vsi_enable_queues_intr(struct i40e_vsi *vsi)
{
	struct rte_eth_dev *dev = vsi->adapter->eth_dev;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
	uint16_t interval = i40e_calc_itr_interval(
				RTE_LIBRTE_I40E_ITR_INTERVAL, 1);
	uint16_t msix_intr, i;

	if (rte_intr_allow_others(intr_handle))
		for (i = 0; i < vsi->nb_msix; i++) {
			msix_intr = vsi->msix_intr + i;
			I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(msix_intr - 1),
				I40E_PFINT_DYN_CTLN_INTENA_MASK |
				I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
				(0 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
				(interval <<
				 I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT));
		}
	else
		I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
			       I40E_PFINT_DYN_CTL0_INTENA_MASK |
			       I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
			       (0 << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT) |
			       (interval <<
				I40E_PFINT_DYN_CTL0_INTERVAL_SHIFT));

	I40E_WRITE_FLUSH(hw);
}

static void
i40e_vsi_disable_queues_intr(struct i40e_vsi *vsi)
{
	struct rte_eth_dev *dev = vsi->adapter->eth_dev;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
	uint16_t msix_intr, i;

	if (rte_intr_allow_others(intr_handle))
		for (i = 0; i < vsi->nb_msix; i++) {
			msix_intr = vsi->msix_intr + i;
			I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(msix_intr - 1),
				       0);
		}
	else
		I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, 0);

	I40E_WRITE_FLUSH(hw);
}
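
/*
 * Translate the ETH_LINK_SPEED_* bitmap from the port configuration into
 * the I40E_LINK_SPEED_* bitmap understood by the admin queue.
 */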
static inline uint8_t
i40e_parse_link_speeds(uint16_t link_speeds)
{
	uint8_t link_speed = I40E_LINK_SPEED_UNKNOWN;

	if (link_speeds & ETH_LINK_SPEED_40G)
		link_speed |= I40E_LINK_SPEED_40GB;
	if (link_speeds & ETH_LINK_SPEED_25G)
		link_speed |= I40E_LINK_SPEED_25GB;
	if (link_speeds & ETH_LINK_SPEED_20G)
		link_speed |= I40E_LINK_SPEED_20GB;
	if (link_speeds & ETH_LINK_SPEED_10G)
		link_speed |= I40E_LINK_SPEED_10GB;
	if (link_speeds & ETH_LINK_SPEED_1G)
		link_speed |= I40E_LINK_SPEED_1GB;
	if (link_speeds & ETH_LINK_SPEED_100M)
		link_speed |= I40E_LINK_SPEED_100MB;

	return link_speed;
}
1839 i40e_phy_conf_link(struct i40e_hw *hw,
1841 uint8_t force_speed,
1844 enum i40e_status_code status;
1845 struct i40e_aq_get_phy_abilities_resp phy_ab;
1846 struct i40e_aq_set_phy_config phy_conf;
1847 enum i40e_aq_phy_type cnt;
1848 uint32_t phy_type_mask = 0;
1850 const uint8_t mask = I40E_AQ_PHY_FLAG_PAUSE_TX |
1851 I40E_AQ_PHY_FLAG_PAUSE_RX |
1853 I40E_AQ_PHY_FLAG_LOW_POWER;
1854 const uint8_t advt = I40E_LINK_SPEED_40GB |
1855 I40E_LINK_SPEED_25GB |
1856 I40E_LINK_SPEED_10GB |
1857 I40E_LINK_SPEED_1GB |
1858 I40E_LINK_SPEED_100MB;
1862 status = i40e_aq_get_phy_capabilities(hw, false, false, &phy_ab,
1867 /* If link already up, no need to set up again */
1868 if (is_up && phy_ab.phy_type != 0)
1869 return I40E_SUCCESS;
1871 memset(&phy_conf, 0, sizeof(phy_conf));
1873 /* bits 0-2 use the values from get_phy_abilities_resp */
1875 abilities |= phy_ab.abilities & mask;
1877 /* update abilities and speed */
1878 if (abilities & I40E_AQ_PHY_AN_ENABLED)
1879 phy_conf.link_speed = advt;
1881 phy_conf.link_speed = is_up ? force_speed : phy_ab.link_speed;
1883 phy_conf.abilities = abilities;
1887 /* To enable link, phy_type mask needs to include each type */
1888 for (cnt = I40E_PHY_TYPE_SGMII; cnt < I40E_PHY_TYPE_MAX; cnt++)
1889 phy_type_mask |= 1 << cnt;
1891 /* use get_phy_abilities_resp value for the rest */
1892 phy_conf.phy_type = is_up ? cpu_to_le32(phy_type_mask) : 0;
1893 phy_conf.phy_type_ext = is_up ? (I40E_AQ_PHY_TYPE_EXT_25G_KR |
1894 I40E_AQ_PHY_TYPE_EXT_25G_CR | I40E_AQ_PHY_TYPE_EXT_25G_SR |
1895 I40E_AQ_PHY_TYPE_EXT_25G_LR) : 0;
1896 phy_conf.fec_config = phy_ab.fec_cfg_curr_mod_ext_info;
1897 phy_conf.eee_capability = phy_ab.eee_capability;
1898 phy_conf.eeer = phy_ab.eeer_val;
1899 phy_conf.low_power_ctrl = phy_ab.d3_lpan;
1901 PMD_DRV_LOG(DEBUG, "\tCurrent: abilities %x, link_speed %x",
1902 phy_ab.abilities, phy_ab.link_speed);
1903 PMD_DRV_LOG(DEBUG, "\tConfig: abilities %x, link_speed %x",
1904 phy_conf.abilities, phy_conf.link_speed);
1906 status = i40e_aq_set_phy_config(hw, &phy_conf, NULL);
1910 return I40E_SUCCESS;
1914 i40e_apply_link_speed(struct rte_eth_dev *dev)
1917 uint8_t abilities = 0;
1918 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1919 struct rte_eth_conf *conf = &dev->data->dev_conf;
1921 speed = i40e_parse_link_speeds(conf->link_speeds);
1922 abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
1923 if (!(conf->link_speeds & ETH_LINK_SPEED_FIXED))
1924 abilities |= I40E_AQ_PHY_AN_ENABLED;
1925 abilities |= I40E_AQ_PHY_LINK_ENABLED;
1927 return i40e_phy_conf_link(hw, abilities, speed, true);
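/*
 * Hedged application-side sketch (not part of this driver) of the
 * configuration that feeds i40e_apply_link_speed() above; port_id,
 * nb_rxq and nb_txq are assumed to be defined by the application.
 * Restricting the port to 10G/40G would look like:
 *
 *	struct rte_eth_conf conf;
 *	memset(&conf, 0, sizeof(conf));
 *	conf.link_speeds = ETH_LINK_SPEED_10G | ETH_LINK_SPEED_40G;
 *	rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
 *
 * Since ETH_LINK_SPEED_FIXED is absent, I40E_AQ_PHY_AN_ENABLED is added
 * to the abilities and the PHY autonegotiates; i40e_dev_start() below
 * rejects configurations that set ETH_LINK_SPEED_FIXED.
 */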
1931 i40e_dev_start(struct rte_eth_dev *dev)
1933 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1934 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1935 struct i40e_vsi *main_vsi = pf->main_vsi;
1937 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1938 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1939 uint32_t intr_vector = 0;
1940 struct i40e_vsi *vsi;
1942 hw->adapter_stopped = 0;
1944 if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
1946 "Invalid link_speeds for port %u, autonegotiation disabled",
1947 dev->data->port_id);
1951 rte_intr_disable(intr_handle);
1953 if ((rte_intr_cap_multiple(intr_handle) ||
1954 !RTE_ETH_DEV_SRIOV(dev).active) &&
1955 dev->data->dev_conf.intr_conf.rxq != 0) {
1956 intr_vector = dev->data->nb_rx_queues;
1957 ret = rte_intr_efd_enable(intr_handle, intr_vector);
1962 if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
1963 intr_handle->intr_vec =
1964 rte_zmalloc("intr_vec",
1965 dev->data->nb_rx_queues * sizeof(int),
1967 if (!intr_handle->intr_vec) {
1969 "Failed to allocate %d rx_queues intr_vec",
1970 dev->data->nb_rx_queues);
1975 /* Initialize VSI */
1976 ret = i40e_dev_rxtx_init(pf);
1977 if (ret != I40E_SUCCESS) {
1978 PMD_DRV_LOG(ERR, "Failed to init rx/tx queues");
1982 /* Map queues with MSIX interrupt */
1983 main_vsi->nb_used_qps = dev->data->nb_rx_queues -
1984 pf->nb_cfg_vmdq_vsi * RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
1985 i40e_vsi_queues_bind_intr(main_vsi, I40E_ITR_INDEX_DEFAULT);
1986 i40e_vsi_enable_queues_intr(main_vsi);
1988 /* Map VMDQ VSI queues with MSIX interrupt */
1989 for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
1990 pf->vmdq[i].vsi->nb_used_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
1991 i40e_vsi_queues_bind_intr(pf->vmdq[i].vsi,
1992 I40E_ITR_INDEX_DEFAULT);
1993 i40e_vsi_enable_queues_intr(pf->vmdq[i].vsi);
1996 /* enable FDIR MSIX interrupt */
1997 if (pf->fdir.fdir_vsi) {
1998 i40e_vsi_queues_bind_intr(pf->fdir.fdir_vsi,
1999 I40E_ITR_INDEX_NONE);
2000 i40e_vsi_enable_queues_intr(pf->fdir.fdir_vsi);
2003 /* Enable all queues which have been configured */
2004 ret = i40e_dev_switch_queues(pf, TRUE);
2006 if (ret != I40E_SUCCESS) {
2007 PMD_DRV_LOG(ERR, "Failed to enable VSI");
2011 /* Enable receiving broadcast packets */
2012 ret = i40e_aq_set_vsi_broadcast(hw, main_vsi->seid, true, NULL);
2013 if (ret != I40E_SUCCESS)
2014 PMD_DRV_LOG(INFO, "fail to set vsi broadcast");
2016 for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
2017 ret = i40e_aq_set_vsi_broadcast(hw, pf->vmdq[i].vsi->seid,
2019 if (ret != I40E_SUCCESS)
2020 PMD_DRV_LOG(INFO, "fail to set vsi broadcast");
2023 /* Enable the VLAN promiscuous mode. */
2025 for (i = 0; i < pf->vf_num; i++) {
2026 vsi = pf->vfs[i].vsi;
2027 i40e_aq_set_vsi_vlan_promisc(hw, vsi->seid,
2032 /* Enable mac loopback mode */
2033 if (dev->data->dev_conf.lpbk_mode == I40E_AQ_LB_MODE_NONE ||
2034 dev->data->dev_conf.lpbk_mode == I40E_AQ_LB_PHY_LOCAL) {
2035 ret = i40e_diag_set_loopback(hw, dev->data->dev_conf.lpbk_mode);
2036 if (ret != I40E_SUCCESS) {
2037 PMD_DRV_LOG(ERR, "fail to set loopback link");
2042 /* Apply link configure */
2043 if (dev->data->dev_conf.link_speeds & ~(ETH_LINK_SPEED_100M |
2044 ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
2045 ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G |
2046 ETH_LINK_SPEED_40G)) {
2047 PMD_DRV_LOG(ERR, "Invalid link setting");
2050 ret = i40e_apply_link_speed(dev);
2051 if (I40E_SUCCESS != ret) {
2052 PMD_DRV_LOG(ERR, "Fail to apply link setting");
2056 if (!rte_intr_allow_others(intr_handle)) {
2057 rte_intr_callback_unregister(intr_handle,
2058 i40e_dev_interrupt_handler,
2060 /* configure and enable device interrupt */
2061 i40e_pf_config_irq0(hw, FALSE);
2062 i40e_pf_enable_irq0(hw);
2064 if (dev->data->dev_conf.intr_conf.lsc != 0)
2066 "lsc won't enable because of no intr multiplex");
2068 ret = i40e_aq_set_phy_int_mask(hw,
2069 ~(I40E_AQ_EVENT_LINK_UPDOWN |
2070 I40E_AQ_EVENT_MODULE_QUAL_FAIL |
2071 I40E_AQ_EVENT_MEDIA_NA), NULL);
2072 if (ret != I40E_SUCCESS)
2073 PMD_DRV_LOG(WARNING, "Fail to set phy mask");
2075 /* Call the get_link_info AQ command to enable/disable LSE */
2076 i40e_dev_link_update(dev, 0);
2079 /* enable uio intr after callback register */
2080 rte_intr_enable(intr_handle);
2082 i40e_filter_restore(pf);
2084 if (pf->tm_conf.root && !pf->tm_conf.committed)
2085 PMD_DRV_LOG(WARNING,
2086 "please call hierarchy_commit() "
2087 "before starting the port");
2089 return I40E_SUCCESS;
2092 i40e_dev_switch_queues(pf, FALSE);
2093 i40e_dev_clear_queues(dev);
2099 i40e_dev_stop(struct rte_eth_dev *dev)
2101 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2102 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2103 struct i40e_vsi *main_vsi = pf->main_vsi;
2104 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2105 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2108 if (hw->adapter_stopped == 1)
2110 /* Disable all queues */
2111 i40e_dev_switch_queues(pf, FALSE);
2113 /* un-map queues with interrupt registers */
2114 i40e_vsi_disable_queues_intr(main_vsi);
2115 i40e_vsi_queues_unbind_intr(main_vsi);
2117 for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
2118 i40e_vsi_disable_queues_intr(pf->vmdq[i].vsi);
2119 i40e_vsi_queues_unbind_intr(pf->vmdq[i].vsi);
2122 if (pf->fdir.fdir_vsi) {
2123 i40e_vsi_queues_unbind_intr(pf->fdir.fdir_vsi);
2124 i40e_vsi_disable_queues_intr(pf->fdir.fdir_vsi);
2126 /* Clear all queues and release memory */
2127 i40e_dev_clear_queues(dev);
2130 i40e_dev_set_link_down(dev);
2132 if (!rte_intr_allow_others(intr_handle))
2133 /* resume to the default handler */
2134 rte_intr_callback_register(intr_handle,
2135 i40e_dev_interrupt_handler,
2138 /* Clean datapath event and queue/vec mapping */
2139 rte_intr_efd_disable(intr_handle);
2140 if (intr_handle->intr_vec) {
2141 rte_free(intr_handle->intr_vec);
2142 intr_handle->intr_vec = NULL;
2145 /* reset hierarchy commit */
2146 pf->tm_conf.committed = false;
2148 hw->adapter_stopped = 1;
2152 i40e_dev_close(struct rte_eth_dev *dev)
2154 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2155 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2156 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2157 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2158 struct i40e_mirror_rule *p_mirror;
2163 PMD_INIT_FUNC_TRACE();
2167 /* Remove all mirror rules */
2168 while ((p_mirror = TAILQ_FIRST(&pf->mirror_list))) {
2169 ret = i40e_aq_del_mirror_rule(hw,
2170 pf->main_vsi->veb->seid,
2171 p_mirror->rule_type,
2173 p_mirror->num_entries,
2176 PMD_DRV_LOG(ERR, "failed to remove mirror rule: "
2177 "status = %d, aq_err = %d.", ret,
2178 hw->aq.asq_last_status);
2180 /* remove mirror software resource anyway */
2181 TAILQ_REMOVE(&pf->mirror_list, p_mirror, rules);
2183 pf->nb_mirror_rule--;
2186 i40e_dev_free_queues(dev);
2188 /* Disable interrupt */
2189 i40e_pf_disable_irq0(hw);
2190 rte_intr_disable(intr_handle);
2192 /* shutdown and destroy the HMC */
2193 i40e_shutdown_lan_hmc(hw);
2195 for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
2196 i40e_vsi_release(pf->vmdq[i].vsi);
2197 pf->vmdq[i].vsi = NULL;
2202 /* release all the existing VSIs and VEBs */
2203 i40e_fdir_teardown(pf);
2204 i40e_vsi_release(pf->main_vsi);
2206 /* shutdown the adminq */
2207 i40e_aq_queue_shutdown(hw, true);
2208 i40e_shutdown_adminq(hw);
2210 i40e_res_pool_destroy(&pf->qp_pool);
2211 i40e_res_pool_destroy(&pf->msix_pool);
2213 /* Disable flexible payload in global configuration */
2214 i40e_flex_payload_reg_set_default(hw);
2216 /* force a PF reset to clean anything leftover */
2217 reg = I40E_READ_REG(hw, I40E_PFGEN_CTRL);
2218 I40E_WRITE_REG(hw, I40E_PFGEN_CTRL,
2219 (reg | I40E_PFGEN_CTRL_PFSWR_MASK));
2220 I40E_WRITE_FLUSH(hw);
2224 * Reset PF device only to re-initialize resources in PMD layer
2227 i40e_dev_reset(struct rte_eth_dev *dev)
2231 /* When a DPDK PMD PF begins to reset a PF port, it should notify all
2232 * of its VFs so that they stay aligned with it. The detailed notification
2233 * mechanism is PMD specific; for the i40e PF it is rather complex.
2234 * To avoid unexpected behavior in VFs, resetting a PF with SR-IOV
2235 * active is currently not supported. It might be supported later.
2237 if (dev->data->sriov.active)
2240 ret = eth_i40e_dev_uninit(dev);
2244 ret = eth_i40e_dev_init(dev);
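/*
 * The reset above is a full uninit/init cycle. Hedged application-side
 * sketch (not part of this driver): after receiving an
 * RTE_ETH_EVENT_INTR_RESET event, an application would typically call
 *
 *	rte_eth_dev_reset(port_id);
 *
 * and then reconfigure and restart the port.
 */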
2250 i40e_dev_promiscuous_enable(struct rte_eth_dev *dev)
2252 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2253 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2254 struct i40e_vsi *vsi = pf->main_vsi;
2257 status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
2259 if (status != I40E_SUCCESS)
2260 PMD_DRV_LOG(ERR, "Failed to enable unicast promiscuous");
2262 status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
2264 if (status != I40E_SUCCESS)
2265 PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous");
2270 i40e_dev_promiscuous_disable(struct rte_eth_dev *dev)
2272 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2273 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2274 struct i40e_vsi *vsi = pf->main_vsi;
2277 status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
2279 if (status != I40E_SUCCESS)
2280 PMD_DRV_LOG(ERR, "Failed to disable unicast promiscuous");
2282 status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
2284 if (status != I40E_SUCCESS)
2285 PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous");
2289 i40e_dev_allmulticast_enable(struct rte_eth_dev *dev)
2291 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2292 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2293 struct i40e_vsi *vsi = pf->main_vsi;
2296 ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, TRUE, NULL);
2297 if (ret != I40E_SUCCESS)
2298 PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous");
2302 i40e_dev_allmulticast_disable(struct rte_eth_dev *dev)
2304 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2305 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2306 struct i40e_vsi *vsi = pf->main_vsi;
2309 if (dev->data->promiscuous == 1)
2310 return; /* must remain in all_multicast mode */
2312 ret = i40e_aq_set_vsi_multicast_promiscuous(hw,
2313 vsi->seid, FALSE, NULL);
2314 if (ret != I40E_SUCCESS)
2315 PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous");
2319 * Set device link up.
2322 i40e_dev_set_link_up(struct rte_eth_dev *dev)
2324 /* re-apply link speed setting */
2325 return i40e_apply_link_speed(dev);
2329 * Set device link down.
2332 i40e_dev_set_link_down(struct rte_eth_dev *dev)
2334 uint8_t speed = I40E_LINK_SPEED_UNKNOWN;
2335 uint8_t abilities = 0;
2336 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2338 abilities = I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
2339 return i40e_phy_conf_link(hw, abilities, speed, false);
2343 i40e_dev_link_update(struct rte_eth_dev *dev,
2344 int wait_to_complete)
2346 #define CHECK_INTERVAL 100 /* 100ms */
2347 #define MAX_REPEAT_TIME 10 /* 1s (10 * 100ms) in total */
2348 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2349 struct i40e_link_status link_status;
2350 struct rte_eth_link link, old;
2352 unsigned rep_cnt = MAX_REPEAT_TIME;
2353 bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false;
2355 memset(&link, 0, sizeof(link));
2356 memset(&old, 0, sizeof(old));
2357 memset(&link_status, 0, sizeof(link_status));
2358 rte_i40e_dev_atomic_read_link_status(dev, &old);
2361 /* Get link status information from hardware */
2362 status = i40e_aq_get_link_info(hw, enable_lse,
2363 &link_status, NULL);
2364 if (status != I40E_SUCCESS) {
2365 link.link_speed = ETH_SPEED_NUM_100M;
2366 link.link_duplex = ETH_LINK_FULL_DUPLEX;
2367 PMD_DRV_LOG(ERR, "Failed to get link info");
2371 link.link_status = link_status.link_info & I40E_AQ_LINK_UP;
2372 if (!wait_to_complete || link.link_status)
2375 rte_delay_ms(CHECK_INTERVAL);
2376 } while (--rep_cnt);
2378 if (!link.link_status)
2381 /* i40e uses full duplex only */
2382 link.link_duplex = ETH_LINK_FULL_DUPLEX;
2384 /* Parse the link status */
2385 switch (link_status.link_speed) {
2386 case I40E_LINK_SPEED_100MB:
2387 link.link_speed = ETH_SPEED_NUM_100M;
2389 case I40E_LINK_SPEED_1GB:
2390 link.link_speed = ETH_SPEED_NUM_1G;
2392 case I40E_LINK_SPEED_10GB:
2393 link.link_speed = ETH_SPEED_NUM_10G;
2395 case I40E_LINK_SPEED_20GB:
2396 link.link_speed = ETH_SPEED_NUM_20G;
2398 case I40E_LINK_SPEED_25GB:
2399 link.link_speed = ETH_SPEED_NUM_25G;
2401 case I40E_LINK_SPEED_40GB:
2402 link.link_speed = ETH_SPEED_NUM_40G;
2405 link.link_speed = ETH_SPEED_NUM_100M;
2409 link.link_autoneg = !(dev->data->dev_conf.link_speeds &
2410 ETH_LINK_SPEED_FIXED);
2413 rte_i40e_dev_atomic_write_link_status(dev, &link);
2414 if (link.link_status == old.link_status)
2417 i40e_notify_all_vfs_link_status(dev);
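/*
 * The i40e_stat_update_48()/_32() helpers used below read a hardware
 * counter (48-bit counters are split across an H/L register pair), latch
 * the first reading as an offset, and report a wrap-safe delta. Minimal
 * sketch of the idea (a simplified restatement, not the helper itself,
 * which is defined earlier in this file):
 *
 *	new = lo | ((uint64_t)(hi & 0xFFFF) << 32);
 *	if (!offset_loaded)
 *		*offset = new;
 *	*stat = (new - *offset) & ((1ULL << 48) - 1);
 *
 * Masking to 48 bits keeps the subtraction correct even after the
 * hardware counter wraps.
 */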
2422 /* Get all the statistics of a VSI */
2424 i40e_update_vsi_stats(struct i40e_vsi *vsi)
2426 struct i40e_eth_stats *oes = &vsi->eth_stats_offset;
2427 struct i40e_eth_stats *nes = &vsi->eth_stats;
2428 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2429 int idx = rte_le_to_cpu_16(vsi->info.stat_counter_idx);
2431 i40e_stat_update_48(hw, I40E_GLV_GORCH(idx), I40E_GLV_GORCL(idx),
2432 vsi->offset_loaded, &oes->rx_bytes,
2434 i40e_stat_update_48(hw, I40E_GLV_UPRCH(idx), I40E_GLV_UPRCL(idx),
2435 vsi->offset_loaded, &oes->rx_unicast,
2437 i40e_stat_update_48(hw, I40E_GLV_MPRCH(idx), I40E_GLV_MPRCL(idx),
2438 vsi->offset_loaded, &oes->rx_multicast,
2439 &nes->rx_multicast);
2440 i40e_stat_update_48(hw, I40E_GLV_BPRCH(idx), I40E_GLV_BPRCL(idx),
2441 vsi->offset_loaded, &oes->rx_broadcast,
2442 &nes->rx_broadcast);
2443 /* exclude CRC bytes */
2444 nes->rx_bytes -= (nes->rx_unicast + nes->rx_multicast +
2445 nes->rx_broadcast) * ETHER_CRC_LEN;
2447 i40e_stat_update_32(hw, I40E_GLV_RDPC(idx), vsi->offset_loaded,
2448 &oes->rx_discards, &nes->rx_discards);
2449 /* GLV_REPC not supported */
2450 /* GLV_RMPC not supported */
2451 i40e_stat_update_32(hw, I40E_GLV_RUPP(idx), vsi->offset_loaded,
2452 &oes->rx_unknown_protocol,
2453 &nes->rx_unknown_protocol);
2454 i40e_stat_update_48(hw, I40E_GLV_GOTCH(idx), I40E_GLV_GOTCL(idx),
2455 vsi->offset_loaded, &oes->tx_bytes,
2457 i40e_stat_update_48(hw, I40E_GLV_UPTCH(idx), I40E_GLV_UPTCL(idx),
2458 vsi->offset_loaded, &oes->tx_unicast,
2460 i40e_stat_update_48(hw, I40E_GLV_MPTCH(idx), I40E_GLV_MPTCL(idx),
2461 vsi->offset_loaded, &oes->tx_multicast,
2462 &nes->tx_multicast);
2463 i40e_stat_update_48(hw, I40E_GLV_BPTCH(idx), I40E_GLV_BPTCL(idx),
2464 vsi->offset_loaded, &oes->tx_broadcast,
2465 &nes->tx_broadcast);
2466 /* GLV_TDPC not supported */
2467 i40e_stat_update_32(hw, I40E_GLV_TEPC(idx), vsi->offset_loaded,
2468 &oes->tx_errors, &nes->tx_errors);
2469 vsi->offset_loaded = true;
2471 PMD_DRV_LOG(DEBUG, "***************** VSI[%u] stats start *******************",
2473 PMD_DRV_LOG(DEBUG, "rx_bytes: %"PRIu64"", nes->rx_bytes);
2474 PMD_DRV_LOG(DEBUG, "rx_unicast: %"PRIu64"", nes->rx_unicast);
2475 PMD_DRV_LOG(DEBUG, "rx_multicast: %"PRIu64"", nes->rx_multicast);
2476 PMD_DRV_LOG(DEBUG, "rx_broadcast: %"PRIu64"", nes->rx_broadcast);
2477 PMD_DRV_LOG(DEBUG, "rx_discards: %"PRIu64"", nes->rx_discards);
2478 PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
2479 nes->rx_unknown_protocol);
2480 PMD_DRV_LOG(DEBUG, "tx_bytes: %"PRIu64"", nes->tx_bytes);
2481 PMD_DRV_LOG(DEBUG, "tx_unicast: %"PRIu64"", nes->tx_unicast);
2482 PMD_DRV_LOG(DEBUG, "tx_multicast: %"PRIu64"", nes->tx_multicast);
2483 PMD_DRV_LOG(DEBUG, "tx_broadcast: %"PRIu64"", nes->tx_broadcast);
2484 PMD_DRV_LOG(DEBUG, "tx_discards: %"PRIu64"", nes->tx_discards);
2485 PMD_DRV_LOG(DEBUG, "tx_errors: %"PRIu64"", nes->tx_errors);
2486 PMD_DRV_LOG(DEBUG, "***************** VSI[%u] stats end *******************",
2491 i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw)
2494 struct i40e_hw_port_stats *ns = &pf->stats; /* new stats */
2495 struct i40e_hw_port_stats *os = &pf->stats_offset; /* old stats */
2497 /* Get rx/tx bytes of internal transfer packets */
2498 i40e_stat_update_48(hw, I40E_GLV_GORCH(hw->port),
2499 I40E_GLV_GORCL(hw->port),
2501 &pf->internal_stats_offset.rx_bytes,
2502 &pf->internal_stats.rx_bytes);
2504 i40e_stat_update_48(hw, I40E_GLV_GOTCH(hw->port),
2505 I40E_GLV_GOTCL(hw->port),
2507 &pf->internal_stats_offset.tx_bytes,
2508 &pf->internal_stats.tx_bytes);
2509 /* Get total internal rx packet count */
2510 i40e_stat_update_48(hw, I40E_GLV_UPRCH(hw->port),
2511 I40E_GLV_UPRCL(hw->port),
2513 &pf->internal_stats_offset.rx_unicast,
2514 &pf->internal_stats.rx_unicast);
2515 i40e_stat_update_48(hw, I40E_GLV_MPRCH(hw->port),
2516 I40E_GLV_MPRCL(hw->port),
2518 &pf->internal_stats_offset.rx_multicast,
2519 &pf->internal_stats.rx_multicast);
2520 i40e_stat_update_48(hw, I40E_GLV_BPRCH(hw->port),
2521 I40E_GLV_BPRCL(hw->port),
2523 &pf->internal_stats_offset.rx_broadcast,
2524 &pf->internal_stats.rx_broadcast);
2525 /* Get total internal tx packet count */
2526 i40e_stat_update_48(hw, I40E_GLV_UPTCH(hw->port),
2527 I40E_GLV_UPTCL(hw->port),
2529 &pf->internal_stats_offset.tx_unicast,
2530 &pf->internal_stats.tx_unicast);
2531 i40e_stat_update_48(hw, I40E_GLV_MPTCH(hw->port),
2532 I40E_GLV_MPTCL(hw->port),
2534 &pf->internal_stats_offset.tx_multicast,
2535 &pf->internal_stats.tx_multicast);
2536 i40e_stat_update_48(hw, I40E_GLV_BPTCH(hw->port),
2537 I40E_GLV_BPTCL(hw->port),
2539 &pf->internal_stats_offset.tx_broadcast,
2540 &pf->internal_stats.tx_broadcast);
2542 /* exclude CRC size */
2543 pf->internal_stats.rx_bytes -= (pf->internal_stats.rx_unicast +
2544 pf->internal_stats.rx_multicast +
2545 pf->internal_stats.rx_broadcast) * ETHER_CRC_LEN;
2547 /* Get statistics of struct i40e_eth_stats */
2548 i40e_stat_update_48(hw, I40E_GLPRT_GORCH(hw->port),
2549 I40E_GLPRT_GORCL(hw->port),
2550 pf->offset_loaded, &os->eth.rx_bytes,
2552 i40e_stat_update_48(hw, I40E_GLPRT_UPRCH(hw->port),
2553 I40E_GLPRT_UPRCL(hw->port),
2554 pf->offset_loaded, &os->eth.rx_unicast,
2555 &ns->eth.rx_unicast);
2556 i40e_stat_update_48(hw, I40E_GLPRT_MPRCH(hw->port),
2557 I40E_GLPRT_MPRCL(hw->port),
2558 pf->offset_loaded, &os->eth.rx_multicast,
2559 &ns->eth.rx_multicast);
2560 i40e_stat_update_48(hw, I40E_GLPRT_BPRCH(hw->port),
2561 I40E_GLPRT_BPRCL(hw->port),
2562 pf->offset_loaded, &os->eth.rx_broadcast,
2563 &ns->eth.rx_broadcast);
2564 /* Workaround: CRC size should not be included in byte statistics,
2565 * so subtract ETHER_CRC_LEN from the byte counter for each rx packet.
2567 ns->eth.rx_bytes -= (ns->eth.rx_unicast + ns->eth.rx_multicast +
2568 ns->eth.rx_broadcast) * ETHER_CRC_LEN;
2570 /* Exclude internal rx bytes.
2571 * Workaround: it is possible that I40E_GLV_GORC[H/L] is updated before
2572 * I40E_GLPRT_GORC[H/L], so there is a small window that causes a negative
2574 * The same applies to I40E_GLV_UPRC[H/L], I40E_GLV_MPRC[H/L] and I40E_GLV_BPRC[H/L].
2576 if (ns->eth.rx_bytes < pf->internal_stats.rx_bytes)
2577 ns->eth.rx_bytes = 0;
2579 ns->eth.rx_bytes -= pf->internal_stats.rx_bytes;
2581 if (ns->eth.rx_unicast < pf->internal_stats.rx_unicast)
2582 ns->eth.rx_unicast = 0;
2584 ns->eth.rx_unicast -= pf->internal_stats.rx_unicast;
2586 if (ns->eth.rx_multicast < pf->internal_stats.rx_multicast)
2587 ns->eth.rx_multicast = 0;
2589 ns->eth.rx_multicast -= pf->internal_stats.rx_multicast;
2591 if (ns->eth.rx_broadcast < pf->internal_stats.rx_broadcast)
2592 ns->eth.rx_broadcast = 0;
2594 ns->eth.rx_broadcast -= pf->internal_stats.rx_broadcast;
2596 i40e_stat_update_32(hw, I40E_GLPRT_RDPC(hw->port),
2597 pf->offset_loaded, &os->eth.rx_discards,
2598 &ns->eth.rx_discards);
2599 /* GLPRT_REPC not supported */
2600 /* GLPRT_RMPC not supported */
2601 i40e_stat_update_32(hw, I40E_GLPRT_RUPP(hw->port),
2603 &os->eth.rx_unknown_protocol,
2604 &ns->eth.rx_unknown_protocol);
2605 i40e_stat_update_48(hw, I40E_GLPRT_GOTCH(hw->port),
2606 I40E_GLPRT_GOTCL(hw->port),
2607 pf->offset_loaded, &os->eth.tx_bytes,
2609 i40e_stat_update_48(hw, I40E_GLPRT_UPTCH(hw->port),
2610 I40E_GLPRT_UPTCL(hw->port),
2611 pf->offset_loaded, &os->eth.tx_unicast,
2612 &ns->eth.tx_unicast);
2613 i40e_stat_update_48(hw, I40E_GLPRT_MPTCH(hw->port),
2614 I40E_GLPRT_MPTCL(hw->port),
2615 pf->offset_loaded, &os->eth.tx_multicast,
2616 &ns->eth.tx_multicast);
2617 i40e_stat_update_48(hw, I40E_GLPRT_BPTCH(hw->port),
2618 I40E_GLPRT_BPTCL(hw->port),
2619 pf->offset_loaded, &os->eth.tx_broadcast,
2620 &ns->eth.tx_broadcast);
2621 ns->eth.tx_bytes -= (ns->eth.tx_unicast + ns->eth.tx_multicast +
2622 ns->eth.tx_broadcast) * ETHER_CRC_LEN;
2624 /* Exclude internal tx bytes.
2625 * Workaround: it is possible that I40E_GLV_GOTC[H/L] is updated before
2626 * I40E_GLPRT_GOTC[H/L], so there is a small window that causes a negative
2628 * The same applies to I40E_GLV_UPTC[H/L], I40E_GLV_MPTC[H/L] and I40E_GLV_BPTC[H/L].
2630 if (ns->eth.tx_bytes < pf->internal_stats.tx_bytes)
2631 ns->eth.tx_bytes = 0;
2633 ns->eth.tx_bytes -= pf->internal_stats.tx_bytes;
2635 if (ns->eth.tx_unicast < pf->internal_stats.tx_unicast)
2636 ns->eth.tx_unicast = 0;
2638 ns->eth.tx_unicast -= pf->internal_stats.tx_unicast;
2640 if (ns->eth.tx_multicast < pf->internal_stats.tx_multicast)
2641 ns->eth.tx_multicast = 0;
2643 ns->eth.tx_multicast -= pf->internal_stats.tx_multicast;
2645 if (ns->eth.tx_broadcast < pf->internal_stats.tx_broadcast)
2646 ns->eth.tx_broadcast = 0;
2648 ns->eth.tx_broadcast -= pf->internal_stats.tx_broadcast;
2650 /* GLPRT_TEPC not supported */
2652 /* additional port specific stats */
2653 i40e_stat_update_32(hw, I40E_GLPRT_TDOLD(hw->port),
2654 pf->offset_loaded, &os->tx_dropped_link_down,
2655 &ns->tx_dropped_link_down);
2656 i40e_stat_update_32(hw, I40E_GLPRT_CRCERRS(hw->port),
2657 pf->offset_loaded, &os->crc_errors,
2659 i40e_stat_update_32(hw, I40E_GLPRT_ILLERRC(hw->port),
2660 pf->offset_loaded, &os->illegal_bytes,
2661 &ns->illegal_bytes);
2662 /* GLPRT_ERRBC not supported */
2663 i40e_stat_update_32(hw, I40E_GLPRT_MLFC(hw->port),
2664 pf->offset_loaded, &os->mac_local_faults,
2665 &ns->mac_local_faults);
2666 i40e_stat_update_32(hw, I40E_GLPRT_MRFC(hw->port),
2667 pf->offset_loaded, &os->mac_remote_faults,
2668 &ns->mac_remote_faults);
2669 i40e_stat_update_32(hw, I40E_GLPRT_RLEC(hw->port),
2670 pf->offset_loaded, &os->rx_length_errors,
2671 &ns->rx_length_errors);
2672 i40e_stat_update_32(hw, I40E_GLPRT_LXONRXC(hw->port),
2673 pf->offset_loaded, &os->link_xon_rx,
2675 i40e_stat_update_32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
2676 pf->offset_loaded, &os->link_xoff_rx,
2678 for (i = 0; i < 8; i++) {
2679 i40e_stat_update_32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
2681 &os->priority_xon_rx[i],
2682 &ns->priority_xon_rx[i]);
2683 i40e_stat_update_32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
2685 &os->priority_xoff_rx[i],
2686 &ns->priority_xoff_rx[i]);
2688 i40e_stat_update_32(hw, I40E_GLPRT_LXONTXC(hw->port),
2689 pf->offset_loaded, &os->link_xon_tx,
2691 i40e_stat_update_32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
2692 pf->offset_loaded, &os->link_xoff_tx,
2694 for (i = 0; i < 8; i++) {
2695 i40e_stat_update_32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
2697 &os->priority_xon_tx[i],
2698 &ns->priority_xon_tx[i]);
2699 i40e_stat_update_32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
2701 &os->priority_xoff_tx[i],
2702 &ns->priority_xoff_tx[i]);
2703 i40e_stat_update_32(hw, I40E_GLPRT_RXON2OFFCNT(hw->port, i),
2705 &os->priority_xon_2_xoff[i],
2706 &ns->priority_xon_2_xoff[i]);
2708 i40e_stat_update_48(hw, I40E_GLPRT_PRC64H(hw->port),
2709 I40E_GLPRT_PRC64L(hw->port),
2710 pf->offset_loaded, &os->rx_size_64,
2712 i40e_stat_update_48(hw, I40E_GLPRT_PRC127H(hw->port),
2713 I40E_GLPRT_PRC127L(hw->port),
2714 pf->offset_loaded, &os->rx_size_127,
2716 i40e_stat_update_48(hw, I40E_GLPRT_PRC255H(hw->port),
2717 I40E_GLPRT_PRC255L(hw->port),
2718 pf->offset_loaded, &os->rx_size_255,
2720 i40e_stat_update_48(hw, I40E_GLPRT_PRC511H(hw->port),
2721 I40E_GLPRT_PRC511L(hw->port),
2722 pf->offset_loaded, &os->rx_size_511,
2724 i40e_stat_update_48(hw, I40E_GLPRT_PRC1023H(hw->port),
2725 I40E_GLPRT_PRC1023L(hw->port),
2726 pf->offset_loaded, &os->rx_size_1023,
2728 i40e_stat_update_48(hw, I40E_GLPRT_PRC1522H(hw->port),
2729 I40E_GLPRT_PRC1522L(hw->port),
2730 pf->offset_loaded, &os->rx_size_1522,
2732 i40e_stat_update_48(hw, I40E_GLPRT_PRC9522H(hw->port),
2733 I40E_GLPRT_PRC9522L(hw->port),
2734 pf->offset_loaded, &os->rx_size_big,
2736 i40e_stat_update_32(hw, I40E_GLPRT_RUC(hw->port),
2737 pf->offset_loaded, &os->rx_undersize,
2739 i40e_stat_update_32(hw, I40E_GLPRT_RFC(hw->port),
2740 pf->offset_loaded, &os->rx_fragments,
2742 i40e_stat_update_32(hw, I40E_GLPRT_ROC(hw->port),
2743 pf->offset_loaded, &os->rx_oversize,
2745 i40e_stat_update_32(hw, I40E_GLPRT_RJC(hw->port),
2746 pf->offset_loaded, &os->rx_jabber,
2748 i40e_stat_update_48(hw, I40E_GLPRT_PTC64H(hw->port),
2749 I40E_GLPRT_PTC64L(hw->port),
2750 pf->offset_loaded, &os->tx_size_64,
2752 i40e_stat_update_48(hw, I40E_GLPRT_PTC127H(hw->port),
2753 I40E_GLPRT_PTC127L(hw->port),
2754 pf->offset_loaded, &os->tx_size_127,
2756 i40e_stat_update_48(hw, I40E_GLPRT_PTC255H(hw->port),
2757 I40E_GLPRT_PTC255L(hw->port),
2758 pf->offset_loaded, &os->tx_size_255,
2760 i40e_stat_update_48(hw, I40E_GLPRT_PTC511H(hw->port),
2761 I40E_GLPRT_PTC511L(hw->port),
2762 pf->offset_loaded, &os->tx_size_511,
2764 i40e_stat_update_48(hw, I40E_GLPRT_PTC1023H(hw->port),
2765 I40E_GLPRT_PTC1023L(hw->port),
2766 pf->offset_loaded, &os->tx_size_1023,
2768 i40e_stat_update_48(hw, I40E_GLPRT_PTC1522H(hw->port),
2769 I40E_GLPRT_PTC1522L(hw->port),
2770 pf->offset_loaded, &os->tx_size_1522,
2772 i40e_stat_update_48(hw, I40E_GLPRT_PTC9522H(hw->port),
2773 I40E_GLPRT_PTC9522L(hw->port),
2774 pf->offset_loaded, &os->tx_size_big,
2776 i40e_stat_update_32(hw, I40E_GLQF_PCNT(pf->fdir.match_counter_index),
2778 &os->fd_sb_match, &ns->fd_sb_match);
2779 /* GLPRT_MSPDC not supported */
2780 /* GLPRT_XEC not supported */
2782 pf->offset_loaded = true;
2785 i40e_update_vsi_stats(pf->main_vsi);
2788 /* Get all statistics of a port */
2790 i40e_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
2792 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2793 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2794 struct i40e_hw_port_stats *ns = &pf->stats; /* new stats */
2797 /* Read the hardware registers to refresh the stats values, then copy them into the struct */
2798 i40e_read_stats_registers(pf, hw);
2800 stats->ipackets = ns->eth.rx_unicast +
2801 ns->eth.rx_multicast +
2802 ns->eth.rx_broadcast -
2803 ns->eth.rx_discards -
2804 pf->main_vsi->eth_stats.rx_discards;
2805 stats->opackets = ns->eth.tx_unicast +
2806 ns->eth.tx_multicast +
2807 ns->eth.tx_broadcast;
2808 stats->ibytes = ns->eth.rx_bytes;
2809 stats->obytes = ns->eth.tx_bytes;
2810 stats->oerrors = ns->eth.tx_errors +
2811 pf->main_vsi->eth_stats.tx_errors;
2814 stats->imissed = ns->eth.rx_discards +
2815 pf->main_vsi->eth_stats.rx_discards;
2816 stats->ierrors = ns->crc_errors +
2817 ns->rx_length_errors + ns->rx_undersize +
2818 ns->rx_oversize + ns->rx_fragments + ns->rx_jabber;
2820 PMD_DRV_LOG(DEBUG, "***************** PF stats start *******************");
2821 PMD_DRV_LOG(DEBUG, "rx_bytes: %"PRIu64"", ns->eth.rx_bytes);
2822 PMD_DRV_LOG(DEBUG, "rx_unicast: %"PRIu64"", ns->eth.rx_unicast);
2823 PMD_DRV_LOG(DEBUG, "rx_multicast: %"PRIu64"", ns->eth.rx_multicast);
2824 PMD_DRV_LOG(DEBUG, "rx_broadcast: %"PRIu64"", ns->eth.rx_broadcast);
2825 PMD_DRV_LOG(DEBUG, "rx_discards: %"PRIu64"", ns->eth.rx_discards);
2826 PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
2827 ns->eth.rx_unknown_protocol);
2828 PMD_DRV_LOG(DEBUG, "tx_bytes: %"PRIu64"", ns->eth.tx_bytes);
2829 PMD_DRV_LOG(DEBUG, "tx_unicast: %"PRIu64"", ns->eth.tx_unicast);
2830 PMD_DRV_LOG(DEBUG, "tx_multicast: %"PRIu64"", ns->eth.tx_multicast);
2831 PMD_DRV_LOG(DEBUG, "tx_broadcast: %"PRIu64"", ns->eth.tx_broadcast);
2832 PMD_DRV_LOG(DEBUG, "tx_discards: %"PRIu64"", ns->eth.tx_discards);
2833 PMD_DRV_LOG(DEBUG, "tx_errors: %"PRIu64"", ns->eth.tx_errors);
2835 PMD_DRV_LOG(DEBUG, "tx_dropped_link_down: %"PRIu64"",
2836 ns->tx_dropped_link_down);
2837 PMD_DRV_LOG(DEBUG, "crc_errors: %"PRIu64"", ns->crc_errors);
2838 PMD_DRV_LOG(DEBUG, "illegal_bytes: %"PRIu64"",
2840 PMD_DRV_LOG(DEBUG, "error_bytes: %"PRIu64"", ns->error_bytes);
2841 PMD_DRV_LOG(DEBUG, "mac_local_faults: %"PRIu64"",
2842 ns->mac_local_faults);
2843 PMD_DRV_LOG(DEBUG, "mac_remote_faults: %"PRIu64"",
2844 ns->mac_remote_faults);
2845 PMD_DRV_LOG(DEBUG, "rx_length_errors: %"PRIu64"",
2846 ns->rx_length_errors);
2847 PMD_DRV_LOG(DEBUG, "link_xon_rx: %"PRIu64"", ns->link_xon_rx);
2848 PMD_DRV_LOG(DEBUG, "link_xoff_rx: %"PRIu64"", ns->link_xoff_rx);
2849 for (i = 0; i < 8; i++) {
2850 PMD_DRV_LOG(DEBUG, "priority_xon_rx[%d]: %"PRIu64"",
2851 i, ns->priority_xon_rx[i]);
2852 PMD_DRV_LOG(DEBUG, "priority_xoff_rx[%d]: %"PRIu64"",
2853 i, ns->priority_xoff_rx[i]);
2855 PMD_DRV_LOG(DEBUG, "link_xon_tx: %"PRIu64"", ns->link_xon_tx);
2856 PMD_DRV_LOG(DEBUG, "link_xoff_tx: %"PRIu64"", ns->link_xoff_tx);
2857 for (i = 0; i < 8; i++) {
2858 PMD_DRV_LOG(DEBUG, "priority_xon_tx[%d]: %"PRIu64"",
2859 i, ns->priority_xon_tx[i]);
2860 PMD_DRV_LOG(DEBUG, "priority_xoff_tx[%d]: %"PRIu64"",
2861 i, ns->priority_xoff_tx[i]);
2862 PMD_DRV_LOG(DEBUG, "priority_xon_2_xoff[%d]: %"PRIu64"",
2863 i, ns->priority_xon_2_xoff[i]);
2865 PMD_DRV_LOG(DEBUG, "rx_size_64: %"PRIu64"", ns->rx_size_64);
2866 PMD_DRV_LOG(DEBUG, "rx_size_127: %"PRIu64"", ns->rx_size_127);
2867 PMD_DRV_LOG(DEBUG, "rx_size_255: %"PRIu64"", ns->rx_size_255);
2868 PMD_DRV_LOG(DEBUG, "rx_size_511: %"PRIu64"", ns->rx_size_511);
2869 PMD_DRV_LOG(DEBUG, "rx_size_1023: %"PRIu64"", ns->rx_size_1023);
2870 PMD_DRV_LOG(DEBUG, "rx_size_1522: %"PRIu64"", ns->rx_size_1522);
2871 PMD_DRV_LOG(DEBUG, "rx_size_big: %"PRIu64"", ns->rx_size_big);
2872 PMD_DRV_LOG(DEBUG, "rx_undersize: %"PRIu64"", ns->rx_undersize);
2873 PMD_DRV_LOG(DEBUG, "rx_fragments: %"PRIu64"", ns->rx_fragments);
2874 PMD_DRV_LOG(DEBUG, "rx_oversize: %"PRIu64"", ns->rx_oversize);
2875 PMD_DRV_LOG(DEBUG, "rx_jabber: %"PRIu64"", ns->rx_jabber);
2876 PMD_DRV_LOG(DEBUG, "tx_size_64: %"PRIu64"", ns->tx_size_64);
2877 PMD_DRV_LOG(DEBUG, "tx_size_127: %"PRIu64"", ns->tx_size_127);
2878 PMD_DRV_LOG(DEBUG, "tx_size_255: %"PRIu64"", ns->tx_size_255);
2879 PMD_DRV_LOG(DEBUG, "tx_size_511: %"PRIu64"", ns->tx_size_511);
2880 PMD_DRV_LOG(DEBUG, "tx_size_1023: %"PRIu64"", ns->tx_size_1023);
2881 PMD_DRV_LOG(DEBUG, "tx_size_1522: %"PRIu64"", ns->tx_size_1522);
2882 PMD_DRV_LOG(DEBUG, "tx_size_big: %"PRIu64"", ns->tx_size_big);
2883 PMD_DRV_LOG(DEBUG, "mac_short_packet_dropped: %"PRIu64"",
2884 ns->mac_short_packet_dropped);
2885 PMD_DRV_LOG(DEBUG, "checksum_error: %"PRIu64"",
2886 ns->checksum_error);
2887 PMD_DRV_LOG(DEBUG, "fdir_match: %"PRIu64"", ns->fd_sb_match);
2888 PMD_DRV_LOG(DEBUG, "***************** PF stats end ********************");
2892 /* Reset the statistics */
2894 i40e_dev_stats_reset(struct rte_eth_dev *dev)
2896 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2897 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2899 /* Mark PF and VSI stats to update the offset, aka "reset" */
2900 pf->offset_loaded = false;
2902 pf->main_vsi->offset_loaded = false;
2904 /* read the stats, reading current register values into offset */
2905 i40e_read_stats_registers(pf, hw);
2909 i40e_xstats_calc_num(void)
2911 return I40E_NB_ETH_XSTATS + I40E_NB_HW_PORT_XSTATS +
2912 (I40E_NB_RXQ_PRIO_XSTATS * 8) +
2913 (I40E_NB_TXQ_PRIO_XSTATS * 8);
2916 static int i40e_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
2917 struct rte_eth_xstat_name *xstats_names,
2918 __rte_unused unsigned limit)
2923 if (xstats_names == NULL)
2924 return i40e_xstats_calc_num();
2926 /* Note: limit checked in rte_eth_xstats_names() */
2928 /* Get stats from i40e_eth_stats struct */
2929 for (i = 0; i < I40E_NB_ETH_XSTATS; i++) {
2930 snprintf(xstats_names[count].name,
2931 sizeof(xstats_names[count].name),
2932 "%s", rte_i40e_stats_strings[i].name);
2936 /* Get individual stats from i40e_hw_port struct */
2937 for (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++) {
2938 snprintf(xstats_names[count].name,
2939 sizeof(xstats_names[count].name),
2940 "%s", rte_i40e_hw_port_strings[i].name);
2944 for (i = 0; i < I40E_NB_RXQ_PRIO_XSTATS; i++) {
2945 for (prio = 0; prio < 8; prio++) {
2946 snprintf(xstats_names[count].name,
2947 sizeof(xstats_names[count].name),
2948 "rx_priority%u_%s", prio,
2949 rte_i40e_rxq_prio_strings[i].name);
2954 for (i = 0; i < I40E_NB_TXQ_PRIO_XSTATS; i++) {
2955 for (prio = 0; prio < 8; prio++) {
2956 snprintf(xstats_names[count].name,
2957 sizeof(xstats_names[count].name),
2958 "tx_priority%u_%s", prio,
2959 rte_i40e_txq_prio_strings[i].name);
2967 i40e_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
2970 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2971 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2972 unsigned i, count, prio;
2973 struct i40e_hw_port_stats *hw_stats = &pf->stats;
2975 count = i40e_xstats_calc_num();
2979 i40e_read_stats_registers(pf, hw);
2986 /* Get stats from i40e_eth_stats struct */
2987 for (i = 0; i < I40E_NB_ETH_XSTATS; i++) {
2988 xstats[count].value = *(uint64_t *)(((char *)&hw_stats->eth) +
2989 rte_i40e_stats_strings[i].offset);
2990 xstats[count].id = count;
2994 /* Get individual stats from i40e_hw_port struct */
2995 for (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++) {
2996 xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
2997 rte_i40e_hw_port_strings[i].offset);
2998 xstats[count].id = count;
3002 for (i = 0; i < I40E_NB_RXQ_PRIO_XSTATS; i++) {
3003 for (prio = 0; prio < 8; prio++) {
3004 xstats[count].value =
3005 *(uint64_t *)(((char *)hw_stats) +
3006 rte_i40e_rxq_prio_strings[i].offset +
3007 (sizeof(uint64_t) * prio));
3008 xstats[count].id = count;
3013 for (i = 0; i < I40E_NB_TXQ_PRIO_XSTATS; i++) {
3014 for (prio = 0; prio < 8; prio++) {
3015 xstats[count].value =
3016 *(uint64_t *)(((char *)hw_stats) +
3017 rte_i40e_txq_prio_strings[i].offset +
3018 (sizeof(uint64_t) * prio));
3019 xstats[count].id = count;
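/*
 * Hedged usage sketch (application side, not part of this driver): the
 * names and values filled in above are consumed through the generic
 * ethdev API, e.g.
 *
 *	int n = rte_eth_xstats_get_names(port_id, NULL, 0);
 *	struct rte_eth_xstat_name *names = malloc(n * sizeof(*names));
 *	struct rte_eth_xstat *vals = malloc(n * sizeof(*vals));
 *	rte_eth_xstats_get_names(port_id, names, n);
 *	rte_eth_xstats_get(port_id, vals, n);
 *
 * Passing a NULL array returns the required count, which is why
 * i40e_dev_xstats_get_names() above returns i40e_xstats_calc_num() when
 * xstats_names == NULL.
 */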
3028 i40e_dev_queue_stats_mapping_set(__rte_unused struct rte_eth_dev *dev,
3029 __rte_unused uint16_t queue_id,
3030 __rte_unused uint8_t stat_idx,
3031 __rte_unused uint8_t is_rx)
3033 PMD_INIT_FUNC_TRACE();
3039 i40e_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
3041 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3047 full_ver = hw->nvm.oem_ver;
3048 ver = (u8)(full_ver >> 24);
3049 build = (u16)((full_ver >> 8) & 0xffff);
3050 patch = (u8)(full_ver & 0xff);
3052 ret = snprintf(fw_version, fw_size,
3053 "%d.%d%d 0x%08x %d.%d.%d",
3054 ((hw->nvm.version >> 12) & 0xf),
3055 ((hw->nvm.version >> 4) & 0xff),
3056 (hw->nvm.version & 0xf), hw->nvm.eetrack,
3059 ret += 1; /* add the size of '\0' */
3060 if (fw_size < (u32)ret)
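/*
 * The string assembled above has the shape "<nvm major>.<nvm minor><nvm
 * patch> 0x<eetrack> <oem ver>.<oem build>.<oem patch>", where the OEM
 * fields are unpacked from hw->nvm.oem_ver (ver = bits 31:24, build =
 * bits 23:8, patch = bits 7:0). The "ret += 1" reserves room for
 * snprintf's terminating '\0' when comparing against fw_size.
 */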
3067 i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
3069 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3070 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3071 struct i40e_vsi *vsi = pf->main_vsi;
3072 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
3074 dev_info->pci_dev = pci_dev;
3075 dev_info->max_rx_queues = vsi->nb_qps;
3076 dev_info->max_tx_queues = vsi->nb_qps;
3077 dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
3078 dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
3079 dev_info->max_mac_addrs = vsi->max_macaddrs;
3080 dev_info->max_vfs = pci_dev->max_vfs;
3081 dev_info->rx_offload_capa =
3082 DEV_RX_OFFLOAD_VLAN_STRIP |
3083 DEV_RX_OFFLOAD_QINQ_STRIP |
3084 DEV_RX_OFFLOAD_IPV4_CKSUM |
3085 DEV_RX_OFFLOAD_UDP_CKSUM |
3086 DEV_RX_OFFLOAD_TCP_CKSUM |
3087 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
3088 dev_info->tx_offload_capa =
3089 DEV_TX_OFFLOAD_VLAN_INSERT |
3090 DEV_TX_OFFLOAD_QINQ_INSERT |
3091 DEV_TX_OFFLOAD_IPV4_CKSUM |
3092 DEV_TX_OFFLOAD_UDP_CKSUM |
3093 DEV_TX_OFFLOAD_TCP_CKSUM |
3094 DEV_TX_OFFLOAD_SCTP_CKSUM |
3095 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
3096 DEV_TX_OFFLOAD_TCP_TSO |
3097 DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
3098 DEV_TX_OFFLOAD_GRE_TNL_TSO |
3099 DEV_TX_OFFLOAD_IPIP_TNL_TSO |
3100 DEV_TX_OFFLOAD_GENEVE_TNL_TSO;
3101 dev_info->hash_key_size = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
3103 dev_info->reta_size = pf->hash_lut_size;
3104 dev_info->flow_type_rss_offloads = pf->adapter->flow_types_mask;
3106 dev_info->default_rxconf = (struct rte_eth_rxconf) {
3108 .pthresh = I40E_DEFAULT_RX_PTHRESH,
3109 .hthresh = I40E_DEFAULT_RX_HTHRESH,
3110 .wthresh = I40E_DEFAULT_RX_WTHRESH,
3112 .rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH,
3116 dev_info->default_txconf = (struct rte_eth_txconf) {
3118 .pthresh = I40E_DEFAULT_TX_PTHRESH,
3119 .hthresh = I40E_DEFAULT_TX_HTHRESH,
3120 .wthresh = I40E_DEFAULT_TX_WTHRESH,
3122 .tx_free_thresh = I40E_DEFAULT_TX_FREE_THRESH,
3123 .tx_rs_thresh = I40E_DEFAULT_TX_RSBIT_THRESH,
3124 .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
3125 ETH_TXQ_FLAGS_NOOFFLOADS,
3128 dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
3129 .nb_max = I40E_MAX_RING_DESC,
3130 .nb_min = I40E_MIN_RING_DESC,
3131 .nb_align = I40E_ALIGN_RING_DESC,
3134 dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
3135 .nb_max = I40E_MAX_RING_DESC,
3136 .nb_min = I40E_MIN_RING_DESC,
3137 .nb_align = I40E_ALIGN_RING_DESC,
3138 .nb_seg_max = I40E_TX_MAX_SEG,
3139 .nb_mtu_seg_max = I40E_TX_MAX_MTU_SEG,
3142 if (pf->flags & I40E_FLAG_VMDQ) {
3143 dev_info->max_vmdq_pools = pf->max_nb_vmdq_vsi;
3144 dev_info->vmdq_queue_base = dev_info->max_rx_queues;
3145 dev_info->vmdq_queue_num = pf->vmdq_nb_qps *
3146 pf->max_nb_vmdq_vsi;
3147 dev_info->vmdq_pool_base = I40E_VMDQ_POOL_BASE;
3148 dev_info->max_rx_queues += dev_info->vmdq_queue_num;
3149 dev_info->max_tx_queues += dev_info->vmdq_queue_num;
3152 if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types))
3154 dev_info->speed_capa = ETH_LINK_SPEED_40G;
3155 else if (I40E_PHY_TYPE_SUPPORT_25G(hw->phy.phy_types))
3157 dev_info->speed_capa = ETH_LINK_SPEED_25G;
3160 dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
3164 i40e_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
3166 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3167 struct i40e_vsi *vsi = pf->main_vsi;
3168 PMD_INIT_FUNC_TRACE();
3171 return i40e_vsi_add_vlan(vsi, vlan_id);
3173 return i40e_vsi_delete_vlan(vsi, vlan_id);
3177 i40e_vlan_tpid_set_by_registers(struct rte_eth_dev *dev,
3178 enum rte_vlan_type vlan_type,
3179 uint16_t tpid, int qinq)
3181 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3184 uint16_t reg_id = 3;
3188 if (vlan_type == ETH_VLAN_TYPE_OUTER)
3192 ret = i40e_aq_debug_read_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
3194 if (ret != I40E_SUCCESS) {
3196 "Fail to debug read from I40E_GL_SWT_L2TAGCTRL[%d]",
3201 "Debug read from I40E_GL_SWT_L2TAGCTRL[%d]: 0x%08"PRIx64,
3204 reg_w = reg_r & (~(I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_MASK));
3205 reg_w |= ((uint64_t)tpid << I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT);
3206 if (reg_r == reg_w) {
3207 PMD_DRV_LOG(DEBUG, "No need to write");
3211 ret = i40e_aq_debug_write_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
3213 if (ret != I40E_SUCCESS) {
3215 "Fail to debug write to I40E_GL_SWT_L2TAGCTRL[%d]",
3220 "Debug write 0x%08"PRIx64" to I40E_GL_SWT_L2TAGCTRL[%d]",
3227 i40e_vlan_tpid_set(struct rte_eth_dev *dev,
3228 enum rte_vlan_type vlan_type,
3231 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3232 int qinq = dev->data->dev_conf.rxmode.hw_vlan_extend;
3235 if ((vlan_type != ETH_VLAN_TYPE_INNER &&
3236 vlan_type != ETH_VLAN_TYPE_OUTER) ||
3237 (!qinq && vlan_type == ETH_VLAN_TYPE_INNER)) {
3239 "Unsupported vlan type.");
3242 /* Support for 802.1ad frames was added in NVM API 1.7 */
3243 if (hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) {
3245 if (vlan_type == ETH_VLAN_TYPE_OUTER)
3246 hw->first_tag = rte_cpu_to_le_16(tpid);
3247 else if (vlan_type == ETH_VLAN_TYPE_INNER)
3248 hw->second_tag = rte_cpu_to_le_16(tpid);
3250 if (vlan_type == ETH_VLAN_TYPE_OUTER)
3251 hw->second_tag = rte_cpu_to_le_16(tpid);
3253 ret = i40e_aq_set_switch_config(hw, 0, 0, NULL);
3254 if (ret != I40E_SUCCESS) {
3256 "Set switch config failed aq_err: %d",
3257 hw->aq.asq_last_status);
3261 /* If NVM API < 1.7, keep the register setting */
3262 ret = i40e_vlan_tpid_set_by_registers(dev, vlan_type,
3269 i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
3271 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3272 struct i40e_vsi *vsi = pf->main_vsi;
3274 if (mask & ETH_VLAN_FILTER_MASK) {
3275 if (dev->data->dev_conf.rxmode.hw_vlan_filter)
3276 i40e_vsi_config_vlan_filter(vsi, TRUE);
3278 i40e_vsi_config_vlan_filter(vsi, FALSE);
3281 if (mask & ETH_VLAN_STRIP_MASK) {
3282 /* Enable or disable VLAN stripping */
3283 if (dev->data->dev_conf.rxmode.hw_vlan_strip)
3284 i40e_vsi_config_vlan_stripping(vsi, TRUE);
3286 i40e_vsi_config_vlan_stripping(vsi, FALSE);
3289 if (mask & ETH_VLAN_EXTEND_MASK) {
3290 if (dev->data->dev_conf.rxmode.hw_vlan_extend) {
3291 i40e_vsi_config_double_vlan(vsi, TRUE);
3292 /* Set global registers with default ethertype. */
3293 i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
3295 i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_INNER,
3299 i40e_vsi_config_double_vlan(vsi, FALSE);
3306 i40e_vlan_strip_queue_set(__rte_unused struct rte_eth_dev *dev,
3307 __rte_unused uint16_t queue,
3308 __rte_unused int on)
3310 PMD_INIT_FUNC_TRACE();
3314 i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
3316 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3317 struct i40e_vsi *vsi = pf->main_vsi;
3318 struct rte_eth_dev_data *data = I40E_VSI_TO_DEV_DATA(vsi);
3319 struct i40e_vsi_vlan_pvid_info info;
3321 memset(&info, 0, sizeof(info));
3324 info.config.pvid = pvid;
3326 info.config.reject.tagged =
3327 data->dev_conf.txmode.hw_vlan_reject_tagged;
3328 info.config.reject.untagged =
3329 data->dev_conf.txmode.hw_vlan_reject_untagged;
3332 return i40e_vsi_vlan_pvid_set(vsi, &info);
3336 i40e_dev_led_on(struct rte_eth_dev *dev)
3338 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3339 uint32_t mode = i40e_led_get(hw);
3342 i40e_led_set(hw, 0xf, true); /* 0xf means LED always on */
3348 i40e_dev_led_off(struct rte_eth_dev *dev)
3350 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3351 uint32_t mode = i40e_led_get(hw);
3354 i40e_led_set(hw, 0, false);
3360 i40e_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
3362 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3363 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3365 fc_conf->pause_time = pf->fc_conf.pause_time;
3367 /* Read back from the registers, in case they were modified by another port */
3368 pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] =
3369 I40E_READ_REG(hw, I40E_GLRPB_GHW) >> I40E_KILOSHIFT;
3370 pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] =
3371 I40E_READ_REG(hw, I40E_GLRPB_GLW) >> I40E_KILOSHIFT;
3373 fc_conf->high_water = pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS];
3374 fc_conf->low_water = pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS];
3376 /* Return current mode according to actual setting */
3377 switch (hw->fc.current_mode) {
3379 fc_conf->mode = RTE_FC_FULL;
3381 case I40E_FC_TX_PAUSE:
3382 fc_conf->mode = RTE_FC_TX_PAUSE;
3384 case I40E_FC_RX_PAUSE:
3385 fc_conf->mode = RTE_FC_RX_PAUSE;
3389 fc_conf->mode = RTE_FC_NONE;
3396 i40e_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
3398 uint32_t mflcn_reg, fctrl_reg, reg;
3399 uint32_t max_high_water;
3400 uint8_t i, aq_failure;
3404 enum i40e_fc_mode rte_fcmode_2_i40e_fcmode[] = {
3405 [RTE_FC_NONE] = I40E_FC_NONE,
3406 [RTE_FC_RX_PAUSE] = I40E_FC_RX_PAUSE,
3407 [RTE_FC_TX_PAUSE] = I40E_FC_TX_PAUSE,
3408 [RTE_FC_FULL] = I40E_FC_FULL
3411 /* The high_water field in rte_eth_fc_conf is in kilobyte units */
3413 max_high_water = I40E_RXPBSIZE >> I40E_KILOSHIFT;
3414 if ((fc_conf->high_water > max_high_water) ||
3415 (fc_conf->high_water < fc_conf->low_water)) {
3417 "Invalid high/low water setup value in KB, High_water must be <= %d.",
3422 hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3423 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3424 hw->fc.requested_mode = rte_fcmode_2_i40e_fcmode[fc_conf->mode];
3426 pf->fc_conf.pause_time = fc_conf->pause_time;
3427 pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] = fc_conf->high_water;
3428 pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] = fc_conf->low_water;
3430 PMD_INIT_FUNC_TRACE();
3432 /* All the link flow control related enable/disable register
3433 * configuration is handled by the F/W
3435 err = i40e_set_fc(hw, &aq_failure, true);
3439 if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types)) {
3440 /* Configure flow control refresh threshold,
3441 * the value for stat_tx_pause_refresh_timer[8]
3442 * is used for global pause operation.
3446 I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(8),
3447 pf->fc_conf.pause_time);
3449 /* configure the timer value included in transmitted pause
3451 * the value for stat_tx_pause_quanta[8] is used for global
3454 I40E_WRITE_REG(hw, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(8),
3455 pf->fc_conf.pause_time);
3457 fctrl_reg = I40E_READ_REG(hw,
3458 I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL);
3460 if (fc_conf->mac_ctrl_frame_fwd != 0)
3461 fctrl_reg |= I40E_PRTMAC_FWD_CTRL;
3463 fctrl_reg &= ~I40E_PRTMAC_FWD_CTRL;
3465 I40E_WRITE_REG(hw, I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL,
3468 /* Configure pause time (2 TCs per register) */
3469 reg = (uint32_t)pf->fc_conf.pause_time * (uint32_t)0x00010001;
3470 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS / 2; i++)
3471 I40E_WRITE_REG(hw, I40E_PRTDCB_FCTTVN(i), reg);
3473 /* Configure flow control refresh threshold value */
3474 I40E_WRITE_REG(hw, I40E_PRTDCB_FCRTV,
3475 pf->fc_conf.pause_time / 2);
3477 mflcn_reg = I40E_READ_REG(hw, I40E_PRTDCB_MFLCN);
3479 /* set or clear MFLCN.PMCF & MFLCN.DPF bits
3480 * depending on configuration
3482 if (fc_conf->mac_ctrl_frame_fwd != 0) {
3483 mflcn_reg |= I40E_PRTDCB_MFLCN_PMCF_MASK;
3484 mflcn_reg &= ~I40E_PRTDCB_MFLCN_DPF_MASK;
3486 mflcn_reg &= ~I40E_PRTDCB_MFLCN_PMCF_MASK;
3487 mflcn_reg |= I40E_PRTDCB_MFLCN_DPF_MASK;
3490 I40E_WRITE_REG(hw, I40E_PRTDCB_MFLCN, mflcn_reg);
3493 /* config the water marker both based on the packets and bytes */
3494 I40E_WRITE_REG(hw, I40E_GLRPB_PHW,
3495 (pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS]
3496 << I40E_KILOSHIFT) / I40E_PACKET_AVERAGE_SIZE);
3497 I40E_WRITE_REG(hw, I40E_GLRPB_PLW,
3498 (pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS]
3499 << I40E_KILOSHIFT) / I40E_PACKET_AVERAGE_SIZE);
3500 I40E_WRITE_REG(hw, I40E_GLRPB_GHW,
3501 pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS]
3503 I40E_WRITE_REG(hw, I40E_GLRPB_GLW,
3504 pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS]
3507 I40E_WRITE_FLUSH(hw);
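/*
 * Worked example of the water-mark conversions above (numbers are
 * illustrative only): with fc_conf->high_water = 968 KB, the byte-based
 * global register is written with 968 << I40E_KILOSHIFT = 991232 bytes,
 * while the packet-based register is written with
 * 991232 / I40E_PACKET_AVERAGE_SIZE = 7744 packet credits.
 */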
3513 i40e_priority_flow_ctrl_set(__rte_unused struct rte_eth_dev *dev,
3514 __rte_unused struct rte_eth_pfc_conf *pfc_conf)
3516 PMD_INIT_FUNC_TRACE();
3521 /* Add a MAC address, and update filters */
3523 i40e_macaddr_add(struct rte_eth_dev *dev,
3524 struct ether_addr *mac_addr,
3525 __rte_unused uint32_t index,
3528 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3529 struct i40e_mac_filter_info mac_filter;
3530 struct i40e_vsi *vsi;
3533 /* If VMDQ not enabled or configured, return */
3534 if (pool != 0 && (!(pf->flags & I40E_FLAG_VMDQ) ||
3535 !pf->nb_cfg_vmdq_vsi)) {
3536 PMD_DRV_LOG(ERR, "VMDQ not %s, can't set mac to pool %u",
3537 pf->flags & I40E_FLAG_VMDQ ? "configured" : "enabled",
3542 if (pool > pf->nb_cfg_vmdq_vsi) {
3543 PMD_DRV_LOG(ERR, "Pool number %u invalid. Max pool is %u",
3544 pool, pf->nb_cfg_vmdq_vsi);
3548 rte_memcpy(&mac_filter.mac_addr, mac_addr, ETHER_ADDR_LEN);
3549 if (dev->data->dev_conf.rxmode.hw_vlan_filter)
3550 mac_filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
3552 mac_filter.filter_type = RTE_MAC_PERFECT_MATCH;
3557 vsi = pf->vmdq[pool - 1].vsi;
3559 ret = i40e_vsi_add_mac(vsi, &mac_filter);
3560 if (ret != I40E_SUCCESS) {
3561 PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter");
3567 /* Remove a MAC address, and update filters */
3569 i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
3571 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3572 struct i40e_vsi *vsi;
3573 struct rte_eth_dev_data *data = dev->data;
3574 struct ether_addr *macaddr;
3579 macaddr = &(data->mac_addrs[index]);
3581 pool_sel = dev->data->mac_pool_sel[index];
3583 for (i = 0; i < sizeof(pool_sel) * CHAR_BIT; i++) {
3584 if (pool_sel & (1ULL << i)) {
3588 /* No VMDQ pool enabled or configured */
3589 if (!(pf->flags & I40E_FLAG_VMDQ) ||
3590 (i > pf->nb_cfg_vmdq_vsi)) {
3592 "No VMDQ pool enabled/configured");
3595 vsi = pf->vmdq[i - 1].vsi;
3597 ret = i40e_vsi_delete_mac(vsi, macaddr);
3600 PMD_DRV_LOG(ERR, "Failed to remove MACVLAN filter");
3607 /* Set perfect match or hash match of MAC and VLAN for a VF */
3609 i40e_vf_mac_filter_set(struct i40e_pf *pf,
3610 struct rte_eth_mac_filter *filter,
3614 struct i40e_mac_filter_info mac_filter;
3615 struct ether_addr old_mac;
3616 struct ether_addr *new_mac;
3617 struct i40e_pf_vf *vf = NULL;
3622 PMD_DRV_LOG(ERR, "Invalid PF argument.");
3625 hw = I40E_PF_TO_HW(pf);
3627 if (filter == NULL) {
3628 PMD_DRV_LOG(ERR, "Invalid mac filter argument.");
3632 new_mac = &filter->mac_addr;
3634 if (is_zero_ether_addr(new_mac)) {
3635 PMD_DRV_LOG(ERR, "Invalid ethernet address.");
3639 vf_id = filter->dst_id;
3641 if (vf_id > pf->vf_num - 1 || !pf->vfs) {
3642 PMD_DRV_LOG(ERR, "Invalid argument.");
3645 vf = &pf->vfs[vf_id];
3647 if (add && is_same_ether_addr(new_mac, &(pf->dev_addr))) {
3648 PMD_DRV_LOG(INFO, "Ignore adding permanent MAC address.");
3653 rte_memcpy(&old_mac, hw->mac.addr, ETHER_ADDR_LEN);
3654 rte_memcpy(hw->mac.addr, new_mac->addr_bytes,
3656 rte_memcpy(&mac_filter.mac_addr, &filter->mac_addr,
3659 mac_filter.filter_type = filter->filter_type;
3660 ret = i40e_vsi_add_mac(vf->vsi, &mac_filter);
3661 if (ret != I40E_SUCCESS) {
3662 PMD_DRV_LOG(ERR, "Failed to add MAC filter.");
3665 ether_addr_copy(new_mac, &pf->dev_addr);
3667 rte_memcpy(hw->mac.addr, hw->mac.perm_addr,
3669 ret = i40e_vsi_delete_mac(vf->vsi, &filter->mac_addr);
3670 if (ret != I40E_SUCCESS) {
3671 PMD_DRV_LOG(ERR, "Failed to delete MAC filter.");
3675 /* Clear device address as it has been removed */
3676 if (is_same_ether_addr(&(pf->dev_addr), new_mac))
3677 memset(&pf->dev_addr, 0, sizeof(struct ether_addr));
3683 /* MAC filter handle */
3685 i40e_mac_filter_handle(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
3688 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3689 struct rte_eth_mac_filter *filter;
3690 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
3691 int ret = I40E_NOT_SUPPORTED;
3693 filter = (struct rte_eth_mac_filter *)(arg);
3695 switch (filter_op) {
3696 case RTE_ETH_FILTER_NOP:
3699 case RTE_ETH_FILTER_ADD:
3700 i40e_pf_disable_irq0(hw);
3702 ret = i40e_vf_mac_filter_set(pf, filter, 1);
3703 i40e_pf_enable_irq0(hw);
3705 case RTE_ETH_FILTER_DELETE:
3706 i40e_pf_disable_irq0(hw);
3708 ret = i40e_vf_mac_filter_set(pf, filter, 0);
3709 i40e_pf_enable_irq0(hw);
3712 PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
3713 ret = I40E_ERR_PARAM;
3721 i40e_get_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
3723 struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
3724 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
3731 if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
3732 ret = i40e_aq_get_rss_lut(hw, vsi->vsi_id, TRUE,
3735 PMD_DRV_LOG(ERR, "Failed to get RSS lookup table");
3739 uint32_t *lut_dw = (uint32_t *)lut;
3740 uint16_t i, lut_size_dw = lut_size / 4;
3742 if (vsi->type == I40E_VSI_SRIOV) {
3743 for (i = 0; i < lut_size_dw; i++) {
3744 reg = I40E_VFQF_HLUT1(i, vsi->user_param);
3745 lut_dw[i] = i40e_read_rx_ctl(hw, reg);
3748 for (i = 0; i < lut_size_dw; i++)
3749 lut_dw[i] = I40E_READ_REG(hw,
3758 i40e_set_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
3767 pf = I40E_VSI_TO_PF(vsi);
3768 hw = I40E_VSI_TO_HW(vsi);
3770 if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
3771 ret = i40e_aq_set_rss_lut(hw, vsi->vsi_id, TRUE,
3774 PMD_DRV_LOG(ERR, "Failed to set RSS lookup table");
3778 uint32_t *lut_dw = (uint32_t *)lut;
3779 uint16_t i, lut_size_dw = lut_size / 4;
3781 if (vsi->type == I40E_VSI_SRIOV) {
3782 for (i = 0; i < lut_size_dw; i++)
3785 I40E_VFQF_HLUT1(i, vsi->user_param),
3788 for (i = 0; i < lut_size_dw; i++)
3789 I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i),
3792 I40E_WRITE_FLUSH(hw);
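/*
 * In the register-based paths above, the lookup table is accessed
 * through 32-bit registers, each of which packs four one-byte LUT
 * entries; hence the byte buffer is walked as uint32_t words and the
 * loop bound is lut_size / 4.
 */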
3799 i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
3800 struct rte_eth_rss_reta_entry64 *reta_conf,
3803 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3804 uint16_t i, lut_size = pf->hash_lut_size;
3805 uint16_t idx, shift;
3809 if (reta_size != lut_size ||
3810 reta_size > ETH_RSS_RETA_SIZE_512) {
3812 "The size of hash lookup table configured (%d) doesn't match the number hardware can supported (%d)",
3813 reta_size, lut_size);
3817 lut = rte_zmalloc("i40e_rss_lut", reta_size, 0);
3819 PMD_DRV_LOG(ERR, "No memory can be allocated");
3822 ret = i40e_get_rss_lut(pf->main_vsi, lut, reta_size);
3825 for (i = 0; i < reta_size; i++) {
3826 idx = i / RTE_RETA_GROUP_SIZE;
3827 shift = i % RTE_RETA_GROUP_SIZE;
3828 if (reta_conf[idx].mask & (1ULL << shift))
3829 lut[i] = reta_conf[idx].reta[shift];
3831 ret = i40e_set_rss_lut(pf->main_vsi, lut, reta_size);
3840 i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
3841 struct rte_eth_rss_reta_entry64 *reta_conf,
3844 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3845 uint16_t i, lut_size = pf->hash_lut_size;
3846 uint16_t idx, shift;
3850 if (reta_size != lut_size ||
3851 reta_size > ETH_RSS_RETA_SIZE_512) {
3853 "The size of hash lookup table configured (%d) doesn't match the number hardware can supported (%d)",
3854 reta_size, lut_size);
3858 lut = rte_zmalloc("i40e_rss_lut", reta_size, 0);
3860 PMD_DRV_LOG(ERR, "No memory can be allocated");
3864 ret = i40e_get_rss_lut(pf->main_vsi, lut, reta_size);
3867 for (i = 0; i < reta_size; i++) {
3868 idx = i / RTE_RETA_GROUP_SIZE;
3869 shift = i % RTE_RETA_GROUP_SIZE;
3870 if (reta_conf[idx].mask & (1ULL << shift))
3871 reta_conf[idx].reta[shift] = lut[i];
3881 * i40e_allocate_dma_mem_d - specific memory alloc for shared code (base driver)
3882 * @hw: pointer to the HW structure
3883 * @mem: pointer to mem struct to fill out
3884 * @size: size of memory requested
3885 * @alignment: what to align the allocation to
3887 enum i40e_status_code
3888 i40e_allocate_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
3889 struct i40e_dma_mem *mem,
3893 const struct rte_memzone *mz = NULL;
3894 char z_name[RTE_MEMZONE_NAMESIZE];
3897 return I40E_ERR_PARAM;
3899 snprintf(z_name, sizeof(z_name), "i40e_dma_%"PRIu64, rte_rand());
3900 mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY, 0,
3901 alignment, RTE_PGSIZE_2M);
3903 return I40E_ERR_NO_MEMORY;
3908 mem->zone = (const void *)mz;
3910 "memzone %s allocated with physical address: %"PRIu64,
3913 return I40E_SUCCESS;
3917 * i40e_free_dma_mem_d - specific memory free for shared code (base driver)
3918 * @hw: pointer to the HW structure
3919 * @mem: ptr to mem struct to free
3921 enum i40e_status_code
3922 i40e_free_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
3923 struct i40e_dma_mem *mem)
3926 return I40E_ERR_PARAM;
3929 "memzone %s to be freed with physical address: %"PRIu64,
3930 ((const struct rte_memzone *)mem->zone)->name, mem->pa);
3931 rte_memzone_free((const struct rte_memzone *)mem->zone);
3936 return I40E_SUCCESS;
3940 * i40e_allocate_virt_mem_d - specific memory alloc for shared code (base driver)
3941 * @hw: pointer to the HW structure
3942 * @mem: pointer to mem struct to fill out
3943 * @size: size of memory requested
3945 enum i40e_status_code
3946 i40e_allocate_virt_mem_d(__attribute__((unused)) struct i40e_hw *hw,
3947 struct i40e_virt_mem *mem,
3951 return I40E_ERR_PARAM;
3954 mem->va = rte_zmalloc("i40e", size, 0);
3957 return I40E_SUCCESS;
3959 return I40E_ERR_NO_MEMORY;
3963 * i40e_free_virt_mem_d - specific memory free for shared code (base driver)
3964 * @hw: pointer to the HW structure
3965 * @mem: pointer to mem struct to free
3967 enum i40e_status_code
3968 i40e_free_virt_mem_d(__attribute__((unused)) struct i40e_hw *hw,
3969 struct i40e_virt_mem *mem)
3972 return I40E_ERR_PARAM;
3977 return I40E_SUCCESS;
3981 i40e_init_spinlock_d(struct i40e_spinlock *sp)
3983 rte_spinlock_init(&sp->spinlock);
3987 i40e_acquire_spinlock_d(struct i40e_spinlock *sp)
3989 rte_spinlock_lock(&sp->spinlock);
3993 i40e_release_spinlock_d(struct i40e_spinlock *sp)
3995 rte_spinlock_unlock(&sp->spinlock);
3999 i40e_destroy_spinlock_d(__attribute__((unused)) struct i40e_spinlock *sp)
4005 * Get the hardware capabilities, which will be parsed
4006 * and saved into struct i40e_hw.
4009 i40e_get_cap(struct i40e_hw *hw)
4011 struct i40e_aqc_list_capabilities_element_resp *buf;
4012 uint16_t len, size = 0;
4015 /* Allocate a buffer large enough to hold the response data temporarily */
4016 len = sizeof(struct i40e_aqc_list_capabilities_element_resp) *
4017 I40E_MAX_CAP_ELE_NUM;
4018 buf = rte_zmalloc("i40e", len, 0);
4020 PMD_DRV_LOG(ERR, "Failed to allocate memory");
4021 return I40E_ERR_NO_MEMORY;
4024 /* Get and parse the capabilities, then save them to hw */
4025 ret = i40e_aq_discover_capabilities(hw, buf, len, &size,
4026 i40e_aqc_opc_list_func_capabilities, NULL);
4027 if (ret != I40E_SUCCESS)
4028 PMD_DRV_LOG(ERR, "Failed to discover capabilities");
4030 /* Free the temporary buffer after being used */
4036 #define RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF 4
4037 #define QUEUE_NUM_PER_VF_ARG "queue-num-per-vf"
4038 RTE_PMD_REGISTER_PARAM_STRING(net_i40e, QUEUE_NUM_PER_VF_ARG "=1|2|4|8|16");
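/*
 * Illustrative usage (hypothetical command line, not from this source):
 * the devarg is passed per PCI device on the EAL command line, e.g.
 *
 *   testpmd -w 0000:02:00.0,queue-num-per-vf=8 -- -i
 *
 * Values that are not a power of 2 or that exceed 16 are rejected by
 * the handler below, and the default of 4 is kept.
 */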
4040 static int i40e_pf_parse_vf_queue_number_handler(const char *key,
4048 pf = (struct i40e_pf *)opaque;
4052 num = strtoul(value, &end, 0);
4053 if (errno != 0 || end == value || *end != 0) {
4054 PMD_DRV_LOG(WARNING, "Wrong VF queue number = %s, Now it is "
4055 "kept the value = %hu", value, pf->vf_nb_qp_max);
4059 if (num <= I40E_MAX_QP_NUM_PER_VF && rte_is_power_of_2(num))
4060 pf->vf_nb_qp_max = (uint16_t)num;
4062 /* Return 0 here so that the next valid instance of the same argument is still processed */
4063 PMD_DRV_LOG(WARNING, "Wrong VF queue number = %lu, it must be "
4064 "power of 2 and equal or less than 16 !, Now it is "
4065 "kept the value = %hu", num, pf->vf_nb_qp_max);
4070 static int i40e_pf_config_vf_rxq_number(struct rte_eth_dev *dev)
4072 static const char * const valid_keys[] = {QUEUE_NUM_PER_VF_ARG, NULL};
4073 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4074 struct rte_kvargs *kvlist;
4076 /* set default queue number per VF as 4 */
4077 pf->vf_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF;
4079 if (dev->device->devargs == NULL)
4082 kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
4086 if (rte_kvargs_count(kvlist, QUEUE_NUM_PER_VF_ARG) > 1)
4087 PMD_DRV_LOG(WARNING, "More than one argument \"%s\" and only "
4088 "the first invalid or last valid one is used !",
4089 QUEUE_NUM_PER_VF_ARG);
4091 rte_kvargs_process(kvlist, QUEUE_NUM_PER_VF_ARG,
4092 i40e_pf_parse_vf_queue_number_handler, pf);
4094 rte_kvargs_free(kvlist);
4100 i40e_pf_parameter_init(struct rte_eth_dev *dev)
4102 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4103 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4104 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
4105 uint16_t qp_count = 0, vsi_count = 0;
4107 if (pci_dev->max_vfs && !hw->func_caps.sr_iov_1_1) {
4108 PMD_INIT_LOG(ERR, "HW configuration doesn't support SRIOV");
4112 i40e_pf_config_vf_rxq_number(dev);
4114 /* Add the parameter init for LFC */
4115 pf->fc_conf.pause_time = I40E_DEFAULT_PAUSE_TIME;
4116 pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] = I40E_DEFAULT_HIGH_WATER;
4117 pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] = I40E_DEFAULT_LOW_WATER;
4119 pf->flags = I40E_FLAG_HEADER_SPLIT_DISABLED;
4120 pf->max_num_vsi = hw->func_caps.num_vsis;
4121 pf->lan_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_PF;
4122 pf->vmdq_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
4124 /* FDir queue/VSI allocation */
4125 pf->fdir_qp_offset = 0;
4126 if (hw->func_caps.fd) {
4127 pf->flags |= I40E_FLAG_FDIR;
4128 pf->fdir_nb_qps = I40E_DEFAULT_QP_NUM_FDIR;
4130 pf->fdir_nb_qps = 0;
4132 qp_count += pf->fdir_nb_qps;
4135 /* LAN queue/VSI allocation */
4136 pf->lan_qp_offset = pf->fdir_qp_offset + pf->fdir_nb_qps;
4137 if (!hw->func_caps.rss) {
4140 pf->flags |= I40E_FLAG_RSS;
4141 if (hw->mac.type == I40E_MAC_X722)
4142 pf->flags |= I40E_FLAG_RSS_AQ_CAPABLE;
4143 pf->lan_nb_qps = pf->lan_nb_qp_max;
4145 qp_count += pf->lan_nb_qps;
4148 /* VF queue/VSI allocation */
4149 pf->vf_qp_offset = pf->lan_qp_offset + pf->lan_nb_qps;
4150 if (hw->func_caps.sr_iov_1_1 && pci_dev->max_vfs) {
4151 pf->flags |= I40E_FLAG_SRIOV;
4152 pf->vf_nb_qps = pf->vf_nb_qp_max;
4153 pf->vf_num = pci_dev->max_vfs;
4155 "%u VF VSIs, %u queues per VF VSI, in total %u queues",
4156 pf->vf_num, pf->vf_nb_qps, pf->vf_nb_qps * pf->vf_num);
4161 qp_count += pf->vf_nb_qps * pf->vf_num;
4162 vsi_count += pf->vf_num;
4164 /* VMDq queue/VSI allocation */
4165 pf->vmdq_qp_offset = pf->vf_qp_offset + pf->vf_nb_qps * pf->vf_num;
4166 pf->vmdq_nb_qps = 0;
4167 pf->max_nb_vmdq_vsi = 0;
4168 if (hw->func_caps.vmdq) {
4169 if (qp_count < hw->func_caps.num_tx_qp &&
4170 vsi_count < hw->func_caps.num_vsis) {
4171 pf->max_nb_vmdq_vsi = (hw->func_caps.num_tx_qp -
4172 qp_count) / pf->vmdq_nb_qp_max;
4174 /* Limit the maximum number of VMDq vsi to the maximum
4175 * ethdev can support
4177 pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi,
4178 hw->func_caps.num_vsis - vsi_count);
4179 pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi,
4181 if (pf->max_nb_vmdq_vsi) {
4182 pf->flags |= I40E_FLAG_VMDQ;
4183 pf->vmdq_nb_qps = pf->vmdq_nb_qp_max;
4185 "%u VMDQ VSIs, %u queues per VMDQ VSI, in total %u queues",
4186 pf->max_nb_vmdq_vsi, pf->vmdq_nb_qps,
4187 pf->vmdq_nb_qps * pf->max_nb_vmdq_vsi);
4190 "No enough queues left for VMDq");
4193 PMD_DRV_LOG(INFO, "No queue or VSI left for VMDq");
4196 qp_count += pf->vmdq_nb_qps * pf->max_nb_vmdq_vsi;
4197 vsi_count += pf->max_nb_vmdq_vsi;
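/*
 * Budget example under assumed capabilities (illustrative numbers
 * only): with num_tx_qp = 1536, 1 FDIR queue, 64 LAN queues and
 * 32 VFs at 4 queues each, qp_count = 1 + 64 + 128 = 193 before the
 * VMDq share, and the queues left over are divided among VMDq VSIs.
 */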
4199 if (hw->func_caps.dcb)
4200 pf->flags |= I40E_FLAG_DCB;
4202 if (qp_count > hw->func_caps.num_tx_qp) {
4204 "Failed to allocate %u queues, which exceeds the hardware maximum %u",
4205 qp_count, hw->func_caps.num_tx_qp);
4208 if (vsi_count > hw->func_caps.num_vsis) {
4210 "Failed to allocate %u VSIs, which exceeds the hardware maximum %u",
4211 vsi_count, hw->func_caps.num_vsis);
4219 i40e_pf_get_switch_config(struct i40e_pf *pf)
4221 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4222 struct i40e_aqc_get_switch_config_resp *switch_config;
4223 struct i40e_aqc_switch_config_element_resp *element;
4224 uint16_t start_seid = 0, num_reported;
4227 switch_config = (struct i40e_aqc_get_switch_config_resp *)\
4228 rte_zmalloc("i40e", I40E_AQ_LARGE_BUF, 0);
4229 if (!switch_config) {
4230 PMD_DRV_LOG(ERR, "Failed to allocated memory");
4234 /* Get the switch configurations */
4235 ret = i40e_aq_get_switch_config(hw, switch_config,
4236 I40E_AQ_LARGE_BUF, &start_seid, NULL);
4237 if (ret != I40E_SUCCESS) {
4238 PMD_DRV_LOG(ERR, "Failed to get switch configurations");
4241 num_reported = rte_le_to_cpu_16(switch_config->header.num_reported);
4242 if (num_reported != 1) { /* The number should be 1 */
4243 PMD_DRV_LOG(ERR, "Wrong number of switch config reported");
4247 /* Parse the switch configuration elements */
4248 element = &(switch_config->element[0]);
4249 if (element->element_type == I40E_SWITCH_ELEMENT_TYPE_VSI) {
4250 pf->mac_seid = rte_le_to_cpu_16(element->uplink_seid);
4251 pf->main_vsi_seid = rte_le_to_cpu_16(element->seid);
4253 PMD_DRV_LOG(INFO, "Unknown element type");
4256 rte_free(switch_config);
4262 i40e_res_pool_init(struct i40e_res_pool_info *pool, uint32_t base,
4265 struct pool_entry *entry;
4267 if (pool == NULL || num == 0)
4270 entry = rte_zmalloc("i40e", sizeof(*entry), 0);
4271 if (entry == NULL) {
4272 PMD_DRV_LOG(ERR, "Failed to allocate memory for resource pool");
4276 /* Initialize the queue heap */
4277 pool->num_free = num;
4278 pool->num_alloc = 0;
4280 LIST_INIT(&pool->alloc_list);
4281 LIST_INIT(&pool->free_list);
4283 /* Initialize element */
4287 LIST_INSERT_HEAD(&pool->free_list, entry, next);
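/*
 * After initialization the free list holds a single entry covering
 * the whole pool as base-relative offsets [0, num);
 * i40e_res_pool_alloc() below carves best-fit pieces out of it and
 * i40e_res_pool_free() merges them back.
 */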
4292 i40e_res_pool_destroy(struct i40e_res_pool_info *pool)
4294 struct pool_entry *entry, *next_entry;
4299 for (entry = LIST_FIRST(&pool->alloc_list);
4300 entry && (next_entry = LIST_NEXT(entry, next), 1);
4301 entry = next_entry) {
4302 LIST_REMOVE(entry, next);
4306 for (entry = LIST_FIRST(&pool->free_list);
4307 entry && (next_entry = LIST_NEXT(entry, next), 1);
4308 entry = next_entry) {
4309 LIST_REMOVE(entry, next);
4314 pool->num_alloc = 0;
4316 LIST_INIT(&pool->alloc_list);
4317 LIST_INIT(&pool->free_list);
4321 i40e_res_pool_free(struct i40e_res_pool_info *pool,
4324 struct pool_entry *entry, *next, *prev, *valid_entry = NULL;
4325 uint32_t pool_offset;
4329 PMD_DRV_LOG(ERR, "Invalid parameter");
4333 pool_offset = base - pool->base;
4334 /* Lookup in alloc list */
4335 LIST_FOREACH(entry, &pool->alloc_list, next) {
4336 if (entry->base == pool_offset) {
4337 valid_entry = entry;
4338 LIST_REMOVE(entry, next);
4343 /* Not found, return */
4344 if (valid_entry == NULL) {
4345 PMD_DRV_LOG(ERR, "Failed to find entry");
4350 * Found it; move it to the free list and try to merge.
4351 * Keep the free list sorted by queue base to make merging easier.
4352 * Find the adjacent prev and next entries.
4355 LIST_FOREACH(entry, &pool->free_list, next) {
4356 if (entry->base > valid_entry->base) {
4364 /* Try to merge with next one */
4366 /* Merge with next one */
4367 if (valid_entry->base + valid_entry->len == next->base) {
4368 next->base = valid_entry->base;
4369 next->len += valid_entry->len;
4370 rte_free(valid_entry);
4377 /* Merge with previous one */
4378 if (prev->base + prev->len == valid_entry->base) {
4379 prev->len += valid_entry->len;
4380 /* If it merged with the next one, remove the next node */
4382 LIST_REMOVE(valid_entry, next);
4383 rte_free(valid_entry);
4385 rte_free(valid_entry);
4391 /* No adjacent entry to merge with; just insert */
4394 LIST_INSERT_AFTER(prev, valid_entry, next);
4395 else if (next != NULL)
4396 LIST_INSERT_BEFORE(next, valid_entry, next);
4397 else /* It's empty list, insert to head */
4398 LIST_INSERT_HEAD(&pool->free_list, valid_entry, next);
4401 pool->num_free += valid_entry->len;
4402 pool->num_alloc -= valid_entry->len;
4408 i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
4411 struct pool_entry *entry, *valid_entry;
4413 if (pool == NULL || num == 0) {
4414 PMD_DRV_LOG(ERR, "Invalid parameter");
4418 if (pool->num_free < num) {
4419 PMD_DRV_LOG(ERR, "No resource. ask:%u, available:%u",
4420 num, pool->num_free);
4425 /* Look up the free list and find the best-fit entry */
4426 LIST_FOREACH(entry, &pool->free_list, next) {
4427 if (entry->len >= num) {
4429 if (entry->len == num) {
4430 valid_entry = entry;
4433 if (valid_entry == NULL || valid_entry->len > entry->len)
4434 valid_entry = entry;
4438 /* No entry found that satisfies the request, return */
4439 if (valid_entry == NULL) {
4440 PMD_DRV_LOG(ERR, "No valid entry found");
4444 * The entry has exactly as many queues as requested;
4445 * remove it from the free list.
4447 if (valid_entry->len == num) {
4448 LIST_REMOVE(valid_entry, next);
4451 * The entry has more queues than requested; create a new
4452 * entry for the alloc list and shrink the base and length
4453 * of the one left in the free list.
4455 entry = rte_zmalloc("res_pool", sizeof(*entry), 0);
4456 if (entry == NULL) {
4458 "Failed to allocate memory for resource pool");
4461 entry->base = valid_entry->base;
4463 valid_entry->base += num;
4464 valid_entry->len -= num;
4465 valid_entry = entry;
4468 /* Insert it into alloc list, not sorted */
4469 LIST_INSERT_HEAD(&pool->alloc_list, valid_entry, next);
4471 pool->num_free -= valid_entry->len;
4472 pool->num_alloc += valid_entry->len;
4474 return valid_entry->base + pool->base;
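/*
 * Usage sketch (mirrors how the driver pairs these calls; error
 * handling abbreviated):
 *
 *   int base = i40e_res_pool_alloc(&pf->qp_pool, vsi->nb_qps);
 *   if (base < 0)
 *       return base;           // no contiguous range available
 *   vsi->base_queue = base;    // absolute index: pool->base + offset
 *   ...
 *   i40e_res_pool_free(&pf->qp_pool, vsi->base_queue);
 */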
4478 * bitmap_is_subset - Check whether src2 is subset of src1
4481 bitmap_is_subset(uint8_t src1, uint8_t src2)
4483 return !((src1 ^ src2) & src2);
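/*
 * Worked example: src1 = 0x7 (TC0-TC2), src2 = 0x5 (TC0, TC2):
 * (src1 ^ src2) & src2 = 0x2 & 0x5 = 0, so src2 is a subset.
 * With src2 = 0x9: (0x7 ^ 0x9) & 0x9 = 0xE & 0x9 = 0x8 != 0,
 * so src2 is not a subset.
 */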
4486 static enum i40e_status_code
4487 validate_tcmap_parameter(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
4489 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
4491 /* If DCB is not supported, only default TC is supported */
4492 if (!hw->func_caps.dcb && enabled_tcmap != I40E_DEFAULT_TCMAP) {
4493 PMD_DRV_LOG(ERR, "DCB is not enabled, only TC0 is supported");
4494 return I40E_NOT_SUPPORTED;
4497 if (!bitmap_is_subset(hw->func_caps.enabled_tcmap, enabled_tcmap)) {
4499 "Enabled TC map 0x%x not applicable to HW support 0x%x",
4500 hw->func_caps.enabled_tcmap, enabled_tcmap);
4501 return I40E_NOT_SUPPORTED;
4503 return I40E_SUCCESS;
4507 i40e_vsi_vlan_pvid_set(struct i40e_vsi *vsi,
4508 struct i40e_vsi_vlan_pvid_info *info)
4511 struct i40e_vsi_context ctxt;
4512 uint8_t vlan_flags = 0;
4515 if (vsi == NULL || info == NULL) {
4516 PMD_DRV_LOG(ERR, "invalid parameters");
4517 return I40E_ERR_PARAM;
4521 vsi->info.pvid = info->config.pvid;
4523 * If insert pvid is enabled, only tagged pkts are
4524 * allowed to be sent out.
4526 vlan_flags |= I40E_AQ_VSI_PVLAN_INSERT_PVID |
4527 I40E_AQ_VSI_PVLAN_MODE_TAGGED;
4530 if (info->config.reject.tagged == 0)
4531 vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_TAGGED;
4533 if (info->config.reject.untagged == 0)
4534 vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
4536 vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_INSERT_PVID |
4537 I40E_AQ_VSI_PVLAN_MODE_MASK);
4538 vsi->info.port_vlan_flags |= vlan_flags;
4539 vsi->info.valid_sections =
4540 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
4541 memset(&ctxt, 0, sizeof(ctxt));
4542 rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
4543 ctxt.seid = vsi->seid;
4545 hw = I40E_VSI_TO_HW(vsi);
4546 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
4547 if (ret != I40E_SUCCESS)
4548 PMD_DRV_LOG(ERR, "Failed to update VSI params");
4554 i40e_vsi_update_tc_bandwidth(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
4556 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
4558 struct i40e_aqc_configure_vsi_tc_bw_data tc_bw_data;
4560 ret = validate_tcmap_parameter(vsi, enabled_tcmap);
4561 if (ret != I40E_SUCCESS)
4565 PMD_DRV_LOG(ERR, "seid not valid");
4569 memset(&tc_bw_data, 0, sizeof(tc_bw_data));
4570 tc_bw_data.tc_valid_bits = enabled_tcmap;
4571 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
4572 tc_bw_data.tc_bw_credits[i] =
4573 (enabled_tcmap & (1 << i)) ? 1 : 0;
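/*
 * For example, enabled_tcmap = 0x3 (TC0 and TC1) yields
 * tc_bw_credits = {1, 1, 0, ...}, i.e. equal relative weight
 * for the two enabled traffic classes.
 */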
4575 ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &tc_bw_data, NULL);
4576 if (ret != I40E_SUCCESS) {
4577 PMD_DRV_LOG(ERR, "Failed to configure TC BW");
4581 rte_memcpy(vsi->info.qs_handle, tc_bw_data.qs_handles,
4582 sizeof(vsi->info.qs_handle));
4583 return I40E_SUCCESS;
4586 static enum i40e_status_code
4587 i40e_vsi_config_tc_queue_mapping(struct i40e_vsi *vsi,
4588 struct i40e_aqc_vsi_properties_data *info,
4589 uint8_t enabled_tcmap)
4591 enum i40e_status_code ret;
4592 int i, total_tc = 0;
4593 uint16_t qpnum_per_tc, bsf, qp_idx;
4595 ret = validate_tcmap_parameter(vsi, enabled_tcmap);
4596 if (ret != I40E_SUCCESS)
4599 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
4600 if (enabled_tcmap & (1 << i))
4604 vsi->enabled_tc = enabled_tcmap;
4606 /* Number of queues per enabled TC */
4607 qpnum_per_tc = i40e_align_floor(vsi->nb_qps / total_tc);
4608 qpnum_per_tc = RTE_MIN(qpnum_per_tc, I40E_MAX_Q_PER_TC);
4609 bsf = rte_bsf32(qpnum_per_tc);
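/*
 * Example of the encoding used below: with vsi->nb_qps = 16 and two
 * enabled TCs, qpnum_per_tc = 8 and bsf = 3, i.e. log2 of the per-TC
 * queue count (valid because i40e_align_floor() rounds down to a
 * power of 2); tc_mapping packs the queue offset and this size.
 */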
4611 /* Adjust the queue number to actual queues that can be applied */
4612 if (!(vsi->type == I40E_VSI_MAIN && total_tc == 1))
4613 vsi->nb_qps = qpnum_per_tc * total_tc;
4616 * Configure the TC and queue mapping parameters: for each enabled
4617 * TC, allocate qpnum_per_tc queues to that traffic class; disabled
4618 * TCs are served by the default queue.
4621 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4622 if (vsi->enabled_tc & (1 << i)) {
4623 info->tc_mapping[i] = rte_cpu_to_le_16((qp_idx <<
4624 I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
4625 (bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
4626 qp_idx += qpnum_per_tc;
4628 info->tc_mapping[i] = 0;
4631 /* Associate queue number with VSI */
4632 if (vsi->type == I40E_VSI_SRIOV) {
4633 info->mapping_flags |=
4634 rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
4635 for (i = 0; i < vsi->nb_qps; i++)
4636 info->queue_mapping[i] =
4637 rte_cpu_to_le_16(vsi->base_queue + i);
4639 info->mapping_flags |=
4640 rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
4641 info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
4643 info->valid_sections |=
4644 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
4646 return I40E_SUCCESS;
4650 i40e_veb_release(struct i40e_veb *veb)
4652 struct i40e_vsi *vsi;
4658 if (!TAILQ_EMPTY(&veb->head)) {
4659 PMD_DRV_LOG(ERR, "VEB still has VSI attached, can't remove");
4662 /* associate_vsi field is NULL for floating VEB */
4663 if (veb->associate_vsi != NULL) {
4664 vsi = veb->associate_vsi;
4665 hw = I40E_VSI_TO_HW(vsi);
4667 vsi->uplink_seid = veb->uplink_seid;
4670 veb->associate_pf->main_vsi->floating_veb = NULL;
4671 hw = I40E_VSI_TO_HW(veb->associate_pf->main_vsi);
4674 i40e_aq_delete_element(hw, veb->seid, NULL);
4676 return I40E_SUCCESS;
4680 static struct i40e_veb *
4681 i40e_veb_setup(struct i40e_pf *pf, struct i40e_vsi *vsi)
4683 struct i40e_veb *veb;
4689 "veb setup failed, associated PF shouldn't null");
4692 hw = I40E_PF_TO_HW(pf);
4694 veb = rte_zmalloc("i40e_veb", sizeof(struct i40e_veb), 0);
4696 PMD_DRV_LOG(ERR, "Failed to allocate memory for veb");
4700 veb->associate_vsi = vsi;
4701 veb->associate_pf = pf;
4702 TAILQ_INIT(&veb->head);
4703 veb->uplink_seid = vsi ? vsi->uplink_seid : 0;
4705 /* create floating veb if vsi is NULL */
4707 ret = i40e_aq_add_veb(hw, veb->uplink_seid, vsi->seid,
4708 I40E_DEFAULT_TCMAP, false,
4709 &veb->seid, false, NULL);
4711 ret = i40e_aq_add_veb(hw, 0, 0, I40E_DEFAULT_TCMAP,
4712 true, &veb->seid, false, NULL);
4715 if (ret != I40E_SUCCESS) {
4716 PMD_DRV_LOG(ERR, "Add veb failed, aq_err: %d",
4717 hw->aq.asq_last_status);
4720 veb->enabled_tc = I40E_DEFAULT_TCMAP;
4722 /* get statistics index */
4723 ret = i40e_aq_get_veb_parameters(hw, veb->seid, NULL, NULL,
4724 &veb->stats_idx, NULL, NULL, NULL);
4725 if (ret != I40E_SUCCESS) {
4726 PMD_DRV_LOG(ERR, "Get veb statistics index failed, aq_err: %d",
4727 hw->aq.asq_last_status);
4730 /* Get VEB bandwidth, to be implemented */
4731 /* The associated VSI now binds to the VEB; set its uplink to this VEB */
4733 vsi->uplink_seid = veb->seid;
4742 i40e_vsi_release(struct i40e_vsi *vsi)
4746 struct i40e_vsi_list *vsi_list;
4749 struct i40e_mac_filter *f;
4750 uint16_t user_param;
4753 return I40E_SUCCESS;
4758 user_param = vsi->user_param;
4760 pf = I40E_VSI_TO_PF(vsi);
4761 hw = I40E_VSI_TO_HW(vsi);
4763 /* The VSI has children attached; release the children first */
4765 TAILQ_FOREACH_SAFE(vsi_list, &vsi->veb->head, list, temp) {
4766 if (i40e_vsi_release(vsi_list->vsi) != I40E_SUCCESS)
4769 i40e_veb_release(vsi->veb);
4772 if (vsi->floating_veb) {
4773 TAILQ_FOREACH_SAFE(vsi_list, &vsi->floating_veb->head, list, temp) {
4774 if (i40e_vsi_release(vsi_list->vsi) != I40E_SUCCESS)
4779 /* Remove all macvlan filters of the VSI */
4780 i40e_vsi_remove_all_macvlan_filter(vsi);
4781 TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp)
4784 if (vsi->type != I40E_VSI_MAIN &&
4785 ((vsi->type != I40E_VSI_SRIOV) ||
4786 !pf->floating_veb_list[user_param])) {
4787 /* Remove vsi from parent's sibling list */
4788 if (vsi->parent_vsi == NULL || vsi->parent_vsi->veb == NULL) {
4789 PMD_DRV_LOG(ERR, "VSI's parent VSI is NULL");
4790 return I40E_ERR_PARAM;
4792 TAILQ_REMOVE(&vsi->parent_vsi->veb->head,
4793 &vsi->sib_vsi_list, list);
4795 /* Remove all switch element of the VSI */
4796 ret = i40e_aq_delete_element(hw, vsi->seid, NULL);
4797 if (ret != I40E_SUCCESS)
4798 PMD_DRV_LOG(ERR, "Failed to delete element");
4801 if ((vsi->type == I40E_VSI_SRIOV) &&
4802 pf->floating_veb_list[user_param]) {
4803 /* Remove vsi from parent's sibling list */
4804 if (vsi->parent_vsi == NULL ||
4805 vsi->parent_vsi->floating_veb == NULL) {
4806 PMD_DRV_LOG(ERR, "VSI's parent VSI is NULL");
4807 return I40E_ERR_PARAM;
4809 TAILQ_REMOVE(&vsi->parent_vsi->floating_veb->head,
4810 &vsi->sib_vsi_list, list);
4812 /* Remove all switch element of the VSI */
4813 ret = i40e_aq_delete_element(hw, vsi->seid, NULL);
4814 if (ret != I40E_SUCCESS)
4815 PMD_DRV_LOG(ERR, "Failed to delete element");
4818 i40e_res_pool_free(&pf->qp_pool, vsi->base_queue);
4820 if (vsi->type != I40E_VSI_SRIOV)
4821 i40e_res_pool_free(&pf->msix_pool, vsi->msix_intr);
4824 return I40E_SUCCESS;
4828 i40e_update_default_filter_setting(struct i40e_vsi *vsi)
4830 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
4831 struct i40e_aqc_remove_macvlan_element_data def_filter;
4832 struct i40e_mac_filter_info filter;
4835 if (vsi->type != I40E_VSI_MAIN)
4836 return I40E_ERR_CONFIG;
4837 memset(&def_filter, 0, sizeof(def_filter));
4838 rte_memcpy(def_filter.mac_addr, hw->mac.perm_addr,
4840 def_filter.vlan_tag = 0;
4841 def_filter.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
4842 I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
4843 ret = i40e_aq_remove_macvlan(hw, vsi->seid, &def_filter, 1, NULL);
4844 if (ret != I40E_SUCCESS) {
4845 struct i40e_mac_filter *f;
4846 struct ether_addr *mac;
4849 "Cannot remove the default macvlan filter");
4850 /* Need to add the permanent MAC into the MAC list */
4851 f = rte_zmalloc("macv_filter", sizeof(*f), 0);
4853 PMD_DRV_LOG(ERR, "failed to allocate memory");
4854 return I40E_ERR_NO_MEMORY;
4856 mac = &f->mac_info.mac_addr;
4857 rte_memcpy(&mac->addr_bytes, hw->mac.perm_addr,
4859 f->mac_info.filter_type = RTE_MACVLAN_PERFECT_MATCH;
4860 TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
4865 rte_memcpy(&filter.mac_addr,
4866 (struct ether_addr *)(hw->mac.perm_addr), ETH_ADDR_LEN);
4867 filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
4868 return i40e_vsi_add_mac(vsi, &filter);
4872 * i40e_vsi_get_bw_config - Query VSI BW Information
4873 * @vsi: the VSI to be queried
4875 * Returns 0 on success, negative value on failure
4877 static enum i40e_status_code
4878 i40e_vsi_get_bw_config(struct i40e_vsi *vsi)
4880 struct i40e_aqc_query_vsi_bw_config_resp bw_config;
4881 struct i40e_aqc_query_vsi_ets_sla_config_resp ets_sla_config;
4882 struct i40e_hw *hw = &vsi->adapter->hw;
4887 memset(&bw_config, 0, sizeof(bw_config));
4888 ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
4889 if (ret != I40E_SUCCESS) {
4890 PMD_DRV_LOG(ERR, "VSI failed to get bandwidth configuration %u",
4891 hw->aq.asq_last_status);
4895 memset(&ets_sla_config, 0, sizeof(ets_sla_config));
4896 ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid,
4897 &ets_sla_config, NULL);
4898 if (ret != I40E_SUCCESS) {
4900 "VSI failed to get TC bandwdith configuration %u",
4901 hw->aq.asq_last_status);
4905 /* store and print out BW info */
4906 vsi->bw_info.bw_limit = rte_le_to_cpu_16(bw_config.port_bw_limit);
4907 vsi->bw_info.bw_max = bw_config.max_bw;
4908 PMD_DRV_LOG(DEBUG, "VSI bw limit:%u", vsi->bw_info.bw_limit);
4909 PMD_DRV_LOG(DEBUG, "VSI max_bw:%u", vsi->bw_info.bw_max);
4910 bw_max = rte_le_to_cpu_16(ets_sla_config.tc_bw_max[0]) |
4911 (rte_le_to_cpu_16(ets_sla_config.tc_bw_max[1]) <<
4913 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4914 vsi->bw_info.bw_ets_share_credits[i] =
4915 ets_sla_config.share_credits[i];
4916 vsi->bw_info.bw_ets_credits[i] =
4917 rte_le_to_cpu_16(ets_sla_config.credits[i]);
4918 /* 4 bits per TC, 4th bit is reserved */
4919 vsi->bw_info.bw_ets_max[i] =
4920 (uint8_t)((bw_max >> (i * I40E_4_BIT_WIDTH)) &
4921 RTE_LEN2MASK(3, uint8_t));
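/* e.g. for TC2 the value sits at bits 8..10 of bw_max (bit 11 is reserved) */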
4922 PMD_DRV_LOG(DEBUG, "\tVSI TC%u:share credits %u", i,
4923 vsi->bw_info.bw_ets_share_credits[i]);
4924 PMD_DRV_LOG(DEBUG, "\tVSI TC%u:credits %u", i,
4925 vsi->bw_info.bw_ets_credits[i]);
4926 PMD_DRV_LOG(DEBUG, "\tVSI TC%u: max credits: %u", i,
4927 vsi->bw_info.bw_ets_max[i]);
4930 return I40E_SUCCESS;
4933 /* i40e_enable_pf_lb
4934 * @pf: pointer to the pf structure
4936 * allow loopback on pf
4939 i40e_enable_pf_lb(struct i40e_pf *pf)
4941 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4942 struct i40e_vsi_context ctxt;
4945 /* Use the FW API if FW >= v5.0 */
4946 if (hw->aq.fw_maj_ver < 5) {
4947 PMD_INIT_LOG(ERR, "FW < v5.0, cannot enable loopback");
4951 memset(&ctxt, 0, sizeof(ctxt));
4952 ctxt.seid = pf->main_vsi_seid;
4953 ctxt.pf_num = hw->pf_id;
4954 ret = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
4956 PMD_DRV_LOG(ERR, "cannot get pf vsi config, err %d, aq_err %d",
4957 ret, hw->aq.asq_last_status);
4960 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
4961 ctxt.info.valid_sections =
4962 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
4963 ctxt.info.switch_id |=
4964 rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
4966 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
4968 PMD_DRV_LOG(ERR, "update vsi switch failed, aq_err=%d",
4969 hw->aq.asq_last_status);
4974 i40e_vsi_setup(struct i40e_pf *pf,
4975 enum i40e_vsi_type type,
4976 struct i40e_vsi *uplink_vsi,
4977 uint16_t user_param)
4979 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4980 struct i40e_vsi *vsi;
4981 struct i40e_mac_filter_info filter;
4983 struct i40e_vsi_context ctxt;
4984 struct ether_addr broadcast =
4985 {.addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}};
4987 if (type != I40E_VSI_MAIN && type != I40E_VSI_SRIOV &&
4988 uplink_vsi == NULL) {
4990 "VSI setup failed, VSI link shouldn't be NULL");
4994 if (type == I40E_VSI_MAIN && uplink_vsi != NULL) {
4996 "VSI setup failed, MAIN VSI uplink VSI should be NULL");
5001 * 1. type is not MAIN and the uplink VSI is not NULL:
5002 * if the uplink VSI has no VEB yet, create one first under its veb field.
5003 * 2. type is SRIOV and the uplink is NULL:
5004 * if the floating VEB is NULL, create one under the floating veb field.
5007 if (type != I40E_VSI_MAIN && uplink_vsi != NULL &&
5008 uplink_vsi->veb == NULL) {
5009 uplink_vsi->veb = i40e_veb_setup(pf, uplink_vsi);
5011 if (uplink_vsi->veb == NULL) {
5012 PMD_DRV_LOG(ERR, "VEB setup failed");
5015 /* Set ALLOW_LOOPBACK on the PF when the VEB is created */
5016 i40e_enable_pf_lb(pf);
5019 if (type == I40E_VSI_SRIOV && uplink_vsi == NULL &&
5020 pf->main_vsi->floating_veb == NULL) {
5021 pf->main_vsi->floating_veb = i40e_veb_setup(pf, uplink_vsi);
5023 if (pf->main_vsi->floating_veb == NULL) {
5024 PMD_DRV_LOG(ERR, "VEB setup failed");
5029 vsi = rte_zmalloc("i40e_vsi", sizeof(struct i40e_vsi), 0);
5031 PMD_DRV_LOG(ERR, "Failed to allocate memory for vsi");
5034 TAILQ_INIT(&vsi->mac_list);
5036 vsi->adapter = I40E_PF_TO_ADAPTER(pf);
5037 vsi->max_macaddrs = I40E_NUM_MACADDR_MAX;
5038 vsi->parent_vsi = uplink_vsi ? uplink_vsi : pf->main_vsi;
5039 vsi->user_param = user_param;
5040 vsi->vlan_anti_spoof_on = 0;
5041 vsi->vlan_filter_on = 0;
5042 /* Allocate queues */
5043 switch (vsi->type) {
5044 case I40E_VSI_MAIN :
5045 vsi->nb_qps = pf->lan_nb_qps;
5047 case I40E_VSI_SRIOV :
5048 vsi->nb_qps = pf->vf_nb_qps;
5050 case I40E_VSI_VMDQ2:
5051 vsi->nb_qps = pf->vmdq_nb_qps;
5054 vsi->nb_qps = pf->fdir_nb_qps;
5060 * The filter status descriptor is reported on RX queue 0,
5061 * while the TX queue for FDIR filter programming has no
5062 * such constraint and can be any queue.
5063 * To simplify things, make the FDIR VSI use queue pair 0.
5064 * To guarantee it gets queue pair 0, queue allocation
5065 * has to be done before this function is called.
5067 if (type != I40E_VSI_FDIR) {
5068 ret = i40e_res_pool_alloc(&pf->qp_pool, vsi->nb_qps);
5070 PMD_DRV_LOG(ERR, "VSI %d allocate queue failed %d",
5074 vsi->base_queue = ret;
5076 vsi->base_queue = I40E_FDIR_QUEUE_ID;
5078 /* VF has MSIX interrupt in VF range, don't allocate here */
5079 if (type == I40E_VSI_MAIN) {
5080 ret = i40e_res_pool_alloc(&pf->msix_pool,
5081 RTE_MIN(vsi->nb_qps,
5082 RTE_MAX_RXTX_INTR_VEC_ID));
5084 PMD_DRV_LOG(ERR, "VSI MAIN %d get heap failed %d",
5086 goto fail_queue_alloc;
5088 vsi->msix_intr = ret;
5089 vsi->nb_msix = RTE_MIN(vsi->nb_qps, RTE_MAX_RXTX_INTR_VEC_ID);
5090 } else if (type != I40E_VSI_SRIOV) {
5091 ret = i40e_res_pool_alloc(&pf->msix_pool, 1);
5093 PMD_DRV_LOG(ERR, "VSI %d get heap failed %d", vsi->seid, ret);
5094 goto fail_queue_alloc;
5096 vsi->msix_intr = ret;
5104 if (type == I40E_VSI_MAIN) {
5105 /* For main VSI, no need to add since it's default one */
5106 vsi->uplink_seid = pf->mac_seid;
5107 vsi->seid = pf->main_vsi_seid;
5108 /* Bind queues with specific MSIX interrupt */
5110 * At least 2 interrupts are needed: one for misc causes,
5111 * which is enabled from the OS side, and another for the
5112 * queue-bound interrupts from the device side only.
5115 /* Get default VSI parameters from hardware */
5116 memset(&ctxt, 0, sizeof(ctxt));
5117 ctxt.seid = vsi->seid;
5118 ctxt.pf_num = hw->pf_id;
5119 ctxt.uplink_seid = vsi->uplink_seid;
5121 ret = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
5122 if (ret != I40E_SUCCESS) {
5123 PMD_DRV_LOG(ERR, "Failed to get VSI params");
5124 goto fail_msix_alloc;
5126 rte_memcpy(&vsi->info, &ctxt.info,
5127 sizeof(struct i40e_aqc_vsi_properties_data));
5128 vsi->vsi_id = ctxt.vsi_number;
5129 vsi->info.valid_sections = 0;
5131 /* Configure tc, enabled TC0 only */
5132 if (i40e_vsi_update_tc_bandwidth(vsi, I40E_DEFAULT_TCMAP) !=
5134 PMD_DRV_LOG(ERR, "Failed to update TC bandwidth");
5135 goto fail_msix_alloc;
5138 /* TC, queue mapping */
5139 memset(&ctxt, 0, sizeof(ctxt));
5140 vsi->info.valid_sections |=
5141 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
5142 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
5143 I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
5144 rte_memcpy(&ctxt.info, &vsi->info,
5145 sizeof(struct i40e_aqc_vsi_properties_data));
5146 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
5147 I40E_DEFAULT_TCMAP);
5148 if (ret != I40E_SUCCESS) {
5150 "Failed to configure TC queue mapping");
5151 goto fail_msix_alloc;
5153 ctxt.seid = vsi->seid;
5154 ctxt.pf_num = hw->pf_id;
5155 ctxt.uplink_seid = vsi->uplink_seid;
5158 /* Update VSI parameters */
5159 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5160 if (ret != I40E_SUCCESS) {
5161 PMD_DRV_LOG(ERR, "Failed to update VSI params");
5162 goto fail_msix_alloc;
5165 rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
5166 sizeof(vsi->info.tc_mapping));
5167 rte_memcpy(&vsi->info.queue_mapping,
5168 &ctxt.info.queue_mapping,
5169 sizeof(vsi->info.queue_mapping));
5170 vsi->info.mapping_flags = ctxt.info.mapping_flags;
5171 vsi->info.valid_sections = 0;
5173 rte_memcpy(pf->dev_addr.addr_bytes, hw->mac.perm_addr,
5177 * Updating the default filter settings is necessary to prevent
5178 * reception of tagged packets.
5179 * Some old firmware configurations load a default macvlan
5180 * filter which accepts both tagged and untagged packets.
5181 * The update replaces it with a normal filter if needed.
5182 * For NVM 4.2.2 or later the update is no longer needed, as
5183 * firmware with correct configurations loads the expected
5184 * default macvlan filter, which cannot be removed.
5186 i40e_update_default_filter_setting(vsi);
5187 i40e_config_qinq(hw, vsi);
5188 } else if (type == I40E_VSI_SRIOV) {
5189 memset(&ctxt, 0, sizeof(ctxt));
5191 * For other VSI types, the uplink_seid equals the uplink VSI's
5192 * uplink_seid since they share the same VEB.
5194 if (uplink_vsi == NULL)
5195 vsi->uplink_seid = pf->main_vsi->floating_veb->seid;
5197 vsi->uplink_seid = uplink_vsi->uplink_seid;
5198 ctxt.pf_num = hw->pf_id;
5199 ctxt.vf_num = hw->func_caps.vf_base_id + user_param;
5200 ctxt.uplink_seid = vsi->uplink_seid;
5201 ctxt.connection_type = 0x1;
5202 ctxt.flags = I40E_AQ_VSI_TYPE_VF;
5204 /* Use the VEB configuration if FW >= v5.0 */
5205 if (hw->aq.fw_maj_ver >= 5) {
5206 /* Configure switch ID */
5207 ctxt.info.valid_sections |=
5208 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
5209 ctxt.info.switch_id =
5210 rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
5213 /* Configure port/vlan */
5214 ctxt.info.valid_sections |=
5215 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
5216 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
5217 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
5218 hw->func_caps.enabled_tcmap);
5219 if (ret != I40E_SUCCESS) {
5221 "Failed to configure TC queue mapping");
5222 goto fail_msix_alloc;
5225 ctxt.info.up_enable_bits = hw->func_caps.enabled_tcmap;
5226 ctxt.info.valid_sections |=
5227 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
5229 * Since the VSI is not created yet, only configure the parameters;
5230 * the VSI will be added below.
5233 i40e_config_qinq(hw, vsi);
5234 } else if (type == I40E_VSI_VMDQ2) {
5235 memset(&ctxt, 0, sizeof(ctxt));
5237 * For other VSI types, the uplink_seid equals the uplink VSI's
5238 * uplink_seid since they share the same VEB.
5240 vsi->uplink_seid = uplink_vsi->uplink_seid;
5241 ctxt.pf_num = hw->pf_id;
5243 ctxt.uplink_seid = vsi->uplink_seid;
5244 ctxt.connection_type = 0x1;
5245 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
5247 ctxt.info.valid_sections |=
5248 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
5249 /* user_param carries the flag to enable loopback */
5251 ctxt.info.switch_id =
5252 rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB);
5253 ctxt.info.switch_id |=
5254 rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
5257 /* Configure port/vlan */
5258 ctxt.info.valid_sections |=
5259 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
5260 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
5261 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
5262 I40E_DEFAULT_TCMAP);
5263 if (ret != I40E_SUCCESS) {
5265 "Failed to configure TC queue mapping");
5266 goto fail_msix_alloc;
5268 ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
5269 ctxt.info.valid_sections |=
5270 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
5271 } else if (type == I40E_VSI_FDIR) {
5272 memset(&ctxt, 0, sizeof(ctxt));
5273 vsi->uplink_seid = uplink_vsi->uplink_seid;
5274 ctxt.pf_num = hw->pf_id;
5276 ctxt.uplink_seid = vsi->uplink_seid;
5277 ctxt.connection_type = 0x1; /* regular data port */
5278 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
5279 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
5280 I40E_DEFAULT_TCMAP);
5281 if (ret != I40E_SUCCESS) {
5283 "Failed to configure TC queue mapping.");
5284 goto fail_msix_alloc;
5286 ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
5287 ctxt.info.valid_sections |=
5288 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
5290 PMD_DRV_LOG(ERR, "VSI: Not support other type VSI yet");
5291 goto fail_msix_alloc;
5294 if (vsi->type != I40E_VSI_MAIN) {
5295 ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
5296 if (ret != I40E_SUCCESS) {
5297 PMD_DRV_LOG(ERR, "add vsi failed, aq_err=%d",
5298 hw->aq.asq_last_status);
5299 goto fail_msix_alloc;
5301 memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
5302 vsi->info.valid_sections = 0;
5303 vsi->seid = ctxt.seid;
5304 vsi->vsi_id = ctxt.vsi_number;
5305 vsi->sib_vsi_list.vsi = vsi;
5306 if (vsi->type == I40E_VSI_SRIOV && uplink_vsi == NULL) {
5307 TAILQ_INSERT_TAIL(&pf->main_vsi->floating_veb->head,
5308 &vsi->sib_vsi_list, list);
5310 TAILQ_INSERT_TAIL(&uplink_vsi->veb->head,
5311 &vsi->sib_vsi_list, list);
5315 /* MAC/VLAN configuration */
5316 rte_memcpy(&filter.mac_addr, &broadcast, ETHER_ADDR_LEN);
5317 filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
5319 ret = i40e_vsi_add_mac(vsi, &filter);
5320 if (ret != I40E_SUCCESS) {
5321 PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter");
5322 goto fail_msix_alloc;
5325 /* Get VSI BW information */
5326 i40e_vsi_get_bw_config(vsi);
5329 i40e_res_pool_free(&pf->msix_pool,vsi->msix_intr);
5331 i40e_res_pool_free(&pf->qp_pool,vsi->base_queue);
5337 /* Configure vlan filter on or off */
5339 i40e_vsi_config_vlan_filter(struct i40e_vsi *vsi, bool on)
5342 struct i40e_mac_filter *f;
5344 struct i40e_mac_filter_info *mac_filter;
5345 enum rte_mac_filter_type desired_filter;
5346 int ret = I40E_SUCCESS;
5349 /* Filter to match MAC and VLAN */
5350 desired_filter = RTE_MACVLAN_PERFECT_MATCH;
5352 /* Filter to match only MAC */
5353 desired_filter = RTE_MAC_PERFECT_MATCH;
5358 mac_filter = rte_zmalloc("mac_filter_info_data",
5359 num * sizeof(*mac_filter), 0);
5360 if (mac_filter == NULL) {
5361 PMD_DRV_LOG(ERR, "failed to allocate memory");
5362 return I40E_ERR_NO_MEMORY;
5367 /* Remove all existing mac */
5368 TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp) {
5369 mac_filter[i] = f->mac_info;
5370 ret = i40e_vsi_delete_mac(vsi, &f->mac_info.mac_addr);
5372 PMD_DRV_LOG(ERR, "Update VSI failed to %s vlan filter",
5373 on ? "enable" : "disable");
5379 /* Override with new filter */
5380 for (i = 0; i < num; i++) {
5381 mac_filter[i].filter_type = desired_filter;
5382 ret = i40e_vsi_add_mac(vsi, &mac_filter[i]);
5384 PMD_DRV_LOG(ERR, "Update VSI failed to %s vlan filter",
5385 on ? "enable" : "disable");
5391 rte_free(mac_filter);
5395 /* Configure vlan stripping on or off */
5397 i40e_vsi_config_vlan_stripping(struct i40e_vsi *vsi, bool on)
5399 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
5400 struct i40e_vsi_context ctxt;
5402 int ret = I40E_SUCCESS;
5404 /* Check if it has been already on or off */
5405 if (vsi->info.valid_sections &
5406 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID)) {
5408 if ((vsi->info.port_vlan_flags &
5409 I40E_AQ_VSI_PVLAN_EMOD_MASK) == 0)
5410 return 0; /* already on */
5412 if ((vsi->info.port_vlan_flags &
5413 I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
5414 I40E_AQ_VSI_PVLAN_EMOD_MASK)
5415 return 0; /* already off */
5420 vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
5422 vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
5423 vsi->info.valid_sections =
5424 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
5425 vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_EMOD_MASK);
5426 vsi->info.port_vlan_flags |= vlan_flags;
5427 ctxt.seid = vsi->seid;
5428 rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
5429 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5431 PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan stripping",
5432 on ? "enable" : "disable");
5438 i40e_dev_init_vlan(struct rte_eth_dev *dev)
5440 struct rte_eth_dev_data *data = dev->data;
5444 /* Apply vlan offload setting */
5445 mask = ETH_VLAN_STRIP_MASK |
5446 ETH_VLAN_FILTER_MASK |
5447 ETH_VLAN_EXTEND_MASK;
5448 ret = i40e_vlan_offload_set(dev, mask);
5450 PMD_DRV_LOG(INFO, "Failed to update vlan offload");
5454 /* Apply pvid setting */
5455 ret = i40e_vlan_pvid_set(dev, data->dev_conf.txmode.pvid,
5456 data->dev_conf.txmode.hw_vlan_insert_pvid);
5458 PMD_DRV_LOG(INFO, "Failed to update VSI params");
5464 i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on)
5466 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
5468 return i40e_aq_set_port_parameters(hw, vsi->seid, 0, 1, on, NULL);
5472 i40e_update_flow_control(struct i40e_hw *hw)
5474 #define I40E_LINK_PAUSE_RXTX (I40E_AQ_LINK_PAUSE_RX | I40E_AQ_LINK_PAUSE_TX)
5475 struct i40e_link_status link_status;
5476 uint32_t rxfc = 0, txfc = 0, reg;
5480 memset(&link_status, 0, sizeof(link_status));
5481 ret = i40e_aq_get_link_info(hw, FALSE, &link_status, NULL);
5482 if (ret != I40E_SUCCESS) {
5483 PMD_DRV_LOG(ERR, "Failed to get link status information");
5484 goto write_reg; /* Disable flow control */
5487 an_info = hw->phy.link_info.an_info;
5488 if (!(an_info & I40E_AQ_AN_COMPLETED)) {
5489 PMD_DRV_LOG(INFO, "Link auto negotiation not completed");
5490 ret = I40E_ERR_NOT_READY;
5491 goto write_reg; /* Disable flow control */
5494 * If link auto negotiation is enabled, flow control needs to
5495 * be configured according to it
5497 switch (an_info & I40E_LINK_PAUSE_RXTX) {
5498 case I40E_LINK_PAUSE_RXTX:
5501 hw->fc.current_mode = I40E_FC_FULL;
5503 case I40E_AQ_LINK_PAUSE_RX:
5505 hw->fc.current_mode = I40E_FC_RX_PAUSE;
5507 case I40E_AQ_LINK_PAUSE_TX:
5509 hw->fc.current_mode = I40E_FC_TX_PAUSE;
5512 hw->fc.current_mode = I40E_FC_NONE;
5517 I40E_WRITE_REG(hw, I40E_PRTDCB_FCCFG,
5518 txfc << I40E_PRTDCB_FCCFG_TFCE_SHIFT);
5519 reg = I40E_READ_REG(hw, I40E_PRTDCB_MFLCN);
5520 reg &= ~I40E_PRTDCB_MFLCN_RFCE_MASK;
5521 reg |= rxfc << I40E_PRTDCB_MFLCN_RFCE_SHIFT;
5522 I40E_WRITE_REG(hw, I40E_PRTDCB_MFLCN, reg);
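/*
 * Note on the two registers written above: PRTDCB_FCCFG.TFCE controls
 * whether pause frames are transmitted, while PRTDCB_MFLCN.RFCE
 * controls whether received pause frames are honored; together they
 * implement the rx/tx pause mode resolved from the AN result above.
 */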
5529 i40e_pf_setup(struct i40e_pf *pf)
5531 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5532 struct i40e_filter_control_settings settings;
5533 struct i40e_vsi *vsi;
5536 /* Clear all stats counters */
5537 pf->offset_loaded = FALSE;
5538 memset(&pf->stats, 0, sizeof(struct i40e_hw_port_stats));
5539 memset(&pf->stats_offset, 0, sizeof(struct i40e_hw_port_stats));
5540 memset(&pf->internal_stats, 0, sizeof(struct i40e_eth_stats));
5541 memset(&pf->internal_stats_offset, 0, sizeof(struct i40e_eth_stats));
5543 ret = i40e_pf_get_switch_config(pf);
5544 if (ret != I40E_SUCCESS) {
5545 PMD_DRV_LOG(ERR, "Could not get switch config, err %d", ret);
5548 if (pf->flags & I40E_FLAG_FDIR) {
5549 /* Allocate queues first so that FDIR can use queue pair 0 */
5550 ret = i40e_res_pool_alloc(&pf->qp_pool, I40E_DEFAULT_QP_NUM_FDIR);
5551 if (ret != I40E_FDIR_QUEUE_ID) {
5553 "queue allocation fails for FDIR: ret =%d",
5555 pf->flags &= ~I40E_FLAG_FDIR;
5558 /* main VSI setup */
5559 vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, NULL, 0);
5561 PMD_DRV_LOG(ERR, "Setup of main vsi failed");
5562 return I40E_ERR_NOT_READY;
5566 /* Configure filter control */
5567 memset(&settings, 0, sizeof(settings));
5568 if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_128)
5569 settings.hash_lut_size = I40E_HASH_LUT_SIZE_128;
5570 else if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_512)
5571 settings.hash_lut_size = I40E_HASH_LUT_SIZE_512;
5573 PMD_DRV_LOG(ERR, "Hash lookup table size (%u) not supported",
5574 hw->func_caps.rss_table_size);
5575 return I40E_ERR_PARAM;
5577 PMD_DRV_LOG(INFO, "Hardware capability of hash lookup table size: %u",
5578 hw->func_caps.rss_table_size);
5579 pf->hash_lut_size = hw->func_caps.rss_table_size;
5581 /* Enable ethtype and macvlan filters */
5582 settings.enable_ethtype = TRUE;
5583 settings.enable_macvlan = TRUE;
5584 ret = i40e_set_filter_control(hw, &settings);
5586 PMD_INIT_LOG(WARNING, "setup_pf_filter_control failed: %d",
5589 /* Update flow control according to the auto negotiation */
5590 i40e_update_flow_control(hw);
5592 return I40E_SUCCESS;
5596 i40e_switch_tx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
5602 * Set or clear TX Queue Disable flags,
5603 * which is required by hardware.
5605 i40e_pre_tx_queue_cfg(hw, q_idx, on);
5606 rte_delay_us(I40E_PRE_TX_Q_CFG_WAIT_US);
5608 /* Wait until the request is finished */
5609 for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
5610 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
5611 reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
5612 if (!(((reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 0x1) ^
5613 ((reg >> I40E_QTX_ENA_QENA_STAT_SHIFT)
5619 if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
5620 return I40E_SUCCESS; /* already on, skip next steps */
5622 I40E_WRITE_REG(hw, I40E_QTX_HEAD(q_idx), 0);
5623 reg |= I40E_QTX_ENA_QENA_REQ_MASK;
5625 if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
5626 return I40E_SUCCESS; /* already off, skip next steps */
5627 reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
5629 /* Write the register */
5630 I40E_WRITE_REG(hw, I40E_QTX_ENA(q_idx), reg);
5631 /* Check the result */
5632 for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
5633 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
5634 reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
5636 if ((reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
5637 (reg & I40E_QTX_ENA_QENA_STAT_MASK))
5640 if (!(reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
5641 !(reg & I40E_QTX_ENA_QENA_STAT_MASK))
5645 /* Check for timeout */
5646 if (j >= I40E_CHK_Q_ENA_COUNT) {
5647 PMD_DRV_LOG(ERR, "Failed to %s tx queue[%u]",
5648 (on ? "enable" : "disable"), q_idx);
5649 return I40E_ERR_TIMEOUT;
5652 return I40E_SUCCESS;
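/*
 * Note on the handshake implemented above: QTX_ENA carries a
 * request/status bit pair. Software toggles QENA_REQ and hardware
 * mirrors the result in QENA_STAT, so both poll loops wait until the
 * two bits agree, for at most I40E_CHK_Q_ENA_COUNT *
 * I40E_CHK_Q_ENA_INTERVAL_US = ~1 s.
 */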
5655 /* Switch on or off the tx queues */
5657 i40e_dev_switch_tx_queues(struct i40e_pf *pf, bool on)
5659 struct rte_eth_dev_data *dev_data = pf->dev_data;
5660 struct i40e_tx_queue *txq;
5661 struct rte_eth_dev *dev = pf->adapter->eth_dev;
5665 for (i = 0; i < dev_data->nb_tx_queues; i++) {
5666 txq = dev_data->tx_queues[i];
5667 /* Don't operate the queue if it is not configured or
5668 * if it is to be started only per queue (deferred start) */
5669 if (!txq || !txq->q_set || (on && txq->tx_deferred_start))
5672 ret = i40e_dev_tx_queue_start(dev, i);
5674 ret = i40e_dev_tx_queue_stop(dev, i);
5675 if (ret != I40E_SUCCESS)
5679 return I40E_SUCCESS;
5683 i40e_switch_rx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
5688 /* Wait until the request is finished */
5689 for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
5690 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
5691 reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
5692 if (!((reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 0x1) ^
5693 ((reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 0x1))
5698 if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
5699 return I40E_SUCCESS; /* Already on, skip next steps */
5700 reg |= I40E_QRX_ENA_QENA_REQ_MASK;
5702 if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
5703 return I40E_SUCCESS; /* Already off, skip next steps */
5704 reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
5707 /* Write the register */
5708 I40E_WRITE_REG(hw, I40E_QRX_ENA(q_idx), reg);
5709 /* Check the result */
5710 for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
5711 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
5712 reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
5714 if ((reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
5715 (reg & I40E_QRX_ENA_QENA_STAT_MASK))
5718 if (!(reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
5719 !(reg & I40E_QRX_ENA_QENA_STAT_MASK))
5724 /* Check if it is timeout */
5725 if (j >= I40E_CHK_Q_ENA_COUNT) {
5726 PMD_DRV_LOG(ERR, "Failed to %s rx queue[%u]",
5727 (on ? "enable" : "disable"), q_idx);
5728 return I40E_ERR_TIMEOUT;
5731 return I40E_SUCCESS;
5733 /* Switch on or off the rx queues */
5735 i40e_dev_switch_rx_queues(struct i40e_pf *pf, bool on)
5737 struct rte_eth_dev_data *dev_data = pf->dev_data;
5738 struct i40e_rx_queue *rxq;
5739 struct rte_eth_dev *dev = pf->adapter->eth_dev;
5743 for (i = 0; i < dev_data->nb_rx_queues; i++) {
5744 rxq = dev_data->rx_queues[i];
5745 /* Don't operate the queue if it is not configured or
5746 * if it is to be started only per queue (deferred start) */
5747 if (!rxq || !rxq->q_set || (on && rxq->rx_deferred_start))
5750 ret = i40e_dev_rx_queue_start(dev, i);
5752 ret = i40e_dev_rx_queue_stop(dev, i);
5753 if (ret != I40E_SUCCESS)
5757 return I40E_SUCCESS;
5760 /* Switch on or off all the rx/tx queues */
5762 i40e_dev_switch_queues(struct i40e_pf *pf, bool on)
5767 /* enable rx queues before enabling tx queues */
5768 ret = i40e_dev_switch_rx_queues(pf, on);
5770 PMD_DRV_LOG(ERR, "Failed to switch rx queues");
5773 ret = i40e_dev_switch_tx_queues(pf, on);
5775 /* Stop tx queues before stopping rx queues */
5776 ret = i40e_dev_switch_tx_queues(pf, on);
5778 PMD_DRV_LOG(ERR, "Failed to switch tx queues");
5781 ret = i40e_dev_switch_rx_queues(pf, on);
5787 /* Initialize VSI for TX */
5789 i40e_dev_tx_init(struct i40e_pf *pf)
5791 struct rte_eth_dev_data *data = pf->dev_data;
5793 uint32_t ret = I40E_SUCCESS;
5794 struct i40e_tx_queue *txq;
5796 for (i = 0; i < data->nb_tx_queues; i++) {
5797 txq = data->tx_queues[i];
5798 if (!txq || !txq->q_set)
5800 ret = i40e_tx_queue_init(txq);
5801 if (ret != I40E_SUCCESS)
5804 if (ret == I40E_SUCCESS)
5805 i40e_set_tx_function(container_of(pf, struct i40e_adapter, pf)
5811 /* Initialize VSI for RX */
5813 i40e_dev_rx_init(struct i40e_pf *pf)
5815 struct rte_eth_dev_data *data = pf->dev_data;
5816 int ret = I40E_SUCCESS;
5818 struct i40e_rx_queue *rxq;
5820 i40e_pf_config_mq_rx(pf);
5821 for (i = 0; i < data->nb_rx_queues; i++) {
5822 rxq = data->rx_queues[i];
5823 if (!rxq || !rxq->q_set)
5826 ret = i40e_rx_queue_init(rxq);
5827 if (ret != I40E_SUCCESS) {
5829 "Failed to do RX queue initialization");
5833 if (ret == I40E_SUCCESS)
5834 i40e_set_rx_function(container_of(pf, struct i40e_adapter, pf)
5841 i40e_dev_rxtx_init(struct i40e_pf *pf)
5845 err = i40e_dev_tx_init(pf);
5847 PMD_DRV_LOG(ERR, "Failed to do TX initialization");
5850 err = i40e_dev_rx_init(pf);
5852 PMD_DRV_LOG(ERR, "Failed to do RX initialization");
5860 i40e_vmdq_setup(struct rte_eth_dev *dev)
5862 struct rte_eth_conf *conf = &dev->data->dev_conf;
5863 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5864 int i, err, conf_vsis, j, loop;
5865 struct i40e_vsi *vsi;
5866 struct i40e_vmdq_info *vmdq_info;
5867 struct rte_eth_vmdq_rx_conf *vmdq_conf;
5868 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5871 * Disable interrupts to avoid messages from VFs. Furthermore, it
5872 * avoids race conditions in VSI creation/destruction.
5874 i40e_pf_disable_irq0(hw);
5876 if ((pf->flags & I40E_FLAG_VMDQ) == 0) {
5877 PMD_INIT_LOG(ERR, "FW doesn't support VMDQ");
5881 conf_vsis = conf->rx_adv_conf.vmdq_rx_conf.nb_queue_pools;
5882 if (conf_vsis > pf->max_nb_vmdq_vsi) {
5883 PMD_INIT_LOG(ERR, "VMDQ config: %u, max support:%u",
5884 conf->rx_adv_conf.vmdq_rx_conf.nb_queue_pools,
5885 pf->max_nb_vmdq_vsi);
5889 if (pf->vmdq != NULL) {
5890 PMD_INIT_LOG(INFO, "VMDQ already configured");
5894 pf->vmdq = rte_zmalloc("vmdq_info_struct",
5895 sizeof(*vmdq_info) * conf_vsis, 0);
5897 if (pf->vmdq == NULL) {
5898 PMD_INIT_LOG(ERR, "Failed to allocate memory");
5902 vmdq_conf = &conf->rx_adv_conf.vmdq_rx_conf;
5904 /* Create VMDQ VSI */
5905 for (i = 0; i < conf_vsis; i++) {
5906 vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2, pf->main_vsi,
5907 vmdq_conf->enable_loop_back);
5909 PMD_INIT_LOG(ERR, "Failed to create VMDQ VSI");
5913 vmdq_info = &pf->vmdq[i];
5915 vmdq_info->vsi = vsi;
5917 pf->nb_cfg_vmdq_vsi = conf_vsis;
5919 /* Configure VLAN */
5920 loop = sizeof(vmdq_conf->pool_map[0].pools) * CHAR_BIT;
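/*
 * pool_map[i].pools is a 64-bit mask, so loop = 64; bit j being set
 * maps vlan_id to VMDq pool j (bounded below by nb_cfg_vmdq_vsi).
 */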
5921 for (i = 0; i < vmdq_conf->nb_pool_maps; i++) {
5922 for (j = 0; j < loop && j < pf->nb_cfg_vmdq_vsi; j++) {
5923 if (vmdq_conf->pool_map[i].pools & (1UL << j)) {
5924 PMD_INIT_LOG(INFO, "Add vlan %u to vmdq pool %u",
5925 vmdq_conf->pool_map[i].vlan_id, j);
5927 err = i40e_vsi_add_vlan(pf->vmdq[j].vsi,
5928 vmdq_conf->pool_map[i].vlan_id);
5930 PMD_INIT_LOG(ERR, "Failed to add vlan");
5938 i40e_pf_enable_irq0(hw);
5943 for (i = 0; i < conf_vsis; i++)
5944 if (pf->vmdq[i].vsi == NULL)
5947 i40e_vsi_release(pf->vmdq[i].vsi);
5951 i40e_pf_enable_irq0(hw);
5956 i40e_stat_update_32(struct i40e_hw *hw,
5964 new_data = (uint64_t)I40E_READ_REG(hw, reg);
5968 if (new_data >= *offset)
5969 *stat = (uint64_t)(new_data - *offset);
5971 *stat = (uint64_t)((new_data +
5972 ((uint64_t)1 << I40E_32_BIT_WIDTH)) - *offset);
5976 i40e_stat_update_48(struct i40e_hw *hw,
5985 new_data = (uint64_t)I40E_READ_REG(hw, loreg);
5986 new_data |= ((uint64_t)(I40E_READ_REG(hw, hireg) &
5987 I40E_16_BIT_MASK)) << I40E_32_BIT_WIDTH;
5992 if (new_data >= *offset)
5993 *stat = new_data - *offset;
5995 *stat = (uint64_t)((new_data +
5996 ((uint64_t)1 << I40E_48_BIT_WIDTH)) - *offset);
5998 *stat &= I40E_48_BIT_MASK;
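/*
 * Wrap handling example: these counters are 48 bits wide, so if a
 * counter wrapped since the last read (new_data < *offset), adding
 * 2^48 before subtracting yields the true delta; the same pattern
 * with 2^32 is used for the 32-bit counters above.
 */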
6003 i40e_pf_disable_irq0(struct i40e_hw *hw)
6005 /* Disable all interrupt types */
6006 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, 0);
6007 I40E_WRITE_FLUSH(hw);
6012 i40e_pf_enable_irq0(struct i40e_hw *hw)
6014 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
6015 I40E_PFINT_DYN_CTL0_INTENA_MASK |
6016 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
6017 I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
6018 I40E_WRITE_FLUSH(hw);
6022 i40e_pf_config_irq0(struct i40e_hw *hw, bool no_queue)
6024 /* read pending request and disable first */
6025 i40e_pf_disable_irq0(hw);
6026 I40E_WRITE_REG(hw, I40E_PFINT_ICR0_ENA, I40E_PFINT_ICR0_ENA_MASK);
6027 I40E_WRITE_REG(hw, I40E_PFINT_STAT_CTL0,
6028 I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK);
6031 /* Link no queues with irq0 */
6032 I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
6033 I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK);
6037 i40e_dev_handle_vfr_event(struct rte_eth_dev *dev)
6039 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6040 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6043 uint32_t index, offset, val;
6048 * Try to find which VF triggered a reset; use the absolute VF id
6049 * for access since the register is a global register.
6051 for (i = 0; i < pf->vf_num; i++) {
6052 abs_vf_id = hw->func_caps.vf_base_id + i;
6053 index = abs_vf_id / I40E_UINT32_BIT_SIZE;
6054 offset = abs_vf_id % I40E_UINT32_BIT_SIZE;
6055 val = I40E_READ_REG(hw, I40E_GLGEN_VFLRSTAT(index));
6056 /* VFR event occurred */
6057 if (val & (0x1 << offset)) {
6060 /* Clear the event first */
6061 I40E_WRITE_REG(hw, I40E_GLGEN_VFLRSTAT(index),
6063 PMD_DRV_LOG(INFO, "VF %u reset occurred", abs_vf_id);
6065 * Only notify that a VF reset event occurred;
6066 * don't trigger another SW reset.
6068 ret = i40e_pf_host_vf_reset(&pf->vfs[i], 0);
6069 if (ret != I40E_SUCCESS)
6070 PMD_DRV_LOG(ERR, "Failed to do VF reset");
6076 i40e_notify_all_vfs_link_status(struct rte_eth_dev *dev)
6078 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6081 for (i = 0; i < pf->vf_num; i++)
6082 i40e_notify_vf_link_status(dev, &pf->vfs[i]);
6086 i40e_dev_handle_aq_msg(struct rte_eth_dev *dev)
6088 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6089 struct i40e_arq_event_info info;
6090 uint16_t pending, opcode;
6093 info.buf_len = I40E_AQ_BUF_SZ;
6094 info.msg_buf = rte_zmalloc("msg_buffer", info.buf_len, 0);
6095 if (!info.msg_buf) {
6096 PMD_DRV_LOG(ERR, "Failed to allocate mem");
6102 ret = i40e_clean_arq_element(hw, &info, &pending);
6104 if (ret != I40E_SUCCESS) {
6106 "Failed to read msg from AdminQ, aq_err: %u",
6107 hw->aq.asq_last_status);
6110 opcode = rte_le_to_cpu_16(info.desc.opcode);
6113 case i40e_aqc_opc_send_msg_to_pf:
6114 /* Refer to i40e_aq_send_msg_to_pf() for argument layout */
6115 i40e_pf_host_handle_vf_msg(dev,
6116 rte_le_to_cpu_16(info.desc.retval),
6117 rte_le_to_cpu_32(info.desc.cookie_high),
6118 rte_le_to_cpu_32(info.desc.cookie_low),
6122 case i40e_aqc_opc_get_link_status:
6123 ret = i40e_dev_link_update(dev, 0);
6125 _rte_eth_dev_callback_process(dev,
6126 RTE_ETH_EVENT_INTR_LSC, NULL);
6129 PMD_DRV_LOG(DEBUG, "Request %u is not supported yet",
6134 rte_free(info.msg_buf);
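/*
 * Note: i40e_clean_arq_element() reports via 'pending' how many ARQ
 * events are still queued, so processing repeats until the admin
 * receive queue is drained, after which the message buffer is freed.
 */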
6138 * Interrupt handler triggered by NIC for handling
6139 * specific interrupt.
6142 * Pointer to interrupt handle.
6144 * The address of the parameter (struct rte_eth_dev *) registered before.
6150 i40e_dev_interrupt_handler(void *param)
6152 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
6153 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6156 /* Disable interrupt */
6157 i40e_pf_disable_irq0(hw);
6159 /* read out interrupt causes */
6160 icr0 = I40E_READ_REG(hw, I40E_PFINT_ICR0);
6162 /* No interrupt event indicated */
6163 if (!(icr0 & I40E_PFINT_ICR0_INTEVENT_MASK)) {
6164 PMD_DRV_LOG(INFO, "No interrupt event");
6167 if (icr0 & I40E_PFINT_ICR0_ECC_ERR_MASK)
6168 PMD_DRV_LOG(ERR, "ICR0: unrecoverable ECC error");
6169 if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK)
6170 PMD_DRV_LOG(ERR, "ICR0: malicious programming detected");
6171 if (icr0 & I40E_PFINT_ICR0_GRST_MASK)
6172 PMD_DRV_LOG(INFO, "ICR0: global reset requested");
6173 if (icr0 & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
6174 PMD_DRV_LOG(INFO, "ICR0: PCI exception activated");
6175 if (icr0 & I40E_PFINT_ICR0_STORM_DETECT_MASK)
6176 PMD_DRV_LOG(INFO, "ICR0: a change in the storm control state");
6177 if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK)
6178 PMD_DRV_LOG(ERR, "ICR0: HMC error");
6179 if (icr0 & I40E_PFINT_ICR0_PE_CRITERR_MASK)
6180 PMD_DRV_LOG(ERR, "ICR0: protocol engine critical error");
6182 if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
6183 PMD_DRV_LOG(INFO, "ICR0: VF reset detected");
6184 i40e_dev_handle_vfr_event(dev);
6186 if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
6187 PMD_DRV_LOG(INFO, "ICR0: adminq event");
6188 i40e_dev_handle_aq_msg(dev);
6192 /* Enable interrupt */
6193 i40e_pf_enable_irq0(hw);
6194 rte_intr_enable(dev->intr_handle);
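/*
 * Add a list of MAC/VLAN filters through the AdminQ. One add_macvlan
 * command carries at most asq_buf_size / sizeof(element) entries, so
 * the request is issued in batches of up to ele_num elements until
 * all 'total' filters are programmed.
 */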
6198 i40e_add_macvlan_filters(struct i40e_vsi *vsi,
6199 struct i40e_macvlan_filter *filter,
6202 int ele_num, ele_buff_size;
6203 int num, actual_num, i;
6205 int ret = I40E_SUCCESS;
6206 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
6207 struct i40e_aqc_add_macvlan_element_data *req_list;
6209 if (filter == NULL || total == 0)
6210 return I40E_ERR_PARAM;
6211 ele_num = hw->aq.asq_buf_size / sizeof(*req_list);
6212 ele_buff_size = hw->aq.asq_buf_size;
6214 req_list = rte_zmalloc("macvlan_add", ele_buff_size, 0);
6215 if (req_list == NULL) {
6216 PMD_DRV_LOG(ERR, "Failed to allocate memory");
6217 return I40E_ERR_NO_MEMORY;
6222 actual_num = (num + ele_num > total) ? (total - num) : ele_num;
6223 memset(req_list, 0, ele_buff_size);
6225 for (i = 0; i < actual_num; i++) {
6226 rte_memcpy(req_list[i].mac_addr,
6227 &filter[num + i].macaddr, ETH_ADDR_LEN);
6228 req_list[i].vlan_tag =
6229 rte_cpu_to_le_16(filter[num + i].vlan_id);
6231 switch (filter[num + i].filter_type) {
6232 case RTE_MAC_PERFECT_MATCH:
6233 flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH |
6234 I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
6236 case RTE_MACVLAN_PERFECT_MATCH:
6237 flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
6239 case RTE_MAC_HASH_MATCH:
6240 flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH |
6241 I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
6243 case RTE_MACVLAN_HASH_MATCH:
6244 flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH;
6247 PMD_DRV_LOG(ERR, "Invalid MAC match type");
6248 ret = I40E_ERR_PARAM;
6252 req_list[i].queue_number = 0;
6254 req_list[i].flags = rte_cpu_to_le_16(flags);
6257 ret = i40e_aq_add_macvlan(hw, vsi->seid, req_list,
6259 if (ret != I40E_SUCCESS) {
6260 PMD_DRV_LOG(ERR, "Failed to add macvlan filter");
6264 } while (num < total);
6272 i40e_remove_macvlan_filters(struct i40e_vsi *vsi,
6273 struct i40e_macvlan_filter *filter,
6276 int ele_num, ele_buff_size;
6277 int num, actual_num, i;
6279 int ret = I40E_SUCCESS;
6280 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
6281 struct i40e_aqc_remove_macvlan_element_data *req_list;
6283 if (filter == NULL || total == 0)
6284 return I40E_ERR_PARAM;
6286 ele_num = hw->aq.asq_buf_size / sizeof(*req_list);
6287 ele_buff_size = hw->aq.asq_buf_size;
6289 req_list = rte_zmalloc("macvlan_remove", ele_buff_size, 0);
6290 if (req_list == NULL) {
6291 PMD_DRV_LOG(ERR, "Failed to allocate memory");
6292 return I40E_ERR_NO_MEMORY;
6297 actual_num = (num + ele_num > total) ? (total - num) : ele_num;
6298 memset(req_list, 0, ele_buff_size);
6300 for (i = 0; i < actual_num; i++) {
6301 rte_memcpy(req_list[i].mac_addr,
6302 &filter[num + i].macaddr, ETH_ADDR_LEN);
6303 req_list[i].vlan_tag =
6304 rte_cpu_to_le_16(filter[num + i].vlan_id);
6306 switch (filter[num + i].filter_type) {
6307 case RTE_MAC_PERFECT_MATCH:
6308 flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
6309 I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
6311 case RTE_MACVLAN_PERFECT_MATCH:
6312 flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
6314 case RTE_MAC_HASH_MATCH:
6315 flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH |
6316 I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
6318 case RTE_MACVLAN_HASH_MATCH:
6319 flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH;
6322 PMD_DRV_LOG(ERR, "Invalid MAC filter type");
6323 ret = I40E_ERR_PARAM;
6326 req_list[i].flags = rte_cpu_to_le_16(flags);
6329 ret = i40e_aq_remove_macvlan(hw, vsi->seid, req_list,
6331 if (ret != I40E_SUCCESS) {
6332 PMD_DRV_LOG(ERR, "Failed to remove macvlan filter");
6336 } while (num < total);
6343 /* Find a specific MAC filter */
6344 static struct i40e_mac_filter *
6345 i40e_find_mac_filter(struct i40e_vsi *vsi,
6346 struct ether_addr *macaddr)
6348 struct i40e_mac_filter *f;
6350 TAILQ_FOREACH(f, &vsi->mac_list, next) {
6351 if (is_same_ether_addr(macaddr, &f->mac_info.mac_addr))
6359 i40e_find_vlan_filter(struct i40e_vsi *vsi,
6362 uint32_t vid_idx, vid_bit;
6364 if (vlan_id > ETH_VLAN_ID_MAX)
6367 vid_idx = I40E_VFTA_IDX(vlan_id);
6368 vid_bit = I40E_VFTA_BIT(vlan_id);
6370 if (vsi->vfta[vid_idx] & vid_bit)
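/*
 * The VFTA (VLAN filter table) is a bitmap covering all 4096 VLAN
 * IDs. Assuming the usual definitions, I40E_VFTA_IDX(v) == (v) >> 5
 * and I40E_VFTA_BIT(v) == 1 << ((v) & 0x1F), so e.g. VLAN 100 lands
 * in word 3 (100 / 32) under bit mask 1 << 4 (100 % 32).
 */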
6377 i40e_store_vlan_filter(struct i40e_vsi *vsi,
6378 uint16_t vlan_id, bool on)
6380 uint32_t vid_idx, vid_bit;
6382 vid_idx = I40E_VFTA_IDX(vlan_id);
6383 vid_bit = I40E_VFTA_BIT(vlan_id);
6386 vsi->vfta[vid_idx] |= vid_bit;
6388 vsi->vfta[vid_idx] &= ~vid_bit;
6392 i40e_set_vlan_filter(struct i40e_vsi *vsi,
6393 uint16_t vlan_id, bool on)
6395 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
6396 struct i40e_aqc_add_remove_vlan_element_data vlan_data = {0};
6399 if (vlan_id > ETH_VLAN_ID_MAX)
6402 i40e_store_vlan_filter(vsi, vlan_id, on);
6404 if ((!vsi->vlan_anti_spoof_on && !vsi->vlan_filter_on) || !vlan_id)
6407 vlan_data.vlan_tag = rte_cpu_to_le_16(vlan_id);
6410 ret = i40e_aq_add_vlan(hw, vsi->seid,
6411 &vlan_data, 1, NULL);
6412 if (ret != I40E_SUCCESS)
6413 PMD_DRV_LOG(ERR, "Failed to add vlan filter");
6415 ret = i40e_aq_remove_vlan(hw, vsi->seid,
6416 &vlan_data, 1, NULL);
6417 if (ret != I40E_SUCCESS)
6419 "Failed to remove vlan filter");
6424 * Find all VLAN options for a specific MAC address;
6425 * return with the actual VLANs found.
6428 i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi,
6429 struct i40e_macvlan_filter *mv_f,
6430 int num, struct ether_addr *addr)
6436 * Walk the VFTA directly rather than calling i40e_find_vlan_filter()
6437 * per VLAN ID to decrease the loop time, although the code looks complex.
6439 if (num < vsi->vlan_num)
6440 return I40E_ERR_PARAM;
6443 for (j = 0; j < I40E_VFTA_SIZE; j++) {
6445 for (k = 0; k < I40E_UINT32_BIT_SIZE; k++) {
6446 if (vsi->vfta[j] & (1 << k)) {
6449 "vlan number doesn't match");
6450 return I40E_ERR_PARAM;
6452 rte_memcpy(&mv_f[i].macaddr,
6453 addr, ETH_ADDR_LEN);
6455 j * I40E_UINT32_BIT_SIZE + k;
6461 return I40E_SUCCESS;
6465 i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
6466 struct i40e_macvlan_filter *mv_f,
6471 struct i40e_mac_filter *f;
6473 if (num < vsi->mac_num)
6474 return I40E_ERR_PARAM;
6476 TAILQ_FOREACH(f, &vsi->mac_list, next) {
6478 PMD_DRV_LOG(ERR, "buffer number doesn't match");
6479 return I40E_ERR_PARAM;
6481 rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr,
6483 mv_f[i].vlan_id = vlan;
6484 mv_f[i].filter_type = f->mac_info.filter_type;
6488 return I40E_SUCCESS;
6492 i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi)
6495 struct i40e_mac_filter *f;
6496 struct i40e_macvlan_filter *mv_f;
6497 int ret = I40E_SUCCESS;
6499 if (vsi == NULL || vsi->mac_num == 0)
6500 return I40E_ERR_PARAM;
6502 /* Case where no VLAN is set */
6503 if (vsi->vlan_num == 0)
6506 num = vsi->mac_num * vsi->vlan_num;
6508 mv_f = rte_zmalloc("macvlan_data", num * sizeof(*mv_f), 0);
6510 PMD_DRV_LOG(ERR, "failed to allocate memory");
6511 return I40E_ERR_NO_MEMORY;
6515 if (vsi->vlan_num == 0) {
6516 TAILQ_FOREACH(f, &vsi->mac_list, next) {
6517 rte_memcpy(&mv_f[i].macaddr,
6518 &f->mac_info.mac_addr, ETH_ADDR_LEN);
6519 mv_f[i].filter_type = f->mac_info.filter_type;
6520 mv_f[i].vlan_id = 0;
6524 TAILQ_FOREACH(f, &vsi->mac_list, next) {
6525 ret = i40e_find_all_vlan_for_mac(vsi, &mv_f[i],
6526 vsi->vlan_num, &f->mac_info.mac_addr);
6527 if (ret != I40E_SUCCESS)
6529 for (j = i; j < i + vsi->vlan_num; j++)
6530 mv_f[j].filter_type = f->mac_info.filter_type;
6535 ret = i40e_remove_macvlan_filters(vsi, mv_f, num);
6543 i40e_vsi_add_vlan(struct i40e_vsi *vsi, uint16_t vlan)
6545 struct i40e_macvlan_filter *mv_f;
6547 int ret = I40E_SUCCESS;
6549 if (!vsi || vlan > ETHER_MAX_VLAN_ID)
6550 return I40E_ERR_PARAM;
6552 /* If it's already set, just return */
6553 if (i40e_find_vlan_filter(vsi, vlan))
6554 return I40E_SUCCESS;
6556 mac_num = vsi->mac_num;
6559 PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr");
6560 return I40E_ERR_PARAM;
6563 mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);
6566 PMD_DRV_LOG(ERR, "failed to allocate memory");
6567 return I40E_ERR_NO_MEMORY;
6570 ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan);
6572 if (ret != I40E_SUCCESS)
6575 ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num);
6577 if (ret != I40E_SUCCESS)
6580 i40e_set_vlan_filter(vsi, vlan, 1);
6590 i40e_vsi_delete_vlan(struct i40e_vsi *vsi, uint16_t vlan)
6592 struct i40e_macvlan_filter *mv_f;
6594 int ret = I40E_SUCCESS;
6597 * Vlan 0 is the generic filter for untagged packets
6598 * and can't be removed.
6600 if (!vsi || vlan == 0 || vlan > ETHER_MAX_VLAN_ID)
6601 return I40E_ERR_PARAM;
6603 /* If it can't be found, just return */
6604 if (!i40e_find_vlan_filter(vsi, vlan))
6605 return I40E_ERR_PARAM;
6607 mac_num = vsi->mac_num;
6610 PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr");
6611 return I40E_ERR_PARAM;
6614 mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);
6617 PMD_DRV_LOG(ERR, "failed to allocate memory");
6618 return I40E_ERR_NO_MEMORY;
6621 ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan);
6623 if (ret != I40E_SUCCESS)
6626 ret = i40e_remove_macvlan_filters(vsi, mv_f, mac_num);
6628 if (ret != I40E_SUCCESS)
6631 /* This is the last VLAN to remove; replace all MAC filters with VLAN 0 */
6632 if (vsi->vlan_num == 1) {
6633 ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, 0);
6634 if (ret != I40E_SUCCESS)
6637 ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num);
6638 if (ret != I40E_SUCCESS)
6642 i40e_set_vlan_filter(vsi, vlan, 0);
6652 i40e_vsi_add_mac(struct i40e_vsi *vsi, struct i40e_mac_filter_info *mac_filter)
6654 struct i40e_mac_filter *f;
6655 struct i40e_macvlan_filter *mv_f;
6656 int i, vlan_num = 0;
6657 int ret = I40E_SUCCESS;
6659 /* If the filter has already been configured, just return */
6660 f = i40e_find_mac_filter(vsi, &mac_filter->mac_addr);
6662 return I40E_SUCCESS;
6663 if ((mac_filter->filter_type == RTE_MACVLAN_PERFECT_MATCH) ||
6664 (mac_filter->filter_type == RTE_MACVLAN_HASH_MATCH)) {
6667 * If vlan_num is 0, this is the first time to add a MAC;
6668 * set the mask for vlan_id 0.
6670 if (vsi->vlan_num == 0) {
6671 i40e_set_vlan_filter(vsi, 0, 1);
6674 vlan_num = vsi->vlan_num;
6675 } else if ((mac_filter->filter_type == RTE_MAC_PERFECT_MATCH) ||
6676 (mac_filter->filter_type == RTE_MAC_HASH_MATCH))
6679 mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
6681 PMD_DRV_LOG(ERR, "failed to allocate memory");
6682 return I40E_ERR_NO_MEMORY;
6685 for (i = 0; i < vlan_num; i++) {
6686 mv_f[i].filter_type = mac_filter->filter_type;
6687 rte_memcpy(&mv_f[i].macaddr, &mac_filter->mac_addr,
6691 if (mac_filter->filter_type == RTE_MACVLAN_PERFECT_MATCH ||
6692 mac_filter->filter_type == RTE_MACVLAN_HASH_MATCH) {
6693 ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num,
6694 &mac_filter->mac_addr);
6695 if (ret != I40E_SUCCESS)
6699 ret = i40e_add_macvlan_filters(vsi, mv_f, vlan_num);
6700 if (ret != I40E_SUCCESS)
6703 /* Add the MAC address to the MAC list */
6704 f = rte_zmalloc("macv_filter", sizeof(*f), 0);
6706 PMD_DRV_LOG(ERR, "failed to allocate memory");
6707 ret = I40E_ERR_NO_MEMORY;
6710 rte_memcpy(&f->mac_info.mac_addr, &mac_filter->mac_addr,
6712 f->mac_info.filter_type = mac_filter->filter_type;
6713 TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
6724 i40e_vsi_delete_mac(struct i40e_vsi *vsi, struct ether_addr *addr)
6726 struct i40e_mac_filter *f;
6727 struct i40e_macvlan_filter *mv_f;
6729 enum rte_mac_filter_type filter_type;
6730 int ret = I40E_SUCCESS;
6732 /* Can't find it, return an error */
6733 f = i40e_find_mac_filter(vsi, addr);
6735 return I40E_ERR_PARAM;
6737 vlan_num = vsi->vlan_num;
6738 filter_type = f->mac_info.filter_type;
6739 if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
6740 filter_type == RTE_MACVLAN_HASH_MATCH) {
6741 if (vlan_num == 0) {
6742 PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0");
6743 return I40E_ERR_PARAM;
6745 } else if (filter_type == RTE_MAC_PERFECT_MATCH ||
6746 filter_type == RTE_MAC_HASH_MATCH)
6749 mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
6751 PMD_DRV_LOG(ERR, "failed to allocate memory");
6752 return I40E_ERR_NO_MEMORY;
6755 for (i = 0; i < vlan_num; i++) {
6756 mv_f[i].filter_type = filter_type;
6757 rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr,
6760 if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
6761 filter_type == RTE_MACVLAN_HASH_MATCH) {
6762 ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num, addr);
6763 if (ret != I40E_SUCCESS)
6767 ret = i40e_remove_macvlan_filters(vsi, mv_f, vlan_num);
6768 if (ret != I40E_SUCCESS)
6771 /* Remove the MAC address from the MAC list */
6772 TAILQ_REMOVE(&vsi->mac_list, f, next);
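/*
 * The RSS helpers below translate between generic RTE_ETH_FLOW_* bits
 * and the hardware hash enable (HENA) PCTYPE bits through
 * adapter->pctypes_tbl[]. A single flow type may map to several
 * PCTYPEs (the PCTYPE list includes e.g. unicast and multicast UDP
 * variants), so table entries are OR-ed into the HENA value.
 */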
6782 /* Configure hash enable flags for RSS */
6784 i40e_config_hena(const struct i40e_adapter *adapter, uint64_t flags)
6792 for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < I40E_FLOW_TYPE_MAX; i++) {
6793 if (flags & (1ULL << i))
6794 hena |= adapter->pctypes_tbl[i];
6800 /* Parse the hash enable flags */
6802 i40e_parse_hena(const struct i40e_adapter *adapter, uint64_t flags)
6804 uint64_t rss_hf = 0;
6810 for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < I40E_FLOW_TYPE_MAX; i++) {
6811 if (flags & adapter->pctypes_tbl[i])
6812 rss_hf |= (1ULL << i);
6819 i40e_pf_disable_rss(struct i40e_pf *pf)
6821 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
6823 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0);
6824 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0);
6825 I40E_WRITE_FLUSH(hw);
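/*
 * Program the RSS hash key, either through the AdminQ on firmware
 * that is RSS-AQ capable, or directly into the PFQF/VFQF_HKEY
 * registers. Assuming I40E_PFQF_HKEY_MAX_INDEX is 12, a valid key is
 * (12 + 1) * 4 = 52 bytes; any other non-zero length is rejected,
 * while a NULL or zero-length key leaves the current key unchanged.
 */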
6829 i40e_set_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t key_len)
6831 struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
6832 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
6833 uint16_t key_idx = (vsi->type == I40E_VSI_SRIOV) ?
6834 I40E_VFQF_HKEY_MAX_INDEX :
6835 I40E_PFQF_HKEY_MAX_INDEX;
6838 if (!key || key_len == 0) {
6839 PMD_DRV_LOG(DEBUG, "No key to be configured");
6841 } else if (key_len != (key_idx + 1) *
6843 PMD_DRV_LOG(ERR, "Invalid key length %u", key_len);
6847 if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
6848 struct i40e_aqc_get_set_rss_key_data *key_dw =
6849 (struct i40e_aqc_get_set_rss_key_data *)key;
6851 ret = i40e_aq_set_rss_key(hw, vsi->vsi_id, key_dw);
6853 PMD_INIT_LOG(ERR, "Failed to configure RSS key via AQ");
6855 uint32_t *hash_key = (uint32_t *)key;
6858 if (vsi->type == I40E_VSI_SRIOV) {
6859 for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
6862 I40E_VFQF_HKEY1(i, vsi->user_param),
6866 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
6867 I40E_WRITE_REG(hw, I40E_PFQF_HKEY(i),
6870 I40E_WRITE_FLUSH(hw);
6877 i40e_get_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t *key_len)
6879 struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
6880 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
6884 if (!key || !key_len)
6887 if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
6888 ret = i40e_aq_get_rss_key(hw, vsi->vsi_id,
6889 (struct i40e_aqc_get_set_rss_key_data *)key);
6891 PMD_INIT_LOG(ERR, "Failed to get RSS key via AQ");
6895 uint32_t *key_dw = (uint32_t *)key;
6898 if (vsi->type == I40E_VSI_SRIOV) {
6899 for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++) {
6900 reg = I40E_VFQF_HKEY1(i, vsi->user_param);
6901 key_dw[i] = i40e_read_rx_ctl(hw, reg);
6903 *key_len = (I40E_VFQF_HKEY_MAX_INDEX + 1) *
6906 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) {
6907 reg = I40E_PFQF_HKEY(i);
6908 key_dw[i] = i40e_read_rx_ctl(hw, reg);
6910 *key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
6918 i40e_hw_rss_hash_set(struct i40e_pf *pf, struct rte_eth_rss_conf *rss_conf)
6920 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
6924 ret = i40e_set_rss_key(pf->main_vsi, rss_conf->rss_key,
6925 rss_conf->rss_key_len);
6929 hena = i40e_config_hena(pf->adapter, rss_conf->rss_hf);
6930 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
6931 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
6932 I40E_WRITE_FLUSH(hw);
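/*
 * Illustrative sketch (not part of the driver): applications normally
 * reach i40e_dev_rss_hash_update() through the generic ethdev API.
 * Passing rss_key == NULL keeps the current key; flow types outside
 * pf->adapter->flow_types_mask are masked off below.
 *
 *	struct rte_eth_rss_conf conf = {
 *		.rss_key = NULL,
 *		.rss_hf = ETH_RSS_IP | ETH_RSS_UDP,
 *	};
 *	ret = rte_eth_dev_rss_hash_update(port_id, &conf);
 */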
6938 i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
6939 struct rte_eth_rss_conf *rss_conf)
6941 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6942 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6943 uint64_t rss_hf = rss_conf->rss_hf & pf->adapter->flow_types_mask;
6946 hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
6947 hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
6949 if (!(hena & pf->adapter->pctypes_mask)) { /* RSS disabled */
6950 if (rss_hf != 0) /* Enable RSS */
6952 return 0; /* Nothing to do */
6955 if (rss_hf == 0) /* Disable RSS */
6958 return i40e_hw_rss_hash_set(pf, rss_conf);
6962 i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
6963 struct rte_eth_rss_conf *rss_conf)
6965 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6966 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6969 i40e_get_rss_key(pf->main_vsi, rss_conf->rss_key,
6970 &rss_conf->rss_key_len);
6972 hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
6973 hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
6974 rss_conf->rss_hf = i40e_parse_hena(pf->adapter, hena);
6980 i40e_dev_get_filter_type(uint16_t filter_type, uint16_t *flag)
6982 switch (filter_type) {
6983 case RTE_TUNNEL_FILTER_IMAC_IVLAN:
6984 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN;
6986 case RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID:
6987 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID;
6989 case RTE_TUNNEL_FILTER_IMAC_TENID:
6990 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID;
6992 case RTE_TUNNEL_FILTER_OMAC_TENID_IMAC:
6993 *flag = I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC;
6995 case ETH_TUNNEL_FILTER_IMAC:
6996 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC;
6998 case ETH_TUNNEL_FILTER_OIP:
6999 *flag = I40E_AQC_ADD_CLOUD_FILTER_OIP;
7001 case ETH_TUNNEL_FILTER_IIP:
7002 *flag = I40E_AQC_ADD_CLOUD_FILTER_IIP;
7005 PMD_DRV_LOG(ERR, "invalid tunnel filter type");
7012 /* Convert tunnel filter structure */
7014 i40e_tunnel_filter_convert(
7015 struct i40e_aqc_add_rm_cloud_filt_elem_ext *cld_filter,
7016 struct i40e_tunnel_filter *tunnel_filter)
7018 ether_addr_copy((struct ether_addr *)&cld_filter->element.outer_mac,
7019 (struct ether_addr *)&tunnel_filter->input.outer_mac);
7020 ether_addr_copy((struct ether_addr *)&cld_filter->element.inner_mac,
7021 (struct ether_addr *)&tunnel_filter->input.inner_mac);
7022 tunnel_filter->input.inner_vlan = cld_filter->element.inner_vlan;
7023 if ((rte_le_to_cpu_16(cld_filter->element.flags) &
7024 I40E_AQC_ADD_CLOUD_FLAGS_IPV6) ==
7025 I40E_AQC_ADD_CLOUD_FLAGS_IPV6)
7026 tunnel_filter->input.ip_type = I40E_TUNNEL_IPTYPE_IPV6;
7028 tunnel_filter->input.ip_type = I40E_TUNNEL_IPTYPE_IPV4;
7029 tunnel_filter->input.flags = cld_filter->element.flags;
7030 tunnel_filter->input.tenant_id = cld_filter->element.tenant_id;
7031 tunnel_filter->queue = cld_filter->element.queue_number;
7032 rte_memcpy(tunnel_filter->input.general_fields,
7033 cld_filter->general_fields,
7034 sizeof(cld_filter->general_fields));
7039 /* Check whether the tunnel filter exists */
7040 struct i40e_tunnel_filter *
7041 i40e_sw_tunnel_filter_lookup(struct i40e_tunnel_rule *tunnel_rule,
7042 const struct i40e_tunnel_filter_input *input)
7046 ret = rte_hash_lookup(tunnel_rule->hash_table, (const void *)input);
7050 return tunnel_rule->hash_map[ret];
7053 /* Add a tunnel filter into the SW list */
7055 i40e_sw_tunnel_filter_insert(struct i40e_pf *pf,
7056 struct i40e_tunnel_filter *tunnel_filter)
7058 struct i40e_tunnel_rule *rule = &pf->tunnel;
7061 ret = rte_hash_add_key(rule->hash_table, &tunnel_filter->input);
7064 "Failed to insert tunnel filter to hash table %d!",
7068 rule->hash_map[ret] = tunnel_filter;
7070 TAILQ_INSERT_TAIL(&rule->tunnel_list, tunnel_filter, rules);
7075 /* Delete a tunnel filter from the SW list */
7077 i40e_sw_tunnel_filter_del(struct i40e_pf *pf,
7078 struct i40e_tunnel_filter_input *input)
7080 struct i40e_tunnel_rule *rule = &pf->tunnel;
7081 struct i40e_tunnel_filter *tunnel_filter;
7084 ret = rte_hash_del_key(rule->hash_table, input);
7087 "Failed to delete tunnel filter to hash table %d!",
7091 tunnel_filter = rule->hash_map[ret];
7092 rule->hash_map[ret] = NULL;
7094 TAILQ_REMOVE(&rule->tunnel_list, tunnel_filter, rules);
7095 rte_free(tunnel_filter);
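/*
 * Add or delete (per 'add') a cloud/tunnel filter. The configuration
 * is first converted to an AdminQ cloud filter element, checked
 * against the SW list to catch duplicate adds or deletes of missing
 * entries, then programmed into (or removed from) the hardware and
 * mirrored in the SW list.
 */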
7101 i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
7102 struct rte_eth_tunnel_filter_conf *tunnel_filter,
7106 uint32_t ipv4_addr, ipv4_addr_le;
7107 uint8_t i, tun_type = 0;
7108 /* internal variable to convert ipv6 byte order */
7109 uint32_t convert_ipv6[4];
7111 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7112 struct i40e_vsi *vsi = pf->main_vsi;
7113 struct i40e_aqc_add_rm_cloud_filt_elem_ext *cld_filter;
7114 struct i40e_aqc_add_rm_cloud_filt_elem_ext *pfilter;
7115 struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
7116 struct i40e_tunnel_filter *tunnel, *node;
7117 struct i40e_tunnel_filter check_filter; /* Check if filter exists */
7119 cld_filter = rte_zmalloc("tunnel_filter",
7120 sizeof(struct i40e_aqc_add_rm_cloud_filt_elem_ext),
7123 if (NULL == cld_filter) {
7124 PMD_DRV_LOG(ERR, "Failed to alloc memory.");
7127 pfilter = cld_filter;
7129 ether_addr_copy(&tunnel_filter->outer_mac,
7130 (struct ether_addr *)&pfilter->element.outer_mac);
7131 ether_addr_copy(&tunnel_filter->inner_mac,
7132 (struct ether_addr *)&pfilter->element.inner_mac);
7134 pfilter->element.inner_vlan =
7135 rte_cpu_to_le_16(tunnel_filter->inner_vlan);
7136 if (tunnel_filter->ip_type == RTE_TUNNEL_IPTYPE_IPV4) {
7137 ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV4;
7138 ipv4_addr = rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv4_addr);
7139 ipv4_addr_le = rte_cpu_to_le_32(ipv4_addr);
7140 rte_memcpy(&pfilter->element.ipaddr.v4.data,
7142 sizeof(pfilter->element.ipaddr.v4.data));
7144 ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV6;
7145 for (i = 0; i < 4; i++) {
7147 rte_cpu_to_le_32(rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv6_addr[i]));
7149 rte_memcpy(&pfilter->element.ipaddr.v6.data,
7151 sizeof(pfilter->element.ipaddr.v6.data));
7154 /* check tunnel type */
7155 switch (tunnel_filter->tunnel_type) {
7156 case RTE_TUNNEL_TYPE_VXLAN:
7157 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN;
7159 case RTE_TUNNEL_TYPE_NVGRE:
7160 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC;
7162 case RTE_TUNNEL_TYPE_IP_IN_GRE:
7163 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_IP;
7166 /* Other tunnel types are not supported. */
7167 PMD_DRV_LOG(ERR, "tunnel type is not supported.");
7168 rte_free(cld_filter);
7172 val = i40e_dev_get_filter_type(tunnel_filter->filter_type,
7173 &pfilter->element.flags);
7175 rte_free(cld_filter);
7179 pfilter->element.flags |= rte_cpu_to_le_16(
7180 I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE |
7181 ip_type | (tun_type << I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT));
7182 pfilter->element.tenant_id = rte_cpu_to_le_32(tunnel_filter->tenant_id);
7183 pfilter->element.queue_number =
7184 rte_cpu_to_le_16(tunnel_filter->queue_id);
7186 /* Check whether the filter exists in the SW list */
7187 memset(&check_filter, 0, sizeof(check_filter));
7188 i40e_tunnel_filter_convert(cld_filter, &check_filter);
7189 node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &check_filter.input);
7191 PMD_DRV_LOG(ERR, "Conflict with existing tunnel rules!");
7192 rte_free(cld_filter);
7196 if (!add && !node) {
7197 PMD_DRV_LOG(ERR, "There's no corresponding tunnel filter!");
7198 rte_free(cld_filter);
7203 ret = i40e_aq_add_cloud_filters(hw,
7204 vsi->seid, &cld_filter->element, 1);
7206 PMD_DRV_LOG(ERR, "Failed to add a tunnel filter.");
7207 rte_free(cld_filter);
7210 tunnel = rte_zmalloc("tunnel_filter", sizeof(*tunnel), 0);
7211 if (tunnel == NULL) {
7212 PMD_DRV_LOG(ERR, "Failed to alloc memory.");
7213 rte_free(cld_filter);
7217 rte_memcpy(tunnel, &check_filter, sizeof(check_filter));
7218 ret = i40e_sw_tunnel_filter_insert(pf, tunnel);
7222 ret = i40e_aq_remove_cloud_filters(hw, vsi->seid,
7223 &cld_filter->element, 1);
7225 PMD_DRV_LOG(ERR, "Failed to delete a tunnel filter.");
7226 rte_free(cld_filter);
7229 ret = i40e_sw_tunnel_filter_del(pf, &node->input);
7232 rte_free(cld_filter);
7236 #define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_TR_WORD0 0x48
7237 #define I40E_TR_VXLAN_GRE_KEY_MASK 0x4
7238 #define I40E_TR_GENEVE_KEY_MASK 0x8
7239 #define I40E_TR_GENERIC_UDP_TUNNEL_MASK 0x40
7240 #define I40E_TR_GRE_KEY_MASK 0x400
7241 #define I40E_TR_GRE_KEY_WITH_XSUM_MASK 0x800
7242 #define I40E_TR_GRE_NO_KEY_MASK 0x8000
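/*
 * The replace-cloud-filters AdminQ commands below repurpose unused
 * firmware filter types: an existing L1/cloud filter type is replaced
 * by a custom one (0x11, 0x12, ...) whose field vectors match MPLS or
 * GTP tunnel headers, which the default filter set cannot classify.
 */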
7245 i40e_status_code i40e_replace_mpls_l1_filter(struct i40e_pf *pf)
7247 struct i40e_aqc_replace_cloud_filters_cmd filter_replace;
7248 struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf;
7249 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7250 enum i40e_status_code status = I40E_SUCCESS;
7252 memset(&filter_replace, 0,
7253 sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
7254 memset(&filter_replace_buf, 0,
7255 sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
7257 /* create L1 filter */
7258 filter_replace.old_filter_type =
7259 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC;
7260 filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X11;
7261 filter_replace.tr_bit = 0;
7263 /* Prepare the buffer, 3 entries */
7264 filter_replace_buf.data[0] =
7265 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0;
7266 filter_replace_buf.data[0] |=
7267 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7268 filter_replace_buf.data[2] = 0xFF;
7269 filter_replace_buf.data[3] = 0xFF;
7270 filter_replace_buf.data[4] =
7271 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1;
7272 filter_replace_buf.data[4] |=
7273 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7274 filter_replace_buf.data[7] = 0xF0;
7275 filter_replace_buf.data[8] =
7276 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_TR_WORD0;
7277 filter_replace_buf.data[8] |=
7278 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7279 filter_replace_buf.data[10] = I40E_TR_VXLAN_GRE_KEY_MASK |
7280 I40E_TR_GENEVE_KEY_MASK |
7281 I40E_TR_GENERIC_UDP_TUNNEL_MASK;
7282 filter_replace_buf.data[11] = (I40E_TR_GRE_KEY_MASK |
7283 I40E_TR_GRE_KEY_WITH_XSUM_MASK |
7284 I40E_TR_GRE_NO_KEY_MASK) >> 8;
7286 status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
7287 &filter_replace_buf);
7292 i40e_status_code i40e_replace_mpls_cloud_filter(struct i40e_pf *pf)
7294 struct i40e_aqc_replace_cloud_filters_cmd filter_replace;
7295 struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf;
7296 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7297 enum i40e_status_code status = I40E_SUCCESS;
7300 memset(&filter_replace, 0,
7301 sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
7302 memset(&filter_replace_buf, 0,
7303 sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
7304 filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER |
7305 I40E_AQC_MIRROR_CLOUD_FILTER;
7306 filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IIP;
7307 filter_replace.new_filter_type =
7308 I40E_AQC_ADD_CLOUD_FILTER_0X11;
7309 /* Prepare the buffer, 2 entries */
7310 filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
7311 filter_replace_buf.data[0] |=
7312 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7313 filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_0X11;
7314 filter_replace_buf.data[4] |=
7315 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7316 status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
7317 &filter_replace_buf);
7322 memset(&filter_replace, 0,
7323 sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
7324 memset(&filter_replace_buf, 0,
7325 sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
7327 filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER |
7328 I40E_AQC_MIRROR_CLOUD_FILTER;
7329 filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IMAC;
7330 filter_replace.new_filter_type =
7331 I40E_AQC_ADD_CLOUD_FILTER_0X12;
7332 /* Prepare the buffer, 2 entries */
7333 filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
7334 filter_replace_buf.data[0] |=
7335 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7336 filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_0X11;
7337 filter_replace_buf.data[4] |=
7338 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7340 status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
7341 &filter_replace_buf);
7345 static enum i40e_status_code
7346 i40e_replace_gtp_l1_filter(struct i40e_pf *pf)
7348 struct i40e_aqc_replace_cloud_filters_cmd filter_replace;
7349 struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf;
7350 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7351 enum i40e_status_code status = I40E_SUCCESS;
7354 memset(&filter_replace, 0,
7355 sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
7356 memset(&filter_replace_buf, 0,
7357 sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
7358 /* create L1 filter */
7359 filter_replace.old_filter_type =
7360 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC;
7361 filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X12;
7362 filter_replace.tr_bit = I40E_AQC_NEW_TR_22 |
7363 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7364 /* Prepare the buffer, 2 entries */
7365 filter_replace_buf.data[0] =
7366 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0;
7367 filter_replace_buf.data[0] |=
7368 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7369 filter_replace_buf.data[2] = 0xFF;
7370 filter_replace_buf.data[3] = 0xFF;
7371 filter_replace_buf.data[4] =
7372 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1;
7373 filter_replace_buf.data[4] |=
7374 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7375 filter_replace_buf.data[6] = 0xFF;
7376 filter_replace_buf.data[7] = 0xFF;
7377 status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
7378 &filter_replace_buf);
7383 memset(&filter_replace, 0,
7384 sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
7385 memset(&filter_replace_buf, 0,
7386 sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
7387 /* create L1 filter */
7388 filter_replace.old_filter_type =
7389 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TUNNLE_KEY;
7390 filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X13;
7391 filter_replace.tr_bit = I40E_AQC_NEW_TR_21 |
7392 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7393 /* Prepare the buffer, 2 entries */
7394 filter_replace_buf.data[0] =
7395 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0;
7396 filter_replace_buf.data[0] |=
7397 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7398 filter_replace_buf.data[2] = 0xFF;
7399 filter_replace_buf.data[3] = 0xFF;
7400 filter_replace_buf.data[4] =
7401 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1;
7402 filter_replace_buf.data[4] |=
7403 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7404 filter_replace_buf.data[6] = 0xFF;
7405 filter_replace_buf.data[7] = 0xFF;
7407 status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
7408 &filter_replace_buf);
7413 i40e_status_code i40e_replace_gtp_cloud_filter(struct i40e_pf *pf)
7415 struct i40e_aqc_replace_cloud_filters_cmd filter_replace;
7416 struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf;
7417 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7418 enum i40e_status_code status = I40E_SUCCESS;
7421 memset(&filter_replace, 0,
7422 sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
7423 memset(&filter_replace_buf, 0,
7424 sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
7425 filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
7426 filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN;
7427 filter_replace.new_filter_type =
7428 I40E_AQC_ADD_CLOUD_FILTER_0X11;
7429 /* Prepare the buffer, 2 entries */
7430 filter_replace_buf.data[0] = I40E_AQC_ADD_L1_FILTER_0X12;
7431 filter_replace_buf.data[0] |=
7432 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7433 filter_replace_buf.data[4] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
7434 filter_replace_buf.data[4] |=
7435 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7436 status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
7437 &filter_replace_buf);
7442 memset(&filter_replace, 0,
7443 sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
7444 memset(&filter_replace_buf, 0,
7445 sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
7446 filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
7447 filter_replace.old_filter_type =
7448 I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID;
7449 filter_replace.new_filter_type =
7450 I40E_AQC_ADD_CLOUD_FILTER_0X12;
7451 /* Prepare the buffer, 2 entries */
7452 filter_replace_buf.data[0] = I40E_AQC_ADD_L1_FILTER_0X13;
7453 filter_replace_buf.data[0] |=
7454 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7455 filter_replace_buf.data[4] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
7456 filter_replace_buf.data[4] |=
7457 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7459 status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
7460 &filter_replace_buf);
7465 i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf,
7466 struct i40e_tunnel_filter_conf *tunnel_filter,
7470 uint32_t ipv4_addr, ipv4_addr_le;
7471 uint8_t i, tun_type = 0;
7472 /* internal variable to convert ipv6 byte order */
7473 uint32_t convert_ipv6[4];
7475 struct i40e_pf_vf *vf = NULL;
7476 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7477 struct i40e_vsi *vsi;
7478 struct i40e_aqc_add_rm_cloud_filt_elem_ext *cld_filter;
7479 struct i40e_aqc_add_rm_cloud_filt_elem_ext *pfilter;
7480 struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
7481 struct i40e_tunnel_filter *tunnel, *node;
7482 struct i40e_tunnel_filter check_filter; /* Check if filter exists */
7484 bool big_buffer = 0;
7486 cld_filter = rte_zmalloc("tunnel_filter",
7487 sizeof(struct i40e_aqc_add_rm_cloud_filt_elem_ext),
7490 if (cld_filter == NULL) {
7491 PMD_DRV_LOG(ERR, "Failed to alloc memory.");
7494 pfilter = cld_filter;
7496 ether_addr_copy(&tunnel_filter->outer_mac,
7497 (struct ether_addr *)&pfilter->element.outer_mac);
7498 ether_addr_copy(&tunnel_filter->inner_mac,
7499 (struct ether_addr *)&pfilter->element.inner_mac);
7501 pfilter->element.inner_vlan =
7502 rte_cpu_to_le_16(tunnel_filter->inner_vlan);
7503 if (tunnel_filter->ip_type == I40E_TUNNEL_IPTYPE_IPV4) {
7504 ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV4;
7505 ipv4_addr = rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv4_addr);
7506 ipv4_addr_le = rte_cpu_to_le_32(ipv4_addr);
7507 rte_memcpy(&pfilter->element.ipaddr.v4.data,
7509 sizeof(pfilter->element.ipaddr.v4.data));
7511 ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV6;
7512 for (i = 0; i < 4; i++) {
7514 rte_cpu_to_le_32(rte_be_to_cpu_32(
7515 tunnel_filter->ip_addr.ipv6_addr[i]));
7517 rte_memcpy(&pfilter->element.ipaddr.v6.data,
7519 sizeof(pfilter->element.ipaddr.v6.data));
7522 /* check tunnel type */
7523 switch (tunnel_filter->tunnel_type) {
7524 case I40E_TUNNEL_TYPE_VXLAN:
7525 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN;
7527 case I40E_TUNNEL_TYPE_NVGRE:
7528 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC;
7530 case I40E_TUNNEL_TYPE_IP_IN_GRE:
7531 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_IP;
7533 case I40E_TUNNEL_TYPE_MPLSoUDP:
7534 if (!pf->mpls_replace_flag) {
7535 i40e_replace_mpls_l1_filter(pf);
7536 i40e_replace_mpls_cloud_filter(pf);
7537 pf->mpls_replace_flag = 1;
7539 teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
7540 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0] =
7542 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
7543 (teid_le & 0xF) << 12;
7544 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2] =
7547 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSOUDP;
7549 case I40E_TUNNEL_TYPE_MPLSoGRE:
7550 if (!pf->mpls_replace_flag) {
7551 i40e_replace_mpls_l1_filter(pf);
7552 i40e_replace_mpls_cloud_filter(pf);
7553 pf->mpls_replace_flag = 1;
7555 teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
7556 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0] =
7558 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
7559 (teid_le & 0xF) << 12;
7560 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2] =
7563 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSOGRE;
7565 case I40E_TUNNEL_TYPE_GTPC:
7566 if (!pf->gtp_replace_flag) {
7567 i40e_replace_gtp_l1_filter(pf);
7568 i40e_replace_gtp_cloud_filter(pf);
7569 pf->gtp_replace_flag = 1;
7571 teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
7572 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD0] =
7573 (teid_le >> 16) & 0xFFFF;
7574 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD1] =
7576 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD2] =
7580 case I40E_TUNNEL_TYPE_GTPU:
7581 if (!pf->gtp_replace_flag) {
7582 i40e_replace_gtp_l1_filter(pf);
7583 i40e_replace_gtp_cloud_filter(pf);
7584 pf->gtp_replace_flag = 1;
7586 teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
7587 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD0] =
7588 (teid_le >> 16) & 0xFFFF;
7589 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD1] =
7591 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD2] =
7595 case I40E_TUNNEL_TYPE_QINQ:
7596 if (!pf->qinq_replace_flag) {
7597 ret = i40e_cloud_filter_qinq_create(pf);
7600 "QinQ tunnel filter already created.");
7601 pf->qinq_replace_flag = 1;
7603 /* Add the values of the Outer and Inner VLAN
7604 * to the General fields.
7605 * Big Buffer should be set; see the changes in
7606 * i40e_aq_add_cloud_filters().
7608 pfilter->general_fields[0] = tunnel_filter->inner_vlan;
7609 pfilter->general_fields[1] = tunnel_filter->outer_vlan;
7613 /* Other tunnel types are not supported. */
7614 PMD_DRV_LOG(ERR, "tunnel type is not supported.");
7615 rte_free(cld_filter);
7619 if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_MPLSoUDP)
7620 pfilter->element.flags =
7621 I40E_AQC_ADD_CLOUD_FILTER_0X11;
7622 else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_MPLSoGRE)
7623 pfilter->element.flags =
7624 I40E_AQC_ADD_CLOUD_FILTER_0X12;
7625 else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_GTPC)
7626 pfilter->element.flags =
7627 I40E_AQC_ADD_CLOUD_FILTER_0X11;
7628 else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_GTPU)
7629 pfilter->element.flags =
7630 I40E_AQC_ADD_CLOUD_FILTER_0X12;
7631 else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_QINQ)
7632 pfilter->element.flags |=
7633 I40E_AQC_ADD_CLOUD_FILTER_0X10;
7635 val = i40e_dev_get_filter_type(tunnel_filter->filter_type,
7636 &pfilter->element.flags);
7638 rte_free(cld_filter);
7643 pfilter->element.flags |= rte_cpu_to_le_16(
7644 I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE |
7645 ip_type | (tun_type << I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT));
7646 pfilter->element.tenant_id = rte_cpu_to_le_32(tunnel_filter->tenant_id);
7647 pfilter->element.queue_number =
7648 rte_cpu_to_le_16(tunnel_filter->queue_id);
7650 if (!tunnel_filter->is_to_vf)
7653 if (tunnel_filter->vf_id >= pf->vf_num) {
7654 PMD_DRV_LOG(ERR, "Invalid argument.");
7655 rte_free(cld_filter);
7658 vf = &pf->vfs[tunnel_filter->vf_id];
7662 /* Check whether the filter exists in the SW list */
7663 memset(&check_filter, 0, sizeof(check_filter));
7664 i40e_tunnel_filter_convert(cld_filter, &check_filter);
7665 check_filter.is_to_vf = tunnel_filter->is_to_vf;
7666 check_filter.vf_id = tunnel_filter->vf_id;
7667 node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &check_filter.input);
7669 PMD_DRV_LOG(ERR, "Conflict with existing tunnel rules!");
7670 rte_free(cld_filter);
7674 if (!add && !node) {
7675 PMD_DRV_LOG(ERR, "There's no corresponding tunnel filter!");
7676 rte_free(cld_filter);
7682 ret = i40e_aq_add_cloud_filters_big_buffer(hw,
7683 vsi->seid, cld_filter, 1);
7685 ret = i40e_aq_add_cloud_filters(hw,
7686 vsi->seid, &cld_filter->element, 1);
7688 PMD_DRV_LOG(ERR, "Failed to add a tunnel filter.");
7689 rte_free(cld_filter);
7692 tunnel = rte_zmalloc("tunnel_filter", sizeof(*tunnel), 0);
7693 if (tunnel == NULL) {
7694 PMD_DRV_LOG(ERR, "Failed to alloc memory.");
7695 rte_free(cld_filter);
7699 rte_memcpy(tunnel, &check_filter, sizeof(check_filter));
7700 ret = i40e_sw_tunnel_filter_insert(pf, tunnel);
7705 ret = i40e_aq_remove_cloud_filters_big_buffer(
7706 hw, vsi->seid, cld_filter, 1);
7708 ret = i40e_aq_remove_cloud_filters(hw, vsi->seid,
7709 &cld_filter->element, 1);
7711 PMD_DRV_LOG(ERR, "Failed to delete a tunnel filter.");
7712 rte_free(cld_filter);
7715 ret = i40e_sw_tunnel_filter_del(pf, &node->input);
7718 rte_free(cld_filter);
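/*
 * Offloaded VXLAN UDP ports are tracked in pf->vxlan_ports[] together
 * with pf->vxlan_bitmap, one bit per slot. A port value of 0 marks a
 * free slot, which is why i40e_get_vxlan_port_idx(pf, 0) is used to
 * find space for a new port.
 */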
7723 i40e_get_vxlan_port_idx(struct i40e_pf *pf, uint16_t port)
7727 for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
7728 if (pf->vxlan_ports[i] == port)
7736 i40e_add_vxlan_port(struct i40e_pf *pf, uint16_t port)
7740 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7742 idx = i40e_get_vxlan_port_idx(pf, port);
7744 /* Check if port already exists */
7746 PMD_DRV_LOG(ERR, "Port %d already offloaded", port);
7750 /* Now check if there is space to add the new port */
7751 idx = i40e_get_vxlan_port_idx(pf, 0);
7754 "Maximum number of UDP ports reached, not adding port %d",
7759 ret = i40e_aq_add_udp_tunnel(hw, port, I40E_AQC_TUNNEL_TYPE_VXLAN,
7762 PMD_DRV_LOG(ERR, "Failed to add VXLAN UDP port %d", port);
7766 PMD_DRV_LOG(INFO, "Added port %d via AQ command, at index %d",
7769 /* New port: add it and mark its index in the bitmap */
7770 pf->vxlan_ports[idx] = port;
7771 pf->vxlan_bitmap |= (1 << idx);
7773 if (!(pf->flags & I40E_FLAG_VXLAN))
7774 pf->flags |= I40E_FLAG_VXLAN;
7780 i40e_del_vxlan_port(struct i40e_pf *pf, uint16_t port)
7783 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7785 if (!(pf->flags & I40E_FLAG_VXLAN)) {
7786 PMD_DRV_LOG(ERR, "VXLAN UDP port was not configured.");
7790 idx = i40e_get_vxlan_port_idx(pf, port);
7793 PMD_DRV_LOG(ERR, "Port %d doesn't exist", port);
7797 if (i40e_aq_del_udp_tunnel(hw, idx, NULL) < 0) {
7798 PMD_DRV_LOG(ERR, "Failed to delete VXLAN UDP port %d", port);
7802 PMD_DRV_LOG(INFO, "Deleted port %d via AQ command, at index %d",
7805 pf->vxlan_ports[idx] = 0;
7806 pf->vxlan_bitmap &= ~(1 << idx);
7808 if (!pf->vxlan_bitmap)
7809 pf->flags &= ~I40E_FLAG_VXLAN;
7814 /* Add UDP tunneling port */
7816 i40e_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
7817 struct rte_eth_udp_tunnel *udp_tunnel)
7820 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
7822 if (udp_tunnel == NULL)
7825 switch (udp_tunnel->prot_type) {
7826 case RTE_TUNNEL_TYPE_VXLAN:
7827 ret = i40e_add_vxlan_port(pf, udp_tunnel->udp_port);
7830 case RTE_TUNNEL_TYPE_GENEVE:
7831 case RTE_TUNNEL_TYPE_TEREDO:
7832 PMD_DRV_LOG(ERR, "Tunnel type is not supported yet.");
7837 PMD_DRV_LOG(ERR, "Invalid tunnel type");
7845 /* Remove UDP tunneling port */
7847 i40e_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
7848 struct rte_eth_udp_tunnel *udp_tunnel)
7851 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
7853 if (udp_tunnel == NULL)
7856 switch (udp_tunnel->prot_type) {
7857 case RTE_TUNNEL_TYPE_VXLAN:
7858 ret = i40e_del_vxlan_port(pf, udp_tunnel->udp_port);
7860 case RTE_TUNNEL_TYPE_GENEVE:
7861 case RTE_TUNNEL_TYPE_TEREDO:
7862 PMD_DRV_LOG(ERR, "Tunnel type is not supported yet.");
7866 PMD_DRV_LOG(ERR, "Invalid tunnel type");
7874 /* Calculate the maximum number of contiguous PF queues that are configured */
7876 i40e_pf_calc_configured_queues_num(struct i40e_pf *pf)
7878 struct rte_eth_dev_data *data = pf->dev_data;
7880 struct i40e_rx_queue *rxq;
7883 for (i = 0; i < pf->lan_nb_qps; i++) {
7884 rxq = data->rx_queues[i];
7885 if (rxq && rxq->q_set)
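/*
 * Program the default RSS configuration for the PF: fill the hash
 * lookup table (HLUT) with queue indices cycling over the 'num'
 * configured queues, then set the key and the hash enable bits. Each
 * 32-bit HLUT register packs four 8-bit entries, hence the write to
 * I40E_PFQF_HLUT(i >> 2); with e.g. num == 4 the table repeats the
 * pattern 0,1,2,3 across its rss_table_size entries.
 */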
7896 i40e_pf_config_rss(struct i40e_pf *pf)
7898 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7899 struct rte_eth_rss_conf rss_conf;
7900 uint32_t i, lut = 0;
7904 * If both VMDQ and RSS are enabled, not all PF queues are configured.
7905 * It's necessary to calculate the actual number of PF queues configured.
7907 if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
7908 num = i40e_pf_calc_configured_queues_num(pf);
7910 num = pf->dev_data->nb_rx_queues;
7912 num = RTE_MIN(num, I40E_MAX_Q_PER_TC);
7913 PMD_INIT_LOG(INFO, "A maximum of %u contiguous PF queues are configured",
7917 PMD_INIT_LOG(ERR, "No PF queues are configured to enable RSS");
7921 for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
7924 lut = (lut << 8) | (j & ((0x1 <<
7925 hw->func_caps.rss_table_entry_width) - 1));
7927 I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
7930 rss_conf = pf->dev_data->dev_conf.rx_adv_conf.rss_conf;
7931 if ((rss_conf.rss_hf & pf->adapter->flow_types_mask) == 0) {
7932 i40e_pf_disable_rss(pf);
7935 if (rss_conf.rss_key == NULL || rss_conf.rss_key_len <
7936 (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
7937 /* Random default key */
7938 static uint32_t rss_key_default[] = {0x6b793944,
7939 0x23504cb5, 0x5bea75b6, 0x309f4f12, 0x3dc0a2b8,
7940 0x024ddcdf, 0x339b8ca0, 0x4c4af64a, 0x34fac605,
7941 0x55d85839, 0x3a58997d, 0x2ec938e1, 0x66031581};
7943 rss_conf.rss_key = (uint8_t *)rss_key_default;
7944 rss_conf.rss_key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
7948 return i40e_hw_rss_hash_set(pf, &rss_conf);
7952 i40e_tunnel_filter_param_check(struct i40e_pf *pf,
7953 struct rte_eth_tunnel_filter_conf *filter)
7955 if (pf == NULL || filter == NULL) {
7956 PMD_DRV_LOG(ERR, "Invalid parameter");
7960 if (filter->queue_id >= pf->dev_data->nb_rx_queues) {
7961 PMD_DRV_LOG(ERR, "Invalid queue ID");
7965 if (filter->inner_vlan > ETHER_MAX_VLAN_ID) {
7966 PMD_DRV_LOG(ERR, "Invalid inner VLAN ID");
7970 if ((filter->filter_type & ETH_TUNNEL_FILTER_OMAC) &&
7971 (is_zero_ether_addr(&filter->outer_mac))) {
7972 PMD_DRV_LOG(ERR, "Cannot add NULL outer MAC address");
7976 if ((filter->filter_type & ETH_TUNNEL_FILTER_IMAC) &&
7977 (is_zero_ether_addr(&filter->inner_mac))) {
7978 PMD_DRV_LOG(ERR, "Cannot add NULL inner MAC address");
7985 #define I40E_GL_PRS_FVBM_MSK_ENA 0x80000000
7986 #define I40E_GL_PRS_FVBM(_i) (0x00269760 + ((_i) * 4))
7988 i40e_dev_set_gre_key_len(struct i40e_hw *hw, uint8_t len)
7993 val = I40E_READ_REG(hw, I40E_GL_PRS_FVBM(2));
7994 PMD_DRV_LOG(DEBUG, "Read original GL_PRS_FVBM with 0x%08x", val);
7997 reg = val | I40E_GL_PRS_FVBM_MSK_ENA;
7998 } else if (len == 4) {
7999 reg = val & ~I40E_GL_PRS_FVBM_MSK_ENA;
8001 PMD_DRV_LOG(ERR, "Unsupported GRE key length of %u", len);
8006 ret = i40e_aq_debug_write_register(hw, I40E_GL_PRS_FVBM(2),
8013 PMD_DRV_LOG(DEBUG, "Read modified GL_PRS_FVBM with 0x%08x",
8014 I40E_READ_REG(hw, I40E_GL_PRS_FVBM(2)));
8020 i40e_dev_global_config_set(struct i40e_hw *hw, struct rte_eth_global_cfg *cfg)
8027 switch (cfg->cfg_type) {
8028 case RTE_ETH_GLOBAL_CFG_TYPE_GRE_KEY_LEN:
8029 ret = i40e_dev_set_gre_key_len(hw, cfg->cfg.gre_key_len);
8032 PMD_DRV_LOG(ERR, "Unknown config type %u", cfg->cfg_type);
8040 i40e_filter_ctrl_global_config(struct rte_eth_dev *dev,
8041 enum rte_filter_op filter_op,
8044 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8045 int ret = I40E_ERR_PARAM;
8047 switch (filter_op) {
8048 case RTE_ETH_FILTER_SET:
8049 ret = i40e_dev_global_config_set(hw,
8050 (struct rte_eth_global_cfg *)arg);
8053 PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
8061 i40e_tunnel_filter_handle(struct rte_eth_dev *dev,
8062 enum rte_filter_op filter_op,
8065 struct rte_eth_tunnel_filter_conf *filter;
8066 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
8067 int ret = I40E_SUCCESS;
8069 filter = (struct rte_eth_tunnel_filter_conf *)(arg);
8071 if (i40e_tunnel_filter_param_check(pf, filter) < 0)
8072 return I40E_ERR_PARAM;
8074 switch (filter_op) {
8075 case RTE_ETH_FILTER_NOP:
8076 if (!(pf->flags & I40E_FLAG_VXLAN))
8077 ret = I40E_NOT_SUPPORTED;
8079 case RTE_ETH_FILTER_ADD:
8080 ret = i40e_dev_tunnel_filter_set(pf, filter, 1);
8082 case RTE_ETH_FILTER_DELETE:
8083 ret = i40e_dev_tunnel_filter_set(pf, filter, 0);
8086 PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
8087 ret = I40E_ERR_PARAM;
8095 i40e_pf_config_mq_rx(struct i40e_pf *pf)
8098 enum rte_eth_rx_mq_mode mq_mode = pf->dev_data->dev_conf.rxmode.mq_mode;
8101 if (mq_mode & ETH_MQ_RX_RSS_FLAG)
8102 ret = i40e_pf_config_rss(pf);
8104 i40e_pf_disable_rss(pf);
8109 /* Get the symmetric hash enable configurations per port */
8111 i40e_get_symmetric_hash_enable_per_port(struct i40e_hw *hw, uint8_t *enable)
8113 uint32_t reg = i40e_read_rx_ctl(hw, I40E_PRTQF_CTL_0);
8115 *enable = reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK ? 1 : 0;
8118 /* Set the symmetric hash enable configurations per port */
8120 i40e_set_symmetric_hash_enable_per_port(struct i40e_hw *hw, uint8_t enable)
8122 uint32_t reg = i40e_read_rx_ctl(hw, I40E_PRTQF_CTL_0);
8125 if (reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK) {
8127 "Symmetric hash has already been enabled");
8130 reg |= I40E_PRTQF_CTL_0_HSYM_ENA_MASK;
8132 if (!(reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK)) {
8134 "Symmetric hash has already been disabled");
8137 reg &= ~I40E_PRTQF_CTL_0_HSYM_ENA_MASK;
8139 i40e_write_rx_ctl(hw, I40E_PRTQF_CTL_0, reg);
8140 I40E_WRITE_FLUSH(hw);
8144 * Get global configurations of hash function type and symmetric hash enable
8145 * per flow type (pctype). Note that global configuration means it affects all
8146 * the ports on the same NIC.
8149 i40e_get_hash_filter_global_config(struct i40e_hw *hw,
8150 struct rte_eth_hash_global_conf *g_cfg)
8152 struct i40e_adapter *adapter = (struct i40e_adapter *)hw->back;
8156 memset(g_cfg, 0, sizeof(*g_cfg));
8157 reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);
8158 if (reg & I40E_GLQF_CTL_HTOEP_MASK)
8159 g_cfg->hash_func = RTE_ETH_HASH_FUNCTION_TOEPLITZ;
8161 g_cfg->hash_func = RTE_ETH_HASH_FUNCTION_SIMPLE_XOR;
8162 PMD_DRV_LOG(DEBUG, "Hash function is %s",
8163 (reg & I40E_GLQF_CTL_HTOEP_MASK) ? "Toeplitz" : "Simple XOR");
8166 * As i40e supports fewer than 64 flow types, only the first 64 bits
8167 * need to be checked.
8169 for (i = 1; i < RTE_SYM_HASH_MASK_ARRAY_SIZE; i++) {
8170 g_cfg->valid_bit_mask[i] = 0ULL;
8171 g_cfg->sym_hash_enable_mask[i] = 0ULL;
8174 g_cfg->valid_bit_mask[0] = adapter->flow_types_mask;
8176 for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < UINT64_BIT; i++) {
8177 if (!adapter->pctypes_tbl[i])
8179 for (j = I40E_FILTER_PCTYPE_INVALID + 1;
8180 j < I40E_FILTER_PCTYPE_MAX; j++) {
8181 if (adapter->pctypes_tbl[i] & (1ULL << j)) {
8182 reg = i40e_read_rx_ctl(hw, I40E_GLQF_HSYM(j));
8183 if (reg & I40E_GLQF_HSYM_SYMH_ENA_MASK) {
8184 g_cfg->sym_hash_enable_mask[0] |=
8195 i40e_hash_global_config_check(const struct i40e_adapter *adapter,
8196 const struct rte_eth_hash_global_conf *g_cfg)
8199 uint64_t mask0, i40e_mask = adapter->flow_types_mask;
8201 if (g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_TOEPLITZ &&
8202 g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_SIMPLE_XOR &&
8203 g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_DEFAULT) {
8204 PMD_DRV_LOG(ERR, "Unsupported hash function type %d",
8210 * As i40e supports fewer than 64 flow types, only the first 64 bits
8211 * need to be checked.
8213 mask0 = g_cfg->valid_bit_mask[0];
8214 for (i = 0; i < RTE_SYM_HASH_MASK_ARRAY_SIZE; i++) {
8216 /* Check if any unsupported flow type is configured */
8217 if ((mask0 | i40e_mask) ^ i40e_mask)
8220 if (g_cfg->valid_bit_mask[i])
8228 PMD_DRV_LOG(ERR, "i40e unsupported flow type bit(s) configured");
8234 * Set global configurations of hash function type and symmetric hash enable
8235 * per flow type (pctype). Note that modifying the global configuration will affect
8236 * all the ports on the same NIC.
8239 i40e_set_hash_filter_global_config(struct i40e_hw *hw,
8240 struct rte_eth_hash_global_conf *g_cfg)
8242 struct i40e_adapter *adapter = (struct i40e_adapter *)hw->back;
8246 uint64_t mask0 = g_cfg->valid_bit_mask[0] & adapter->flow_types_mask;
8248 /* Check the input parameters */
8249 ret = i40e_hash_global_config_check(adapter, g_cfg);
8254 * As i40e supports fewer than 64 flow types, only the first 64 bits
8255 * need to be configured.
8257 for (i = RTE_ETH_FLOW_UNKNOWN + 1; mask0 && i < UINT64_BIT; i++) {
8258 if (mask0 & (1UL << i)) {
8259 reg = (g_cfg->sym_hash_enable_mask[0] & (1ULL << i)) ?
8260 I40E_GLQF_HSYM_SYMH_ENA_MASK : 0;
8262 for (j = I40E_FILTER_PCTYPE_INVALID + 1;
8263 j < I40E_FILTER_PCTYPE_MAX; j++) {
8264 if (adapter->pctypes_tbl[i] & (1ULL << j))
8265 i40e_write_rx_ctl(hw,
8272 reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);
8273 if (g_cfg->hash_func == RTE_ETH_HASH_FUNCTION_TOEPLITZ) {
8275 if (reg & I40E_GLQF_CTL_HTOEP_MASK) {
8277 "Hash function already set to Toeplitz");
8280 reg |= I40E_GLQF_CTL_HTOEP_MASK;
8281 } else if (g_cfg->hash_func == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR) {
8283 if (!(reg & I40E_GLQF_CTL_HTOEP_MASK)) {
8285 "Hash function already set to Simple XOR");
8288 reg &= ~I40E_GLQF_CTL_HTOEP_MASK;
8290 /* Use the default, and keep it as it is */
8293 i40e_write_rx_ctl(hw, I40E_GLQF_CTL, reg);
8296 I40E_WRITE_FLUSH(hw);
8302 * Valid input sets for hash and flow director filters per PCTYPE
8305 i40e_get_valid_input_set(enum i40e_filter_pctype pctype,
8306 enum rte_filter_type filter)
8310 static const uint64_t valid_hash_inset_table[] = {
8311 [I40E_FILTER_PCTYPE_FRAG_IPV4] =
8312 I40E_INSET_DMAC | I40E_INSET_SMAC |
8313 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8314 I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_SRC |
8315 I40E_INSET_IPV4_DST | I40E_INSET_IPV4_TOS |
8316 I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
8317 I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
8318 I40E_INSET_FLEX_PAYLOAD,
8319 [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
8320 I40E_INSET_DMAC | I40E_INSET_SMAC |
8321 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8322 I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
8323 I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
8324 I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
8325 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8326 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8327 I40E_INSET_FLEX_PAYLOAD,
8328 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
8329 I40E_INSET_DMAC | I40E_INSET_SMAC |
8330 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8331 I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
8332 I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
8333 I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
8334 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8335 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8336 I40E_INSET_FLEX_PAYLOAD,
8337 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
8338 I40E_INSET_DMAC | I40E_INSET_SMAC |
8339 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8340 I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
8341 I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
8342 I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
8343 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8344 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8345 I40E_INSET_FLEX_PAYLOAD,
8346 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
8347 I40E_INSET_DMAC | I40E_INSET_SMAC |
8348 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8349 I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
8350 I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
8351 I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
8352 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8353 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8354 I40E_INSET_TCP_FLAGS | I40E_INSET_FLEX_PAYLOAD,
8355 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
8356 I40E_INSET_DMAC | I40E_INSET_SMAC |
8357 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8358 I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
8359 I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
8360 I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
8361 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8362 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8363 I40E_INSET_TCP_FLAGS | I40E_INSET_FLEX_PAYLOAD,
8364 [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
8365 I40E_INSET_DMAC | I40E_INSET_SMAC |
8366 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8367 I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
8368 I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
8369 I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
8370 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8371 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8372 I40E_INSET_SCTP_VT | I40E_INSET_FLEX_PAYLOAD,
8373 [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
8374 I40E_INSET_DMAC | I40E_INSET_SMAC |
8375 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8376 I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
8377 I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
8378 I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
8379 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8380 I40E_INSET_FLEX_PAYLOAD,
8381 [I40E_FILTER_PCTYPE_FRAG_IPV6] =
8382 I40E_INSET_DMAC | I40E_INSET_SMAC |
8383 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8384 I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
8385 I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
8386 I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_TUNNEL_DMAC |
8387 I40E_INSET_TUNNEL_ID | I40E_INSET_IPV6_SRC |
8388 I40E_INSET_IPV6_DST | I40E_INSET_FLEX_PAYLOAD,
8389 [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
8390 I40E_INSET_DMAC | I40E_INSET_SMAC |
8391 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8392 I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
8393 I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
8394 I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
8395 I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
8396 I40E_INSET_DST_PORT | I40E_INSET_FLEX_PAYLOAD,
8397 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
8398 I40E_INSET_DMAC | I40E_INSET_SMAC |
8399 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8400 I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
8401 I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
8402 I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
8403 I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
8404 I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
8405 I40E_INSET_FLEX_PAYLOAD,
8406 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
8407 I40E_INSET_DMAC | I40E_INSET_SMAC |
8408 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8409 I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
8410 I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
8411 I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
8412 I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
8413 I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
8414 I40E_INSET_FLEX_PAYLOAD,
8415 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
8416 I40E_INSET_DMAC | I40E_INSET_SMAC |
8417 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8418 I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
8419 I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
8420 I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
8421 I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
8422 I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
8423 I40E_INSET_FLEX_PAYLOAD,
8424 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
8425 I40E_INSET_DMAC | I40E_INSET_SMAC |
8426 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8427 I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
8428 I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
8429 I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
8430 I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
8431 I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
8432 I40E_INSET_FLEX_PAYLOAD,
8433 [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
8434 I40E_INSET_DMAC | I40E_INSET_SMAC |
8435 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8436 I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
8437 I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
8438 I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
8439 I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
8440 I40E_INSET_DST_PORT | I40E_INSET_SCTP_VT |
8441 I40E_INSET_FLEX_PAYLOAD,
8442 [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
8443 I40E_INSET_DMAC | I40E_INSET_SMAC |
8444 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8445 I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
8446 I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
8447 I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
8448 I40E_INSET_IPV6_DST | I40E_INSET_TUNNEL_ID |
8449 I40E_INSET_FLEX_PAYLOAD,
8450 [I40E_FILTER_PCTYPE_L2_PAYLOAD] =
8451 I40E_INSET_DMAC | I40E_INSET_SMAC |
8452 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8453 I40E_INSET_VLAN_TUNNEL | I40E_INSET_LAST_ETHER_TYPE |
8454 I40E_INSET_FLEX_PAYLOAD,
8458 * Flow director supports only fields defined in
8459 * union rte_eth_fdir_flow.
8461 static const uint64_t valid_fdir_inset_table[] = {
8462 [I40E_FILTER_PCTYPE_FRAG_IPV4] =
8463 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8464 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8465 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_PROTO |
8466 I40E_INSET_IPV4_TTL,
8467 [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
8468 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8469 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8470 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
8471 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8472 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
8473 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8474 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8475 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
8476 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8477 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
8478 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8479 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8480 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
8481 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8482 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
8483 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8484 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8485 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
8486 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8487 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
8488 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8489 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8490 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
8491 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8492 [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
8493 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8494 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8495 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
8496 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8498 [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
8499 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8500 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8501 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_PROTO |
8502 I40E_INSET_IPV4_TTL,
8503 [I40E_FILTER_PCTYPE_FRAG_IPV6] =
8504 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8505 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8506 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_NEXT_HDR |
8507 I40E_INSET_IPV6_HOP_LIMIT,
8508 [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
8509 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8510 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8511 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
8512 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8513 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
8514 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8515 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8516 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
8517 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8518 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
8519 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8520 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8521 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
8522 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8523 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
8524 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8525 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8526 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
8527 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8528 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
8529 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8530 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8531 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
8532 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8533 [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
8534 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8535 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8536 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
8537 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8539 [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
8540 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8541 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8542 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_NEXT_HDR |
8543 I40E_INSET_IPV6_HOP_LIMIT,
8544 [I40E_FILTER_PCTYPE_L2_PAYLOAD] =
8545 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8546 I40E_INSET_LAST_ETHER_TYPE,
8549 if (pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD)
8551 if (filter == RTE_ETH_FILTER_HASH)
8552 valid = valid_hash_inset_table[pctype];
8554 valid = valid_fdir_inset_table[pctype];
8560 * Validate if the input set is allowed for a specific PCTYPE
8563 i40e_validate_input_set(enum i40e_filter_pctype pctype,
8564 enum rte_filter_type filter, uint64_t inset)
8568 valid = i40e_get_valid_input_set(pctype, filter);
8569 if (inset & (~valid))
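/*
 * Usage sketch (illustrative only; assumes the elided tail of
 * i40e_validate_input_set() returns a negative value when the input
 * set requests a field outside the table for the given PCTYPE):
 *
 *	uint64_t inset = I40E_INSET_IPV4_SRC | I40E_INSET_TCP_FLAGS;
 *	if (i40e_validate_input_set(I40E_FILTER_PCTYPE_NONF_IPV4_UDP,
 *				    RTE_ETH_FILTER_FDIR, inset) < 0)
 *		return;	// TCP flags are not valid for IPv4/UDP FDIR
 */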
8575 /* default input set fields combination per pctype */
8577 i40e_get_default_input_set(uint16_t pctype)
8579 static const uint64_t default_inset_table[] = {
8580 [I40E_FILTER_PCTYPE_FRAG_IPV4] =
8581 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST,
8582 [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
8583 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8584 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8585 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
8586 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8587 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8588 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
8589 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8590 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8591 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
8592 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8593 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8594 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
8595 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8596 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8597 [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
8598 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8599 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8601 [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
8602 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST,
8603 [I40E_FILTER_PCTYPE_FRAG_IPV6] =
8604 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST,
8605 [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
8606 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8607 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8608 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
8609 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8610 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8611 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
8612 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8613 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8614 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
8615 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8616 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8617 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
8618 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8619 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8620 [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
8621 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8622 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8624 [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
8625 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST,
8626 [I40E_FILTER_PCTYPE_L2_PAYLOAD] =
8627 I40E_INSET_LAST_ETHER_TYPE,
8630 if (pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD)
8633 return default_inset_table[pctype];
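/*
 * Illustrative example: the default input set is a plain array lookup,
 * so the classic tuple defaults read off the table directly:
 *
 *	uint64_t def = i40e_get_default_input_set(
 *				I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
 *	// def == I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
 *	//        I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT
 */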
8637 * Parse the input set from index to logical bit masks
8640 i40e_parse_input_set(uint64_t *inset,
8641 enum i40e_filter_pctype pctype,
8642 enum rte_eth_input_set_field *field,
8648 static const struct {
8649 enum rte_eth_input_set_field field;
8651 } inset_convert_table[] = {
8652 {RTE_ETH_INPUT_SET_NONE, I40E_INSET_NONE},
8653 {RTE_ETH_INPUT_SET_L2_SRC_MAC, I40E_INSET_SMAC},
8654 {RTE_ETH_INPUT_SET_L2_DST_MAC, I40E_INSET_DMAC},
8655 {RTE_ETH_INPUT_SET_L2_OUTER_VLAN, I40E_INSET_VLAN_OUTER},
8656 {RTE_ETH_INPUT_SET_L2_INNER_VLAN, I40E_INSET_VLAN_INNER},
8657 {RTE_ETH_INPUT_SET_L2_ETHERTYPE, I40E_INSET_LAST_ETHER_TYPE},
8658 {RTE_ETH_INPUT_SET_L3_SRC_IP4, I40E_INSET_IPV4_SRC},
8659 {RTE_ETH_INPUT_SET_L3_DST_IP4, I40E_INSET_IPV4_DST},
8660 {RTE_ETH_INPUT_SET_L3_IP4_TOS, I40E_INSET_IPV4_TOS},
8661 {RTE_ETH_INPUT_SET_L3_IP4_PROTO, I40E_INSET_IPV4_PROTO},
8662 {RTE_ETH_INPUT_SET_L3_IP4_TTL, I40E_INSET_IPV4_TTL},
8663 {RTE_ETH_INPUT_SET_L3_SRC_IP6, I40E_INSET_IPV6_SRC},
8664 {RTE_ETH_INPUT_SET_L3_DST_IP6, I40E_INSET_IPV6_DST},
8665 {RTE_ETH_INPUT_SET_L3_IP6_TC, I40E_INSET_IPV6_TC},
8666 {RTE_ETH_INPUT_SET_L3_IP6_NEXT_HEADER,
8667 I40E_INSET_IPV6_NEXT_HDR},
8668 {RTE_ETH_INPUT_SET_L3_IP6_HOP_LIMITS,
8669 I40E_INSET_IPV6_HOP_LIMIT},
8670 {RTE_ETH_INPUT_SET_L4_UDP_SRC_PORT, I40E_INSET_SRC_PORT},
8671 {RTE_ETH_INPUT_SET_L4_TCP_SRC_PORT, I40E_INSET_SRC_PORT},
8672 {RTE_ETH_INPUT_SET_L4_SCTP_SRC_PORT, I40E_INSET_SRC_PORT},
8673 {RTE_ETH_INPUT_SET_L4_UDP_DST_PORT, I40E_INSET_DST_PORT},
8674 {RTE_ETH_INPUT_SET_L4_TCP_DST_PORT, I40E_INSET_DST_PORT},
8675 {RTE_ETH_INPUT_SET_L4_SCTP_DST_PORT, I40E_INSET_DST_PORT},
8676 {RTE_ETH_INPUT_SET_L4_SCTP_VERIFICATION_TAG,
8677 I40E_INSET_SCTP_VT},
8678 {RTE_ETH_INPUT_SET_TUNNEL_L2_INNER_DST_MAC,
8679 I40E_INSET_TUNNEL_DMAC},
8680 {RTE_ETH_INPUT_SET_TUNNEL_L2_INNER_VLAN,
8681 I40E_INSET_VLAN_TUNNEL},
8682 {RTE_ETH_INPUT_SET_TUNNEL_L4_UDP_KEY,
8683 I40E_INSET_TUNNEL_ID},
8684 {RTE_ETH_INPUT_SET_TUNNEL_GRE_KEY, I40E_INSET_TUNNEL_ID},
8685 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_1ST_WORD,
8686 I40E_INSET_FLEX_PAYLOAD_W1},
8687 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_2ND_WORD,
8688 I40E_INSET_FLEX_PAYLOAD_W2},
8689 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_3RD_WORD,
8690 I40E_INSET_FLEX_PAYLOAD_W3},
8691 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_4TH_WORD,
8692 I40E_INSET_FLEX_PAYLOAD_W4},
8693 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_5TH_WORD,
8694 I40E_INSET_FLEX_PAYLOAD_W5},
8695 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_6TH_WORD,
8696 I40E_INSET_FLEX_PAYLOAD_W6},
8697 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_7TH_WORD,
8698 I40E_INSET_FLEX_PAYLOAD_W7},
8699 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_8TH_WORD,
8700 I40E_INSET_FLEX_PAYLOAD_W8},
8703 if (!inset || !field || size > RTE_ETH_INSET_SIZE_MAX)
8706 /* Only one item allowed for default or all */
8708 if (field[0] == RTE_ETH_INPUT_SET_DEFAULT) {
8709 *inset = i40e_get_default_input_set(pctype);
8711 } else if (field[0] == RTE_ETH_INPUT_SET_NONE) {
8712 *inset = I40E_INSET_NONE;
8717 for (i = 0, *inset = 0; i < size; i++) {
8718 for (j = 0; j < RTE_DIM(inset_convert_table); j++) {
8719 if (field[i] == inset_convert_table[j].field) {
8720 *inset |= inset_convert_table[j].inset;
8725 /* The input set contains an unsupported field, return immediately */
8726 if (j == RTE_DIM(inset_convert_table))
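/*
 * Illustrative caller sketch (hypothetical values; the elided final
 * parameter of i40e_parse_input_set() is the number of entries in
 * field[]):
 *
 *	enum rte_eth_input_set_field fields[] = {
 *		RTE_ETH_INPUT_SET_L3_SRC_IP4,
 *		RTE_ETH_INPUT_SET_L4_UDP_DST_PORT,
 *	};
 *	uint64_t inset;
 *	i40e_parse_input_set(&inset, I40E_FILTER_PCTYPE_NONF_IPV4_UDP,
 *			     fields, RTE_DIM(fields));
 *	// on success, inset == I40E_INSET_IPV4_SRC | I40E_INSET_DST_PORT
 */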
8734 * Translate the input set from bit masks to register aware bit masks
8738 i40e_translate_input_set_reg(enum i40e_mac_type type, uint64_t input)
8748 static const struct inset_map inset_map_common[] = {
8749 {I40E_INSET_DMAC, I40E_REG_INSET_L2_DMAC},
8750 {I40E_INSET_SMAC, I40E_REG_INSET_L2_SMAC},
8751 {I40E_INSET_VLAN_OUTER, I40E_REG_INSET_L2_OUTER_VLAN},
8752 {I40E_INSET_VLAN_INNER, I40E_REG_INSET_L2_INNER_VLAN},
8753 {I40E_INSET_LAST_ETHER_TYPE, I40E_REG_INSET_LAST_ETHER_TYPE},
8754 {I40E_INSET_IPV4_TOS, I40E_REG_INSET_L3_IP4_TOS},
8755 {I40E_INSET_IPV6_SRC, I40E_REG_INSET_L3_SRC_IP6},
8756 {I40E_INSET_IPV6_DST, I40E_REG_INSET_L3_DST_IP6},
8757 {I40E_INSET_IPV6_TC, I40E_REG_INSET_L3_IP6_TC},
8758 {I40E_INSET_IPV6_NEXT_HDR, I40E_REG_INSET_L3_IP6_NEXT_HDR},
8759 {I40E_INSET_IPV6_HOP_LIMIT, I40E_REG_INSET_L3_IP6_HOP_LIMIT},
8760 {I40E_INSET_SRC_PORT, I40E_REG_INSET_L4_SRC_PORT},
8761 {I40E_INSET_DST_PORT, I40E_REG_INSET_L4_DST_PORT},
8762 {I40E_INSET_SCTP_VT, I40E_REG_INSET_L4_SCTP_VERIFICATION_TAG},
8763 {I40E_INSET_TUNNEL_ID, I40E_REG_INSET_TUNNEL_ID},
8764 {I40E_INSET_TUNNEL_DMAC,
8765 I40E_REG_INSET_TUNNEL_L2_INNER_DST_MAC},
8766 {I40E_INSET_TUNNEL_IPV4_DST, I40E_REG_INSET_TUNNEL_L3_DST_IP4},
8767 {I40E_INSET_TUNNEL_IPV6_DST, I40E_REG_INSET_TUNNEL_L3_DST_IP6},
8768 {I40E_INSET_TUNNEL_SRC_PORT,
8769 I40E_REG_INSET_TUNNEL_L4_UDP_SRC_PORT},
8770 {I40E_INSET_TUNNEL_DST_PORT,
8771 I40E_REG_INSET_TUNNEL_L4_UDP_DST_PORT},
8772 {I40E_INSET_VLAN_TUNNEL, I40E_REG_INSET_TUNNEL_VLAN},
8773 {I40E_INSET_FLEX_PAYLOAD_W1, I40E_REG_INSET_FLEX_PAYLOAD_WORD1},
8774 {I40E_INSET_FLEX_PAYLOAD_W2, I40E_REG_INSET_FLEX_PAYLOAD_WORD2},
8775 {I40E_INSET_FLEX_PAYLOAD_W3, I40E_REG_INSET_FLEX_PAYLOAD_WORD3},
8776 {I40E_INSET_FLEX_PAYLOAD_W4, I40E_REG_INSET_FLEX_PAYLOAD_WORD4},
8777 {I40E_INSET_FLEX_PAYLOAD_W5, I40E_REG_INSET_FLEX_PAYLOAD_WORD5},
8778 {I40E_INSET_FLEX_PAYLOAD_W6, I40E_REG_INSET_FLEX_PAYLOAD_WORD6},
8779 {I40E_INSET_FLEX_PAYLOAD_W7, I40E_REG_INSET_FLEX_PAYLOAD_WORD7},
8780 {I40E_INSET_FLEX_PAYLOAD_W8, I40E_REG_INSET_FLEX_PAYLOAD_WORD8},
8783 /* Some registers are mapped differently on the X722 */
8784 static const struct inset_map inset_map_diff_x722[] = {
8785 {I40E_INSET_IPV4_SRC, I40E_X722_REG_INSET_L3_SRC_IP4},
8786 {I40E_INSET_IPV4_DST, I40E_X722_REG_INSET_L3_DST_IP4},
8787 {I40E_INSET_IPV4_PROTO, I40E_X722_REG_INSET_L3_IP4_PROTO},
8788 {I40E_INSET_IPV4_TTL, I40E_X722_REG_INSET_L3_IP4_TTL},
8791 static const struct inset_map inset_map_diff_not_x722[] = {
8792 {I40E_INSET_IPV4_SRC, I40E_REG_INSET_L3_SRC_IP4},
8793 {I40E_INSET_IPV4_DST, I40E_REG_INSET_L3_DST_IP4},
8794 {I40E_INSET_IPV4_PROTO, I40E_REG_INSET_L3_IP4_PROTO},
8795 {I40E_INSET_IPV4_TTL, I40E_REG_INSET_L3_IP4_TTL},
8801 /* Translate input set to register aware inset */
8802 if (type == I40E_MAC_X722) {
8803 for (i = 0; i < RTE_DIM(inset_map_diff_x722); i++) {
8804 if (input & inset_map_diff_x722[i].inset)
8805 val |= inset_map_diff_x722[i].inset_reg;
8808 for (i = 0; i < RTE_DIM(inset_map_diff_not_x722); i++) {
8809 if (input & inset_map_diff_not_x722[i].inset)
8810 val |= inset_map_diff_not_x722[i].inset_reg;
8814 for (i = 0; i < RTE_DIM(inset_map_common); i++) {
8815 if (input & inset_map_common[i].inset)
8816 val |= inset_map_common[i].inset_reg;
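/*
 * Illustrative example of the translation (non-X722 mapping, per the
 * tables above):
 *
 *	uint64_t reg = i40e_translate_input_set_reg(I40E_MAC_XL710,
 *			I40E_INSET_DMAC | I40E_INSET_IPV4_SRC);
 *	// reg == I40E_REG_INSET_L2_DMAC | I40E_REG_INSET_L3_SRC_IP4
 */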
8823 i40e_generate_inset_mask_reg(uint64_t inset, uint32_t *mask, uint8_t nb_elem)
8826 uint64_t inset_need_mask = inset;
8828 static const struct {
8831 } inset_mask_map[] = {
8832 {I40E_INSET_IPV4_TOS, I40E_INSET_IPV4_TOS_MASK},
8833 {I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL, 0},
8834 {I40E_INSET_IPV4_PROTO, I40E_INSET_IPV4_PROTO_MASK},
8835 {I40E_INSET_IPV4_TTL, I40E_INSET_IPV4_TTL_MASK},
8836 {I40E_INSET_IPV6_TC, I40E_INSET_IPV6_TC_MASK},
8837 {I40E_INSET_IPV6_NEXT_HDR | I40E_INSET_IPV6_HOP_LIMIT, 0},
8838 {I40E_INSET_IPV6_NEXT_HDR, I40E_INSET_IPV6_NEXT_HDR_MASK},
8839 {I40E_INSET_IPV6_HOP_LIMIT, I40E_INSET_IPV6_HOP_LIMIT_MASK},
8842 if (!inset || !mask || !nb_elem)
8845 for (i = 0, idx = 0; i < RTE_DIM(inset_mask_map); i++) {
8846 /* Clear the inset bit if no mask is required,
8847 * e.g. proto + ttl
8849 if ((inset & inset_mask_map[i].inset) ==
8850 inset_mask_map[i].inset && inset_mask_map[i].mask == 0)
8851 inset_need_mask &= ~inset_mask_map[i].inset;
8852 if (!inset_need_mask)
8855 for (i = 0, idx = 0; i < RTE_DIM(inset_mask_map); i++) {
8856 if ((inset_need_mask & inset_mask_map[i].inset) ==
8857 inset_mask_map[i].inset) {
8858 if (idx >= nb_elem) {
8859 PMD_DRV_LOG(ERR, "exceeded maximum number of bitmasks");
8862 mask[idx] = inset_mask_map[i].mask;
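/*
 * Illustrative note: the first pass drops field pairs that need no mask
 * register (e.g. PROTO and TTL requested together), so they consume no
 * slot; a single field still emits one {field, mask} entry:
 *
 *	uint32_t mask[I40E_INSET_MASK_NUM_REG] = {0};
 *	// returns 1 and writes I40E_INSET_IPV4_TOS_MASK into mask[0]
 *	i40e_generate_inset_mask_reg(I40E_INSET_IPV4_TOS, mask,
 *				     I40E_INSET_MASK_NUM_REG);
 */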
8871 i40e_check_write_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val)
8873 uint32_t reg = i40e_read_rx_ctl(hw, addr);
8875 PMD_DRV_LOG(DEBUG, "[0x%08x] original: 0x%08x", addr, reg);
8877 i40e_write_rx_ctl(hw, addr, val);
8878 PMD_DRV_LOG(DEBUG, "[0x%08x] after: 0x%08x", addr,
8879 (uint32_t)i40e_read_rx_ctl(hw, addr));
8883 i40e_filter_input_set_init(struct i40e_pf *pf)
8885 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8886 enum i40e_filter_pctype pctype;
8887 uint64_t input_set, inset_reg;
8888 uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
8892 for (pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
8893 pctype <= I40E_FILTER_PCTYPE_L2_PAYLOAD; pctype++) {
8894 flow_type = i40e_pctype_to_flowtype(pf->adapter, pctype);
8896 if (flow_type == RTE_ETH_FLOW_UNKNOWN)
8899 input_set = i40e_get_default_input_set(pctype);
8901 num = i40e_generate_inset_mask_reg(input_set, mask_reg,
8902 I40E_INSET_MASK_NUM_REG);
8905 inset_reg = i40e_translate_input_set_reg(hw->mac.type,
8908 i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
8909 (uint32_t)(inset_reg & UINT32_MAX));
8910 i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1),
8911 (uint32_t)((inset_reg >>
8912 I40E_32_BIT_WIDTH) & UINT32_MAX));
8913 i40e_check_write_reg(hw, I40E_GLQF_HASH_INSET(0, pctype),
8914 (uint32_t)(inset_reg & UINT32_MAX));
8915 i40e_check_write_reg(hw, I40E_GLQF_HASH_INSET(1, pctype),
8916 (uint32_t)((inset_reg >>
8917 I40E_32_BIT_WIDTH) & UINT32_MAX));
8919 for (i = 0; i < num; i++) {
8920 i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype),
8922 i40e_check_write_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
8925 /* Clear unused mask registers of the pctype */
8926 for (i = num; i < I40E_INSET_MASK_NUM_REG; i++) {
8927 i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype),
8929 i40e_check_write_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
8932 I40E_WRITE_FLUSH(hw);
8934 /* store the default input set */
8935 pf->hash_input_set[pctype] = input_set;
8936 pf->fdir.input_set[pctype] = input_set;
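/*
 * Illustrative note on the register pattern above: each 64-bit inset
 * value is split across two 32-bit writes, low word to index 0 and
 * high word to index 1, for both the FDIR and the hash inset registers:
 *
 *	i40e_check_write_reg(hw, REG(pctype, 0),
 *			     (uint32_t)(inset_reg & UINT32_MAX));
 *	i40e_check_write_reg(hw, REG(pctype, 1),
 *			     (uint32_t)(inset_reg >> I40E_32_BIT_WIDTH));
 *
 * where REG stands in for I40E_PRTQF_FD_INSET or I40E_GLQF_HASH_INSET.
 */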
8941 i40e_hash_filter_inset_select(struct i40e_hw *hw,
8942 struct rte_eth_input_set_conf *conf)
8944 struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf;
8945 enum i40e_filter_pctype pctype;
8946 uint64_t input_set, inset_reg = 0;
8947 uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
8951 PMD_DRV_LOG(ERR, "Invalid pointer");
8954 if (conf->op != RTE_ETH_INPUT_SET_SELECT &&
8955 conf->op != RTE_ETH_INPUT_SET_ADD) {
8956 PMD_DRV_LOG(ERR, "Unsupported input set operation");
8960 pctype = i40e_flowtype_to_pctype(pf->adapter, conf->flow_type);
8961 if (pctype == I40E_FILTER_PCTYPE_INVALID) {
8962 PMD_DRV_LOG(ERR, "invalid flow_type input.");
8966 if (hw->mac.type == I40E_MAC_X722) {
8967 /* get translated pctype value in fd pctype register */
8968 pctype = (enum i40e_filter_pctype)i40e_read_rx_ctl(hw,
8969 I40E_GLQF_FD_PCTYPES((int)pctype));
8972 ret = i40e_parse_input_set(&input_set, pctype, conf->field,
8975 PMD_DRV_LOG(ERR, "Failed to parse input set");
8979 if (conf->op == RTE_ETH_INPUT_SET_ADD) {
8980 /* get inset value in register */
8981 inset_reg = i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, pctype));
8982 inset_reg <<= I40E_32_BIT_WIDTH;
8983 inset_reg |= i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, pctype));
8984 input_set |= pf->hash_input_set[pctype];
8986 num = i40e_generate_inset_mask_reg(input_set, mask_reg,
8987 I40E_INSET_MASK_NUM_REG);
8991 inset_reg |= i40e_translate_input_set_reg(hw->mac.type, input_set);
8993 i40e_check_write_reg(hw, I40E_GLQF_HASH_INSET(0, pctype),
8994 (uint32_t)(inset_reg & UINT32_MAX));
8995 i40e_check_write_reg(hw, I40E_GLQF_HASH_INSET(1, pctype),
8996 (uint32_t)((inset_reg >>
8997 I40E_32_BIT_WIDTH) & UINT32_MAX));
8999 for (i = 0; i < num; i++)
9000 i40e_check_write_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
9002 /* Clear unused mask registers of the pctype */
9003 for (i = num; i < I40E_INSET_MASK_NUM_REG; i++)
9004 i40e_check_write_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
9006 I40E_WRITE_FLUSH(hw);
9008 pf->hash_input_set[pctype] = input_set;
9013 i40e_fdir_filter_inset_select(struct i40e_pf *pf,
9014 struct rte_eth_input_set_conf *conf)
9016 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
9017 enum i40e_filter_pctype pctype;
9018 uint64_t input_set, inset_reg = 0;
9019 uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
9023 PMD_DRV_LOG(ERR, "Invalid pointer");
9026 if (conf->op != RTE_ETH_INPUT_SET_SELECT &&
9027 conf->op != RTE_ETH_INPUT_SET_ADD) {
9028 PMD_DRV_LOG(ERR, "Unsupported input set operation");
9032 pctype = i40e_flowtype_to_pctype(pf->adapter, conf->flow_type);
9034 if (pctype == I40E_FILTER_PCTYPE_INVALID) {
9035 PMD_DRV_LOG(ERR, "invalid flow_type input.");
9039 ret = i40e_parse_input_set(&input_set, pctype, conf->field,
9042 PMD_DRV_LOG(ERR, "Failed to parse input set");
9046 /* get inset value in register */
9047 inset_reg = i40e_read_rx_ctl(hw, I40E_PRTQF_FD_INSET(pctype, 1));
9048 inset_reg <<= I40E_32_BIT_WIDTH;
9049 inset_reg |= i40e_read_rx_ctl(hw, I40E_PRTQF_FD_INSET(pctype, 0));
9051 /* The flex payload bits of the FDIR inset register cannot be changed
9052 * here; they are programmed by writing I40E_PRTQF_FD_FLXINSET
9053 * in i40e_set_flex_mask_on_pctype.
9055 if (conf->op == RTE_ETH_INPUT_SET_SELECT)
9056 inset_reg &= I40E_REG_INSET_FLEX_PAYLOAD_WORDS;
9058 input_set |= pf->fdir.input_set[pctype];
9059 num = i40e_generate_inset_mask_reg(input_set, mask_reg,
9060 I40E_INSET_MASK_NUM_REG);
9064 inset_reg |= i40e_translate_input_set_reg(hw->mac.type, input_set);
9066 i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
9067 (uint32_t)(inset_reg & UINT32_MAX));
9068 i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1),
9069 (uint32_t)((inset_reg >>
9070 I40E_32_BIT_WIDTH) & UINT32_MAX));
9072 for (i = 0; i < num; i++)
9073 i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype),
9075 /* Clear unused mask registers of the pctype */
9076 for (i = num; i < I40E_INSET_MASK_NUM_REG; i++)
9077 i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype),
9079 I40E_WRITE_FLUSH(hw);
9081 pf->fdir.input_set[pctype] = input_set;
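/*
 * Illustrative note: RTE_ETH_INPUT_SET_SELECT replaces the stored input
 * set (keeping only the flex payload words read back from the register),
 * while RTE_ETH_INPUT_SET_ADD ORs the new fields into the existing one.
 * A hypothetical caller:
 *
 *	struct rte_eth_input_set_conf conf = {
 *		.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP,
 *		.inset_size = 1,
 *		.field = { RTE_ETH_INPUT_SET_L3_SRC_IP4 },
 *		.op = RTE_ETH_INPUT_SET_SELECT,
 *	};
 *	i40e_fdir_filter_inset_select(pf, &conf);
 */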
9086 i40e_hash_filter_get(struct i40e_hw *hw, struct rte_eth_hash_filter_info *info)
9091 PMD_DRV_LOG(ERR, "Invalid pointer");
9095 switch (info->info_type) {
9096 case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT:
9097 i40e_get_symmetric_hash_enable_per_port(hw,
9098 &(info->info.enable));
9100 case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG:
9101 ret = i40e_get_hash_filter_global_config(hw,
9102 &(info->info.global_conf));
9105 PMD_DRV_LOG(ERR, "Hash filter info type (%d) not supported",
9115 i40e_hash_filter_set(struct i40e_hw *hw, struct rte_eth_hash_filter_info *info)
9120 PMD_DRV_LOG(ERR, "Invalid pointer");
9124 switch (info->info_type) {
9125 case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT:
9126 i40e_set_symmetric_hash_enable_per_port(hw, info->info.enable);
9128 case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG:
9129 ret = i40e_set_hash_filter_global_config(hw,
9130 &(info->info.global_conf));
9132 case RTE_ETH_HASH_FILTER_INPUT_SET_SELECT:
9133 ret = i40e_hash_filter_inset_select(hw,
9134 &(info->info.input_set_conf));
9138 PMD_DRV_LOG(ERR, "Hash filter info type (%d) not supported",
9147 /* Operations for hash function */
9149 i40e_hash_filter_ctrl(struct rte_eth_dev *dev,
9150 enum rte_filter_op filter_op,
9153 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
9156 switch (filter_op) {
9157 case RTE_ETH_FILTER_NOP:
9159 case RTE_ETH_FILTER_GET:
9160 ret = i40e_hash_filter_get(hw,
9161 (struct rte_eth_hash_filter_info *)arg);
9163 case RTE_ETH_FILTER_SET:
9164 ret = i40e_hash_filter_set(hw,
9165 (struct rte_eth_hash_filter_info *)arg);
9168 PMD_DRV_LOG(WARNING, "Filter operation (%d) not supported",
9177 /* Convert ethertype filter structure */
9179 i40e_ethertype_filter_convert(const struct rte_eth_ethertype_filter *input,
9180 struct i40e_ethertype_filter *filter)
9182 rte_memcpy(&filter->input.mac_addr, &input->mac_addr, ETHER_ADDR_LEN);
9183 filter->input.ether_type = input->ether_type;
9184 filter->flags = input->flags;
9185 filter->queue = input->queue;
9190 /* Check if the ethertype filter exists */
9191 struct i40e_ethertype_filter *
9192 i40e_sw_ethertype_filter_lookup(struct i40e_ethertype_rule *ethertype_rule,
9193 const struct i40e_ethertype_filter_input *input)
9197 ret = rte_hash_lookup(ethertype_rule->hash_table, (const void *)input);
9201 return ethertype_rule->hash_map[ret];
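/*
 * Illustrative note: rte_hash_lookup() returns the key's slot index
 * (>= 0) on a hit and a negative errno on a miss; the driver stores the
 * filter pointer at the same index in hash_map[], so a lookup is:
 *
 *	int idx = rte_hash_lookup(rule->hash_table, &filter->input);
 *	struct i40e_ethertype_filter *f =
 *		idx >= 0 ? rule->hash_map[idx] : NULL;
 */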
9204 /* Add ethertype filter to SW list */
9206 i40e_sw_ethertype_filter_insert(struct i40e_pf *pf,
9207 struct i40e_ethertype_filter *filter)
9209 struct i40e_ethertype_rule *rule = &pf->ethertype;
9212 ret = rte_hash_add_key(rule->hash_table, &filter->input);
9215 "Failed to insert ethertype filter"
9216 " to hash table %d!",
9220 rule->hash_map[ret] = filter;
9222 TAILQ_INSERT_TAIL(&rule->ethertype_list, filter, rules);
9227 /* Delete ethertype filter from SW list */
9229 i40e_sw_ethertype_filter_del(struct i40e_pf *pf,
9230 struct i40e_ethertype_filter_input *input)
9232 struct i40e_ethertype_rule *rule = &pf->ethertype;
9233 struct i40e_ethertype_filter *filter;
9236 ret = rte_hash_del_key(rule->hash_table, input);
9239 "Failed to delete ethertype filter"
9240 " to hash table %d!",
9244 filter = rule->hash_map[ret];
9245 rule->hash_map[ret] = NULL;
9247 TAILQ_REMOVE(&rule->ethertype_list, filter, rules);
9254 * Configure an ethertype filter, which can direct packets by filtering
9255 * on MAC address and ether_type, or on ether_type alone
9258 i40e_ethertype_filter_set(struct i40e_pf *pf,
9259 struct rte_eth_ethertype_filter *filter,
9262 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
9263 struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
9264 struct i40e_ethertype_filter *ethertype_filter, *node;
9265 struct i40e_ethertype_filter check_filter;
9266 struct i40e_control_filter_stats stats;
9270 if (filter->queue >= pf->dev_data->nb_rx_queues) {
9271 PMD_DRV_LOG(ERR, "Invalid queue ID");
9274 if (filter->ether_type == ETHER_TYPE_IPv4 ||
9275 filter->ether_type == ETHER_TYPE_IPv6) {
9277 "unsupported ether_type(0x%04x) in control packet filter.",
9278 filter->ether_type);
9281 if (filter->ether_type == ETHER_TYPE_VLAN)
9282 PMD_DRV_LOG(WARNING,
9283 "filter vlan ether_type in first tag is not supported.");
9285 /* Check if the filter already exists in the SW list */
9286 memset(&check_filter, 0, sizeof(check_filter));
9287 i40e_ethertype_filter_convert(filter, &check_filter);
9288 node = i40e_sw_ethertype_filter_lookup(ethertype_rule,
9289 &check_filter.input);
9291 PMD_DRV_LOG(ERR, "Conflict with existing ethertype rules!");
9295 if (!add && !node) {
9296 PMD_DRV_LOG(ERR, "There's no corresponding ethertype filter!");
9300 if (!(filter->flags & RTE_ETHTYPE_FLAGS_MAC))
9301 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
9302 if (filter->flags & RTE_ETHTYPE_FLAGS_DROP)
9303 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
9304 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
9306 memset(&stats, 0, sizeof(stats));
9307 ret = i40e_aq_add_rem_control_packet_filter(hw,
9308 filter->mac_addr.addr_bytes,
9309 filter->ether_type, flags,
9311 filter->queue, add, &stats, NULL);
9314 "add/rem control packet filter, return %d, mac_etype_used = %u, etype_used = %u, mac_etype_free = %u, etype_free = %u",
9315 ret, stats.mac_etype_used, stats.etype_used,
9316 stats.mac_etype_free, stats.etype_free);
9320 /* Add or delete a filter in SW list */
9322 ethertype_filter = rte_zmalloc("ethertype_filter",
9323 sizeof(*ethertype_filter), 0);
9324 if (ethertype_filter == NULL) {
9325 PMD_DRV_LOG(ERR, "Failed to alloc memory.");
9329 rte_memcpy(ethertype_filter, &check_filter,
9330 sizeof(check_filter));
9331 ret = i40e_sw_ethertype_filter_insert(pf, ethertype_filter);
9333 rte_free(ethertype_filter);
9335 ret = i40e_sw_ethertype_filter_del(pf, &node->input);
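/*
 * Illustrative caller sketch (0x88B5 is a hypothetical ethertype; the
 * last argument of i40e_ethertype_filter_set() selects add vs delete):
 *
 *	struct rte_eth_ethertype_filter f = {
 *		.ether_type = 0x88B5,
 *		.flags = RTE_ETHTYPE_FLAGS_DROP,
 *		.queue = 0,
 *	};
 *	i40e_ethertype_filter_set(pf, &f, 1);	// 1 == add
 */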
9342 * Handle operations for ethertype filter.
9345 i40e_ethertype_filter_handle(struct rte_eth_dev *dev,
9346 enum rte_filter_op filter_op,
9349 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
9352 if (filter_op == RTE_ETH_FILTER_NOP)
9356 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u",
9361 switch (filter_op) {
9362 case RTE_ETH_FILTER_ADD:
9363 ret = i40e_ethertype_filter_set(pf,
9364 (struct rte_eth_ethertype_filter *)arg,
9367 case RTE_ETH_FILTER_DELETE:
9368 ret = i40e_ethertype_filter_set(pf,
9369 (struct rte_eth_ethertype_filter *)arg,
9373 PMD_DRV_LOG(ERR, "unsupported operation %u", filter_op);
9381 i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
9382 enum rte_filter_type filter_type,
9383 enum rte_filter_op filter_op,
9391 switch (filter_type) {
9392 case RTE_ETH_FILTER_NONE:
9393 /* For global configuration */
9394 ret = i40e_filter_ctrl_global_config(dev, filter_op, arg);
9396 case RTE_ETH_FILTER_HASH:
9397 ret = i40e_hash_filter_ctrl(dev, filter_op, arg);
9399 case RTE_ETH_FILTER_MACVLAN:
9400 ret = i40e_mac_filter_handle(dev, filter_op, arg);
9402 case RTE_ETH_FILTER_ETHERTYPE:
9403 ret = i40e_ethertype_filter_handle(dev, filter_op, arg);
9405 case RTE_ETH_FILTER_TUNNEL:
9406 ret = i40e_tunnel_filter_handle(dev, filter_op, arg);
9408 case RTE_ETH_FILTER_FDIR:
9409 ret = i40e_fdir_ctrl_func(dev, filter_op, arg);
9411 case RTE_ETH_FILTER_GENERIC:
9412 if (filter_op != RTE_ETH_FILTER_GET)
9414 *(const void **)arg = &i40e_flow_ops;
9417 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
9427 * Check and enable Extended Tag.
9428 * Enabling Extended Tag is important for 40G performance.
9431 i40e_enable_extended_tag(struct rte_eth_dev *dev)
9433 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
9437 ret = rte_pci_read_config(pci_dev, &buf, sizeof(buf),
9440 PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x",
9444 if (!(buf & PCI_DEV_CAP_EXT_TAG_MASK)) {
9445 PMD_DRV_LOG(ERR, "Does not support Extended Tag");
9450 ret = rte_pci_read_config(pci_dev, &buf, sizeof(buf),
9453 PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x",
9457 if (buf & PCI_DEV_CTRL_EXT_TAG_MASK) {
9458 PMD_DRV_LOG(DEBUG, "Extended Tag has already been enabled");
9461 buf |= PCI_DEV_CTRL_EXT_TAG_MASK;
9462 ret = rte_pci_write_config(pci_dev, &buf, sizeof(buf),
9465 PMD_DRV_LOG(ERR, "Failed to write PCI offset 0x%x",
9472 * Because some registers are reset only by a global hardware reset,
9473 * hardware initialization is needed to put those registers into an
9474 * expected initial state.
9477 i40e_hw_init(struct rte_eth_dev *dev)
9479 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
9481 i40e_enable_extended_tag(dev);
9483 /* clear the PF Queue Filter control register */
9484 i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, 0);
9486 /* Disable symmetric hash per port */
9487 i40e_set_symmetric_hash_enable_per_port(hw, 0);
9491 * On X722 it is possible for multiple pctypes to be mapped to the same
9492 * flowtype; however, this function returns only the single highest pctype
9493 * index, which is not quite correct. This is a known problem of the i40e
9494 * driver and needs to be fixed later.
9496 enum i40e_filter_pctype
9497 i40e_flowtype_to_pctype(const struct i40e_adapter *adapter, uint16_t flow_type)
9500 uint64_t pctype_mask;
9502 if (flow_type < I40E_FLOW_TYPE_MAX) {
9503 pctype_mask = adapter->pctypes_tbl[flow_type];
9504 for (i = I40E_FILTER_PCTYPE_MAX - 1; i > 0; i--) {
9505 if (pctype_mask & (1ULL << i))
9506 return (enum i40e_filter_pctype)i;
9509 return I40E_FILTER_PCTYPE_INVALID;
9513 i40e_pctype_to_flowtype(const struct i40e_adapter *adapter,
9514 enum i40e_filter_pctype pctype)
9517 uint64_t pctype_mask = 1ULL << pctype;
9519 for (flowtype = RTE_ETH_FLOW_UNKNOWN + 1; flowtype < I40E_FLOW_TYPE_MAX;
9521 if (adapter->pctypes_tbl[flowtype] & pctype_mask)
9525 return RTE_ETH_FLOW_UNKNOWN;
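/*
 * Illustrative note: pctypes_tbl[] is a per-flowtype bitmap of PCTYPEs,
 * so the two translations are bit scans in opposite directions:
 *
 *	uint64_t mask = adapter->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV4_UDP];
 *	// i40e_flowtype_to_pctype() returns the highest bit set in mask,
 *	// e.g. I40E_FILTER_PCTYPE_NONF_IPV4_UDP on non-X722 parts.
 */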
9529 * On X710, performance falls far short of expectations on recent firmware
9530 * versions. On XL710 the same is true when promiscuous mode is disabled,
9531 * or when promiscuous mode is enabled and the port MAC address equals the
9532 * packet destination MAC address. The fix for this issue may not be
9533 * integrated in upcoming firmware versions, so a workaround in the
9534 * software driver is needed: it modifies the initial values of three
9535 * internal-only registers for both X710 and XL710. Note that the values
9536 * for X710 and XL710 may differ, and the workaround can be removed once
9537 * the issue is fixed in firmware.
9540 /* For both X710 and XL710 */
9541 #define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_1 0x10000200
9542 #define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_2 0x203F0200
9543 #define I40E_GL_SWR_PRI_JOIN_MAP_0 0x26CE00
9545 #define I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE 0x011f0200
9546 #define I40E_GL_SWR_PRI_JOIN_MAP_2 0x26CE08
9549 #define I40E_X722_GL_SWR_PRI_JOIN_MAP_0_VALUE 0x20000200
9550 #define I40E_X722_GL_SWR_PRI_JOIN_MAP_2_VALUE 0x013F0200
9553 #define I40E_GL_SWR_PM_UP_THR_EF_VALUE 0x03030303
9555 #define I40E_GL_SWR_PM_UP_THR_SF_VALUE 0x06060606
9556 #define I40E_GL_SWR_PM_UP_THR 0x269FBC
9559 i40e_dev_sync_phy_type(struct i40e_hw *hw)
9561 enum i40e_status_code status;
9562 struct i40e_aq_get_phy_abilities_resp phy_ab;
9566 status = i40e_aq_get_phy_capabilities(hw, false, true, &phy_ab,
9570 PMD_INIT_LOG(WARNING, "Failed to sync phy type: status=%d",
9573 rte_delay_us(100000);
9575 status = i40e_aq_get_phy_capabilities(hw, false,
9576 true, &phy_ab, NULL);
9584 i40e_configure_registers(struct i40e_hw *hw)
9590 {I40E_GL_SWR_PRI_JOIN_MAP_0, 0},
9591 {I40E_GL_SWR_PRI_JOIN_MAP_2, 0},
9592 {I40E_GL_SWR_PM_UP_THR, 0}, /* Compute value dynamically */
9598 for (i = 0; i < RTE_DIM(reg_table); i++) {
9599 if (reg_table[i].addr == I40E_GL_SWR_PRI_JOIN_MAP_0) {
9600 if (hw->mac.type == I40E_MAC_X722) /* For X722 */
9602 I40E_X722_GL_SWR_PRI_JOIN_MAP_0_VALUE;
9603 else /* For X710/XL710/XXV710 */
9604 if (hw->aq.fw_maj_ver < 6)
9606 I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_1;
9609 I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_2;
9612 if (reg_table[i].addr == I40E_GL_SWR_PRI_JOIN_MAP_2) {
9613 if (hw->mac.type == I40E_MAC_X722) /* For X722 */
9615 I40E_X722_GL_SWR_PRI_JOIN_MAP_2_VALUE;
9616 else /* For X710/XL710/XXV710 */
9618 I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE;
9621 if (reg_table[i].addr == I40E_GL_SWR_PM_UP_THR) {
9622 if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types) || /* For XL710 */
9623 I40E_PHY_TYPE_SUPPORT_25G(hw->phy.phy_types)) /* For XXV710 */
9625 I40E_GL_SWR_PM_UP_THR_SF_VALUE;
9628 I40E_GL_SWR_PM_UP_THR_EF_VALUE;
9631 ret = i40e_aq_debug_read_register(hw, reg_table[i].addr,
9634 PMD_DRV_LOG(ERR, "Failed to read from 0x%"PRIx32,
9638 PMD_DRV_LOG(DEBUG, "Read from 0x%"PRIx32": 0x%"PRIx64,
9639 reg_table[i].addr, reg);
9640 if (reg == reg_table[i].val)
9643 ret = i40e_aq_debug_write_register(hw, reg_table[i].addr,
9644 reg_table[i].val, NULL);
9647 "Failed to write 0x%"PRIx64" to the address of 0x%"PRIx32,
9648 reg_table[i].val, reg_table[i].addr);
9651 PMD_DRV_LOG(DEBUG, "Write 0x%"PRIx64" to the address of "
9652 "0x%"PRIx32, reg_table[i].val, reg_table[i].addr);
9656 #define I40E_VSI_TSR(_i) (0x00050800 + ((_i) * 4))
9657 #define I40E_VSI_TSR_QINQ_CONFIG 0xc030
9658 #define I40E_VSI_L2TAGSTXVALID(_i) (0x00042800 + ((_i) * 4))
9659 #define I40E_VSI_L2TAGSTXVALID_QINQ 0xab
9661 i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi)
9666 if (vsi->vsi_id >= I40E_MAX_NUM_VSIS) {
9667 PMD_DRV_LOG(ERR, "VSI ID exceeds the maximum");
9671 /* Configure for double VLAN RX stripping */
9672 reg = I40E_READ_REG(hw, I40E_VSI_TSR(vsi->vsi_id));
9673 if ((reg & I40E_VSI_TSR_QINQ_CONFIG) != I40E_VSI_TSR_QINQ_CONFIG) {
9674 reg |= I40E_VSI_TSR_QINQ_CONFIG;
9675 ret = i40e_aq_debug_write_register(hw,
9676 I40E_VSI_TSR(vsi->vsi_id),
9679 PMD_DRV_LOG(ERR, "Failed to update VSI_TSR[%d]",
9681 return I40E_ERR_CONFIG;
9685 /* Configure for double VLAN TX insertion */
9686 reg = I40E_READ_REG(hw, I40E_VSI_L2TAGSTXVALID(vsi->vsi_id));
9687 if ((reg & 0xff) != I40E_VSI_L2TAGSTXVALID_QINQ) {
9688 reg = I40E_VSI_L2TAGSTXVALID_QINQ;
9689 ret = i40e_aq_debug_write_register(hw,
9690 I40E_VSI_L2TAGSTXVALID(
9691 vsi->vsi_id), reg, NULL);
9694 "Failed to update VSI_L2TAGSTXVALID[%d]",
9696 return I40E_ERR_CONFIG;
9704 * i40e_aq_add_mirror_rule
9705 * @hw: pointer to the hardware structure
9706 * @seid: VEB seid to add mirror rule to
9707 * @dst_id: destination vsi seid
9708 * @entries: Buffer which contains the entities to be mirrored
9709 * @count: number of entities contained in the buffer
9710 * @rule_id: the rule_id of the rule to be added
9712 * Add a mirror rule for a given VEB.
9715 static enum i40e_status_code
9716 i40e_aq_add_mirror_rule(struct i40e_hw *hw,
9717 uint16_t seid, uint16_t dst_id,
9718 uint16_t rule_type, uint16_t *entries,
9719 uint16_t count, uint16_t *rule_id)
9721 struct i40e_aq_desc desc;
9722 struct i40e_aqc_add_delete_mirror_rule cmd;
9723 struct i40e_aqc_add_delete_mirror_rule_completion *resp =
9724 (struct i40e_aqc_add_delete_mirror_rule_completion *)
9727 enum i40e_status_code status;
9729 i40e_fill_default_direct_cmd_desc(&desc,
9730 i40e_aqc_opc_add_mirror_rule);
9731 memset(&cmd, 0, sizeof(cmd));
9733 buff_len = sizeof(uint16_t) * count;
9734 desc.datalen = rte_cpu_to_le_16(buff_len);
9736 desc.flags |= rte_cpu_to_le_16(
9737 (uint16_t)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
9738 cmd.rule_type = rte_cpu_to_le_16(rule_type <<
9739 I40E_AQC_MIRROR_RULE_TYPE_SHIFT);
9740 cmd.num_entries = rte_cpu_to_le_16(count);
9741 cmd.seid = rte_cpu_to_le_16(seid);
9742 cmd.destination = rte_cpu_to_le_16(dst_id);
9744 rte_memcpy(&desc.params.raw, &cmd, sizeof(cmd));
9745 status = i40e_asq_send_command(hw, &desc, entries, buff_len, NULL);
9747 "i40e_aq_add_mirror_rule, aq_status %d, rule_id = %u mirror_rules_used = %u, mirror_rules_free = %u,",
9748 hw->aq.asq_last_status, resp->rule_id,
9749 resp->mirror_rules_used, resp->mirror_rules_free);
9750 *rule_id = rte_le_to_cpu_16(resp->rule_id);
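/*
 * Illustrative caller sketch (veb_seid and dst_vsi_seid are
 * hypothetical): for a VLAN rule the entry buffer carries VLAN IDs,
 * for VPORT rules it carries VSI SEIDs; datalen is count 16-bit words:
 *
 *	uint16_t vlans[2] = { 100, 200 };
 *	uint16_t rule_id;
 *	i40e_aq_add_mirror_rule(hw, veb_seid, dst_vsi_seid,
 *				I40E_AQC_MIRROR_RULE_TYPE_VLAN,
 *				vlans, RTE_DIM(vlans), &rule_id);
 */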
9756 * i40e_aq_del_mirror_rule
9757 * @hw: pointer to the hardware structure
9758 * @seid: VEB seid to delete the mirror rule from
9759 * @entries: Buffer which contains the entities to be mirrored
9760 * @count: number of entities contained in the buffer
9761 * @rule_id: the rule_id of the rule to be deleted
9763 * Delete a mirror rule for a given VEB.
9766 static enum i40e_status_code
9767 i40e_aq_del_mirror_rule(struct i40e_hw *hw,
9768 uint16_t seid, uint16_t rule_type, uint16_t *entries,
9769 uint16_t count, uint16_t rule_id)
9771 struct i40e_aq_desc desc;
9772 struct i40e_aqc_add_delete_mirror_rule cmd;
9773 uint16_t buff_len = 0;
9774 enum i40e_status_code status;
9777 i40e_fill_default_direct_cmd_desc(&desc,
9778 i40e_aqc_opc_delete_mirror_rule);
9779 memset(&cmd, 0, sizeof(cmd));
9780 if (rule_type == I40E_AQC_MIRROR_RULE_TYPE_VLAN) {
9781 desc.flags |= rte_cpu_to_le_16((uint16_t)(I40E_AQ_FLAG_BUF |
9783 cmd.num_entries = count;
9784 buff_len = sizeof(uint16_t) * count;
9785 desc.datalen = rte_cpu_to_le_16(buff_len);
9786 buff = (void *)entries;
9788 /* The rule id is carried in the destination field when deleting a mirror rule */
9789 cmd.destination = rte_cpu_to_le_16(rule_id);
9791 cmd.rule_type = rte_cpu_to_le_16(rule_type <<
9792 I40E_AQC_MIRROR_RULE_TYPE_SHIFT);
9793 cmd.seid = rte_cpu_to_le_16(seid);
9795 rte_memcpy(&desc.params.raw, &cmd, sizeof(cmd));
9796 status = i40e_asq_send_command(hw, &desc, buff, buff_len, NULL);
9802 * i40e_mirror_rule_set
9803 * @dev: pointer to the device structure
9804 * @mirror_conf: mirror rule info
9805 * @sw_id: mirror rule's sw_id
9806 * @on: enable/disable
9808 * Set a mirror rule.
9812 i40e_mirror_rule_set(struct rte_eth_dev *dev,
9813 struct rte_eth_mirror_conf *mirror_conf,
9814 uint8_t sw_id, uint8_t on)
9816 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
9817 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
9818 struct i40e_mirror_rule *it, *mirr_rule = NULL;
9819 struct i40e_mirror_rule *parent = NULL;
9820 uint16_t seid, dst_seid, rule_id;
9824 PMD_DRV_LOG(DEBUG, "i40e_mirror_rule_set: sw_id = %d.", sw_id);
9826 if (pf->main_vsi->veb == NULL || pf->vfs == NULL) {
9828 "mirror rule can not be configured without veb or vfs.");
9831 if (pf->nb_mirror_rule > I40E_MAX_MIRROR_RULES) {
9832 PMD_DRV_LOG(ERR, "mirror table is full.");
9835 if (mirror_conf->dst_pool > pf->vf_num) {
9836 PMD_DRV_LOG(ERR, "invalid destination pool %u.",
9837 mirror_conf->dst_pool);
9841 seid = pf->main_vsi->veb->seid;
9843 TAILQ_FOREACH(it, &pf->mirror_list, rules) {
9844 if (sw_id <= it->index) {
9850 if (mirr_rule && sw_id == mirr_rule->index) {
9852 PMD_DRV_LOG(ERR, "mirror rule exists.");
9855 ret = i40e_aq_del_mirror_rule(hw, seid,
9856 mirr_rule->rule_type,
9858 mirr_rule->num_entries, mirr_rule->id);
9861 "failed to remove mirror rule: ret = %d, aq_err = %d.",
9862 ret, hw->aq.asq_last_status);
9865 TAILQ_REMOVE(&pf->mirror_list, mirr_rule, rules);
9866 rte_free(mirr_rule);
9867 pf->nb_mirror_rule--;
9871 PMD_DRV_LOG(ERR, "mirror rule doesn't exist.");
9875 mirr_rule = rte_zmalloc("i40e_mirror_rule",
9876 sizeof(struct i40e_mirror_rule) , 0);
9878 PMD_DRV_LOG(ERR, "failed to allocate memory");
9879 return I40E_ERR_NO_MEMORY;
9881 switch (mirror_conf->rule_type) {
9882 case ETH_MIRROR_VLAN:
9883 for (i = 0, j = 0; i < ETH_MIRROR_MAX_VLANS; i++) {
9884 if (mirror_conf->vlan.vlan_mask & (1ULL << i)) {
9885 mirr_rule->entries[j] =
9886 mirror_conf->vlan.vlan_id[i];
9891 PMD_DRV_LOG(ERR, "vlan is not specified.");
9892 rte_free(mirr_rule);
9895 mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_VLAN;
9897 case ETH_MIRROR_VIRTUAL_POOL_UP:
9898 case ETH_MIRROR_VIRTUAL_POOL_DOWN:
9899 /* check if the specified pool bit is out of range */
9900 if (mirror_conf->pool_mask > (uint64_t)(1ULL << (pf->vf_num + 1))) {
9901 PMD_DRV_LOG(ERR, "pool mask is out of range.");
9902 rte_free(mirr_rule);
9905 for (i = 0, j = 0; i < pf->vf_num; i++) {
9906 if (mirror_conf->pool_mask & (1ULL << i)) {
9907 mirr_rule->entries[j] = pf->vfs[i].vsi->seid;
9911 if (mirror_conf->pool_mask & (1ULL << pf->vf_num)) {
9912 /* add pf vsi to entries */
9913 mirr_rule->entries[j] = pf->main_vsi_seid;
9917 PMD_DRV_LOG(ERR, "pool is not specified.");
9918 rte_free(mirr_rule);
9921 /* Egress and ingress in AQ commands are relative to the switch, not the port */
9922 mirr_rule->rule_type =
9923 (mirror_conf->rule_type == ETH_MIRROR_VIRTUAL_POOL_UP) ?
9924 I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS :
9925 I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS;
9927 case ETH_MIRROR_UPLINK_PORT:
9928 /* Egress and ingress in AQ commands are relative to the switch, not the port */
9929 mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS;
9931 case ETH_MIRROR_DOWNLINK_PORT:
9932 mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS;
9935 PMD_DRV_LOG(ERR, "unsupported mirror type %d.",
9936 mirror_conf->rule_type);
9937 rte_free(mirr_rule);
9941 /* If the dst_pool is equal to vf_num, consider it as PF */
9942 if (mirror_conf->dst_pool == pf->vf_num)
9943 dst_seid = pf->main_vsi_seid;
9945 dst_seid = pf->vfs[mirror_conf->dst_pool].vsi->seid;
9947 ret = i40e_aq_add_mirror_rule(hw, seid, dst_seid,
9948 mirr_rule->rule_type, mirr_rule->entries,
9952 "failed to add mirror rule: ret = %d, aq_err = %d.",
9953 ret, hw->aq.asq_last_status);
9954 rte_free(mirr_rule);
9958 mirr_rule->index = sw_id;
9959 mirr_rule->num_entries = j;
9960 mirr_rule->id = rule_id;
9961 mirr_rule->dst_vsi_seid = dst_seid;
9964 TAILQ_INSERT_AFTER(&pf->mirror_list, parent, mirr_rule, rules);
9966 TAILQ_INSERT_HEAD(&pf->mirror_list, mirr_rule, rules);
9968 pf->nb_mirror_rule++;
9973 * i40e_mirror_rule_reset
9974 * @dev: pointer to the device
9975 * @sw_id: mirror rule's sw_id
9977 * Reset a mirror rule.
9981 i40e_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t sw_id)
9983 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
9984 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
9985 struct i40e_mirror_rule *it, *mirr_rule = NULL;
9989 PMD_DRV_LOG(DEBUG, "i40e_mirror_rule_reset: sw_id = %d.", sw_id);
9991 seid = pf->main_vsi->veb->seid;
9993 TAILQ_FOREACH(it, &pf->mirror_list, rules) {
9994 if (sw_id == it->index) {
10000 ret = i40e_aq_del_mirror_rule(hw, seid,
10001 mirr_rule->rule_type,
10002 mirr_rule->entries,
10003 mirr_rule->num_entries, mirr_rule->id);
10006 "failed to remove mirror rule: status = %d, aq_err = %d.",
10007 ret, hw->aq.asq_last_status);
10010 TAILQ_REMOVE(&pf->mirror_list, mirr_rule, rules);
10011 rte_free(mirr_rule);
10012 pf->nb_mirror_rule--;
10014 PMD_DRV_LOG(ERR, "mirror rule doesn't exist.");
10021 i40e_read_systime_cyclecounter(struct rte_eth_dev *dev)
10023 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10024 uint64_t systim_cycles;
10026 systim_cycles = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TIME_L);
10027 systim_cycles |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TIME_H)
10030 return systim_cycles;
10034 i40e_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev, uint8_t index)
10036 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10037 uint64_t rx_tstamp;
10039 rx_tstamp = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_L(index));
10040 rx_tstamp |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(index))
10047 i40e_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev)
10049 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10050 uint64_t tx_tstamp;
10052 tx_tstamp = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_L);
10053 tx_tstamp |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_H)
10060 i40e_start_timecounters(struct rte_eth_dev *dev)
10062 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10063 struct i40e_adapter *adapter =
10064 (struct i40e_adapter *)dev->data->dev_private;
10065 struct rte_eth_link link;
10066 uint32_t tsync_inc_l;
10067 uint32_t tsync_inc_h;
10069 /* Get current link speed. */
10070 memset(&link, 0, sizeof(link));
10071 i40e_dev_link_update(dev, 1);
10072 rte_i40e_dev_atomic_read_link_status(dev, &link);
10074 switch (link.link_speed) {
10075 case ETH_SPEED_NUM_40G:
10076 tsync_inc_l = I40E_PTP_40GB_INCVAL & 0xFFFFFFFF;
10077 tsync_inc_h = I40E_PTP_40GB_INCVAL >> 32;
10079 case ETH_SPEED_NUM_10G:
10080 tsync_inc_l = I40E_PTP_10GB_INCVAL & 0xFFFFFFFF;
10081 tsync_inc_h = I40E_PTP_10GB_INCVAL >> 32;
10083 case ETH_SPEED_NUM_1G:
10084 tsync_inc_l = I40E_PTP_1GB_INCVAL & 0xFFFFFFFF;
10085 tsync_inc_h = I40E_PTP_1GB_INCVAL >> 32;
10092 /* Set the timesync increment value. */
10093 I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, tsync_inc_l);
10094 I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, tsync_inc_h);
10096 memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter));
10097 memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
10098 memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
10100 adapter->systime_tc.cc_mask = I40E_CYCLECOUNTER_MASK;
10101 adapter->systime_tc.cc_shift = 0;
10102 adapter->systime_tc.nsec_mask = 0;
10104 adapter->rx_tstamp_tc.cc_mask = I40E_CYCLECOUNTER_MASK;
10105 adapter->rx_tstamp_tc.cc_shift = 0;
10106 adapter->rx_tstamp_tc.nsec_mask = 0;
10108 adapter->tx_tstamp_tc.cc_mask = I40E_CYCLECOUNTER_MASK;
10109 adapter->tx_tstamp_tc.cc_shift = 0;
10110 adapter->tx_tstamp_tc.nsec_mask = 0;
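/*
 * Illustrative note: with cc_shift == 0, nsec_mask == 0 and a full
 * 64-bit cc_mask, the timecounters treat raw PRTTSYN cycles as
 * nanoseconds directly; the per-speed INCVAL programmed above is what
 * makes the hardware counter tick in nanosecond units. Reading the
 * clock then reduces to:
 *
 *	uint64_t cycles = i40e_read_systime_cyclecounter(dev);
 *	uint64_t ns = rte_timecounter_update(&adapter->systime_tc, cycles);
 */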
10114 i40e_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
10116 struct i40e_adapter *adapter =
10117 (struct i40e_adapter *)dev->data->dev_private;
10119 adapter->systime_tc.nsec += delta;
10120 adapter->rx_tstamp_tc.nsec += delta;
10121 adapter->tx_tstamp_tc.nsec += delta;
10127 i40e_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
10130 struct i40e_adapter *adapter =
10131 (struct i40e_adapter *)dev->data->dev_private;
10133 ns = rte_timespec_to_ns(ts);
10135 /* Set the timecounters to a new value. */
10136 adapter->systime_tc.nsec = ns;
10137 adapter->rx_tstamp_tc.nsec = ns;
10138 adapter->tx_tstamp_tc.nsec = ns;
10144 i40e_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
10146 uint64_t ns, systime_cycles;
10147 struct i40e_adapter *adapter =
10148 (struct i40e_adapter *)dev->data->dev_private;
10150 systime_cycles = i40e_read_systime_cyclecounter(dev);
10151 ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles);
10152 *ts = rte_ns_to_timespec(ns);
10158 i40e_timesync_enable(struct rte_eth_dev *dev)
10160 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10161 uint32_t tsync_ctl_l;
10162 uint32_t tsync_ctl_h;
10164 /* Stop the timesync system time. */
10165 I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, 0x0);
10166 I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, 0x0);
10167 /* Reset the timesync system time value. */
10168 I40E_WRITE_REG(hw, I40E_PRTTSYN_TIME_L, 0x0);
10169 I40E_WRITE_REG(hw, I40E_PRTTSYN_TIME_H, 0x0);
10171 i40e_start_timecounters(dev);
10173 /* Clear timesync registers. */
10174 I40E_READ_REG(hw, I40E_PRTTSYN_STAT_0);
10175 I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_H);
10176 I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(0));
10177 I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(1));
10178 I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(2));
10179 I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(3));
10181 /* Enable timestamping of PTP packets. */
10182 tsync_ctl_l = I40E_READ_REG(hw, I40E_PRTTSYN_CTL0);
10183 tsync_ctl_l |= I40E_PRTTSYN_TSYNENA;
10185 tsync_ctl_h = I40E_READ_REG(hw, I40E_PRTTSYN_CTL1);
10186 tsync_ctl_h |= I40E_PRTTSYN_TSYNENA;
10187 tsync_ctl_h |= I40E_PRTTSYN_TSYNTYPE;
10189 I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL0, tsync_ctl_l);
10190 I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL1, tsync_ctl_h);
10196 i40e_timesync_disable(struct rte_eth_dev *dev)
10198 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10199 uint32_t tsync_ctl_l;
10200 uint32_t tsync_ctl_h;
10202 /* Disable timestamping of transmitted PTP packets. */
10203 tsync_ctl_l = I40E_READ_REG(hw, I40E_PRTTSYN_CTL0);
10204 tsync_ctl_l &= ~I40E_PRTTSYN_TSYNENA;
10206 tsync_ctl_h = I40E_READ_REG(hw, I40E_PRTTSYN_CTL1);
10207 tsync_ctl_h &= ~I40E_PRTTSYN_TSYNENA;
10209 I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL0, tsync_ctl_l);
10210 I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL1, tsync_ctl_h);
10212 /* Reset the timesync increment value. */
10213 I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, 0x0);
10214 I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, 0x0);
10220 i40e_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
10221 struct timespec *timestamp, uint32_t flags)
10223 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10224 struct i40e_adapter *adapter =
10225 (struct i40e_adapter *)dev->data->dev_private;
10227 uint32_t sync_status;
10228 uint32_t index = flags & 0x03;
10229 uint64_t rx_tstamp_cycles;
10232 sync_status = I40E_READ_REG(hw, I40E_PRTTSYN_STAT_1);
10233 if ((sync_status & (1 << index)) == 0)
10236 rx_tstamp_cycles = i40e_read_rx_tstamp_cyclecounter(dev, index);
10237 ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles);
10238 *timestamp = rte_ns_to_timespec(ns);
10244 i40e_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
10245 struct timespec *timestamp)
10247 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10248 struct i40e_adapter *adapter =
10249 (struct i40e_adapter *)dev->data->dev_private;
10251 uint32_t sync_status;
10252 uint64_t tx_tstamp_cycles;
10255 sync_status = I40E_READ_REG(hw, I40E_PRTTSYN_STAT_0);
10256 if ((sync_status & I40E_PRTTSYN_STAT_0_TXTIME_MASK) == 0)
10259 tx_tstamp_cycles = i40e_read_tx_tstamp_cyclecounter(dev);
10260 ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles);
10261 *timestamp = rte_ns_to_timespec(ns);
10267 * i40e_parse_dcb_configure - parse dcb configure from user
10268 * @dev: the device being configured
10269 * @dcb_cfg: pointer to the parsed result
10270 * @*tc_map: bit map of enabled traffic classes
10272 * Returns 0 on success, negative value on failure
10275 i40e_parse_dcb_configure(struct rte_eth_dev *dev,
10276 struct i40e_dcbx_config *dcb_cfg,
10279 struct rte_eth_dcb_rx_conf *dcb_rx_conf;
10280 uint8_t i, tc_bw, bw_lf;
10282 memset(dcb_cfg, 0, sizeof(struct i40e_dcbx_config));
10284 dcb_rx_conf = &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
10285 if (dcb_rx_conf->nb_tcs > I40E_MAX_TRAFFIC_CLASS) {
10286 PMD_INIT_LOG(ERR, "number of TCs exceeds the maximum.");
10290 /* Assume each TC has the same bandwidth */
10291 tc_bw = I40E_MAX_PERCENT / dcb_rx_conf->nb_tcs;
10292 for (i = 0; i < dcb_rx_conf->nb_tcs; i++)
10293 dcb_cfg->etscfg.tcbwtable[i] = tc_bw;
10294 /* Distribute the remainder so the TC bandwidths sum to 100 */
10295 bw_lf = I40E_MAX_PERCENT % dcb_rx_conf->nb_tcs;
10296 for (i = 0; i < bw_lf; i++)
10297 dcb_cfg->etscfg.tcbwtable[i]++;
10299 /* assume each tc has the same Transmission Selection Algorithm */
10300 for (i = 0; i < dcb_rx_conf->nb_tcs; i++)
10301 dcb_cfg->etscfg.tsatable[i] = I40E_IEEE_TSA_ETS;
10303 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
10304 dcb_cfg->etscfg.prioritytable[i] =
10305 dcb_rx_conf->dcb_tc[i];
10307 /* FW needs one App to configure HW */
10308 dcb_cfg->numapps = I40E_DEFAULT_DCB_APP_NUM;
10309 dcb_cfg->app[0].selector = I40E_APP_SEL_ETHTYPE;
10310 dcb_cfg->app[0].priority = I40E_DEFAULT_DCB_APP_PRIO;
10311 dcb_cfg->app[0].protocolid = I40E_APP_PROTOID_FCOE;
10313 if (dcb_rx_conf->nb_tcs == 0)
10314 *tc_map = 1; /* tc0 only */
10316 *tc_map = RTE_LEN2MASK(dcb_rx_conf->nb_tcs, uint8_t);
10318 if (dev->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
10319 dcb_cfg->pfc.willing = 0;
10320 dcb_cfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
10321 dcb_cfg->pfc.pfcenable = *tc_map;
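/*
 * Illustrative arithmetic for the equal-share split above (assuming
 * I40E_MAX_PERCENT == 100): with nb_tcs == 3, tc_bw = 100 / 3 = 33 and
 * bw_lf = 100 % 3 = 1, so the table becomes {34, 33, 33} and sums to
 * 100; with nb_tcs == 4 the split is exact, {25, 25, 25, 25}.
 */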
10327 static enum i40e_status_code
10328 i40e_vsi_update_queue_mapping(struct i40e_vsi *vsi,
10329 struct i40e_aqc_vsi_properties_data *info,
10330 uint8_t enabled_tcmap)
10332 enum i40e_status_code ret;
10333 int i, total_tc = 0;
10334 uint16_t qpnum_per_tc, bsf, qp_idx;
10335 struct rte_eth_dev_data *dev_data = I40E_VSI_TO_DEV_DATA(vsi);
10336 struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
10337 uint16_t used_queues;
10339 ret = validate_tcmap_parameter(vsi, enabled_tcmap);
10340 if (ret != I40E_SUCCESS)
10343 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10344 if (enabled_tcmap & (1 << i))
10349 vsi->enabled_tc = enabled_tcmap;
10351 /* Different VSI types have different numbers of queues assigned */
10352 if (vsi->type == I40E_VSI_MAIN)
10353 used_queues = dev_data->nb_rx_queues -
10354 pf->nb_cfg_vmdq_vsi * RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
10355 else if (vsi->type == I40E_VSI_VMDQ2)
10356 used_queues = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
10358 PMD_INIT_LOG(ERR, "unsupported VSI type.");
10359 return I40E_ERR_NO_AVAILABLE_VSI;
10362 qpnum_per_tc = used_queues / total_tc;
10363 /* Number of queues per enabled TC */
10364 if (qpnum_per_tc == 0) {
10365 PMD_INIT_LOG(ERR, "number of queues is less than the number of TCs.");
10366 return I40E_ERR_INVALID_QP_ID;
10368 qpnum_per_tc = RTE_MIN(i40e_align_floor(qpnum_per_tc),
10369 I40E_MAX_Q_PER_TC);
10370 bsf = rte_bsf32(qpnum_per_tc);
10373 * Configure TC and queue mapping parameters. For each enabled TC,
10374 * allocate qpnum_per_tc queues to it; for a disabled TC, the
10375 * default queue will serve it.
10378 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10379 if (vsi->enabled_tc & (1 << i)) {
10380 info->tc_mapping[i] = rte_cpu_to_le_16((qp_idx <<
10381 I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
10382 (bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
10383 qp_idx += qpnum_per_tc;
10385 info->tc_mapping[i] = 0;
10388 /* Associate queue numbers with the VSI; keep vsi->nb_qps unchanged */
10389 if (vsi->type == I40E_VSI_SRIOV) {
10390 info->mapping_flags |=
10391 rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
10392 for (i = 0; i < vsi->nb_qps; i++)
10393 info->queue_mapping[i] =
10394 rte_cpu_to_le_16(vsi->base_queue + i);
10396 info->mapping_flags |=
10397 rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
10398 info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
10400 info->valid_sections |=
10401 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
10403 return I40E_SUCCESS;
/*
 * i40e_config_switch_comp_tc - Configure VEB tc setting for given TC map
 * @veb: VEB to be configured
 * @tc_map: enabled TC bitmap
 *
 * Returns 0 on success, negative value on failure
 */
static enum i40e_status_code
i40e_config_switch_comp_tc(struct i40e_veb *veb, uint8_t tc_map)
{
	struct i40e_aqc_configure_switching_comp_bw_config_data veb_bw;
	struct i40e_aqc_query_switching_comp_bw_config_resp bw_query;
	struct i40e_aqc_query_switching_comp_ets_config_resp ets_query;
	struct i40e_hw *hw = I40E_VSI_TO_HW(veb->associate_vsi);
	enum i40e_status_code ret = I40E_SUCCESS;
	int i;
	uint32_t bw_max;

	/* Check if enabled_tc is same as existing or new TCs */
	if (veb->enabled_tc == tc_map)
		return ret;

	/* configure tc bandwidth */
	memset(&veb_bw, 0, sizeof(veb_bw));
	veb_bw.tc_valid_bits = tc_map;
	/* Enable ETS TCs with equal BW Share for now across all VSIs */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (tc_map & BIT_ULL(i))
			veb_bw.tc_bw_share_credits[i] = 1;
	}
	ret = i40e_aq_config_switch_comp_bw_config(hw, veb->seid,
						   &veb_bw, NULL);
	if (ret) {
		PMD_INIT_LOG(ERR,
			"AQ command Config switch_comp BW allocation per TC failed = %d",
			hw->aq.asq_last_status);
		return ret;
	}
	memset(&ets_query, 0, sizeof(ets_query));
	ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
						   &ets_query, NULL);
	if (ret != I40E_SUCCESS) {
		PMD_DRV_LOG(ERR,
			"Failed to get switch_comp ETS configuration %u",
			hw->aq.asq_last_status);
		return ret;
	}
	memset(&bw_query, 0, sizeof(bw_query));
	ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
						  &bw_query, NULL);
	if (ret != I40E_SUCCESS) {
		PMD_DRV_LOG(ERR,
			"Failed to get switch_comp bandwidth configuration %u",
			hw->aq.asq_last_status);
		return ret;
	}

	/* store and print out BW info */
	veb->bw_info.bw_limit = rte_le_to_cpu_16(ets_query.port_bw_limit);
	veb->bw_info.bw_max = ets_query.tc_bw_max;
	PMD_DRV_LOG(DEBUG, "switch_comp bw limit:%u", veb->bw_info.bw_limit);
	PMD_DRV_LOG(DEBUG, "switch_comp max_bw:%u", veb->bw_info.bw_max);
	bw_max = rte_le_to_cpu_16(bw_query.tc_bw_max[0]) |
		 (rte_le_to_cpu_16(bw_query.tc_bw_max[1]) <<
		  I40E_16_BIT_WIDTH);
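	/*
	 * tc_bw_max is two little-endian 16-bit words that together hold one
	 * 4-bit max-credit field per TC; the loop below extracts each field
	 * by shifting i * 4 bits and masking (the 4th bit is reserved).
	 */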
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		veb->bw_info.bw_ets_share_credits[i] =
				bw_query.tc_bw_share_credits[i];
		veb->bw_info.bw_ets_credits[i] =
				rte_le_to_cpu_16(bw_query.tc_bw_limits[i]);
		/* 4 bits per TC, 4th bit is reserved */
		veb->bw_info.bw_ets_max[i] =
			(uint8_t)((bw_max >> (i * I40E_4_BIT_WIDTH)) &
				  RTE_LEN2MASK(3, uint8_t));
		PMD_DRV_LOG(DEBUG, "\tVEB TC%u:share credits %u", i,
			    veb->bw_info.bw_ets_share_credits[i]);
		PMD_DRV_LOG(DEBUG, "\tVEB TC%u:credits %u", i,
			    veb->bw_info.bw_ets_credits[i]);
		PMD_DRV_LOG(DEBUG, "\tVEB TC%u: max credits: %u", i,
			    veb->bw_info.bw_ets_max[i]);
	}

	veb->enabled_tc = tc_map;

	return ret;
}
/*
 * i40e_vsi_config_tc - Configure VSI tc setting for given TC map
 * @vsi: VSI to be configured
 * @tc_map: enabled TC bitmap
 *
 * Returns 0 on success, negative value on failure
 */
static enum i40e_status_code
i40e_vsi_config_tc(struct i40e_vsi *vsi, uint8_t tc_map)
{
	struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
	struct i40e_vsi_context ctxt;
	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
	enum i40e_status_code ret = I40E_SUCCESS;
	int i;

	/* Check if enabled_tc is same as existing or new TCs */
	if (vsi->enabled_tc == tc_map)
		return ret;

	/* configure tc bandwidth */
	memset(&bw_data, 0, sizeof(bw_data));
	bw_data.tc_valid_bits = tc_map;
	/* Enable ETS TCs with equal BW Share for now across all VSIs */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (tc_map & BIT_ULL(i))
			bw_data.tc_bw_credits[i] = 1;
	}
	ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &bw_data, NULL);
	if (ret) {
		PMD_INIT_LOG(ERR,
			"AQ command Config VSI BW allocation per TC failed = %d",
			hw->aq.asq_last_status);
		goto out;
	}
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
		vsi->info.qs_handle[i] = bw_data.qs_handles[i];

	/* Update Queue Pairs Mapping for currently enabled UPs */
	ctxt.seid = vsi->seid;
	ctxt.pf_num = hw->pf_id;
	ctxt.vf_num = 0;
	ctxt.uplink_seid = vsi->uplink_seid;
	ctxt.info = vsi->info;
	ret = i40e_vsi_update_queue_mapping(vsi, &ctxt.info, tc_map);
	if (ret)
		goto out;

	/* Update the VSI after updating the VSI queue-mapping information */
	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to configure TC queue mapping = %d",
			hw->aq.asq_last_status);
		goto out;
	}
	/* update the local VSI info with updated queue map */
	rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
		   sizeof(vsi->info.tc_mapping));
	rte_memcpy(&vsi->info.queue_mapping,
		   &ctxt.info.queue_mapping,
		   sizeof(vsi->info.queue_mapping));
	vsi->info.mapping_flags = ctxt.info.mapping_flags;
	vsi->info.valid_sections = 0;

	/* query and update current VSI BW information */
	ret = i40e_vsi_get_bw_config(vsi);
	if (ret) {
		PMD_INIT_LOG(ERR,
			"Failed updating vsi bw info, err %s aq_err %s",
			i40e_stat_str(hw, ret),
			i40e_aq_str(hw, hw->aq.asq_last_status));
		goto out;
	}

	vsi->enabled_tc = tc_map;

out:
	return ret;
}
/*
 * i40e_dcb_hw_configure - program the dcb setting to hw
 * @pf: pf the configuration is taken on
 * @new_cfg: new configuration
 * @tc_map: enabled TC bitmap
 *
 * Returns 0 on success, negative value on failure
 */
static enum i40e_status_code
i40e_dcb_hw_configure(struct i40e_pf *pf,
		      struct i40e_dcbx_config *new_cfg,
		      uint8_t tc_map)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	struct i40e_dcbx_config *old_cfg = &hw->local_dcbx_config;
	struct i40e_vsi *main_vsi = pf->main_vsi;
	struct i40e_vsi_list *vsi_list;
	enum i40e_status_code ret;
	int i;
	uint32_t val;

	/* Use the FW API only if FW >= v4.4 */
	if (!(((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver >= 4)) ||
	      (hw->aq.fw_maj_ver >= 5))) {
		PMD_INIT_LOG(ERR,
			"FW < v4.4, can not use FW LLDP API to configure DCB");
		return I40E_ERR_FIRMWARE_API_VERSION;
	}

	/* Check if need reconfiguration */
	if (!memcmp(new_cfg, old_cfg, sizeof(struct i40e_dcbx_config))) {
		PMD_INIT_LOG(ERR, "No Change in DCB Config required.");
		return I40E_SUCCESS;
	}

	/* Copy the new config to the current config */
	*old_cfg = *new_cfg;
	old_cfg->etsrec = old_cfg->etscfg;
	ret = i40e_set_dcb_config(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "Set DCB Config failed, err %s aq_err %s",
			     i40e_stat_str(hw, ret),
			     i40e_aq_str(hw, hw->aq.asq_last_status));
		return ret;
	}
	/* set receive Arbiter to RR mode and ETS scheme by default */
	for (i = 0; i <= I40E_PRTDCB_RETSTCC_MAX_INDEX; i++) {
		val = I40E_READ_REG(hw, I40E_PRTDCB_RETSTCC(i));
		val &= ~(I40E_PRTDCB_RETSTCC_BWSHARE_MASK |
			 I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK |
			 I40E_PRTDCB_RETSTCC_ETSTC_MASK);
		val |= ((uint32_t)old_cfg->etscfg.tcbwtable[i] <<
			 I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT) &
			 I40E_PRTDCB_RETSTCC_BWSHARE_MASK;
		val |= ((uint32_t)1 << I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT) &
			 I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK;
		val |= ((uint32_t)1 << I40E_PRTDCB_RETSTCC_ETSTC_SHIFT) &
			 I40E_PRTDCB_RETSTCC_ETSTC_MASK;
		I40E_WRITE_REG(hw, I40E_PRTDCB_RETSTCC(i), val);
	}
	/* get local mib to check whether it is configured correctly */
	/* IEEE mode */
	hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_IEEE;
	/* Get Local DCB Config */
	i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_LOCAL, 0,
			       &hw->local_dcbx_config);

	/* if Veb is created, need to update TC of it at first */
	if (main_vsi->veb) {
		ret = i40e_config_switch_comp_tc(main_vsi->veb, tc_map);
		if (ret)
			PMD_INIT_LOG(WARNING,
				"Failed configuring TC for VEB seid=%d",
				main_vsi->veb->seid);
	}
	/* Update each VSI */
	i40e_vsi_config_tc(main_vsi, tc_map);
	if (main_vsi->veb) {
		TAILQ_FOREACH(vsi_list, &main_vsi->veb->head, list) {
			/* Beside main VSI and VMDQ VSIs, only enable default
			 * TC for other VSIs
			 */
			if (vsi_list->vsi->type == I40E_VSI_VMDQ2)
				ret = i40e_vsi_config_tc(vsi_list->vsi,
							 tc_map);
			else
				ret = i40e_vsi_config_tc(vsi_list->vsi,
							 I40E_DEFAULT_TCMAP);
			if (ret)
				PMD_INIT_LOG(WARNING,
					"Failed configuring TC for VSI seid=%d",
					vsi_list->vsi->seid);
		}
	}
	return I40E_SUCCESS;
}
/*
 * i40e_dcb_init_configure - initial dcb config
 * @dev: device being configured
 * @sw_dcb: indicate whether dcb is sw configured or hw offload
 *
 * Returns 0 on success, negative value on failure
 */
int
i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int i, ret = 0;

	if ((pf->flags & I40E_FLAG_DCB) == 0) {
		PMD_INIT_LOG(ERR, "HW doesn't support DCB");
		return -ENOTSUP;
	}

	/* DCB initialization:
	 * Update DCB configuration from the Firmware and configure
	 * LLDP MIB change event.
	 */
	if (sw_dcb == TRUE) {
		ret = i40e_init_dcb(hw);
		/* If lldp agent is stopped, the return value from
		 * i40e_init_dcb we expect is failure with I40E_AQ_RC_EPERM
		 * adminq status. Otherwise, it should return success.
		 */
		if ((ret == I40E_SUCCESS) || (ret != I40E_SUCCESS &&
		    hw->aq.asq_last_status == I40E_AQ_RC_EPERM)) {
			memset(&hw->local_dcbx_config, 0,
			       sizeof(struct i40e_dcbx_config));
			/* set dcb default configuration */
			hw->local_dcbx_config.etscfg.willing = 0;
			hw->local_dcbx_config.etscfg.maxtcs = 0;
			hw->local_dcbx_config.etscfg.tcbwtable[0] = 100;
			hw->local_dcbx_config.etscfg.tsatable[0] =
						I40E_IEEE_TSA_ETS;
			/* all UPs mapping to TC0 */
			for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
				hw->local_dcbx_config.etscfg.prioritytable[i] = 0;
			hw->local_dcbx_config.etsrec =
				hw->local_dcbx_config.etscfg;
			hw->local_dcbx_config.pfc.willing = 0;
			hw->local_dcbx_config.pfc.pfccap =
						I40E_MAX_TRAFFIC_CLASS;
			/* FW needs one App to configure HW */
			hw->local_dcbx_config.numapps = 1;
			hw->local_dcbx_config.app[0].selector =
						I40E_APP_SEL_ETHTYPE;
			hw->local_dcbx_config.app[0].priority = 3;
			hw->local_dcbx_config.app[0].protocolid =
						I40E_APP_PROTOID_FCOE;
			ret = i40e_set_dcb_config(hw);
			if (ret) {
				PMD_INIT_LOG(ERR,
					"default dcb config fails. err = %d, aq_err = %d.",
					ret, hw->aq.asq_last_status);
				return -ENOSYS;
			}
		} else {
			PMD_INIT_LOG(ERR,
				"DCB initialization in FW fails, err = %d, aq_err = %d.",
				ret, hw->aq.asq_last_status);
			return -ENOTSUP;
		}
	} else {
		ret = i40e_aq_start_lldp(hw, NULL);
		if (ret != I40E_SUCCESS)
			PMD_INIT_LOG(DEBUG, "Failed to start lldp");

		ret = i40e_init_dcb(hw);
		if (!ret) {
			if (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED) {
				PMD_INIT_LOG(ERR,
					"HW doesn't support DCBX offload.");
				return -ENOTSUP;
			}
		} else {
			PMD_INIT_LOG(ERR,
				"DCBX configuration failed, err = %d, aq_err = %d.",
				ret, hw->aq.asq_last_status);
			return -ENOTSUP;
		}
	}
	return 0;
}
/*
 * i40e_dcb_setup - setup dcb related config
 * @dev: device being configured
 *
 * Returns 0 on success, negative value on failure
 */
static int
i40e_dcb_setup(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_dcbx_config dcb_cfg;
	uint8_t tc_map = 0;
	int ret = 0;

	if ((pf->flags & I40E_FLAG_DCB) == 0) {
		PMD_INIT_LOG(ERR, "HW doesn't support DCB");
		return -ENOTSUP;
	}

	if (pf->vf_num != 0)
		PMD_INIT_LOG(DEBUG, "DCB only works on PF and VMDq VSIs.");

	ret = i40e_parse_dcb_configure(dev, &dcb_cfg, &tc_map);
	if (ret) {
		PMD_INIT_LOG(ERR, "invalid dcb config");
		return -EINVAL;
	}
	ret = i40e_dcb_hw_configure(pf, &dcb_cfg, tc_map);
	if (ret) {
		PMD_INIT_LOG(ERR, "dcb sw configure fails");
		return -ENOSYS;
	}

	return 0;
}
static int
i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
		      struct rte_eth_dcb_info *dcb_info)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_vsi *vsi = pf->main_vsi;
	struct i40e_dcbx_config *dcb_cfg = &hw->local_dcbx_config;
	uint16_t bsf, tc_mapping;
	int i, j = 0;

	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
		dcb_info->nb_tcs = rte_bsf32(vsi->enabled_tc + 1);
	else
		dcb_info->nb_tcs = 1;
	for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
		dcb_info->prio_tc[i] = dcb_cfg->etscfg.prioritytable[i];
	for (i = 0; i < dcb_info->nb_tcs; i++)
		dcb_info->tc_bws[i] = dcb_cfg->etscfg.tcbwtable[i];

	/* get queue mapping if vmdq is disabled */
	if (!pf->nb_cfg_vmdq_vsi) {
		for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
			if (!(vsi->enabled_tc & (1 << i)))
				continue;
			tc_mapping = rte_le_to_cpu_16(vsi->info.tc_mapping[i]);
			dcb_info->tc_queue.tc_rxq[j][i].base =
				(tc_mapping & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >>
				I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT;
			dcb_info->tc_queue.tc_txq[j][i].base =
				dcb_info->tc_queue.tc_rxq[j][i].base;
			bsf = (tc_mapping & I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >>
				I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT;
			dcb_info->tc_queue.tc_rxq[j][i].nb_queue = 1 << bsf;
			dcb_info->tc_queue.tc_txq[j][i].nb_queue =
				dcb_info->tc_queue.tc_rxq[j][i].nb_queue;
		}
		return 0;
	}
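	/*
	 * Each 16-bit tc_mapping entry encodes the TC's first queue in the
	 * QUE_OFFSET field and log2 of its queue count in the QUE_NUMBER
	 * field, hence base = offset and nb_queue = 1 << bsf both above and
	 * in the VMDq branch below.
	 */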
	/* get queue mapping if vmdq is enabled */
	do {
		vsi = pf->vmdq[j].vsi;
		for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
			if (!(vsi->enabled_tc & (1 << i)))
				continue;
			tc_mapping = rte_le_to_cpu_16(vsi->info.tc_mapping[i]);
			dcb_info->tc_queue.tc_rxq[j][i].base =
				(tc_mapping & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >>
				I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT;
			dcb_info->tc_queue.tc_txq[j][i].base =
				dcb_info->tc_queue.tc_rxq[j][i].base;
			bsf = (tc_mapping & I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >>
				I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT;
			dcb_info->tc_queue.tc_rxq[j][i].nb_queue = 1 << bsf;
			dcb_info->tc_queue.tc_txq[j][i].nb_queue =
				dcb_info->tc_queue.tc_rxq[j][i].nb_queue;
		}
		j++;
	} while (j < RTE_MIN(pf->nb_cfg_vmdq_vsi, ETH_MAX_VMDQ_POOL));

	return 0;
}
static int
i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint16_t interval =
		i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL, 1);
	uint16_t msix_intr;
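	/*
	 * Vector I40E_MISC_VEC_ID shares PFINT_DYN_CTL0 with the misc/admin
	 * interrupt cause; all other Rx vectors are numbered from
	 * I40E_RX_VEC_START and each uses its own PFINT_DYN_CTLN register.
	 */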
	msix_intr = intr_handle->intr_vec[queue_id];
	if (msix_intr == I40E_MISC_VEC_ID)
		I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
			       I40E_PFINT_DYN_CTLN_INTENA_MASK |
			       I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
			       (0 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
			       (interval <<
				I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT));
	else
		I40E_WRITE_REG(hw,
			       I40E_PFINT_DYN_CTLN(msix_intr -
						   I40E_RX_VEC_START),
			       I40E_PFINT_DYN_CTLN_INTENA_MASK |
			       I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
			       (0 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
			       (interval <<
				I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT));

	I40E_WRITE_FLUSH(hw);
	rte_intr_enable(&pci_dev->intr_handle);

	return 0;
}
static int
i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint16_t msix_intr;

	msix_intr = intr_handle->intr_vec[queue_id];
	if (msix_intr == I40E_MISC_VEC_ID)
		I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, 0);
	else
		I40E_WRITE_REG(hw,
			       I40E_PFINT_DYN_CTLN(msix_intr -
						   I40E_RX_VEC_START),
			       0);
	I40E_WRITE_FLUSH(hw);

	return 0;
}
static int i40e_get_regs(struct rte_eth_dev *dev,
			 struct rte_dev_reg_info *regs)
{
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t *ptr_data = regs->data;
	uint32_t reg_idx, arr_idx, arr_idx2, reg_offset;
	const struct i40e_reg_info *reg_info;

	if (ptr_data == NULL) {
		regs->length = I40E_GLGEN_STAT_CLEAR + 4;
		regs->width = sizeof(uint32_t);
		return 0;
	}

	/* The first few registers have to be read using AQ operations */
	reg_idx = 0;
	while (i40e_regs_adminq[reg_idx].name) {
		reg_info = &i40e_regs_adminq[reg_idx++];
		for (arr_idx = 0; arr_idx <= reg_info->count1; arr_idx++)
			for (arr_idx2 = 0;
					arr_idx2 <= reg_info->count2;
					arr_idx2++) {
				reg_offset = arr_idx * reg_info->stride1 +
					arr_idx2 * reg_info->stride2;
				reg_offset += reg_info->base_addr;
				ptr_data[reg_offset >> 2] =
					i40e_read_rx_ctl(hw, reg_offset);
			}
	}
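	/*
	 * Each value is stored at its register offset in 32-bit words
	 * (reg_offset >> 2), so the output buffer mirrors the device's
	 * register address map.
	 */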
	/* The remaining registers can be read using primitives */
	reg_idx = 0;
	while (i40e_regs_others[reg_idx].name) {
		reg_info = &i40e_regs_others[reg_idx++];
		for (arr_idx = 0; arr_idx <= reg_info->count1; arr_idx++)
			for (arr_idx2 = 0;
					arr_idx2 <= reg_info->count2;
					arr_idx2++) {
				reg_offset = arr_idx * reg_info->stride1 +
					arr_idx2 * reg_info->stride2;
				reg_offset += reg_info->base_addr;
				ptr_data[reg_offset >> 2] =
					I40E_READ_REG(hw, reg_offset);
			}
	}

	return 0;
}
static int i40e_get_eeprom_length(struct rte_eth_dev *dev)
{
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* Convert word count to byte count */
	return hw->nvm.sr_size << 1;
}
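/*
 * The shadow RAM is word (16-bit) addressed: the byte-based offset and
 * length from the ethdev API are halved below before the buffer is read
 * with i40e_read_nvm_buffer().
 */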
static int i40e_get_eeprom(struct rte_eth_dev *dev,
			   struct rte_dev_eeprom_info *eeprom)
{
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint16_t *data = eeprom->data;
	uint16_t offset, length, cnt_words;
	int ret_code;

	offset = eeprom->offset >> 1;
	length = eeprom->length >> 1;
	cnt_words = length;

	if (offset > hw->nvm.sr_size ||
	    offset + length > hw->nvm.sr_size) {
		PMD_DRV_LOG(ERR, "Requested EEPROM bytes out of range.");
		return -EINVAL;
	}

	eeprom->magic = hw->vendor_id | (hw->device_id << 16);

	ret_code = i40e_read_nvm_buffer(hw, offset, &cnt_words, data);
	if (ret_code != I40E_SUCCESS || cnt_words != length) {
		PMD_DRV_LOG(ERR, "EEPROM read failed.");
		return -EIO;
	}

	return 0;
}
static void i40e_set_default_mac_addr(struct rte_eth_dev *dev,
				      struct ether_addr *mac_addr)
{
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_vsi *vsi = pf->main_vsi;
	struct i40e_mac_filter_info mac_filter;
	struct i40e_mac_filter *f;
	int ret;

	if (!is_valid_assigned_ether_addr(mac_addr)) {
		PMD_DRV_LOG(ERR, "Tried to set invalid MAC address.");
		return;
	}

	TAILQ_FOREACH(f, &vsi->mac_list, next) {
		if (is_same_ether_addr(&pf->dev_addr, &f->mac_info.mac_addr))
			break;
	}

	if (f == NULL) {
		PMD_DRV_LOG(ERR, "Failed to find filter for default mac");
		return;
	}

	mac_filter = f->mac_info;
	ret = i40e_vsi_delete_mac(vsi, &mac_filter.mac_addr);
	if (ret != I40E_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to delete mac filter");
		return;
	}
	memcpy(&mac_filter.mac_addr, mac_addr, ETH_ADDR_LEN);
	ret = i40e_vsi_add_mac(vsi, &mac_filter);
	if (ret != I40E_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to add mac filter");
		return;
	}
	memcpy(&pf->dev_addr, mac_addr, ETH_ADDR_LEN);

	i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_WOL,
				  mac_addr->addr_bytes, NULL);
}
static int
i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct rte_eth_dev_data *dev_data = pf->dev_data;
	uint32_t frame_size = mtu + I40E_ETH_OVERHEAD;
	int ret = 0;

	/* check if mtu is within the allowed range */
	if ((mtu < ETHER_MIN_MTU) || (frame_size > I40E_FRAME_SIZE_MAX))
		return -EINVAL;

	/* mtu setting is forbidden if port is started */
	if (dev_data->dev_started) {
		PMD_DRV_LOG(ERR, "port %d must be stopped before configuration",
			    dev_data->port_id);
		return -EBUSY;
	}

	if (frame_size > ETHER_MAX_LEN)
		dev_data->dev_conf.rxmode.jumbo_frame = 1;
	else
		dev_data->dev_conf.rxmode.jumbo_frame = 0;

	dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;

	return ret;
}
/* Restore ethertype filter */
static void
i40e_ethertype_filter_restore(struct i40e_pf *pf)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	struct i40e_ethertype_filter_list
		*ethertype_list = &pf->ethertype.ethertype_list;
	struct i40e_ethertype_filter *f;
	struct i40e_control_filter_stats stats;
	uint16_t flags;

	TAILQ_FOREACH(f, ethertype_list, rules) {
		flags = 0;
		if (!(f->flags & RTE_ETHTYPE_FLAGS_MAC))
			flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
		if (f->flags & RTE_ETHTYPE_FLAGS_DROP)
			flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
		flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;

		memset(&stats, 0, sizeof(stats));
		i40e_aq_add_rem_control_packet_filter(hw,
					    f->input.mac_addr.addr_bytes,
					    f->input.ether_type,
					    flags, pf->main_vsi->seid,
					    f->queue, 1, &stats, NULL);
	}
	PMD_DRV_LOG(INFO, "Ethertype filter:"
		    " mac_etype_used = %u, etype_used = %u,"
		    " mac_etype_free = %u, etype_free = %u",
		    stats.mac_etype_used, stats.etype_used,
		    stats.mac_etype_free, stats.etype_free);
}
/* Restore tunnel filter */
static void
i40e_tunnel_filter_restore(struct i40e_pf *pf)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	struct i40e_vsi *vsi;
	struct i40e_pf_vf *vf;
	struct i40e_tunnel_filter_list
		*tunnel_list = &pf->tunnel.tunnel_list;
	struct i40e_tunnel_filter *f;
	struct i40e_aqc_add_rm_cloud_filt_elem_ext cld_filter;
	bool big_buffer = 0;

	TAILQ_FOREACH(f, tunnel_list, rules) {
		if (!f->is_to_vf)
			vsi = pf->main_vsi;
		else {
			vf = &pf->vfs[f->vf_id];
			vsi = vf->vsi;
		}
		memset(&cld_filter, 0, sizeof(cld_filter));
		ether_addr_copy((struct ether_addr *)&f->input.outer_mac,
			(struct ether_addr *)&cld_filter.element.outer_mac);
		ether_addr_copy((struct ether_addr *)&f->input.inner_mac,
			(struct ether_addr *)&cld_filter.element.inner_mac);
		cld_filter.element.inner_vlan = f->input.inner_vlan;
		cld_filter.element.flags = f->input.flags;
		cld_filter.element.tenant_id = f->input.tenant_id;
		cld_filter.element.queue_number = f->queue;
		rte_memcpy(cld_filter.general_fields,
			   f->input.general_fields,
			   sizeof(f->input.general_fields));

		if (((f->input.flags &
		     I40E_AQC_ADD_CLOUD_FILTER_0X11) ==
		     I40E_AQC_ADD_CLOUD_FILTER_0X11) ||
		    ((f->input.flags &
		     I40E_AQC_ADD_CLOUD_FILTER_0X12) ==
		     I40E_AQC_ADD_CLOUD_FILTER_0X12) ||
		    ((f->input.flags &
		     I40E_AQC_ADD_CLOUD_FILTER_0X10) ==
		     I40E_AQC_ADD_CLOUD_FILTER_0X10))
			big_buffer = 1;

		if (big_buffer)
			i40e_aq_add_cloud_filters_big_buffer(hw,
					     vsi->seid, &cld_filter, 1);
		else
			i40e_aq_add_cloud_filters(hw, vsi->seid,
						  &cld_filter.element, 1);
	}
}
/* Restore rss filter */
static inline void
i40e_rss_filter_restore(struct i40e_pf *pf)
{
	struct i40e_rte_flow_rss_conf *conf =
					&pf->rss_info;
	if (conf->num)
		i40e_config_rss_filter(pf, conf, TRUE);
}

static void
i40e_filter_restore(struct i40e_pf *pf)
{
	i40e_ethertype_filter_restore(pf);
	i40e_tunnel_filter_restore(pf);
	i40e_fdir_filter_restore(pf);
	i40e_rss_filter_restore(pf);
}
static bool
is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
{
	if (strcmp(dev->device->driver->name, drv->driver.name))
		return false;

	return true;
}

bool
is_i40e_supported(struct rte_eth_dev *dev)
{
	return is_device_supported(dev, &rte_i40e_pmd);
}

struct i40e_customized_pctype*
i40e_find_customized_pctype(struct i40e_pf *pf, uint8_t index)
{
	int i;

	for (i = 0; i < I40E_CUSTOMIZED_MAX; i++) {
		if (pf->customized_pctype[i].index == index)
			return &pf->customized_pctype[i];
	}
	return NULL;
}
static int
i40e_update_customized_pctype(struct rte_eth_dev *dev, uint8_t *pkg,
			      uint32_t pkg_size, uint32_t proto_num,
			      struct rte_pmd_i40e_proto_info *proto)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	uint32_t pctype_num;
	struct rte_pmd_i40e_ptype_info *pctype;
	uint32_t buff_size;
	struct i40e_customized_pctype *new_pctype = NULL;
	uint8_t proto_id;
	uint8_t pctype_value;
	char name[64];
	uint32_t i, j, n;
	int ret;

	/* get information about new pctype num */
	ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
				(uint8_t *)&pctype_num, sizeof(pctype_num),
				RTE_PMD_I40E_PKG_INFO_PCTYPE_NUM);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to get pctype number");
		return -1;
	}
	if (!pctype_num) {
		PMD_DRV_LOG(INFO, "No new pctype added");
		return -1;
	}

	buff_size = pctype_num * sizeof(struct rte_pmd_i40e_proto_info);
	pctype = rte_zmalloc("new_pctype", buff_size, 0);
	if (!pctype) {
		PMD_DRV_LOG(ERR, "Failed to allocate memory");
		return -1;
	}
	/* get information about new pctype list */
	ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
					(uint8_t *)pctype, buff_size,
					RTE_PMD_I40E_PKG_INFO_PCTYPE_LIST);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to get pctype list");
		rte_free(pctype);
		return -1;
	}

	/* Update customized pctype. */
	for (i = 0; i < pctype_num; i++) {
		pctype_value = pctype[i].ptype_id;
		memset(name, 0, sizeof(name));
		for (j = 0; j < RTE_PMD_I40E_PROTO_NUM; j++) {
			proto_id = pctype[i].protocols[j];
			if (proto_id == RTE_PMD_I40E_PROTO_UNUSED)
				continue;
			for (n = 0; n < proto_num; n++) {
				if (proto[n].proto_id != proto_id)
					continue;
				strcat(name, proto[n].name);
				strcat(name, "_");
				break;
			}
		}
		/* Replace the trailing '_' with a terminator */
		name[strlen(name) - 1] = '\0';
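		/*
		 * e.g. a pctype whose protocol chain is {GTPU, IPV4} yields
		 * the name "GTPU_IPV4", which is matched against the
		 * customized pctypes known to the driver below.
		 */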
		if (!strcmp(name, "GTPC"))
			new_pctype =
				i40e_find_customized_pctype(pf,
						      I40E_CUSTOMIZED_GTPC);
		else if (!strcmp(name, "GTPU_IPV4"))
			new_pctype =
				i40e_find_customized_pctype(pf,
						   I40E_CUSTOMIZED_GTPU_IPV4);
		else if (!strcmp(name, "GTPU_IPV6"))
			new_pctype =
				i40e_find_customized_pctype(pf,
						   I40E_CUSTOMIZED_GTPU_IPV6);
		else if (!strcmp(name, "GTPU"))
			new_pctype =
				i40e_find_customized_pctype(pf,
						      I40E_CUSTOMIZED_GTPU);

		if (new_pctype) {
			new_pctype->pctype = pctype_value;
			new_pctype->valid = true;
		}
	}

	rte_free(pctype);
	return 0;
}
static int
i40e_update_customized_ptype(struct rte_eth_dev *dev, uint8_t *pkg,
			     uint32_t pkg_size, uint32_t proto_num,
			     struct rte_pmd_i40e_proto_info *proto)
{
	struct rte_pmd_i40e_ptype_mapping *ptype_mapping;
	uint16_t port_id = dev->data->port_id;
	uint32_t ptype_num;
	struct rte_pmd_i40e_ptype_info *ptype;
	uint32_t buff_size;
	uint8_t proto_id;
	char name[RTE_PMD_I40E_DDP_NAME_SIZE];
	uint32_t i, j, n;
	bool in_tunnel;
	int ret;

	/* get information about new ptype num */
	ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
				(uint8_t *)&ptype_num, sizeof(ptype_num),
				RTE_PMD_I40E_PKG_INFO_PTYPE_NUM);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to get ptype number");
		return ret;
	}
	if (!ptype_num) {
		PMD_DRV_LOG(INFO, "No new ptype added");
		return -1;
	}

	buff_size = ptype_num * sizeof(struct rte_pmd_i40e_ptype_info);
	ptype = rte_zmalloc("new_ptype", buff_size, 0);
	if (!ptype) {
		PMD_DRV_LOG(ERR, "Failed to allocate memory");
		return -1;
	}

	/* get information about new ptype list */
	ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
					(uint8_t *)ptype, buff_size,
					RTE_PMD_I40E_PKG_INFO_PTYPE_LIST);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to get ptype list");
		rte_free(ptype);
		return ret;
	}

	buff_size = ptype_num * sizeof(struct rte_pmd_i40e_ptype_mapping);
	ptype_mapping = rte_zmalloc("ptype_mapping", buff_size, 0);
	if (!ptype_mapping) {
		PMD_DRV_LOG(ERR, "Failed to allocate memory");
		rte_free(ptype);
		return -1;
	}
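	/*
	 * Walk each ptype's protocol chain in order: once an outer or tunnel
	 * header (OIPV4, OIPV6, GTPC, GTPU, GRENAT, L2TPV2CTL) is seen, the
	 * in_tunnel flag makes later L3/L4 protocols map to RTE_PTYPE_INNER_*
	 * values instead of the outer-layer ones.
	 */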
	/* Update ptype mapping table. */
	for (i = 0; i < ptype_num; i++) {
		ptype_mapping[i].hw_ptype = ptype[i].ptype_id;
		ptype_mapping[i].sw_ptype = 0;
		in_tunnel = false;
		for (j = 0; j < RTE_PMD_I40E_PROTO_NUM; j++) {
			proto_id = ptype[i].protocols[j];
			if (proto_id == RTE_PMD_I40E_PROTO_UNUSED)
				continue;
			for (n = 0; n < proto_num; n++) {
				if (proto[n].proto_id != proto_id)
					continue;
				memset(name, 0, sizeof(name));
				strcpy(name, proto[n].name);
				if (!strncasecmp(name, "PPPOE", 5))
					ptype_mapping[i].sw_ptype |=
						RTE_PTYPE_L2_ETHER_PPPOE;
				else if (!strncasecmp(name, "IPV4FRAG", 8) &&
					 !in_tunnel) {
					ptype_mapping[i].sw_ptype |=
						RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
					ptype_mapping[i].sw_ptype |=
						RTE_PTYPE_L4_FRAG;
				} else if (!strncasecmp(name, "IPV4FRAG", 8) &&
					   in_tunnel) {
					ptype_mapping[i].sw_ptype |=
					    RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
					ptype_mapping[i].sw_ptype |=
						RTE_PTYPE_INNER_L4_FRAG;
				} else if (!strncasecmp(name, "OIPV4", 5)) {
					ptype_mapping[i].sw_ptype |=
						RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
					in_tunnel = true;
				} else if (!strncasecmp(name, "IPV4", 4) &&
					   !in_tunnel)
					ptype_mapping[i].sw_ptype |=
						RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
				else if (!strncasecmp(name, "IPV4", 4) &&
					 in_tunnel)
					ptype_mapping[i].sw_ptype |=
					    RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
				else if (!strncasecmp(name, "IPV6FRAG", 8) &&
					 !in_tunnel) {
					ptype_mapping[i].sw_ptype |=
						RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
					ptype_mapping[i].sw_ptype |=
						RTE_PTYPE_L4_FRAG;
				} else if (!strncasecmp(name, "IPV6FRAG", 8) &&
					   in_tunnel) {
					ptype_mapping[i].sw_ptype |=
					    RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
					ptype_mapping[i].sw_ptype |=
						RTE_PTYPE_INNER_L4_FRAG;
				} else if (!strncasecmp(name, "OIPV6", 5)) {
					ptype_mapping[i].sw_ptype |=
						RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
					in_tunnel = true;
				} else if (!strncasecmp(name, "IPV6", 4) &&
					   !in_tunnel)
					ptype_mapping[i].sw_ptype |=
						RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
				else if (!strncasecmp(name, "IPV6", 4) &&
					 in_tunnel)
					ptype_mapping[i].sw_ptype |=
					    RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
				else if (!strncasecmp(name, "UDP", 3) &&
					 !in_tunnel)
					ptype_mapping[i].sw_ptype |=
						RTE_PTYPE_L4_UDP;
				else if (!strncasecmp(name, "UDP", 3) &&
					 in_tunnel)
					ptype_mapping[i].sw_ptype |=
						RTE_PTYPE_INNER_L4_UDP;
				else if (!strncasecmp(name, "TCP", 3) &&
					 !in_tunnel)
					ptype_mapping[i].sw_ptype |=
						RTE_PTYPE_L4_TCP;
				else if (!strncasecmp(name, "TCP", 3) &&
					 in_tunnel)
					ptype_mapping[i].sw_ptype |=
						RTE_PTYPE_INNER_L4_TCP;
				else if (!strncasecmp(name, "SCTP", 4) &&
					 !in_tunnel)
					ptype_mapping[i].sw_ptype |=
						RTE_PTYPE_L4_SCTP;
				else if (!strncasecmp(name, "SCTP", 4) &&
					 in_tunnel)
					ptype_mapping[i].sw_ptype |=
						RTE_PTYPE_INNER_L4_SCTP;
				else if ((!strncasecmp(name, "ICMP", 4) ||
					  !strncasecmp(name, "ICMPV6", 6)) &&
					 !in_tunnel)
					ptype_mapping[i].sw_ptype |=
						RTE_PTYPE_L4_ICMP;
				else if ((!strncasecmp(name, "ICMP", 4) ||
					  !strncasecmp(name, "ICMPV6", 6)) &&
					 in_tunnel)
					ptype_mapping[i].sw_ptype |=
						RTE_PTYPE_INNER_L4_ICMP;
				else if (!strncasecmp(name, "GTPC", 4)) {
					ptype_mapping[i].sw_ptype |=
						RTE_PTYPE_TUNNEL_GTPC;
					in_tunnel = true;
				} else if (!strncasecmp(name, "GTPU", 4)) {
					ptype_mapping[i].sw_ptype |=
						RTE_PTYPE_TUNNEL_GTPU;
					in_tunnel = true;
				} else if (!strncasecmp(name, "GRENAT", 6)) {
					ptype_mapping[i].sw_ptype |=
						RTE_PTYPE_TUNNEL_GRENAT;
					in_tunnel = true;
				} else if (!strncasecmp(name, "L2TPV2CTL", 9)) {
					ptype_mapping[i].sw_ptype |=
						RTE_PTYPE_TUNNEL_L2TP;
					in_tunnel = true;
				}

				break;
			}
		}
	}

	ret = rte_pmd_i40e_ptype_mapping_update(port_id, ptype_mapping,
						ptype_num, 0);
	if (ret)
		PMD_DRV_LOG(ERR, "Failed to update mapping table.");

	rte_free(ptype_mapping);
	rte_free(ptype);
	return ret;
}
void
i40e_update_customized_info(struct rte_eth_dev *dev, uint8_t *pkg,
			    uint32_t pkg_size)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	uint32_t proto_num;
	struct rte_pmd_i40e_proto_info *proto;
	uint32_t buff_size;
	uint32_t i;
	int ret;

	/* get information about protocol number */
	ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
				(uint8_t *)&proto_num, sizeof(proto_num),
				RTE_PMD_I40E_PKG_INFO_PROTOCOL_NUM);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to get protocol number");
		return;
	}
	if (!proto_num) {
		PMD_DRV_LOG(INFO, "No new protocol added");
		return;
	}

	buff_size = proto_num * sizeof(struct rte_pmd_i40e_proto_info);
	proto = rte_zmalloc("new_proto", buff_size, 0);
	if (!proto) {
		PMD_DRV_LOG(ERR, "Failed to allocate memory");
		return;
	}

	/* get information about protocol list */
	ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
					(uint8_t *)proto, buff_size,
					RTE_PMD_I40E_PKG_INFO_PROTOCOL_LIST);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to get protocol list");
		rte_free(proto);
		return;
	}

	/* Check if GTP is supported. */
	for (i = 0; i < proto_num; i++) {
		if (!strncmp(proto[i].name, "GTP", 3)) {
			pf->gtp_support = true;
			break;
		}
	}

	/* Update customized pctype info */
	ret = i40e_update_customized_pctype(dev, pkg, pkg_size,
					    proto_num, proto);
	if (ret)
		PMD_DRV_LOG(INFO, "No pctype is updated.");

	/* Update customized ptype info */
	ret = i40e_update_customized_ptype(dev, pkg, pkg_size,
					   proto_num, proto);
	if (ret)
		PMD_DRV_LOG(INFO, "No ptype is updated.");

	rte_free(proto);
}
/* Create a QinQ cloud filter
 *
 * The Fortville NIC has limited resources for tunnel filters,
 * so we can only reuse existing filters.
 *
 * In step 1 we define which Field Vector fields can be used for
 * filter types.
 * As we do not have the inner tag defined as a field,
 * we have to define it first, by reusing one of L1 entries.
 *
 * In step 2 we are replacing one of existing filter types with
 * a new one for QinQ.
 * As we are reusing L1 and replacing L2, some of the default filter
 * types will disappear, which depends on L1 and L2 entries we reuse.
 *
 * Step 1: Create L1 filter of outer vlan (12b) + inner vlan (12b)
 *
 * 1.	Create L1 filter of outer vlan (12b) which will be in use
 *	later when we define the cloud filter.
 *	a.	Valid_flags.replace_cloud = 0
 *	b.	Old_filter = 10 (Stag_Inner_Vlan)
 *	c.	New_filter = 0x10
 *	d.	TR bit = 0xff (optional, not used here)
 *	e.	Buffer - 2 entries:
 *		i.	Byte 0 = 8 (outer vlan FV index).
 *			Byte 1 = 0 (rsv)
 *			Byte 2-3 = 0x0fff
 *		ii.	Byte 0 = 37 (inner vlan FV index).
 *			Byte 1 = 0 (rsv)
 *			Byte 2-3 = 0x0fff
 *
 * 2.	Create cloud filter using two L1 filters entries: stag and
 *	new filter (outer vlan + inner vlan)
 *	a.	Valid_flags.replace_cloud = 1
 *	b.	Old_filter = 1 (instead of outer IP)
 *	c.	New_filter = 0x10
 *	d.	Buffer - 2 entries:
 *		i.	Byte 0 = 0x80 | 7 (valid | Stag).
 *			Byte 1-3 = 0 (rsv)
 *		ii.	Byte 8 = 0x80 | 0x10 (valid | new l1 filter step1)
 *			Byte 9-11 = 0 (rsv)
 */
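/*
 * The two i40e_aq_replace_cloud_filters() calls below implement step 1
 * (the L1 outer+inner VLAN filter) and step 2 (the L2 cloud filter that
 * combines the S-Tag with the new L1 filter), in that order.
 */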
static int
i40e_cloud_filter_qinq_create(struct i40e_pf *pf)
{
	int ret = -ENOTSUP;
	struct i40e_aqc_replace_cloud_filters_cmd filter_replace;
	struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf;
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);

	/* Init */
	memset(&filter_replace, 0,
	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
	memset(&filter_replace_buf, 0,
	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));

	/* create L1 filter */
	filter_replace.old_filter_type =
		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_IVLAN;
	filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_0X10;
	filter_replace.tr_bit = 0;

	/* Prepare the buffer, 2 entries */
	filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_VLAN;
	filter_replace_buf.data[0] |=
		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
	/* Field Vector 12b mask */
	filter_replace_buf.data[2] = 0xff;
	filter_replace_buf.data[3] = 0x0f;
	filter_replace_buf.data[4] =
		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_INNER_VLAN;
	filter_replace_buf.data[4] |=
		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
	/* Field Vector 12b mask */
	filter_replace_buf.data[6] = 0xff;
	filter_replace_buf.data[7] = 0x0f;
	ret = i40e_aq_replace_cloud_filters(hw, &filter_replace,
					    &filter_replace_buf);
	if (ret != I40E_SUCCESS)
		return ret;
	/* Apply the second L2 cloud filter */
	memset(&filter_replace, 0,
	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
	memset(&filter_replace_buf, 0,
	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));

	/* create L2 filter, input for L2 filter will be L1 filter */
	filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
	filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_OIP;
	filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_0X10;

	/* Prepare the buffer, 2 entries */
	filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
	filter_replace_buf.data[0] |=
		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
	filter_replace_buf.data[4] = I40E_AQC_ADD_CLOUD_FILTER_0X10;
	filter_replace_buf.data[4] |=
		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
	ret = i40e_aq_replace_cloud_filters(hw, &filter_replace,
					    &filter_replace_buf);
	return ret;
}
int
i40e_config_rss_filter(struct i40e_pf *pf,
		struct i40e_rte_flow_rss_conf *conf, bool add)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	uint32_t i, lut = 0;
	uint16_t j, num;
	struct rte_eth_rss_conf rss_conf = conf->rss_conf;
	struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;

	if (!add) {
		if (memcmp(conf, rss_info,
			sizeof(struct i40e_rte_flow_rss_conf)) == 0) {
			i40e_pf_disable_rss(pf);
			memset(rss_info, 0,
				sizeof(struct i40e_rte_flow_rss_conf));
			return 0;
		}
		return -EINVAL;
	}

	if (rss_info->num)
		return -EINVAL;

	/* If both VMDQ and RSS enabled, not all of PF queues are configured.
	 * It's necessary to calculate the actual PF queues that are configured.
	 */
	if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
		num = i40e_pf_calc_configured_queues_num(pf);
	else
		num = pf->dev_data->nb_rx_queues;

	num = RTE_MIN(num, conf->num);
	PMD_DRV_LOG(INFO, "Max of contiguous %u PF queues are configured",
			num);

	if (num == 0) {
		PMD_DRV_LOG(ERR, "No PF queues are configured to enable RSS");
		return -ENOTSUP;
	}

	/* Fill in redirection table */
	for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
		if (j == num)
			j = 0;
		lut = (lut << 8) | (conf->queue[j] & ((0x1 <<
			hw->func_caps.rss_table_entry_width) - 1));
		if ((i & 3) == 3)
			I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
	}
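	/*
	 * The redirection table above packs four 8-bit queue indexes into
	 * each 32-bit PFQF_HLUT register, flushing the accumulated word on
	 * every fourth entry.
	 */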
	if ((rss_conf.rss_hf & pf->adapter->flow_types_mask) == 0) {
		i40e_pf_disable_rss(pf);
		return -EINVAL;
	}
	if (rss_conf.rss_key == NULL || rss_conf.rss_key_len <
	    (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
		/* Random default keys */
		static uint32_t rss_key_default[] = {0x6b793944,
			0x23504cb5, 0x5bea75b6, 0x309f4f12, 0x3dc0a2b8,
			0x024ddcdf, 0x339b8ca0, 0x4c4af64a, 0x34fac605,
			0x55d85839, 0x3a58997d, 0x2ec938e1, 0x66031581};

		rss_conf.rss_key = (uint8_t *)rss_key_default;
		rss_conf.rss_key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
							sizeof(uint32_t);
	}

	i40e_hw_rss_hash_set(pf, &rss_conf);

	rte_memcpy(rss_info,
		   conf, sizeof(struct i40e_rte_flow_rss_conf));

	return 0;
}
RTE_INIT(i40e_init_log);
static void
i40e_init_log(void)
{
	i40e_logtype_init = rte_log_register("pmd.net.i40e.init");
	if (i40e_logtype_init >= 0)
		rte_log_set_level(i40e_logtype_init, RTE_LOG_NOTICE);
	i40e_logtype_driver = rte_log_register("pmd.net.i40e.driver");
	if (i40e_logtype_driver >= 0)
		rte_log_set_level(i40e_logtype_driver, RTE_LOG_NOTICE);
}