/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <inttypes.h>
#include <assert.h>

#include <rte_string_fns.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_alarm.h>
#include <rte_dev.h>
#include <rte_eth_ctrl.h>

#include "i40e_logs.h"
#include "base/i40e_prototype.h"
#include "base/i40e_adminq_cmd.h"
#include "base/i40e_type.h"
#include "base/i40e_register.h"
#include "base/i40e_dcb.h"
#include "i40e_ethdev.h"
#include "i40e_rxtx.h"
/* Maximum number of MAC addresses */
#define I40E_NUM_MACADDR_MAX       64
#define I40E_CLEAR_PXE_WAIT_MS     200

/* Maximum number of capability elements */
#define I40E_MAX_CAP_ELE_NUM       128

/* Wait count and interval */
#define I40E_CHK_Q_ENA_COUNT       1000
#define I40E_CHK_Q_ENA_INTERVAL_US 1000

/* Maximum number of VSI */
#define I40E_MAX_NUM_VSIS          (384UL)

/* Default queue interrupt throttling time in microseconds */
#define I40E_ITR_INDEX_DEFAULT          0
#define I40E_QUEUE_ITR_INTERVAL_DEFAULT 32 /* 32 us */
#define I40E_QUEUE_ITR_INTERVAL_MAX     8160 /* 8160 us */
#define I40E_PRE_TX_Q_CFG_WAIT_US       10 /* 10 us */

/* Flow control default timer */
#define I40E_DEFAULT_PAUSE_TIME 0xFFFFU

/* Flow control default high water */
#define I40E_DEFAULT_HIGH_WATER (0x1C40/1024)

/* Flow control default low water */
#define I40E_DEFAULT_LOW_WATER  (0x1A40/1024)
/* Flow control enable fwd bit */
#define I40E_PRTMAC_FWD_CTRL    0x00000001

/* Receive Packet Buffer size */
#define I40E_RXPBSIZE (968 * 1024)

/* Kilobytes shift */
#define I40E_KILOSHIFT 10

/* Receive Average Packet Size in bytes */
#define I40E_PACKET_AVERAGE_SIZE 128
/* Mask of PF interrupt causes */
#define I40E_PFINT_ICR0_ENA_MASK ( \
		I40E_PFINT_ICR0_ENA_ECC_ERR_MASK | \
		I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK | \
		I40E_PFINT_ICR0_ENA_GRST_MASK | \
		I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK | \
		I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK | \
		I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK | \
		I40E_PFINT_ICR0_ENA_HMC_ERR_MASK | \
		I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK | \
		I40E_PFINT_ICR0_ENA_VFLR_MASK | \
		I40E_PFINT_ICR0_ENA_ADMINQ_MASK)

#define I40E_FLOW_TYPES ( \
	(1UL << RTE_ETH_FLOW_FRAG_IPV4) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) | \
	(1UL << RTE_ETH_FLOW_FRAG_IPV6) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV6_TCP) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV6_UDP) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER) | \
	(1UL << RTE_ETH_FLOW_L2_PAYLOAD))
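/*
 * I40E_FLOW_TYPES is a bitmap indexed by the RTE_ETH_FLOW_* identifiers,
 * so a support check reduces to one bit test. A minimal sketch, where
 * flow_type is a hypothetical local variable holding one such identifier:
 *
 *	if (!(I40E_FLOW_TYPES & (1UL << flow_type)))
 *		return -EINVAL;	(flow type not handled by this PMD)
 */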
#define I40E_PTP_40GB_INCVAL  0x0199999999ULL
#define I40E_PTP_10GB_INCVAL  0x0333333333ULL
#define I40E_PTP_1GB_INCVAL   0x2000000000ULL
#define I40E_PRTTSYN_TSYNENA  0x80000000
#define I40E_PRTTSYN_TSYNTYPE 0x0e000000

#define I40E_MAX_PERCENT          100
#define I40E_DEFAULT_DCB_APP_NUM  1
#define I40E_DEFAULT_DCB_APP_PRIO 3

#define I40E_PRTQF_FD_INSET(_i, _j)  (0x00250000 + ((_i) * 64 + (_j) * 32))
#define I40E_GLQF_FD_MSK(_i, _j)     (0x00267200 + ((_i) * 4 + (_j) * 8))
#define I40E_GLQF_FD_MSK_FIELD       0x0000FFFF
#define I40E_GLQF_HASH_INSET(_i, _j) (0x00267600 + ((_i) * 4 + (_j) * 8))
#define I40E_GLQF_HASH_MSK(_i, _j)   (0x00267A00 + ((_i) * 4 + (_j) * 8))
#define I40E_GLQF_HASH_MSK_FIELD     0x0000FFFF
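/*
 * The (_i, _j) macros above compute absolute register addresses from a
 * pair of indices; e.g. I40E_GLQF_HASH_INSET(0, 1) evaluates to
 * 0x00267600 + (0 * 4 + 1 * 8) = 0x00267608.
 */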
#define I40E_INSET_NONE            0x0000000000000000ULL

/* bit 0 ~ bit 15 */
#define I40E_INSET_DMAC            0x0000000000000001ULL
#define I40E_INSET_SMAC            0x0000000000000002ULL
#define I40E_INSET_VLAN_OUTER      0x0000000000000004ULL
#define I40E_INSET_VLAN_INNER      0x0000000000000008ULL
#define I40E_INSET_VLAN_TUNNEL     0x0000000000000010ULL

#define I40E_INSET_IPV4_SRC        0x0000000000000100ULL
#define I40E_INSET_IPV4_DST        0x0000000000000200ULL
#define I40E_INSET_IPV6_SRC        0x0000000000000400ULL
#define I40E_INSET_IPV6_DST        0x0000000000000800ULL
#define I40E_INSET_SRC_PORT        0x0000000000001000ULL
#define I40E_INSET_DST_PORT        0x0000000000002000ULL
#define I40E_INSET_SCTP_VT         0x0000000000004000ULL

/* bit 16 ~ bit 31 */
#define I40E_INSET_IPV4_TOS        0x0000000000010000ULL
#define I40E_INSET_IPV4_PROTO      0x0000000000020000ULL
#define I40E_INSET_IPV4_TTL        0x0000000000040000ULL
#define I40E_INSET_IPV6_TC         0x0000000000080000ULL
#define I40E_INSET_IPV6_FLOW       0x0000000000100000ULL
#define I40E_INSET_IPV6_NEXT_HDR   0x0000000000200000ULL
#define I40E_INSET_IPV6_HOP_LIMIT  0x0000000000400000ULL
#define I40E_INSET_TCP_FLAGS       0x0000000000800000ULL

/* bit 32 ~ bit 47, tunnel fields */
#define I40E_INSET_TUNNEL_IPV4_DST 0x0000000100000000ULL
#define I40E_INSET_TUNNEL_IPV6_DST 0x0000000200000000ULL
#define I40E_INSET_TUNNEL_DMAC     0x0000000400000000ULL
#define I40E_INSET_TUNNEL_SRC_PORT 0x0000000800000000ULL
#define I40E_INSET_TUNNEL_DST_PORT 0x0000001000000000ULL
#define I40E_INSET_TUNNEL_ID       0x0000002000000000ULL

/* bit 48 ~ bit 55 */
#define I40E_INSET_LAST_ETHER_TYPE 0x0001000000000000ULL

/* bit 56 ~ bit 63, Flex Payload */
#define I40E_INSET_FLEX_PAYLOAD_W1 0x0100000000000000ULL
#define I40E_INSET_FLEX_PAYLOAD_W2 0x0200000000000000ULL
#define I40E_INSET_FLEX_PAYLOAD_W3 0x0400000000000000ULL
#define I40E_INSET_FLEX_PAYLOAD_W4 0x0800000000000000ULL
#define I40E_INSET_FLEX_PAYLOAD_W5 0x1000000000000000ULL
#define I40E_INSET_FLEX_PAYLOAD_W6 0x2000000000000000ULL
#define I40E_INSET_FLEX_PAYLOAD_W7 0x4000000000000000ULL
#define I40E_INSET_FLEX_PAYLOAD_W8 0x8000000000000000ULL
#define I40E_INSET_FLEX_PAYLOAD \
	(I40E_INSET_FLEX_PAYLOAD_W1 | I40E_INSET_FLEX_PAYLOAD_W2 | \
	I40E_INSET_FLEX_PAYLOAD_W3 | I40E_INSET_FLEX_PAYLOAD_W4 | \
	I40E_INSET_FLEX_PAYLOAD_W5 | I40E_INSET_FLEX_PAYLOAD_W6 | \
	I40E_INSET_FLEX_PAYLOAD_W7 | I40E_INSET_FLEX_PAYLOAD_W8)
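/*
 * The eight flex payload words occupy bits 56 to 63, so the combined
 * I40E_INSET_FLEX_PAYLOAD mask evaluates to 0xFF00000000000000ULL.
 */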
/**
 * Below are values for writing un-exposed registers suggested
 * by silicon experts.
 */
/* Destination MAC address */
#define I40E_REG_INSET_L2_DMAC                  0xE000000000000000ULL
/* Source MAC address */
#define I40E_REG_INSET_L2_SMAC                  0x1C00000000000000ULL
/* VLAN tag in the outer L2 header */
#define I40E_REG_INSET_L2_OUTER_VLAN            0x0000000000800000ULL
/* VLAN tag in the inner L2 header */
#define I40E_REG_INSET_L2_INNER_VLAN            0x0000000001000000ULL
/* Source IPv4 address */
#define I40E_REG_INSET_L3_SRC_IP4               0x0001800000000000ULL
/* Destination IPv4 address */
#define I40E_REG_INSET_L3_DST_IP4               0x0000001800000000ULL
/* IPv4 Type of Service (TOS) */
#define I40E_REG_INSET_L3_IP4_TOS               0x0040000000000000ULL
/* IPv4 Protocol */
#define I40E_REG_INSET_L3_IP4_PROTO             0x0004000000000000ULL
/* Source IPv6 address */
#define I40E_REG_INSET_L3_SRC_IP6               0x0007F80000000000ULL
/* Destination IPv6 address */
#define I40E_REG_INSET_L3_DST_IP6               0x000007F800000000ULL
/* IPv6 Traffic Class (TC) */
#define I40E_REG_INSET_L3_IP6_TC                0x0040000000000000ULL
/* IPv6 Next Header */
#define I40E_REG_INSET_L3_IP6_NEXT_HDR          0x0008000000000000ULL
/* Source L4 port */
#define I40E_REG_INSET_L4_SRC_PORT              0x0000000400000000ULL
/* Destination L4 port */
#define I40E_REG_INSET_L4_DST_PORT              0x0000000200000000ULL
/* SCTP verification tag */
#define I40E_REG_INSET_L4_SCTP_VERIFICATION_TAG 0x0000000180000000ULL
/* Inner destination MAC address (MAC-in-UDP/MAC-in-GRE) */
#define I40E_REG_INSET_TUNNEL_L2_INNER_DST_MAC  0x0000000001C00000ULL
/* Source port of tunneling UDP */
#define I40E_REG_INSET_TUNNEL_L4_UDP_SRC_PORT   0x0000000000200000ULL
/* Destination port of tunneling UDP */
#define I40E_REG_INSET_TUNNEL_L4_UDP_DST_PORT   0x0000000000100000ULL
/* UDP Tunneling ID, NVGRE/GRE key */
#define I40E_REG_INSET_TUNNEL_ID                0x00000000000C0000ULL
/* Last ether type */
#define I40E_REG_INSET_LAST_ETHER_TYPE          0x0000000000004000ULL
/* Tunneling outer destination IPv4 address */
#define I40E_REG_INSET_TUNNEL_L3_DST_IP4        0x00000000000000C0ULL
/* Tunneling outer destination IPv6 address */
#define I40E_REG_INSET_TUNNEL_L3_DST_IP6        0x0000000000003FC0ULL
/* 1st word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD1       0x0000000000002000ULL
/* 2nd word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD2       0x0000000000001000ULL
/* 3rd word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD3       0x0000000000000800ULL
/* 4th word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD4       0x0000000000000400ULL
/* 5th word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD5       0x0000000000000200ULL
/* 6th word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD6       0x0000000000000100ULL
/* 7th word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD7       0x0000000000000080ULL
/* 8th word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD8       0x0000000000000040ULL

#define I40E_REG_INSET_MASK_DEFAULT             0x0000000000000000ULL

#define I40E_TRANSLATE_INSET 0
#define I40E_TRANSLATE_REG   1

#define I40E_INSET_IPV4_TOS_MASK      0x0009FF00UL
#define I40E_INSET_IPV4_PROTO_MASK    0x000DFF00UL
#define I40E_INSET_IPV6_TC_MASK       0x0009F00FUL
#define I40E_INSET_IPV6_NEXT_HDR_MASK 0x000C00FFUL
static int eth_i40e_dev_init(struct rte_eth_dev *eth_dev);
static int eth_i40e_dev_uninit(struct rte_eth_dev *eth_dev);
static int i40e_dev_configure(struct rte_eth_dev *dev);
static int i40e_dev_start(struct rte_eth_dev *dev);
static void i40e_dev_stop(struct rte_eth_dev *dev);
static void i40e_dev_close(struct rte_eth_dev *dev);
static void i40e_dev_promiscuous_enable(struct rte_eth_dev *dev);
static void i40e_dev_promiscuous_disable(struct rte_eth_dev *dev);
static void i40e_dev_allmulticast_enable(struct rte_eth_dev *dev);
static void i40e_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int i40e_dev_set_link_up(struct rte_eth_dev *dev);
static int i40e_dev_set_link_down(struct rte_eth_dev *dev);
static void i40e_dev_stats_get(struct rte_eth_dev *dev,
			       struct rte_eth_stats *stats);
static int i40e_dev_xstats_get(struct rte_eth_dev *dev,
			       struct rte_eth_xstats *xstats, unsigned n);
static void i40e_dev_stats_reset(struct rte_eth_dev *dev);
static void i40e_dev_xstats_reset(struct rte_eth_dev *dev);
static int i40e_dev_queue_stats_mapping_set(struct rte_eth_dev *dev,
					    uint16_t queue_id,
					    uint8_t stat_idx,
					    uint8_t is_rx);
static void i40e_dev_info_get(struct rte_eth_dev *dev,
			      struct rte_eth_dev_info *dev_info);
static int i40e_vlan_filter_set(struct rte_eth_dev *dev,
				uint16_t vlan_id,
				int on);
static void i40e_vlan_tpid_set(struct rte_eth_dev *dev, uint16_t tpid);
static void i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static void i40e_vlan_strip_queue_set(struct rte_eth_dev *dev,
				      uint16_t queue,
				      int on);
static int i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on);
static int i40e_dev_led_on(struct rte_eth_dev *dev);
static int i40e_dev_led_off(struct rte_eth_dev *dev);
static int i40e_flow_ctrl_get(struct rte_eth_dev *dev,
			      struct rte_eth_fc_conf *fc_conf);
static int i40e_flow_ctrl_set(struct rte_eth_dev *dev,
			      struct rte_eth_fc_conf *fc_conf);
static int i40e_priority_flow_ctrl_set(struct rte_eth_dev *dev,
				       struct rte_eth_pfc_conf *pfc_conf);
static void i40e_macaddr_add(struct rte_eth_dev *dev,
			     struct ether_addr *mac_addr,
			     uint32_t index,
			     uint32_t pool);
static void i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index);
static int i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
				    struct rte_eth_rss_reta_entry64 *reta_conf,
				    uint16_t reta_size);
static int i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
				   struct rte_eth_rss_reta_entry64 *reta_conf,
				   uint16_t reta_size);
static int i40e_get_cap(struct i40e_hw *hw);
static int i40e_pf_parameter_init(struct rte_eth_dev *dev);
static int i40e_pf_setup(struct i40e_pf *pf);
static int i40e_dev_rxtx_init(struct i40e_pf *pf);
static int i40e_vmdq_setup(struct rte_eth_dev *dev);
static int i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb);
static int i40e_dcb_setup(struct rte_eth_dev *dev);
static void i40e_stat_update_32(struct i40e_hw *hw, uint32_t reg,
		bool offset_loaded, uint64_t *offset, uint64_t *stat);
static void i40e_stat_update_48(struct i40e_hw *hw,
				uint32_t hireg,
				uint32_t loreg,
				bool offset_loaded,
				uint64_t *offset,
				uint64_t *stat);
static void i40e_pf_config_irq0(struct i40e_hw *hw, bool no_queue);
static void i40e_dev_interrupt_handler(
		__rte_unused struct rte_intr_handle *handle, void *param);
static int i40e_res_pool_init(struct i40e_res_pool_info *pool,
				uint32_t base, uint32_t num);
static void i40e_res_pool_destroy(struct i40e_res_pool_info *pool);
static int i40e_res_pool_free(struct i40e_res_pool_info *pool,
				uint32_t base);
static int i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
				uint16_t num);
static int i40e_dev_init_vlan(struct rte_eth_dev *dev);
static int i40e_veb_release(struct i40e_veb *veb);
static struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf,
				       struct i40e_vsi *vsi);
static int i40e_pf_config_mq_rx(struct i40e_pf *pf);
static int i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on);
static inline int i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi,
					     struct i40e_macvlan_filter *mv_f,
					     int num,
					     struct ether_addr *addr);
static inline int i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
					     struct i40e_macvlan_filter *mv_f,
					     int num,
					     uint16_t vlan);
static int i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi);
static int i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
				    struct rte_eth_rss_conf *rss_conf);
static int i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
				      struct rte_eth_rss_conf *rss_conf);
static int i40e_dev_udp_tunnel_add(struct rte_eth_dev *dev,
				   struct rte_eth_udp_tunnel *udp_tunnel);
static int i40e_dev_udp_tunnel_del(struct rte_eth_dev *dev,
				   struct rte_eth_udp_tunnel *udp_tunnel);
static int i40e_ethertype_filter_set(struct i40e_pf *pf,
				     struct rte_eth_ethertype_filter *filter,
				     bool add);
static int i40e_ethertype_filter_handle(struct rte_eth_dev *dev,
					enum rte_filter_op filter_op,
					void *arg);
static int i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
				enum rte_filter_type filter_type,
				enum rte_filter_op filter_op,
				void *arg);
static int i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
				 struct rte_eth_dcb_info *dcb_info);
static void i40e_configure_registers(struct i40e_hw *hw);
static void i40e_hw_init(struct i40e_hw *hw);
static int i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi);
static int i40e_mirror_rule_set(struct rte_eth_dev *dev,
				struct rte_eth_mirror_conf *mirror_conf,
				uint8_t sw_id, uint8_t on);
static int i40e_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t sw_id);

static int i40e_timesync_enable(struct rte_eth_dev *dev);
static int i40e_timesync_disable(struct rte_eth_dev *dev);
static int i40e_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
					   struct timespec *timestamp,
					   uint32_t flags);
static int i40e_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
					   struct timespec *timestamp);
static void i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw);
static int i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
					 uint16_t queue_id);
static int i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
					  uint16_t queue_id);

static const struct rte_pci_id pci_id_i40e_map[] = {
#define RTE_PCI_DEV_ID_DECL_I40E(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
#include "rte_pci_dev_ids.h"
{ .vendor_id = 0, /* sentinel */ },
};
static const struct eth_dev_ops i40e_eth_dev_ops = {
	.dev_configure = i40e_dev_configure,
	.dev_start = i40e_dev_start,
	.dev_stop = i40e_dev_stop,
	.dev_close = i40e_dev_close,
	.promiscuous_enable = i40e_dev_promiscuous_enable,
	.promiscuous_disable = i40e_dev_promiscuous_disable,
	.allmulticast_enable = i40e_dev_allmulticast_enable,
	.allmulticast_disable = i40e_dev_allmulticast_disable,
	.dev_set_link_up = i40e_dev_set_link_up,
	.dev_set_link_down = i40e_dev_set_link_down,
	.link_update = i40e_dev_link_update,
	.stats_get = i40e_dev_stats_get,
	.xstats_get = i40e_dev_xstats_get,
	.stats_reset = i40e_dev_stats_reset,
	.xstats_reset = i40e_dev_xstats_reset,
	.queue_stats_mapping_set = i40e_dev_queue_stats_mapping_set,
	.dev_infos_get = i40e_dev_info_get,
	.vlan_filter_set = i40e_vlan_filter_set,
	.vlan_tpid_set = i40e_vlan_tpid_set,
	.vlan_offload_set = i40e_vlan_offload_set,
	.vlan_strip_queue_set = i40e_vlan_strip_queue_set,
	.vlan_pvid_set = i40e_vlan_pvid_set,
	.rx_queue_start = i40e_dev_rx_queue_start,
	.rx_queue_stop = i40e_dev_rx_queue_stop,
	.tx_queue_start = i40e_dev_tx_queue_start,
	.tx_queue_stop = i40e_dev_tx_queue_stop,
	.rx_queue_setup = i40e_dev_rx_queue_setup,
	.rx_queue_intr_enable = i40e_dev_rx_queue_intr_enable,
	.rx_queue_intr_disable = i40e_dev_rx_queue_intr_disable,
	.rx_queue_release = i40e_dev_rx_queue_release,
	.rx_queue_count = i40e_dev_rx_queue_count,
	.rx_descriptor_done = i40e_dev_rx_descriptor_done,
	.tx_queue_setup = i40e_dev_tx_queue_setup,
	.tx_queue_release = i40e_dev_tx_queue_release,
	.dev_led_on = i40e_dev_led_on,
	.dev_led_off = i40e_dev_led_off,
	.flow_ctrl_get = i40e_flow_ctrl_get,
	.flow_ctrl_set = i40e_flow_ctrl_set,
	.priority_flow_ctrl_set = i40e_priority_flow_ctrl_set,
	.mac_addr_add = i40e_macaddr_add,
	.mac_addr_remove = i40e_macaddr_remove,
	.reta_update = i40e_dev_rss_reta_update,
	.reta_query = i40e_dev_rss_reta_query,
	.rss_hash_update = i40e_dev_rss_hash_update,
	.rss_hash_conf_get = i40e_dev_rss_hash_conf_get,
	.udp_tunnel_add = i40e_dev_udp_tunnel_add,
	.udp_tunnel_del = i40e_dev_udp_tunnel_del,
	.filter_ctrl = i40e_dev_filter_ctrl,
	.rxq_info_get = i40e_rxq_info_get,
	.txq_info_get = i40e_txq_info_get,
	.mirror_rule_set = i40e_mirror_rule_set,
	.mirror_rule_reset = i40e_mirror_rule_reset,
	.timesync_enable = i40e_timesync_enable,
	.timesync_disable = i40e_timesync_disable,
	.timesync_read_rx_timestamp = i40e_timesync_read_rx_timestamp,
	.timesync_read_tx_timestamp = i40e_timesync_read_tx_timestamp,
	.get_dcb_info = i40e_dev_get_dcb_info,
};
/* store statistics names and their offsets in the stats structure */
struct rte_i40e_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned offset;
};
static const struct rte_i40e_xstats_name_off rte_i40e_stats_strings[] = {
	{"rx_unicast_packets", offsetof(struct i40e_eth_stats, rx_unicast)},
	{"rx_multicast_packets", offsetof(struct i40e_eth_stats, rx_multicast)},
	{"rx_broadcast_packets", offsetof(struct i40e_eth_stats, rx_broadcast)},
	{"rx_dropped", offsetof(struct i40e_eth_stats, rx_discards)},
	{"rx_unknown_protocol_packets", offsetof(struct i40e_eth_stats,
		rx_unknown_protocol)},
	{"tx_unicast_packets", offsetof(struct i40e_eth_stats, tx_unicast)},
	{"tx_multicast_packets", offsetof(struct i40e_eth_stats, tx_multicast)},
	{"tx_broadcast_packets", offsetof(struct i40e_eth_stats, tx_broadcast)},
	{"tx_dropped", offsetof(struct i40e_eth_stats, tx_discards)},
};
static const struct rte_i40e_xstats_name_off rte_i40e_hw_port_strings[] = {
	{"tx_link_down_dropped", offsetof(struct i40e_hw_port_stats,
		tx_dropped_link_down)},
	{"rx_crc_errors", offsetof(struct i40e_hw_port_stats, crc_errors)},
	{"rx_illegal_byte_errors", offsetof(struct i40e_hw_port_stats,
		illegal_bytes)},
	{"rx_error_bytes", offsetof(struct i40e_hw_port_stats, error_bytes)},
	{"mac_local_errors", offsetof(struct i40e_hw_port_stats,
		mac_local_faults)},
	{"mac_remote_errors", offsetof(struct i40e_hw_port_stats,
		mac_remote_faults)},
	{"rx_length_errors", offsetof(struct i40e_hw_port_stats,
		rx_length_errors)},
	{"tx_xon_packets", offsetof(struct i40e_hw_port_stats, link_xon_tx)},
	{"rx_xon_packets", offsetof(struct i40e_hw_port_stats, link_xon_rx)},
	{"tx_xoff_packets", offsetof(struct i40e_hw_port_stats, link_xoff_tx)},
	{"rx_xoff_packets", offsetof(struct i40e_hw_port_stats, link_xoff_rx)},
	{"rx_size_64_packets", offsetof(struct i40e_hw_port_stats, rx_size_64)},
	{"rx_size_65_to_127_packets", offsetof(struct i40e_hw_port_stats,
		rx_size_127)},
	{"rx_size_128_to_255_packets", offsetof(struct i40e_hw_port_stats,
		rx_size_255)},
	{"rx_size_256_to_511_packets", offsetof(struct i40e_hw_port_stats,
		rx_size_511)},
	{"rx_size_512_to_1023_packets", offsetof(struct i40e_hw_port_stats,
		rx_size_1023)},
	{"rx_size_1024_to_1522_packets", offsetof(struct i40e_hw_port_stats,
		rx_size_1522)},
	{"rx_size_1523_to_max_packets", offsetof(struct i40e_hw_port_stats,
		rx_size_big)},
	{"rx_undersized_errors", offsetof(struct i40e_hw_port_stats,
		rx_undersize)},
	{"rx_oversize_errors", offsetof(struct i40e_hw_port_stats,
		rx_oversize)},
	{"rx_mac_short_dropped", offsetof(struct i40e_hw_port_stats,
		mac_short_packet_dropped)},
	{"rx_fragmented_errors", offsetof(struct i40e_hw_port_stats,
		rx_fragments)},
	{"rx_jabber_errors", offsetof(struct i40e_hw_port_stats, rx_jabber)},
	{"tx_size_64_packets", offsetof(struct i40e_hw_port_stats, tx_size_64)},
	{"tx_size_65_to_127_packets", offsetof(struct i40e_hw_port_stats,
		tx_size_127)},
	{"tx_size_128_to_255_packets", offsetof(struct i40e_hw_port_stats,
		tx_size_255)},
	{"tx_size_256_to_511_packets", offsetof(struct i40e_hw_port_stats,
		tx_size_511)},
	{"tx_size_512_to_1023_packets", offsetof(struct i40e_hw_port_stats,
		tx_size_1023)},
	{"tx_size_1024_to_1522_packets", offsetof(struct i40e_hw_port_stats,
		tx_size_1522)},
	{"tx_size_1523_to_max_packets", offsetof(struct i40e_hw_port_stats,
		tx_size_big)},
	{"rx_flow_director_atr_match_packets",
		offsetof(struct i40e_hw_port_stats, fd_atr_match)},
	{"rx_flow_director_sb_match_packets",
		offsetof(struct i40e_hw_port_stats, fd_sb_match)},
	{"tx_low_power_idle_status", offsetof(struct i40e_hw_port_stats,
		tx_lpi_status)},
	{"rx_low_power_idle_status", offsetof(struct i40e_hw_port_stats,
		rx_lpi_status)},
	{"tx_low_power_idle_count", offsetof(struct i40e_hw_port_stats,
		tx_lpi_count)},
	{"rx_low_power_idle_count", offsetof(struct i40e_hw_port_stats,
		rx_lpi_count)},
};
/* Q Stats: 5 stats are exposed for each queue, implemented in xstats_get() */
#define I40E_NB_HW_PORT_Q_STATS (8 * 5)

#define I40E_NB_ETH_XSTATS (sizeof(rte_i40e_stats_strings) / \
		sizeof(rte_i40e_stats_strings[0]))
#define I40E_NB_HW_PORT_XSTATS (sizeof(rte_i40e_hw_port_strings) / \
		sizeof(rte_i40e_hw_port_strings[0]))
#define I40E_NB_XSTATS (I40E_NB_ETH_XSTATS + I40E_NB_HW_PORT_XSTATS + \
		I40E_NB_HW_PORT_Q_STATS)
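/*
 * The name/offset tables above allow counters to be read generically
 * instead of field by field. A minimal sketch of the idea, assuming a
 * hypothetical struct i40e_eth_stats instance named stats (the actual
 * loop in i40e_dev_xstats_get() may differ in detail):
 *
 *	unsigned i;
 *	for (i = 0; i < I40E_NB_ETH_XSTATS; i++) {
 *		uint64_t value = *(uint64_t *)(((char *)&stats) +
 *			rte_i40e_stats_strings[i].offset);
 *		printf("%s: %" PRIu64 "\n",
 *			rte_i40e_stats_strings[i].name, value);
 *	}
 */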
static struct eth_driver rte_i40e_pmd = {
	.pci_drv = {
		.name = "rte_i40e_pmd",
		.id_table = pci_id_i40e_map,
		.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
			RTE_PCI_DRV_DETACHABLE,
	},
	.eth_dev_init = eth_i40e_dev_init,
	.eth_dev_uninit = eth_i40e_dev_uninit,
	.dev_private_size = sizeof(struct i40e_adapter),
};
static inline int
rte_i40e_dev_atomic_read_link_status(struct rte_eth_dev *dev,
				     struct rte_eth_link *link)
{
	struct rte_eth_link *dst = link;
	struct rte_eth_link *src = &(dev->data->dev_link);

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
				*(uint64_t *)src) == 0)
		return -1;

	return 0;
}
static inline int
rte_i40e_dev_atomic_write_link_status(struct rte_eth_dev *dev,
				      struct rte_eth_link *link)
{
	struct rte_eth_link *dst = &(dev->data->dev_link);
	struct rte_eth_link *src = link;

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
				*(uint64_t *)src) == 0)
		return -1;

	return 0;
}
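/*
 * Both helpers above depend on struct rte_eth_link fitting into 64 bits:
 * rte_atomic64_cmpset() is used as an atomic 8-byte copy, with the current
 * destination contents passed as the expected value, so the store only
 * fails (returning 0, hence the -1 result) if another writer raced in
 * between the read and the update.
 */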
/*
 * Driver initialization routine.
 * Invoked once at EAL init time.
 * Register itself as the [Poll Mode] Driver of PCI i40e devices.
 */
static int
rte_i40e_pmd_init(const char *name __rte_unused,
		  const char *params __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
	rte_eth_driver_register(&rte_i40e_pmd);

	return 0;
}
static struct rte_driver rte_i40e_driver = {
	.type = PMD_PDEV,
	.init = rte_i40e_pmd_init,
};

PMD_REGISTER_DRIVER(rte_i40e_driver);
/*
 * Initialize registers for flexible payload, which should be set by NVM.
 * This should be removed from code once it is fixed in NVM.
 */
#ifndef I40E_GLQF_ORT
#define I40E_GLQF_ORT(_i)    (0x00268900 + ((_i) * 4))
#endif
#ifndef I40E_GLQF_PIT
#define I40E_GLQF_PIT(_i)    (0x00268C80 + ((_i) * 4))
#endif
static inline void i40e_flex_payload_reg_init(struct i40e_hw *hw)
{
	I40E_WRITE_REG(hw, I40E_GLQF_ORT(18), 0x00000030);
	I40E_WRITE_REG(hw, I40E_GLQF_ORT(19), 0x00000030);
	I40E_WRITE_REG(hw, I40E_GLQF_ORT(26), 0x0000002B);
	I40E_WRITE_REG(hw, I40E_GLQF_ORT(30), 0x0000002B);
	I40E_WRITE_REG(hw, I40E_GLQF_ORT(33), 0x000000E0);
	I40E_WRITE_REG(hw, I40E_GLQF_ORT(34), 0x000000E3);
	I40E_WRITE_REG(hw, I40E_GLQF_ORT(35), 0x000000E6);
	I40E_WRITE_REG(hw, I40E_GLQF_ORT(20), 0x00000031);
	I40E_WRITE_REG(hw, I40E_GLQF_ORT(23), 0x00000031);
	I40E_WRITE_REG(hw, I40E_GLQF_ORT(63), 0x0000002D);

	/* GLQF_PIT Registers */
	I40E_WRITE_REG(hw, I40E_GLQF_PIT(16), 0x00007480);
	I40E_WRITE_REG(hw, I40E_GLQF_PIT(17), 0x00007440);
}
#define I40E_FLOW_CONTROL_ETHERTYPE  0x8808

/*
 * Add an ethertype filter to drop all flow control frames transmitted
 * from VSIs.
 */
static void
i40e_add_tx_flow_control_drop_filter(struct i40e_pf *pf)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	uint16_t flags = I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC |
			I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP |
			I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX;
	int ret;

	ret = i40e_aq_add_rem_control_packet_filter(hw, NULL,
				I40E_FLOW_CONTROL_ETHERTYPE, flags,
				pf->main_vsi_seid, 0,
				TRUE, NULL, NULL);
	if (ret < 0)
		PMD_INIT_LOG(ERR, "Failed to add filter to drop flow control"
			     " frames from VSIs.");
}
static int
eth_i40e_dev_init(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev;
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_vsi *vsi;
	int ret;
	uint32_t len;
	uint8_t aq_fail = 0;

	PMD_INIT_FUNC_TRACE();

	dev->dev_ops = &i40e_eth_dev_ops;
	dev->rx_pkt_burst = i40e_recv_pkts;
	dev->tx_pkt_burst = i40e_xmit_pkts;

	/* for secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check we don't need a different
	 * RX function */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		i40e_set_rx_function(dev);
		i40e_set_tx_function(dev);
		return 0;
	}
	pci_dev = dev->pci_dev;

	rte_eth_copy_pci_info(dev, pci_dev);

	pf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	pf->adapter->eth_dev = dev;
	pf->dev_data = dev->data;

	hw->back = I40E_PF_TO_ADAPTER(pf);
	hw->hw_addr = (uint8_t *)(pci_dev->mem_resource[0].addr);
	if (!hw->hw_addr) {
		PMD_INIT_LOG(ERR, "Hardware is not available, "
			     "as address is NULL");
		return -ENODEV;
	}

	hw->vendor_id = pci_dev->id.vendor_id;
	hw->device_id = pci_dev->id.device_id;
	hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
	hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
	hw->bus.device = pci_dev->addr.devid;
	hw->bus.func = pci_dev->addr.function;
	hw->adapter_stopped = 0;

	/* Make sure all is clean before doing PF reset */
	i40e_clear_hw(hw);

	/* Initialize the hardware */
	i40e_hw_init(hw);

	/* Reset here to make sure all is clean for each PF */
	ret = i40e_pf_reset(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to reset pf: %d", ret);
		return ret;
	}

	/* Initialize the shared code (base driver) */
	ret = i40e_init_shared_code(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to init shared code (base driver): %d", ret);
		return ret;
	}

	/*
	 * To work around the NVM issue, initialize registers
	 * for flexible payload by software.
	 * It should be removed once issues are fixed in NVM.
	 */
	i40e_flex_payload_reg_init(hw);

	/* Initialize the parameters for adminq */
	i40e_init_adminq_parameter(hw);
	ret = i40e_init_adminq(hw);
	if (ret != I40E_SUCCESS) {
		PMD_INIT_LOG(ERR, "Failed to init adminq: %d", ret);
		return -EIO;
	}
	PMD_INIT_LOG(INFO, "FW %d.%d API %d.%d NVM %02d.%02d.%02d eetrack %04x",
		     hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
		     hw->aq.api_maj_ver, hw->aq.api_min_ver,
		     ((hw->nvm.version >> 12) & 0xf),
		     ((hw->nvm.version >> 4) & 0xff),
		     (hw->nvm.version & 0xf), hw->nvm.eetrack);

	/* Clear PXE mode */
	i40e_clear_pxe_mode(hw);

	/*
	 * On X710, performance number is far from the expectation on recent
	 * firmware versions. The fix for this issue may not be integrated in
	 * the following firmware version. So the workaround in software driver
	 * is needed. It needs to modify the initial values of 3 internal only
	 * registers. Note that the workaround can be removed when it is fixed
	 * in firmware in the future.
	 */
	i40e_configure_registers(hw);

	/* Get hw capabilities */
	ret = i40e_get_cap(hw);
	if (ret != I40E_SUCCESS) {
		PMD_INIT_LOG(ERR, "Failed to get capabilities: %d", ret);
		goto err_get_capabilities;
	}

	/* Initialize parameters for PF */
	ret = i40e_pf_parameter_init(dev);
	if (ret != 0) {
		PMD_INIT_LOG(ERR, "Failed to do parameter init: %d", ret);
		goto err_parameter_init;
	}

	/* Initialize the queue management */
	ret = i40e_res_pool_init(&pf->qp_pool, 0, hw->func_caps.num_tx_qp);
	if (ret < 0) {
		PMD_INIT_LOG(ERR, "Failed to init queue pool");
		goto err_qp_pool_init;
	}
	ret = i40e_res_pool_init(&pf->msix_pool, 1,
				 hw->func_caps.num_msix_vectors - 1);
	if (ret < 0) {
		PMD_INIT_LOG(ERR, "Failed to init MSIX pool");
		goto err_msix_pool_init;
	}

	/* Initialize lan hmc */
	ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
				hw->func_caps.num_rx_qp, 0, 0);
	if (ret != I40E_SUCCESS) {
		PMD_INIT_LOG(ERR, "Failed to init lan hmc: %d", ret);
		goto err_init_lan_hmc;
	}

	/* Configure lan hmc */
	ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
	if (ret != I40E_SUCCESS) {
		PMD_INIT_LOG(ERR, "Failed to configure lan hmc: %d", ret);
		goto err_configure_lan_hmc;
	}

	/* Get and check the mac address */
	i40e_get_mac_addr(hw, hw->mac.addr);
	if (i40e_validate_mac_addr(hw->mac.addr) != I40E_SUCCESS) {
		PMD_INIT_LOG(ERR, "mac address is not valid");
		ret = -EIO;
		goto err_get_mac_addr;
	}
	/* Copy the permanent MAC address */
	ether_addr_copy((struct ether_addr *) hw->mac.addr,
			(struct ether_addr *) hw->mac.perm_addr);

	/* Disable flow control */
	hw->fc.requested_mode = I40E_FC_NONE;
	i40e_set_fc(hw, &aq_fail, TRUE);

	/* PF setup, which includes VSI setup */
	ret = i40e_pf_setup(pf);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to setup pf switch: %d", ret);
		goto err_setup_pf_switch;
	}

	vsi = pf->main_vsi;

	/* Disable double vlan by default */
	i40e_vsi_config_double_vlan(vsi, FALSE);

	if (!vsi->max_macaddrs)
		len = ETHER_ADDR_LEN;
	else
		len = ETHER_ADDR_LEN * vsi->max_macaddrs;

	/* Should be after VSI initialized */
	dev->data->mac_addrs = rte_zmalloc("i40e", len, 0);
	if (!dev->data->mac_addrs) {
		PMD_INIT_LOG(ERR, "Failed to allocate memory "
			     "for storing mac address");
		ret = -ENOMEM;
		goto err_mac_alloc;
	}
	ether_addr_copy((struct ether_addr *)hw->mac.perm_addr,
			&dev->data->mac_addrs[0]);

	/* initialize pf host driver to setup SRIOV resource if applicable */
	i40e_pf_host_init(dev);

	/* register callback func to eal lib */
	rte_intr_callback_register(&(pci_dev->intr_handle),
				   i40e_dev_interrupt_handler, (void *)dev);

	/* configure and enable device interrupt */
	i40e_pf_config_irq0(hw, TRUE);
	i40e_pf_enable_irq0(hw);

	/* enable uio intr after callback register */
	rte_intr_enable(&(pci_dev->intr_handle));
	/*
	 * Add an ethertype filter to drop all flow control frames transmitted
	 * from VSIs. By doing so, we stop VF from sending out PAUSE or PFC
	 * frames.
	 */
	i40e_add_tx_flow_control_drop_filter(pf);

	/* initialize mirror rule list */
	TAILQ_INIT(&pf->mirror_list);

	/* Init dcb to sw mode by default */
	ret = i40e_dcb_init_configure(dev, TRUE);
	if (ret != I40E_SUCCESS) {
		PMD_INIT_LOG(INFO, "Failed to init dcb.");
		pf->flags &= ~I40E_FLAG_DCB;
	}

	return 0;

err_mac_alloc:
	i40e_vsi_release(pf->main_vsi);
err_setup_pf_switch:
err_get_mac_addr:
err_configure_lan_hmc:
	(void)i40e_shutdown_lan_hmc(hw);
err_init_lan_hmc:
	i40e_res_pool_destroy(&pf->msix_pool);
err_msix_pool_init:
	i40e_res_pool_destroy(&pf->qp_pool);
err_qp_pool_init:
err_parameter_init:
err_get_capabilities:
	(void)i40e_shutdown_adminq(hw);

	return ret;
}
static int
eth_i40e_dev_uninit(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev;
	struct i40e_hw *hw;
	struct i40e_filter_control_settings settings;
	int ret;
	uint8_t aq_fail = 0;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	pci_dev = dev->pci_dev;

	if (hw->adapter_stopped == 0)
		i40e_dev_close(dev);

	dev->dev_ops = NULL;
	dev->rx_pkt_burst = NULL;
	dev->tx_pkt_burst = NULL;

	/* Disable LLDP */
	ret = i40e_aq_stop_lldp(hw, true, NULL);
	if (ret != I40E_SUCCESS) /* Its failure can be ignored */
		PMD_INIT_LOG(INFO, "Failed to stop lldp");

	/* Clear PXE mode */
	i40e_clear_pxe_mode(hw);

	/* Unconfigure filter control */
	memset(&settings, 0, sizeof(settings));
	ret = i40e_set_filter_control(hw, &settings);
	if (ret)
		PMD_INIT_LOG(WARNING, "setup_pf_filter_control failed: %d",
			     ret);

	/* Disable flow control */
	hw->fc.requested_mode = I40E_FC_NONE;
	i40e_set_fc(hw, &aq_fail, TRUE);

	/* uninitialize pf host driver */
	i40e_pf_host_uninit(dev);

	rte_free(dev->data->mac_addrs);
	dev->data->mac_addrs = NULL;

	/* disable uio intr before callback unregister */
	rte_intr_disable(&(pci_dev->intr_handle));

	/* unregister callback func from eal lib */
	rte_intr_callback_unregister(&(pci_dev->intr_handle),
				     i40e_dev_interrupt_handler, (void *)dev);

	return 0;
}
static int
i40e_dev_configure(struct rte_eth_dev *dev)
{
	struct i40e_adapter *ad =
		I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	enum rte_eth_rx_mq_mode mq_mode = dev->data->dev_conf.rxmode.mq_mode;
	int i, ret;

	/* Initialize to TRUE. If any of Rx queues doesn't meet the
	 * bulk allocation or vector Rx preconditions we will reset it.
	 */
	ad->rx_bulk_alloc_allowed = true;
	ad->rx_vec_allowed = true;
	ad->tx_simple_allowed = true;
	ad->tx_vec_allowed = true;

	if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_PERFECT) {
		ret = i40e_fdir_setup(pf);
		if (ret != I40E_SUCCESS) {
			PMD_DRV_LOG(ERR, "Failed to setup flow director.");
			return -ENOTSUP;
		}
		ret = i40e_fdir_configure(dev);
		if (ret < 0) {
			PMD_DRV_LOG(ERR, "failed to configure fdir.");
			goto err;
		}
	} else
		i40e_fdir_teardown(pf);

	ret = i40e_dev_init_vlan(dev);
	if (ret < 0)
		goto err;

	/*
	 * Needs to move VMDQ setting out of i40e_pf_config_mq_rx() as VMDQ and
	 * RSS setting have different requirements.
	 * General PMD driver call sequence are NIC init, configure,
	 * rx/tx_queue_setup and dev_start. In rx/tx_queue_setup() function, it
	 * will try to lookup the VSI that specific queue belongs to if VMDQ
	 * applicable. So, VMDQ setting has to be done before
	 * rx/tx_queue_setup(). This function is good to place vmdq_setup.
	 * For RSS setting, it will try to calculate actual configured RX queue
	 * number, which will be available after rx_queue_setup(). dev_start()
	 * function is good to place RSS setup.
	 */
	if (mq_mode & ETH_MQ_RX_VMDQ_FLAG) {
		ret = i40e_vmdq_setup(dev);
		if (ret)
			goto err;
	}

	if (mq_mode & ETH_MQ_RX_DCB_FLAG) {
		ret = i40e_dcb_setup(dev);
		if (ret) {
			PMD_DRV_LOG(ERR, "failed to configure DCB.");
			goto err_dcb;
		}
	}

	return 0;

err_dcb:
	/* need to release vmdq resource if exists */
	for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
		i40e_vsi_release(pf->vmdq[i].vsi);
		pf->vmdq[i].vsi = NULL;
	}
	rte_free(pf->vmdq);
	pf->vmdq = NULL;
err:
	/* need to release fdir resource if exists */
	i40e_fdir_teardown(pf);
	return ret;
}
static void
i40e_vsi_queues_unbind_intr(struct i40e_vsi *vsi)
{
	struct rte_eth_dev *dev = vsi->adapter->eth_dev;
	struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
	uint16_t msix_vect = vsi->msix_intr;
	uint16_t i;

	for (i = 0; i < vsi->nb_qps; i++) {
		I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);
		I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), 0);
	}

	if (vsi->type != I40E_VSI_SRIOV) {
		if (!rte_intr_allow_others(intr_handle)) {
			I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
				       I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK);
			I40E_WRITE_REG(hw,
				       I40E_PFINT_ITR0(I40E_ITR_INDEX_DEFAULT),
				       0);
		} else {
			I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1),
				       I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK);
			I40E_WRITE_REG(hw,
				       I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
						       msix_vect - 1),
				       0);
		}
	} else {
		uint32_t reg;

		reg = (hw->func_caps.num_msix_vectors_vf - 1) *
			vsi->user_param + (msix_vect - 1);

		I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg),
			       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
	}

	I40E_WRITE_FLUSH(hw);
}
static inline uint16_t
i40e_calc_itr_interval(int16_t interval)
{
	if (interval < 0 || interval > I40E_QUEUE_ITR_INTERVAL_MAX)
		interval = I40E_QUEUE_ITR_INTERVAL_DEFAULT;

	/* Convert to hardware count, as writing each 1 represents 2 us */
	return (interval / 2);
}
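/*
 * Example: the ITR registers count in 2 us units, so the default 32 us
 * interval from I40E_QUEUE_ITR_INTERVAL_DEFAULT is written as 32 / 2 = 16,
 * and the 8160 us maximum as 4080.
 */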
static void
__vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t msix_vect,
		       int base_queue, int nb_queue)
{
	int i;
	uint32_t val;
	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);

	/* Bind all RX queues to allocated MSIX interrupt */
	for (i = 0; i < nb_queue; i++) {
		val = (msix_vect << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
			I40E_QINT_RQCTL_ITR_INDX_MASK |
			((base_queue + i + 1) <<
			 I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
			(0 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
			I40E_QINT_RQCTL_CAUSE_ENA_MASK;

		if (i == nb_queue - 1)
			val |= I40E_QINT_RQCTL_NEXTQ_INDX_MASK;
		I40E_WRITE_REG(hw, I40E_QINT_RQCTL(base_queue + i), val);
	}

	/* Write first RX queue to Link list register as the head element */
	if (vsi->type != I40E_VSI_SRIOV) {
		uint16_t interval =
			i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL);

		if (msix_vect == I40E_MISC_VEC_ID) {
			I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
				       (base_queue <<
					I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
				       (0x0 <<
					I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
			I40E_WRITE_REG(hw,
				       I40E_PFINT_ITR0(I40E_ITR_INDEX_DEFAULT),
				       interval);
		} else {
			I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1),
				       (base_queue <<
					I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
				       (0x0 <<
					I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
			I40E_WRITE_REG(hw,
				       I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
						       msix_vect - 1),
				       interval);
		}
	} else {
		uint32_t reg;

		/* num_msix_vectors_vf needs to minus irq0 */
		reg = (hw->func_caps.num_msix_vectors_vf - 1) *
			vsi->user_param + (msix_vect - 1);

		I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg), (base_queue <<
			       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
			       (0x0 << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
	}

	I40E_WRITE_FLUSH(hw);
}
static void
i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi)
{
	struct rte_eth_dev *dev = vsi->adapter->eth_dev;
	struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
	uint16_t msix_vect = vsi->msix_intr;
	uint16_t nb_msix = RTE_MIN(vsi->nb_msix, intr_handle->nb_efd);
	uint16_t queue_idx = 0;
	int record = 0;
	uint32_t val;
	int i;

	for (i = 0; i < vsi->nb_qps; i++) {
		I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);
		I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), 0);
	}

	/* INTENA flag is not auto-cleared for interrupt */
	val = I40E_READ_REG(hw, I40E_GLINT_CTL);
	val |= I40E_GLINT_CTL_DIS_AUTOMASK_PF0_MASK |
	       I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK |
	       I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK;
	I40E_WRITE_REG(hw, I40E_GLINT_CTL, val);

	/* VF bind interrupt */
	if (vsi->type == I40E_VSI_SRIOV) {
		__vsi_queues_bind_intr(vsi, msix_vect,
				       vsi->base_queue, vsi->nb_qps);
		return;
	}

	/* PF & VMDq bind interrupt */
	if (rte_intr_dp_is_en(intr_handle)) {
		if (vsi->type == I40E_VSI_MAIN) {
			queue_idx = 0;
			record = 1;
		} else if (vsi->type == I40E_VSI_VMDQ2) {
			struct i40e_vsi *main_vsi =
				I40E_DEV_PRIVATE_TO_MAIN_VSI(vsi->adapter);
			queue_idx = vsi->base_queue - main_vsi->nb_qps;
			record = 1;
		}
	}

	for (i = 0; i < vsi->nb_used_qps; i++) {
		if (nb_msix <= 1) {
			if (!rte_intr_allow_others(intr_handle))
				/* allow to share MISC_VEC_ID */
				msix_vect = I40E_MISC_VEC_ID;

			/* no enough msix_vect, map all to one */
			__vsi_queues_bind_intr(vsi, msix_vect,
					       vsi->base_queue + i,
					       vsi->nb_used_qps - i);
			for (; !!record && i < vsi->nb_used_qps; i++)
				intr_handle->intr_vec[queue_idx + i] =
					msix_vect;
			break;
		}
		/* 1:1 queue/msix_vect mapping */
		__vsi_queues_bind_intr(vsi, msix_vect,
				       vsi->base_queue + i, 1);
		if (!!record)
			intr_handle->intr_vec[queue_idx + i] = msix_vect;

		msix_vect++;
		nb_msix--;
	}
}
static void
i40e_vsi_enable_queues_intr(struct i40e_vsi *vsi)
{
	struct rte_eth_dev *dev = vsi->adapter->eth_dev;
	struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
	uint16_t interval =
		i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL);
	uint16_t msix_intr, i;

	if (rte_intr_allow_others(intr_handle))
		for (i = 0; i < vsi->nb_msix; i++) {
			msix_intr = vsi->msix_intr + i;
			I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(msix_intr - 1),
				I40E_PFINT_DYN_CTLN_INTENA_MASK |
				I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
				(0 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
				(interval <<
				 I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT));
		}
	else
		I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
			       I40E_PFINT_DYN_CTL0_INTENA_MASK |
			       I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
			       (0 << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT) |
			       (interval <<
				I40E_PFINT_DYN_CTL0_INTERVAL_SHIFT));

	I40E_WRITE_FLUSH(hw);
}
static void
i40e_vsi_disable_queues_intr(struct i40e_vsi *vsi)
{
	struct rte_eth_dev *dev = vsi->adapter->eth_dev;
	struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
	uint16_t msix_intr, i;

	if (rte_intr_allow_others(intr_handle))
		for (i = 0; i < vsi->nb_msix; i++) {
			msix_intr = vsi->msix_intr + i;
			I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(msix_intr - 1),
				       0);
		}
	else
		I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, 0);

	I40E_WRITE_FLUSH(hw);
}
static inline uint8_t
i40e_parse_link_speed(uint16_t eth_link_speed)
{
	uint8_t link_speed = I40E_LINK_SPEED_UNKNOWN;

	switch (eth_link_speed) {
	case ETH_LINK_SPEED_40G:
		link_speed = I40E_LINK_SPEED_40GB;
		break;
	case ETH_LINK_SPEED_20G:
		link_speed = I40E_LINK_SPEED_20GB;
		break;
	case ETH_LINK_SPEED_10G:
		link_speed = I40E_LINK_SPEED_10GB;
		break;
	case ETH_LINK_SPEED_1000:
		link_speed = I40E_LINK_SPEED_1GB;
		break;
	case ETH_LINK_SPEED_100:
		link_speed = I40E_LINK_SPEED_100MB;
		break;
	}

	return link_speed;
}
static int
i40e_phy_conf_link(struct i40e_hw *hw, uint8_t abilities, uint8_t force_speed)
{
	enum i40e_status_code status;
	struct i40e_aq_get_phy_abilities_resp phy_ab;
	struct i40e_aq_set_phy_config phy_conf;
	const uint8_t mask = I40E_AQ_PHY_FLAG_PAUSE_TX |
			I40E_AQ_PHY_FLAG_PAUSE_RX |
			I40E_AQ_PHY_FLAG_LOW_POWER;
	const uint8_t advt = I40E_LINK_SPEED_40GB |
			I40E_LINK_SPEED_10GB |
			I40E_LINK_SPEED_1GB |
			I40E_LINK_SPEED_100MB;
	int ret = -ENOTSUP;

	/* Skip it on 40G interfaces, as a workaround for the link issue */
	if (i40e_is_40G_device(hw->device_id))
		return I40E_SUCCESS;

	status = i40e_aq_get_phy_capabilities(hw, false, false, &phy_ab,
					      NULL);
	if (status)
		return ret;

	memset(&phy_conf, 0, sizeof(phy_conf));

	/* bits 0-2 use the values from get_phy_abilities_resp */
	abilities &= ~mask;
	abilities |= phy_ab.abilities & mask;

	/* update abilities and speed */
	if (abilities & I40E_AQ_PHY_AN_ENABLED)
		phy_conf.link_speed = advt;
	else
		phy_conf.link_speed = force_speed;

	phy_conf.abilities = abilities;

	/* use get_phy_abilities_resp value for the rest */
	phy_conf.phy_type = phy_ab.phy_type;
	phy_conf.eee_capability = phy_ab.eee_capability;
	phy_conf.eeer = phy_ab.eeer_val;
	phy_conf.low_power_ctrl = phy_ab.d3_lpan;

	PMD_DRV_LOG(DEBUG, "\tCurrent: abilities %x, link_speed %x",
		    phy_ab.abilities, phy_ab.link_speed);
	PMD_DRV_LOG(DEBUG, "\tConfig:  abilities %x, link_speed %x",
		    phy_conf.abilities, phy_conf.link_speed);

	status = i40e_aq_set_phy_config(hw, &phy_conf, NULL);
	if (status)
		return ret;

	return I40E_SUCCESS;
}
static int
i40e_apply_link_speed(struct rte_eth_dev *dev)
{
	uint8_t speed;
	uint8_t abilities = 0;
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_eth_conf *conf = &dev->data->dev_conf;

	speed = i40e_parse_link_speed(conf->link_speed);
	abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
	if (conf->link_speed == ETH_LINK_SPEED_AUTONEG)
		abilities |= I40E_AQ_PHY_AN_ENABLED;
	else
		abilities |= I40E_AQ_PHY_LINK_ENABLED;

	return i40e_phy_conf_link(hw, abilities, speed);
}
static int
i40e_dev_start(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_vsi *main_vsi = pf->main_vsi;
	int ret, i;
	struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
	uint32_t intr_vector = 0;

	hw->adapter_stopped = 0;

	if ((dev->data->dev_conf.link_duplex != ETH_LINK_AUTONEG_DUPLEX) &&
	    (dev->data->dev_conf.link_duplex != ETH_LINK_FULL_DUPLEX)) {
		PMD_INIT_LOG(ERR, "Invalid link_duplex (%hu) for port %hhu",
			     dev->data->dev_conf.link_duplex,
			     dev->data->port_id);
		return -EINVAL;
	}

	rte_intr_disable(intr_handle);

	if ((rte_intr_cap_multiple(intr_handle) ||
	     !RTE_ETH_DEV_SRIOV(dev).active) &&
	    dev->data->dev_conf.intr_conf.rxq != 0) {
		intr_vector = dev->data->nb_rx_queues;
		if (rte_intr_efd_enable(intr_handle, intr_vector))
			return -1;
	}

	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
		intr_handle->intr_vec =
			rte_zmalloc("intr_vec",
				    dev->data->nb_rx_queues * sizeof(int),
				    0);
		if (!intr_handle->intr_vec) {
			PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
				     " intr_vec\n", dev->data->nb_rx_queues);
			return -ENOMEM;
		}
	}

	/* Initialize VSI */
	ret = i40e_dev_rxtx_init(pf);
	if (ret != I40E_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to init rx/tx queues");
		goto err_up;
	}

	/* Map queues with MSIX interrupt */
	main_vsi->nb_used_qps = dev->data->nb_rx_queues -
		pf->nb_cfg_vmdq_vsi * RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
	i40e_vsi_queues_bind_intr(main_vsi);
	i40e_vsi_enable_queues_intr(main_vsi);

	/* Map VMDQ VSI queues with MSIX interrupt */
	for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
		pf->vmdq[i].vsi->nb_used_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
		i40e_vsi_queues_bind_intr(pf->vmdq[i].vsi);
		i40e_vsi_enable_queues_intr(pf->vmdq[i].vsi);
	}

	/* enable FDIR MSIX interrupt */
	if (pf->fdir.fdir_vsi) {
		i40e_vsi_queues_bind_intr(pf->fdir.fdir_vsi);
		i40e_vsi_enable_queues_intr(pf->fdir.fdir_vsi);
	}

	/* Enable all queues which have been configured */
	ret = i40e_dev_switch_queues(pf, TRUE);
	if (ret != I40E_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to enable VSI");
		goto err_up;
	}

	/* Enable receiving broadcast packets */
	ret = i40e_aq_set_vsi_broadcast(hw, main_vsi->seid, true, NULL);
	if (ret != I40E_SUCCESS)
		PMD_DRV_LOG(INFO, "fail to set vsi broadcast");

	for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
		ret = i40e_aq_set_vsi_broadcast(hw, pf->vmdq[i].vsi->seid,
						true, NULL);
		if (ret != I40E_SUCCESS)
			PMD_DRV_LOG(INFO, "fail to set vsi broadcast");
	}

	/* Apply link configure */
	ret = i40e_apply_link_speed(dev);
	if (I40E_SUCCESS != ret) {
		PMD_DRV_LOG(ERR, "Fail to apply link setting");
		goto err_up;
	}

	if (!rte_intr_allow_others(intr_handle)) {
		rte_intr_callback_unregister(intr_handle,
					     i40e_dev_interrupt_handler,
					     (void *)dev);
		/* configure and enable device interrupt */
		i40e_pf_config_irq0(hw, FALSE);
		i40e_pf_enable_irq0(hw);

		if (dev->data->dev_conf.intr_conf.lsc != 0)
			PMD_INIT_LOG(INFO, "lsc won't enable because of"
				     " no intr multiplex\n");
	}

	/* enable uio intr after callback register */
	rte_intr_enable(intr_handle);

	return I40E_SUCCESS;

err_up:
	i40e_dev_switch_queues(pf, FALSE);
	i40e_dev_clear_queues(dev);

	return ret;
}
static void
i40e_dev_stop(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_vsi *main_vsi = pf->main_vsi;
	struct i40e_mirror_rule *p_mirror;
	struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
	int i;

	/* Disable all queues */
	i40e_dev_switch_queues(pf, FALSE);

	/* un-map queues with interrupt registers */
	i40e_vsi_disable_queues_intr(main_vsi);
	i40e_vsi_queues_unbind_intr(main_vsi);

	for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
		i40e_vsi_disable_queues_intr(pf->vmdq[i].vsi);
		i40e_vsi_queues_unbind_intr(pf->vmdq[i].vsi);
	}

	if (pf->fdir.fdir_vsi) {
		i40e_vsi_disable_queues_intr(pf->fdir.fdir_vsi);
		i40e_vsi_queues_unbind_intr(pf->fdir.fdir_vsi);
	}
	/* Clear all queues and release memory */
	i40e_dev_clear_queues(dev);

	/* Set link down */
	i40e_dev_set_link_down(dev);

	/* Remove all mirror rules */
	while ((p_mirror = TAILQ_FIRST(&pf->mirror_list))) {
		TAILQ_REMOVE(&pf->mirror_list, p_mirror, rules);
		rte_free(p_mirror);
	}
	pf->nb_mirror_rule = 0;

	if (!rte_intr_allow_others(intr_handle))
		/* resume to the default handler */
		rte_intr_callback_register(intr_handle,
					   i40e_dev_interrupt_handler,
					   (void *)dev);

	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
	if (intr_handle->intr_vec) {
		rte_free(intr_handle->intr_vec);
		intr_handle->intr_vec = NULL;
	}
}
static void
i40e_dev_close(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t reg;
	int i;

	PMD_INIT_FUNC_TRACE();

	i40e_dev_stop(dev);
	hw->adapter_stopped = 1;
	i40e_dev_free_queues(dev);

	/* Disable interrupt */
	i40e_pf_disable_irq0(hw);
	rte_intr_disable(&(dev->pci_dev->intr_handle));

	/* shutdown and destroy the HMC */
	i40e_shutdown_lan_hmc(hw);

	/* release all the existing VSIs and VEBs */
	i40e_fdir_teardown(pf);
	i40e_vsi_release(pf->main_vsi);

	for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
		i40e_vsi_release(pf->vmdq[i].vsi);
		pf->vmdq[i].vsi = NULL;
	}

	rte_free(pf->vmdq);
	pf->vmdq = NULL;

	/* shutdown the adminq */
	i40e_aq_queue_shutdown(hw, true);
	i40e_shutdown_adminq(hw);

	i40e_res_pool_destroy(&pf->qp_pool);
	i40e_res_pool_destroy(&pf->msix_pool);

	/* force a PF reset to clean anything leftover */
	reg = I40E_READ_REG(hw, I40E_PFGEN_CTRL);
	I40E_WRITE_REG(hw, I40E_PFGEN_CTRL,
		       (reg | I40E_PFGEN_CTRL_PFSWR_MASK));
	I40E_WRITE_FLUSH(hw);
}
static void
i40e_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_vsi *vsi = pf->main_vsi;
	int status;

	status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
						     TRUE, NULL);
	if (status != I40E_SUCCESS)
		PMD_DRV_LOG(ERR, "Failed to enable unicast promiscuous");

	status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
						       TRUE, NULL);
	if (status != I40E_SUCCESS)
		PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous");
}

static void
i40e_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_vsi *vsi = pf->main_vsi;
	int status;

	status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
						     FALSE, NULL);
	if (status != I40E_SUCCESS)
		PMD_DRV_LOG(ERR, "Failed to disable unicast promiscuous");

	status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
						       FALSE, NULL);
	if (status != I40E_SUCCESS)
		PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous");
}

static void
i40e_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_vsi *vsi = pf->main_vsi;
	int ret;

	ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, TRUE, NULL);
	if (ret != I40E_SUCCESS)
		PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous");
}

static void
i40e_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_vsi *vsi = pf->main_vsi;
	int ret;

	if (dev->data->promiscuous == 1)
		return; /* must remain in all_multicast mode */

	ret = i40e_aq_set_vsi_multicast_promiscuous(hw,
						    vsi->seid, FALSE, NULL);
	if (ret != I40E_SUCCESS)
		PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous");
}
/*
 * Set device link up.
 */
static int
i40e_dev_set_link_up(struct rte_eth_dev *dev)
{
	/* re-apply link speed setting */
	return i40e_apply_link_speed(dev);
}

/*
 * Set device link down.
 */
static int
i40e_dev_set_link_down(struct rte_eth_dev *dev)
{
	uint8_t speed = I40E_LINK_SPEED_UNKNOWN;
	uint8_t abilities = I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	return i40e_phy_conf_link(hw, abilities, speed);
}
int
i40e_dev_link_update(struct rte_eth_dev *dev,
		     int wait_to_complete)
{
#define CHECK_INTERVAL 100  /* 100ms */
#define MAX_REPEAT_TIME 10  /* 1s (10 * 100ms) in total */
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_link_status link_status;
	struct rte_eth_link link, old;
	int status;
	unsigned rep_cnt = MAX_REPEAT_TIME;

	memset(&link, 0, sizeof(link));
	memset(&old, 0, sizeof(old));
	memset(&link_status, 0, sizeof(link_status));
	rte_i40e_dev_atomic_read_link_status(dev, &old);

	do {
		/* Get link status information from hardware */
		status = i40e_aq_get_link_info(hw, false, &link_status, NULL);
		if (status != I40E_SUCCESS) {
			link.link_speed = ETH_LINK_SPEED_100;
			link.link_duplex = ETH_LINK_FULL_DUPLEX;
			PMD_DRV_LOG(ERR, "Failed to get link info");
			goto out;
		}

		link.link_status = link_status.link_info & I40E_AQ_LINK_UP;
		if (!wait_to_complete)
			break;

		rte_delay_ms(CHECK_INTERVAL);
	} while (!link.link_status && rep_cnt--);

	if (!link.link_status)
		goto out;

	/* i40e uses full duplex only */
	link.link_duplex = ETH_LINK_FULL_DUPLEX;

	/* Parse the link status */
	switch (link_status.link_speed) {
	case I40E_LINK_SPEED_100MB:
		link.link_speed = ETH_LINK_SPEED_100;
		break;
	case I40E_LINK_SPEED_1GB:
		link.link_speed = ETH_LINK_SPEED_1000;
		break;
	case I40E_LINK_SPEED_10GB:
		link.link_speed = ETH_LINK_SPEED_10G;
		break;
	case I40E_LINK_SPEED_20GB:
		link.link_speed = ETH_LINK_SPEED_20G;
		break;
	case I40E_LINK_SPEED_40GB:
		link.link_speed = ETH_LINK_SPEED_40G;
		break;
	default:
		link.link_speed = ETH_LINK_SPEED_100;
		break;
	}

out:
	rte_i40e_dev_atomic_write_link_status(dev, &link);
	if (link.link_status == old.link_status)
		return -1;

	return 0;
}
/* Get all the statistics of a VSI */
static void
i40e_update_vsi_stats(struct i40e_vsi *vsi)
{
	struct i40e_eth_stats *oes = &vsi->eth_stats_offset;
	struct i40e_eth_stats *nes = &vsi->eth_stats;
	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
	int idx = rte_le_to_cpu_16(vsi->info.stat_counter_idx);

	i40e_stat_update_48(hw, I40E_GLV_GORCH(idx), I40E_GLV_GORCL(idx),
			    vsi->offset_loaded, &oes->rx_bytes,
			    &nes->rx_bytes);
	i40e_stat_update_48(hw, I40E_GLV_UPRCH(idx), I40E_GLV_UPRCL(idx),
			    vsi->offset_loaded, &oes->rx_unicast,
			    &nes->rx_unicast);
	i40e_stat_update_48(hw, I40E_GLV_MPRCH(idx), I40E_GLV_MPRCL(idx),
			    vsi->offset_loaded, &oes->rx_multicast,
			    &nes->rx_multicast);
	i40e_stat_update_48(hw, I40E_GLV_BPRCH(idx), I40E_GLV_BPRCL(idx),
			    vsi->offset_loaded, &oes->rx_broadcast,
			    &nes->rx_broadcast);
	i40e_stat_update_32(hw, I40E_GLV_RDPC(idx), vsi->offset_loaded,
			    &oes->rx_discards, &nes->rx_discards);
	/* GLV_REPC not supported */
	/* GLV_RMPC not supported */
	i40e_stat_update_32(hw, I40E_GLV_RUPP(idx), vsi->offset_loaded,
			    &oes->rx_unknown_protocol,
			    &nes->rx_unknown_protocol);
	i40e_stat_update_48(hw, I40E_GLV_GOTCH(idx), I40E_GLV_GOTCL(idx),
			    vsi->offset_loaded, &oes->tx_bytes,
			    &nes->tx_bytes);
	i40e_stat_update_48(hw, I40E_GLV_UPTCH(idx), I40E_GLV_UPTCL(idx),
			    vsi->offset_loaded, &oes->tx_unicast,
			    &nes->tx_unicast);
	i40e_stat_update_48(hw, I40E_GLV_MPTCH(idx), I40E_GLV_MPTCL(idx),
			    vsi->offset_loaded, &oes->tx_multicast,
			    &nes->tx_multicast);
	i40e_stat_update_48(hw, I40E_GLV_BPTCH(idx), I40E_GLV_BPTCL(idx),
			    vsi->offset_loaded, &oes->tx_broadcast,
			    &nes->tx_broadcast);
	/* GLV_TDPC not supported */
	i40e_stat_update_32(hw, I40E_GLV_TEPC(idx), vsi->offset_loaded,
			    &oes->tx_errors, &nes->tx_errors);
	vsi->offset_loaded = true;

	PMD_DRV_LOG(DEBUG, "***************** VSI[%u] stats start *******************",
		    vsi->vsi_id);
	PMD_DRV_LOG(DEBUG, "rx_bytes:            %"PRIu64"", nes->rx_bytes);
	PMD_DRV_LOG(DEBUG, "rx_unicast:          %"PRIu64"", nes->rx_unicast);
	PMD_DRV_LOG(DEBUG, "rx_multicast:        %"PRIu64"", nes->rx_multicast);
	PMD_DRV_LOG(DEBUG, "rx_broadcast:        %"PRIu64"", nes->rx_broadcast);
	PMD_DRV_LOG(DEBUG, "rx_discards:         %"PRIu64"", nes->rx_discards);
	PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
		    nes->rx_unknown_protocol);
	PMD_DRV_LOG(DEBUG, "tx_bytes:            %"PRIu64"", nes->tx_bytes);
	PMD_DRV_LOG(DEBUG, "tx_unicast:          %"PRIu64"", nes->tx_unicast);
	PMD_DRV_LOG(DEBUG, "tx_multicast:        %"PRIu64"", nes->tx_multicast);
	PMD_DRV_LOG(DEBUG, "tx_broadcast:        %"PRIu64"", nes->tx_broadcast);
	PMD_DRV_LOG(DEBUG, "tx_discards:         %"PRIu64"", nes->tx_discards);
	PMD_DRV_LOG(DEBUG, "tx_errors:           %"PRIu64"", nes->tx_errors);
	PMD_DRV_LOG(DEBUG, "***************** VSI[%u] stats end *******************",
		    vsi->vsi_id);
}
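/*
 * Explanatory note (added comment): the GLV_* registers read above are
 * the per-VSI statistic counters, indexed by the VSI's stat_counter_idx,
 * while the GLPRT_* registers read below are the per-port counters.
 * Both families are free-running, so i40e_stat_update_32()/_48()
 * (defined elsewhere in this file) report deltas: on the first read,
 * when offset_loaded is false, the raw value is latched into the
 * "offset" copy, and later reads return raw minus offset, with the
 * 32/48-bit wrap-around handled inside those helpers.
 */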
static void
i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw)
{
	unsigned int i;
	struct i40e_hw_port_stats *ns = &pf->stats; /* new stats */
	struct i40e_hw_port_stats *os = &pf->stats_offset; /* old stats */

	/* Get statistics of struct i40e_eth_stats */
	i40e_stat_update_48(hw, I40E_GLPRT_GORCH(hw->port),
			    I40E_GLPRT_GORCL(hw->port),
			    pf->offset_loaded, &os->eth.rx_bytes,
			    &ns->eth.rx_bytes);
	i40e_stat_update_48(hw, I40E_GLPRT_UPRCH(hw->port),
			    I40E_GLPRT_UPRCL(hw->port),
			    pf->offset_loaded, &os->eth.rx_unicast,
			    &ns->eth.rx_unicast);
	i40e_stat_update_48(hw, I40E_GLPRT_MPRCH(hw->port),
			    I40E_GLPRT_MPRCL(hw->port),
			    pf->offset_loaded, &os->eth.rx_multicast,
			    &ns->eth.rx_multicast);
	i40e_stat_update_48(hw, I40E_GLPRT_BPRCH(hw->port),
			    I40E_GLPRT_BPRCL(hw->port),
			    pf->offset_loaded, &os->eth.rx_broadcast,
			    &ns->eth.rx_broadcast);
	i40e_stat_update_32(hw, I40E_GLPRT_RDPC(hw->port),
			    pf->offset_loaded, &os->eth.rx_discards,
			    &ns->eth.rx_discards);
	/* GLPRT_REPC not supported */
	/* GLPRT_RMPC not supported */
	i40e_stat_update_32(hw, I40E_GLPRT_RUPP(hw->port),
			    pf->offset_loaded,
			    &os->eth.rx_unknown_protocol,
			    &ns->eth.rx_unknown_protocol);
	i40e_stat_update_48(hw, I40E_GLPRT_GOTCH(hw->port),
			    I40E_GLPRT_GOTCL(hw->port),
			    pf->offset_loaded, &os->eth.tx_bytes,
			    &ns->eth.tx_bytes);
	i40e_stat_update_48(hw, I40E_GLPRT_UPTCH(hw->port),
			    I40E_GLPRT_UPTCL(hw->port),
			    pf->offset_loaded, &os->eth.tx_unicast,
			    &ns->eth.tx_unicast);
	i40e_stat_update_48(hw, I40E_GLPRT_MPTCH(hw->port),
			    I40E_GLPRT_MPTCL(hw->port),
			    pf->offset_loaded, &os->eth.tx_multicast,
			    &ns->eth.tx_multicast);
	i40e_stat_update_48(hw, I40E_GLPRT_BPTCH(hw->port),
			    I40E_GLPRT_BPTCL(hw->port),
			    pf->offset_loaded, &os->eth.tx_broadcast,
			    &ns->eth.tx_broadcast);
	/* GLPRT_TEPC not supported */

	/* additional port specific stats */
	i40e_stat_update_32(hw, I40E_GLPRT_TDOLD(hw->port),
			    pf->offset_loaded, &os->tx_dropped_link_down,
			    &ns->tx_dropped_link_down);
	i40e_stat_update_32(hw, I40E_GLPRT_CRCERRS(hw->port),
			    pf->offset_loaded, &os->crc_errors,
			    &ns->crc_errors);
	i40e_stat_update_32(hw, I40E_GLPRT_ILLERRC(hw->port),
			    pf->offset_loaded, &os->illegal_bytes,
			    &ns->illegal_bytes);
	/* GLPRT_ERRBC not supported */
	i40e_stat_update_32(hw, I40E_GLPRT_MLFC(hw->port),
			    pf->offset_loaded, &os->mac_local_faults,
			    &ns->mac_local_faults);
	i40e_stat_update_32(hw, I40E_GLPRT_MRFC(hw->port),
			    pf->offset_loaded, &os->mac_remote_faults,
			    &ns->mac_remote_faults);
	i40e_stat_update_32(hw, I40E_GLPRT_RLEC(hw->port),
			    pf->offset_loaded, &os->rx_length_errors,
			    &ns->rx_length_errors);
	i40e_stat_update_32(hw, I40E_GLPRT_LXONRXC(hw->port),
			    pf->offset_loaded, &os->link_xon_rx,
			    &ns->link_xon_rx);
	i40e_stat_update_32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
			    pf->offset_loaded, &os->link_xoff_rx,
			    &ns->link_xoff_rx);
	for (i = 0; i < 8; i++) {
		i40e_stat_update_32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
				    pf->offset_loaded,
				    &os->priority_xon_rx[i],
				    &ns->priority_xon_rx[i]);
		i40e_stat_update_32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
				    pf->offset_loaded,
				    &os->priority_xoff_rx[i],
				    &ns->priority_xoff_rx[i]);
	}
	i40e_stat_update_32(hw, I40E_GLPRT_LXONTXC(hw->port),
			    pf->offset_loaded, &os->link_xon_tx,
			    &ns->link_xon_tx);
	i40e_stat_update_32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
			    pf->offset_loaded, &os->link_xoff_tx,
			    &ns->link_xoff_tx);
	for (i = 0; i < 8; i++) {
		i40e_stat_update_32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
				    pf->offset_loaded,
				    &os->priority_xon_tx[i],
				    &ns->priority_xon_tx[i]);
		i40e_stat_update_32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
				    pf->offset_loaded,
				    &os->priority_xoff_tx[i],
				    &ns->priority_xoff_tx[i]);
		i40e_stat_update_32(hw, I40E_GLPRT_RXON2OFFCNT(hw->port, i),
				    pf->offset_loaded,
				    &os->priority_xon_2_xoff[i],
				    &ns->priority_xon_2_xoff[i]);
	}
	i40e_stat_update_48(hw, I40E_GLPRT_PRC64H(hw->port),
			    I40E_GLPRT_PRC64L(hw->port),
			    pf->offset_loaded, &os->rx_size_64,
			    &ns->rx_size_64);
	i40e_stat_update_48(hw, I40E_GLPRT_PRC127H(hw->port),
			    I40E_GLPRT_PRC127L(hw->port),
			    pf->offset_loaded, &os->rx_size_127,
			    &ns->rx_size_127);
	i40e_stat_update_48(hw, I40E_GLPRT_PRC255H(hw->port),
			    I40E_GLPRT_PRC255L(hw->port),
			    pf->offset_loaded, &os->rx_size_255,
			    &ns->rx_size_255);
	i40e_stat_update_48(hw, I40E_GLPRT_PRC511H(hw->port),
			    I40E_GLPRT_PRC511L(hw->port),
			    pf->offset_loaded, &os->rx_size_511,
			    &ns->rx_size_511);
	i40e_stat_update_48(hw, I40E_GLPRT_PRC1023H(hw->port),
			    I40E_GLPRT_PRC1023L(hw->port),
			    pf->offset_loaded, &os->rx_size_1023,
			    &ns->rx_size_1023);
	i40e_stat_update_48(hw, I40E_GLPRT_PRC1522H(hw->port),
			    I40E_GLPRT_PRC1522L(hw->port),
			    pf->offset_loaded, &os->rx_size_1522,
			    &ns->rx_size_1522);
	i40e_stat_update_48(hw, I40E_GLPRT_PRC9522H(hw->port),
			    I40E_GLPRT_PRC9522L(hw->port),
			    pf->offset_loaded, &os->rx_size_big,
			    &ns->rx_size_big);
	i40e_stat_update_32(hw, I40E_GLPRT_RUC(hw->port),
			    pf->offset_loaded, &os->rx_undersize,
			    &ns->rx_undersize);
	i40e_stat_update_32(hw, I40E_GLPRT_RFC(hw->port),
			    pf->offset_loaded, &os->rx_fragments,
			    &ns->rx_fragments);
	i40e_stat_update_32(hw, I40E_GLPRT_ROC(hw->port),
			    pf->offset_loaded, &os->rx_oversize,
			    &ns->rx_oversize);
	i40e_stat_update_32(hw, I40E_GLPRT_RJC(hw->port),
			    pf->offset_loaded, &os->rx_jabber,
			    &ns->rx_jabber);
	i40e_stat_update_48(hw, I40E_GLPRT_PTC64H(hw->port),
			    I40E_GLPRT_PTC64L(hw->port),
			    pf->offset_loaded, &os->tx_size_64,
			    &ns->tx_size_64);
	i40e_stat_update_48(hw, I40E_GLPRT_PTC127H(hw->port),
			    I40E_GLPRT_PTC127L(hw->port),
			    pf->offset_loaded, &os->tx_size_127,
			    &ns->tx_size_127);
	i40e_stat_update_48(hw, I40E_GLPRT_PTC255H(hw->port),
			    I40E_GLPRT_PTC255L(hw->port),
			    pf->offset_loaded, &os->tx_size_255,
			    &ns->tx_size_255);
	i40e_stat_update_48(hw, I40E_GLPRT_PTC511H(hw->port),
			    I40E_GLPRT_PTC511L(hw->port),
			    pf->offset_loaded, &os->tx_size_511,
			    &ns->tx_size_511);
	i40e_stat_update_48(hw, I40E_GLPRT_PTC1023H(hw->port),
			    I40E_GLPRT_PTC1023L(hw->port),
			    pf->offset_loaded, &os->tx_size_1023,
			    &ns->tx_size_1023);
	i40e_stat_update_48(hw, I40E_GLPRT_PTC1522H(hw->port),
			    I40E_GLPRT_PTC1522L(hw->port),
			    pf->offset_loaded, &os->tx_size_1522,
			    &ns->tx_size_1522);
	i40e_stat_update_48(hw, I40E_GLPRT_PTC9522H(hw->port),
			    I40E_GLPRT_PTC9522L(hw->port),
			    pf->offset_loaded, &os->tx_size_big,
			    &ns->tx_size_big);
	i40e_stat_update_32(hw, I40E_GLQF_PCNT(pf->fdir.match_counter_index),
			    pf->offset_loaded,
			    &os->fd_sb_match, &ns->fd_sb_match);
	/* GLPRT_MSPDC not supported */
	/* GLPRT_XEC not supported */

	pf->offset_loaded = true;

	if (pf->main_vsi)
		i40e_update_vsi_stats(pf->main_vsi);
}
/* Get all statistics of a port */
static void
i40e_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_hw_port_stats *ns = &pf->stats; /* new stats */
	unsigned i;

	/* call read registers - updates values, now write them to struct */
	i40e_read_stats_registers(pf, hw);

	stats->ipackets = pf->main_vsi->eth_stats.rx_unicast +
			pf->main_vsi->eth_stats.rx_multicast +
			pf->main_vsi->eth_stats.rx_broadcast -
			pf->main_vsi->eth_stats.rx_discards;
	stats->opackets = pf->main_vsi->eth_stats.tx_unicast +
			pf->main_vsi->eth_stats.tx_multicast +
			pf->main_vsi->eth_stats.tx_broadcast;
	stats->ibytes = pf->main_vsi->eth_stats.rx_bytes;
	stats->obytes = pf->main_vsi->eth_stats.tx_bytes;
	stats->oerrors = ns->eth.tx_errors +
			pf->main_vsi->eth_stats.tx_errors;
	stats->imcasts = pf->main_vsi->eth_stats.rx_multicast;
	stats->fdirmatch = ns->fd_sb_match;

	/* Rx Errors */
	stats->ibadcrc = ns->crc_errors;
	stats->ibadlen = ns->rx_length_errors + ns->rx_undersize +
			ns->rx_oversize + ns->rx_fragments + ns->rx_jabber;
	stats->imissed = ns->eth.rx_discards +
			pf->main_vsi->eth_stats.rx_discards;
	stats->ierrors = stats->ibadcrc + stats->ibadlen + stats->imissed;

	PMD_DRV_LOG(DEBUG, "***************** PF stats start *******************");
	PMD_DRV_LOG(DEBUG, "rx_bytes:            %"PRIu64"", ns->eth.rx_bytes);
	PMD_DRV_LOG(DEBUG, "rx_unicast:          %"PRIu64"", ns->eth.rx_unicast);
	PMD_DRV_LOG(DEBUG, "rx_multicast:        %"PRIu64"", ns->eth.rx_multicast);
	PMD_DRV_LOG(DEBUG, "rx_broadcast:        %"PRIu64"", ns->eth.rx_broadcast);
	PMD_DRV_LOG(DEBUG, "rx_discards:         %"PRIu64"", ns->eth.rx_discards);
	PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
		    ns->eth.rx_unknown_protocol);
	PMD_DRV_LOG(DEBUG, "tx_bytes:            %"PRIu64"", ns->eth.tx_bytes);
	PMD_DRV_LOG(DEBUG, "tx_unicast:          %"PRIu64"", ns->eth.tx_unicast);
	PMD_DRV_LOG(DEBUG, "tx_multicast:        %"PRIu64"", ns->eth.tx_multicast);
	PMD_DRV_LOG(DEBUG, "tx_broadcast:        %"PRIu64"", ns->eth.tx_broadcast);
	PMD_DRV_LOG(DEBUG, "tx_discards:         %"PRIu64"", ns->eth.tx_discards);
	PMD_DRV_LOG(DEBUG, "tx_errors:           %"PRIu64"", ns->eth.tx_errors);

	PMD_DRV_LOG(DEBUG, "tx_dropped_link_down:     %"PRIu64"",
		    ns->tx_dropped_link_down);
	PMD_DRV_LOG(DEBUG, "crc_errors:               %"PRIu64"", ns->crc_errors);
	PMD_DRV_LOG(DEBUG, "illegal_bytes:            %"PRIu64"",
		    ns->illegal_bytes);
	PMD_DRV_LOG(DEBUG, "error_bytes:              %"PRIu64"", ns->error_bytes);
	PMD_DRV_LOG(DEBUG, "mac_local_faults:         %"PRIu64"",
		    ns->mac_local_faults);
	PMD_DRV_LOG(DEBUG, "mac_remote_faults:        %"PRIu64"",
		    ns->mac_remote_faults);
	PMD_DRV_LOG(DEBUG, "rx_length_errors:         %"PRIu64"",
		    ns->rx_length_errors);
	PMD_DRV_LOG(DEBUG, "link_xon_rx:              %"PRIu64"", ns->link_xon_rx);
	PMD_DRV_LOG(DEBUG, "link_xoff_rx:             %"PRIu64"", ns->link_xoff_rx);
	for (i = 0; i < 8; i++) {
		PMD_DRV_LOG(DEBUG, "priority_xon_rx[%d]:      %"PRIu64"",
			    i, ns->priority_xon_rx[i]);
		PMD_DRV_LOG(DEBUG, "priority_xoff_rx[%d]:     %"PRIu64"",
			    i, ns->priority_xoff_rx[i]);
	}
	PMD_DRV_LOG(DEBUG, "link_xon_tx:              %"PRIu64"", ns->link_xon_tx);
	PMD_DRV_LOG(DEBUG, "link_xoff_tx:             %"PRIu64"", ns->link_xoff_tx);
	for (i = 0; i < 8; i++) {
		PMD_DRV_LOG(DEBUG, "priority_xon_tx[%d]:      %"PRIu64"",
			    i, ns->priority_xon_tx[i]);
		PMD_DRV_LOG(DEBUG, "priority_xoff_tx[%d]:     %"PRIu64"",
			    i, ns->priority_xoff_tx[i]);
		PMD_DRV_LOG(DEBUG, "priority_xon_2_xoff[%d]:  %"PRIu64"",
			    i, ns->priority_xon_2_xoff[i]);
	}
	PMD_DRV_LOG(DEBUG, "rx_size_64:               %"PRIu64"", ns->rx_size_64);
	PMD_DRV_LOG(DEBUG, "rx_size_127:              %"PRIu64"", ns->rx_size_127);
	PMD_DRV_LOG(DEBUG, "rx_size_255:              %"PRIu64"", ns->rx_size_255);
	PMD_DRV_LOG(DEBUG, "rx_size_511:              %"PRIu64"", ns->rx_size_511);
	PMD_DRV_LOG(DEBUG, "rx_size_1023:             %"PRIu64"", ns->rx_size_1023);
	PMD_DRV_LOG(DEBUG, "rx_size_1522:             %"PRIu64"", ns->rx_size_1522);
	PMD_DRV_LOG(DEBUG, "rx_size_big:              %"PRIu64"", ns->rx_size_big);
	PMD_DRV_LOG(DEBUG, "rx_undersize:             %"PRIu64"", ns->rx_undersize);
	PMD_DRV_LOG(DEBUG, "rx_fragments:             %"PRIu64"", ns->rx_fragments);
	PMD_DRV_LOG(DEBUG, "rx_oversize:              %"PRIu64"", ns->rx_oversize);
	PMD_DRV_LOG(DEBUG, "rx_jabber:                %"PRIu64"", ns->rx_jabber);
	PMD_DRV_LOG(DEBUG, "tx_size_64:               %"PRIu64"", ns->tx_size_64);
	PMD_DRV_LOG(DEBUG, "tx_size_127:              %"PRIu64"", ns->tx_size_127);
	PMD_DRV_LOG(DEBUG, "tx_size_255:              %"PRIu64"", ns->tx_size_255);
	PMD_DRV_LOG(DEBUG, "tx_size_511:              %"PRIu64"", ns->tx_size_511);
	PMD_DRV_LOG(DEBUG, "tx_size_1023:             %"PRIu64"", ns->tx_size_1023);
	PMD_DRV_LOG(DEBUG, "tx_size_1522:             %"PRIu64"", ns->tx_size_1522);
	PMD_DRV_LOG(DEBUG, "tx_size_big:              %"PRIu64"", ns->tx_size_big);
	PMD_DRV_LOG(DEBUG, "mac_short_packet_dropped: %"PRIu64"",
		    ns->mac_short_packet_dropped);
	PMD_DRV_LOG(DEBUG, "checksum_error:           %"PRIu64"",
		    ns->checksum_error);
	PMD_DRV_LOG(DEBUG, "fdir_match:               %"PRIu64"", ns->fd_sb_match);
	PMD_DRV_LOG(DEBUG, "***************** PF stats end ********************");
}
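/*
 * Explanatory note (added comment): the generic rte_eth_stats counters
 * are derived from the main VSI counters rather than the port-level
 * ones, e.g.
 *
 *	ipackets = rx_unicast + rx_multicast + rx_broadcast - rx_discards
 *	ierrors  = ibadcrc + ibadlen + imissed
 *
 * so packets dropped at the VSI level are excluded from ipackets and
 * accounted in imissed instead.
 */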
static void
i40e_dev_xstats_reset(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_hw_port_stats *hw_stats = &pf->stats;

	/* The hw registers are cleared on read */
	pf->offset_loaded = false;
	i40e_read_stats_registers(pf, hw);

	/* reset software counters */
	memset(hw_stats, 0, sizeof(*hw_stats));
}
static int
i40e_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats,
		    unsigned n)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	unsigned i, count = 0;
	struct i40e_hw_port_stats *hw_stats = &pf->stats;

	if (n < I40E_NB_XSTATS)
		return I40E_NB_XSTATS;

	i40e_read_stats_registers(pf, hw);

	if (xstats == NULL)
		return 0;

	/* Get stats from i40e_eth_stats struct */
	for (i = 0; i < I40E_NB_ETH_XSTATS; i++) {
		snprintf(xstats[count].name, sizeof(xstats[count].name),
			 "%s", rte_i40e_stats_strings[i].name);
		xstats[count].value = *(uint64_t *)(((char *)&hw_stats->eth) +
			rte_i40e_stats_strings[i].offset);
		count++;
	}

	/* Get individual stats from i40e_hw_port struct */
	for (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++) {
		snprintf(xstats[count].name, sizeof(xstats[count].name),
			 "%s", rte_i40e_hw_port_strings[i].name);
		xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
			rte_i40e_hw_port_strings[i].offset);
		count++;
	}

	/* Get per-queue stats from i40e_hw_port struct */
	for (i = 0; i < 8; i++) {
		snprintf(xstats[count].name, sizeof(xstats[count].name),
			 "rx_q%u_xon_priority_packets", i);
		xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
			offsetof(struct i40e_hw_port_stats,
				 priority_xon_rx[i]));
		count++;

		snprintf(xstats[count].name, sizeof(xstats[count].name),
			 "rx_q%u_xoff_priority_packets", i);
		xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
			offsetof(struct i40e_hw_port_stats,
				 priority_xoff_rx[i]));
		count++;

		snprintf(xstats[count].name, sizeof(xstats[count].name),
			 "tx_q%u_xon_priority_packets", i);
		xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
			offsetof(struct i40e_hw_port_stats,
				 priority_xon_tx[i]));
		count++;

		snprintf(xstats[count].name, sizeof(xstats[count].name),
			 "tx_q%u_xoff_priority_packets", i);
		xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
			offsetof(struct i40e_hw_port_stats,
				 priority_xoff_tx[i]));
		count++;

		snprintf(xstats[count].name, sizeof(xstats[count].name),
			 "rx_q%u_xon_to_xoff_priority_packets", i);
		xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
			offsetof(struct i40e_hw_port_stats,
				 priority_xon_2_xoff[i]));
		count++;
	}

	return I40E_NB_XSTATS;
}
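/*
 * Explanatory note (added comment): I40E_NB_XSTATS and the
 * rte_i40e_stats_strings / rte_i40e_hw_port_strings tables are
 * presumably defined earlier in this file (they are not shown here);
 * for the loops above to be consistent, I40E_NB_XSTATS has to equal
 * the basic ethernet stats plus the port-level stats plus the 5 * 8
 * per-priority flow control counters filled in by the last loop.
 */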
/* Reset the statistics */
static void
i40e_dev_stats_reset(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);

	/* It results in reloading the start point of each counter */
	pf->offset_loaded = false;
}
static int
i40e_dev_queue_stats_mapping_set(__rte_unused struct rte_eth_dev *dev,
				 __rte_unused uint16_t queue_id,
				 __rte_unused uint8_t stat_idx,
				 __rte_unused uint8_t is_rx)
{
	PMD_INIT_FUNC_TRACE();

	return -ENOSYS;
}
static void
i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_vsi *vsi = pf->main_vsi;

	dev_info->max_rx_queues = vsi->nb_qps;
	dev_info->max_tx_queues = vsi->nb_qps;
	dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
	dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
	dev_info->max_mac_addrs = vsi->max_macaddrs;
	dev_info->max_vfs = dev->pci_dev->max_vfs;
	dev_info->rx_offload_capa =
		DEV_RX_OFFLOAD_VLAN_STRIP |
		DEV_RX_OFFLOAD_QINQ_STRIP |
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM;
	dev_info->tx_offload_capa =
		DEV_TX_OFFLOAD_VLAN_INSERT |
		DEV_TX_OFFLOAD_QINQ_INSERT |
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM |
		DEV_TX_OFFLOAD_SCTP_CKSUM |
		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
		DEV_TX_OFFLOAD_TCP_TSO;
	dev_info->hash_key_size = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
		sizeof(uint32_t);
	dev_info->reta_size = pf->hash_lut_size;
	dev_info->flow_type_rss_offloads = I40E_RSS_OFFLOAD_ALL;

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_thresh = {
			.pthresh = I40E_DEFAULT_RX_PTHRESH,
			.hthresh = I40E_DEFAULT_RX_HTHRESH,
			.wthresh = I40E_DEFAULT_RX_WTHRESH,
		},
		.rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH,
		.rx_drop_en = 0,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_thresh = {
			.pthresh = I40E_DEFAULT_TX_PTHRESH,
			.hthresh = I40E_DEFAULT_TX_HTHRESH,
			.wthresh = I40E_DEFAULT_TX_WTHRESH,
		},
		.tx_free_thresh = I40E_DEFAULT_TX_FREE_THRESH,
		.tx_rs_thresh = I40E_DEFAULT_TX_RSBIT_THRESH,
		.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
			     ETH_TXQ_FLAGS_NOOFFLOADS,
	};

	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = I40E_MAX_RING_DESC,
		.nb_min = I40E_MIN_RING_DESC,
		.nb_align = I40E_ALIGN_RING_DESC,
	};

	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = I40E_MAX_RING_DESC,
		.nb_min = I40E_MIN_RING_DESC,
		.nb_align = I40E_ALIGN_RING_DESC,
	};

	if (pf->flags & I40E_FLAG_VMDQ) {
		dev_info->max_vmdq_pools = pf->max_nb_vmdq_vsi;
		dev_info->vmdq_queue_base = dev_info->max_rx_queues;
		dev_info->vmdq_queue_num = pf->vmdq_nb_qps *
						pf->max_nb_vmdq_vsi;
		dev_info->vmdq_pool_base = I40E_VMDQ_POOL_BASE;
		dev_info->max_rx_queues += dev_info->vmdq_queue_num;
		dev_info->max_tx_queues += dev_info->vmdq_queue_num;
	}
}
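/*
 * Explanatory note (added comment): when VMDq is enabled, the queue
 * space advertised above is laid out as the main VSI queues first,
 * followed by the VMDq pool queues starting at vmdq_queue_base.
 * Illustrative numbers only: with 64 LAN queues and 4 VMDq VSIs of
 * 4 queues each, vmdq_queue_base = 64, vmdq_queue_num = 16 and
 * max_rx_queues = 80.
 */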
static int
i40e_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_vsi *vsi = pf->main_vsi;

	PMD_INIT_FUNC_TRACE();

	if (on)
		return i40e_vsi_add_vlan(vsi, vlan_id);
	else
		return i40e_vsi_delete_vlan(vsi, vlan_id);
}
static void
i40e_vlan_tpid_set(__rte_unused struct rte_eth_dev *dev,
		   __rte_unused uint16_t tpid)
{
	PMD_INIT_FUNC_TRACE();
}
static void
i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_vsi *vsi = pf->main_vsi;

	if (mask & ETH_VLAN_STRIP_MASK) {
		/* Enable or disable VLAN stripping */
		if (dev->data->dev_conf.rxmode.hw_vlan_strip)
			i40e_vsi_config_vlan_stripping(vsi, TRUE);
		else
			i40e_vsi_config_vlan_stripping(vsi, FALSE);
	}

	if (mask & ETH_VLAN_EXTEND_MASK) {
		if (dev->data->dev_conf.rxmode.hw_vlan_extend)
			i40e_vsi_config_double_vlan(vsi, TRUE);
		else
			i40e_vsi_config_double_vlan(vsi, FALSE);
	}
}
static void
i40e_vlan_strip_queue_set(__rte_unused struct rte_eth_dev *dev,
			  __rte_unused uint16_t queue,
			  __rte_unused int on)
{
	PMD_INIT_FUNC_TRACE();
}
static int
i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_vsi *vsi = pf->main_vsi;
	struct rte_eth_dev_data *data = I40E_VSI_TO_DEV_DATA(vsi);
	struct i40e_vsi_vlan_pvid_info info;

	memset(&info, 0, sizeof(info));
	info.on = on;
	if (info.on)
		info.config.pvid = pvid;
	else {
		info.config.reject.tagged =
			data->dev_conf.txmode.hw_vlan_reject_tagged;
		info.config.reject.untagged =
			data->dev_conf.txmode.hw_vlan_reject_untagged;
	}

	return i40e_vsi_vlan_pvid_set(vsi, &info);
}
static int
i40e_dev_led_on(struct rte_eth_dev *dev)
{
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t mode = i40e_led_get(hw);

	if (mode == 0)
		i40e_led_set(hw, 0xf, true); /* 0xf means led always true */

	return 0;
}

static int
i40e_dev_led_off(struct rte_eth_dev *dev)
{
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t mode = i40e_led_get(hw);

	if (mode != 0)
		i40e_led_set(hw, 0, false);

	return 0;
}
static int
i40e_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);

	fc_conf->pause_time = pf->fc_conf.pause_time;
	fc_conf->high_water = pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS];
	fc_conf->low_water = pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS];

	/* Return current mode according to actual setting */
	switch (hw->fc.current_mode) {
	case I40E_FC_FULL:
		fc_conf->mode = RTE_FC_FULL;
		break;
	case I40E_FC_TX_PAUSE:
		fc_conf->mode = RTE_FC_TX_PAUSE;
		break;
	case I40E_FC_RX_PAUSE:
		fc_conf->mode = RTE_FC_RX_PAUSE;
		break;
	case I40E_FC_NONE:
	default:
		fc_conf->mode = RTE_FC_NONE;
	}

	return 0;
}
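/*
 * Explanatory note for i40e_flow_ctrl_set() below (added comment): the
 * watermarks travel in kilobytes.  For the packet-based watermark
 * registers they are converted as, e.g., for a 3 KB high water mark:
 *
 *	bytes   = 3 << I40E_KILOSHIFT;			(3072 bytes)
 *	packets = bytes / I40E_PACKET_AVERAGE_SIZE;	(24 packets)
 *
 * A hypothetical application-side sketch (not part of this file):
 *
 *	struct rte_eth_fc_conf fc = {
 *		.mode = RTE_FC_FULL,
 *		.pause_time = 0xFFFF,
 *		.high_water = 3,	(KB)
 *		.low_water = 1,		(KB)
 *	};
 *	rte_eth_dev_flow_ctrl_set(port_id, &fc);
 */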
static int
i40e_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	uint32_t mflcn_reg, fctrl_reg, reg;
	uint32_t max_high_water;
	uint8_t i, aq_failure;
	int err;
	struct i40e_hw *hw;
	struct i40e_pf *pf;
	enum i40e_fc_mode rte_fcmode_2_i40e_fcmode[] = {
		[RTE_FC_NONE] = I40E_FC_NONE,
		[RTE_FC_RX_PAUSE] = I40E_FC_RX_PAUSE,
		[RTE_FC_TX_PAUSE] = I40E_FC_TX_PAUSE,
		[RTE_FC_FULL] = I40E_FC_FULL
	};

	/* high_water field in rte_eth_fc_conf uses the kilobytes unit */
	max_high_water = I40E_RXPBSIZE >> I40E_KILOSHIFT;
	if ((fc_conf->high_water > max_high_water) ||
	    (fc_conf->high_water < fc_conf->low_water)) {
		PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB, "
			     "high_water must be <= %d.", max_high_water);
		return -EINVAL;
	}

	hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	hw->fc.requested_mode = rte_fcmode_2_i40e_fcmode[fc_conf->mode];

	pf->fc_conf.pause_time = fc_conf->pause_time;
	pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] = fc_conf->high_water;
	pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] = fc_conf->low_water;

	PMD_INIT_FUNC_TRACE();

	/* All the link flow control related enable/disable register
	 * configuration is handled by the F/W
	 */
	err = i40e_set_fc(hw, &aq_failure, true);
	if (err < 0)
		return -ENOSYS;

	if (i40e_is_40G_device(hw->device_id)) {
		/* Configure flow control refresh threshold,
		 * the value for stat_tx_pause_refresh_timer[8]
		 * is used for global pause operation.
		 */
		I40E_WRITE_REG(hw,
			       I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(8),
			       pf->fc_conf.pause_time);

		/* configure the timer value included in transmitted pause
		 * frames; the value for stat_tx_pause_quanta[8] is used for
		 * global pause operation
		 */
		I40E_WRITE_REG(hw, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(8),
			       pf->fc_conf.pause_time);

		fctrl_reg = I40E_READ_REG(hw,
				I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL);

		if (fc_conf->mac_ctrl_frame_fwd != 0)
			fctrl_reg |= I40E_PRTMAC_FWD_CTRL;
		else
			fctrl_reg &= ~I40E_PRTMAC_FWD_CTRL;

		I40E_WRITE_REG(hw, I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL,
			       fctrl_reg);
	} else {
		/* Configure pause time (2 TCs per register) */
		reg = (uint32_t)pf->fc_conf.pause_time * (uint32_t)0x00010001;
		for (i = 0; i < I40E_MAX_TRAFFIC_CLASS / 2; i++)
			I40E_WRITE_REG(hw, I40E_PRTDCB_FCTTVN(i), reg);

		/* Configure flow control refresh threshold value */
		I40E_WRITE_REG(hw, I40E_PRTDCB_FCRTV,
			       pf->fc_conf.pause_time / 2);

		mflcn_reg = I40E_READ_REG(hw, I40E_PRTDCB_MFLCN);

		/* set or clear MFLCN.PMCF & MFLCN.DPF bits
		 * depending on configuration
		 */
		if (fc_conf->mac_ctrl_frame_fwd != 0) {
			mflcn_reg |= I40E_PRTDCB_MFLCN_PMCF_MASK;
			mflcn_reg &= ~I40E_PRTDCB_MFLCN_DPF_MASK;
		} else {
			mflcn_reg &= ~I40E_PRTDCB_MFLCN_PMCF_MASK;
			mflcn_reg |= I40E_PRTDCB_MFLCN_DPF_MASK;
		}

		I40E_WRITE_REG(hw, I40E_PRTDCB_MFLCN, mflcn_reg);
	}

	/* config the water marker both based on the packets and bytes */
	I40E_WRITE_REG(hw, I40E_GLRPB_PHW,
		       (pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS]
			<< I40E_KILOSHIFT) / I40E_PACKET_AVERAGE_SIZE);
	I40E_WRITE_REG(hw, I40E_GLRPB_PLW,
		       (pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS]
			<< I40E_KILOSHIFT) / I40E_PACKET_AVERAGE_SIZE);
	I40E_WRITE_REG(hw, I40E_GLRPB_GHW,
		       pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS]
		       << I40E_KILOSHIFT);
	I40E_WRITE_REG(hw, I40E_GLRPB_GLW,
		       pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS]
		       << I40E_KILOSHIFT);

	I40E_WRITE_FLUSH(hw);

	return 0;
}
static int
i40e_priority_flow_ctrl_set(__rte_unused struct rte_eth_dev *dev,
			    __rte_unused struct rte_eth_pfc_conf *pfc_conf)
{
	PMD_INIT_FUNC_TRACE();

	return -ENOSYS;
}
/* Add a MAC address, and update filters */
static void
i40e_macaddr_add(struct rte_eth_dev *dev,
		 struct ether_addr *mac_addr,
		 __rte_unused uint32_t index,
		 uint32_t pool)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_mac_filter_info mac_filter;
	struct i40e_vsi *vsi;
	int ret;

	/* If VMDQ not enabled or configured, return */
	if (pool != 0 && (!(pf->flags & I40E_FLAG_VMDQ) ||
			  !pf->nb_cfg_vmdq_vsi)) {
		PMD_DRV_LOG(ERR, "VMDQ not %s, can't set mac to pool %u",
			    pf->flags & I40E_FLAG_VMDQ ?
			    "configured" : "enabled", pool);
		return;
	}

	if (pool > pf->nb_cfg_vmdq_vsi) {
		PMD_DRV_LOG(ERR, "Pool number %u invalid. Max pool is %u",
			    pool, pf->nb_cfg_vmdq_vsi);
		return;
	}

	(void)rte_memcpy(&mac_filter.mac_addr, mac_addr, ETHER_ADDR_LEN);
	mac_filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;

	if (pool == 0)
		vsi = pf->main_vsi;
	else
		vsi = pf->vmdq[pool - 1].vsi;

	ret = i40e_vsi_add_mac(vsi, &mac_filter);
	if (ret != I40E_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter");
		return;
	}
}
/* Remove a MAC address, and update filters */
static void
i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_vsi *vsi;
	struct rte_eth_dev_data *data = dev->data;
	struct ether_addr *macaddr;
	int ret;
	uint32_t i;
	uint64_t pool_sel;

	macaddr = &(data->mac_addrs[index]);

	pool_sel = dev->data->mac_pool_sel[index];

	for (i = 0; i < sizeof(pool_sel) * CHAR_BIT; i++) {
		if (pool_sel & (1ULL << i)) {
			if (i == 0)
				vsi = pf->main_vsi;
			else {
				/* No VMDQ pool enabled or configured */
				if (!(pf->flags & I40E_FLAG_VMDQ) ||
				    (i > pf->nb_cfg_vmdq_vsi)) {
					PMD_DRV_LOG(ERR, "No VMDQ pool "
						    "enabled/configured");
					return;
				}
				vsi = pf->vmdq[i - 1].vsi;
			}
			ret = i40e_vsi_delete_mac(vsi, macaddr);

			if (ret) {
				PMD_DRV_LOG(ERR, "Failed to remove MACVLAN filter");
				return;
			}
		}
	}
}
/* Set perfect match or hash match of MAC and VLAN for a VF */
static int
i40e_vf_mac_filter_set(struct i40e_pf *pf,
		       struct rte_eth_mac_filter *filter,
		       bool add)
{
	struct i40e_hw *hw;
	struct i40e_mac_filter_info mac_filter;
	struct ether_addr old_mac;
	struct ether_addr *new_mac;
	struct i40e_pf_vf *vf = NULL;
	uint16_t vf_id;
	int ret;

	if (pf == NULL) {
		PMD_DRV_LOG(ERR, "Invalid PF argument.");
		return -EINVAL;
	}
	hw = I40E_PF_TO_HW(pf);

	if (filter == NULL) {
		PMD_DRV_LOG(ERR, "Invalid mac filter argument.");
		return -EINVAL;
	}

	new_mac = &filter->mac_addr;

	if (is_zero_ether_addr(new_mac)) {
		PMD_DRV_LOG(ERR, "Invalid ethernet address.");
		return -EINVAL;
	}

	vf_id = filter->dst_id;

	if (vf_id > pf->vf_num - 1 || !pf->vfs) {
		PMD_DRV_LOG(ERR, "Invalid argument.");
		return -EINVAL;
	}
	vf = &pf->vfs[vf_id];

	if (add && is_same_ether_addr(new_mac, &(pf->dev_addr))) {
		PMD_DRV_LOG(INFO, "Ignore adding permanent MAC address.");
		return -EINVAL;
	}

	if (add) {
		(void)rte_memcpy(&old_mac, hw->mac.addr, ETHER_ADDR_LEN);
		(void)rte_memcpy(hw->mac.addr, new_mac->addr_bytes,
				 ETHER_ADDR_LEN);
		(void)rte_memcpy(&mac_filter.mac_addr, &filter->mac_addr,
				 ETHER_ADDR_LEN);

		mac_filter.filter_type = filter->filter_type;
		ret = i40e_vsi_add_mac(vf->vsi, &mac_filter);
		if (ret != I40E_SUCCESS) {
			PMD_DRV_LOG(ERR, "Failed to add MAC filter.");
			return -1;
		}
		ether_addr_copy(new_mac, &pf->dev_addr);
	} else {
		(void)rte_memcpy(hw->mac.addr, hw->mac.perm_addr,
				 ETHER_ADDR_LEN);
		ret = i40e_vsi_delete_mac(vf->vsi, &filter->mac_addr);
		if (ret != I40E_SUCCESS) {
			PMD_DRV_LOG(ERR, "Failed to delete MAC filter.");
			return -1;
		}

		/* Clear device address as it has been removed */
		if (is_same_ether_addr(&(pf->dev_addr), new_mac))
			memset(&pf->dev_addr, 0, sizeof(struct ether_addr));
	}

	return 0;
}
/* MAC filter handle */
static int
i40e_mac_filter_handle(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
		       void *arg)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct rte_eth_mac_filter *filter;
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	int ret = I40E_NOT_SUPPORTED;

	filter = (struct rte_eth_mac_filter *)(arg);

	switch (filter_op) {
	case RTE_ETH_FILTER_NOP:
		ret = I40E_SUCCESS;
		break;
	case RTE_ETH_FILTER_ADD:
		i40e_pf_disable_irq0(hw);
		if (filter->is_vf)
			ret = i40e_vf_mac_filter_set(pf, filter, 1);
		i40e_pf_enable_irq0(hw);
		break;
	case RTE_ETH_FILTER_DELETE:
		i40e_pf_disable_irq0(hw);
		if (filter->is_vf)
			ret = i40e_vf_mac_filter_set(pf, filter, 0);
		i40e_pf_enable_irq0(hw);
		break;
	default:
		PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
		ret = I40E_ERR_PARAM;
		break;
	}

	return ret;
}
static int
i40e_get_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
{
	struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
	int ret;

	if (!lut)
		return -EINVAL;

	if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
		ret = i40e_aq_get_rss_lut(hw, vsi->vsi_id, TRUE,
					  lut, lut_size);
		if (ret) {
			PMD_DRV_LOG(ERR, "Failed to get RSS lookup table");
			return ret;
		}
	} else {
		uint32_t *lut_dw = (uint32_t *)lut;
		uint16_t i, lut_size_dw = lut_size / 4;

		for (i = 0; i < lut_size_dw; i++)
			lut_dw[i] = I40E_READ_REG(hw, I40E_PFQF_HLUT(i));
	}

	return 0;
}

static int
i40e_set_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
{
	struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
	int ret;

	if (!lut)
		return -EINVAL;

	if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
		ret = i40e_aq_set_rss_lut(hw, vsi->vsi_id, TRUE,
					  lut, lut_size);
		if (ret) {
			PMD_DRV_LOG(ERR, "Failed to set RSS lookup table");
			return ret;
		}
	} else {
		uint32_t *lut_dw = (uint32_t *)lut;
		uint16_t i, lut_size_dw = lut_size / 4;

		for (i = 0; i < lut_size_dw; i++)
			I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i), lut_dw[i]);
		I40E_WRITE_FLUSH(hw);
	}

	return 0;
}
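/*
 * Explanatory note (added comment): on adapters flagged
 * I40E_FLAG_RSS_AQ_CAPABLE (set for X722 in i40e_pf_parameter_init()
 * below), the RSS lookup table is accessed through admin queue
 * commands; on other parts it is programmed directly through the
 * I40E_PFQF_HLUT register array, four 8-bit entries per 32-bit
 * register, which is why lut_size is divided by 4 above.
 */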
static int
i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
			 struct rte_eth_rss_reta_entry64 *reta_conf,
			 uint16_t reta_size)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	uint16_t i, lut_size = pf->hash_lut_size;
	uint16_t idx, shift;
	uint8_t *lut;
	int ret;

	if (reta_size != lut_size ||
	    reta_size > ETH_RSS_RETA_SIZE_512) {
		PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
			    "(%d) doesn't match the number hardware can "
			    "support (%d)\n", reta_size, lut_size);
		return -EINVAL;
	}

	lut = rte_zmalloc("i40e_rss_lut", reta_size, 0);
	if (!lut) {
		PMD_DRV_LOG(ERR, "No memory can be allocated");
		return -ENOMEM;
	}
	ret = i40e_get_rss_lut(pf->main_vsi, lut, reta_size);
	if (ret)
		goto out;
	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		if (reta_conf[idx].mask & (1ULL << shift))
			lut[i] = reta_conf[idx].reta[shift];
	}
	ret = i40e_set_rss_lut(pf->main_vsi, lut, reta_size);

out:
	rte_free(lut);

	return ret;
}

static int
i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
			struct rte_eth_rss_reta_entry64 *reta_conf,
			uint16_t reta_size)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	uint16_t i, lut_size = pf->hash_lut_size;
	uint16_t idx, shift;
	uint8_t *lut;
	int ret;

	if (reta_size != lut_size ||
	    reta_size > ETH_RSS_RETA_SIZE_512) {
		PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
			    "(%d) doesn't match the number hardware can "
			    "support (%d)\n", reta_size, lut_size);
		return -EINVAL;
	}

	lut = rte_zmalloc("i40e_rss_lut", reta_size, 0);
	if (!lut) {
		PMD_DRV_LOG(ERR, "No memory can be allocated");
		return -ENOMEM;
	}

	ret = i40e_get_rss_lut(pf->main_vsi, lut, reta_size);
	if (ret)
		goto out;
	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		if (reta_conf[idx].mask & (1ULL << shift))
			reta_conf[idx].reta[shift] = lut[i];
	}

out:
	rte_free(lut);

	return ret;
}
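/*
 * Hypothetical usage sketch (added comment, not part of this file):
 * spreading all RETA entries across the first four queues from an
 * application would look roughly like
 *
 *	struct rte_eth_rss_reta_entry64 reta_conf[8];
 *	for (i = 0; i < 512; i++) {
 *		reta_conf[i / 64].mask |= 1ULL << (i % 64);
 *		reta_conf[i / 64].reta[i % 64] = i % 4;
 *	}
 *	rte_eth_dev_rss_reta_update(port_id, reta_conf, 512);
 *
 * assuming a 512-entry hash LUT, per the ETH_RSS_RETA_SIZE_512 checks
 * above.
 */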
/**
 * i40e_allocate_dma_mem_d - specific memory alloc for shared code (base driver)
 * @hw:   pointer to the HW structure
 * @mem:  pointer to mem struct to fill out
 * @size: size of memory requested
 * @alignment: what to align the allocation to
 **/
enum i40e_status_code
i40e_allocate_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
			struct i40e_dma_mem *mem,
			u64 size,
			u32 alignment)
{
	static uint64_t id = 0;
	const struct rte_memzone *mz = NULL;
	char z_name[RTE_MEMZONE_NAMESIZE];

	if (!mem)
		return I40E_ERR_PARAM;

	id++;
	snprintf(z_name, sizeof(z_name), "i40e_dma_%"PRIu64, id);
#ifdef RTE_LIBRTE_XEN_DOM0
	mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY, 0,
					 alignment, RTE_PGSIZE_2M);
#else
	mz = rte_memzone_reserve_aligned(z_name, size, SOCKET_ID_ANY, 0,
					 alignment);
#endif
	if (!mz)
		return I40E_ERR_NO_MEMORY;

	mem->id = id;
	mem->size = size;
	mem->va = mz->addr;
#ifdef RTE_LIBRTE_XEN_DOM0
	mem->pa = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
#else
	mem->pa = mz->phys_addr;
#endif

	return I40E_SUCCESS;
}

/**
 * i40e_free_dma_mem_d - specific memory free for shared code (base driver)
 * @hw:  pointer to the HW structure
 * @mem: ptr to mem struct to free
 **/
enum i40e_status_code
i40e_free_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
		    struct i40e_dma_mem *mem)
{
	if (!mem || !mem->va)
		return I40E_ERR_PARAM;

	mem->va = NULL;
	mem->pa = (u64)0;

	return I40E_SUCCESS;
}

/**
 * i40e_allocate_virt_mem_d - specific memory alloc for shared code (base driver)
 * @hw:   pointer to the HW structure
 * @mem:  pointer to mem struct to fill out
 * @size: size of memory requested
 **/
enum i40e_status_code
i40e_allocate_virt_mem_d(__attribute__((unused)) struct i40e_hw *hw,
			 struct i40e_virt_mem *mem,
			 u32 size)
{
	if (!mem)
		return I40E_ERR_PARAM;

	mem->size = size;
	mem->va = rte_zmalloc("i40e", size, 0);

	if (mem->va)
		return I40E_SUCCESS;
	else
		return I40E_ERR_NO_MEMORY;
}

/**
 * i40e_free_virt_mem_d - specific memory free for shared code (base driver)
 * @hw:  pointer to the HW structure
 * @mem: pointer to mem struct to free
 **/
enum i40e_status_code
i40e_free_virt_mem_d(__attribute__((unused)) struct i40e_hw *hw,
		     struct i40e_virt_mem *mem)
{
	if (!mem)
		return I40E_ERR_PARAM;

	rte_free(mem->va);
	mem->va = NULL;

	return I40E_SUCCESS;
}

void
i40e_init_spinlock_d(struct i40e_spinlock *sp)
{
	rte_spinlock_init(&sp->spinlock);
}

void
i40e_acquire_spinlock_d(struct i40e_spinlock *sp)
{
	rte_spinlock_lock(&sp->spinlock);
}

void
i40e_release_spinlock_d(struct i40e_spinlock *sp)
{
	rte_spinlock_unlock(&sp->spinlock);
}

void
i40e_destroy_spinlock_d(__attribute__((unused)) struct i40e_spinlock *sp)
{
	return;
}
/*
 * Get the hardware capabilities, which will be parsed
 * and saved into struct i40e_hw.
 */
static int
i40e_get_cap(struct i40e_hw *hw)
{
	struct i40e_aqc_list_capabilities_element_resp *buf;
	uint16_t len, size = 0;
	int ret;

	/* Calculate a huge enough buff for saving response data temporarily */
	len = sizeof(struct i40e_aqc_list_capabilities_element_resp) *
						I40E_MAX_CAP_ELE_NUM;
	buf = rte_zmalloc("i40e", len, 0);
	if (!buf) {
		PMD_DRV_LOG(ERR, "Failed to allocate memory");
		return I40E_ERR_NO_MEMORY;
	}

	/* Get, parse the capabilities and save it to hw */
	ret = i40e_aq_discover_capabilities(hw, buf, len, &size,
			i40e_aqc_opc_list_func_capabilities, NULL);
	if (ret != I40E_SUCCESS)
		PMD_DRV_LOG(ERR, "Failed to discover capabilities");

	/* Free the temporary buffer after being used */
	rte_free(buf);

	return ret;
}
static int
i40e_pf_parameter_init(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	uint16_t qp_count = 0, vsi_count = 0;

	if (dev->pci_dev->max_vfs && !hw->func_caps.sr_iov_1_1) {
		PMD_INIT_LOG(ERR, "HW configuration doesn't support SRIOV");
		return -EINVAL;
	}
	/* Add the parameter init for LFC */
	pf->fc_conf.pause_time = I40E_DEFAULT_PAUSE_TIME;
	pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] = I40E_DEFAULT_HIGH_WATER;
	pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] = I40E_DEFAULT_LOW_WATER;

	pf->flags = I40E_FLAG_HEADER_SPLIT_DISABLED;
	pf->max_num_vsi = hw->func_caps.num_vsis;
	pf->lan_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_PF;
	pf->vmdq_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
	pf->vf_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF;

	/* FDir queue/VSI allocation */
	pf->fdir_qp_offset = 0;
	if (hw->func_caps.fd) {
		pf->flags |= I40E_FLAG_FDIR;
		pf->fdir_nb_qps = I40E_DEFAULT_QP_NUM_FDIR;
	} else {
		pf->fdir_nb_qps = 0;
	}
	qp_count += pf->fdir_nb_qps;
	vsi_count += 1;

	/* LAN queue/VSI allocation */
	pf->lan_qp_offset = pf->fdir_qp_offset + pf->fdir_nb_qps;
	if (!hw->func_caps.rss) {
		pf->lan_nb_qps = 1;
	} else {
		pf->flags |= I40E_FLAG_RSS;
		if (hw->mac.type == I40E_MAC_X722)
			pf->flags |= I40E_FLAG_RSS_AQ_CAPABLE;
		pf->lan_nb_qps = pf->lan_nb_qp_max;
	}
	qp_count += pf->lan_nb_qps;
	vsi_count += 1;

	/* VF queue/VSI allocation */
	pf->vf_qp_offset = pf->lan_qp_offset + pf->lan_nb_qps;
	if (hw->func_caps.sr_iov_1_1 && dev->pci_dev->max_vfs) {
		pf->flags |= I40E_FLAG_SRIOV;
		pf->vf_nb_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF;
		pf->vf_num = dev->pci_dev->max_vfs;
		PMD_DRV_LOG(DEBUG, "%u VF VSIs, %u queues per VF VSI, "
			    "in total %u queues", pf->vf_num, pf->vf_nb_qps,
			    pf->vf_nb_qps * pf->vf_num);
	} else {
		pf->vf_nb_qps = 0;
		pf->vf_num = 0;
	}
	qp_count += pf->vf_nb_qps * pf->vf_num;
	vsi_count += pf->vf_num;

	/* VMDq queue/VSI allocation */
	pf->vmdq_qp_offset = pf->vf_qp_offset + pf->vf_nb_qps * pf->vf_num;
	if (hw->func_caps.vmdq) {
		pf->flags |= I40E_FLAG_VMDQ;
		pf->vmdq_nb_qps = pf->vmdq_nb_qp_max;
		pf->max_nb_vmdq_vsi = 1;
		PMD_DRV_LOG(DEBUG, "%u VMDQ VSIs, %u queues per VMDQ VSI, "
			    "in total %u queues", pf->max_nb_vmdq_vsi,
			    pf->vmdq_nb_qps,
			    pf->vmdq_nb_qps * pf->max_nb_vmdq_vsi);
	} else {
		pf->vmdq_nb_qps = 0;
		pf->max_nb_vmdq_vsi = 0;
	}
	qp_count += pf->vmdq_nb_qps * pf->max_nb_vmdq_vsi;
	vsi_count += pf->max_nb_vmdq_vsi;

	if (hw->func_caps.dcb)
		pf->flags |= I40E_FLAG_DCB;

	if (qp_count > hw->func_caps.num_tx_qp) {
		PMD_DRV_LOG(ERR, "Failed to allocate %u queues, which exceeds "
			    "the hardware maximum %u", qp_count,
			    hw->func_caps.num_tx_qp);
		return -EINVAL;
	}
	if (vsi_count > hw->func_caps.num_vsis) {
		PMD_DRV_LOG(ERR, "Failed to allocate %u VSIs, which exceeds "
			    "the hardware maximum %u", vsi_count,
			    hw->func_caps.num_vsis);
		return -EINVAL;
	}

	return I40E_SUCCESS;
}
static int
i40e_pf_get_switch_config(struct i40e_pf *pf)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	struct i40e_aqc_get_switch_config_resp *switch_config;
	struct i40e_aqc_switch_config_element_resp *element;
	uint16_t start_seid = 0, num_reported;
	int ret;

	switch_config = (struct i40e_aqc_get_switch_config_resp *)\
			rte_zmalloc("i40e", I40E_AQ_LARGE_BUF, 0);
	if (!switch_config) {
		PMD_DRV_LOG(ERR, "Failed to allocate memory");
		return -ENOMEM;
	}

	/* Get the switch configurations */
	ret = i40e_aq_get_switch_config(hw, switch_config,
		I40E_AQ_LARGE_BUF, &start_seid, NULL);
	if (ret != I40E_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to get switch configurations");
		goto fail;
	}
	num_reported = rte_le_to_cpu_16(switch_config->header.num_reported);
	if (num_reported != 1) { /* The number should be 1 */
		PMD_DRV_LOG(ERR, "Wrong number of switch config reported");
		goto fail;
	}

	/* Parse the switch configuration elements */
	element = &(switch_config->element[0]);
	if (element->element_type == I40E_SWITCH_ELEMENT_TYPE_VSI) {
		pf->mac_seid = rte_le_to_cpu_16(element->uplink_seid);
		pf->main_vsi_seid = rte_le_to_cpu_16(element->seid);
	} else
		PMD_DRV_LOG(INFO, "Unknown element type");

fail:
	rte_free(switch_config);

	return ret;
}
static int
i40e_res_pool_init(struct i40e_res_pool_info *pool, uint32_t base,
		   uint32_t num)
{
	struct pool_entry *entry;

	if (pool == NULL || num == 0)
		return -EINVAL;

	entry = rte_zmalloc("i40e", sizeof(*entry), 0);
	if (entry == NULL) {
		PMD_DRV_LOG(ERR, "Failed to allocate memory for resource pool");
		return -ENOMEM;
	}

	/* queue heap initialize */
	pool->num_free = num;
	pool->num_alloc = 0;
	pool->base = base;
	LIST_INIT(&pool->alloc_list);
	LIST_INIT(&pool->free_list);

	/* Initialize element */
	entry->base = 0;
	entry->len = num;

	LIST_INSERT_HEAD(&pool->free_list, entry, next);
	return 0;
}

static void
i40e_res_pool_destroy(struct i40e_res_pool_info *pool)
{
	struct pool_entry *entry;

	if (pool == NULL)
		return;

	/* Always take entries off the head so that iteration stays
	 * valid after rte_free() releases the current node.
	 */
	while ((entry = LIST_FIRST(&pool->alloc_list)) != NULL) {
		LIST_REMOVE(entry, next);
		rte_free(entry);
	}

	while ((entry = LIST_FIRST(&pool->free_list)) != NULL) {
		LIST_REMOVE(entry, next);
		rte_free(entry);
	}

	pool->num_free = 0;
	pool->num_alloc = 0;
	pool->base = 0;
	LIST_INIT(&pool->alloc_list);
	LIST_INIT(&pool->free_list);
}
static int
i40e_res_pool_free(struct i40e_res_pool_info *pool,
		   uint32_t base)
{
	struct pool_entry *entry, *next, *prev, *valid_entry = NULL;
	uint32_t pool_offset;
	int insert;

	if (pool == NULL) {
		PMD_DRV_LOG(ERR, "Invalid parameter");
		return -EINVAL;
	}

	pool_offset = base - pool->base;
	/* Lookup in alloc list */
	LIST_FOREACH(entry, &pool->alloc_list, next) {
		if (entry->base == pool_offset) {
			valid_entry = entry;
			LIST_REMOVE(entry, next);
			break;
		}
	}

	/* Not found, return */
	if (valid_entry == NULL) {
		PMD_DRV_LOG(ERR, "Failed to find entry");
		return -EINVAL;
	}

	/**
	 * Found it, move it to free list and try to merge.
	 * In order to make merging easier, always sort by qbase.
	 * Find the adjacent prev and next entries.
	 */
	prev = next = NULL;
	LIST_FOREACH(entry, &pool->free_list, next) {
		if (entry->base > valid_entry->base) {
			next = entry;
			break;
		}
		prev = entry;
	}

	insert = 0;
	/* Try to merge with the next one */
	if (next != NULL) {
		/* Merge with next one */
		if (valid_entry->base + valid_entry->len == next->base) {
			next->base = valid_entry->base;
			next->len += valid_entry->len;
			rte_free(valid_entry);
			valid_entry = next;
			insert = 1;
		}
	}

	if (prev != NULL) {
		/* Merge with previous one */
		if (prev->base + prev->len == valid_entry->base) {
			prev->len += valid_entry->len;
			/* If it merged with the next one, remove the next node */
			if (insert == 1) {
				LIST_REMOVE(valid_entry, next);
				rte_free(valid_entry);
			} else {
				rte_free(valid_entry);
			}
			valid_entry = prev;
			insert = 1;
		}
	}

	/* No adjacent entry to merge with was found, insert */
	if (insert == 0) {
		if (prev != NULL)
			LIST_INSERT_AFTER(prev, valid_entry, next);
		else if (next != NULL)
			LIST_INSERT_BEFORE(next, valid_entry, next);
		else /* It's an empty list, insert to head */
			LIST_INSERT_HEAD(&pool->free_list, valid_entry, next);
	}

	pool->num_free += valid_entry->len;
	pool->num_alloc -= valid_entry->len;

	return 0;
}
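/*
 * Explanatory note (added comment): the free list is kept sorted by
 * base, so freeing only has to examine one predecessor and one
 * successor.  Worked example: with free list { [0,16), [48,16) },
 * freeing the range [16,32) first merges with its successor
 * (16 + 32 == 48) and then with its predecessor (0 + 16 == 16),
 * collapsing everything into a single [0,64) entry.
 */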
static int
i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
		    uint16_t num)
{
	struct pool_entry *entry, *valid_entry;

	if (pool == NULL || num == 0) {
		PMD_DRV_LOG(ERR, "Invalid parameter");
		return -EINVAL;
	}

	if (pool->num_free < num) {
		PMD_DRV_LOG(ERR, "No resource. ask:%u, available:%u",
			    num, pool->num_free);
		return -ENOMEM;
	}

	valid_entry = NULL;
	/* Lookup in free list and find the best-fit one */
	LIST_FOREACH(entry, &pool->free_list, next) {
		if (entry->len >= num) {
			/* Found an exact fit */
			if (entry->len == num) {
				valid_entry = entry;
				break;
			}
			if (valid_entry == NULL || valid_entry->len > entry->len)
				valid_entry = entry;
		}
	}

	/* No entry can satisfy the request, return */
	if (valid_entry == NULL) {
		PMD_DRV_LOG(ERR, "No valid entry found");
		return -ENOMEM;
	}
	/**
	 * The entry has exactly as many queues as requested,
	 * remove it from the free list.
	 */
	if (valid_entry->len == num) {
		LIST_REMOVE(valid_entry, next);
	} else {
		/**
		 * The entry has more queues than requested; create a new
		 * entry for the alloc list and shrink the base and length
		 * of the free list entry accordingly.
		 */
		entry = rte_zmalloc("res_pool", sizeof(*entry), 0);
		if (entry == NULL) {
			PMD_DRV_LOG(ERR, "Failed to allocate memory for "
				    "resource pool");
			return -ENOMEM;
		}
		entry->base = valid_entry->base;
		entry->len = num;
		valid_entry->base += num;
		valid_entry->len -= num;
		valid_entry = entry;
	}

	/* Insert it into alloc list, not sorted */
	LIST_INSERT_HEAD(&pool->alloc_list, valid_entry, next);

	pool->num_free -= valid_entry->len;
	pool->num_alloc += valid_entry->len;

	return valid_entry->base + pool->base;
}
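/*
 * Explanatory note (added comment): allocation is best-fit: an
 * exact-size free entry is taken as a whole, otherwise the smallest
 * free entry that still fits is split, which keeps larger contiguous
 * queue ranges available for later requests.
 */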
/**
 * bitmap_is_subset - Check whether src2 is a subset of src1
 **/
static inline int
bitmap_is_subset(uint8_t src1, uint8_t src2)
{
	return !((src1 ^ src2) & src2);
}
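/*
 * Explanatory note (added comment): !((src1 ^ src2) & src2) is non-zero
 * exactly when every bit set in src2 is also set in src1.  Example:
 * src1 = 0x0f, src2 = 0x05 -> (0x0f ^ 0x05) & 0x05 = 0x0a & 0x05 = 0,
 * so 0x05 is reported as a subset of 0x0f.
 */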
static enum i40e_status_code
validate_tcmap_parameter(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
{
	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);

	/* If DCB is not supported, only default TC is supported */
	if (!hw->func_caps.dcb && enabled_tcmap != I40E_DEFAULT_TCMAP) {
		PMD_DRV_LOG(ERR, "DCB is not enabled, only TC0 is supported");
		return I40E_NOT_SUPPORTED;
	}

	if (!bitmap_is_subset(hw->func_caps.enabled_tcmap, enabled_tcmap)) {
		PMD_DRV_LOG(ERR, "Enabled TC map 0x%x not applicable to "
			    "HW support 0x%x", hw->func_caps.enabled_tcmap,
			    enabled_tcmap);
		return I40E_NOT_SUPPORTED;
	}
	return I40E_SUCCESS;
}
int
i40e_vsi_vlan_pvid_set(struct i40e_vsi *vsi,
		       struct i40e_vsi_vlan_pvid_info *info)
{
	struct i40e_hw *hw;
	struct i40e_vsi_context ctxt;
	uint8_t vlan_flags = 0;
	int ret;

	if (vsi == NULL || info == NULL) {
		PMD_DRV_LOG(ERR, "invalid parameters");
		return I40E_ERR_PARAM;
	}

	if (info->on) {
		vsi->info.pvid = info->config.pvid;
		/**
		 * If insert pvid is enabled, only tagged pkts are
		 * allowed to be sent out.
		 */
		vlan_flags |= I40E_AQ_VSI_PVLAN_INSERT_PVID |
			I40E_AQ_VSI_PVLAN_MODE_TAGGED;
	} else {
		vsi->info.pvid = 0;
		if (info->config.reject.tagged == 0)
			vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_TAGGED;

		if (info->config.reject.untagged == 0)
			vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
	}
	vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_INSERT_PVID |
				       I40E_AQ_VSI_PVLAN_MODE_MASK);
	vsi->info.port_vlan_flags |= vlan_flags;
	vsi->info.valid_sections =
		rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
	memset(&ctxt, 0, sizeof(ctxt));
	(void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
	ctxt.seid = vsi->seid;

	hw = I40E_VSI_TO_HW(vsi);
	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
	if (ret != I40E_SUCCESS)
		PMD_DRV_LOG(ERR, "Failed to update VSI params");

	return ret;
}
static int
i40e_vsi_update_tc_bandwidth(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
{
	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
	int i, ret;
	struct i40e_aqc_configure_vsi_tc_bw_data tc_bw_data;

	ret = validate_tcmap_parameter(vsi, enabled_tcmap);
	if (ret != I40E_SUCCESS)
		return ret;

	if (!vsi->seid) {
		PMD_DRV_LOG(ERR, "seid not valid");
		return -EINVAL;
	}

	memset(&tc_bw_data, 0, sizeof(tc_bw_data));
	tc_bw_data.tc_valid_bits = enabled_tcmap;
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
		tc_bw_data.tc_bw_credits[i] =
			(enabled_tcmap & (1 << i)) ? 1 : 0;

	ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &tc_bw_data, NULL);
	if (ret != I40E_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to configure TC BW");
		return ret;
	}

	(void)rte_memcpy(vsi->info.qs_handle, tc_bw_data.qs_handles,
			 sizeof(vsi->info.qs_handle));
	return I40E_SUCCESS;
}
static int
i40e_vsi_config_tc_queue_mapping(struct i40e_vsi *vsi,
				 struct i40e_aqc_vsi_properties_data *info,
				 uint8_t enabled_tcmap)
{
	int ret, i, total_tc = 0;
	uint16_t qpnum_per_tc, bsf, qp_idx;

	ret = validate_tcmap_parameter(vsi, enabled_tcmap);
	if (ret != I40E_SUCCESS)
		return ret;

	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
		if (enabled_tcmap & (1 << i))
			total_tc++;
	vsi->enabled_tc = enabled_tcmap;

	/* Number of queues per enabled TC */
	qpnum_per_tc = i40e_align_floor(vsi->nb_qps / total_tc);
	qpnum_per_tc = RTE_MIN(qpnum_per_tc, I40E_MAX_Q_PER_TC);
	bsf = rte_bsf32(qpnum_per_tc);

	/* Adjust the queue number to actual queues that can be applied */
	if (!(vsi->type == I40E_VSI_MAIN && total_tc == 1))
		vsi->nb_qps = qpnum_per_tc * total_tc;

	/**
	 * Configure TC and queue mapping parameters; for an enabled TC,
	 * allocate qpnum_per_tc queues to this traffic class. For a
	 * disabled TC, the default queue will serve it.
	 */
	qp_idx = 0;
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (vsi->enabled_tc & (1 << i)) {
			info->tc_mapping[i] = rte_cpu_to_le_16((qp_idx <<
					I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
				(bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
			qp_idx += qpnum_per_tc;
		} else
			info->tc_mapping[i] = 0;
	}

	/* Associate queue number with VSI */
	if (vsi->type == I40E_VSI_SRIOV) {
		info->mapping_flags |=
			rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
		for (i = 0; i < vsi->nb_qps; i++)
			info->queue_mapping[i] =
				rte_cpu_to_le_16(vsi->base_queue + i);
	} else {
		info->mapping_flags |=
			rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
		info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
	}
	info->valid_sections |=
		rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);

	return I40E_SUCCESS;
}
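/*
 * Explanatory note (added comment): tc_mapping[] packs, per TC, the
 * first queue index and the log2 of the queue count.  Worked example:
 * with 16 queues and only TC0 enabled, qpnum_per_tc = 16 and
 * bsf = rte_bsf32(16) = 4, so tc_mapping[0] encodes "16 queues
 * starting at offset 0"; disabled TCs keep mapping 0 and are served
 * by the default queue.
 */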
static int
i40e_veb_release(struct i40e_veb *veb)
{
	struct i40e_vsi *vsi;
	struct i40e_hw *hw;

	if (veb == NULL || veb->associate_vsi == NULL)
		return -EINVAL;

	if (!TAILQ_EMPTY(&veb->head)) {
		PMD_DRV_LOG(ERR, "VEB still has VSI attached, can't remove");
		return -EACCES;
	}

	vsi = veb->associate_vsi;
	hw = I40E_VSI_TO_HW(vsi);

	vsi->uplink_seid = veb->uplink_seid;
	i40e_aq_delete_element(hw, veb->seid, NULL);
	rte_free(veb);
	vsi->veb = NULL;

	return I40E_SUCCESS;
}

/* Setup a VEB */
static struct i40e_veb *
i40e_veb_setup(struct i40e_pf *pf, struct i40e_vsi *vsi)
{
	struct i40e_veb *veb;
	int ret;
	struct i40e_hw *hw;

	if (NULL == pf || vsi == NULL) {
		PMD_DRV_LOG(ERR, "veb setup failed, "
			    "associated VSI shouldn't be NULL");
		return NULL;
	}
	hw = I40E_PF_TO_HW(pf);

	veb = rte_zmalloc("i40e_veb", sizeof(struct i40e_veb), 0);
	if (!veb) {
		PMD_DRV_LOG(ERR, "Failed to allocate memory for veb");
		goto fail;
	}

	veb->associate_vsi = vsi;
	TAILQ_INIT(&veb->head);
	veb->uplink_seid = vsi->uplink_seid;

	ret = i40e_aq_add_veb(hw, veb->uplink_seid, vsi->seid,
		I40E_DEFAULT_TCMAP, false, false, &veb->seid, NULL);

	if (ret != I40E_SUCCESS) {
		PMD_DRV_LOG(ERR, "Add veb failed, aq_err: %d",
			    hw->aq.asq_last_status);
		goto fail;
	}

	/* get statistics index */
	ret = i40e_aq_get_veb_parameters(hw, veb->seid, NULL, NULL,
				&veb->stats_idx, NULL, NULL, NULL);
	if (ret != I40E_SUCCESS) {
		PMD_DRV_LOG(ERR, "Get veb statistics index failed, aq_err: %d",
			    hw->aq.asq_last_status);
		goto fail;
	}

	/* Get VEB bandwidth, to be implemented */
	/* Now associated vsi binding to the VEB, set uplink to this VEB */
	vsi->uplink_seid = veb->seid;

	return veb;
fail:
	rte_free(veb);
	return NULL;
}
int
i40e_vsi_release(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf;
	struct i40e_hw *hw;
	struct i40e_vsi_list *vsi_list;
	int ret;
	struct i40e_mac_filter *f;

	if (!vsi)
		return I40E_SUCCESS;

	pf = I40E_VSI_TO_PF(vsi);
	hw = I40E_VSI_TO_HW(vsi);

	/* VSI has child to attach, release child first */
	if (vsi->veb) {
		TAILQ_FOREACH(vsi_list, &vsi->veb->head, list) {
			if (i40e_vsi_release(vsi_list->vsi) != I40E_SUCCESS)
				return -1;
			TAILQ_REMOVE(&vsi->veb->head, vsi_list, list);
		}
		i40e_veb_release(vsi->veb);
	}

	/* Remove all macvlan filters of the VSI, then free the list
	 * entries; take entries off the head so the iteration stays
	 * valid after rte_free() releases the current node.
	 */
	i40e_vsi_remove_all_macvlan_filter(vsi);
	while ((f = TAILQ_FIRST(&vsi->mac_list)) != NULL) {
		TAILQ_REMOVE(&vsi->mac_list, f, next);
		rte_free(f);
	}

	if (vsi->type != I40E_VSI_MAIN) {
		/* Remove vsi from parent's sibling list */
		if (vsi->parent_vsi == NULL || vsi->parent_vsi->veb == NULL) {
			PMD_DRV_LOG(ERR, "VSI's parent VSI is NULL");
			return I40E_ERR_PARAM;
		}
		TAILQ_REMOVE(&vsi->parent_vsi->veb->head,
			     &vsi->sib_vsi_list, list);

		/* Remove all switch element of the VSI */
		ret = i40e_aq_delete_element(hw, vsi->seid, NULL);
		if (ret != I40E_SUCCESS)
			PMD_DRV_LOG(ERR, "Failed to delete element");
	}
	i40e_res_pool_free(&pf->qp_pool, vsi->base_queue);

	if (vsi->type != I40E_VSI_SRIOV)
		i40e_res_pool_free(&pf->msix_pool, vsi->msix_intr);
	rte_free(vsi);

	return I40E_SUCCESS;
}
static int
i40e_update_default_filter_setting(struct i40e_vsi *vsi)
{
	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
	struct i40e_aqc_remove_macvlan_element_data def_filter;
	struct i40e_mac_filter_info filter;
	int ret;

	if (vsi->type != I40E_VSI_MAIN)
		return I40E_ERR_CONFIG;
	memset(&def_filter, 0, sizeof(def_filter));
	(void)rte_memcpy(def_filter.mac_addr, hw->mac.perm_addr,
			 ETH_ADDR_LEN);
	def_filter.vlan_tag = 0;
	def_filter.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
		I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
	ret = i40e_aq_remove_macvlan(hw, vsi->seid, &def_filter, 1, NULL);
	if (ret != I40E_SUCCESS) {
		struct i40e_mac_filter *f;
		struct ether_addr *mac;

		PMD_DRV_LOG(WARNING, "Cannot remove the default "
			    "macvlan filter");
		/* It needs to add the permanent mac into mac list */
		f = rte_zmalloc("macv_filter", sizeof(*f), 0);
		if (f == NULL) {
			PMD_DRV_LOG(ERR, "failed to allocate memory");
			return I40E_ERR_NO_MEMORY;
		}
		mac = &f->mac_info.mac_addr;
		(void)rte_memcpy(&mac->addr_bytes, hw->mac.perm_addr,
				 ETH_ADDR_LEN);
		f->mac_info.filter_type = RTE_MACVLAN_PERFECT_MATCH;
		TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
		vsi->mac_num++;

		return ret;
	}
	(void)rte_memcpy(&filter.mac_addr,
			 (struct ether_addr *)(hw->mac.perm_addr),
			 ETH_ADDR_LEN);
	filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
	return i40e_vsi_add_mac(vsi, &filter);
}
static int
i40e_vsi_dump_bw_config(struct i40e_vsi *vsi)
{
	struct i40e_aqc_query_vsi_bw_config_resp bw_config;
	struct i40e_aqc_query_vsi_ets_sla_config_resp ets_sla_config;
	struct i40e_hw *hw = &vsi->adapter->hw;
	int i, ret;

	memset(&bw_config, 0, sizeof(bw_config));
	ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
	if (ret != I40E_SUCCESS) {
		PMD_DRV_LOG(ERR, "VSI failed to get bandwidth configuration %u",
			    hw->aq.asq_last_status);
		return ret;
	}

	memset(&ets_sla_config, 0, sizeof(ets_sla_config));
	ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid,
					       &ets_sla_config, NULL);
	if (ret != I40E_SUCCESS) {
		PMD_DRV_LOG(ERR, "VSI failed to get TC bandwidth "
			    "configuration %u", hw->aq.asq_last_status);
		return ret;
	}

	/* Not storing the info yet, just print it out */
	PMD_DRV_LOG(INFO, "VSI bw limit:%u", bw_config.port_bw_limit);
	PMD_DRV_LOG(INFO, "VSI max_bw:%u", bw_config.max_bw);
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		PMD_DRV_LOG(INFO, "\tVSI TC%u:share credits %u", i,
			    ets_sla_config.share_credits[i]);
		PMD_DRV_LOG(INFO, "\tVSI TC%u:credits %u", i,
			    rte_le_to_cpu_16(ets_sla_config.credits[i]));
		PMD_DRV_LOG(INFO, "\tVSI TC%u: max credits: %u", i,
			    rte_le_to_cpu_16(ets_sla_config.credits[i / 4]) >>
			    (i % 4) * 8 & 0xFF);
	}

	return 0;
}
3803 i40e_vsi_setup(struct i40e_pf *pf,
3804 enum i40e_vsi_type type,
3805 struct i40e_vsi *uplink_vsi,
3806 uint16_t user_param)
3808 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
3809 struct i40e_vsi *vsi;
3810 struct i40e_mac_filter_info filter;
3812 struct i40e_vsi_context ctxt;
3813 struct ether_addr broadcast =
3814 {.addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}};
3816 if (type != I40E_VSI_MAIN && uplink_vsi == NULL) {
3817 PMD_DRV_LOG(ERR, "VSI setup failed, "
3818 "VSI link shouldn't be NULL");
3822 if (type == I40E_VSI_MAIN && uplink_vsi != NULL) {
3823 PMD_DRV_LOG(ERR, "VSI setup failed, MAIN VSI "
3824 "uplink VSI should be NULL");
3828 /* If uplink vsi didn't setup VEB, create one first */
3829 if (type != I40E_VSI_MAIN && uplink_vsi->veb == NULL) {
3830 uplink_vsi->veb = i40e_veb_setup(pf, uplink_vsi);
3832 if (NULL == uplink_vsi->veb) {
3833 PMD_DRV_LOG(ERR, "VEB setup failed");
3838 vsi = rte_zmalloc("i40e_vsi", sizeof(struct i40e_vsi), 0);
3840 PMD_DRV_LOG(ERR, "Failed to allocate memory for vsi");
3843 TAILQ_INIT(&vsi->mac_list);
3845 vsi->adapter = I40E_PF_TO_ADAPTER(pf);
3846 vsi->max_macaddrs = I40E_NUM_MACADDR_MAX;
3847 vsi->parent_vsi = uplink_vsi;
3848 vsi->user_param = user_param;
3849 /* Allocate queues */
3850 switch (vsi->type) {
3851 case I40E_VSI_MAIN :
3852 vsi->nb_qps = pf->lan_nb_qps;
3854 case I40E_VSI_SRIOV :
3855 vsi->nb_qps = pf->vf_nb_qps;
3857 case I40E_VSI_VMDQ2:
3858 vsi->nb_qps = pf->vmdq_nb_qps;
3861 vsi->nb_qps = pf->fdir_nb_qps;
3867 * The filter status descriptor is reported in rx queue 0,
3868 * while the tx queue for fdir filter programming has no
3869 * such constraints, can be non-zero queues.
3870 * To simplify it, choose FDIR vsi use queue 0 pair.
3871 * To make sure it will use queue 0 pair, queue allocation
3872 * need be done before this function is called
3874 if (type != I40E_VSI_FDIR) {
3875 ret = i40e_res_pool_alloc(&pf->qp_pool, vsi->nb_qps);
3877 PMD_DRV_LOG(ERR, "VSI %d allocate queue failed %d",
3881 vsi->base_queue = ret;
3883 vsi->base_queue = I40E_FDIR_QUEUE_ID;
3885 /* VF has MSIX interrupt in VF range, don't allocate here */
3886 if (type == I40E_VSI_MAIN) {
3887 ret = i40e_res_pool_alloc(&pf->msix_pool,
3888 RTE_MIN(vsi->nb_qps,
3889 RTE_MAX_RXTX_INTR_VEC_ID));
3891 PMD_DRV_LOG(ERR, "VSI MAIN %d get heap failed %d",
3893 goto fail_queue_alloc;
3895 vsi->msix_intr = ret;
3896 vsi->nb_msix = RTE_MIN(vsi->nb_qps, RTE_MAX_RXTX_INTR_VEC_ID);
3897 } else if (type != I40E_VSI_SRIOV) {
3898 ret = i40e_res_pool_alloc(&pf->msix_pool, 1);
3900 PMD_DRV_LOG(ERR, "VSI %d get heap failed %d", vsi->seid, ret);
3901 goto fail_queue_alloc;
3903 vsi->msix_intr = ret;
3911 if (type == I40E_VSI_MAIN) {
3912 /* For main VSI, no need to add since it's default one */
3913 vsi->uplink_seid = pf->mac_seid;
3914 vsi->seid = pf->main_vsi_seid;
3915 /* Bind queues with specific MSIX interrupt */
3917 * Needs 2 interrupt at least, one for misc cause which will
3918 * enabled from OS side, Another for queues binding the
3919 * interrupt from device side only.
3922 /* Get default VSI parameters from hardware */
3923 memset(&ctxt, 0, sizeof(ctxt));
3924 ctxt.seid = vsi->seid;
3925 ctxt.pf_num = hw->pf_id;
3926 ctxt.uplink_seid = vsi->uplink_seid;
3928 ret = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
3929 if (ret != I40E_SUCCESS) {
3930 PMD_DRV_LOG(ERR, "Failed to get VSI params");
3931 goto fail_msix_alloc;
3933 (void)rte_memcpy(&vsi->info, &ctxt.info,
3934 sizeof(struct i40e_aqc_vsi_properties_data));
3935 vsi->vsi_id = ctxt.vsi_number;
3936 vsi->info.valid_sections = 0;
3938 /* Configure TCs; only TC0 is enabled */
3939 if (i40e_vsi_update_tc_bandwidth(vsi, I40E_DEFAULT_TCMAP) !=
3941 PMD_DRV_LOG(ERR, "Failed to update TC bandwidth");
3942 goto fail_msix_alloc;
3945 /* TC, queue mapping */
3946 memset(&ctxt, 0, sizeof(ctxt));
3947 vsi->info.valid_sections |=
3948 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
3949 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
3950 I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
3951 (void)rte_memcpy(&ctxt.info, &vsi->info,
3952 sizeof(struct i40e_aqc_vsi_properties_data));
3953 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
3954 I40E_DEFAULT_TCMAP);
3955 if (ret != I40E_SUCCESS) {
3956 PMD_DRV_LOG(ERR, "Failed to configure "
3957 "TC queue mapping");
3958 goto fail_msix_alloc;
3960 ctxt.seid = vsi->seid;
3961 ctxt.pf_num = hw->pf_id;
3962 ctxt.uplink_seid = vsi->uplink_seid;
3965 /* Update VSI parameters */
3966 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
3967 if (ret != I40E_SUCCESS) {
3968 PMD_DRV_LOG(ERR, "Failed to update VSI params");
3969 goto fail_msix_alloc;
3972 (void)rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
3973 sizeof(vsi->info.tc_mapping));
3974 (void)rte_memcpy(&vsi->info.queue_mapping,
3975 &ctxt.info.queue_mapping,
3976 sizeof(vsi->info.queue_mapping));
3977 vsi->info.mapping_flags = ctxt.info.mapping_flags;
3978 vsi->info.valid_sections = 0;
3980 (void)rte_memcpy(pf->dev_addr.addr_bytes, hw->mac.perm_addr,
3984 * Updating the default filter settings is necessary to prevent
3985 * reception of tagged packets.
3986 * Some old firmware configurations load a default macvlan
3987 * filter which accepts both tagged and untagged packets.
3988 * The update replaces it with a normal filter if needed.
3989 * For NVM 4.2.2 or later, the update is not needed anymore.
3990 * Firmware with a correct configuration loads the expected
3991 * default macvlan filter, which cannot be removed.
3993 i40e_update_default_filter_setting(vsi);
3994 i40e_config_qinq(hw, vsi);
3995 } else if (type == I40E_VSI_SRIOV) {
3996 memset(&ctxt, 0, sizeof(ctxt));
3998 * For other VSIs, the uplink_seid equals the uplink VSI's
3999 * uplink_seid, since they share the same VEB
4001 vsi->uplink_seid = uplink_vsi->uplink_seid;
4002 ctxt.pf_num = hw->pf_id;
4003 ctxt.vf_num = hw->func_caps.vf_base_id + user_param;
4004 ctxt.uplink_seid = vsi->uplink_seid;
4005 ctxt.connection_type = 0x1;
4006 ctxt.flags = I40E_AQ_VSI_TYPE_VF;
4009 * Do not configure the switch ID to enable the VEB switch via
4010 * I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB, because in Fortville,
4011 * if the source mac address of a packet sent from a VF is not
4012 * listed in the VEB's mac table, the VEB will switch the
4013 * packet back to the VF. Need to enable it when the HW issue
4017 /* Configure port/vlan */
4018 ctxt.info.valid_sections |=
4019 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
4020 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
4021 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
4022 I40E_DEFAULT_TCMAP);
4023 if (ret != I40E_SUCCESS) {
4024 PMD_DRV_LOG(ERR, "Failed to configure "
4025 "TC queue mapping");
4026 goto fail_msix_alloc;
4028 ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
4029 ctxt.info.valid_sections |=
4030 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
4032 * Since the VSI is not created yet, only configure its parameters;
4033 * the vsi will be added below.
4036 i40e_config_qinq(hw, vsi);
4037 } else if (type == I40E_VSI_VMDQ2) {
4038 memset(&ctxt, 0, sizeof(ctxt));
4040 * For other VSIs, the uplink_seid equals the uplink VSI's
4041 * uplink_seid, since they share the same VEB
4043 vsi->uplink_seid = uplink_vsi->uplink_seid;
4044 ctxt.pf_num = hw->pf_id;
4046 ctxt.uplink_seid = vsi->uplink_seid;
4047 ctxt.connection_type = 0x1;
4048 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
4050 ctxt.info.valid_sections |=
4051 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
4052 /* user_param carries the flag to enable loopback */
4054 ctxt.info.switch_id =
4055 rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB);
4056 ctxt.info.switch_id |=
4057 rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
4060 /* Configure port/vlan */
4061 ctxt.info.valid_sections |=
4062 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
4063 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
4064 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
4065 I40E_DEFAULT_TCMAP);
4066 if (ret != I40E_SUCCESS) {
4067 PMD_DRV_LOG(ERR, "Failed to configure "
4068 "TC queue mapping");
4069 goto fail_msix_alloc;
4071 ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
4072 ctxt.info.valid_sections |=
4073 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
4074 } else if (type == I40E_VSI_FDIR) {
4075 memset(&ctxt, 0, sizeof(ctxt));
4076 vsi->uplink_seid = uplink_vsi->uplink_seid;
4077 ctxt.pf_num = hw->pf_id;
4079 ctxt.uplink_seid = vsi->uplink_seid;
4080 ctxt.connection_type = 0x1; /* regular data port */
4081 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
4082 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
4083 I40E_DEFAULT_TCMAP);
4084 if (ret != I40E_SUCCESS) {
4085 PMD_DRV_LOG(ERR, "Failed to configure "
4086 "TC queue mapping.");
4087 goto fail_msix_alloc;
4089 ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
4090 ctxt.info.valid_sections |=
4091 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
4093 PMD_DRV_LOG(ERR, "VSI: other VSI types are not supported yet");
4094 goto fail_msix_alloc;
4097 if (vsi->type != I40E_VSI_MAIN) {
4098 ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
4099 if (ret != I40E_SUCCESS) {
4100 PMD_DRV_LOG(ERR, "add vsi failed, aq_err=%d",
4101 hw->aq.asq_last_status);
4102 goto fail_msix_alloc;
4104 memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
4105 vsi->info.valid_sections = 0;
4106 vsi->seid = ctxt.seid;
4107 vsi->vsi_id = ctxt.vsi_number;
4108 vsi->sib_vsi_list.vsi = vsi;
4109 TAILQ_INSERT_TAIL(&uplink_vsi->veb->head,
4110 &vsi->sib_vsi_list, list);
4113 /* MAC/VLAN configuration */
4114 (void)rte_memcpy(&filter.mac_addr, &broadcast, ETHER_ADDR_LEN);
4115 filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
4117 ret = i40e_vsi_add_mac(vsi, &filter);
4118 if (ret != I40E_SUCCESS) {
4119 PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter");
4120 goto fail_msix_alloc;
4123 /* Get VSI BW information */
4124 i40e_vsi_dump_bw_config(vsi);
4127 i40e_res_pool_free(&pf->msix_pool, vsi->msix_intr);
4129 i40e_res_pool_free(&pf->qp_pool, vsi->base_queue);
4135 /* Configure vlan stripping on or off */
4137 i40e_vsi_config_vlan_stripping(struct i40e_vsi *vsi, bool on)
4139 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
4140 struct i40e_vsi_context ctxt;
4142 int ret = I40E_SUCCESS;
4144 /* Check if it has been already on or off */
4145 if (vsi->info.valid_sections &
4146 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID)) {
4148 if ((vsi->info.port_vlan_flags &
4149 I40E_AQ_VSI_PVLAN_EMOD_MASK) == 0)
4150 return 0; /* already on */
4152 if ((vsi->info.port_vlan_flags &
4153 I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
4154 I40E_AQ_VSI_PVLAN_EMOD_MASK)
4155 return 0; /* already off */
4160 vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
4162 vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
4163 vsi->info.valid_sections =
4164 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
4165 vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_EMOD_MASK);
4166 vsi->info.port_vlan_flags |= vlan_flags;
4167 ctxt.seid = vsi->seid;
4168 (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
4169 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
4171 PMD_DRV_LOG(INFO, "Failed to update VSI to %s vlan stripping",
4172 on ? "enable" : "disable");
4178 i40e_dev_init_vlan(struct rte_eth_dev *dev)
4180 struct rte_eth_dev_data *data = dev->data;
4183 /* Apply vlan offload setting */
4184 i40e_vlan_offload_set(dev, ETH_VLAN_STRIP_MASK);
4186 /* Apply double-vlan setting, not implemented yet */
4188 /* Apply pvid setting */
4189 ret = i40e_vlan_pvid_set(dev, data->dev_conf.txmode.pvid,
4190 data->dev_conf.txmode.hw_vlan_insert_pvid);
4192 PMD_DRV_LOG(INFO, "Failed to update VSI params");
4198 i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on)
4200 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
4202 return i40e_aq_set_port_parameters(hw, vsi->seid, 0, 1, on, NULL);
4206 i40e_update_flow_control(struct i40e_hw *hw)
4208 #define I40E_LINK_PAUSE_RXTX (I40E_AQ_LINK_PAUSE_RX | I40E_AQ_LINK_PAUSE_TX)
4209 struct i40e_link_status link_status;
4210 uint32_t rxfc = 0, txfc = 0, reg;
4214 memset(&link_status, 0, sizeof(link_status));
4215 ret = i40e_aq_get_link_info(hw, FALSE, &link_status, NULL);
4216 if (ret != I40E_SUCCESS) {
4217 PMD_DRV_LOG(ERR, "Failed to get link status information");
4218 goto write_reg; /* Disable flow control */
4221 an_info = hw->phy.link_info.an_info;
4222 if (!(an_info & I40E_AQ_AN_COMPLETED)) {
4223 PMD_DRV_LOG(INFO, "Link auto negotiation not completed");
4224 ret = I40E_ERR_NOT_READY;
4225 goto write_reg; /* Disable flow control */
4228 * If link auto negotiation is enabled, flow control needs to
4229 * be configured according to it
4231 switch (an_info & I40E_LINK_PAUSE_RXTX) {
4232 case I40E_LINK_PAUSE_RXTX:
4235 hw->fc.current_mode = I40E_FC_FULL;
4237 case I40E_AQ_LINK_PAUSE_RX:
4239 hw->fc.current_mode = I40E_FC_RX_PAUSE;
4241 case I40E_AQ_LINK_PAUSE_TX:
4243 hw->fc.current_mode = I40E_FC_TX_PAUSE;
4246 hw->fc.current_mode = I40E_FC_NONE;
4251 I40E_WRITE_REG(hw, I40E_PRTDCB_FCCFG,
4252 txfc << I40E_PRTDCB_FCCFG_TFCE_SHIFT);
4253 reg = I40E_READ_REG(hw, I40E_PRTDCB_MFLCN);
4254 reg &= ~I40E_PRTDCB_MFLCN_RFCE_MASK;
4255 reg |= rxfc << I40E_PRTDCB_MFLCN_RFCE_SHIFT;
4256 I40E_WRITE_REG(hw, I40E_PRTDCB_MFLCN, reg);
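/*
 * Illustrative sketch (not part of the driver): the switch above maps
 * the two auto-negotiated pause bits to a flow control mode.  The same
 * mapping written as a lookup table, with the RX pause bit in bit 0 and
 * the TX pause bit in bit 1 (an indexing choice for this sketch only,
 * not the hardware register layout):
 */
static const enum i40e_fc_mode an_pause_to_fc_mode[4] = {
	I40E_FC_NONE,     /* neither RX nor TX pause advertised */
	I40E_FC_RX_PAUSE, /* RX pause only */
	I40E_FC_TX_PAUSE, /* TX pause only */
	I40E_FC_FULL,     /* both directions */
};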
4263 i40e_pf_setup(struct i40e_pf *pf)
4265 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4266 struct i40e_filter_control_settings settings;
4267 struct i40e_vsi *vsi;
4270 /* Clear all stats counters */
4271 pf->offset_loaded = FALSE;
4272 memset(&pf->stats, 0, sizeof(struct i40e_hw_port_stats));
4273 memset(&pf->stats_offset, 0, sizeof(struct i40e_hw_port_stats));
4275 ret = i40e_pf_get_switch_config(pf);
4276 if (ret != I40E_SUCCESS) {
4277 PMD_DRV_LOG(ERR, "Could not get switch config, err %d", ret);
4280 if (pf->flags & I40E_FLAG_FDIR) {
4281 /* Allocate the queue first, so FDIR uses queue pair 0 */
4282 ret = i40e_res_pool_alloc(&pf->qp_pool, I40E_DEFAULT_QP_NUM_FDIR);
4283 if (ret != I40E_FDIR_QUEUE_ID) {
4284 PMD_DRV_LOG(ERR, "queue allocation failed for FDIR:"
4286 pf->flags &= ~I40E_FLAG_FDIR;
4289 /* main VSI setup */
4290 vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, NULL, 0);
4292 PMD_DRV_LOG(ERR, "Setup of main vsi failed");
4293 return I40E_ERR_NOT_READY;
4297 /* Configure filter control */
4298 memset(&settings, 0, sizeof(settings));
4299 if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_128)
4300 settings.hash_lut_size = I40E_HASH_LUT_SIZE_128;
4301 else if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_512)
4302 settings.hash_lut_size = I40E_HASH_LUT_SIZE_512;
4304 PMD_DRV_LOG(ERR, "Hash lookup table size (%u) not supported",
4305 hw->func_caps.rss_table_size);
4306 return I40E_ERR_PARAM;
4308 PMD_DRV_LOG(INFO, "Hardware capability of hash lookup table "
4309 "size: %u\n", hw->func_caps.rss_table_size);
4310 pf->hash_lut_size = hw->func_caps.rss_table_size;
4312 /* Enable ethtype and macvlan filters */
4313 settings.enable_ethtype = TRUE;
4314 settings.enable_macvlan = TRUE;
4315 ret = i40e_set_filter_control(hw, &settings);
4317 PMD_INIT_LOG(WARNING, "setup_pf_filter_control failed: %d",
4320 /* Update flow control according to the auto negotiation */
4321 i40e_update_flow_control(hw);
4323 return I40E_SUCCESS;
4327 i40e_switch_tx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
4333 * Set or clear the TX Queue Disable flags,
4334 * as required by hardware.
4336 i40e_pre_tx_queue_cfg(hw, q_idx, on);
4337 rte_delay_us(I40E_PRE_TX_Q_CFG_WAIT_US);
4339 /* Wait until the request is finished */
4340 for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
4341 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
4342 reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
4343 if (!(((reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 0x1) ^
4344 ((reg >> I40E_QTX_ENA_QENA_STAT_SHIFT)
4350 if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
4351 return I40E_SUCCESS; /* already on, skip next steps */
4353 I40E_WRITE_REG(hw, I40E_QTX_HEAD(q_idx), 0);
4354 reg |= I40E_QTX_ENA_QENA_REQ_MASK;
4356 if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
4357 return I40E_SUCCESS; /* already off, skip next steps */
4358 reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
4360 /* Write the register */
4361 I40E_WRITE_REG(hw, I40E_QTX_ENA(q_idx), reg);
4362 /* Check the result */
4363 for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
4364 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
4365 reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
4367 if ((reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
4368 (reg & I40E_QTX_ENA_QENA_STAT_MASK))
4371 if (!(reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
4372 !(reg & I40E_QTX_ENA_QENA_STAT_MASK))
4376 /* Check if it is timeout */
4377 if (j >= I40E_CHK_Q_ENA_COUNT) {
4378 PMD_DRV_LOG(ERR, "Failed to %s tx queue[%u]",
4379 (on ? "enable" : "disable"), q_idx);
4380 return I40E_ERR_TIMEOUT;
4383 return I40E_SUCCESS;
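/*
 * Illustrative sketch (not part of the driver): the enable/disable
 * handshake above follows a generic request/status pattern -- program
 * the REQ bit, then poll until the STAT bit matches it.  A minimal
 * model of that pattern, with read_reg/write_reg as assumed accessor
 * callbacks standing in for the register macros:
 */
static inline int
qena_handshake_model(uint32_t (*read_reg)(void),
		     void (*write_reg)(uint32_t),
		     bool on, int max_polls)
{
	uint32_t reg = read_reg();

	if (on)
		reg |= I40E_QTX_ENA_QENA_REQ_MASK;
	else
		reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
	write_reg(reg);

	while (max_polls-- > 0) {
		reg = read_reg();
		/* Done once STAT has caught up with REQ */
		if (!!(reg & I40E_QTX_ENA_QENA_REQ_MASK) ==
		    !!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
			return 0;
	}
	return -1; /* timeout */
}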
4386 /* Switch the tx queues on or off */
4388 i40e_dev_switch_tx_queues(struct i40e_pf *pf, bool on)
4390 struct rte_eth_dev_data *dev_data = pf->dev_data;
4391 struct i40e_tx_queue *txq;
4392 struct rte_eth_dev *dev = pf->adapter->eth_dev;
4396 for (i = 0; i < dev_data->nb_tx_queues; i++) {
4397 txq = dev_data->tx_queues[i];
4398 /* Don't operate the queue if it isn't configured, or,
4399 * when starting, if it's marked for per-queue deferred start */
4400 if (!txq || !txq->q_set || (on && txq->tx_deferred_start))
4403 ret = i40e_dev_tx_queue_start(dev, i);
4405 ret = i40e_dev_tx_queue_stop(dev, i);
4406 if (ret != I40E_SUCCESS)
4410 return I40E_SUCCESS;
4414 i40e_switch_rx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
4419 /* Wait until the request is finished */
4420 for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
4421 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
4422 reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
4423 if (!((reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 0x1) ^
4424 ((reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 0x1))
4429 if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
4430 return I40E_SUCCESS; /* Already on, skip next steps */
4431 reg |= I40E_QRX_ENA_QENA_REQ_MASK;
4433 if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
4434 return I40E_SUCCESS; /* Already off, skip next steps */
4435 reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
4438 /* Write the register */
4439 I40E_WRITE_REG(hw, I40E_QRX_ENA(q_idx), reg);
4440 /* Check the result */
4441 for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
4442 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
4443 reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
4445 if ((reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
4446 (reg & I40E_QRX_ENA_QENA_STAT_MASK))
4449 if (!(reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
4450 !(reg & I40E_QRX_ENA_QENA_STAT_MASK))
4455 /* Check if it is timeout */
4456 if (j >= I40E_CHK_Q_ENA_COUNT) {
4457 PMD_DRV_LOG(ERR, "Failed to %s rx queue[%u]",
4458 (on ? "enable" : "disable"), q_idx);
4459 return I40E_ERR_TIMEOUT;
4462 return I40E_SUCCESS;
4464 /* Switch the rx queues on or off */
4466 i40e_dev_switch_rx_queues(struct i40e_pf *pf, bool on)
4468 struct rte_eth_dev_data *dev_data = pf->dev_data;
4469 struct i40e_rx_queue *rxq;
4470 struct rte_eth_dev *dev = pf->adapter->eth_dev;
4474 for (i = 0; i < dev_data->nb_rx_queues; i++) {
4475 rxq = dev_data->rx_queues[i];
4476 /* Don't operate the queue if it isn't configured, or,
4477 * when starting, if it's marked for per-queue deferred start */
4478 if (!rxq || !rxq->q_set || (on && rxq->rx_deferred_start))
4481 ret = i40e_dev_rx_queue_start(dev, i);
4483 ret = i40e_dev_rx_queue_stop(dev, i);
4484 if (ret != I40E_SUCCESS)
4488 return I40E_SUCCESS;
4491 /* Switch all the rx/tx queues on or off */
4493 i40e_dev_switch_queues(struct i40e_pf *pf, bool on)
4498 /* enable rx queues before enabling tx queues */
4499 ret = i40e_dev_switch_rx_queues(pf, on);
4501 PMD_DRV_LOG(ERR, "Failed to switch rx queues");
4504 ret = i40e_dev_switch_tx_queues(pf, on);
4506 /* Stop tx queues before stopping rx queues */
4507 ret = i40e_dev_switch_tx_queues(pf, on);
4509 PMD_DRV_LOG(ERR, "Failed to switch tx queues");
4512 ret = i40e_dev_switch_rx_queues(pf, on);
4518 /* Initialize VSI for TX */
4520 i40e_dev_tx_init(struct i40e_pf *pf)
4522 struct rte_eth_dev_data *data = pf->dev_data;
4524 uint32_t ret = I40E_SUCCESS;
4525 struct i40e_tx_queue *txq;
4527 for (i = 0; i < data->nb_tx_queues; i++) {
4528 txq = data->tx_queues[i];
4529 if (!txq || !txq->q_set)
4531 ret = i40e_tx_queue_init(txq);
4532 if (ret != I40E_SUCCESS)
4535 if (ret == I40E_SUCCESS)
4536 i40e_set_tx_function(container_of(pf, struct i40e_adapter, pf)
4542 /* Initialize VSI for RX */
4544 i40e_dev_rx_init(struct i40e_pf *pf)
4546 struct rte_eth_dev_data *data = pf->dev_data;
4547 int ret = I40E_SUCCESS;
4549 struct i40e_rx_queue *rxq;
4551 i40e_pf_config_mq_rx(pf);
4552 for (i = 0; i < data->nb_rx_queues; i++) {
4553 rxq = data->rx_queues[i];
4554 if (!rxq || !rxq->q_set)
4557 ret = i40e_rx_queue_init(rxq);
4558 if (ret != I40E_SUCCESS) {
4559 PMD_DRV_LOG(ERR, "Failed to do RX queue "
4564 if (ret == I40E_SUCCESS)
4565 i40e_set_rx_function(container_of(pf, struct i40e_adapter, pf)
4572 i40e_dev_rxtx_init(struct i40e_pf *pf)
4576 err = i40e_dev_tx_init(pf);
4578 PMD_DRV_LOG(ERR, "Failed to do TX initialization");
4581 err = i40e_dev_rx_init(pf);
4583 PMD_DRV_LOG(ERR, "Failed to do RX initialization");
4591 i40e_vmdq_setup(struct rte_eth_dev *dev)
4593 struct rte_eth_conf *conf = &dev->data->dev_conf;
4594 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4595 int i, err, conf_vsis, j, loop;
4596 struct i40e_vsi *vsi;
4597 struct i40e_vmdq_info *vmdq_info;
4598 struct rte_eth_vmdq_rx_conf *vmdq_conf;
4599 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4602 * Disable interrupts to avoid messages from VFs. Furthermore, this
4603 * avoids race conditions during VSI creation/destruction.
4605 i40e_pf_disable_irq0(hw);
4607 if ((pf->flags & I40E_FLAG_VMDQ) == 0) {
4608 PMD_INIT_LOG(ERR, "FW doesn't support VMDQ");
4612 conf_vsis = conf->rx_adv_conf.vmdq_rx_conf.nb_queue_pools;
4613 if (conf_vsis > pf->max_nb_vmdq_vsi) {
4614 PMD_INIT_LOG(ERR, "VMDQ config: %u, max support:%u",
4615 conf->rx_adv_conf.vmdq_rx_conf.nb_queue_pools,
4616 pf->max_nb_vmdq_vsi);
4620 if (pf->vmdq != NULL) {
4621 PMD_INIT_LOG(INFO, "VMDQ already configured");
4625 pf->vmdq = rte_zmalloc("vmdq_info_struct",
4626 sizeof(*vmdq_info) * conf_vsis, 0);
4628 if (pf->vmdq == NULL) {
4629 PMD_INIT_LOG(ERR, "Failed to allocate memory");
4633 vmdq_conf = &conf->rx_adv_conf.vmdq_rx_conf;
4635 /* Create VMDQ VSI */
4636 for (i = 0; i < conf_vsis; i++) {
4637 vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2, pf->main_vsi,
4638 vmdq_conf->enable_loop_back);
4640 PMD_INIT_LOG(ERR, "Failed to create VMDQ VSI");
4644 vmdq_info = &pf->vmdq[i];
4646 vmdq_info->vsi = vsi;
4648 pf->nb_cfg_vmdq_vsi = conf_vsis;
4650 /* Configure Vlan */
4651 loop = sizeof(vmdq_conf->pool_map[0].pools) * CHAR_BIT;
4652 for (i = 0; i < vmdq_conf->nb_pool_maps; i++) {
4653 for (j = 0; j < loop && j < pf->nb_cfg_vmdq_vsi; j++) {
4654 if (vmdq_conf->pool_map[i].pools & (1UL << j)) {
4655 PMD_INIT_LOG(INFO, "Add vlan %u to vmdq pool %u",
4656 vmdq_conf->pool_map[i].vlan_id, j);
4658 err = i40e_vsi_add_vlan(pf->vmdq[j].vsi,
4659 vmdq_conf->pool_map[i].vlan_id);
4661 PMD_INIT_LOG(ERR, "Failed to add vlan");
4669 i40e_pf_enable_irq0(hw);
4674 for (i = 0; i < conf_vsis; i++)
4675 if (pf->vmdq[i].vsi == NULL)
4678 i40e_vsi_release(pf->vmdq[i].vsi);
4682 i40e_pf_enable_irq0(hw);
4687 i40e_stat_update_32(struct i40e_hw *hw,
4695 new_data = (uint64_t)I40E_READ_REG(hw, reg);
4699 if (new_data >= *offset)
4700 *stat = (uint64_t)(new_data - *offset);
4702 *stat = (uint64_t)((new_data +
4703 ((uint64_t)1 << I40E_32_BIT_WIDTH)) - *offset);
4707 i40e_stat_update_48(struct i40e_hw *hw,
4716 new_data = (uint64_t)I40E_READ_REG(hw, loreg);
4717 new_data |= ((uint64_t)(I40E_READ_REG(hw, hireg) &
4718 I40E_16_BIT_MASK)) << I40E_32_BIT_WIDTH;
4723 if (new_data >= *offset)
4724 *stat = new_data - *offset;
4726 *stat = (uint64_t)((new_data +
4727 ((uint64_t)1 << I40E_48_BIT_WIDTH)) - *offset);
4729 *stat &= I40E_48_BIT_MASK;
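/*
 * Illustrative sketch (not part of the driver): the wraparound handling
 * above in isolation.  A 48-bit counter that has wrapped reads lower
 * than the saved offset; adding 2^48 before subtracting recovers the
 * true delta.  E.g. offset = 0xFFFFFFFFFFF0 and new_data = 0x10 yield
 * a delta of 0x20.
 */
static inline uint64_t
stat_delta_48(uint64_t new_data, uint64_t offset)
{
	if (new_data >= offset)
		return new_data - offset;
	/* Counter wrapped around 2^48 */
	return ((new_data + ((uint64_t)1 << I40E_48_BIT_WIDTH)) - offset) &
		I40E_48_BIT_MASK;
}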
4734 i40e_pf_disable_irq0(struct i40e_hw *hw)
4736 /* Disable all interrupt types */
4737 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, 0);
4738 I40E_WRITE_FLUSH(hw);
4743 i40e_pf_enable_irq0(struct i40e_hw *hw)
4745 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
4746 I40E_PFINT_DYN_CTL0_INTENA_MASK |
4747 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
4748 I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
4749 I40E_WRITE_FLUSH(hw);
4753 i40e_pf_config_irq0(struct i40e_hw *hw, bool no_queue)
4755 /* read pending request and disable first */
4756 i40e_pf_disable_irq0(hw);
4757 I40E_WRITE_REG(hw, I40E_PFINT_ICR0_ENA, I40E_PFINT_ICR0_ENA_MASK);
4758 I40E_WRITE_REG(hw, I40E_PFINT_STAT_CTL0,
4759 I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK);
4762 /* Link no queues with irq0 */
4763 I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
4764 I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK);
4768 i40e_dev_handle_vfr_event(struct rte_eth_dev *dev)
4770 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4771 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4774 uint32_t index, offset, val;
4779 * Try to find which VF triggered a reset; use the absolute VF id,
4780 * since the register is a global one.
4782 for (i = 0; i < pf->vf_num; i++) {
4783 abs_vf_id = hw->func_caps.vf_base_id + i;
4784 index = abs_vf_id / I40E_UINT32_BIT_SIZE;
4785 offset = abs_vf_id % I40E_UINT32_BIT_SIZE;
4786 val = I40E_READ_REG(hw, I40E_GLGEN_VFLRSTAT(index));
4787 /* VFR event occurred */
4788 if (val & (0x1 << offset)) {
4791 /* Clear the event first */
4792 I40E_WRITE_REG(hw, I40E_GLGEN_VFLRSTAT(index),
4794 PMD_DRV_LOG(INFO, "VF %u reset occurred", abs_vf_id);
4796 * Only notify that a VF reset event occurred;
4797 * don't trigger another SW reset
4799 ret = i40e_pf_host_vf_reset(&pf->vfs[i], 0);
4800 if (ret != I40E_SUCCESS)
4801 PMD_DRV_LOG(ERR, "Failed to do VF reset");
4807 i40e_dev_handle_aq_msg(struct rte_eth_dev *dev)
4809 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4810 struct i40e_arq_event_info info;
4811 uint16_t pending, opcode;
4814 info.buf_len = I40E_AQ_BUF_SZ;
4815 info.msg_buf = rte_zmalloc("msg_buffer", info.buf_len, 0);
4816 if (!info.msg_buf) {
4817 PMD_DRV_LOG(ERR, "Failed to allocate mem");
4823 ret = i40e_clean_arq_element(hw, &info, &pending);
4825 if (ret != I40E_SUCCESS) {
4826 PMD_DRV_LOG(INFO, "Failed to read msg from AdminQ, "
4827 "aq_err: %u", hw->aq.asq_last_status);
4830 opcode = rte_le_to_cpu_16(info.desc.opcode);
4833 case i40e_aqc_opc_send_msg_to_pf:
4834 /* Refer to i40e_aq_send_msg_to_pf() for argument layout */
4835 i40e_pf_host_handle_vf_msg(dev,
4836 rte_le_to_cpu_16(info.desc.retval),
4837 rte_le_to_cpu_32(info.desc.cookie_high),
4838 rte_le_to_cpu_32(info.desc.cookie_low),
4843 PMD_DRV_LOG(ERR, "Request %u is not supported yet",
4848 rte_free(info.msg_buf);
4852 * Interrupt handler registered as the alarm callback to handle the LSC
4853 * interrupt after a fixed delay, in order to wait for the NIC to reach a
4854 * stable state. Currently i40e waits 1 sec for the link up interrupt; no
4855 * wait is needed for the link down interrupt.
4858 i40e_dev_interrupt_delayed_handler(void *param)
4860 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
4861 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4864 /* read interrupt causes again */
4865 icr0 = I40E_READ_REG(hw, I40E_PFINT_ICR0);
4867 #ifdef RTE_LIBRTE_I40E_DEBUG_DRIVER
4868 if (icr0 & I40E_PFINT_ICR0_ECC_ERR_MASK)
4869 PMD_DRV_LOG(ERR, "ICR0: unrecoverable ECC error\n");
4870 if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK)
4871 PMD_DRV_LOG(ERR, "ICR0: malicious programming detected\n");
4872 if (icr0 & I40E_PFINT_ICR0_GRST_MASK)
4873 PMD_DRV_LOG(INFO, "ICR0: global reset requested\n");
4874 if (icr0 & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
4875 PMD_DRV_LOG(INFO, "ICR0: PCI exception\n activated\n");
4876 if (icr0 & I40E_PFINT_ICR0_STORM_DETECT_MASK)
4877 PMD_DRV_LOG(INFO, "ICR0: a change in the storm control "
4879 if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK)
4880 PMD_DRV_LOG(ERR, "ICR0: HMC error\n");
4881 if (icr0 & I40E_PFINT_ICR0_PE_CRITERR_MASK)
4882 PMD_DRV_LOG(ERR, "ICR0: protocol engine critical error\n");
4883 #endif /* RTE_LIBRTE_I40E_DEBUG_DRIVER */
4885 if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
4886 PMD_DRV_LOG(INFO, "INT: VF reset detected");
4887 i40e_dev_handle_vfr_event(dev);
4889 if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
4890 PMD_DRV_LOG(INFO, "INT: ADMINQ event");
4891 i40e_dev_handle_aq_msg(dev);
4894 /* handle the link up interrupt in an alarm callback */
4895 i40e_dev_link_update(dev, 0);
4896 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);
4898 i40e_pf_enable_irq0(hw);
4899 rte_intr_enable(&(dev->pci_dev->intr_handle));
4903 * Interrupt handler triggered by NIC for handling
4904 * specific interrupt.
4907 * Pointer to interrupt handle.
4909 * The address of the parameter (struct rte_eth_dev *) registered before.
4915 i40e_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
4918 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
4919 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4922 /* Disable interrupt */
4923 i40e_pf_disable_irq0(hw);
4925 /* read out interrupt causes */
4926 icr0 = I40E_READ_REG(hw, I40E_PFINT_ICR0);
4928 /* No interrupt event indicated */
4929 if (!(icr0 & I40E_PFINT_ICR0_INTEVENT_MASK)) {
4930 PMD_DRV_LOG(INFO, "No interrupt event");
4933 #ifdef RTE_LIBRTE_I40E_DEBUG_DRIVER
4934 if (icr0 & I40E_PFINT_ICR0_ECC_ERR_MASK)
4935 PMD_DRV_LOG(ERR, "ICR0: unrecoverable ECC error");
4936 if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK)
4937 PMD_DRV_LOG(ERR, "ICR0: malicious programming detected");
4938 if (icr0 & I40E_PFINT_ICR0_GRST_MASK)
4939 PMD_DRV_LOG(INFO, "ICR0: global reset requested");
4940 if (icr0 & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
4941 PMD_DRV_LOG(INFO, "ICR0: PCI exception activated");
4942 if (icr0 & I40E_PFINT_ICR0_STORM_DETECT_MASK)
4943 PMD_DRV_LOG(INFO, "ICR0: a change in the storm control state");
4944 if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK)
4945 PMD_DRV_LOG(ERR, "ICR0: HMC error");
4946 if (icr0 & I40E_PFINT_ICR0_PE_CRITERR_MASK)
4947 PMD_DRV_LOG(ERR, "ICR0: protocol engine critical error");
4948 #endif /* RTE_LIBRTE_I40E_DEBUG_DRIVER */
4950 if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
4951 PMD_DRV_LOG(INFO, "ICR0: VF reset detected");
4952 i40e_dev_handle_vfr_event(dev);
4954 if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
4955 PMD_DRV_LOG(INFO, "ICR0: adminq event");
4956 i40e_dev_handle_aq_msg(dev);
4959 /* Link Status Change interrupt */
4960 if (icr0 & I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK) {
4961 #define I40E_US_PER_SECOND 1000000
4962 struct rte_eth_link link;
4964 PMD_DRV_LOG(INFO, "ICR0: link status changed");
4965 memset(&link, 0, sizeof(link));
4966 rte_i40e_dev_atomic_read_link_status(dev, &link);
4967 i40e_dev_link_update(dev, 0);
4970 * For a link up interrupt, it needs to wait 1 second to let the
4971 * hardware reach a stable state; otherwise several consecutive
4972 * interrupts can be observed.
4973 * For a link down interrupt, there is no need to wait.
4975 if (!link.link_status && rte_eal_alarm_set(I40E_US_PER_SECOND,
4976 i40e_dev_interrupt_delayed_handler, (void *)dev) >= 0)
4979 _rte_eth_dev_callback_process(dev,
4980 RTE_ETH_EVENT_INTR_LSC);
4984 /* Enable interrupt */
4985 i40e_pf_enable_irq0(hw);
4986 rte_intr_enable(&(dev->pci_dev->intr_handle));
4990 i40e_add_macvlan_filters(struct i40e_vsi *vsi,
4991 struct i40e_macvlan_filter *filter,
4994 int ele_num, ele_buff_size;
4995 int num, actual_num, i;
4997 int ret = I40E_SUCCESS;
4998 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
4999 struct i40e_aqc_add_macvlan_element_data *req_list;
5001 if (filter == NULL || total == 0)
5002 return I40E_ERR_PARAM;
5003 ele_num = hw->aq.asq_buf_size / sizeof(*req_list);
5004 ele_buff_size = hw->aq.asq_buf_size;
5006 req_list = rte_zmalloc("macvlan_add", ele_buff_size, 0);
5007 if (req_list == NULL) {
5008 PMD_DRV_LOG(ERR, "Fail to allocate memory");
5009 return I40E_ERR_NO_MEMORY;
5014 actual_num = (num + ele_num > total) ? (total - num) : ele_num;
5015 memset(req_list, 0, ele_buff_size);
5017 for (i = 0; i < actual_num; i++) {
5018 (void)rte_memcpy(req_list[i].mac_addr,
5019 &filter[num + i].macaddr, ETH_ADDR_LEN);
5020 req_list[i].vlan_tag =
5021 rte_cpu_to_le_16(filter[num + i].vlan_id);
5023 switch (filter[num + i].filter_type) {
5024 case RTE_MAC_PERFECT_MATCH:
5025 flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH |
5026 I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
5028 case RTE_MACVLAN_PERFECT_MATCH:
5029 flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
5031 case RTE_MAC_HASH_MATCH:
5032 flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH |
5033 I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
5035 case RTE_MACVLAN_HASH_MATCH:
5036 flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH;
5039 PMD_DRV_LOG(ERR, "Invalid MAC match type");
5040 ret = I40E_ERR_PARAM;
5044 req_list[i].queue_number = 0;
5046 req_list[i].flags = rte_cpu_to_le_16(flags);
5049 ret = i40e_aq_add_macvlan(hw, vsi->seid, req_list,
5051 if (ret != I40E_SUCCESS) {
5052 PMD_DRV_LOG(ERR, "Failed to add macvlan filter");
5056 } while (num < total);
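/*
 * Illustrative sketch (not part of the driver): the loop above sends
 * `total` filters in AdminQ-buffer-sized batches.  The per-round batch
 * size computation in isolation:
 */
static inline int
macvlan_batch_size(int num_done, int ele_num, int total)
{
	/* A full buffer of ele_num entries, or the remaining tail */
	return (num_done + ele_num > total) ? (total - num_done) : ele_num;
}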
5064 i40e_remove_macvlan_filters(struct i40e_vsi *vsi,
5065 struct i40e_macvlan_filter *filter,
5068 int ele_num, ele_buff_size;
5069 int num, actual_num, i;
5071 int ret = I40E_SUCCESS;
5072 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
5073 struct i40e_aqc_remove_macvlan_element_data *req_list;
5075 if (filter == NULL || total == 0)
5076 return I40E_ERR_PARAM;
5078 ele_num = hw->aq.asq_buf_size / sizeof(*req_list);
5079 ele_buff_size = hw->aq.asq_buf_size;
5081 req_list = rte_zmalloc("macvlan_remove", ele_buff_size, 0);
5082 if (req_list == NULL) {
5083 PMD_DRV_LOG(ERR, "Fail to allocate memory");
5084 return I40E_ERR_NO_MEMORY;
5089 actual_num = (num + ele_num > total) ? (total - num) : ele_num;
5090 memset(req_list, 0, ele_buff_size);
5092 for (i = 0; i < actual_num; i++) {
5093 (void)rte_memcpy(req_list[i].mac_addr,
5094 &filter[num + i].macaddr, ETH_ADDR_LEN);
5095 req_list[i].vlan_tag =
5096 rte_cpu_to_le_16(filter[num + i].vlan_id);
5098 switch (filter[num + i].filter_type) {
5099 case RTE_MAC_PERFECT_MATCH:
5100 flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
5101 I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
5103 case RTE_MACVLAN_PERFECT_MATCH:
5104 flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
5106 case RTE_MAC_HASH_MATCH:
5107 flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH |
5108 I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
5110 case RTE_MACVLAN_HASH_MATCH:
5111 flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH;
5114 PMD_DRV_LOG(ERR, "Invalid MAC filter type");
5115 ret = I40E_ERR_PARAM;
5118 req_list[i].flags = rte_cpu_to_le_16(flags);
5121 ret = i40e_aq_remove_macvlan(hw, vsi->seid, req_list,
5123 if (ret != I40E_SUCCESS) {
5124 PMD_DRV_LOG(ERR, "Failed to remove macvlan filter");
5128 } while (num < total);
5135 /* Find out specific MAC filter */
5136 static struct i40e_mac_filter *
5137 i40e_find_mac_filter(struct i40e_vsi *vsi,
5138 struct ether_addr *macaddr)
5140 struct i40e_mac_filter *f;
5142 TAILQ_FOREACH(f, &vsi->mac_list, next) {
5143 if (is_same_ether_addr(macaddr, &f->mac_info.mac_addr))
5151 i40e_find_vlan_filter(struct i40e_vsi *vsi,
5154 uint32_t vid_idx, vid_bit;
5156 if (vlan_id > ETH_VLAN_ID_MAX)
5159 vid_idx = I40E_VFTA_IDX(vlan_id);
5160 vid_bit = I40E_VFTA_BIT(vlan_id);
5162 if (vsi->vfta[vid_idx] & vid_bit)
5169 i40e_set_vlan_filter(struct i40e_vsi *vsi,
5170 uint16_t vlan_id, bool on)
5172 uint32_t vid_idx, vid_bit;
5174 if (vlan_id > ETH_VLAN_ID_MAX)
5177 vid_idx = I40E_VFTA_IDX(vlan_id);
5178 vid_bit = I40E_VFTA_BIT(vlan_id);
5181 vsi->vfta[vid_idx] |= vid_bit;
5183 vsi->vfta[vid_idx] &= ~vid_bit;
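/*
 * Illustrative sketch (not part of the driver): the VFTA is a bitmap of
 * all 4096 VLAN ids packed into 32-bit words, so a VLAN id splits into
 * a word index (id >> 5) and a bit position (id & 0x1F), which is what
 * the I40E_VFTA_IDX/I40E_VFTA_BIT macros are assumed to compute.  A
 * test helper in those terms:
 */
static inline bool
vfta_is_set(const uint32_t *vfta, uint16_t vlan_id)
{
	/* E.g. VLAN 100 lives in word 3 (100 >> 5), bit 4 (100 & 0x1F) */
	return (vfta[vlan_id >> 5] & (1u << (vlan_id & 0x1F))) != 0;
}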
5187 * Find all vlan options for specific mac addr,
5188 * return with actual vlan found.
5191 i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi,
5192 struct i40e_macvlan_filter *mv_f,
5193 int num, struct ether_addr *addr)
5199 * i40e_find_vlan_filter is not used here, to decrease the loop time,
5200 * although it makes the code look more complex.
5202 if (num < vsi->vlan_num)
5203 return I40E_ERR_PARAM;
5206 for (j = 0; j < I40E_VFTA_SIZE; j++) {
5208 for (k = 0; k < I40E_UINT32_BIT_SIZE; k++) {
5209 if (vsi->vfta[j] & (1 << k)) {
5211 PMD_DRV_LOG(ERR, "vlan number "
5213 return I40E_ERR_PARAM;
5215 (void)rte_memcpy(&mv_f[i].macaddr,
5216 addr, ETH_ADDR_LEN);
5218 j * I40E_UINT32_BIT_SIZE + k;
5224 return I40E_SUCCESS;
5228 i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
5229 struct i40e_macvlan_filter *mv_f,
5234 struct i40e_mac_filter *f;
5236 if (num < vsi->mac_num)
5237 return I40E_ERR_PARAM;
5239 TAILQ_FOREACH(f, &vsi->mac_list, next) {
5241 PMD_DRV_LOG(ERR, "buffer number doesn't match");
5242 return I40E_ERR_PARAM;
5244 (void)rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr,
5246 mv_f[i].vlan_id = vlan;
5247 mv_f[i].filter_type = f->mac_info.filter_type;
5251 return I40E_SUCCESS;
5255 i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi)
5258 struct i40e_mac_filter *f;
5259 struct i40e_macvlan_filter *mv_f;
5260 int ret = I40E_SUCCESS;
5262 if (vsi == NULL || vsi->mac_num == 0)
5263 return I40E_ERR_PARAM;
5265 /* Case that no vlan is set */
5266 if (vsi->vlan_num == 0)
5269 num = vsi->mac_num * vsi->vlan_num;
5271 mv_f = rte_zmalloc("macvlan_data", num * sizeof(*mv_f), 0);
5273 PMD_DRV_LOG(ERR, "failed to allocate memory");
5274 return I40E_ERR_NO_MEMORY;
5278 if (vsi->vlan_num == 0) {
5279 TAILQ_FOREACH(f, &vsi->mac_list, next) {
5280 (void)rte_memcpy(&mv_f[i].macaddr,
5281 &f->mac_info.mac_addr, ETH_ADDR_LEN);
5282 mv_f[i].vlan_id = 0;
5286 TAILQ_FOREACH(f, &vsi->mac_list, next) {
5287 ret = i40e_find_all_vlan_for_mac(vsi,&mv_f[i],
5288 vsi->vlan_num, &f->mac_info.mac_addr);
5289 if (ret != I40E_SUCCESS)
5295 ret = i40e_remove_macvlan_filters(vsi, mv_f, num);
5303 i40e_vsi_add_vlan(struct i40e_vsi *vsi, uint16_t vlan)
5305 struct i40e_macvlan_filter *mv_f;
5307 int ret = I40E_SUCCESS;
5309 if (!vsi || vlan > ETHER_MAX_VLAN_ID)
5310 return I40E_ERR_PARAM;
5312 /* If it's already set, just return */
5313 if (i40e_find_vlan_filter(vsi,vlan))
5314 return I40E_SUCCESS;
5316 mac_num = vsi->mac_num;
5319 PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr");
5320 return I40E_ERR_PARAM;
5323 mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);
5326 PMD_DRV_LOG(ERR, "failed to allocate memory");
5327 return I40E_ERR_NO_MEMORY;
5330 ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan);
5332 if (ret != I40E_SUCCESS)
5335 ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num);
5337 if (ret != I40E_SUCCESS)
5340 i40e_set_vlan_filter(vsi, vlan, 1);
5350 i40e_vsi_delete_vlan(struct i40e_vsi *vsi, uint16_t vlan)
5352 struct i40e_macvlan_filter *mv_f;
5354 int ret = I40E_SUCCESS;
5357 * Vlan 0 is the generic filter for untagged packets
5358 * and can't be removed.
5360 if (!vsi || vlan == 0 || vlan > ETHER_MAX_VLAN_ID)
5361 return I40E_ERR_PARAM;
5363 /* If it can't be found, just return */
5364 if (!i40e_find_vlan_filter(vsi, vlan))
5365 return I40E_ERR_PARAM;
5367 mac_num = vsi->mac_num;
5370 PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr");
5371 return I40E_ERR_PARAM;
5374 mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);
5377 PMD_DRV_LOG(ERR, "failed to allocate memory");
5378 return I40E_ERR_NO_MEMORY;
5381 ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan);
5383 if (ret != I40E_SUCCESS)
5386 ret = i40e_remove_macvlan_filters(vsi, mv_f, mac_num);
5388 if (ret != I40E_SUCCESS)
5391 /* This is the last vlan to remove; replace all mac filters with vlan 0 */
5392 if (vsi->vlan_num == 1) {
5393 ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, 0);
5394 if (ret != I40E_SUCCESS)
5397 ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num);
5398 if (ret != I40E_SUCCESS)
5402 i40e_set_vlan_filter(vsi, vlan, 0);
5412 i40e_vsi_add_mac(struct i40e_vsi *vsi, struct i40e_mac_filter_info *mac_filter)
5414 struct i40e_mac_filter *f;
5415 struct i40e_macvlan_filter *mv_f;
5416 int i, vlan_num = 0;
5417 int ret = I40E_SUCCESS;
5419 /* If it's an add and we've already configured it, return */
5420 f = i40e_find_mac_filter(vsi, &mac_filter->mac_addr);
5422 return I40E_SUCCESS;
5423 if ((mac_filter->filter_type == RTE_MACVLAN_PERFECT_MATCH) ||
5424 (mac_filter->filter_type == RTE_MACVLAN_HASH_MATCH)) {
5427 * If vlan_num is 0, that's the first time to add a mac;
5428 * set the mask for vlan_id 0.
5430 if (vsi->vlan_num == 0) {
5431 i40e_set_vlan_filter(vsi, 0, 1);
5434 vlan_num = vsi->vlan_num;
5435 } else if ((mac_filter->filter_type == RTE_MAC_PERFECT_MATCH) ||
5436 (mac_filter->filter_type == RTE_MAC_HASH_MATCH))
5439 mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
5441 PMD_DRV_LOG(ERR, "failed to allocate memory");
5442 return I40E_ERR_NO_MEMORY;
5445 for (i = 0; i < vlan_num; i++) {
5446 mv_f[i].filter_type = mac_filter->filter_type;
5447 (void)rte_memcpy(&mv_f[i].macaddr, &mac_filter->mac_addr,
5451 if (mac_filter->filter_type == RTE_MACVLAN_PERFECT_MATCH ||
5452 mac_filter->filter_type == RTE_MACVLAN_HASH_MATCH) {
5453 ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num,
5454 &mac_filter->mac_addr);
5455 if (ret != I40E_SUCCESS)
5459 ret = i40e_add_macvlan_filters(vsi, mv_f, vlan_num);
5460 if (ret != I40E_SUCCESS)
5463 /* Add the mac addr into mac list */
5464 f = rte_zmalloc("macv_filter", sizeof(*f), 0);
5466 PMD_DRV_LOG(ERR, "failed to allocate memory");
5467 ret = I40E_ERR_NO_MEMORY;
5470 (void)rte_memcpy(&f->mac_info.mac_addr, &mac_filter->mac_addr,
5472 f->mac_info.filter_type = mac_filter->filter_type;
5473 TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
5484 i40e_vsi_delete_mac(struct i40e_vsi *vsi, struct ether_addr *addr)
5486 struct i40e_mac_filter *f;
5487 struct i40e_macvlan_filter *mv_f;
5489 enum rte_mac_filter_type filter_type;
5490 int ret = I40E_SUCCESS;
5492 /* Can't find it, return an error */
5493 f = i40e_find_mac_filter(vsi, addr);
5495 return I40E_ERR_PARAM;
5497 vlan_num = vsi->vlan_num;
5498 filter_type = f->mac_info.filter_type;
5499 if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
5500 filter_type == RTE_MACVLAN_HASH_MATCH) {
5501 if (vlan_num == 0) {
5502 PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0");
5503 return I40E_ERR_PARAM;
5505 } else if (filter_type == RTE_MAC_PERFECT_MATCH ||
5506 filter_type == RTE_MAC_HASH_MATCH)
5509 mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
5511 PMD_DRV_LOG(ERR, "failed to allocate memory");
5512 return I40E_ERR_NO_MEMORY;
5515 for (i = 0; i < vlan_num; i++) {
5516 mv_f[i].filter_type = filter_type;
5517 (void)rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr,
5520 if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
5521 filter_type == RTE_MACVLAN_HASH_MATCH) {
5522 ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num, addr);
5523 if (ret != I40E_SUCCESS)
5527 ret = i40e_remove_macvlan_filters(vsi, mv_f, vlan_num);
5528 if (ret != I40E_SUCCESS)
5531 /* Remove the mac addr from the mac list */
5532 TAILQ_REMOVE(&vsi->mac_list, f, next);
5542 /* Configure hash enable flags for RSS */
5544 i40e_config_hena(uint64_t flags)
5551 if (flags & ETH_RSS_FRAG_IPV4)
5552 hena |= 1ULL << I40E_FILTER_PCTYPE_FRAG_IPV4;
5553 if (flags & ETH_RSS_NONFRAG_IPV4_TCP)
5554 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
5555 if (flags & ETH_RSS_NONFRAG_IPV4_UDP)
5556 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
5557 if (flags & ETH_RSS_NONFRAG_IPV4_SCTP)
5558 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP;
5559 if (flags & ETH_RSS_NONFRAG_IPV4_OTHER)
5560 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
5561 if (flags & ETH_RSS_FRAG_IPV6)
5562 hena |= 1ULL << I40E_FILTER_PCTYPE_FRAG_IPV6;
5563 if (flags & ETH_RSS_NONFRAG_IPV6_TCP)
5564 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP;
5565 if (flags & ETH_RSS_NONFRAG_IPV6_UDP)
5566 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP;
5567 if (flags & ETH_RSS_NONFRAG_IPV6_SCTP)
5568 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP;
5569 if (flags & ETH_RSS_NONFRAG_IPV6_OTHER)
5570 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER;
5571 if (flags & ETH_RSS_L2_PAYLOAD)
5572 hena |= 1ULL << I40E_FILTER_PCTYPE_L2_PAYLOAD;
5577 /* Parse the hash enable flags */
5579 i40e_parse_hena(uint64_t flags)
5581 uint64_t rss_hf = 0;
5585 if (flags & (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV4))
5586 rss_hf |= ETH_RSS_FRAG_IPV4;
5587 if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP))
5588 rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
5589 if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP))
5590 rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
5591 if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP))
5592 rss_hf |= ETH_RSS_NONFRAG_IPV4_SCTP;
5593 if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER))
5594 rss_hf |= ETH_RSS_NONFRAG_IPV4_OTHER;
5595 if (flags & (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV6))
5596 rss_hf |= ETH_RSS_FRAG_IPV6;
5597 if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP))
5598 rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
5599 if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP))
5600 rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
5601 if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP))
5602 rss_hf |= ETH_RSS_NONFRAG_IPV6_SCTP;
5603 if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER))
5604 rss_hf |= ETH_RSS_NONFRAG_IPV6_OTHER;
5605 if (flags & (1ULL << I40E_FILTER_PCTYPE_L2_PAYLOAD))
5606 rss_hf |= ETH_RSS_L2_PAYLOAD;
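/*
 * Illustrative sketch (not part of the driver): i40e_config_hena() and
 * i40e_parse_hena() are intended as inverses -- each RTE RSS flag maps
 * to one hardware PCTYPE bit -- so a round trip should preserve any set
 * of supported flags:
 */
static inline bool
hena_round_trip_ok(uint64_t rss_hf)
{
	/* Expect the supported flags back unchanged */
	return i40e_parse_hena(i40e_config_hena(rss_hf)) == rss_hf;
}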
5613 i40e_pf_disable_rss(struct i40e_pf *pf)
5615 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5618 hena = (uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(0));
5619 hena |= ((uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(1))) << 32;
5620 hena &= ~I40E_RSS_HENA_ALL;
5621 I40E_WRITE_REG(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
5622 I40E_WRITE_REG(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
5623 I40E_WRITE_FLUSH(hw);
5627 i40e_set_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t key_len)
5629 struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
5630 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
5633 if (!key || key_len != ((I40E_PFQF_HKEY_MAX_INDEX + 1) *
5637 if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
5638 struct i40e_aqc_get_set_rss_key_data *key_dw =
5639 (struct i40e_aqc_get_set_rss_key_data *)key;
5641 ret = i40e_aq_set_rss_key(hw, vsi->vsi_id, key_dw);
5643 PMD_INIT_LOG(ERR, "Failed to configure RSS key "
5646 uint32_t *hash_key = (uint32_t *)key;
5649 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
5650 I40E_WRITE_REG(hw, I40E_PFQF_HKEY(i), hash_key[i]);
5651 I40E_WRITE_FLUSH(hw);
5658 i40e_get_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t *key_len)
5660 struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
5661 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
5664 if (!key || !key_len)
5667 if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
5668 ret = i40e_aq_get_rss_key(hw, vsi->vsi_id,
5669 (struct i40e_aqc_get_set_rss_key_data *)key);
5671 PMD_INIT_LOG(ERR, "Failed to get RSS key via AQ");
5675 uint32_t *key_dw = (uint32_t *)key;
5678 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
5679 key_dw[i] = I40E_READ_REG(hw, I40E_PFQF_HKEY(i));
5681 *key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
5687 i40e_hw_rss_hash_set(struct i40e_pf *pf, struct rte_eth_rss_conf *rss_conf)
5689 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5694 ret = i40e_set_rss_key(pf->main_vsi, rss_conf->rss_key,
5695 rss_conf->rss_key_len);
5699 rss_hf = rss_conf->rss_hf;
5700 hena = (uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(0));
5701 hena |= ((uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(1))) << 32;
5702 hena &= ~I40E_RSS_HENA_ALL;
5703 hena |= i40e_config_hena(rss_hf);
5704 I40E_WRITE_REG(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
5705 I40E_WRITE_REG(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
5706 I40E_WRITE_FLUSH(hw);
5712 i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
5713 struct rte_eth_rss_conf *rss_conf)
5715 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5716 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5717 uint64_t rss_hf = rss_conf->rss_hf & I40E_RSS_OFFLOAD_ALL;
5720 hena = (uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(0));
5721 hena |= ((uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(1))) << 32;
5722 if (!(hena & I40E_RSS_HENA_ALL)) { /* RSS disabled */
5723 if (rss_hf != 0) /* Enable RSS */
5725 return 0; /* Nothing to do */
5728 if (rss_hf == 0) /* Disable RSS */
5731 return i40e_hw_rss_hash_set(pf, rss_conf);
5735 i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
5736 struct rte_eth_rss_conf *rss_conf)
5738 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5739 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5742 i40e_get_rss_key(pf->main_vsi, rss_conf->rss_key,
5743 &rss_conf->rss_key_len);
5745 hena = (uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(0));
5746 hena |= ((uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(1))) << 32;
5747 rss_conf->rss_hf = i40e_parse_hena(hena);
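/*
 * Illustrative sketch (not part of the driver): HENA is a 64-bit bitmap
 * exposed as two 32-bit registers, so reads stitch the halves together
 * (as above) and writes split them back apart:
 */
static inline uint64_t
hena_combine(uint32_t lo, uint32_t hi)
{
	return (uint64_t)lo | ((uint64_t)hi << 32);
}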
5753 i40e_dev_get_filter_type(uint16_t filter_type, uint16_t *flag)
5755 switch (filter_type) {
5756 case RTE_TUNNEL_FILTER_IMAC_IVLAN:
5757 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN;
5759 case RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID:
5760 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID;
5762 case RTE_TUNNEL_FILTER_IMAC_TENID:
5763 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID;
5765 case RTE_TUNNEL_FILTER_OMAC_TENID_IMAC:
5766 *flag = I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC;
5768 case ETH_TUNNEL_FILTER_IMAC:
5769 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC;
5772 PMD_DRV_LOG(ERR, "invalid tunnel filter type");
5780 i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
5781 struct rte_eth_tunnel_filter_conf *tunnel_filter,
5785 uint8_t tun_type = 0;
5787 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5788 struct i40e_vsi *vsi = pf->main_vsi;
5789 struct i40e_aqc_add_remove_cloud_filters_element_data *cld_filter;
5790 struct i40e_aqc_add_remove_cloud_filters_element_data *pfilter;
5792 cld_filter = rte_zmalloc("tunnel_filter",
5793 sizeof(struct i40e_aqc_add_remove_cloud_filters_element_data),
5796 if (NULL == cld_filter) {
5797 PMD_DRV_LOG(ERR, "Failed to alloc memory.");
5800 pfilter = cld_filter;
5802 (void)rte_memcpy(&pfilter->outer_mac, tunnel_filter->outer_mac,
5803 sizeof(struct ether_addr));
5804 (void)rte_memcpy(&pfilter->inner_mac, tunnel_filter->inner_mac,
5805 sizeof(struct ether_addr));
5807 pfilter->inner_vlan = tunnel_filter->inner_vlan;
5808 if (tunnel_filter->ip_type == RTE_TUNNEL_IPTYPE_IPV4) {
5809 ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV4;
5810 (void)rte_memcpy(&pfilter->ipaddr.v4.data,
5811 &tunnel_filter->ip_addr,
5812 sizeof(pfilter->ipaddr.v4.data));
5814 ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV6;
5815 (void)rte_memcpy(&pfilter->ipaddr.v6.data,
5816 &tunnel_filter->ip_addr,
5817 sizeof(pfilter->ipaddr.v6.data));
5820 /* check tunneled type */
5821 switch (tunnel_filter->tunnel_type) {
5822 case RTE_TUNNEL_TYPE_VXLAN:
5823 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_XVLAN;
5825 case RTE_TUNNEL_TYPE_NVGRE:
5826 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC;
5829 /* Other tunnel types are not supported. */
5830 PMD_DRV_LOG(ERR, "tunnel type is not supported.");
5831 rte_free(cld_filter);
5835 val = i40e_dev_get_filter_type(tunnel_filter->filter_type,
5838 rte_free(cld_filter);
5842 pfilter->flags |= I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE | ip_type |
5843 (tun_type << I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT);
5844 pfilter->tenant_id = tunnel_filter->tenant_id;
5845 pfilter->queue_number = tunnel_filter->queue_id;
5848 ret = i40e_aq_add_cloud_filters(hw, vsi->seid, cld_filter, 1);
5850 ret = i40e_aq_remove_cloud_filters(hw, vsi->seid,
5853 rte_free(cld_filter);
5858 i40e_get_vxlan_port_idx(struct i40e_pf *pf, uint16_t port)
5862 for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
5863 if (pf->vxlan_ports[i] == port)
5871 i40e_add_vxlan_port(struct i40e_pf *pf, uint16_t port)
5875 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5877 idx = i40e_get_vxlan_port_idx(pf, port);
5879 /* Check if port already exists */
5881 PMD_DRV_LOG(ERR, "Port %d already offloaded", port);
5885 /* Now check if there is space to add the new port */
5886 idx = i40e_get_vxlan_port_idx(pf, 0);
5888 PMD_DRV_LOG(ERR, "Maximum number of UDP ports reached, "
5889 "not adding port %d", port);
5893 ret = i40e_aq_add_udp_tunnel(hw, port, I40E_AQC_TUNNEL_TYPE_VXLAN,
5896 PMD_DRV_LOG(ERR, "Failed to add VXLAN UDP port %d", port);
5900 PMD_DRV_LOG(INFO, "Added port %d with AQ command with index %d",
5903 /* New port: add it and mark its index in the bitmap */
5904 pf->vxlan_ports[idx] = port;
5905 pf->vxlan_bitmap |= (1 << idx);
5907 if (!(pf->flags & I40E_FLAG_VXLAN))
5908 pf->flags |= I40E_FLAG_VXLAN;
5914 i40e_del_vxlan_port(struct i40e_pf *pf, uint16_t port)
5917 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5919 if (!(pf->flags & I40E_FLAG_VXLAN)) {
5920 PMD_DRV_LOG(ERR, "VXLAN UDP port was not configured.");
5924 idx = i40e_get_vxlan_port_idx(pf, port);
5927 PMD_DRV_LOG(ERR, "Port %d doesn't exist", port);
5931 if (i40e_aq_del_udp_tunnel(hw, idx, NULL) < 0) {
5932 PMD_DRV_LOG(ERR, "Failed to delete VXLAN UDP port %d", port);
5936 PMD_DRV_LOG(INFO, "Deleted port %d with AQ command with index %d",
5939 pf->vxlan_ports[idx] = 0;
5940 pf->vxlan_bitmap &= ~(1 << idx);
5942 if (!pf->vxlan_bitmap)
5943 pf->flags &= ~I40E_FLAG_VXLAN;
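/*
 * Illustrative sketch (not part of the driver): offloaded VXLAN ports
 * are tracked with a small array of port numbers plus a bitmap of
 * occupied slots; port 0 marks a free entry.  Updating one slot keeps
 * the two views in sync:
 */
static inline void
vxlan_slot_update(uint16_t *ports, uint32_t *bitmap, int idx, uint16_t port)
{
	ports[idx] = port;
	if (port != 0)
		*bitmap |= (1u << idx);   /* slot now occupied */
	else
		*bitmap &= ~(1u << idx);  /* slot now free */
}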
5948 /* Add UDP tunneling port */
5950 i40e_dev_udp_tunnel_add(struct rte_eth_dev *dev,
5951 struct rte_eth_udp_tunnel *udp_tunnel)
5954 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5956 if (udp_tunnel == NULL)
5959 switch (udp_tunnel->prot_type) {
5960 case RTE_TUNNEL_TYPE_VXLAN:
5961 ret = i40e_add_vxlan_port(pf, udp_tunnel->udp_port);
5964 case RTE_TUNNEL_TYPE_GENEVE:
5965 case RTE_TUNNEL_TYPE_TEREDO:
5966 PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
5971 PMD_DRV_LOG(ERR, "Invalid tunnel type");
5979 /* Remove UDP tunneling port */
5981 i40e_dev_udp_tunnel_del(struct rte_eth_dev *dev,
5982 struct rte_eth_udp_tunnel *udp_tunnel)
5985 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5987 if (udp_tunnel == NULL)
5990 switch (udp_tunnel->prot_type) {
5991 case RTE_TUNNEL_TYPE_VXLAN:
5992 ret = i40e_del_vxlan_port(pf, udp_tunnel->udp_port);
5994 case RTE_TUNNEL_TYPE_GENEVE:
5995 case RTE_TUNNEL_TYPE_TEREDO:
5996 PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
6000 PMD_DRV_LOG(ERR, "Invalid tunnel type");
6008 /* Calculate the maximum number of contiguous PF queues that are configured */
6010 i40e_pf_calc_configured_queues_num(struct i40e_pf *pf)
6012 struct rte_eth_dev_data *data = pf->dev_data;
6014 struct i40e_rx_queue *rxq;
6017 for (i = 0; i < pf->lan_nb_qps; i++) {
6018 rxq = data->rx_queues[i];
6019 if (rxq && rxq->q_set)
6030 i40e_pf_config_rss(struct i40e_pf *pf)
6032 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
6033 struct rte_eth_rss_conf rss_conf;
6034 uint32_t i, lut = 0;
6038 * If both VMDQ and RSS are enabled, not all PF queues are configured.
6039 * It's necessary to calculate the actual PF queues that are configured.
6041 if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
6042 num = i40e_pf_calc_configured_queues_num(pf);
6044 num = pf->dev_data->nb_rx_queues;
6046 num = RTE_MIN(num, I40E_MAX_Q_PER_TC);
6047 PMD_INIT_LOG(INFO, "Max of %u contiguous PF queues are configured",
6051 PMD_INIT_LOG(ERR, "No PF queues are configured to enable RSS");
6055 for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
6058 lut = (lut << 8) | (j & ((0x1 <<
6059 hw->func_caps.rss_table_entry_width) - 1));
6061 I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
6064 rss_conf = pf->dev_data->dev_conf.rx_adv_conf.rss_conf;
6065 if ((rss_conf.rss_hf & I40E_RSS_OFFLOAD_ALL) == 0) {
6066 i40e_pf_disable_rss(pf);
6069 if (rss_conf.rss_key == NULL || rss_conf.rss_key_len <
6070 (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
6071 /* Random default keys */
6072 static uint32_t rss_key_default[] = {0x6b793944,
6073 0x23504cb5, 0x5bea75b6, 0x309f4f12, 0x3dc0a2b8,
6074 0x024ddcdf, 0x339b8ca0, 0x4c4af64a, 0x34fac605,
6075 0x55d85839, 0x3a58997d, 0x2ec938e1, 0x66031581};
6077 rss_conf.rss_key = (uint8_t *)rss_key_default;
6078 rss_conf.rss_key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
6082 return i40e_hw_rss_hash_set(pf, &rss_conf);
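/*
 * Illustrative sketch (not part of the driver): the LUT fill above packs
 * four queue indexes into each 32-bit HLUT register via (lut << 8) | q,
 * writing every fourth iteration, so the first entry of each group ends
 * up in the top byte.  The same packing spelled out, assuming 8-bit
 * table entries:
 */
static inline uint32_t
hlut_pack4(uint8_t q0, uint8_t q1, uint8_t q2, uint8_t q3)
{
	return ((uint32_t)q0 << 24) | ((uint32_t)q1 << 16) |
	       ((uint32_t)q2 << 8) | (uint32_t)q3;
}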
6086 i40e_tunnel_filter_param_check(struct i40e_pf *pf,
6087 struct rte_eth_tunnel_filter_conf *filter)
6089 if (pf == NULL || filter == NULL) {
6090 PMD_DRV_LOG(ERR, "Invalid parameter");
6094 if (filter->queue_id >= pf->dev_data->nb_rx_queues) {
6095 PMD_DRV_LOG(ERR, "Invalid queue ID");
6099 if (filter->inner_vlan > ETHER_MAX_VLAN_ID) {
6100 PMD_DRV_LOG(ERR, "Invalid inner VLAN ID");
6104 if ((filter->filter_type & ETH_TUNNEL_FILTER_OMAC) &&
6105 (is_zero_ether_addr(filter->outer_mac))) {
6106 PMD_DRV_LOG(ERR, "Cannot add NULL outer MAC address");
6110 if ((filter->filter_type & ETH_TUNNEL_FILTER_IMAC) &&
6111 (is_zero_ether_addr(filter->inner_mac))) {
6112 PMD_DRV_LOG(ERR, "Cannot add NULL inner MAC address");
6119 #define I40E_GL_PRS_FVBM_MSK_ENA 0x80000000
6120 #define I40E_GL_PRS_FVBM(_i) (0x00269760 + ((_i) * 4))
6122 i40e_dev_set_gre_key_len(struct i40e_hw *hw, uint8_t len)
6127 val = I40E_READ_REG(hw, I40E_GL_PRS_FVBM(2));
6128 PMD_DRV_LOG(DEBUG, "Read original GL_PRS_FVBM with 0x%08x", val);
6131 reg = val | I40E_GL_PRS_FVBM_MSK_ENA;
6132 } else if (len == 4) {
6133 reg = val & ~I40E_GL_PRS_FVBM_MSK_ENA;
6135 PMD_DRV_LOG(ERR, "Unsupported GRE key length of %u", len);
6140 ret = i40e_aq_debug_write_register(hw, I40E_GL_PRS_FVBM(2),
6147 PMD_DRV_LOG(DEBUG, "Read modified GL_PRS_FVBM with 0x%08x",
6148 I40E_READ_REG(hw, I40E_GL_PRS_FVBM(2)));
6154 i40e_dev_global_config_set(struct i40e_hw *hw, struct rte_eth_global_cfg *cfg)
6161 switch (cfg->cfg_type) {
6162 case RTE_ETH_GLOBAL_CFG_TYPE_GRE_KEY_LEN:
6163 ret = i40e_dev_set_gre_key_len(hw, cfg->cfg.gre_key_len);
6166 PMD_DRV_LOG(ERR, "Unknown config type %u", cfg->cfg_type);
6174 i40e_filter_ctrl_global_config(struct rte_eth_dev *dev,
6175 enum rte_filter_op filter_op,
6178 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6179 int ret = I40E_ERR_PARAM;
6181 switch (filter_op) {
6182 case RTE_ETH_FILTER_SET:
6183 ret = i40e_dev_global_config_set(hw,
6184 (struct rte_eth_global_cfg *)arg);
6187 PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
6195 i40e_tunnel_filter_handle(struct rte_eth_dev *dev,
6196 enum rte_filter_op filter_op,
6199 struct rte_eth_tunnel_filter_conf *filter;
6200 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6201 int ret = I40E_SUCCESS;
6203 filter = (struct rte_eth_tunnel_filter_conf *)(arg);
6205 if (i40e_tunnel_filter_param_check(pf, filter) < 0)
6206 return I40E_ERR_PARAM;
6208 switch (filter_op) {
6209 case RTE_ETH_FILTER_NOP:
6210 if (!(pf->flags & I40E_FLAG_VXLAN))
6211 ret = I40E_NOT_SUPPORTED;
6213 case RTE_ETH_FILTER_ADD:
6214 ret = i40e_dev_tunnel_filter_set(pf, filter, 1);
6216 case RTE_ETH_FILTER_DELETE:
6217 ret = i40e_dev_tunnel_filter_set(pf, filter, 0);
6220 PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
6221 ret = I40E_ERR_PARAM;
6229 i40e_pf_config_mq_rx(struct i40e_pf *pf)
6232 enum rte_eth_rx_mq_mode mq_mode = pf->dev_data->dev_conf.rxmode.mq_mode;
6235 if (mq_mode & ETH_MQ_RX_RSS_FLAG)
6236 ret = i40e_pf_config_rss(pf);
6238 i40e_pf_disable_rss(pf);
6243 /* Get the symmetric hash enable configurations per port */
6245 i40e_get_symmetric_hash_enable_per_port(struct i40e_hw *hw, uint8_t *enable)
6247 uint32_t reg = I40E_READ_REG(hw, I40E_PRTQF_CTL_0);
6249 *enable = reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK ? 1 : 0;
6252 /* Set the symmetric hash enable configurations per port */
6254 i40e_set_symmetric_hash_enable_per_port(struct i40e_hw *hw, uint8_t enable)
6256 uint32_t reg = I40E_READ_REG(hw, I40E_PRTQF_CTL_0);
6259 if (reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK) {
6260 PMD_DRV_LOG(INFO, "Symmetric hash has already "
6264 reg |= I40E_PRTQF_CTL_0_HSYM_ENA_MASK;
6266 if (!(reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK)) {
6267 PMD_DRV_LOG(INFO, "Symmetric hash has already "
6271 reg &= ~I40E_PRTQF_CTL_0_HSYM_ENA_MASK;
6273 I40E_WRITE_REG(hw, I40E_PRTQF_CTL_0, reg);
6274 I40E_WRITE_FLUSH(hw);
6278 * Get global configurations of hash function type and symmetric hash enable
6279 * per flow type (pctype). Note that global configuration means it affects all
6280 * the ports on the same NIC.
6283 i40e_get_hash_filter_global_config(struct i40e_hw *hw,
6284 struct rte_eth_hash_global_conf *g_cfg)
6286 uint32_t reg, mask = I40E_FLOW_TYPES;
6288 enum i40e_filter_pctype pctype;
6290 memset(g_cfg, 0, sizeof(*g_cfg));
6291 reg = I40E_READ_REG(hw, I40E_GLQF_CTL);
6292 if (reg & I40E_GLQF_CTL_HTOEP_MASK)
6293 g_cfg->hash_func = RTE_ETH_HASH_FUNCTION_TOEPLITZ;
6295 g_cfg->hash_func = RTE_ETH_HASH_FUNCTION_SIMPLE_XOR;
6296 PMD_DRV_LOG(DEBUG, "Hash function is %s",
6297 (reg & I40E_GLQF_CTL_HTOEP_MASK) ? "Toeplitz" : "Simple XOR");
6299 for (i = 0; mask && i < RTE_ETH_FLOW_MAX; i++) {
6300 if (!(mask & (1UL << i)))
6302 mask &= ~(1UL << i);
6303 /* Bit set indicates the corresponding flow type is supported */
6304 g_cfg->valid_bit_mask[0] |= (1UL << i);
6305 pctype = i40e_flowtype_to_pctype(i);
6306 reg = I40E_READ_REG(hw, I40E_GLQF_HSYM(pctype));
6307 if (reg & I40E_GLQF_HSYM_SYMH_ENA_MASK)
6308 g_cfg->sym_hash_enable_mask[0] |= (1UL << i);
6315 i40e_hash_global_config_check(struct rte_eth_hash_global_conf *g_cfg)
6318 uint32_t mask0, i40e_mask = I40E_FLOW_TYPES;
6320 if (g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_TOEPLITZ &&
6321 g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_SIMPLE_XOR &&
6322 g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_DEFAULT) {
6323 PMD_DRV_LOG(ERR, "Unsupported hash function type %d",
6329 * As i40e supports less than 32 flow types, only the first 32 bits need to
6332 mask0 = g_cfg->valid_bit_mask[0];
6333 for (i = 0; i < RTE_SYM_HASH_MASK_ARRAY_SIZE; i++) {
6335 /* Check for unsupported flow types: (mask0 | i40e_mask) ^ i40e_mask is non-zero iff mask0 sets a bit outside the supported set */
6336 if ((mask0 | i40e_mask) ^ i40e_mask)
6339 if (g_cfg->valid_bit_mask[i])
6347 PMD_DRV_LOG(ERR, "i40e unsupported flow type bit(s) configured");
6353 * Set global configurations of hash function type and symmetric hash enable
6354 * per flow type (pctype). Note that modifying this global configuration
6355 * affects all the ports on the same NIC.
6358 i40e_set_hash_filter_global_config(struct i40e_hw *hw,
6359 struct rte_eth_hash_global_conf *g_cfg)
6364 uint32_t mask0 = g_cfg->valid_bit_mask[0];
6365 enum i40e_filter_pctype pctype;
6367 /* Check the input parameters */
6368 ret = i40e_hash_global_config_check(g_cfg);
6372 for (i = 0; mask0 && i < UINT32_BIT; i++) {
6373 if (!(mask0 & (1UL << i)))
6375 mask0 &= ~(1UL << i);
6376 pctype = i40e_flowtype_to_pctype(i);
6377 reg = (g_cfg->sym_hash_enable_mask[0] & (1UL << i)) ?
6378 I40E_GLQF_HSYM_SYMH_ENA_MASK : 0;
6379 I40E_WRITE_REG(hw, I40E_GLQF_HSYM(pctype), reg);
6382 reg = I40E_READ_REG(hw, I40E_GLQF_CTL);
6383 if (g_cfg->hash_func == RTE_ETH_HASH_FUNCTION_TOEPLITZ) {
6385 if (reg & I40E_GLQF_CTL_HTOEP_MASK) {
6386 PMD_DRV_LOG(DEBUG, "Hash function already set to "
6390 reg |= I40E_GLQF_CTL_HTOEP_MASK;
6391 } else if (g_cfg->hash_func == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR) {
6393 if (!(reg & I40E_GLQF_CTL_HTOEP_MASK)) {
6394 PMD_DRV_LOG(DEBUG, "Hash function already set to "
6398 reg &= ~I40E_GLQF_CTL_HTOEP_MASK;
6400 /* Use the default, and keep it as it is */
6403 I40E_WRITE_REG(hw, I40E_GLQF_CTL, reg);
6406 I40E_WRITE_FLUSH(hw);
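/*
 * Illustrative sketch, not part of the upstream driver: requesting Toeplitz
 * hashing with symmetric hash enabled for IPv4/TCP. The bit positions in
 * valid_bit_mask/sym_hash_enable_mask are RTE_ETH_FLOW_* values, exactly as
 * consumed by i40e_set_hash_filter_global_config() above; note this is a
 * NIC-wide (all ports) setting.
 */
static __rte_unused void
example_set_global_sym_toeplitz(uint8_t port_id)
{
	struct rte_eth_hash_filter_info info;

	memset(&info, 0, sizeof(info));
	info.info_type = RTE_ETH_HASH_FILTER_GLOBAL_CONFIG;
	info.info.global_conf.hash_func = RTE_ETH_HASH_FUNCTION_TOEPLITZ;
	info.info.global_conf.valid_bit_mask[0] =
		1UL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
	info.info.global_conf.sym_hash_enable_mask[0] =
		1UL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
	if (rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_HASH,
				    RTE_ETH_FILTER_SET, &info) < 0)
		PMD_DRV_LOG(ERR, "failed to set global hash configuration");
}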
6412 * Valid input sets for hash and flow director filters per PCTYPE
6415 i40e_get_valid_input_set(enum i40e_filter_pctype pctype,
6416 enum rte_filter_type filter)
6420 static const uint64_t valid_hash_inset_table[] = {
6421 [I40E_FILTER_PCTYPE_FRAG_IPV4] =
6422 I40E_INSET_DMAC | I40E_INSET_SMAC |
6423 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
6424 I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_SRC |
6425 I40E_INSET_IPV4_DST | I40E_INSET_IPV4_TOS |
6426 I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
6427 I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
6428 I40E_INSET_FLEX_PAYLOAD,
6429 [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
6430 I40E_INSET_DMAC | I40E_INSET_SMAC |
6431 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
6432 I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
6433 I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
6434 I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
6435 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
6436 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
6437 I40E_INSET_FLEX_PAYLOAD,
6438 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
6439 I40E_INSET_DMAC | I40E_INSET_SMAC |
6440 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
6441 I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
6442 I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
6443 I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
6444 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
6445 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
6446 I40E_INSET_TCP_FLAGS | I40E_INSET_FLEX_PAYLOAD,
6447 [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
6448 I40E_INSET_DMAC | I40E_INSET_SMAC |
6449 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
6450 I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
6451 I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
6452 I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
6453 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
6454 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
6455 I40E_INSET_SCTP_VT | I40E_INSET_FLEX_PAYLOAD,
6456 [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
6457 I40E_INSET_DMAC | I40E_INSET_SMAC |
6458 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
6459 I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
6460 I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
6461 I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
6462 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
6463 I40E_INSET_FLEX_PAYLOAD,
6464 [I40E_FILTER_PCTYPE_FRAG_IPV6] =
6465 I40E_INSET_DMAC | I40E_INSET_SMAC |
6466 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
6467 I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
6468 I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
6469 I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_TUNNEL_DMAC |
6470 I40E_INSET_TUNNEL_ID | I40E_INSET_IPV6_SRC |
6471 I40E_INSET_IPV6_DST | I40E_INSET_FLEX_PAYLOAD,
6472 [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
6473 I40E_INSET_DMAC | I40E_INSET_SMAC |
6474 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
6475 I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
6476 I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
6477 I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
6478 I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
6479 I40E_INSET_DST_PORT | I40E_INSET_FLEX_PAYLOAD,
6480 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
6481 I40E_INSET_DMAC | I40E_INSET_SMAC |
6482 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
6483 I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
6484 I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
6485 I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
6486 I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
6487 I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
6488 I40E_INSET_FLEX_PAYLOAD,
6489 [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
6490 I40E_INSET_DMAC | I40E_INSET_SMAC |
6491 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
6492 I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
6493 I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
6494 I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
6495 I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
6496 I40E_INSET_DST_PORT | I40E_INSET_SCTP_VT |
6497 I40E_INSET_FLEX_PAYLOAD,
6498 [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
6499 I40E_INSET_DMAC | I40E_INSET_SMAC |
6500 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
6501 I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
6502 I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
6503 I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
6504 I40E_INSET_IPV6_DST | I40E_INSET_TUNNEL_ID |
6505 I40E_INSET_FLEX_PAYLOAD,
6506 [I40E_FILTER_PCTYPE_L2_PAYLOAD] =
6507 I40E_INSET_DMAC | I40E_INSET_SMAC |
6508 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
6509 I40E_INSET_VLAN_TUNNEL | I40E_INSET_LAST_ETHER_TYPE |
6510 I40E_INSET_FLEX_PAYLOAD,
6514 * Flow director supports only fields defined in
6515 * union rte_eth_fdir_flow.
6517 static const uint64_t valid_fdir_inset_table[] = {
6518 [I40E_FILTER_PCTYPE_FRAG_IPV4] =
6519 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
6520 I40E_INSET_FLEX_PAYLOAD,
6521 [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
6522 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
6523 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
6524 I40E_INSET_FLEX_PAYLOAD,
6525 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
6526 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
6527 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
6528 I40E_INSET_FLEX_PAYLOAD,
6529 [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
6530 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
6531 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
6532 I40E_INSET_SCTP_VT | I40E_INSET_FLEX_PAYLOAD,
6533 [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
6534 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
6535 I40E_INSET_FLEX_PAYLOAD,
6536 [I40E_FILTER_PCTYPE_FRAG_IPV6] =
6537 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
6538 I40E_INSET_FLEX_PAYLOAD,
6539 [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
6540 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
6541 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
6542 I40E_INSET_FLEX_PAYLOAD,
6543 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
6544 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
6545 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
6546 I40E_INSET_FLEX_PAYLOAD,
6547 [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
6548 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
6549 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
6550 I40E_INSET_SCTP_VT | I40E_INSET_FLEX_PAYLOAD,
6551 [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
6552 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
6553 I40E_INSET_FLEX_PAYLOAD,
6554 [I40E_FILTER_PCTYPE_L2_PAYLOAD] =
6555 I40E_INSET_LAST_ETHER_TYPE | I40E_INSET_FLEX_PAYLOAD,
6558 if (pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD)
6560 if (filter == RTE_ETH_FILTER_HASH)
6561 valid = valid_hash_inset_table[pctype];
6563 valid = valid_fdir_inset_table[pctype];
6569 * Validate if the input set is allowed for a specific PCTYPE
6572 i40e_validate_input_set(enum i40e_filter_pctype pctype,
6573 enum rte_filter_type filter, uint64_t inset)
6577 valid = i40e_get_valid_input_set(pctype, filter);
6578 if (inset & (~valid))
6584 /* default input set fields combination per pctype */
6586 i40e_get_default_input_set(uint16_t pctype)
6588 static const uint64_t default_inset_table[] = {
6589 [I40E_FILTER_PCTYPE_FRAG_IPV4] =
6590 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST,
6591 [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
6592 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
6593 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
6594 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
6595 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
6596 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
6597 [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
6598 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
6599 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
6601 [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
6602 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST,
6603 [I40E_FILTER_PCTYPE_FRAG_IPV6] =
6604 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST,
6605 [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
6606 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
6607 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
6608 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
6609 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
6610 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
6611 [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
6612 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
6613 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
6615 [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
6616 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST,
6617 [I40E_FILTER_PCTYPE_L2_PAYLOAD] =
6618 I40E_INSET_LAST_ETHER_TYPE,
6621 if (pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD)
6624 return default_inset_table[pctype];
6628 * Parse the input set from field indexes into logical bit masks
6631 i40e_parse_input_set(uint64_t *inset,
6632 enum i40e_filter_pctype pctype,
6633 enum rte_eth_input_set_field *field,
6639 static const struct {
6640 enum rte_eth_input_set_field field;
6642 } inset_convert_table[] = {
6643 {RTE_ETH_INPUT_SET_NONE, I40E_INSET_NONE},
6644 {RTE_ETH_INPUT_SET_L2_SRC_MAC, I40E_INSET_SMAC},
6645 {RTE_ETH_INPUT_SET_L2_DST_MAC, I40E_INSET_DMAC},
6646 {RTE_ETH_INPUT_SET_L2_OUTER_VLAN, I40E_INSET_VLAN_OUTER},
6647 {RTE_ETH_INPUT_SET_L2_INNER_VLAN, I40E_INSET_VLAN_INNER},
6648 {RTE_ETH_INPUT_SET_L2_ETHERTYPE, I40E_INSET_LAST_ETHER_TYPE},
6649 {RTE_ETH_INPUT_SET_L3_SRC_IP4, I40E_INSET_IPV4_SRC},
6650 {RTE_ETH_INPUT_SET_L3_DST_IP4, I40E_INSET_IPV4_DST},
6651 {RTE_ETH_INPUT_SET_L3_IP4_TOS, I40E_INSET_IPV4_TOS},
6652 {RTE_ETH_INPUT_SET_L3_IP4_PROTO, I40E_INSET_IPV4_PROTO},
6653 {RTE_ETH_INPUT_SET_L3_SRC_IP6, I40E_INSET_IPV6_SRC},
6654 {RTE_ETH_INPUT_SET_L3_DST_IP6, I40E_INSET_IPV6_DST},
6655 {RTE_ETH_INPUT_SET_L3_IP6_TC, I40E_INSET_IPV6_TC},
6656 {RTE_ETH_INPUT_SET_L3_IP6_NEXT_HEADER,
6657 I40E_INSET_IPV6_NEXT_HDR},
6658 {RTE_ETH_INPUT_SET_L4_UDP_SRC_PORT, I40E_INSET_SRC_PORT},
6659 {RTE_ETH_INPUT_SET_L4_TCP_SRC_PORT, I40E_INSET_SRC_PORT},
6660 {RTE_ETH_INPUT_SET_L4_SCTP_SRC_PORT, I40E_INSET_SRC_PORT},
6661 {RTE_ETH_INPUT_SET_L4_UDP_DST_PORT, I40E_INSET_DST_PORT},
6662 {RTE_ETH_INPUT_SET_L4_TCP_DST_PORT, I40E_INSET_DST_PORT},
6663 {RTE_ETH_INPUT_SET_L4_SCTP_DST_PORT, I40E_INSET_DST_PORT},
6664 {RTE_ETH_INPUT_SET_L4_SCTP_VERIFICATION_TAG,
6665 I40E_INSET_SCTP_VT},
6666 {RTE_ETH_INPUT_SET_TUNNEL_L2_INNER_DST_MAC,
6667 I40E_INSET_TUNNEL_DMAC},
6668 {RTE_ETH_INPUT_SET_TUNNEL_L2_INNER_VLAN,
6669 I40E_INSET_VLAN_TUNNEL},
6670 {RTE_ETH_INPUT_SET_TUNNEL_L4_UDP_KEY,
6671 I40E_INSET_TUNNEL_ID},
6672 {RTE_ETH_INPUT_SET_TUNNEL_GRE_KEY, I40E_INSET_TUNNEL_ID},
6673 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_1ST_WORD,
6674 I40E_INSET_FLEX_PAYLOAD_W1},
6675 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_2ND_WORD,
6676 I40E_INSET_FLEX_PAYLOAD_W2},
6677 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_3RD_WORD,
6678 I40E_INSET_FLEX_PAYLOAD_W3},
6679 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_4TH_WORD,
6680 I40E_INSET_FLEX_PAYLOAD_W4},
6681 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_5TH_WORD,
6682 I40E_INSET_FLEX_PAYLOAD_W5},
6683 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_6TH_WORD,
6684 I40E_INSET_FLEX_PAYLOAD_W6},
6685 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_7TH_WORD,
6686 I40E_INSET_FLEX_PAYLOAD_W7},
6687 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_8TH_WORD,
6688 I40E_INSET_FLEX_PAYLOAD_W8},
6691 if (!inset || !field || size > RTE_ETH_INSET_SIZE_MAX)
6694 /* Only one item is allowed for the DEFAULT and NONE cases */
6696 if (field[0] == RTE_ETH_INPUT_SET_DEFAULT) {
6697 *inset = i40e_get_default_input_set(pctype);
6699 } else if (field[0] == RTE_ETH_INPUT_SET_NONE) {
6700 *inset = I40E_INSET_NONE;
6705 for (i = 0, *inset = 0; i < size; i++) {
6706 for (j = 0; j < RTE_DIM(inset_convert_table); j++) {
6707 if (field[i] == inset_convert_table[j].field) {
6708 *inset |= inset_convert_table[j].inset;
6713 /* An unsupported input set field was found, return immediately */
6714 if (j == RTE_DIM(inset_convert_table))
6722 * Translate the input set from bit masks to register aware bit masks
6726 i40e_translate_input_set_reg(uint64_t input)
6731 static const struct {
6735 {I40E_INSET_DMAC, I40E_REG_INSET_L2_DMAC},
6736 {I40E_INSET_SMAC, I40E_REG_INSET_L2_SMAC},
6737 {I40E_INSET_VLAN_OUTER, I40E_REG_INSET_L2_OUTER_VLAN},
6738 {I40E_INSET_VLAN_INNER, I40E_REG_INSET_L2_INNER_VLAN},
6739 {I40E_INSET_LAST_ETHER_TYPE, I40E_REG_INSET_LAST_ETHER_TYPE},
6740 {I40E_INSET_IPV4_SRC, I40E_REG_INSET_L3_SRC_IP4},
6741 {I40E_INSET_IPV4_DST, I40E_REG_INSET_L3_DST_IP4},
6742 {I40E_INSET_IPV4_TOS, I40E_REG_INSET_L3_IP4_TOS},
6743 {I40E_INSET_IPV4_PROTO, I40E_REG_INSET_L3_IP4_PROTO},
6744 {I40E_INSET_IPV6_SRC, I40E_REG_INSET_L3_SRC_IP6},
6745 {I40E_INSET_IPV6_DST, I40E_REG_INSET_L3_DST_IP6},
6746 {I40E_INSET_IPV6_TC, I40E_REG_INSET_L3_IP6_TC},
6747 {I40E_INSET_IPV6_NEXT_HDR, I40E_REG_INSET_L3_IP6_NEXT_HDR},
6748 {I40E_INSET_SRC_PORT, I40E_REG_INSET_L4_SRC_PORT},
6749 {I40E_INSET_DST_PORT, I40E_REG_INSET_L4_DST_PORT},
6750 {I40E_INSET_SCTP_VT, I40E_REG_INSET_L4_SCTP_VERIFICATION_TAG},
6751 {I40E_INSET_TUNNEL_ID, I40E_REG_INSET_TUNNEL_ID},
6752 {I40E_INSET_TUNNEL_DMAC,
6753 I40E_REG_INSET_TUNNEL_L2_INNER_DST_MAC},
6754 {I40E_INSET_TUNNEL_IPV4_DST, I40E_REG_INSET_TUNNEL_L3_DST_IP4},
6755 {I40E_INSET_TUNNEL_IPV6_DST, I40E_REG_INSET_TUNNEL_L3_DST_IP6},
6756 {I40E_INSET_TUNNEL_SRC_PORT,
6757 I40E_REG_INSET_TUNNEL_L4_UDP_SRC_PORT},
6758 {I40E_INSET_TUNNEL_DST_PORT,
6759 I40E_REG_INSET_TUNNEL_L4_UDP_DST_PORT},
6761 {I40E_INSET_FLEX_PAYLOAD_W1, I40E_REG_INSET_FLEX_PAYLOAD_WORD1},
6762 {I40E_INSET_FLEX_PAYLOAD_W2, I40E_REG_INSET_FLEX_PAYLOAD_WORD2},
6763 {I40E_INSET_FLEX_PAYLOAD_W3, I40E_REG_INSET_FLEX_PAYLOAD_WORD3},
6764 {I40E_INSET_FLEX_PAYLOAD_W4, I40E_REG_INSET_FLEX_PAYLOAD_WORD4},
6765 {I40E_INSET_FLEX_PAYLOAD_W5, I40E_REG_INSET_FLEX_PAYLOAD_WORD5},
6766 {I40E_INSET_FLEX_PAYLOAD_W6, I40E_REG_INSET_FLEX_PAYLOAD_WORD6},
6767 {I40E_INSET_FLEX_PAYLOAD_W7, I40E_REG_INSET_FLEX_PAYLOAD_WORD7},
6768 {I40E_INSET_FLEX_PAYLOAD_W8, I40E_REG_INSET_FLEX_PAYLOAD_WORD8},
6774 /* Translate input set to register aware inset */
6775 for (i = 0; i < RTE_DIM(inset_map); i++) {
6776 if (input & inset_map[i].inset)
6777 val |= inset_map[i].inset_reg;
6784 i40e_generate_inset_mask_reg(uint64_t inset, uint32_t *mask, uint8_t nb_elem)
6788 static const struct {
6791 } inset_mask_map[] = {
6792 {I40E_INSET_IPV4_TOS, I40E_INSET_IPV4_TOS_MASK},
6793 {I40E_INSET_IPV4_PROTO, I40E_INSET_IPV4_PROTO_MASK},
6794 {I40E_INSET_IPV6_TC, I40E_INSET_IPV6_TC_MASK},
6795 {I40E_INSET_IPV6_NEXT_HDR, I40E_INSET_IPV6_NEXT_HDR_MASK},
6798 if (!mask || !nb_elem) /* inset == 0 is valid: it clears all the mask registers below */
6801 if (!inset && nb_elem >= I40E_INSET_MASK_NUM_REG) {
6802 for (i = 0; i < I40E_INSET_MASK_NUM_REG; i++)
6804 return I40E_INSET_MASK_NUM_REG;
6807 for (i = 0, idx = 0; i < RTE_DIM(inset_mask_map); i++) {
6810 if (inset & inset_mask_map[i].inset) {
6811 mask[idx] = inset_mask_map[i].mask;
6820 i40e_get_reg_inset(struct i40e_hw *hw, enum rte_filter_type filter,
6821 enum i40e_filter_pctype pctype)
6825 if (filter == RTE_ETH_FILTER_HASH) {
6826 reg = I40E_READ_REG(hw, I40E_GLQF_HASH_INSET(1, pctype));
6827 reg <<= I40E_32_BIT_WIDTH;
6828 reg |= I40E_READ_REG(hw, I40E_GLQF_HASH_INSET(0, pctype));
6829 } else if (filter == RTE_ETH_FILTER_FDIR) {
6830 reg = I40E_READ_REG(hw, I40E_PRTQF_FD_INSET(pctype, 1));
6831 reg <<= I40E_32_BIT_WIDTH;
6832 reg |= I40E_READ_REG(hw, I40E_PRTQF_FD_INSET(pctype, 0));
6839 i40e_check_write_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val)
6841 uint32_t reg = I40E_READ_REG(hw, addr);
6843 PMD_DRV_LOG(DEBUG, "[0x%08x] original: 0x%08x\n", addr, reg);
6845 I40E_WRITE_REG(hw, addr, val);
6846 PMD_DRV_LOG(DEBUG, "[0x%08x] after: 0x%08x\n", addr,
6847 (uint32_t)I40E_READ_REG(hw, addr));
6851 i40e_set_hash_inset_mask(struct i40e_hw *hw,
6852 enum i40e_filter_pctype pctype,
6853 enum rte_filter_input_set_op op,
6860 if (!mask_reg || num > RTE_ETH_INPUT_SET_SELECT)
6863 if (op == RTE_ETH_INPUT_SET_SELECT) {
6864 for (i = 0; i < I40E_INSET_MASK_NUM_REG; i++) {
6865 i40e_check_write_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
6869 i40e_check_write_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
6872 } else if (op == RTE_ETH_INPUT_SET_ADD) {
6873 uint8_t j, count = 0;
6875 for (i = 0; i < I40E_INSET_MASK_NUM_REG; i++) {
6876 reg = I40E_READ_REG(hw, I40E_GLQF_HASH_MSK(i, pctype));
6877 if (reg & I40E_GLQF_HASH_MSK_FIELD)
6880 if (count + num > I40E_INSET_MASK_NUM_REG)
6883 for (i = count, j = 0; i < I40E_INSET_MASK_NUM_REG; i++, j++)
6884 i40e_check_write_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
6892 i40e_set_fd_inset_mask(struct i40e_hw *hw,
6893 enum i40e_filter_pctype pctype,
6894 enum rte_filter_input_set_op op,
6901 if (!mask_reg || num > RTE_ETH_INPUT_SET_SELECT)
6904 if (op == RTE_ETH_INPUT_SET_SELECT) {
6905 for (i = 0; i < I40E_INSET_MASK_NUM_REG; i++) {
6906 i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype),
6910 i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype),
6913 } else if (op == RTE_ETH_INPUT_SET_ADD) {
6914 uint8_t j, count = 0;
6916 for (i = 0; i < I40E_INSET_MASK_NUM_REG; i++) {
6917 reg = I40E_READ_REG(hw, I40E_GLQF_FD_MSK(i, pctype));
6918 if (reg & I40E_GLQF_FD_MSK_FIELD)
6921 if (count + num > I40E_INSET_MASK_NUM_REG)
6924 for (i = count, j = 0; i < I40E_INSET_MASK_NUM_REG; i++, j++)
6925 i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype),
6933 i40e_filter_inset_select(struct i40e_hw *hw,
6934 struct rte_eth_input_set_conf *conf,
6935 enum rte_filter_type filter)
6937 enum i40e_filter_pctype pctype;
6938 uint64_t inset_reg = 0, input_set;
6939 uint32_t mask_reg[I40E_INSET_MASK_NUM_REG];
6944 PMD_DRV_LOG(ERR, "Invalid pointer");
6948 pctype = i40e_flowtype_to_pctype(conf->flow_type);
6949 if (pctype == 0 || pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD) {
6950 PMD_DRV_LOG(ERR, "Not supported flow type (%u)",
6954 if (filter != RTE_ETH_FILTER_HASH && filter != RTE_ETH_FILTER_FDIR) {
6955 PMD_DRV_LOG(ERR, "Not supported filter type (%u)", filter);
6959 ret = i40e_parse_input_set(&input_set, pctype, conf->field,
6962 PMD_DRV_LOG(ERR, "Failed to parse input set");
6965 if (i40e_validate_input_set(pctype, filter, input_set) != 0) {
6966 PMD_DRV_LOG(ERR, "Invalid input set");
6970 if (conf->op == RTE_ETH_INPUT_SET_ADD) {
6971 inset_reg |= i40e_get_reg_inset(hw, filter, pctype);
6972 } else if (conf->op != RTE_ETH_INPUT_SET_SELECT) {
6973 PMD_DRV_LOG(ERR, "Unsupported input set operation");
6976 num = i40e_generate_inset_mask_reg(input_set, mask_reg,
6977 I40E_INSET_MASK_NUM_REG);
6978 inset_reg |= i40e_translate_input_set_reg(input_set);
6980 if (filter == RTE_ETH_FILTER_HASH) {
6981 ret = i40e_set_hash_inset_mask(hw, pctype, conf->op, mask_reg,
6986 i40e_check_write_reg(hw, I40E_GLQF_HASH_INSET(0, pctype),
6987 (uint32_t)(inset_reg & UINT32_MAX));
6988 i40e_check_write_reg(hw, I40E_GLQF_HASH_INSET(1, pctype),
6989 (uint32_t)((inset_reg >>
6990 I40E_32_BIT_WIDTH) & UINT32_MAX));
6991 } else if (filter == RTE_ETH_FILTER_FDIR) {
6992 ret = i40e_set_fd_inset_mask(hw, pctype, conf->op, mask_reg,
6997 i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
6998 (uint32_t)(inset_reg & UINT32_MAX));
6999 i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1),
7000 (uint32_t)((inset_reg >>
7001 I40E_32_BIT_WIDTH) & UINT32_MAX));
7003 PMD_DRV_LOG(ERR, "Not supported filter type (%u)", filter);
7006 I40E_WRITE_FLUSH(hw);
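/*
 * Illustrative sketch, not part of the upstream driver (assumes the
 * rte_eth_ctrl.h input-set layout of this release, including the inset_size
 * field): narrowing the IPv4/UDP hash input set to the source/destination
 * addresses only, which ends up in i40e_filter_inset_select() above.
 */
static __rte_unused void
example_select_hash_input_set(uint8_t port_id)
{
	struct rte_eth_hash_filter_info info;

	memset(&info, 0, sizeof(info));
	info.info_type = RTE_ETH_HASH_FILTER_INPUT_SET_SELECT;
	info.info.input_set_conf.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
	info.info.input_set_conf.inset_size = 2;
	info.info.input_set_conf.field[0] = RTE_ETH_INPUT_SET_L3_SRC_IP4;
	info.info.input_set_conf.field[1] = RTE_ETH_INPUT_SET_L3_DST_IP4;
	info.info.input_set_conf.op = RTE_ETH_INPUT_SET_SELECT;
	if (rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_HASH,
				    RTE_ETH_FILTER_SET, &info) < 0)
		PMD_DRV_LOG(ERR, "failed to select hash input set");
}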
7012 i40e_hash_filter_get(struct i40e_hw *hw, struct rte_eth_hash_filter_info *info)
7017 PMD_DRV_LOG(ERR, "Invalid pointer");
7021 switch (info->info_type) {
7022 case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT:
7023 i40e_get_symmetric_hash_enable_per_port(hw,
7024 &(info->info.enable));
7026 case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG:
7027 ret = i40e_get_hash_filter_global_config(hw,
7028 &(info->info.global_conf));
7031 PMD_DRV_LOG(ERR, "Hash filter info type (%d) not supported",
7041 i40e_hash_filter_set(struct i40e_hw *hw, struct rte_eth_hash_filter_info *info)
7046 PMD_DRV_LOG(ERR, "Invalid pointer");
7050 switch (info->info_type) {
7051 case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT:
7052 i40e_set_symmetric_hash_enable_per_port(hw, info->info.enable);
7054 case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG:
7055 ret = i40e_set_hash_filter_global_config(hw,
7056 &(info->info.global_conf));
7058 case RTE_ETH_HASH_FILTER_INPUT_SET_SELECT:
7059 ret = i40e_filter_inset_select(hw,
7060 &(info->info.input_set_conf),
7061 RTE_ETH_FILTER_HASH);
7065 PMD_DRV_LOG(ERR, "Hash filter info type (%d) not supported",
7074 /* Operations for hash function */
7076 i40e_hash_filter_ctrl(struct rte_eth_dev *dev,
7077 enum rte_filter_op filter_op,
7080 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7083 switch (filter_op) {
7084 case RTE_ETH_FILTER_NOP:
7086 case RTE_ETH_FILTER_GET:
7087 ret = i40e_hash_filter_get(hw,
7088 (struct rte_eth_hash_filter_info *)arg);
7090 case RTE_ETH_FILTER_SET:
7091 ret = i40e_hash_filter_set(hw,
7092 (struct rte_eth_hash_filter_info *)arg);
7095 PMD_DRV_LOG(WARNING, "Filter operation (%d) not supported",
7105 * Configure an ethertype filter, which can direct packets by filtering
7106 * on MAC address and ether_type, or on ether_type alone
7109 i40e_ethertype_filter_set(struct i40e_pf *pf,
7110 struct rte_eth_ethertype_filter *filter,
7113 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7114 struct i40e_control_filter_stats stats;
7118 if (filter->queue >= pf->dev_data->nb_rx_queues) {
7119 PMD_DRV_LOG(ERR, "Invalid queue ID");
7122 if (filter->ether_type == ETHER_TYPE_IPv4 ||
7123 filter->ether_type == ETHER_TYPE_IPv6) {
7124 PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in"
7125 " control packet filter.", filter->ether_type);
7128 if (filter->ether_type == ETHER_TYPE_VLAN)
7129 PMD_DRV_LOG(WARNING, "filter vlan ether_type in first tag is"
7132 if (!(filter->flags & RTE_ETHTYPE_FLAGS_MAC))
7133 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
7134 if (filter->flags & RTE_ETHTYPE_FLAGS_DROP)
7135 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
7136 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
7138 memset(&stats, 0, sizeof(stats));
7139 ret = i40e_aq_add_rem_control_packet_filter(hw,
7140 filter->mac_addr.addr_bytes,
7141 filter->ether_type, flags,
7143 filter->queue, add, &stats, NULL);
7145 PMD_DRV_LOG(INFO, "add/rem control packet filter, return %d,"
7146 " mac_etype_used = %u, etype_used = %u,"
7147 " mac_etype_free = %u, etype_free = %u\n",
7148 ret, stats.mac_etype_used, stats.etype_used,
7149 stats.mac_etype_free, stats.etype_free);
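/*
 * Illustrative sketch, not part of the upstream driver: steering one
 * EtherType (LLDP, 0x88CC, chosen arbitrarily here) to a dedicated queue.
 * Leaving RTE_ETHTYPE_FLAGS_MAC unset matches any MAC, per the flag
 * handling above.
 */
static __rte_unused void
example_add_ethertype_filter(uint8_t port_id, uint16_t queue)
{
	struct rte_eth_ethertype_filter filter;

	memset(&filter, 0, sizeof(filter));
	filter.ether_type = 0x88CC; /* must not be IPv4/IPv6 (rejected above) */
	filter.flags = 0;
	filter.queue = queue;
	if (rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_ETHERTYPE,
				    RTE_ETH_FILTER_ADD, &filter) < 0)
		PMD_DRV_LOG(ERR, "failed to add ethertype filter");
}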
7156 * Handle operations for ethertype filter.
7159 i40e_ethertype_filter_handle(struct rte_eth_dev *dev,
7160 enum rte_filter_op filter_op,
7163 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
7166 if (filter_op == RTE_ETH_FILTER_NOP)
7170 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u",
7175 switch (filter_op) {
7176 case RTE_ETH_FILTER_ADD:
7177 ret = i40e_ethertype_filter_set(pf,
7178 (struct rte_eth_ethertype_filter *)arg,
7181 case RTE_ETH_FILTER_DELETE:
7182 ret = i40e_ethertype_filter_set(pf,
7183 (struct rte_eth_ethertype_filter *)arg,
7187 PMD_DRV_LOG(ERR, "unsupported operation %u\n", filter_op);
7195 i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
7196 enum rte_filter_type filter_type,
7197 enum rte_filter_op filter_op,
7205 switch (filter_type) {
7206 case RTE_ETH_FILTER_NONE:
7207 /* For global configuration */
7208 ret = i40e_filter_ctrl_global_config(dev, filter_op, arg);
7210 case RTE_ETH_FILTER_HASH:
7211 ret = i40e_hash_filter_ctrl(dev, filter_op, arg);
7213 case RTE_ETH_FILTER_MACVLAN:
7214 ret = i40e_mac_filter_handle(dev, filter_op, arg);
7216 case RTE_ETH_FILTER_ETHERTYPE:
7217 ret = i40e_ethertype_filter_handle(dev, filter_op, arg);
7219 case RTE_ETH_FILTER_TUNNEL:
7220 ret = i40e_tunnel_filter_handle(dev, filter_op, arg);
7222 case RTE_ETH_FILTER_FDIR:
7223 ret = i40e_fdir_ctrl_func(dev, filter_op, arg);
7226 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
7236 * As some registers are only reset by a global hardware reset,
7237 * explicit hardware initialization is needed to put those registers
7238 * into an expected initial state.
7241 i40e_hw_init(struct i40e_hw *hw)
7243 /* clear the PF Queue Filter control register */
7244 I40E_WRITE_REG(hw, I40E_PFQF_CTL_0, 0);
7246 /* Disable symmetric hash per port */
7247 i40e_set_symmetric_hash_enable_per_port(hw, 0);
7250 enum i40e_filter_pctype
7251 i40e_flowtype_to_pctype(uint16_t flow_type)
7253 static const enum i40e_filter_pctype pctype_table[] = {
7254 [RTE_ETH_FLOW_FRAG_IPV4] = I40E_FILTER_PCTYPE_FRAG_IPV4,
7255 [RTE_ETH_FLOW_NONFRAG_IPV4_UDP] =
7256 I40E_FILTER_PCTYPE_NONF_IPV4_UDP,
7257 [RTE_ETH_FLOW_NONFRAG_IPV4_TCP] =
7258 I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
7259 [RTE_ETH_FLOW_NONFRAG_IPV4_SCTP] =
7260 I40E_FILTER_PCTYPE_NONF_IPV4_SCTP,
7261 [RTE_ETH_FLOW_NONFRAG_IPV4_OTHER] =
7262 I40E_FILTER_PCTYPE_NONF_IPV4_OTHER,
7263 [RTE_ETH_FLOW_FRAG_IPV6] = I40E_FILTER_PCTYPE_FRAG_IPV6,
7264 [RTE_ETH_FLOW_NONFRAG_IPV6_UDP] =
7265 I40E_FILTER_PCTYPE_NONF_IPV6_UDP,
7266 [RTE_ETH_FLOW_NONFRAG_IPV6_TCP] =
7267 I40E_FILTER_PCTYPE_NONF_IPV6_TCP,
7268 [RTE_ETH_FLOW_NONFRAG_IPV6_SCTP] =
7269 I40E_FILTER_PCTYPE_NONF_IPV6_SCTP,
7270 [RTE_ETH_FLOW_NONFRAG_IPV6_OTHER] =
7271 I40E_FILTER_PCTYPE_NONF_IPV6_OTHER,
7272 [RTE_ETH_FLOW_L2_PAYLOAD] = I40E_FILTER_PCTYPE_L2_PAYLOAD,
7275 return pctype_table[flow_type];
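/* e.g. i40e_flowtype_to_pctype(RTE_ETH_FLOW_NONFRAG_IPV4_TCP) yields
 * I40E_FILTER_PCTYPE_NONF_IPV4_TCP; the table below inverts this mapping.
 */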
7279 i40e_pctype_to_flowtype(enum i40e_filter_pctype pctype)
7281 static const uint16_t flowtype_table[] = {
7282 [I40E_FILTER_PCTYPE_FRAG_IPV4] = RTE_ETH_FLOW_FRAG_IPV4,
7283 [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
7284 RTE_ETH_FLOW_NONFRAG_IPV4_UDP,
7285 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
7286 RTE_ETH_FLOW_NONFRAG_IPV4_TCP,
7287 [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
7288 RTE_ETH_FLOW_NONFRAG_IPV4_SCTP,
7289 [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
7290 RTE_ETH_FLOW_NONFRAG_IPV4_OTHER,
7291 [I40E_FILTER_PCTYPE_FRAG_IPV6] = RTE_ETH_FLOW_FRAG_IPV6,
7292 [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
7293 RTE_ETH_FLOW_NONFRAG_IPV6_UDP,
7294 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
7295 RTE_ETH_FLOW_NONFRAG_IPV6_TCP,
7296 [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
7297 RTE_ETH_FLOW_NONFRAG_IPV6_SCTP,
7298 [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
7299 RTE_ETH_FLOW_NONFRAG_IPV6_OTHER,
7300 [I40E_FILTER_PCTYPE_L2_PAYLOAD] = RTE_ETH_FLOW_L2_PAYLOAD,
7303 return flowtype_table[pctype];
7307 * On X710, the performance number is far from expectation on recent
7308 * firmware versions. On XL710, the same is true if promiscuous mode is
7309 * disabled, or if promiscuous mode is enabled and the port MAC address
7310 * equals the packet destination MAC address. The firmware fix may not
7311 * be integrated in the next firmware release, so a software workaround
7312 * in the driver is needed. It modifies the initial values of three
7313 * internal-only registers for both X710 and XL710. Note that the values
7314 * for X710 and XL710 could differ, and the workaround can be removed
7315 * once it is fixed in firmware.
7318 /* For both X710 and XL710 */
7319 #define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE 0x10000200
7320 #define I40E_GL_SWR_PRI_JOIN_MAP_0 0x26CE00
7322 #define I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE 0x011f0200
7323 #define I40E_GL_SWR_PRI_JOIN_MAP_2 0x26CE08
7326 #define I40E_GL_SWR_PM_UP_THR_EF_VALUE 0x03030303
7328 #define I40E_GL_SWR_PM_UP_THR_SF_VALUE 0x06060606
7329 #define I40E_GL_SWR_PM_UP_THR 0x269FBC
7332 i40e_configure_registers(struct i40e_hw *hw)
7338 {I40E_GL_SWR_PRI_JOIN_MAP_0, I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE},
7339 {I40E_GL_SWR_PRI_JOIN_MAP_2, I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE},
7340 {I40E_GL_SWR_PM_UP_THR, 0}, /* Compute value dynamically */
7346 for (i = 0; i < RTE_DIM(reg_table); i++) {
7347 if (reg_table[i].addr == I40E_GL_SWR_PM_UP_THR) {
7348 if (i40e_is_40G_device(hw->device_id)) /* For XL710 */
7350 I40E_GL_SWR_PM_UP_THR_SF_VALUE;
7353 I40E_GL_SWR_PM_UP_THR_EF_VALUE;
7356 ret = i40e_aq_debug_read_register(hw, reg_table[i].addr,
7359 PMD_DRV_LOG(ERR, "Failed to read from 0x%"PRIx32,
7363 PMD_DRV_LOG(DEBUG, "Read from 0x%"PRIx32": 0x%"PRIx64,
7364 reg_table[i].addr, reg);
7365 if (reg == reg_table[i].val)
7368 ret = i40e_aq_debug_write_register(hw, reg_table[i].addr,
7369 reg_table[i].val, NULL);
7371 PMD_DRV_LOG(ERR, "Failed to write 0x%"PRIx64" to the "
7372 "address of 0x%"PRIx32, reg_table[i].val,
7376 PMD_DRV_LOG(DEBUG, "Write 0x%"PRIx64" to the address of "
7377 "0x%"PRIx32, reg_table[i].val, reg_table[i].addr);
7381 #define I40E_VSI_TSR(_i) (0x00050800 + ((_i) * 4))
7382 #define I40E_VSI_TSR_QINQ_CONFIG 0xc030
7383 #define I40E_VSI_L2TAGSTXVALID(_i) (0x00042800 + ((_i) * 4))
7384 #define I40E_VSI_L2TAGSTXVALID_QINQ 0xab
7386 i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi)
7391 if (vsi->vsi_id >= I40E_MAX_NUM_VSIS) {
7392 PMD_DRV_LOG(ERR, "VSI ID exceeds the maximum");
7396 /* Configure for double VLAN RX stripping */
7397 reg = I40E_READ_REG(hw, I40E_VSI_TSR(vsi->vsi_id));
7398 if ((reg & I40E_VSI_TSR_QINQ_CONFIG) != I40E_VSI_TSR_QINQ_CONFIG) {
7399 reg |= I40E_VSI_TSR_QINQ_CONFIG;
7400 ret = i40e_aq_debug_write_register(hw,
7401 I40E_VSI_TSR(vsi->vsi_id),
7404 PMD_DRV_LOG(ERR, "Failed to update VSI_TSR[%d]",
7406 return I40E_ERR_CONFIG;
7410 /* Configure for double VLAN TX insertion */
7411 reg = I40E_READ_REG(hw, I40E_VSI_L2TAGSTXVALID(vsi->vsi_id));
7412 if ((reg & 0xff) != I40E_VSI_L2TAGSTXVALID_QINQ) {
7413 reg = I40E_VSI_L2TAGSTXVALID_QINQ;
7414 ret = i40e_aq_debug_write_register(hw,
7415 I40E_VSI_L2TAGSTXVALID(
7416 vsi->vsi_id), reg, NULL);
7418 PMD_DRV_LOG(ERR, "Failed to update "
7419 "VSI_L2TAGSTXVALID[%d]", vsi->vsi_id);
7420 return I40E_ERR_CONFIG;
7428 * i40e_aq_add_mirror_rule
7429 * @hw: pointer to the hardware structure
7430 * @seid: VEB seid to add mirror rule to
7431 * @dst_id: destination vsi seid
7432 * @entries: Buffer which contains the entities to be mirrored
7433 * @count: number of entities contained in the buffer
7434 * @rule_id: the rule_id of the rule to be added
7436 * Add a mirror rule for a given VEB.
7439 static enum i40e_status_code
7440 i40e_aq_add_mirror_rule(struct i40e_hw *hw,
7441 uint16_t seid, uint16_t dst_id,
7442 uint16_t rule_type, uint16_t *entries,
7443 uint16_t count, uint16_t *rule_id)
7445 struct i40e_aq_desc desc;
7446 struct i40e_aqc_add_delete_mirror_rule cmd;
7447 struct i40e_aqc_add_delete_mirror_rule_completion *resp =
7448 (struct i40e_aqc_add_delete_mirror_rule_completion *)
7451 enum i40e_status_code status;
7453 i40e_fill_default_direct_cmd_desc(&desc,
7454 i40e_aqc_opc_add_mirror_rule);
7455 memset(&cmd, 0, sizeof(cmd));
7457 buff_len = sizeof(uint16_t) * count;
7458 desc.datalen = rte_cpu_to_le_16(buff_len);
7460 desc.flags |= rte_cpu_to_le_16(
7461 (uint16_t)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
7462 cmd.rule_type = rte_cpu_to_le_16(rule_type <<
7463 I40E_AQC_MIRROR_RULE_TYPE_SHIFT);
7464 cmd.num_entries = rte_cpu_to_le_16(count);
7465 cmd.seid = rte_cpu_to_le_16(seid);
7466 cmd.destination = rte_cpu_to_le_16(dst_id);
7468 rte_memcpy(&desc.params.raw, &cmd, sizeof(cmd));
7469 status = i40e_asq_send_command(hw, &desc, entries, buff_len, NULL);
7470 PMD_DRV_LOG(INFO, "i40e_aq_add_mirror_rule, aq_status %d,"
7472 " mirror_rules_used = %u, mirror_rules_free = %u,",
7473 hw->aq.asq_last_status, resp->rule_id,
7474 resp->mirror_rules_used, resp->mirror_rules_free);
7475 *rule_id = rte_le_to_cpu_16(resp->rule_id);
7481 * i40e_aq_del_mirror_rule
7482 * @hw: pointer to the hardware structure
7483 * @seid: VEB seid to delete the mirror rule from
7484 * @entries: Buffer which contains the entities to be mirrored
7485 * @count: number of entities contained in the buffer
7486 * @rule_id: the rule_id of the rule to be deleted
7488 * Delete a mirror rule for a given VEB.
7491 static enum i40e_status_code
7492 i40e_aq_del_mirror_rule(struct i40e_hw *hw,
7493 uint16_t seid, uint16_t rule_type, uint16_t *entries,
7494 uint16_t count, uint16_t rule_id)
7496 struct i40e_aq_desc desc;
7497 struct i40e_aqc_add_delete_mirror_rule cmd;
7498 uint16_t buff_len = 0;
7499 enum i40e_status_code status;
7502 i40e_fill_default_direct_cmd_desc(&desc,
7503 i40e_aqc_opc_delete_mirror_rule);
7504 memset(&cmd, 0, sizeof(cmd));
7505 if (rule_type == I40E_AQC_MIRROR_RULE_TYPE_VLAN) {
7506 desc.flags |= rte_cpu_to_le_16((uint16_t)(I40E_AQ_FLAG_BUF |
7508 cmd.num_entries = count;
7509 buff_len = sizeof(uint16_t) * count;
7510 desc.datalen = rte_cpu_to_le_16(buff_len);
7511 buff = (void *)entries;
7513 /* the rule_id is passed in the destination field when deleting a mirror rule */
7514 cmd.destination = rte_cpu_to_le_16(rule_id);
7516 cmd.rule_type = rte_cpu_to_le_16(rule_type <<
7517 I40E_AQC_MIRROR_RULE_TYPE_SHIFT);
7518 cmd.seid = rte_cpu_to_le_16(seid);
7520 rte_memcpy(&desc.params.raw, &cmd, sizeof(cmd));
7521 status = i40e_asq_send_command(hw, &desc, buff, buff_len, NULL);
7527 * i40e_mirror_rule_set
7528 * @dev: pointer to the hardware structure
7529 * @mirror_conf: mirror rule info
7530 * @sw_id: mirror rule's sw_id
7531 * @on: enable/disable
7533 * set a mirror rule.
7537 i40e_mirror_rule_set(struct rte_eth_dev *dev,
7538 struct rte_eth_mirror_conf *mirror_conf,
7539 uint8_t sw_id, uint8_t on)
7541 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
7542 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7543 struct i40e_mirror_rule *it, *mirr_rule = NULL;
7544 struct i40e_mirror_rule *parent = NULL;
7545 uint16_t seid, dst_seid, rule_id;
7549 PMD_DRV_LOG(DEBUG, "i40e_mirror_rule_set: sw_id = %d.", sw_id);
7551 if (pf->main_vsi->veb == NULL || pf->vfs == NULL) {
7552 PMD_DRV_LOG(ERR, "mirror rule can not be configured"
7553 " without veb or vfs.");
7556 if (pf->nb_mirror_rule > I40E_MAX_MIRROR_RULES) {
7557 PMD_DRV_LOG(ERR, "mirror table is full.");
7560 if (mirror_conf->dst_pool > pf->vf_num) {
7561 PMD_DRV_LOG(ERR, "invalid destination pool %u.",
7562 mirror_conf->dst_pool);
7566 seid = pf->main_vsi->veb->seid;
7568 TAILQ_FOREACH(it, &pf->mirror_list, rules) {
7569 if (sw_id <= it->index) {
7575 if (mirr_rule && sw_id == mirr_rule->index) {
7577 PMD_DRV_LOG(ERR, "mirror rule exists.");
7580 ret = i40e_aq_del_mirror_rule(hw, seid,
7581 mirr_rule->rule_type,
7583 mirr_rule->num_entries, mirr_rule->id);
7585 PMD_DRV_LOG(ERR, "failed to remove mirror rule:"
7586 " ret = %d, aq_err = %d.",
7587 ret, hw->aq.asq_last_status);
7590 TAILQ_REMOVE(&pf->mirror_list, mirr_rule, rules);
7591 rte_free(mirr_rule);
7592 pf->nb_mirror_rule--;
7596 PMD_DRV_LOG(ERR, "mirror rule doesn't exist.");
7600 mirr_rule = rte_zmalloc("i40e_mirror_rule",
7601 sizeof(struct i40e_mirror_rule) , 0);
7603 PMD_DRV_LOG(ERR, "failed to allocate memory");
7604 return I40E_ERR_NO_MEMORY;
7606 switch (mirror_conf->rule_type) {
7607 case ETH_MIRROR_VLAN:
7608 for (i = 0, j = 0; i < ETH_MIRROR_MAX_VLANS; i++) {
7609 if (mirror_conf->vlan.vlan_mask & (1ULL << i)) {
7610 mirr_rule->entries[j] =
7611 mirror_conf->vlan.vlan_id[i];
7616 PMD_DRV_LOG(ERR, "vlan is not specified.");
7617 rte_free(mirr_rule);
7620 mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_VLAN;
7622 case ETH_MIRROR_VIRTUAL_POOL_UP:
7623 case ETH_MIRROR_VIRTUAL_POOL_DOWN:
7624 /* check if the specified pool bit is out of range */
7625 if (mirror_conf->pool_mask > (uint64_t)(1ULL << (pf->vf_num + 1))) {
7626 PMD_DRV_LOG(ERR, "pool mask is out of range.");
7627 rte_free(mirr_rule);
7630 for (i = 0, j = 0; i < pf->vf_num; i++) {
7631 if (mirror_conf->pool_mask & (1ULL << i)) {
7632 mirr_rule->entries[j] = pf->vfs[i].vsi->seid;
7636 if (mirror_conf->pool_mask & (1ULL << pf->vf_num)) {
7637 /* add pf vsi to entries */
7638 mirr_rule->entries[j] = pf->main_vsi_seid;
7642 PMD_DRV_LOG(ERR, "pool is not specified.");
7643 rte_free(mirr_rule);
7646 /* in AQ commands, egress/ingress are from the switch's perspective, not the port's */
7647 mirr_rule->rule_type =
7648 (mirror_conf->rule_type == ETH_MIRROR_VIRTUAL_POOL_UP) ?
7649 I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS :
7650 I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS;
7652 case ETH_MIRROR_UPLINK_PORT:
7653 /* in AQ commands, egress/ingress are from the switch's perspective, not the port's */
7654 mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS;
7656 case ETH_MIRROR_DOWNLINK_PORT:
7657 mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS;
7660 PMD_DRV_LOG(ERR, "unsupported mirror type %d.",
7661 mirror_conf->rule_type);
7662 rte_free(mirr_rule);
7666 /* If the dst_pool is equal to vf_num, consider it as PF */
7667 if (mirror_conf->dst_pool == pf->vf_num)
7668 dst_seid = pf->main_vsi_seid;
7670 dst_seid = pf->vfs[mirror_conf->dst_pool].vsi->seid;
7672 ret = i40e_aq_add_mirror_rule(hw, seid, dst_seid,
7673 mirr_rule->rule_type, mirr_rule->entries,
7676 PMD_DRV_LOG(ERR, "failed to add mirror rule:"
7677 " ret = %d, aq_err = %d.",
7678 ret, hw->aq.asq_last_status);
7679 rte_free(mirr_rule);
7683 mirr_rule->index = sw_id;
7684 mirr_rule->num_entries = j;
7685 mirr_rule->id = rule_id;
7686 mirr_rule->dst_vsi_seid = dst_seid;
7689 TAILQ_INSERT_AFTER(&pf->mirror_list, parent, mirr_rule, rules);
7691 TAILQ_INSERT_HEAD(&pf->mirror_list, mirr_rule, rules);
7693 pf->nb_mirror_rule++;
7698 * i40e_mirror_rule_reset
7699 * @dev: pointer to the device
7700 * @sw_id: mirror rule's sw_id
7702 * reset a mirror rule.
7706 i40e_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t sw_id)
7708 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
7709 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7710 struct i40e_mirror_rule *it, *mirr_rule = NULL;
7714 PMD_DRV_LOG(DEBUG, "i40e_mirror_rule_reset: sw_id = %d.", sw_id);
7716 seid = pf->main_vsi->veb->seid;
7718 TAILQ_FOREACH(it, &pf->mirror_list, rules) {
7719 if (sw_id == it->index) {
7725 ret = i40e_aq_del_mirror_rule(hw, seid,
7726 mirr_rule->rule_type,
7728 mirr_rule->num_entries, mirr_rule->id);
7730 PMD_DRV_LOG(ERR, "failed to remove mirror rule:"
7731 " status = %d, aq_err = %d.",
7732 ret, hw->aq.asq_last_status);
7735 TAILQ_REMOVE(&pf->mirror_list, mirr_rule, rules);
7736 rte_free(mirr_rule);
7737 pf->nb_mirror_rule--;
7739 PMD_DRV_LOG(ERR, "mirror rule doesn't exist.");
7746 i40e_timesync_enable(struct rte_eth_dev *dev)
7748 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7749 struct rte_eth_link *link = &dev->data->dev_link;
7750 uint32_t tsync_ctl_l;
7751 uint32_t tsync_ctl_h;
7752 uint32_t tsync_inc_l;
7753 uint32_t tsync_inc_h;
7755 switch (link->link_speed) {
7756 case ETH_LINK_SPEED_40G:
7757 tsync_inc_l = I40E_PTP_40GB_INCVAL & 0xFFFFFFFF;
7758 tsync_inc_h = I40E_PTP_40GB_INCVAL >> 32;
7760 case ETH_LINK_SPEED_10G:
7761 tsync_inc_l = I40E_PTP_10GB_INCVAL & 0xFFFFFFFF;
7762 tsync_inc_h = I40E_PTP_10GB_INCVAL >> 32;
7764 case ETH_LINK_SPEED_1000:
7765 tsync_inc_l = I40E_PTP_1GB_INCVAL & 0xFFFFFFFF;
7766 tsync_inc_h = I40E_PTP_1GB_INCVAL >> 32;
7773 /* Clear timesync registers. */
7774 I40E_READ_REG(hw, I40E_PRTTSYN_STAT_0);
7775 I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_H);
7776 I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_L(0));
7777 I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_L(1));
7778 I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_L(2));
7779 I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_L(3));
7780 I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_H);
7782 /* Set the timesync increment value. */
7783 I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, tsync_inc_l);
7784 I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, tsync_inc_h);
7786 /* Enable timestamping of PTP packets. */
7787 tsync_ctl_l = I40E_READ_REG(hw, I40E_PRTTSYN_CTL0);
7788 tsync_ctl_l |= I40E_PRTTSYN_TSYNENA;
7790 tsync_ctl_h = I40E_READ_REG(hw, I40E_PRTTSYN_CTL1);
7791 tsync_ctl_h |= I40E_PRTTSYN_TSYNENA;
7792 tsync_ctl_h |= I40E_PRTTSYN_TSYNTYPE;
7794 I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL0, tsync_ctl_l);
7795 I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL1, tsync_ctl_h);
7801 i40e_timesync_disable(struct rte_eth_dev *dev)
7803 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7804 uint32_t tsync_ctl_l;
7805 uint32_t tsync_ctl_h;
7807 /* Disable timestamping of transmitted PTP packets. */
7808 tsync_ctl_l = I40E_READ_REG(hw, I40E_PRTTSYN_CTL0);
7809 tsync_ctl_l &= ~I40E_PRTTSYN_TSYNENA;
7811 tsync_ctl_h = I40E_READ_REG(hw, I40E_PRTTSYN_CTL1);
7812 tsync_ctl_h &= ~I40E_PRTTSYN_TSYNENA;
7814 I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL0, tsync_ctl_l);
7815 I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL1, tsync_ctl_h);
7817 /* Set the timesync increment value. */
7818 I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, 0x0);
7819 I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, 0x0);
7825 i40e_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
7826 struct timespec *timestamp, uint32_t flags)
7828 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7829 uint32_t sync_status;
7832 uint32_t index = flags & 0x03;
7834 sync_status = I40E_READ_REG(hw, I40E_PRTTSYN_STAT_1);
7835 if ((sync_status & (1 << index)) == 0)
7838 rx_stmpl = I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_L(index));
7839 rx_stmph = I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(index));
7841 timestamp->tv_sec = (uint64_t)(((uint64_t)rx_stmph << 32) | rx_stmpl);
7842 timestamp->tv_nsec = 0;
7848 i40e_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
7849 struct timespec *timestamp)
7851 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7852 uint32_t sync_status;
7856 sync_status = I40E_READ_REG(hw, I40E_PRTTSYN_STAT_0);
7857 if ((sync_status & I40E_PRTTSYN_STAT_0_TXTIME_MASK) == 0)
7860 tx_stmpl = I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_L);
7861 tx_stmph = I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_H);
7863 timestamp->tv_sec = (uint64_t)(((uint64_t)tx_stmph << 32) | tx_stmpl);
7864 timestamp->tv_nsec = 0;
7870 * i40e_parse_dcb_configure - parse dcb configure from user
7871 * @dev: the device being configured
7872 * @dcb_cfg: pointer of the result of parse
7873 * @*tc_map: bit map of enabled traffic classes
7875 * Returns 0 on success, negative value on failure
7878 i40e_parse_dcb_configure(struct rte_eth_dev *dev,
7879 struct i40e_dcbx_config *dcb_cfg,
7882 struct rte_eth_dcb_rx_conf *dcb_rx_conf;
7883 uint8_t i, tc_bw, bw_lf;
7885 memset(dcb_cfg, 0, sizeof(struct i40e_dcbx_config));
7887 dcb_rx_conf = &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
7888 if (dcb_rx_conf->nb_tcs > I40E_MAX_TRAFFIC_CLASS) {
7889 PMD_INIT_LOG(ERR, "number of tc exceeds max.");
7893 /* assume each tc has the same bw */
7894 tc_bw = I40E_MAX_PERCENT / dcb_rx_conf->nb_tcs;
7895 for (i = 0; i < dcb_rx_conf->nb_tcs; i++)
7896 dcb_cfg->etscfg.tcbwtable[i] = tc_bw;
7897 /* distribute the remainder so that the sum of tcbw equals 100 */
7898 bw_lf = I40E_MAX_PERCENT % dcb_rx_conf->nb_tcs;
7899 for (i = 0; i < bw_lf; i++)
7900 dcb_cfg->etscfg.tcbwtable[i]++;
7902 /* assume each tc has the same Transmission Selection Algorithm */
7903 for (i = 0; i < dcb_rx_conf->nb_tcs; i++)
7904 dcb_cfg->etscfg.tsatable[i] = I40E_IEEE_TSA_ETS;
7906 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
7907 dcb_cfg->etscfg.prioritytable[i] =
7908 dcb_rx_conf->dcb_tc[i];
7910 /* FW needs one App to configure HW */
7911 dcb_cfg->numapps = I40E_DEFAULT_DCB_APP_NUM;
7912 dcb_cfg->app[0].selector = I40E_APP_SEL_ETHTYPE;
7913 dcb_cfg->app[0].priority = I40E_DEFAULT_DCB_APP_PRIO;
7914 dcb_cfg->app[0].protocolid = I40E_APP_PROTOID_FCOE;
7916 if (dcb_rx_conf->nb_tcs == 0)
7917 *tc_map = 1; /* tc0 only */
7919 *tc_map = RTE_LEN2MASK(dcb_rx_conf->nb_tcs, uint8_t);
7921 if (dev->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
7922 dcb_cfg->pfc.willing = 0;
7923 dcb_cfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
7924 dcb_cfg->pfc.pfcenable = *tc_map;
7930 * i40e_vsi_get_bw_info - Query VSI BW Information
7931 * @vsi: the VSI being queried
7933 * Returns 0 on success, negative value on failure
7936 i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
7938 struct i40e_aqc_query_vsi_ets_sla_config_resp bw_ets_config = {0};
7939 struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
7940 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
7944 /* Get the VSI level BW configuration */
7945 ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
7948 "couldn't get PF vsi bw config, err %s aq_err %s\n",
7949 i40e_stat_str(hw, ret),
7950 i40e_aq_str(hw, hw->aq.asq_last_status));
7954 /* Get the VSI level BW configuration per TC */
7955 ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
7959 "couldn't get PF vsi ets bw config, err %s aq_err %s\n",
7960 i40e_stat_str(hw, ret),
7961 i40e_aq_str(hw, hw->aq.asq_last_status));
7965 if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) {
7966 PMD_INIT_LOG(WARNING,
7967 "Enabled TCs mismatch from querying VSI BW info"
7968 " 0x%08x 0x%08x\n", bw_config.tc_valid_bits,
7969 bw_ets_config.tc_valid_bits);
7970 /* Still continuing */
7973 vsi->bw_info.bw_limit = rte_le_to_cpu_16(bw_config.port_bw_limit);
7974 vsi->bw_info.bw_max_quanta = bw_config.max_bw;
7975 tc_bw_max = rte_le_to_cpu_16(bw_ets_config.tc_bw_max[0]) |
7976 (rte_le_to_cpu_16(bw_ets_config.tc_bw_max[1]) << 16);
7977 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
7978 vsi->bw_info.bw_ets_share_credits[i] =
7979 bw_ets_config.share_credits[i];
7980 vsi->bw_info.bw_ets_limit_credits[i] =
7981 rte_le_to_cpu_16(bw_ets_config.credits[i]);
7982 /* 3 bits out of 4 for each TC */
7983 vsi->bw_info.bw_ets_max_quanta[i] =
7984 (uint8_t)((tc_bw_max >> (i * 4)) & 0x7);
7986 "%s: vsi seid = %d, TC = %d, qset = 0x%x\n",
7987 __func__, vsi->seid, i, bw_config.qs_handles[i]);
7994 i40e_vsi_update_queue_mapping(struct i40e_vsi *vsi,
7995 struct i40e_aqc_vsi_properties_data *info,
7996 uint8_t enabled_tcmap)
7998 int ret, i, total_tc = 0;
7999 uint16_t qpnum_per_tc, bsf, qp_idx;
8000 struct rte_eth_dev_data *dev_data = I40E_VSI_TO_DEV_DATA(vsi);
8002 ret = validate_tcmap_parameter(vsi, enabled_tcmap);
8003 if (ret != I40E_SUCCESS)
8006 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
8007 if (enabled_tcmap & (1 << i))
8012 vsi->enabled_tc = enabled_tcmap;
8014 qpnum_per_tc = dev_data->nb_rx_queues / total_tc;
8015 /* Number of queues per enabled TC */
8016 if (qpnum_per_tc == 0) {
8017 PMD_INIT_LOG(ERR, " number of queues is less that tcs.");
8018 return I40E_ERR_INVALID_QP_ID;
8020 qpnum_per_tc = RTE_MIN(i40e_align_floor(qpnum_per_tc),
8022 bsf = rte_bsf32(qpnum_per_tc);
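/*
 * The per-TC queue count is encoded in the TC mapping as a power-of-two
 * exponent (bsf). Example: 16 RX queues across 4 enabled TCs gives
 * qpnum_per_tc = 4 and bsf = 2, i.e. 1 << 2 = 4 queues per TC.
 */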
8025 * Configure TC and queue mapping parameters. For each enabled TC,
8026 * allocate qpnum_per_tc queues to it; for each disabled TC, the
8027 * default queue will serve it.
8030 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
8031 if (vsi->enabled_tc & (1 << i)) {
8032 info->tc_mapping[i] = rte_cpu_to_le_16((qp_idx <<
8033 I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
8034 (bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
8035 qp_idx += qpnum_per_tc;
8037 info->tc_mapping[i] = 0;
8040 /* Associate queue numbers with the VSI; keep vsi->nb_qps unchanged */
8041 if (vsi->type == I40E_VSI_SRIOV) {
8042 info->mapping_flags |=
8043 rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
8044 for (i = 0; i < vsi->nb_qps; i++)
8045 info->queue_mapping[i] =
8046 rte_cpu_to_le_16(vsi->base_queue + i);
8048 info->mapping_flags |=
8049 rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
8050 info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
8052 info->valid_sections |=
8053 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
8055 return I40E_SUCCESS;
8059 * i40e_vsi_config_tc - Configure VSI tc setting for given TC map
8060 * @vsi: VSI to be configured
8061 * @tc_map: enabled TC bitmap
8063 * Returns 0 on success, negative value on failure
8066 i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 tc_map)
8068 struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
8069 struct i40e_vsi_context ctxt;
8070 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
8074 /* Check if enabled_tc is same as existing or new TCs */
8075 if (vsi->enabled_tc == tc_map)
8078 /* configure tc bandwidth */
8079 memset(&bw_data, 0, sizeof(bw_data));
8080 bw_data.tc_valid_bits = tc_map;
8081 /* Enable ETS TCs with equal BW Share for now across all VSIs */
8082 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
8083 if (tc_map & BIT_ULL(i))
8084 bw_data.tc_bw_credits[i] = 1;
8086 ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &bw_data, NULL);
8088 PMD_INIT_LOG(ERR, "AQ command Config VSI BW allocation"
8089 " per TC failed = %d",
8090 hw->aq.asq_last_status);
8093 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
8094 vsi->info.qs_handle[i] = bw_data.qs_handles[i];
8096 /* Update Queue Pairs Mapping for currently enabled UPs */
8097 ctxt.seid = vsi->seid;
8098 ctxt.pf_num = hw->pf_id;
8100 ctxt.uplink_seid = vsi->uplink_seid;
8101 ctxt.info = vsi->info;
8103 ret = i40e_vsi_update_queue_mapping(vsi, &ctxt.info, tc_map);
8107 /* Update the VSI after updating the VSI queue-mapping information */
8108 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
8110 PMD_INIT_LOG(ERR, "Failed to configure "
8111 "TC queue mapping = %d",
8112 hw->aq.asq_last_status);
8115 /* update the local VSI info with updated queue map */
8116 (void)rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
8117 sizeof(vsi->info.tc_mapping));
8118 (void)rte_memcpy(&vsi->info.queue_mapping,
8119 &ctxt.info.queue_mapping,
8120 sizeof(vsi->info.queue_mapping));
8121 vsi->info.mapping_flags = ctxt.info.mapping_flags;
8122 vsi->info.valid_sections = 0;
8124 /* Update current VSI BW information */
8125 ret = i40e_vsi_get_bw_info(vsi);
8128 "Failed updating vsi bw info, err %s aq_err %s",
8129 i40e_stat_str(hw, ret),
8130 i40e_aq_str(hw, hw->aq.asq_last_status));
8134 vsi->enabled_tc = tc_map;
8141 * i40e_dcb_hw_configure - program the dcb setting to hw
8142 * @pf: pf the configuration is taken on
8143 * @new_cfg: new configuration
8144 * @tc_map: enabled TC bitmap
8146 * Returns 0 on success, negative value on failure
8148 static enum i40e_status_code
8149 i40e_dcb_hw_configure(struct i40e_pf *pf,
8150 struct i40e_dcbx_config *new_cfg,
8153 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8154 struct i40e_dcbx_config *old_cfg = &hw->local_dcbx_config;
8155 struct i40e_vsi *main_vsi = pf->main_vsi;
8156 struct i40e_vsi_list *vsi_list;
8160 /* Use the FW API only if FW version >= v4.4 */
8161 if (!((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver >= 4))) {
8162 PMD_INIT_LOG(ERR, "FW < v4.4, can not use FW LLDP API"
8163 " to configure DCB");
8164 return I40E_ERR_FIRMWARE_API_VERSION;
8167 /* Check whether reconfiguration is needed */
8168 if (!memcmp(new_cfg, old_cfg, sizeof(struct i40e_dcbx_config))) {
8169 PMD_INIT_LOG(ERR, "No Change in DCB Config required.");
8170 return I40E_SUCCESS;
8173 /* Copy the new config to the current config */
8174 *old_cfg = *new_cfg;
8175 old_cfg->etsrec = old_cfg->etscfg;
8176 ret = i40e_set_dcb_config(hw);
8179 "Set DCB Config failed, err %s aq_err %s\n",
8180 i40e_stat_str(hw, ret),
8181 i40e_aq_str(hw, hw->aq.asq_last_status));
8184 /* set receive Arbiter to RR mode and ETS scheme by default */
8185 for (i = 0; i <= I40E_PRTDCB_RETSTCC_MAX_INDEX; i++) {
8186 val = I40E_READ_REG(hw, I40E_PRTDCB_RETSTCC(i));
8187 val &= ~(I40E_PRTDCB_RETSTCC_BWSHARE_MASK |
8188 I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK |
8189 I40E_PRTDCB_RETSTCC_ETSTC_SHIFT);
8190 val |= ((uint32_t)old_cfg->etscfg.tcbwtable[i] <<
8191 I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT) &
8192 I40E_PRTDCB_RETSTCC_BWSHARE_MASK;
8193 val |= ((uint32_t)1 << I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT) &
8194 I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK;
8195 val |= ((uint32_t)1 << I40E_PRTDCB_RETSTCC_ETSTC_SHIFT) &
8196 I40E_PRTDCB_RETSTCC_ETSTC_MASK;
8197 I40E_WRITE_REG(hw, I40E_PRTDCB_RETSTCC(i), val);
8199 /* get local mib to check whether it is configured correctly */
8201 hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_IEEE;
8202 /* Get Local DCB Config */
8203 i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_LOCAL, 0,
8204 &hw->local_dcbx_config);
8206 /* Update each VSI */
8207 i40e_vsi_config_tc(main_vsi, tc_map);
8208 if (main_vsi->veb) {
8209 TAILQ_FOREACH(vsi_list, &main_vsi->veb->head, list) {
8210 /* Besides the main VSI, only enable the default
8213 ret = i40e_vsi_config_tc(vsi_list->vsi,
8214 I40E_DEFAULT_TCMAP);
8216 PMD_INIT_LOG(WARNING,
8217 "Failed configuring TC for VSI seid=%d\n",
8218 vsi_list->vsi->seid);
8222 return I40E_SUCCESS;
8226 * i40e_dcb_init_configure - initial dcb config
8227 * @dev: device being configured
8228 * @sw_dcb: indicate whether dcb is sw configured or hw offload
8230 * Returns 0 on success, negative value on failure
8233 i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb)
8235 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
8236 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8239 if ((pf->flags & I40E_FLAG_DCB) == 0) {
8240 PMD_INIT_LOG(ERR, "HW doesn't support DCB");
8244 /* DCB initialization:
8245 * Update DCB configuration from the Firmware and configure
8246 * LLDP MIB change event.
8248 if (sw_dcb == TRUE) {
8249 ret = i40e_aq_stop_lldp(hw, TRUE, NULL);
8250 if (ret != I40E_SUCCESS)
8251 PMD_INIT_LOG(DEBUG, "Failed to stop lldp");
8253 ret = i40e_init_dcb(hw);
8254 /* if sw_dcb is true, the lldp agent has been stopped, so the
8255 * expected return from i40e_init_dcb is failure with I40E_AQ_RC_EPERM
8258 if (ret != I40E_SUCCESS &&
8259 hw->aq.asq_last_status == I40E_AQ_RC_EPERM) {
8260 memset(&hw->local_dcbx_config, 0,
8261 sizeof(struct i40e_dcbx_config));
8262 /* set dcb default configuration */
8263 hw->local_dcbx_config.etscfg.willing = 0;
8264 hw->local_dcbx_config.etscfg.maxtcs = 0;
8265 hw->local_dcbx_config.etscfg.tcbwtable[0] = 100;
8266 hw->local_dcbx_config.etscfg.tsatable[0] =
8268 hw->local_dcbx_config.etsrec =
8269 hw->local_dcbx_config.etscfg;
8270 hw->local_dcbx_config.pfc.willing = 0;
8271 hw->local_dcbx_config.pfc.pfccap =
8272 I40E_MAX_TRAFFIC_CLASS;
8273 /* FW needs one App to configure HW */
8274 hw->local_dcbx_config.numapps = 1;
8275 hw->local_dcbx_config.app[0].selector =
8276 I40E_APP_SEL_ETHTYPE;
8277 hw->local_dcbx_config.app[0].priority = 3;
8278 hw->local_dcbx_config.app[0].protocolid =
8279 I40E_APP_PROTOID_FCOE;
8280 ret = i40e_set_dcb_config(hw);
8282 PMD_INIT_LOG(ERR, "default dcb config fails."
8283 " err = %d, aq_err = %d.", ret,
8284 hw->aq.asq_last_status);
8288 PMD_INIT_LOG(ERR, "DCBX configuration failed, err = %d,"
8289 " aq_err = %d.", ret,
8290 hw->aq.asq_last_status);
8294 ret = i40e_aq_start_lldp(hw, NULL);
8295 if (ret != I40E_SUCCESS)
8296 PMD_INIT_LOG(DEBUG, "Failed to start lldp");
8298 ret = i40e_init_dcb(hw);
8300 if (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED) {
8301 PMD_INIT_LOG(ERR, "HW doesn't support"
8306 PMD_INIT_LOG(ERR, "DCBX configuration failed, err = %d,"
8307 " aq_err = %d.", ret,
8308 hw->aq.asq_last_status);
8316 * i40e_dcb_setup - setup dcb related config
8317 * @dev: device being configured
8319 * Returns 0 on success, negative value on failure
8322 i40e_dcb_setup(struct rte_eth_dev *dev)
8324 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
8325 struct i40e_dcbx_config dcb_cfg;
8329 if ((pf->flags & I40E_FLAG_DCB) == 0) {
8330 PMD_INIT_LOG(ERR, "HW doesn't support DCB");
8334 if (pf->vf_num != 0 ||
8335 (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG))
8336 PMD_INIT_LOG(DEBUG, "DCB only works on the main VSI.");
8338 ret = i40e_parse_dcb_configure(dev, &dcb_cfg, &tc_map);
8340 PMD_INIT_LOG(ERR, "invalid dcb config");
8343 ret = i40e_dcb_hw_configure(pf, &dcb_cfg, tc_map);
8345 PMD_INIT_LOG(ERR, "dcb sw configure fails");
8353 i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
8354 struct rte_eth_dcb_info *dcb_info)
8356 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
8357 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8358 struct i40e_vsi *vsi = pf->main_vsi;
8359 struct i40e_dcbx_config *dcb_cfg = &hw->local_dcbx_config;
8360 uint16_t bsf, tc_mapping;
8363 if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
8364 dcb_info->nb_tcs = rte_bsf32(vsi->enabled_tc + 1);
8366 dcb_info->nb_tcs = 1;
8367 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
8368 dcb_info->prio_tc[i] = dcb_cfg->etscfg.prioritytable[i];
8369 for (i = 0; i < dcb_info->nb_tcs; i++)
8370 dcb_info->tc_bws[i] = dcb_cfg->etscfg.tcbwtable[i];
8372 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
8373 if (vsi->enabled_tc & (1 << i)) {
8374 tc_mapping = rte_le_to_cpu_16(vsi->info.tc_mapping[i]);
8375 /* only the main VSI supports multiple TCs */
8376 dcb_info->tc_queue.tc_rxq[0][i].base =
8377 (tc_mapping & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >>
8378 I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT;
8379 dcb_info->tc_queue.tc_txq[0][i].base =
8380 dcb_info->tc_queue.tc_rxq[0][i].base;
8381 bsf = (tc_mapping & I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >>
8382 I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT;
8383 dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 1 << bsf;
8384 dcb_info->tc_queue.tc_txq[0][i].nb_queue =
8385 dcb_info->tc_queue.tc_rxq[0][i].nb_queue;
8393 i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
8395 struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
8396 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8398 i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL);
8401 msix_intr = intr_handle->intr_vec[queue_id];
8402 if (msix_intr == I40E_MISC_VEC_ID)
8403 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
8404 I40E_PFINT_DYN_CTLN_INTENA_MASK |
8405 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
8406 (0 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
8408 I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT));
8411 I40E_PFINT_DYN_CTLN(msix_intr -
8413 I40E_PFINT_DYN_CTLN_INTENA_MASK |
8414 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
8415 (0 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
8417 I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT));
8419 I40E_WRITE_FLUSH(hw);
8420 rte_intr_enable(&dev->pci_dev->intr_handle);
8426 i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
8428 struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
8429 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8432 msix_intr = intr_handle->intr_vec[queue_id];
8433 if (msix_intr == I40E_MISC_VEC_ID)
8434 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, 0);
8437 I40E_PFINT_DYN_CTLN(msix_intr -
8440 I40E_WRITE_FLUSH(hw);
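/*
 * Illustrative sketch, not part of the upstream driver: the interrupt-mode
 * RX pattern these two callbacks support. rte_eth_dev_rx_intr_enable() and
 * rte_eth_dev_rx_intr_disable() dispatch to the i40e handlers above; the
 * epoll wait on the queue's event fd is omitted for brevity.
 */
static __rte_unused void
example_rx_intr_cycle(uint8_t port_id, uint16_t queue_id)
{
	/* arm the queue interrupt before blocking */
	rte_eth_dev_rx_intr_enable(port_id, queue_id);
	/* ... block on the interrupt event, then disarm and poll ... */
	rte_eth_dev_rx_intr_disable(port_id, queue_id);
}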