1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Intel Corporation
5 #include <rte_string_fns.h>
6 #include <ethdev_pci.h>
13 #include <rte_tailq.h>
15 #include "base/ice_sched.h"
16 #include "base/ice_flow.h"
17 #include "base/ice_dcb.h"
18 #include "base/ice_common.h"
20 #include "rte_pmd_ice.h"
21 #include "ice_ethdev.h"
23 #include "ice_generic_flow.h"
26 #define ICE_SAFE_MODE_SUPPORT_ARG "safe-mode-support"
27 #define ICE_PIPELINE_MODE_SUPPORT_ARG "pipeline-mode-support"
28 #define ICE_PROTO_XTR_ARG "proto_xtr"
29 #define ICE_HW_DEBUG_MASK_ARG "hw_debug_mask"
31 static const char * const ice_valid_args[] = {
32 ICE_SAFE_MODE_SUPPORT_ARG,
33 ICE_PIPELINE_MODE_SUPPORT_ARG,
35 ICE_HW_DEBUG_MASK_ARG,
39 static const struct rte_mbuf_dynfield ice_proto_xtr_metadata_param = {
40 .name = "intel_pmd_dynfield_proto_xtr_metadata",
41 .size = sizeof(uint32_t),
42 .align = __alignof__(uint32_t),
46 struct proto_xtr_ol_flag {
47 const struct rte_mbuf_dynflag param;
52 static bool ice_proto_xtr_hw_support[PROTO_XTR_MAX];
54 static struct proto_xtr_ol_flag ice_proto_xtr_ol_flag_params[] = {
56 .param = { .name = "intel_pmd_dynflag_proto_xtr_vlan" },
57 .ol_flag = &rte_net_ice_dynflag_proto_xtr_vlan_mask },
59 .param = { .name = "intel_pmd_dynflag_proto_xtr_ipv4" },
60 .ol_flag = &rte_net_ice_dynflag_proto_xtr_ipv4_mask },
62 .param = { .name = "intel_pmd_dynflag_proto_xtr_ipv6" },
63 .ol_flag = &rte_net_ice_dynflag_proto_xtr_ipv6_mask },
64 [PROTO_XTR_IPV6_FLOW] = {
65 .param = { .name = "intel_pmd_dynflag_proto_xtr_ipv6_flow" },
66 .ol_flag = &rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask },
68 .param = { .name = "intel_pmd_dynflag_proto_xtr_tcp" },
69 .ol_flag = &rte_net_ice_dynflag_proto_xtr_tcp_mask },
70 [PROTO_XTR_IP_OFFSET] = {
71 .param = { .name = "intel_pmd_dynflag_proto_xtr_ip_offset" },
72 .ol_flag = &rte_net_ice_dynflag_proto_xtr_ip_offset_mask },
75 #define ICE_OS_DEFAULT_PKG_NAME "ICE OS Default Package"
76 #define ICE_COMMS_PKG_NAME "ICE COMMS Package"
77 #define ICE_MAX_RES_DESC_NUM 1024
79 static int ice_dev_configure(struct rte_eth_dev *dev);
80 static int ice_dev_start(struct rte_eth_dev *dev);
81 static int ice_dev_stop(struct rte_eth_dev *dev);
82 static int ice_dev_close(struct rte_eth_dev *dev);
83 static int ice_dev_reset(struct rte_eth_dev *dev);
84 static int ice_dev_info_get(struct rte_eth_dev *dev,
85 struct rte_eth_dev_info *dev_info);
86 static int ice_link_update(struct rte_eth_dev *dev,
87 int wait_to_complete);
88 static int ice_dev_set_link_up(struct rte_eth_dev *dev);
89 static int ice_dev_set_link_down(struct rte_eth_dev *dev);
91 static int ice_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
92 static int ice_vlan_offload_set(struct rte_eth_dev *dev, int mask);
93 static int ice_rss_reta_update(struct rte_eth_dev *dev,
94 struct rte_eth_rss_reta_entry64 *reta_conf,
96 static int ice_rss_reta_query(struct rte_eth_dev *dev,
97 struct rte_eth_rss_reta_entry64 *reta_conf,
99 static int ice_rss_hash_update(struct rte_eth_dev *dev,
100 struct rte_eth_rss_conf *rss_conf);
101 static int ice_rss_hash_conf_get(struct rte_eth_dev *dev,
102 struct rte_eth_rss_conf *rss_conf);
103 static int ice_promisc_enable(struct rte_eth_dev *dev);
104 static int ice_promisc_disable(struct rte_eth_dev *dev);
105 static int ice_allmulti_enable(struct rte_eth_dev *dev);
106 static int ice_allmulti_disable(struct rte_eth_dev *dev);
107 static int ice_vlan_filter_set(struct rte_eth_dev *dev,
110 static int ice_macaddr_set(struct rte_eth_dev *dev,
111 struct rte_ether_addr *mac_addr);
112 static int ice_macaddr_add(struct rte_eth_dev *dev,
113 struct rte_ether_addr *mac_addr,
114 __rte_unused uint32_t index,
116 static void ice_macaddr_remove(struct rte_eth_dev *dev, uint32_t index);
117 static int ice_rx_queue_intr_enable(struct rte_eth_dev *dev,
119 static int ice_rx_queue_intr_disable(struct rte_eth_dev *dev,
121 static int ice_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
123 static int ice_vlan_pvid_set(struct rte_eth_dev *dev,
124 uint16_t pvid, int on);
125 static int ice_get_eeprom_length(struct rte_eth_dev *dev);
126 static int ice_get_eeprom(struct rte_eth_dev *dev,
127 struct rte_dev_eeprom_info *eeprom);
128 static int ice_stats_get(struct rte_eth_dev *dev,
129 struct rte_eth_stats *stats);
130 static int ice_stats_reset(struct rte_eth_dev *dev);
131 static int ice_xstats_get(struct rte_eth_dev *dev,
132 struct rte_eth_xstat *xstats, unsigned int n);
133 static int ice_xstats_get_names(struct rte_eth_dev *dev,
134 struct rte_eth_xstat_name *xstats_names,
136 static int ice_dev_flow_ops_get(struct rte_eth_dev *dev,
137 const struct rte_flow_ops **ops);
138 static int ice_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
139 struct rte_eth_udp_tunnel *udp_tunnel);
140 static int ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
141 struct rte_eth_udp_tunnel *udp_tunnel);
143 static const struct rte_pci_id pci_id_ice_map[] = {
144 { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_BACKPLANE) },
145 { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_SFP) },
146 { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_10G_BASE_T) },
147 { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_1GBE) },
148 { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_QSFP) },
149 { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_BACKPLANE) },
150 { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP) },
151 { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP) },
152 { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810_XXV_BACKPLANE) },
153 { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810_XXV_QSFP) },
154 { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810_XXV_SFP) },
155 { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823C_BACKPLANE) },
156 { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823C_QSFP) },
157 { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823C_SFP) },
158 { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823C_10G_BASE_T) },
159 { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823C_SGMII) },
160 { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_BACKPLANE) },
161 { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_QSFP) },
162 { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_SFP) },
163 { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_10G_BASE_T) },
164 { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_SGMII) },
165 { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822L_BACKPLANE) },
166 { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822L_SFP) },
167 { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822L_10G_BASE_T) },
168 { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822L_SGMII) },
169 { .vendor_id = 0, /* sentinel */ },
172 static const struct eth_dev_ops ice_eth_dev_ops = {
173 .dev_configure = ice_dev_configure,
174 .dev_start = ice_dev_start,
175 .dev_stop = ice_dev_stop,
176 .dev_close = ice_dev_close,
177 .dev_reset = ice_dev_reset,
178 .dev_set_link_up = ice_dev_set_link_up,
179 .dev_set_link_down = ice_dev_set_link_down,
180 .rx_queue_start = ice_rx_queue_start,
181 .rx_queue_stop = ice_rx_queue_stop,
182 .tx_queue_start = ice_tx_queue_start,
183 .tx_queue_stop = ice_tx_queue_stop,
184 .rx_queue_setup = ice_rx_queue_setup,
185 .rx_queue_release = ice_rx_queue_release,
186 .tx_queue_setup = ice_tx_queue_setup,
187 .tx_queue_release = ice_tx_queue_release,
188 .dev_infos_get = ice_dev_info_get,
189 .dev_supported_ptypes_get = ice_dev_supported_ptypes_get,
190 .link_update = ice_link_update,
191 .mtu_set = ice_mtu_set,
192 .mac_addr_set = ice_macaddr_set,
193 .mac_addr_add = ice_macaddr_add,
194 .mac_addr_remove = ice_macaddr_remove,
195 .vlan_filter_set = ice_vlan_filter_set,
196 .vlan_offload_set = ice_vlan_offload_set,
197 .reta_update = ice_rss_reta_update,
198 .reta_query = ice_rss_reta_query,
199 .rss_hash_update = ice_rss_hash_update,
200 .rss_hash_conf_get = ice_rss_hash_conf_get,
201 .promiscuous_enable = ice_promisc_enable,
202 .promiscuous_disable = ice_promisc_disable,
203 .allmulticast_enable = ice_allmulti_enable,
204 .allmulticast_disable = ice_allmulti_disable,
205 .rx_queue_intr_enable = ice_rx_queue_intr_enable,
206 .rx_queue_intr_disable = ice_rx_queue_intr_disable,
207 .fw_version_get = ice_fw_version_get,
208 .vlan_pvid_set = ice_vlan_pvid_set,
209 .rxq_info_get = ice_rxq_info_get,
210 .txq_info_get = ice_txq_info_get,
211 .rx_burst_mode_get = ice_rx_burst_mode_get,
212 .tx_burst_mode_get = ice_tx_burst_mode_get,
213 .get_eeprom_length = ice_get_eeprom_length,
214 .get_eeprom = ice_get_eeprom,
215 .stats_get = ice_stats_get,
216 .stats_reset = ice_stats_reset,
217 .xstats_get = ice_xstats_get,
218 .xstats_get_names = ice_xstats_get_names,
219 .xstats_reset = ice_stats_reset,
220 .flow_ops_get = ice_dev_flow_ops_get,
221 .udp_tunnel_port_add = ice_dev_udp_tunnel_port_add,
222 .udp_tunnel_port_del = ice_dev_udp_tunnel_port_del,
223 .tx_done_cleanup = ice_tx_done_cleanup,
224 .get_monitor_addr = ice_get_monitor_addr,
227 /* store statistics names and its offset in stats structure */
228 struct ice_xstats_name_off {
229 char name[RTE_ETH_XSTATS_NAME_SIZE];
233 static const struct ice_xstats_name_off ice_stats_strings[] = {
234 {"rx_unicast_packets", offsetof(struct ice_eth_stats, rx_unicast)},
235 {"rx_multicast_packets", offsetof(struct ice_eth_stats, rx_multicast)},
236 {"rx_broadcast_packets", offsetof(struct ice_eth_stats, rx_broadcast)},
237 {"rx_dropped_packets", offsetof(struct ice_eth_stats, rx_discards)},
238 {"rx_unknown_protocol_packets", offsetof(struct ice_eth_stats,
239 rx_unknown_protocol)},
240 {"tx_unicast_packets", offsetof(struct ice_eth_stats, tx_unicast)},
241 {"tx_multicast_packets", offsetof(struct ice_eth_stats, tx_multicast)},
242 {"tx_broadcast_packets", offsetof(struct ice_eth_stats, tx_broadcast)},
243 {"tx_dropped_packets", offsetof(struct ice_eth_stats, tx_discards)},
246 #define ICE_NB_ETH_XSTATS (sizeof(ice_stats_strings) / \
247 sizeof(ice_stats_strings[0]))
249 static const struct ice_xstats_name_off ice_hw_port_strings[] = {
250 {"tx_link_down_dropped", offsetof(struct ice_hw_port_stats,
251 tx_dropped_link_down)},
252 {"rx_crc_errors", offsetof(struct ice_hw_port_stats, crc_errors)},
253 {"rx_illegal_byte_errors", offsetof(struct ice_hw_port_stats,
255 {"rx_error_bytes", offsetof(struct ice_hw_port_stats, error_bytes)},
256 {"mac_local_errors", offsetof(struct ice_hw_port_stats,
258 {"mac_remote_errors", offsetof(struct ice_hw_port_stats,
260 {"rx_len_errors", offsetof(struct ice_hw_port_stats,
262 {"tx_xon_packets", offsetof(struct ice_hw_port_stats, link_xon_tx)},
263 {"rx_xon_packets", offsetof(struct ice_hw_port_stats, link_xon_rx)},
264 {"tx_xoff_packets", offsetof(struct ice_hw_port_stats, link_xoff_tx)},
265 {"rx_xoff_packets", offsetof(struct ice_hw_port_stats, link_xoff_rx)},
266 {"rx_size_64_packets", offsetof(struct ice_hw_port_stats, rx_size_64)},
267 {"rx_size_65_to_127_packets", offsetof(struct ice_hw_port_stats,
269 {"rx_size_128_to_255_packets", offsetof(struct ice_hw_port_stats,
271 {"rx_size_256_to_511_packets", offsetof(struct ice_hw_port_stats,
273 {"rx_size_512_to_1023_packets", offsetof(struct ice_hw_port_stats,
275 {"rx_size_1024_to_1522_packets", offsetof(struct ice_hw_port_stats,
277 {"rx_size_1523_to_max_packets", offsetof(struct ice_hw_port_stats,
279 {"rx_undersized_errors", offsetof(struct ice_hw_port_stats,
281 {"rx_oversize_errors", offsetof(struct ice_hw_port_stats,
283 {"rx_mac_short_pkt_dropped", offsetof(struct ice_hw_port_stats,
284 mac_short_pkt_dropped)},
285 {"rx_fragmented_errors", offsetof(struct ice_hw_port_stats,
287 {"rx_jabber_errors", offsetof(struct ice_hw_port_stats, rx_jabber)},
288 {"tx_size_64_packets", offsetof(struct ice_hw_port_stats, tx_size_64)},
289 {"tx_size_65_to_127_packets", offsetof(struct ice_hw_port_stats,
291 {"tx_size_128_to_255_packets", offsetof(struct ice_hw_port_stats,
293 {"tx_size_256_to_511_packets", offsetof(struct ice_hw_port_stats,
295 {"tx_size_512_to_1023_packets", offsetof(struct ice_hw_port_stats,
297 {"tx_size_1024_to_1522_packets", offsetof(struct ice_hw_port_stats,
299 {"tx_size_1523_to_max_packets", offsetof(struct ice_hw_port_stats,
303 #define ICE_NB_HW_PORT_XSTATS (sizeof(ice_hw_port_strings) / \
304 sizeof(ice_hw_port_strings[0]))
307 ice_init_controlq_parameter(struct ice_hw *hw)
309 /* fields for adminq */
310 hw->adminq.num_rq_entries = ICE_ADMINQ_LEN;
311 hw->adminq.num_sq_entries = ICE_ADMINQ_LEN;
312 hw->adminq.rq_buf_size = ICE_ADMINQ_BUF_SZ;
313 hw->adminq.sq_buf_size = ICE_ADMINQ_BUF_SZ;
315 /* fields for mailboxq, DPDK used as PF host */
316 hw->mailboxq.num_rq_entries = ICE_MAILBOXQ_LEN;
317 hw->mailboxq.num_sq_entries = ICE_MAILBOXQ_LEN;
318 hw->mailboxq.rq_buf_size = ICE_MAILBOXQ_BUF_SZ;
319 hw->mailboxq.sq_buf_size = ICE_MAILBOXQ_BUF_SZ;
323 lookup_proto_xtr_type(const char *xtr_name)
327 enum proto_xtr_type type;
329 { "vlan", PROTO_XTR_VLAN },
330 { "ipv4", PROTO_XTR_IPV4 },
331 { "ipv6", PROTO_XTR_IPV6 },
332 { "ipv6_flow", PROTO_XTR_IPV6_FLOW },
333 { "tcp", PROTO_XTR_TCP },
334 { "ip_offset", PROTO_XTR_IP_OFFSET },
338 for (i = 0; i < RTE_DIM(xtr_type_map); i++) {
339 if (strcmp(xtr_name, xtr_type_map[i].name) == 0)
340 return xtr_type_map[i].type;
347 * Parse elem, the elem could be single number/range or '(' ')' group
348 * 1) A single number elem, it's just a simple digit. e.g. 9
349 * 2) A single range elem, two digits with a '-' between. e.g. 2-6
350 * 3) A group elem, combines multiple 1) or 2) with '( )'. e.g (0,2-4,6)
351 * Within group elem, '-' used for a range separator;
352 * ',' used for a single number.
355 parse_queue_set(const char *input, int xtr_type, struct ice_devargs *devargs)
357 const char *str = input;
362 while (isblank(*str))
365 if (!isdigit(*str) && *str != '(')
368 /* process single number or single range of number */
371 idx = strtoul(str, &end, 10);
372 if (errno || end == NULL || idx >= ICE_MAX_QUEUE_NUM)
375 while (isblank(*end))
381 /* process single <number>-<number> */
384 while (isblank(*end))
390 idx = strtoul(end, &end, 10);
391 if (errno || end == NULL || idx >= ICE_MAX_QUEUE_NUM)
395 while (isblank(*end))
402 for (idx = RTE_MIN(min, max);
403 idx <= RTE_MAX(min, max); idx++)
404 devargs->proto_xtr[idx] = xtr_type;
409 /* process set within bracket */
411 while (isblank(*str))
416 min = ICE_MAX_QUEUE_NUM;
418 /* go ahead to the first digit */
419 while (isblank(*str))
424 /* get the digit value */
426 idx = strtoul(str, &end, 10);
427 if (errno || end == NULL || idx >= ICE_MAX_QUEUE_NUM)
430 /* go ahead to separator '-',',' and ')' */
431 while (isblank(*end))
434 if (min == ICE_MAX_QUEUE_NUM)
436 else /* avoid continuous '-' */
438 } else if (*end == ',' || *end == ')') {
440 if (min == ICE_MAX_QUEUE_NUM)
443 for (idx = RTE_MIN(min, max);
444 idx <= RTE_MAX(min, max); idx++)
445 devargs->proto_xtr[idx] = xtr_type;
447 min = ICE_MAX_QUEUE_NUM;
453 } while (*end != ')' && *end != '\0');
459 parse_queue_proto_xtr(const char *queues, struct ice_devargs *devargs)
461 const char *queue_start;
466 while (isblank(*queues))
469 if (*queues != '[') {
470 xtr_type = lookup_proto_xtr_type(queues);
474 devargs->proto_xtr_dflt = xtr_type;
481 while (isblank(*queues))
486 queue_start = queues;
488 /* go across a complete bracket */
489 if (*queue_start == '(') {
490 queues += strcspn(queues, ")");
495 /* scan the separator ':' */
496 queues += strcspn(queues, ":");
497 if (*queues++ != ':')
499 while (isblank(*queues))
502 for (idx = 0; ; idx++) {
503 if (isblank(queues[idx]) ||
504 queues[idx] == ',' ||
505 queues[idx] == ']' ||
509 if (idx > sizeof(xtr_name) - 2)
512 xtr_name[idx] = queues[idx];
514 xtr_name[idx] = '\0';
515 xtr_type = lookup_proto_xtr_type(xtr_name);
521 while (isblank(*queues) || *queues == ',' || *queues == ']')
524 if (parse_queue_set(queue_start, xtr_type, devargs) < 0)
526 } while (*queues != '\0');
532 handle_proto_xtr_arg(__rte_unused const char *key, const char *value,
535 struct ice_devargs *devargs = extra_args;
537 if (value == NULL || extra_args == NULL)
540 if (parse_queue_proto_xtr(value, devargs) < 0) {
542 "The protocol extraction parameter is wrong : '%s'",
551 ice_check_proto_xtr_support(struct ice_hw *hw)
553 #define FLX_REG(val, fld, idx) \
554 (((val) & GLFLXP_RXDID_FLX_WRD_##idx##_##fld##_M) >> \
555 GLFLXP_RXDID_FLX_WRD_##idx##_##fld##_S)
562 [PROTO_XTR_VLAN] = { ICE_RXDID_COMMS_AUX_VLAN,
564 ICE_PROT_EVLAN_O, ICE_PROT_VLAN_O},
565 [PROTO_XTR_IPV4] = { ICE_RXDID_COMMS_AUX_IPV4,
567 ICE_PROT_IPV4_OF_OR_S,
568 ICE_PROT_IPV4_OF_OR_S },
569 [PROTO_XTR_IPV6] = { ICE_RXDID_COMMS_AUX_IPV6,
571 ICE_PROT_IPV6_OF_OR_S,
572 ICE_PROT_IPV6_OF_OR_S },
573 [PROTO_XTR_IPV6_FLOW] = { ICE_RXDID_COMMS_AUX_IPV6_FLOW,
575 ICE_PROT_IPV6_OF_OR_S,
576 ICE_PROT_IPV6_OF_OR_S },
577 [PROTO_XTR_TCP] = { ICE_RXDID_COMMS_AUX_TCP,
579 ICE_PROT_TCP_IL, ICE_PROT_ID_INVAL },
580 [PROTO_XTR_IP_OFFSET] = { ICE_RXDID_COMMS_AUX_IP_OFFSET,
582 ICE_PROT_IPV4_OF_OR_S,
583 ICE_PROT_IPV6_OF_OR_S },
587 for (i = 0; i < RTE_DIM(xtr_sets); i++) {
588 uint32_t rxdid = xtr_sets[i].rxdid;
591 if (xtr_sets[i].protid_0 != ICE_PROT_ID_INVAL) {
592 v = ICE_READ_REG(hw, GLFLXP_RXDID_FLX_WRD_4(rxdid));
594 if (FLX_REG(v, PROT_MDID, 4) == xtr_sets[i].protid_0 &&
595 FLX_REG(v, RXDID_OPCODE, 4) == xtr_sets[i].opcode)
596 ice_proto_xtr_hw_support[i] = true;
599 if (xtr_sets[i].protid_1 != ICE_PROT_ID_INVAL) {
600 v = ICE_READ_REG(hw, GLFLXP_RXDID_FLX_WRD_5(rxdid));
602 if (FLX_REG(v, PROT_MDID, 5) == xtr_sets[i].protid_1 &&
603 FLX_REG(v, RXDID_OPCODE, 5) == xtr_sets[i].opcode)
604 ice_proto_xtr_hw_support[i] = true;
610 ice_res_pool_init(struct ice_res_pool_info *pool, uint32_t base,
613 struct pool_entry *entry;
618 entry = rte_zmalloc(NULL, sizeof(*entry), 0);
621 "Failed to allocate memory for resource pool");
625 /* queue heap initialize */
626 pool->num_free = num;
629 LIST_INIT(&pool->alloc_list);
630 LIST_INIT(&pool->free_list);
632 /* Initialize element */
636 LIST_INSERT_HEAD(&pool->free_list, entry, next);
641 ice_res_pool_alloc(struct ice_res_pool_info *pool,
644 struct pool_entry *entry, *valid_entry;
647 PMD_INIT_LOG(ERR, "Invalid parameter");
651 if (pool->num_free < num) {
652 PMD_INIT_LOG(ERR, "No resource. ask:%u, available:%u",
653 num, pool->num_free);
658 /* Lookup in free list and find most fit one */
659 LIST_FOREACH(entry, &pool->free_list, next) {
660 if (entry->len >= num) {
662 if (entry->len == num) {
667 valid_entry->len > entry->len)
672 /* Not find one to satisfy the request, return */
674 PMD_INIT_LOG(ERR, "No valid entry found");
678 * The entry have equal queue number as requested,
679 * remove it from alloc_list.
681 if (valid_entry->len == num) {
682 LIST_REMOVE(valid_entry, next);
685 * The entry have more numbers than requested,
686 * create a new entry for alloc_list and minus its
687 * queue base and number in free_list.
689 entry = rte_zmalloc(NULL, sizeof(*entry), 0);
692 "Failed to allocate memory for "
696 entry->base = valid_entry->base;
698 valid_entry->base += num;
699 valid_entry->len -= num;
703 /* Insert it into alloc list, not sorted */
704 LIST_INSERT_HEAD(&pool->alloc_list, valid_entry, next);
706 pool->num_free -= valid_entry->len;
707 pool->num_alloc += valid_entry->len;
709 return valid_entry->base + pool->base;
713 ice_res_pool_destroy(struct ice_res_pool_info *pool)
715 struct pool_entry *entry, *next_entry;
720 for (entry = LIST_FIRST(&pool->alloc_list);
721 entry && (next_entry = LIST_NEXT(entry, next), 1);
722 entry = next_entry) {
723 LIST_REMOVE(entry, next);
727 for (entry = LIST_FIRST(&pool->free_list);
728 entry && (next_entry = LIST_NEXT(entry, next), 1);
729 entry = next_entry) {
730 LIST_REMOVE(entry, next);
737 LIST_INIT(&pool->alloc_list);
738 LIST_INIT(&pool->free_list);
742 ice_vsi_config_default_rss(struct ice_aqc_vsi_props *info)
744 /* Set VSI LUT selection */
745 info->q_opt_rss = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI &
746 ICE_AQ_VSI_Q_OPT_RSS_LUT_M;
747 /* Set Hash scheme */
748 info->q_opt_rss |= ICE_AQ_VSI_Q_OPT_RSS_TPLZ &
749 ICE_AQ_VSI_Q_OPT_RSS_HASH_M;
751 info->q_opt_tc = ICE_AQ_VSI_Q_OPT_TC_OVR_M;
754 static enum ice_status
755 ice_vsi_config_tc_queue_mapping(struct ice_vsi *vsi,
756 struct ice_aqc_vsi_props *info,
757 uint8_t enabled_tcmap)
759 uint16_t bsf, qp_idx;
761 /* default tc 0 now. Multi-TC supporting need to be done later.
762 * Configure TC and queue mapping parameters, for enabled TC,
763 * allocate qpnum_per_tc queues to this traffic.
765 if (enabled_tcmap != 0x01) {
766 PMD_INIT_LOG(ERR, "only TC0 is supported");
770 vsi->nb_qps = RTE_MIN(vsi->nb_qps, ICE_MAX_Q_PER_TC);
771 bsf = rte_bsf32(vsi->nb_qps);
772 /* Adjust the queue number to actual queues that can be applied */
773 vsi->nb_qps = 0x1 << bsf;
776 /* Set tc and queue mapping with VSI */
777 info->tc_mapping[0] = rte_cpu_to_le_16((qp_idx <<
778 ICE_AQ_VSI_TC_Q_OFFSET_S) |
779 (bsf << ICE_AQ_VSI_TC_Q_NUM_S));
781 /* Associate queue number with VSI */
782 info->mapping_flags |= rte_cpu_to_le_16(ICE_AQ_VSI_Q_MAP_CONTIG);
783 info->q_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
784 info->q_mapping[1] = rte_cpu_to_le_16(vsi->nb_qps);
785 info->valid_sections |=
786 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
787 /* Set the info.ingress_table and info.egress_table
788 * for UP translate table. Now just set it to 1:1 map by default
789 * -- 0b 111 110 101 100 011 010 001 000 == 0xFAC688
791 #define ICE_TC_QUEUE_TABLE_DFLT 0x00FAC688
792 info->ingress_table = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT);
793 info->egress_table = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT);
794 info->outer_up_table = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT);
799 ice_init_mac_address(struct rte_eth_dev *dev)
801 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
803 if (!rte_is_unicast_ether_addr
804 ((struct rte_ether_addr *)hw->port_info[0].mac.lan_addr)) {
805 PMD_INIT_LOG(ERR, "Invalid MAC address");
810 (struct rte_ether_addr *)hw->port_info[0].mac.lan_addr,
811 (struct rte_ether_addr *)hw->port_info[0].mac.perm_addr);
813 dev->data->mac_addrs =
814 rte_zmalloc(NULL, sizeof(struct rte_ether_addr) * ICE_NUM_MACADDR_MAX, 0);
815 if (!dev->data->mac_addrs) {
817 "Failed to allocate memory to store mac address");
820 /* store it to dev data */
822 (struct rte_ether_addr *)hw->port_info[0].mac.perm_addr,
823 &dev->data->mac_addrs[0]);
827 /* Find out specific MAC filter */
828 static struct ice_mac_filter *
829 ice_find_mac_filter(struct ice_vsi *vsi, struct rte_ether_addr *macaddr)
831 struct ice_mac_filter *f;
833 TAILQ_FOREACH(f, &vsi->mac_list, next) {
834 if (rte_is_same_ether_addr(macaddr, &f->mac_info.mac_addr))
842 ice_add_mac_filter(struct ice_vsi *vsi, struct rte_ether_addr *mac_addr)
844 struct ice_fltr_list_entry *m_list_itr = NULL;
845 struct ice_mac_filter *f;
846 struct LIST_HEAD_TYPE list_head;
847 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
850 /* If it's added and configured, return */
851 f = ice_find_mac_filter(vsi, mac_addr);
853 PMD_DRV_LOG(INFO, "This MAC filter already exists.");
857 INIT_LIST_HEAD(&list_head);
859 m_list_itr = (struct ice_fltr_list_entry *)
860 ice_malloc(hw, sizeof(*m_list_itr));
865 ice_memcpy(m_list_itr->fltr_info.l_data.mac.mac_addr,
866 mac_addr, ETH_ALEN, ICE_NONDMA_TO_NONDMA);
867 m_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
868 m_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
869 m_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_MAC;
870 m_list_itr->fltr_info.flag = ICE_FLTR_TX;
871 m_list_itr->fltr_info.vsi_handle = vsi->idx;
873 LIST_ADD(&m_list_itr->list_entry, &list_head);
876 ret = ice_add_mac(hw, &list_head);
877 if (ret != ICE_SUCCESS) {
878 PMD_DRV_LOG(ERR, "Failed to add MAC filter");
882 /* Add the mac addr into mac list */
883 f = rte_zmalloc(NULL, sizeof(*f), 0);
885 PMD_DRV_LOG(ERR, "failed to allocate memory");
889 rte_ether_addr_copy(mac_addr, &f->mac_info.mac_addr);
890 TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
896 rte_free(m_list_itr);
901 ice_remove_mac_filter(struct ice_vsi *vsi, struct rte_ether_addr *mac_addr)
903 struct ice_fltr_list_entry *m_list_itr = NULL;
904 struct ice_mac_filter *f;
905 struct LIST_HEAD_TYPE list_head;
906 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
909 /* Can't find it, return an error */
910 f = ice_find_mac_filter(vsi, mac_addr);
914 INIT_LIST_HEAD(&list_head);
916 m_list_itr = (struct ice_fltr_list_entry *)
917 ice_malloc(hw, sizeof(*m_list_itr));
922 ice_memcpy(m_list_itr->fltr_info.l_data.mac.mac_addr,
923 mac_addr, ETH_ALEN, ICE_NONDMA_TO_NONDMA);
924 m_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
925 m_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
926 m_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_MAC;
927 m_list_itr->fltr_info.flag = ICE_FLTR_TX;
928 m_list_itr->fltr_info.vsi_handle = vsi->idx;
930 LIST_ADD(&m_list_itr->list_entry, &list_head);
932 /* remove the mac filter */
933 ret = ice_remove_mac(hw, &list_head);
934 if (ret != ICE_SUCCESS) {
935 PMD_DRV_LOG(ERR, "Failed to remove MAC filter");
940 /* Remove the mac addr from mac list */
941 TAILQ_REMOVE(&vsi->mac_list, f, next);
947 rte_free(m_list_itr);
951 /* Find out specific VLAN filter */
952 static struct ice_vlan_filter *
953 ice_find_vlan_filter(struct ice_vsi *vsi, struct ice_vlan *vlan)
955 struct ice_vlan_filter *f;
957 TAILQ_FOREACH(f, &vsi->vlan_list, next) {
958 if (vlan->tpid == f->vlan_info.vlan.tpid &&
959 vlan->vid == f->vlan_info.vlan.vid)
967 ice_add_vlan_filter(struct ice_vsi *vsi, struct ice_vlan *vlan)
969 struct ice_fltr_list_entry *v_list_itr = NULL;
970 struct ice_vlan_filter *f;
971 struct LIST_HEAD_TYPE list_head;
975 if (!vsi || vlan->vid > RTE_ETHER_MAX_VLAN_ID)
978 hw = ICE_VSI_TO_HW(vsi);
980 /* If it's added and configured, return. */
981 f = ice_find_vlan_filter(vsi, vlan);
983 PMD_DRV_LOG(INFO, "This VLAN filter already exists.");
987 if (!vsi->vlan_anti_spoof_on && !vsi->vlan_filter_on)
990 INIT_LIST_HEAD(&list_head);
992 v_list_itr = (struct ice_fltr_list_entry *)
993 ice_malloc(hw, sizeof(*v_list_itr));
998 v_list_itr->fltr_info.l_data.vlan.vlan_id = vlan->vid;
999 v_list_itr->fltr_info.l_data.vlan.tpid = vlan->tpid;
1000 v_list_itr->fltr_info.l_data.vlan.tpid_valid = true;
1001 v_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
1002 v_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
1003 v_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
1004 v_list_itr->fltr_info.flag = ICE_FLTR_TX;
1005 v_list_itr->fltr_info.vsi_handle = vsi->idx;
1007 LIST_ADD(&v_list_itr->list_entry, &list_head);
1010 ret = ice_add_vlan(hw, &list_head);
1011 if (ret != ICE_SUCCESS) {
1012 PMD_DRV_LOG(ERR, "Failed to add VLAN filter");
1017 /* Add vlan into vlan list */
1018 f = rte_zmalloc(NULL, sizeof(*f), 0);
1020 PMD_DRV_LOG(ERR, "failed to allocate memory");
1024 f->vlan_info.vlan.tpid = vlan->tpid;
1025 f->vlan_info.vlan.vid = vlan->vid;
1026 TAILQ_INSERT_TAIL(&vsi->vlan_list, f, next);
1032 rte_free(v_list_itr);
1037 ice_remove_vlan_filter(struct ice_vsi *vsi, struct ice_vlan *vlan)
1039 struct ice_fltr_list_entry *v_list_itr = NULL;
1040 struct ice_vlan_filter *f;
1041 struct LIST_HEAD_TYPE list_head;
1045 if (!vsi || vlan->vid > RTE_ETHER_MAX_VLAN_ID)
1048 hw = ICE_VSI_TO_HW(vsi);
1050 /* Can't find it, return an error */
1051 f = ice_find_vlan_filter(vsi, vlan);
1055 INIT_LIST_HEAD(&list_head);
1057 v_list_itr = (struct ice_fltr_list_entry *)
1058 ice_malloc(hw, sizeof(*v_list_itr));
1064 v_list_itr->fltr_info.l_data.vlan.vlan_id = vlan->vid;
1065 v_list_itr->fltr_info.l_data.vlan.tpid = vlan->tpid;
1066 v_list_itr->fltr_info.l_data.vlan.tpid_valid = true;
1067 v_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
1068 v_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
1069 v_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
1070 v_list_itr->fltr_info.flag = ICE_FLTR_TX;
1071 v_list_itr->fltr_info.vsi_handle = vsi->idx;
1073 LIST_ADD(&v_list_itr->list_entry, &list_head);
1075 /* remove the vlan filter */
1076 ret = ice_remove_vlan(hw, &list_head);
1077 if (ret != ICE_SUCCESS) {
1078 PMD_DRV_LOG(ERR, "Failed to remove VLAN filter");
1083 /* Remove the vlan id from vlan list */
1084 TAILQ_REMOVE(&vsi->vlan_list, f, next);
1090 rte_free(v_list_itr);
1095 ice_remove_all_mac_vlan_filters(struct ice_vsi *vsi)
1097 struct ice_mac_filter *m_f;
1098 struct ice_vlan_filter *v_f;
1102 if (!vsi || !vsi->mac_num)
1105 TAILQ_FOREACH_SAFE(m_f, &vsi->mac_list, next, temp) {
1106 ret = ice_remove_mac_filter(vsi, &m_f->mac_info.mac_addr);
1107 if (ret != ICE_SUCCESS) {
1113 if (vsi->vlan_num == 0)
1116 TAILQ_FOREACH_SAFE(v_f, &vsi->vlan_list, next, temp) {
1117 ret = ice_remove_vlan_filter(vsi, &v_f->vlan_info.vlan);
1118 if (ret != ICE_SUCCESS) {
1130 ice_pf_enable_irq0(struct ice_hw *hw)
1132 /* reset the registers */
1133 ICE_WRITE_REG(hw, PFINT_OICR_ENA, 0);
1134 ICE_READ_REG(hw, PFINT_OICR);
1137 ICE_WRITE_REG(hw, PFINT_OICR_ENA,
1138 (uint32_t)(PFINT_OICR_ENA_INT_ENA_M &
1139 (~PFINT_OICR_LINK_STAT_CHANGE_M)));
1141 ICE_WRITE_REG(hw, PFINT_OICR_CTL,
1142 (0 & PFINT_OICR_CTL_MSIX_INDX_M) |
1143 ((0 << PFINT_OICR_CTL_ITR_INDX_S) &
1144 PFINT_OICR_CTL_ITR_INDX_M) |
1145 PFINT_OICR_CTL_CAUSE_ENA_M);
1147 ICE_WRITE_REG(hw, PFINT_FW_CTL,
1148 (0 & PFINT_FW_CTL_MSIX_INDX_M) |
1149 ((0 << PFINT_FW_CTL_ITR_INDX_S) &
1150 PFINT_FW_CTL_ITR_INDX_M) |
1151 PFINT_FW_CTL_CAUSE_ENA_M);
1153 ICE_WRITE_REG(hw, PFINT_OICR_ENA, PFINT_OICR_ENA_INT_ENA_M);
1156 ICE_WRITE_REG(hw, GLINT_DYN_CTL(0),
1157 GLINT_DYN_CTL_INTENA_M |
1158 GLINT_DYN_CTL_CLEARPBA_M |
1159 GLINT_DYN_CTL_ITR_INDX_M);
1166 ice_pf_disable_irq0(struct ice_hw *hw)
1168 /* Disable all interrupt types */
1169 ICE_WRITE_REG(hw, GLINT_DYN_CTL(0), GLINT_DYN_CTL_WB_ON_ITR_M);
1175 ice_handle_aq_msg(struct rte_eth_dev *dev)
1177 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1178 struct ice_ctl_q_info *cq = &hw->adminq;
1179 struct ice_rq_event_info event;
1180 uint16_t pending, opcode;
1183 event.buf_len = ICE_AQ_MAX_BUF_LEN;
1184 event.msg_buf = rte_zmalloc(NULL, event.buf_len, 0);
1185 if (!event.msg_buf) {
1186 PMD_DRV_LOG(ERR, "Failed to allocate mem");
1192 ret = ice_clean_rq_elem(hw, cq, &event, &pending);
1194 if (ret != ICE_SUCCESS) {
1196 "Failed to read msg from AdminQ, "
1198 hw->adminq.sq_last_status);
1201 opcode = rte_le_to_cpu_16(event.desc.opcode);
1204 case ice_aqc_opc_get_link_status:
1205 ret = ice_link_update(dev, 0);
1207 rte_eth_dev_callback_process
1208 (dev, RTE_ETH_EVENT_INTR_LSC, NULL);
1211 PMD_DRV_LOG(DEBUG, "Request %u is not supported yet",
1216 rte_free(event.msg_buf);
1221 * Interrupt handler triggered by NIC for handling
1222 * specific interrupt.
1225 * Pointer to interrupt handle.
1227 * The address of parameter (struct rte_eth_dev *) regsitered before.
1233 ice_interrupt_handler(void *param)
1235 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
1236 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1244 uint32_t int_fw_ctl;
1247 /* Disable interrupt */
1248 ice_pf_disable_irq0(hw);
1250 /* read out interrupt causes */
1251 oicr = ICE_READ_REG(hw, PFINT_OICR);
1253 int_fw_ctl = ICE_READ_REG(hw, PFINT_FW_CTL);
1256 /* No interrupt event indicated */
1257 if (!(oicr & PFINT_OICR_INTEVENT_M)) {
1258 PMD_DRV_LOG(INFO, "No interrupt event");
1263 if (int_fw_ctl & PFINT_FW_CTL_INTEVENT_M) {
1264 PMD_DRV_LOG(INFO, "FW_CTL: link state change event");
1265 ice_handle_aq_msg(dev);
1268 if (oicr & PFINT_OICR_LINK_STAT_CHANGE_M) {
1269 PMD_DRV_LOG(INFO, "OICR: link state change event");
1270 ret = ice_link_update(dev, 0);
1272 rte_eth_dev_callback_process
1273 (dev, RTE_ETH_EVENT_INTR_LSC, NULL);
1277 if (oicr & PFINT_OICR_MAL_DETECT_M) {
1278 PMD_DRV_LOG(WARNING, "OICR: MDD event");
1279 reg = ICE_READ_REG(hw, GL_MDET_TX_PQM);
1280 if (reg & GL_MDET_TX_PQM_VALID_M) {
1281 pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >>
1282 GL_MDET_TX_PQM_PF_NUM_S;
1283 event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >>
1284 GL_MDET_TX_PQM_MAL_TYPE_S;
1285 queue = (reg & GL_MDET_TX_PQM_QNUM_M) >>
1286 GL_MDET_TX_PQM_QNUM_S;
1288 PMD_DRV_LOG(WARNING, "Malicious Driver Detection event "
1289 "%d by PQM on TX queue %d PF# %d",
1290 event, queue, pf_num);
1293 reg = ICE_READ_REG(hw, GL_MDET_TX_TCLAN);
1294 if (reg & GL_MDET_TX_TCLAN_VALID_M) {
1295 pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >>
1296 GL_MDET_TX_TCLAN_PF_NUM_S;
1297 event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >>
1298 GL_MDET_TX_TCLAN_MAL_TYPE_S;
1299 queue = (reg & GL_MDET_TX_TCLAN_QNUM_M) >>
1300 GL_MDET_TX_TCLAN_QNUM_S;
1302 PMD_DRV_LOG(WARNING, "Malicious Driver Detection event "
1303 "%d by TCLAN on TX queue %d PF# %d",
1304 event, queue, pf_num);
1308 /* Enable interrupt */
1309 ice_pf_enable_irq0(hw);
1310 rte_intr_ack(dev->intr_handle);
1314 ice_init_proto_xtr(struct rte_eth_dev *dev)
1316 struct ice_adapter *ad =
1317 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1318 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1319 struct ice_hw *hw = ICE_PF_TO_HW(pf);
1320 const struct proto_xtr_ol_flag *ol_flag;
1321 bool proto_xtr_enable = false;
1325 pf->proto_xtr = rte_zmalloc(NULL, pf->lan_nb_qps, 0);
1326 if (unlikely(pf->proto_xtr == NULL)) {
1327 PMD_DRV_LOG(ERR, "No memory for setting up protocol extraction table");
1331 for (i = 0; i < pf->lan_nb_qps; i++) {
1332 pf->proto_xtr[i] = ad->devargs.proto_xtr[i] != PROTO_XTR_NONE ?
1333 ad->devargs.proto_xtr[i] :
1334 ad->devargs.proto_xtr_dflt;
1336 if (pf->proto_xtr[i] != PROTO_XTR_NONE) {
1337 uint8_t type = pf->proto_xtr[i];
1339 ice_proto_xtr_ol_flag_params[type].required = true;
1340 proto_xtr_enable = true;
1344 if (likely(!proto_xtr_enable))
1347 ice_check_proto_xtr_support(hw);
1349 offset = rte_mbuf_dynfield_register(&ice_proto_xtr_metadata_param);
1350 if (unlikely(offset == -1)) {
1352 "Protocol extraction metadata is disabled in mbuf with error %d",
1358 "Protocol extraction metadata offset in mbuf is : %d",
1360 rte_net_ice_dynfield_proto_xtr_metadata_offs = offset;
1362 for (i = 0; i < RTE_DIM(ice_proto_xtr_ol_flag_params); i++) {
1363 ol_flag = &ice_proto_xtr_ol_flag_params[i];
1365 if (!ol_flag->required)
1368 if (!ice_proto_xtr_hw_support[i]) {
1370 "Protocol extraction type %u is not supported in hardware",
1372 rte_net_ice_dynfield_proto_xtr_metadata_offs = -1;
1376 offset = rte_mbuf_dynflag_register(&ol_flag->param);
1377 if (unlikely(offset == -1)) {
1379 "Protocol extraction offload '%s' failed to register with error %d",
1380 ol_flag->param.name, -rte_errno);
1382 rte_net_ice_dynfield_proto_xtr_metadata_offs = -1;
1387 "Protocol extraction offload '%s' offset in mbuf is : %d",
1388 ol_flag->param.name, offset);
1389 *ol_flag->ol_flag = 1ULL << offset;
1393 /* Initialize SW parameters of PF */
1395 ice_pf_sw_init(struct rte_eth_dev *dev)
1397 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1398 struct ice_hw *hw = ICE_PF_TO_HW(pf);
1401 (uint16_t)RTE_MIN(hw->func_caps.common_cap.num_txq,
1402 hw->func_caps.common_cap.num_rxq);
1404 pf->lan_nb_qps = pf->lan_nb_qp_max;
1406 ice_init_proto_xtr(dev);
1408 if (hw->func_caps.fd_fltr_guar > 0 ||
1409 hw->func_caps.fd_fltr_best_effort > 0) {
1410 pf->flags |= ICE_FLAG_FDIR;
1411 pf->fdir_nb_qps = ICE_DEFAULT_QP_NUM_FDIR;
1412 pf->lan_nb_qps = pf->lan_nb_qp_max - pf->fdir_nb_qps;
1414 pf->fdir_nb_qps = 0;
1416 pf->fdir_qp_offset = 0;
1422 ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type)
1424 struct ice_hw *hw = ICE_PF_TO_HW(pf);
1425 struct ice_vsi *vsi = NULL;
1426 struct ice_vsi_ctx vsi_ctx;
1428 struct rte_ether_addr broadcast = {
1429 .addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff} };
1430 struct rte_ether_addr mac_addr;
1431 uint16_t max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
1432 uint8_t tc_bitmap = 0x1;
1435 /* hw->num_lports = 1 in NIC mode */
1436 vsi = rte_zmalloc(NULL, sizeof(struct ice_vsi), 0);
1440 vsi->idx = pf->next_vsi_idx;
1443 vsi->adapter = ICE_PF_TO_ADAPTER(pf);
1444 vsi->max_macaddrs = ICE_NUM_MACADDR_MAX;
1445 vsi->vlan_anti_spoof_on = 0;
1446 vsi->vlan_filter_on = 1;
1447 TAILQ_INIT(&vsi->mac_list);
1448 TAILQ_INIT(&vsi->vlan_list);
1450 /* Be sync with ETH_RSS_RETA_SIZE_x maximum value definition */
1451 pf->hash_lut_size = hw->func_caps.common_cap.rss_table_size >
1452 ETH_RSS_RETA_SIZE_512 ? ETH_RSS_RETA_SIZE_512 :
1453 hw->func_caps.common_cap.rss_table_size;
1454 pf->flags |= ICE_FLAG_RSS_AQ_CAPABLE;
1456 memset(&vsi_ctx, 0, sizeof(vsi_ctx));
1459 vsi->nb_qps = pf->lan_nb_qps;
1460 vsi->base_queue = 1;
1461 ice_vsi_config_default_rss(&vsi_ctx.info);
1462 vsi_ctx.alloc_from_pool = true;
1463 vsi_ctx.flags = ICE_AQ_VSI_TYPE_PF;
1464 /* switch_id is queried by get_switch_config aq, which is done
1467 vsi_ctx.info.sw_id = hw->port_info->sw_id;
1468 vsi_ctx.info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
1469 /* Allow all untagged or tagged packets */
1470 vsi_ctx.info.inner_vlan_flags = ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL;
1471 vsi_ctx.info.inner_vlan_flags |= ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING;
1472 vsi_ctx.info.q_opt_rss = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF |
1473 ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
1474 if (ice_is_dvm_ena(hw)) {
1475 vsi_ctx.info.outer_vlan_flags =
1476 (ICE_AQ_VSI_OUTER_VLAN_TX_MODE_ALL <<
1477 ICE_AQ_VSI_OUTER_VLAN_TX_MODE_S) &
1478 ICE_AQ_VSI_OUTER_VLAN_TX_MODE_M;
1479 vsi_ctx.info.outer_vlan_flags |=
1480 (ICE_AQ_VSI_OUTER_TAG_VLAN_8100 <<
1481 ICE_AQ_VSI_OUTER_TAG_TYPE_S) &
1482 ICE_AQ_VSI_OUTER_TAG_TYPE_M;
1486 cfg = ICE_AQ_VSI_PROP_SECURITY_VALID |
1487 ICE_AQ_VSI_PROP_FLOW_DIR_VALID;
1488 vsi_ctx.info.valid_sections |= rte_cpu_to_le_16(cfg);
1489 cfg = ICE_AQ_VSI_FD_ENABLE;
1490 vsi_ctx.info.fd_options = rte_cpu_to_le_16(cfg);
1491 vsi_ctx.info.max_fd_fltr_dedicated =
1492 rte_cpu_to_le_16(hw->func_caps.fd_fltr_guar);
1493 vsi_ctx.info.max_fd_fltr_shared =
1494 rte_cpu_to_le_16(hw->func_caps.fd_fltr_best_effort);
1496 /* Enable VLAN/UP trip */
1497 ret = ice_vsi_config_tc_queue_mapping(vsi,
1502 "tc queue mapping with vsi failed, "
1510 vsi->nb_qps = pf->fdir_nb_qps;
1511 vsi->base_queue = ICE_FDIR_QUEUE_ID;
1512 vsi_ctx.alloc_from_pool = true;
1513 vsi_ctx.flags = ICE_AQ_VSI_TYPE_PF;
1515 cfg = ICE_AQ_VSI_PROP_FLOW_DIR_VALID;
1516 vsi_ctx.info.valid_sections |= rte_cpu_to_le_16(cfg);
1517 cfg = ICE_AQ_VSI_FD_PROG_ENABLE;
1518 vsi_ctx.info.fd_options = rte_cpu_to_le_16(cfg);
1519 vsi_ctx.info.sw_id = hw->port_info->sw_id;
1520 vsi_ctx.info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
1521 ret = ice_vsi_config_tc_queue_mapping(vsi,
1526 "tc queue mapping with vsi failed, "
1533 /* for other types of VSI */
1534 PMD_INIT_LOG(ERR, "other types of VSI not supported");
1538 /* VF has MSIX interrupt in VF range, don't allocate here */
1539 if (type == ICE_VSI_PF) {
1540 ret = ice_res_pool_alloc(&pf->msix_pool,
1541 RTE_MIN(vsi->nb_qps,
1542 RTE_MAX_RXTX_INTR_VEC_ID));
1544 PMD_INIT_LOG(ERR, "VSI MAIN %d get heap failed %d",
1547 vsi->msix_intr = ret;
1548 vsi->nb_msix = RTE_MIN(vsi->nb_qps, RTE_MAX_RXTX_INTR_VEC_ID);
1549 } else if (type == ICE_VSI_CTRL) {
1550 ret = ice_res_pool_alloc(&pf->msix_pool, 1);
1552 PMD_DRV_LOG(ERR, "VSI %d get heap failed %d",
1555 vsi->msix_intr = ret;
1561 ret = ice_add_vsi(hw, vsi->idx, &vsi_ctx, NULL);
1562 if (ret != ICE_SUCCESS) {
1563 PMD_INIT_LOG(ERR, "add vsi failed, err = %d", ret);
1566 /* store vsi information is SW structure */
1567 vsi->vsi_id = vsi_ctx.vsi_num;
1568 vsi->info = vsi_ctx.info;
1569 pf->vsis_allocated = vsi_ctx.vsis_allocd;
1570 pf->vsis_unallocated = vsi_ctx.vsis_unallocated;
1572 if (type == ICE_VSI_PF) {
1573 /* MAC configuration */
1574 rte_ether_addr_copy((struct rte_ether_addr *)
1575 hw->port_info->mac.perm_addr,
1578 rte_ether_addr_copy(&pf->dev_addr, &mac_addr);
1579 ret = ice_add_mac_filter(vsi, &mac_addr);
1580 if (ret != ICE_SUCCESS)
1581 PMD_INIT_LOG(ERR, "Failed to add dflt MAC filter");
1583 rte_ether_addr_copy(&broadcast, &mac_addr);
1584 ret = ice_add_mac_filter(vsi, &mac_addr);
1585 if (ret != ICE_SUCCESS)
1586 PMD_INIT_LOG(ERR, "Failed to add MAC filter");
1589 /* At the beginning, only TC0. */
1590 /* What we need here is the maximam number of the TX queues.
1591 * Currently vsi->nb_qps means it.
1592 * Correct it if any change.
1594 max_txqs[0] = vsi->nb_qps;
1595 ret = ice_cfg_vsi_lan(hw->port_info, vsi->idx,
1596 tc_bitmap, max_txqs);
1597 if (ret != ICE_SUCCESS)
1598 PMD_INIT_LOG(ERR, "Failed to config vsi sched");
1608 ice_send_driver_ver(struct ice_hw *hw)
1610 struct ice_driver_ver dv;
1612 /* we don't have driver version use 0 for dummy */
1616 dv.subbuild_ver = 0;
1617 strncpy((char *)dv.driver_string, "dpdk", sizeof(dv.driver_string));
1619 return ice_aq_send_driver_ver(hw, &dv, NULL);
1623 ice_pf_setup(struct ice_pf *pf)
1625 struct ice_hw *hw = ICE_PF_TO_HW(pf);
1626 struct ice_vsi *vsi;
1629 /* Clear all stats counters */
1630 pf->offset_loaded = false;
1631 memset(&pf->stats, 0, sizeof(struct ice_hw_port_stats));
1632 memset(&pf->stats_offset, 0, sizeof(struct ice_hw_port_stats));
1633 memset(&pf->internal_stats, 0, sizeof(struct ice_eth_stats));
1634 memset(&pf->internal_stats_offset, 0, sizeof(struct ice_eth_stats));
1636 /* force guaranteed filter pool for PF */
1637 ice_alloc_fd_guar_item(hw, &unused,
1638 hw->func_caps.fd_fltr_guar);
1639 /* force shared filter pool for PF */
1640 ice_alloc_fd_shrd_item(hw, &unused,
1641 hw->func_caps.fd_fltr_best_effort);
1643 vsi = ice_setup_vsi(pf, ICE_VSI_PF);
1645 PMD_INIT_LOG(ERR, "Failed to add vsi for PF");
1654 static enum ice_pkg_type
1655 ice_load_pkg_type(struct ice_hw *hw)
1657 enum ice_pkg_type package_type;
1659 /* store the activated package type (OS default or Comms) */
1660 if (!strncmp((char *)hw->active_pkg_name, ICE_OS_DEFAULT_PKG_NAME,
1662 package_type = ICE_PKG_TYPE_OS_DEFAULT;
1663 else if (!strncmp((char *)hw->active_pkg_name, ICE_COMMS_PKG_NAME,
1665 package_type = ICE_PKG_TYPE_COMMS;
1667 package_type = ICE_PKG_TYPE_UNKNOWN;
1669 PMD_INIT_LOG(NOTICE, "Active package is: %d.%d.%d.%d, %s (%s VLAN mode)",
1670 hw->active_pkg_ver.major, hw->active_pkg_ver.minor,
1671 hw->active_pkg_ver.update, hw->active_pkg_ver.draft,
1672 hw->active_pkg_name,
1673 ice_is_dvm_ena(hw) ? "double" : "single");
1675 return package_type;
1678 #ifdef RTE_EXEC_ENV_WINDOWS
1679 #define ice_access _access
1681 #define ice_access access
1684 int ice_load_pkg(struct ice_adapter *adapter, bool use_dsn, uint64_t dsn)
1686 struct ice_hw *hw = &adapter->hw;
1687 char pkg_file[ICE_MAX_PKG_FILENAME_SIZE];
1688 char opt_ddp_filename[ICE_MAX_PKG_FILENAME_SIZE];
1690 uint8_t *buf = NULL;
1698 memset(opt_ddp_filename, 0, ICE_MAX_PKG_FILENAME_SIZE);
1699 snprintf(opt_ddp_filename, ICE_MAX_PKG_FILENAME_SIZE,
1700 "ice-%016" PRIx64 ".pkg", dsn);
1701 strncpy(pkg_file, ICE_PKG_FILE_SEARCH_PATH_UPDATES,
1702 ICE_MAX_PKG_FILENAME_SIZE);
1703 if (!ice_access(strcat(pkg_file, opt_ddp_filename), 0))
1706 strncpy(pkg_file, ICE_PKG_FILE_SEARCH_PATH_DEFAULT,
1707 ICE_MAX_PKG_FILENAME_SIZE);
1708 if (!ice_access(strcat(pkg_file, opt_ddp_filename), 0))
1712 strncpy(pkg_file, ICE_PKG_FILE_UPDATES, ICE_MAX_PKG_FILENAME_SIZE);
1713 if (!ice_access(pkg_file, 0))
1715 strncpy(pkg_file, ICE_PKG_FILE_DEFAULT, ICE_MAX_PKG_FILENAME_SIZE);
1716 if (ice_access(pkg_file, 0)) {
1717 PMD_INIT_LOG(ERR, "failed to search file path\n");
1722 file = fopen(pkg_file, "rb");
1724 PMD_INIT_LOG(ERR, "failed to open file: %s\n", pkg_file);
1728 PMD_INIT_LOG(DEBUG, "DDP package name: %s", pkg_file);
1730 err = stat(pkg_file, &fstat);
1732 PMD_INIT_LOG(ERR, "failed to get file stats\n");
1736 buf_len = fstat.st_size;
1737 buf = rte_malloc(NULL, buf_len, 0);
1740 PMD_INIT_LOG(ERR, "failed to allocate buf of size %d for package\n",
1746 err = fread(buf, buf_len, 1, file);
1748 PMD_INIT_LOG(ERR, "failed to read package data\n");
1753 err = ice_copy_and_init_pkg(hw, buf, buf_len);
1755 PMD_INIT_LOG(ERR, "ice_copy_and_init_hw failed: %d\n", err);
1759 /* store the loaded pkg type info */
1760 adapter->active_pkg_type = ice_load_pkg_type(hw);
1771 ice_base_queue_get(struct ice_pf *pf)
1774 struct ice_hw *hw = ICE_PF_TO_HW(pf);
1776 reg = ICE_READ_REG(hw, PFLAN_RX_QALLOC);
1777 if (reg & PFLAN_RX_QALLOC_VALID_M) {
1778 pf->base_queue = reg & PFLAN_RX_QALLOC_FIRSTQ_M;
1780 PMD_INIT_LOG(WARNING, "Failed to get Rx base queue"
1786 parse_bool(const char *key, const char *value, void *args)
1788 int *i = (int *)args;
1792 num = strtoul(value, &end, 10);
1794 if (num != 0 && num != 1) {
1795 PMD_DRV_LOG(WARNING, "invalid value:\"%s\" for key:\"%s\", "
1796 "value must be 0 or 1",
1806 parse_u64(const char *key, const char *value, void *args)
1808 u64 *num = (u64 *)args;
1812 tmp = strtoull(value, NULL, 16);
1814 PMD_DRV_LOG(WARNING, "%s: \"%s\" is not a valid u64",
1824 static int ice_parse_devargs(struct rte_eth_dev *dev)
1826 struct ice_adapter *ad =
1827 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1828 struct rte_devargs *devargs = dev->device->devargs;
1829 struct rte_kvargs *kvlist;
1832 if (devargs == NULL)
1835 kvlist = rte_kvargs_parse(devargs->args, ice_valid_args);
1836 if (kvlist == NULL) {
1837 PMD_INIT_LOG(ERR, "Invalid kvargs key\n");
1841 ad->devargs.proto_xtr_dflt = PROTO_XTR_NONE;
1842 memset(ad->devargs.proto_xtr, PROTO_XTR_NONE,
1843 sizeof(ad->devargs.proto_xtr));
1845 ret = rte_kvargs_process(kvlist, ICE_PROTO_XTR_ARG,
1846 &handle_proto_xtr_arg, &ad->devargs);
1850 ret = rte_kvargs_process(kvlist, ICE_SAFE_MODE_SUPPORT_ARG,
1851 &parse_bool, &ad->devargs.safe_mode_support);
1855 ret = rte_kvargs_process(kvlist, ICE_PIPELINE_MODE_SUPPORT_ARG,
1856 &parse_bool, &ad->devargs.pipe_mode_support);
1860 ret = rte_kvargs_process(kvlist, ICE_HW_DEBUG_MASK_ARG,
1861 &parse_u64, &ad->hw.debug_mask);
1866 rte_kvargs_free(kvlist);
1870 /* Forward LLDP packets to default VSI by set switch rules */
1872 ice_vsi_config_sw_lldp(struct ice_vsi *vsi, bool on)
1874 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
1875 struct ice_fltr_list_entry *s_list_itr = NULL;
1876 struct LIST_HEAD_TYPE list_head;
1879 INIT_LIST_HEAD(&list_head);
1881 s_list_itr = (struct ice_fltr_list_entry *)
1882 ice_malloc(hw, sizeof(*s_list_itr));
1885 s_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_ETHERTYPE;
1886 s_list_itr->fltr_info.vsi_handle = vsi->idx;
1887 s_list_itr->fltr_info.l_data.ethertype_mac.ethertype =
1888 RTE_ETHER_TYPE_LLDP;
1889 s_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
1890 s_list_itr->fltr_info.flag = ICE_FLTR_RX;
1891 s_list_itr->fltr_info.src_id = ICE_SRC_ID_LPORT;
1892 LIST_ADD(&s_list_itr->list_entry, &list_head);
1894 ret = ice_add_eth_mac(hw, &list_head);
1896 ret = ice_remove_eth_mac(hw, &list_head);
1898 rte_free(s_list_itr);
1902 static enum ice_status
1903 ice_get_hw_res(struct ice_hw *hw, uint16_t res_type,
1904 uint16_t num, uint16_t desc_id,
1905 uint16_t *prof_buf, uint16_t *num_prof)
1907 struct ice_aqc_res_elem *resp_buf;
1910 bool res_shared = 1;
1911 struct ice_aq_desc aq_desc;
1912 struct ice_sq_cd *cd = NULL;
1913 struct ice_aqc_get_allocd_res_desc *cmd =
1914 &aq_desc.params.get_res_desc;
1916 buf_len = sizeof(*resp_buf) * num;
1917 resp_buf = ice_malloc(hw, buf_len);
1921 ice_fill_dflt_direct_cmd_desc(&aq_desc,
1922 ice_aqc_opc_get_allocd_res_desc);
1924 cmd->ops.cmd.res = CPU_TO_LE16(((res_type << ICE_AQC_RES_TYPE_S) &
1925 ICE_AQC_RES_TYPE_M) | (res_shared ?
1926 ICE_AQC_RES_TYPE_FLAG_SHARED : 0));
1927 cmd->ops.cmd.first_desc = CPU_TO_LE16(desc_id);
1929 ret = ice_aq_send_cmd(hw, &aq_desc, resp_buf, buf_len, cd);
1931 *num_prof = LE16_TO_CPU(cmd->ops.resp.num_desc);
1935 ice_memcpy(prof_buf, resp_buf, sizeof(*resp_buf) *
1936 (*num_prof), ICE_NONDMA_TO_NONDMA);
1943 ice_cleanup_resource(struct ice_hw *hw, uint16_t res_type)
1947 uint16_t prof_buf[ICE_MAX_RES_DESC_NUM];
1948 uint16_t first_desc = 1;
1949 uint16_t num_prof = 0;
1951 ret = ice_get_hw_res(hw, res_type, ICE_MAX_RES_DESC_NUM,
1952 first_desc, prof_buf, &num_prof);
1954 PMD_INIT_LOG(ERR, "Failed to get fxp resource");
1958 for (prof_id = 0; prof_id < num_prof; prof_id++) {
1959 ret = ice_free_hw_res(hw, res_type, 1, &prof_buf[prof_id]);
1961 PMD_INIT_LOG(ERR, "Failed to free fxp resource");
1969 ice_reset_fxp_resource(struct ice_hw *hw)
1973 ret = ice_cleanup_resource(hw, ICE_AQC_RES_TYPE_FD_PROF_BLDR_PROFID);
1975 PMD_INIT_LOG(ERR, "Failed to clearup fdir resource");
1979 ret = ice_cleanup_resource(hw, ICE_AQC_RES_TYPE_HASH_PROF_BLDR_PROFID);
1981 PMD_INIT_LOG(ERR, "Failed to clearup rss resource");
1989 ice_rss_ctx_init(struct ice_pf *pf)
1991 memset(&pf->hash_ctx, 0, sizeof(pf->hash_ctx));
1995 ice_get_supported_rxdid(struct ice_hw *hw)
1997 uint64_t supported_rxdid = 0; /* bitmap for supported RXDID */
2001 supported_rxdid |= BIT(ICE_RXDID_LEGACY_1);
2003 for (i = ICE_RXDID_FLEX_NIC; i < ICE_FLEX_DESC_RXDID_MAX_NUM; i++) {
2004 regval = ICE_READ_REG(hw, GLFLXP_RXDID_FLAGS(i, 0));
2005 if ((regval >> GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S)
2006 & GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M)
2007 supported_rxdid |= BIT(i);
2009 return supported_rxdid;
2013 ice_dev_init(struct rte_eth_dev *dev)
2015 struct rte_pci_device *pci_dev;
2016 struct rte_intr_handle *intr_handle;
2017 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2018 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2019 struct ice_adapter *ad =
2020 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2021 struct ice_vsi *vsi;
2023 #ifndef RTE_EXEC_ENV_WINDOWS
2025 uint32_t dsn_low, dsn_high;
2030 dev->dev_ops = &ice_eth_dev_ops;
2031 dev->rx_queue_count = ice_rx_queue_count;
2032 dev->rx_descriptor_status = ice_rx_descriptor_status;
2033 dev->tx_descriptor_status = ice_tx_descriptor_status;
2034 dev->rx_pkt_burst = ice_recv_pkts;
2035 dev->tx_pkt_burst = ice_xmit_pkts;
2036 dev->tx_pkt_prepare = ice_prep_pkts;
2038 /* for secondary processes, we don't initialise any further as primary
2039 * has already done this work.
2041 if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2042 ice_set_rx_function(dev);
2043 ice_set_tx_function(dev);
2047 dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
2049 ice_set_default_ptype_table(dev);
2050 pci_dev = RTE_DEV_TO_PCI(dev->device);
2051 intr_handle = &pci_dev->intr_handle;
2053 pf->adapter = ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2054 pf->dev_data = dev->data;
2055 hw->back = pf->adapter;
2056 hw->hw_addr = (uint8_t *)pci_dev->mem_resource[0].addr;
2057 hw->vendor_id = pci_dev->id.vendor_id;
2058 hw->device_id = pci_dev->id.device_id;
2059 hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
2060 hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
2061 hw->bus.device = pci_dev->addr.devid;
2062 hw->bus.func = pci_dev->addr.function;
2064 ret = ice_parse_devargs(dev);
2066 PMD_INIT_LOG(ERR, "Failed to parse devargs");
2070 ice_init_controlq_parameter(hw);
2072 ret = ice_init_hw(hw);
2074 PMD_INIT_LOG(ERR, "Failed to initialize HW");
2078 #ifndef RTE_EXEC_ENV_WINDOWS
2081 pos = rte_pci_find_ext_capability(pci_dev, RTE_PCI_EXT_CAP_ID_DSN);
2083 if (rte_pci_read_config(pci_dev, &dsn_low, 4, pos + 4) < 0 ||
2084 rte_pci_read_config(pci_dev, &dsn_high, 4, pos + 8) < 0) {
2085 PMD_INIT_LOG(ERR, "Failed to read pci config space\n");
2088 dsn = (uint64_t)dsn_high << 32 | dsn_low;
2091 PMD_INIT_LOG(ERR, "Failed to read device serial number\n");
2094 ret = ice_load_pkg(pf->adapter, use_dsn, dsn);
2096 ret = ice_init_hw_tbls(hw);
2098 PMD_INIT_LOG(ERR, "ice_init_hw_tbls failed: %d\n", ret);
2099 rte_free(hw->pkg_copy);
2104 if (ad->devargs.safe_mode_support == 0) {
2105 PMD_INIT_LOG(ERR, "Failed to load the DDP package,"
2106 "Use safe-mode-support=1 to enter Safe Mode");
2110 PMD_INIT_LOG(WARNING, "Failed to load the DDP package,"
2111 "Entering Safe Mode");
2112 ad->is_safe_mode = 1;
2116 PMD_INIT_LOG(INFO, "FW %d.%d.%05d API %d.%d",
2117 hw->fw_maj_ver, hw->fw_min_ver, hw->fw_build,
2118 hw->api_maj_ver, hw->api_min_ver);
2120 ice_pf_sw_init(dev);
2121 ret = ice_init_mac_address(dev);
2123 PMD_INIT_LOG(ERR, "Failed to initialize mac address");
2127 ret = ice_res_pool_init(&pf->msix_pool, 1,
2128 hw->func_caps.common_cap.num_msix_vectors - 1);
2130 PMD_INIT_LOG(ERR, "Failed to init MSIX pool");
2131 goto err_msix_pool_init;
2134 ret = ice_pf_setup(pf);
2136 PMD_INIT_LOG(ERR, "Failed to setup PF");
2140 ret = ice_send_driver_ver(hw);
2142 PMD_INIT_LOG(ERR, "Failed to send driver version");
2148 ret = ice_aq_stop_lldp(hw, true, false, NULL);
2149 if (ret != ICE_SUCCESS)
2150 PMD_INIT_LOG(DEBUG, "lldp has already stopped\n");
2151 ret = ice_init_dcb(hw, true);
2152 if (ret != ICE_SUCCESS)
2153 PMD_INIT_LOG(DEBUG, "Failed to init DCB\n");
2154 /* Forward LLDP packets to default VSI */
2155 ret = ice_vsi_config_sw_lldp(vsi, true);
2156 if (ret != ICE_SUCCESS)
2157 PMD_INIT_LOG(DEBUG, "Failed to cfg lldp\n");
2158 /* register callback func to eal lib */
2159 rte_intr_callback_register(intr_handle,
2160 ice_interrupt_handler, dev);
2162 ice_pf_enable_irq0(hw);
2164 /* enable uio intr after callback register */
2165 rte_intr_enable(intr_handle);
2167 /* get base queue pairs index in the device */
2168 ice_base_queue_get(pf);
2170 /* Initialize RSS context for gtpu_eh */
2171 ice_rss_ctx_init(pf);
2173 if (!ad->is_safe_mode) {
2174 ret = ice_flow_init(ad);
2176 PMD_INIT_LOG(ERR, "Failed to initialize flow");
2181 ret = ice_reset_fxp_resource(hw);
2183 PMD_INIT_LOG(ERR, "Failed to reset fxp resource");
2187 pf->supported_rxdid = ice_get_supported_rxdid(hw);
2192 ice_res_pool_destroy(&pf->msix_pool);
2194 rte_free(dev->data->mac_addrs);
2195 dev->data->mac_addrs = NULL;
2197 ice_sched_cleanup_all(hw);
2198 rte_free(hw->port_info);
2199 ice_shutdown_all_ctrlq(hw);
2200 rte_free(pf->proto_xtr);
2206 ice_release_vsi(struct ice_vsi *vsi)
2209 struct ice_vsi_ctx vsi_ctx;
2210 enum ice_status ret;
2216 hw = ICE_VSI_TO_HW(vsi);
2218 ice_remove_all_mac_vlan_filters(vsi);
2220 memset(&vsi_ctx, 0, sizeof(vsi_ctx));
2222 vsi_ctx.vsi_num = vsi->vsi_id;
2223 vsi_ctx.info = vsi->info;
2224 ret = ice_free_vsi(hw, vsi->idx, &vsi_ctx, false, NULL);
2225 if (ret != ICE_SUCCESS) {
2226 PMD_INIT_LOG(ERR, "Failed to free vsi by aq, %u", vsi->vsi_id);
2230 rte_free(vsi->rss_lut);
2231 rte_free(vsi->rss_key);
2237 ice_vsi_disable_queues_intr(struct ice_vsi *vsi)
2239 struct rte_eth_dev *dev = &rte_eth_devices[vsi->adapter->pf.dev_data->port_id];
2240 struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
2241 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2242 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
2243 uint16_t msix_intr, i;
2245 /* disable interrupt and also clear all the exist config */
2246 for (i = 0; i < vsi->nb_qps; i++) {
2247 ICE_WRITE_REG(hw, QINT_TQCTL(vsi->base_queue + i), 0);
2248 ICE_WRITE_REG(hw, QINT_RQCTL(vsi->base_queue + i), 0);
2252 if (rte_intr_allow_others(intr_handle))
2254 for (i = 0; i < vsi->nb_msix; i++) {
2255 msix_intr = vsi->msix_intr + i;
2256 ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr),
2257 GLINT_DYN_CTL_WB_ON_ITR_M);
2261 ICE_WRITE_REG(hw, GLINT_DYN_CTL(0), GLINT_DYN_CTL_WB_ON_ITR_M);
2265 ice_dev_stop(struct rte_eth_dev *dev)
2267 struct rte_eth_dev_data *data = dev->data;
2268 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2269 struct ice_vsi *main_vsi = pf->main_vsi;
2270 struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
2271 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2274 /* avoid stopping again */
2275 if (pf->adapter_stopped)
2278 /* stop and clear all Rx queues */
2279 for (i = 0; i < data->nb_rx_queues; i++)
2280 ice_rx_queue_stop(dev, i);
2282 /* stop and clear all Tx queues */
2283 for (i = 0; i < data->nb_tx_queues; i++)
2284 ice_tx_queue_stop(dev, i);
2286 /* disable all queue interrupts */
2287 ice_vsi_disable_queues_intr(main_vsi);
2289 if (pf->init_link_up)
2290 ice_dev_set_link_up(dev);
2292 ice_dev_set_link_down(dev);
2294 /* Clean datapath event and queue/vec mapping */
2295 rte_intr_efd_disable(intr_handle);
2296 if (intr_handle->intr_vec) {
2297 rte_free(intr_handle->intr_vec);
2298 intr_handle->intr_vec = NULL;
2301 pf->adapter_stopped = true;
2302 dev->data->dev_started = 0;
2308 ice_dev_close(struct rte_eth_dev *dev)
2310 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2311 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2312 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2313 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2314 struct ice_adapter *ad =
2315 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2318 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2321 /* Since stop will make link down, then the link event will be
2322 * triggered, disable the irq firstly to avoid the port_infoe etc
2323 * resources deallocation causing the interrupt service thread
2326 ice_pf_disable_irq0(hw);
2328 ret = ice_dev_stop(dev);
2330 if (!ad->is_safe_mode)
2331 ice_flow_uninit(ad);
2333 /* release all queue resource */
2334 ice_free_queues(dev);
2336 ice_res_pool_destroy(&pf->msix_pool);
2337 ice_release_vsi(pf->main_vsi);
2338 ice_sched_cleanup_all(hw);
2339 ice_free_hw_tbls(hw);
2340 rte_free(hw->port_info);
2341 hw->port_info = NULL;
2342 ice_shutdown_all_ctrlq(hw);
2343 rte_free(pf->proto_xtr);
2344 pf->proto_xtr = NULL;
2346 /* disable uio intr before callback unregister */
2347 rte_intr_disable(intr_handle);
2349 /* unregister callback func from eal lib */
2350 rte_intr_callback_unregister(intr_handle,
2351 ice_interrupt_handler, dev);
2357 ice_dev_uninit(struct rte_eth_dev *dev)
2365 is_hash_cfg_valid(struct ice_rss_hash_cfg *cfg)
2367 return (cfg->hash_flds != 0 && cfg->addl_hdrs != 0) ? true : false;
2371 hash_cfg_reset(struct ice_rss_hash_cfg *cfg)
2376 cfg->hdr_type = ICE_RSS_OUTER_HEADERS;
2380 ice_hash_moveout(struct ice_pf *pf, struct ice_rss_hash_cfg *cfg)
2382 enum ice_status status = ICE_SUCCESS;
2383 struct ice_hw *hw = ICE_PF_TO_HW(pf);
2384 struct ice_vsi *vsi = pf->main_vsi;
2386 if (!is_hash_cfg_valid(cfg))
2389 status = ice_rem_rss_cfg(hw, vsi->idx, cfg);
2390 if (status && status != ICE_ERR_DOES_NOT_EXIST) {
2392 "ice_rem_rss_cfg failed for VSI:%d, error:%d\n",
2401 ice_hash_moveback(struct ice_pf *pf, struct ice_rss_hash_cfg *cfg)
2403 enum ice_status status = ICE_SUCCESS;
2404 struct ice_hw *hw = ICE_PF_TO_HW(pf);
2405 struct ice_vsi *vsi = pf->main_vsi;
2407 if (!is_hash_cfg_valid(cfg))
2410 status = ice_add_rss_cfg(hw, vsi->idx, cfg);
2413 "ice_add_rss_cfg failed for VSI:%d, error:%d\n",
2422 ice_hash_remove(struct ice_pf *pf, struct ice_rss_hash_cfg *cfg)
2426 ret = ice_hash_moveout(pf, cfg);
2427 if (ret && (ret != -ENOENT))
2430 hash_cfg_reset(cfg);
2436 ice_add_rss_cfg_pre_gtpu(struct ice_pf *pf, struct ice_hash_gtpu_ctx *ctx,
2442 case ICE_HASH_GTPU_CTX_EH_IP:
2443 ret = ice_hash_remove(pf,
2444 &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_UDP]);
2445 if (ret && (ret != -ENOENT))
2448 ret = ice_hash_remove(pf,
2449 &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_TCP]);
2450 if (ret && (ret != -ENOENT))
2453 ret = ice_hash_remove(pf,
2454 &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP]);
2455 if (ret && (ret != -ENOENT))
2458 ret = ice_hash_remove(pf,
2459 &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_UDP]);
2460 if (ret && (ret != -ENOENT))
2463 ret = ice_hash_remove(pf,
2464 &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_TCP]);
2465 if (ret && (ret != -ENOENT))
2468 ret = ice_hash_remove(pf,
2469 &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP]);
2470 if (ret && (ret != -ENOENT))
2473 ret = ice_hash_remove(pf,
2474 &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_UDP]);
2475 if (ret && (ret != -ENOENT))
2478 ret = ice_hash_remove(pf,
2479 &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_TCP]);
2480 if (ret && (ret != -ENOENT))
2484 case ICE_HASH_GTPU_CTX_EH_IP_UDP:
2485 ret = ice_hash_remove(pf,
2486 &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_UDP]);
2487 if (ret && (ret != -ENOENT))
2490 ret = ice_hash_remove(pf,
2491 &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_UDP]);
2492 if (ret && (ret != -ENOENT))
2495 ret = ice_hash_moveout(pf,
2496 &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP]);
2497 if (ret && (ret != -ENOENT))
2500 ret = ice_hash_moveout(pf,
2501 &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_TCP]);
2502 if (ret && (ret != -ENOENT))
2505 ret = ice_hash_moveout(pf,
2506 &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP]);
2507 if (ret && (ret != -ENOENT))
2510 ret = ice_hash_moveout(pf,
2511 &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_TCP]);
2512 if (ret && (ret != -ENOENT))
2516 case ICE_HASH_GTPU_CTX_EH_IP_TCP:
2517 ret = ice_hash_remove(pf,
2518 &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_TCP]);
2519 if (ret && (ret != -ENOENT))
2522 ret = ice_hash_remove(pf,
2523 &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_TCP]);
2524 if (ret && (ret != -ENOENT))
2527 ret = ice_hash_moveout(pf,
2528 &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP]);
2529 if (ret && (ret != -ENOENT))
2532 ret = ice_hash_moveout(pf,
2533 &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_UDP]);
2534 if (ret && (ret != -ENOENT))
2537 ret = ice_hash_moveout(pf,
2538 &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP]);
2539 if (ret && (ret != -ENOENT))
2542 ret = ice_hash_moveout(pf,
2543 &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_UDP]);
2544 if (ret && (ret != -ENOENT))
2548 case ICE_HASH_GTPU_CTX_UP_IP:
2549 ret = ice_hash_remove(pf,
2550 &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_UDP]);
2551 if (ret && (ret != -ENOENT))
2554 ret = ice_hash_remove(pf,
2555 &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_TCP]);
2556 if (ret && (ret != -ENOENT))
2559 ret = ice_hash_moveout(pf,
2560 &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP]);
2561 if (ret && (ret != -ENOENT))
2564 ret = ice_hash_moveout(pf,
2565 &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_UDP]);
2566 if (ret && (ret != -ENOENT))
2569 ret = ice_hash_moveout(pf,
2570 &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_TCP]);
2571 if (ret && (ret != -ENOENT))
2575 case ICE_HASH_GTPU_CTX_UP_IP_UDP:
2576 case ICE_HASH_GTPU_CTX_UP_IP_TCP:
2577 ret = ice_hash_moveout(pf,
2578 &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP]);
2579 if (ret && (ret != -ENOENT))
2582 ret = ice_hash_moveout(pf,
2583 &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_UDP]);
2584 if (ret && (ret != -ENOENT))
2587 ret = ice_hash_moveout(pf,
2588 &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_TCP]);
2589 if (ret && (ret != -ENOENT))
2593 case ICE_HASH_GTPU_CTX_DW_IP:
2594 ret = ice_hash_remove(pf,
2595 &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_UDP]);
2596 if (ret && (ret != -ENOENT))
2599 ret = ice_hash_remove(pf,
2600 &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_TCP]);
2601 if (ret && (ret != -ENOENT))
2604 ret = ice_hash_moveout(pf,
2605 &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP]);
2606 if (ret && (ret != -ENOENT))
2609 ret = ice_hash_moveout(pf,
2610 &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_UDP]);
2611 if (ret && (ret != -ENOENT))
2614 ret = ice_hash_moveout(pf,
2615 &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_TCP]);
2616 if (ret && (ret != -ENOENT))
2620 case ICE_HASH_GTPU_CTX_DW_IP_UDP:
2621 case ICE_HASH_GTPU_CTX_DW_IP_TCP:
2622 ret = ice_hash_moveout(pf,
2623 &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP]);
2624 if (ret && (ret != -ENOENT))
2627 ret = ice_hash_moveout(pf,
2628 &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_UDP]);
2629 if (ret && (ret != -ENOENT))
2632 ret = ice_hash_moveout(pf,
2633 &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_TCP]);
2634 if (ret && (ret != -ENOENT))
2645 static u8 calc_gtpu_ctx_idx(uint32_t hdr)
2649 if (hdr & ICE_FLOW_SEG_HDR_GTPU_EH)
2651 else if (hdr & ICE_FLOW_SEG_HDR_GTPU_UP)
2653 else if (hdr & ICE_FLOW_SEG_HDR_GTPU_DWN)
2656 return ICE_HASH_GTPU_CTX_MAX;
2659 if (hdr & ICE_FLOW_SEG_HDR_UDP)
2661 else if (hdr & ICE_FLOW_SEG_HDR_TCP)
2664 if (hdr & (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6))
2665 return eh_idx * 3 + ip_idx;
2667 return ICE_HASH_GTPU_CTX_MAX;
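/*
 * Worked example of the mapping above, assuming the ICE_HASH_GTPU_CTX_*
 * enumerators are laid out (EH, UP, DWN)-major with (IP, IP_UDP, IP_TCP)
 * minor, i.e. ctx = eh_idx * 3 + ip_idx:
 *
 *   GTPU_EH  + IPV4       -> eh_idx 0, ip_idx 0 -> ctx 0 (EH_IP)
 *   GTPU_EH  + IPV4 + UDP -> eh_idx 0, ip_idx 1 -> ctx 1 (EH_IP_UDP)
 *   GTPU_UP  + IPV6 + TCP -> eh_idx 1, ip_idx 2 -> ctx 5 (UP_IP_TCP)
 *   GTPU_DWN + IPV4       -> eh_idx 2, ip_idx 0 -> ctx 6 (DW_IP)
 *
 * Any header set without an IPv4/IPv6 flag yields ICE_HASH_GTPU_CTX_MAX,
 * meaning no GTPU hash context is tracked for it.
 */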
2671 ice_add_rss_cfg_pre(struct ice_pf *pf, uint32_t hdr)
2673 u8 gtpu_ctx_idx = calc_gtpu_ctx_idx(hdr);
2675 if (hdr & ICE_FLOW_SEG_HDR_IPV4)
2676 return ice_add_rss_cfg_pre_gtpu(pf, &pf->hash_ctx.gtpu4,
2678 else if (hdr & ICE_FLOW_SEG_HDR_IPV6)
2679 return ice_add_rss_cfg_pre_gtpu(pf, &pf->hash_ctx.gtpu6,
2686 ice_add_rss_cfg_post_gtpu(struct ice_pf *pf, struct ice_hash_gtpu_ctx *ctx,
2687 u8 ctx_idx, struct ice_rss_hash_cfg *cfg)
2691 if (ctx_idx < ICE_HASH_GTPU_CTX_MAX)
2692 ctx->ctx[ctx_idx] = *cfg;
2695 case ICE_HASH_GTPU_CTX_EH_IP:
2697 case ICE_HASH_GTPU_CTX_EH_IP_UDP:
2698 ret = ice_hash_moveback(pf,
2699 &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP]);
2700 if (ret && (ret != -ENOENT))
2703 ret = ice_hash_moveback(pf,
2704 &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_TCP]);
2705 if (ret && (ret != -ENOENT))
2708 ret = ice_hash_moveback(pf,
2709 &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP]);
2710 if (ret && (ret != -ENOENT))
2713 ret = ice_hash_moveback(pf,
2714 &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_TCP]);
2715 if (ret && (ret != -ENOENT))
2719 case ICE_HASH_GTPU_CTX_EH_IP_TCP:
2720 ret = ice_hash_moveback(pf,
2721 &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP]);
2722 if (ret && (ret != -ENOENT))
2725 ret = ice_hash_moveback(pf,
2726 &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_UDP]);
2727 if (ret && (ret != -ENOENT))
2730 ret = ice_hash_moveback(pf,
2731 &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP]);
2732 if (ret && (ret != -ENOENT))
2735 ret = ice_hash_moveback(pf,
2736 &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_UDP]);
2737 if (ret && (ret != -ENOENT))
2741 case ICE_HASH_GTPU_CTX_UP_IP:
2742 case ICE_HASH_GTPU_CTX_UP_IP_UDP:
2743 case ICE_HASH_GTPU_CTX_UP_IP_TCP:
2744 case ICE_HASH_GTPU_CTX_DW_IP:
2745 case ICE_HASH_GTPU_CTX_DW_IP_UDP:
2746 case ICE_HASH_GTPU_CTX_DW_IP_TCP:
2747 ret = ice_hash_moveback(pf,
2748 &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP]);
2749 if (ret && (ret != -ENOENT))
2752 ret = ice_hash_moveback(pf,
2753 &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_UDP]);
2754 if (ret && (ret != -ENOENT))
2757 ret = ice_hash_moveback(pf,
2758 &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_TCP]);
2759 if (ret && (ret != -ENOENT))
2771 ice_add_rss_cfg_post(struct ice_pf *pf, struct ice_rss_hash_cfg *cfg)
2773 u8 gtpu_ctx_idx = calc_gtpu_ctx_idx(cfg->addl_hdrs);
2775 if (cfg->addl_hdrs & ICE_FLOW_SEG_HDR_IPV4)
2776 return ice_add_rss_cfg_post_gtpu(pf, &pf->hash_ctx.gtpu4,
2778 else if (cfg->addl_hdrs & ICE_FLOW_SEG_HDR_IPV6)
2779 return ice_add_rss_cfg_post_gtpu(pf, &pf->hash_ctx.gtpu6,
2786 ice_rem_rss_cfg_post(struct ice_pf *pf, uint32_t hdr)
2788 u8 gtpu_ctx_idx = calc_gtpu_ctx_idx(hdr);
2790 if (gtpu_ctx_idx >= ICE_HASH_GTPU_CTX_MAX)
2793 if (hdr & ICE_FLOW_SEG_HDR_IPV4)
2794 hash_cfg_reset(&pf->hash_ctx.gtpu4.ctx[gtpu_ctx_idx]);
2795 else if (hdr & ICE_FLOW_SEG_HDR_IPV6)
2796 hash_cfg_reset(&pf->hash_ctx.gtpu6.ctx[gtpu_ctx_idx]);
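/*
 * Taken together, the helpers above maintain pf->hash_ctx as a shadow of
 * what is programmed in hardware: ice_add_rss_cfg_pre_gtpu() removes or
 * temporarily moves out cached configurations that would conflict with the
 * one about to be added, ice_add_rss_cfg_post_gtpu() records the new
 * configuration and moves the compatible ones back, and
 * ice_rem_rss_cfg_post() drops the cached entry once hardware removal has
 * been requested.
 */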
2800 ice_rem_rss_cfg_wrap(struct ice_pf *pf, uint16_t vsi_id,
2801 struct ice_rss_hash_cfg *cfg)
2803 struct ice_hw *hw = ICE_PF_TO_HW(pf);
2806 ret = ice_rem_rss_cfg(hw, vsi_id, cfg);
2807 if (ret && ret != ICE_ERR_DOES_NOT_EXIST)
2808 PMD_DRV_LOG(ERR, "remove rss cfg failed\n");
2810 ice_rem_rss_cfg_post(pf, cfg->addl_hdrs);
2816 ice_add_rss_cfg_wrap(struct ice_pf *pf, uint16_t vsi_id,
2817 struct ice_rss_hash_cfg *cfg)
2819 struct ice_hw *hw = ICE_PF_TO_HW(pf);
2822 ret = ice_add_rss_cfg_pre(pf, cfg->addl_hdrs);
2824 PMD_DRV_LOG(ERR, "add rss cfg pre failed\n");
2826 ret = ice_add_rss_cfg(hw, vsi_id, cfg);
2828 PMD_DRV_LOG(ERR, "add rss cfg failed\n");
2830 ret = ice_add_rss_cfg_post(pf, cfg);
2832 PMD_DRV_LOG(ERR, "add rss cfg post failed\n");
2838 ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
2840 struct ice_hw *hw = ICE_PF_TO_HW(pf);
2841 struct ice_vsi *vsi = pf->main_vsi;
2842 struct ice_rss_hash_cfg cfg;
2845 #define ICE_RSS_HF_ALL ( \
2846 ETH_RSS_IPV4 | \
2847 ETH_RSS_IPV6 | \
2848 ETH_RSS_NONFRAG_IPV4_UDP | \
2849 ETH_RSS_NONFRAG_IPV6_UDP | \
2850 ETH_RSS_NONFRAG_IPV4_TCP | \
2851 ETH_RSS_NONFRAG_IPV6_TCP | \
2852 ETH_RSS_NONFRAG_IPV4_SCTP | \
2853 ETH_RSS_NONFRAG_IPV6_SCTP | \
2854 ETH_RSS_FRAG_IPV4 | \
2855 ETH_RSS_FRAG_IPV6)
2857 ret = ice_rem_vsi_rss_cfg(hw, vsi->idx);
2859 PMD_DRV_LOG(ERR, "%s Remove rss vsi fail %d",
2863 cfg.hdr_type = ICE_RSS_OUTER_HEADERS;
2864 /* Configure RSS for IPv4 with src/dst addr as input set */
2865 if (rss_hf & ETH_RSS_IPV4) {
2866 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER;
2867 cfg.hash_flds = ICE_FLOW_HASH_IPV4;
2868 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
2870 PMD_DRV_LOG(ERR, "%s IPV4 rss flow fail %d",
2874 /* Configure RSS for IPv6 with src/dst addr as input set */
2875 if (rss_hf & ETH_RSS_IPV6) {
2876 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER;
2877 cfg.hash_flds = ICE_FLOW_HASH_IPV6;
2878 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
2880 PMD_DRV_LOG(ERR, "%s IPV6 rss flow fail %d",
2884 /* Configure RSS for udp4 with src/dst addr and port as input set */
2885 if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) {
2886 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4 |
2887 ICE_FLOW_SEG_HDR_IPV_OTHER;
2888 cfg.hash_flds = ICE_HASH_UDP_IPV4;
2889 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
2891 PMD_DRV_LOG(ERR, "%s UDP_IPV4 rss flow fail %d",
2895 /* Configure RSS for udp6 with src/dst addr and port as input set */
2896 if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) {
2897 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV6 |
2898 ICE_FLOW_SEG_HDR_IPV_OTHER;
2899 cfg.hash_flds = ICE_HASH_UDP_IPV6;
2900 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
2902 PMD_DRV_LOG(ERR, "%s UDP_IPV6 rss flow fail %d",
2906 /* Configure RSS for tcp4 with src/dst addr and port as input set */
2907 if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) {
2908 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4 |
2909 ICE_FLOW_SEG_HDR_IPV_OTHER;
2910 cfg.hash_flds = ICE_HASH_TCP_IPV4;
2911 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
2913 PMD_DRV_LOG(ERR, "%s TCP_IPV4 rss flow fail %d",
2917 /* Configure RSS for tcp6 with src/dst addr and port as input set */
2918 if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) {
2919 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6 |
2920 ICE_FLOW_SEG_HDR_IPV_OTHER;
2921 cfg.hash_flds = ICE_HASH_TCP_IPV6;
2922 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
2924 PMD_DRV_LOG(ERR, "%s TCP_IPV6 rss flow fail %d",
2928 /* Configure RSS for sctp4 with src/dst addr and port as input set */
2929 if (rss_hf & ETH_RSS_NONFRAG_IPV4_SCTP) {
2930 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4 |
2931 ICE_FLOW_SEG_HDR_IPV_OTHER;
2932 cfg.hash_flds = ICE_HASH_SCTP_IPV4;
2933 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
2935 PMD_DRV_LOG(ERR, "%s SCTP_IPV4 rss flow fail %d",
2939 /* Configure RSS for sctp6 with src/dst addr and port as input set */
2940 if (rss_hf & ETH_RSS_NONFRAG_IPV6_SCTP) {
2941 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV6 |
2942 ICE_FLOW_SEG_HDR_IPV_OTHER;
2943 cfg.hash_flds = ICE_HASH_SCTP_IPV6;
2944 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
2946 PMD_DRV_LOG(ERR, "%s SCTP_IPV6 rss flow fail %d",
2950 if (rss_hf & ETH_RSS_IPV4) {
2951 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_IPV4 |
2952 ICE_FLOW_SEG_HDR_IPV_OTHER;
2953 cfg.hash_flds = ICE_FLOW_HASH_IPV4;
2954 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
2956 PMD_DRV_LOG(ERR, "%s PPPoE_IPV4 rss flow fail %d",
2960 if (rss_hf & ETH_RSS_IPV6) {
2961 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_IPV6 |
2962 ICE_FLOW_SEG_HDR_IPV_OTHER;
2963 cfg.hash_flds = ICE_FLOW_HASH_IPV6;
2964 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
2966 PMD_DRV_LOG(ERR, "%s PPPoE_IPV6 rss flow fail %d",
2970 if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) {
2971 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_UDP |
2972 ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER;
2973 cfg.hash_flds = ICE_HASH_UDP_IPV4;
2974 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
2976 PMD_DRV_LOG(ERR, "%s PPPoE_IPV4_UDP rss flow fail %d",
2980 if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) {
2981 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_UDP |
2982 ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER;
2983 cfg.hash_flds = ICE_HASH_UDP_IPV6;
2984 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
2986 PMD_DRV_LOG(ERR, "%s PPPoE_IPV6_UDP rss flow fail %d",
2990 if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) {
2991 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_TCP |
2992 ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER;
2993 cfg.hash_flds = ICE_HASH_TCP_IPV4;
2994 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
2996 PMD_DRV_LOG(ERR, "%s PPPoE_IPV4_TCP rss flow fail %d",
3000 if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) {
3001 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_TCP |
3002 ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER;
3003 cfg.hash_flds = ICE_HASH_TCP_IPV6;
3004 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
3006 PMD_DRV_LOG(ERR, "%s PPPoE_IPV6_TCP rss flow fail %d",
3010 if (rss_hf & ETH_RSS_FRAG_IPV4) {
3011 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_FRAG;
3012 cfg.hash_flds = ICE_FLOW_HASH_IPV4 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_ID);
3013 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
3015 PMD_DRV_LOG(ERR, "%s IPV4_FRAG rss flow fail %d",
3019 if (rss_hf & ETH_RSS_FRAG_IPV6) {
3020 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_FRAG;
3021 cfg.hash_flds = ICE_FLOW_HASH_IPV6 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_ID);
3022 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
3024 PMD_DRV_LOG(ERR, "%s IPV6_FRAG rss flow fail %d",
3028 pf->rss_hf = rss_hf & ICE_RSS_HF_ALL;
3032 ice_get_default_rss_key(uint8_t *rss_key, uint32_t rss_key_size)
3034 static struct ice_aqc_get_set_rss_keys default_key;
3035 static bool default_key_done;
3036 uint8_t *key = (uint8_t *)&default_key;
3039 if (rss_key_size > sizeof(default_key)) {
3040 PMD_DRV_LOG(WARNING,
3041 "requested size %u is larger than default %zu, "
3042 "only %zu bytes are gotten for key\n",
3043 rss_key_size, sizeof(default_key),
3044 sizeof(default_key));
3047 if (!default_key_done) {
3048 /* Generate a random default hash key once per process */
3049 for (i = 0; i < sizeof(default_key); i++)
3050 key[i] = (uint8_t)rte_rand();
3051 default_key_done = true;
3053 rte_memcpy(rss_key, key, RTE_MIN(rss_key_size, sizeof(default_key)));
3056 static int ice_init_rss(struct ice_pf *pf)
3058 struct ice_hw *hw = ICE_PF_TO_HW(pf);
3059 struct ice_vsi *vsi = pf->main_vsi;
3060 struct rte_eth_dev_data *dev_data = pf->dev_data;
3061 struct ice_aq_get_set_rss_lut_params lut_params;
3062 struct rte_eth_rss_conf *rss_conf;
3063 struct ice_aqc_get_set_rss_keys key;
3066 bool is_safe_mode = pf->adapter->is_safe_mode;
3069 rss_conf = &dev_data->dev_conf.rx_adv_conf.rss_conf;
3070 nb_q = dev_data->nb_rx_queues;
3071 vsi->rss_key_size = ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE;
3072 vsi->rss_lut_size = pf->hash_lut_size;
3075 PMD_DRV_LOG(WARNING,
3076 "RSS is not supported as rx queues number is zero\n");
3081 PMD_DRV_LOG(WARNING, "RSS is not supported in safe mode\n");
3085 if (!vsi->rss_key) {
3086 vsi->rss_key = rte_zmalloc(NULL,
3087 vsi->rss_key_size, 0);
3088 if (vsi->rss_key == NULL) {
3089 PMD_DRV_LOG(ERR, "Failed to allocate memory for rss_key");
3093 if (!vsi->rss_lut) {
3094 vsi->rss_lut = rte_zmalloc(NULL,
3095 vsi->rss_lut_size, 0);
3096 if (vsi->rss_lut == NULL) {
3097 PMD_DRV_LOG(ERR, "Failed to allocate memory for rss_lut");
3098 rte_free(vsi->rss_key);
3099 vsi->rss_key = NULL;
3103 /* configure RSS key */
3104 if (!rss_conf->rss_key)
3105 ice_get_default_rss_key(vsi->rss_key, vsi->rss_key_size);
3107 rte_memcpy(vsi->rss_key, rss_conf->rss_key,
3108 RTE_MIN(rss_conf->rss_key_len,
3109 vsi->rss_key_size));
3111 rte_memcpy(key.standard_rss_key, vsi->rss_key, vsi->rss_key_size);
3112 ret = ice_aq_set_rss_key(hw, vsi->idx, &key);
3116 /* init RSS LUT table */
3117 for (i = 0; i < vsi->rss_lut_size; i++)
3118 vsi->rss_lut[i] = i % nb_q;
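/*
 * e.g. with nb_q = 4 and a 512-entry LUT this produces 0,1,2,3,0,1,2,3,...
 * spreading RSS hash buckets round-robin across the configured Rx queues.
 */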
3120 lut_params.vsi_handle = vsi->idx;
3121 lut_params.lut_size = vsi->rss_lut_size;
3122 lut_params.lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF;
3123 lut_params.lut = vsi->rss_lut;
3124 lut_params.global_lut_id = 0;
3125 ret = ice_aq_set_rss_lut(hw, &lut_params);
3129 /* Select the symmetric Toeplitz hash scheme in the VSI hash control register. */
3130 reg = ICE_READ_REG(hw, VSIQF_HASH_CTL(vsi->vsi_id));
3131 reg = (reg & (~VSIQF_HASH_CTL_HASH_SCHEME_M)) |
3132 (1 << VSIQF_HASH_CTL_HASH_SCHEME_S);
3133 ICE_WRITE_REG(hw, VSIQF_HASH_CTL(vsi->vsi_id), reg);
3135 /* RSS hash configuration */
3136 ice_rss_hash_set(pf, rss_conf->rss_hf);
3140 rte_free(vsi->rss_key);
3141 vsi->rss_key = NULL;
3142 rte_free(vsi->rss_lut);
3143 vsi->rss_lut = NULL;
3148 ice_dev_configure(struct rte_eth_dev *dev)
3150 struct ice_adapter *ad =
3151 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
3152 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3155 /* Initialize to TRUE. If any Rx queue doesn't meet the bulk
3156 * allocation or vector Rx preconditions, we will reset it.
3157 */
3158 ad->rx_bulk_alloc_allowed = true;
3159 ad->tx_simple_allowed = true;
3161 if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
3162 dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
3164 if (dev->data->nb_rx_queues) {
3165 ret = ice_init_rss(pf);
3167 PMD_DRV_LOG(ERR, "Failed to enable rss for PF");
3176 __vsi_queues_bind_intr(struct ice_vsi *vsi, uint16_t msix_vect,
3177 int base_queue, int nb_queue)
3179 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
3180 uint32_t val, val_tx;
3183 for (i = 0; i < nb_queue; i++) {
3185 val = (msix_vect & QINT_RQCTL_MSIX_INDX_M) |
3186 (0 << QINT_RQCTL_ITR_INDX_S) | QINT_RQCTL_CAUSE_ENA_M;
3187 val_tx = (msix_vect & QINT_TQCTL_MSIX_INDX_M) |
3188 (0 << QINT_TQCTL_ITR_INDX_S) | QINT_TQCTL_CAUSE_ENA_M;
3190 PMD_DRV_LOG(INFO, "queue %d is binding to vect %d",
3191 base_queue + i, msix_vect);
3192 /* set ITR0 value */
3193 ICE_WRITE_REG(hw, GLINT_ITR(0, msix_vect), 0x2);
3194 ICE_WRITE_REG(hw, QINT_RQCTL(base_queue + i), val);
3195 ICE_WRITE_REG(hw, QINT_TQCTL(base_queue + i), val_tx);
3200 ice_vsi_queues_bind_intr(struct ice_vsi *vsi)
3202 struct rte_eth_dev *dev = &rte_eth_devices[vsi->adapter->pf.dev_data->port_id];
3203 struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
3204 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
3205 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
3206 uint16_t msix_vect = vsi->msix_intr;
3207 uint16_t nb_msix = RTE_MIN(vsi->nb_msix, intr_handle->nb_efd);
3208 uint16_t queue_idx = 0;
3212 /* clear Rx/Tx queue interrupt */
3213 for (i = 0; i < vsi->nb_used_qps; i++) {
3214 ICE_WRITE_REG(hw, QINT_TQCTL(vsi->base_queue + i), 0);
3215 ICE_WRITE_REG(hw, QINT_RQCTL(vsi->base_queue + i), 0);
3218 /* PF bind interrupt */
3219 if (rte_intr_dp_is_en(intr_handle)) {
3224 for (i = 0; i < vsi->nb_used_qps; i++) {
3226 if (!rte_intr_allow_others(intr_handle))
3227 msix_vect = ICE_MISC_VEC_ID;
3229 /* uio: map all queues to a single msix_vect */
3230 __vsi_queues_bind_intr(vsi, msix_vect,
3231 vsi->base_queue + i,
3232 vsi->nb_used_qps - i);
3234 for (; !!record && i < vsi->nb_used_qps; i++)
3235 intr_handle->intr_vec[queue_idx + i] =
3240 /* vfio 1:1 queue/msix_vect mapping */
3241 __vsi_queues_bind_intr(vsi, msix_vect,
3242 vsi->base_queue + i, 1);
3245 intr_handle->intr_vec[queue_idx + i] = msix_vect;
3253 ice_vsi_enable_queues_intr(struct ice_vsi *vsi)
3255 struct rte_eth_dev *dev = &rte_eth_devices[vsi->adapter->pf.dev_data->port_id];
3256 struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
3257 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
3258 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
3259 uint16_t msix_intr, i;
3261 if (rte_intr_allow_others(intr_handle))
3262 for (i = 0; i < vsi->nb_used_qps; i++) {
3263 msix_intr = vsi->msix_intr + i;
3264 ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr),
3265 GLINT_DYN_CTL_INTENA_M |
3266 GLINT_DYN_CTL_CLEARPBA_M |
3267 GLINT_DYN_CTL_ITR_INDX_M |
3268 GLINT_DYN_CTL_WB_ON_ITR_M);
3271 ICE_WRITE_REG(hw, GLINT_DYN_CTL(0),
3272 GLINT_DYN_CTL_INTENA_M |
3273 GLINT_DYN_CTL_CLEARPBA_M |
3274 GLINT_DYN_CTL_ITR_INDX_M |
3275 GLINT_DYN_CTL_WB_ON_ITR_M);
3279 ice_rxq_intr_setup(struct rte_eth_dev *dev)
3281 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3282 struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
3283 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
3284 struct ice_vsi *vsi = pf->main_vsi;
3285 uint32_t intr_vector = 0;
3287 rte_intr_disable(intr_handle);
3289 /* check and configure queue intr-vector mapping */
3290 if ((rte_intr_cap_multiple(intr_handle) ||
3291 !RTE_ETH_DEV_SRIOV(dev).active) &&
3292 dev->data->dev_conf.intr_conf.rxq != 0) {
3293 intr_vector = dev->data->nb_rx_queues;
3294 if (intr_vector > ICE_MAX_INTR_QUEUE_NUM) {
3295 PMD_DRV_LOG(ERR, "At most %d intr queues supported",
3296 ICE_MAX_INTR_QUEUE_NUM);
3299 if (rte_intr_efd_enable(intr_handle, intr_vector))
3303 if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
3304 intr_handle->intr_vec =
3305 rte_zmalloc(NULL, dev->data->nb_rx_queues * sizeof(int),
3307 if (!intr_handle->intr_vec) {
3309 "Failed to allocate %d rx_queues intr_vec",
3310 dev->data->nb_rx_queues);
3315 /* Map queues with MSIX interrupt */
3316 vsi->nb_used_qps = dev->data->nb_rx_queues;
3317 ice_vsi_queues_bind_intr(vsi);
3319 /* Enable interrupts for all the queues */
3320 ice_vsi_enable_queues_intr(vsi);
3322 rte_intr_enable(intr_handle);
3328 ice_get_init_link_status(struct rte_eth_dev *dev)
3330 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3331 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3332 bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false;
3333 struct ice_link_status link_status;
3336 ret = ice_aq_get_link_info(hw->port_info, enable_lse,
3337 &link_status, NULL);
3338 if (ret != ICE_SUCCESS) {
3339 PMD_DRV_LOG(ERR, "Failed to get link info");
3340 pf->init_link_up = false;
3344 if (link_status.link_info & ICE_AQ_LINK_UP)
3345 pf->init_link_up = true;
3349 ice_dev_start(struct rte_eth_dev *dev)
3351 struct rte_eth_dev_data *data = dev->data;
3352 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3353 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3354 struct ice_vsi *vsi = pf->main_vsi;
3355 uint16_t nb_rxq = 0;
3357 uint16_t max_frame_size;
3360 /* program Tx queues' context in hardware */
3361 for (nb_txq = 0; nb_txq < data->nb_tx_queues; nb_txq++) {
3362 ret = ice_tx_queue_start(dev, nb_txq);
3364 PMD_DRV_LOG(ERR, "failed to start Tx queue %u", nb_txq);
3369 /* program Rx queues' context in hardware */
3370 for (nb_rxq = 0; nb_rxq < data->nb_rx_queues; nb_rxq++) {
3371 ret = ice_rx_queue_start(dev, nb_rxq);
3373 PMD_DRV_LOG(ERR, "failed to start Rx queue %u", nb_rxq);
3378 ice_set_rx_function(dev);
3379 ice_set_tx_function(dev);
3381 mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
3382 ETH_VLAN_EXTEND_MASK;
3383 ret = ice_vlan_offload_set(dev, mask);
3385 PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
3389 /* enable Rx interrupts and map Rx queues to interrupt vectors */
3390 if (ice_rxq_intr_setup(dev))
3393 /* Enable receiving broadcast packets and transmitting packets */
3394 ret = ice_set_vsi_promisc(hw, vsi->idx,
3395 ICE_PROMISC_BCAST_RX | ICE_PROMISC_BCAST_TX |
3396 ICE_PROMISC_UCAST_TX | ICE_PROMISC_MCAST_TX,
3398 if (ret != ICE_SUCCESS)
3399 PMD_DRV_LOG(INFO, "failed to set vsi broadcast");
3401 ret = ice_aq_set_event_mask(hw, hw->port_info->lport,
3402 ((u16)(ICE_AQ_LINK_EVENT_LINK_FAULT |
3403 ICE_AQ_LINK_EVENT_PHY_TEMP_ALARM |
3404 ICE_AQ_LINK_EVENT_EXCESSIVE_ERRORS |
3405 ICE_AQ_LINK_EVENT_SIGNAL_DETECT |
3406 ICE_AQ_LINK_EVENT_AN_COMPLETED |
3407 ICE_AQ_LINK_EVENT_PORT_TX_SUSPENDED)),
3409 if (ret != ICE_SUCCESS)
3410 PMD_DRV_LOG(WARNING, "Failed to set phy mask");
3412 ice_get_init_link_status(dev);
3414 ice_dev_set_link_up(dev);
3416 /* Call the get_link_info AQ command to enable/disable LSE */
3417 ice_link_update(dev, 0);
3419 pf->adapter_stopped = false;
3421 /* Set the max frame size to the default value */
3422 max_frame_size = pf->dev_data->dev_conf.rxmode.max_rx_pkt_len ?
3423 pf->dev_data->dev_conf.rxmode.max_rx_pkt_len :
3426 /* Program the max frame size into HW */
3427 ice_aq_set_mac_cfg(hw, max_frame_size, NULL);
3431 /* stop any queues already started if we failed to start them all */
3433 for (i = 0; i < nb_rxq; i++)
3434 ice_rx_queue_stop(dev, i);
3436 for (i = 0; i < nb_txq; i++)
3437 ice_tx_queue_stop(dev, i);
3443 ice_dev_reset(struct rte_eth_dev *dev)
3447 if (dev->data->sriov.active)
3450 ret = ice_dev_uninit(dev);
3452 PMD_INIT_LOG(ERR, "failed to uninit device, status = %d", ret);
3456 ret = ice_dev_init(dev);
3458 PMD_INIT_LOG(ERR, "failed to init device, status = %d", ret);
3466 ice_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
3468 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3469 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3470 struct ice_vsi *vsi = pf->main_vsi;
3471 struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
3472 bool is_safe_mode = pf->adapter->is_safe_mode;
3476 dev_info->min_rx_bufsize = ICE_BUF_SIZE_MIN;
3477 dev_info->max_rx_pktlen = ICE_FRAME_SIZE_MAX;
3478 dev_info->max_rx_queues = vsi->nb_qps;
3479 dev_info->max_tx_queues = vsi->nb_qps;
3480 dev_info->max_mac_addrs = vsi->max_macaddrs;
3481 dev_info->max_vfs = pci_dev->max_vfs;
3482 dev_info->max_mtu = dev_info->max_rx_pktlen - ICE_ETH_OVERHEAD;
3483 dev_info->min_mtu = RTE_ETHER_MIN_MTU;
3485 dev_info->rx_offload_capa =
3486 DEV_RX_OFFLOAD_VLAN_STRIP |
3487 DEV_RX_OFFLOAD_JUMBO_FRAME |
3488 DEV_RX_OFFLOAD_KEEP_CRC |
3489 DEV_RX_OFFLOAD_SCATTER |
3490 DEV_RX_OFFLOAD_VLAN_FILTER;
3491 dev_info->tx_offload_capa =
3492 DEV_TX_OFFLOAD_VLAN_INSERT |
3493 DEV_TX_OFFLOAD_TCP_TSO |
3494 DEV_TX_OFFLOAD_MULTI_SEGS |
3495 DEV_TX_OFFLOAD_MBUF_FAST_FREE;
3496 dev_info->flow_type_rss_offloads = 0;
3498 if (!is_safe_mode) {
3499 dev_info->rx_offload_capa |=
3500 DEV_RX_OFFLOAD_IPV4_CKSUM |
3501 DEV_RX_OFFLOAD_UDP_CKSUM |
3502 DEV_RX_OFFLOAD_TCP_CKSUM |
3503 DEV_RX_OFFLOAD_QINQ_STRIP |
3504 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
3505 DEV_RX_OFFLOAD_VLAN_EXTEND |
3506 DEV_RX_OFFLOAD_RSS_HASH;
3507 dev_info->tx_offload_capa |=
3508 DEV_TX_OFFLOAD_QINQ_INSERT |
3509 DEV_TX_OFFLOAD_IPV4_CKSUM |
3510 DEV_TX_OFFLOAD_UDP_CKSUM |
3511 DEV_TX_OFFLOAD_TCP_CKSUM |
3512 DEV_TX_OFFLOAD_SCTP_CKSUM |
3513 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
3514 DEV_TX_OFFLOAD_OUTER_UDP_CKSUM;
3515 dev_info->flow_type_rss_offloads |= ICE_RSS_OFFLOAD_ALL;
3518 dev_info->rx_queue_offload_capa = 0;
3519 dev_info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
3521 dev_info->reta_size = pf->hash_lut_size;
3522 dev_info->hash_key_size = (VSIQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
3524 dev_info->default_rxconf = (struct rte_eth_rxconf) {
3526 .pthresh = ICE_DEFAULT_RX_PTHRESH,
3527 .hthresh = ICE_DEFAULT_RX_HTHRESH,
3528 .wthresh = ICE_DEFAULT_RX_WTHRESH,
3530 .rx_free_thresh = ICE_DEFAULT_RX_FREE_THRESH,
3535 dev_info->default_txconf = (struct rte_eth_txconf) {
3537 .pthresh = ICE_DEFAULT_TX_PTHRESH,
3538 .hthresh = ICE_DEFAULT_TX_HTHRESH,
3539 .wthresh = ICE_DEFAULT_TX_WTHRESH,
3541 .tx_free_thresh = ICE_DEFAULT_TX_FREE_THRESH,
3542 .tx_rs_thresh = ICE_DEFAULT_TX_RSBIT_THRESH,
3546 dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
3547 .nb_max = ICE_MAX_RING_DESC,
3548 .nb_min = ICE_MIN_RING_DESC,
3549 .nb_align = ICE_ALIGN_RING_DESC,
3552 dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
3553 .nb_max = ICE_MAX_RING_DESC,
3554 .nb_min = ICE_MIN_RING_DESC,
3555 .nb_align = ICE_ALIGN_RING_DESC,
3558 dev_info->speed_capa = ETH_LINK_SPEED_10M |
3559 ETH_LINK_SPEED_100M |
3561 ETH_LINK_SPEED_2_5G |
3563 ETH_LINK_SPEED_10G |
3564 ETH_LINK_SPEED_20G |
3567 phy_type_low = hw->port_info->phy.phy_type_low;
3568 phy_type_high = hw->port_info->phy.phy_type_high;
3570 if (ICE_PHY_TYPE_SUPPORT_50G(phy_type_low))
3571 dev_info->speed_capa |= ETH_LINK_SPEED_50G;
3573 if (ICE_PHY_TYPE_SUPPORT_100G_LOW(phy_type_low) ||
3574 ICE_PHY_TYPE_SUPPORT_100G_HIGH(phy_type_high))
3575 dev_info->speed_capa |= ETH_LINK_SPEED_100G;
3577 dev_info->nb_rx_queues = dev->data->nb_rx_queues;
3578 dev_info->nb_tx_queues = dev->data->nb_tx_queues;
3580 dev_info->default_rxportconf.burst_size = ICE_RX_MAX_BURST;
3581 dev_info->default_txportconf.burst_size = ICE_TX_MAX_BURST;
3582 dev_info->default_rxportconf.nb_queues = 1;
3583 dev_info->default_txportconf.nb_queues = 1;
3584 dev_info->default_rxportconf.ring_size = ICE_BUF_SIZE_MIN;
3585 dev_info->default_txportconf.ring_size = ICE_BUF_SIZE_MIN;
3591 ice_atomic_read_link_status(struct rte_eth_dev *dev,
3592 struct rte_eth_link *link)
3594 struct rte_eth_link *dst = link;
3595 struct rte_eth_link *src = &dev->data->dev_link;
3597 if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
3598 *(uint64_t *)src) == 0)
3605 ice_atomic_write_link_status(struct rte_eth_dev *dev,
3606 struct rte_eth_link *link)
3608 struct rte_eth_link *dst = &dev->data->dev_link;
3609 struct rte_eth_link *src = link;
3611 if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
3612 *(uint64_t *)src) == 0)
3619 ice_link_update(struct rte_eth_dev *dev, int wait_to_complete)
3621 #define CHECK_INTERVAL 100 /* 100ms */
3622 #define MAX_REPEAT_TIME 10 /* 1s (10 * 100ms) in total */
3623 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3624 struct ice_link_status link_status;
3625 struct rte_eth_link link, old;
3627 unsigned int rep_cnt = MAX_REPEAT_TIME;
3628 bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false;
3630 memset(&link, 0, sizeof(link));
3631 memset(&old, 0, sizeof(old));
3632 memset(&link_status, 0, sizeof(link_status));
3633 ice_atomic_read_link_status(dev, &old);
3636 /* Get link status information from hardware */
3637 status = ice_aq_get_link_info(hw->port_info, enable_lse,
3638 &link_status, NULL);
3639 if (status != ICE_SUCCESS) {
3640 link.link_speed = ETH_SPEED_NUM_100M;
3641 link.link_duplex = ETH_LINK_FULL_DUPLEX;
3642 PMD_DRV_LOG(ERR, "Failed to get link info");
3646 link.link_status = link_status.link_info & ICE_AQ_LINK_UP;
3647 if (!wait_to_complete || link.link_status)
3650 rte_delay_ms(CHECK_INTERVAL);
3651 } while (--rep_cnt);
3653 if (!link.link_status)
3656 /* Full-duplex operation at all supported speeds */
3657 link.link_duplex = ETH_LINK_FULL_DUPLEX;
3659 /* Parse the link status */
3660 switch (link_status.link_speed) {
3661 case ICE_AQ_LINK_SPEED_10MB:
3662 link.link_speed = ETH_SPEED_NUM_10M;
3664 case ICE_AQ_LINK_SPEED_100MB:
3665 link.link_speed = ETH_SPEED_NUM_100M;
3667 case ICE_AQ_LINK_SPEED_1000MB:
3668 link.link_speed = ETH_SPEED_NUM_1G;
3670 case ICE_AQ_LINK_SPEED_2500MB:
3671 link.link_speed = ETH_SPEED_NUM_2_5G;
3673 case ICE_AQ_LINK_SPEED_5GB:
3674 link.link_speed = ETH_SPEED_NUM_5G;
3676 case ICE_AQ_LINK_SPEED_10GB:
3677 link.link_speed = ETH_SPEED_NUM_10G;
3679 case ICE_AQ_LINK_SPEED_20GB:
3680 link.link_speed = ETH_SPEED_NUM_20G;
3682 case ICE_AQ_LINK_SPEED_25GB:
3683 link.link_speed = ETH_SPEED_NUM_25G;
3685 case ICE_AQ_LINK_SPEED_40GB:
3686 link.link_speed = ETH_SPEED_NUM_40G;
3688 case ICE_AQ_LINK_SPEED_50GB:
3689 link.link_speed = ETH_SPEED_NUM_50G;
3691 case ICE_AQ_LINK_SPEED_100GB:
3692 link.link_speed = ETH_SPEED_NUM_100G;
3694 case ICE_AQ_LINK_SPEED_UNKNOWN:
3695 PMD_DRV_LOG(ERR, "Unknown link speed");
3696 link.link_speed = ETH_SPEED_NUM_UNKNOWN;
3699 PMD_DRV_LOG(ERR, "No link speed reported");
3700 link.link_speed = ETH_SPEED_NUM_NONE;
3704 link.link_autoneg = !(dev->data->dev_conf.link_speeds &
3705 ETH_LINK_SPEED_FIXED);
3708 ice_atomic_write_link_status(dev, &link);
3709 if (link.link_status == old.link_status)
3715 /* Force the physical link state by getting the current PHY capabilities from
3716 * hardware and setting the PHY config based on the determined capabilities. If
3717 * link changes, a link event will be triggered because both the Enable Automatic
3718 * Link Update and LESM Enable bits are set when setting the PHY capabilities.
3719 */
3720 static enum ice_status
3721 ice_force_phys_link_state(struct ice_hw *hw, bool link_up)
3723 struct ice_aqc_set_phy_cfg_data cfg = { 0 };
3724 struct ice_aqc_get_phy_caps_data *pcaps;
3725 struct ice_port_info *pi;
3726 enum ice_status status;
3728 if (!hw || !hw->port_info)
3729 return ICE_ERR_PARAM;
3733 pcaps = (struct ice_aqc_get_phy_caps_data *)
3734 ice_malloc(hw, sizeof(*pcaps));
3736 return ICE_ERR_NO_MEMORY;
3738 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG,
3743 /* No change in link */
3744 if (link_up == !!(pcaps->caps & ICE_AQC_PHY_EN_LINK) &&
3745 link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP))
3748 cfg.phy_type_low = pcaps->phy_type_low;
3749 cfg.phy_type_high = pcaps->phy_type_high;
3750 cfg.caps = pcaps->caps | ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
3751 cfg.low_power_ctrl_an = pcaps->low_power_ctrl_an;
3752 cfg.eee_cap = pcaps->eee_cap;
3753 cfg.eeer_value = pcaps->eeer_value;
3754 cfg.link_fec_opt = pcaps->link_fec_options;
3756 cfg.caps |= ICE_AQ_PHY_ENA_LINK;
3758 cfg.caps &= ~ICE_AQ_PHY_ENA_LINK;
3760 status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL);
3763 ice_free(hw, pcaps);
3768 ice_dev_set_link_up(struct rte_eth_dev *dev)
3770 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3772 return ice_force_phys_link_state(hw, true);
3776 ice_dev_set_link_down(struct rte_eth_dev *dev)
3778 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3780 return ice_force_phys_link_state(hw, false);
3784 ice_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
3786 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3787 struct rte_eth_dev_data *dev_data = pf->dev_data;
3788 uint32_t frame_size = mtu + ICE_ETH_OVERHEAD;
3790 /* check if mtu is within the allowed range */
3791 if (mtu < RTE_ETHER_MIN_MTU || frame_size > ICE_FRAME_SIZE_MAX)
3794 /* mtu setting is forbidden while the port is running */
3795 if (dev_data->dev_started) {
3797 "port %d must be stopped before configuration",
3802 if (frame_size > ICE_ETH_MAX_LEN)
3803 dev_data->dev_conf.rxmode.offloads |=
3804 DEV_RX_OFFLOAD_JUMBO_FRAME;
3806 dev_data->dev_conf.rxmode.offloads &=
3807 ~DEV_RX_OFFLOAD_JUMBO_FRAME;
3809 dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
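/*
 * Worked example, assuming ICE_ETH_OVERHEAD accounts for the 14-byte
 * Ethernet header, 4-byte CRC and two 4-byte VLAN tags (26 bytes total):
 * an MTU of 1500 gives frame_size 1526, leaving jumbo-frame support off,
 * while an MTU of 9000 gives 9026, which exceeds ICE_ETH_MAX_LEN and
 * turns the jumbo-frame offload on.
 */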
3814 static int ice_macaddr_set(struct rte_eth_dev *dev,
3815 struct rte_ether_addr *mac_addr)
3817 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3818 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3819 struct ice_vsi *vsi = pf->main_vsi;
3820 struct ice_mac_filter *f;
3824 if (!rte_is_valid_assigned_ether_addr(mac_addr)) {
3825 PMD_DRV_LOG(ERR, "Tried to set invalid MAC address.");
3829 TAILQ_FOREACH(f, &vsi->mac_list, next) {
3830 if (rte_is_same_ether_addr(&pf->dev_addr, &f->mac_info.mac_addr))
3835 PMD_DRV_LOG(ERR, "Failed to find filter for default mac");
3839 ret = ice_remove_mac_filter(vsi, &f->mac_info.mac_addr);
3840 if (ret != ICE_SUCCESS) {
3841 PMD_DRV_LOG(ERR, "Failed to delete mac filter");
3844 ret = ice_add_mac_filter(vsi, mac_addr);
3845 if (ret != ICE_SUCCESS) {
3846 PMD_DRV_LOG(ERR, "Failed to add mac filter");
3849 rte_ether_addr_copy(mac_addr, &pf->dev_addr);
3851 flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL;
3852 ret = ice_aq_manage_mac_write(hw, mac_addr->addr_bytes, flags, NULL);
3853 if (ret != ICE_SUCCESS)
3854 PMD_DRV_LOG(ERR, "Failed to set manage mac");
3859 /* Add a MAC address, and update filters */
3861 ice_macaddr_add(struct rte_eth_dev *dev,
3862 struct rte_ether_addr *mac_addr,
3863 __rte_unused uint32_t index,
3864 __rte_unused uint32_t pool)
3866 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3867 struct ice_vsi *vsi = pf->main_vsi;
3870 ret = ice_add_mac_filter(vsi, mac_addr);
3871 if (ret != ICE_SUCCESS) {
3872 PMD_DRV_LOG(ERR, "Failed to add MAC filter");
3879 /* Remove a MAC address, and update filters */
3881 ice_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
3883 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3884 struct ice_vsi *vsi = pf->main_vsi;
3885 struct rte_eth_dev_data *data = dev->data;
3886 struct rte_ether_addr *macaddr;
3889 macaddr = &data->mac_addrs[index];
3890 ret = ice_remove_mac_filter(vsi, macaddr);
3892 PMD_DRV_LOG(ERR, "Failed to remove MAC filter");
3898 ice_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
3900 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3901 struct ice_vlan vlan = ICE_VLAN(RTE_ETHER_TYPE_VLAN, vlan_id);
3902 struct ice_vsi *vsi = pf->main_vsi;
3905 PMD_INIT_FUNC_TRACE();
3908 * VLAN 0 is the generic filter for untagged packets
3909 * and can't be removed or added by the user.
3915 ret = ice_add_vlan_filter(vsi, &vlan);
3917 PMD_DRV_LOG(ERR, "Failed to add vlan filter");
3921 ret = ice_remove_vlan_filter(vsi, &vlan);
3923 PMD_DRV_LOG(ERR, "Failed to remove vlan filter");
3931 /* In Single VLAN Mode (SVM), single VLAN filters via ICE_SW_LKUP_VLAN are
3932 * based on the inner VLAN ID, so the VLAN TPID (i.e. 0x8100 or 0x88a8)
3933 * doesn't matter. In Double VLAN Mode (DVM), outer/single VLAN filters via
3934 * ICE_SW_LKUP_VLAN are based on the outer/single VLAN ID + VLAN TPID.
3936 * For both modes add a VLAN 0 + no VLAN TPID filter to handle untagged traffic
3937 * when VLAN pruning is enabled. Also, this handles VLAN 0 priority tagged
3938 * traffic in SVM, since the VLAN TPID isn't part of filtering.
3940 * If DVM is enabled then an explicit VLAN 0 + VLAN TPID filter needs to be
3941 * added to allow VLAN 0 priority tagged traffic in DVM, since the VLAN TPID is
3942 * part of filtering.
3943 */
3945 ice_vsi_add_vlan_zero(struct ice_vsi *vsi)
3947 struct ice_vlan vlan;
3950 vlan = ICE_VLAN(0, 0);
3951 err = ice_add_vlan_filter(vsi, &vlan);
3953 PMD_DRV_LOG(DEBUG, "Failed to add VLAN ID 0");
3957 /* in SVM both VLAN 0 filters are identical */
3958 if (!ice_is_dvm_ena(&vsi->adapter->hw))
3961 vlan = ICE_VLAN(RTE_ETHER_TYPE_VLAN, 0);
3962 err = ice_add_vlan_filter(vsi, &vlan);
3964 PMD_DRV_LOG(DEBUG, "Failed to add VLAN ID 0 in double VLAN mode");
3971 /*
3972 * Delete the VLAN 0 filters in the same manner that they were added in
3973 * ice_vsi_add_vlan_zero.
3974 */
3976 ice_vsi_del_vlan_zero(struct ice_vsi *vsi)
3978 struct ice_vlan vlan;
3981 vlan = ICE_VLAN(0, 0);
3982 err = ice_remove_vlan_filter(vsi, &vlan);
3984 PMD_DRV_LOG(DEBUG, "Failed to remove VLAN ID 0");
3988 /* in SVM both VLAN 0 filters are identical */
3989 if (!ice_is_dvm_ena(&vsi->adapter->hw))
3992 vlan = ICE_VLAN(RTE_ETHER_TYPE_VLAN, 0);
3993 err = ice_remove_vlan_filter(vsi, &vlan);
3995 PMD_DRV_LOG(DEBUG, "Failed to remove VLAN ID 0 in double VLAN mode");
4002 /* Configure vlan filter on or off */
4004 ice_vsi_config_vlan_filter(struct ice_vsi *vsi, bool on)
4006 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
4007 struct ice_vsi_ctx ctxt;
4011 sw_flags2 = ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
4014 vsi->info.sw_flags2 |= sw_flags2;
4016 vsi->info.sw_flags2 &= ~sw_flags2;
4018 vsi->info.sw_id = hw->port_info->sw_id;
4019 (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
4020 ctxt.info.valid_sections =
4021 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_SW_VALID |
4022 ICE_AQ_VSI_PROP_SECURITY_VALID);
4023 ctxt.vsi_num = vsi->vsi_id;
4025 ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
4027 PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan rx pruning",
4028 on ? "enable" : "disable");
4031 vsi->info.valid_sections |=
4032 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_SW_VALID |
4033 ICE_AQ_VSI_PROP_SECURITY_VALID);
4036 /* consistent with other drivers: allow untagged packets when VLAN filtering is on */
4038 ret = ice_vsi_add_vlan_zero(vsi);
4040 ret = ice_vsi_del_vlan_zero(vsi);
4045 /* Manage VLAN stripping for the VSI for Rx */
4047 ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena)
4049 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
4050 struct ice_vsi_ctx ctxt;
4051 enum ice_status status;
4054 /* do not allow modifying VLAN stripping when a port VLAN is configured
4055 * in this VSI
4056 */
4057 if (vsi->info.port_based_inner_vlan)
4060 memset(&ctxt, 0, sizeof(ctxt));
4063 /* Strip VLAN tag from Rx packet and put it in the desc */
4064 ctxt.info.inner_vlan_flags =
4065 ICE_AQ_VSI_INNER_VLAN_EMODE_STR_BOTH;
4067 /* Disable stripping. Leave tag in packet */
4068 ctxt.info.inner_vlan_flags =
4069 ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING;
4071 /* Allow all packets untagged/tagged */
4072 ctxt.info.inner_vlan_flags |= ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL;
4074 ctxt.info.valid_sections = rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID);
4076 status = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
4078 PMD_DRV_LOG(ERR, "Update VSI failed to %s vlan stripping",
4079 ena ? "enable" : "disable");
4082 vsi->info.inner_vlan_flags = ctxt.info.inner_vlan_flags;
4089 ice_vsi_ena_inner_stripping(struct ice_vsi *vsi)
4091 return ice_vsi_manage_vlan_stripping(vsi, true);
4095 ice_vsi_dis_inner_stripping(struct ice_vsi *vsi)
4097 return ice_vsi_manage_vlan_stripping(vsi, false);
4100 static int ice_vsi_ena_outer_stripping(struct ice_vsi *vsi)
4102 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
4103 struct ice_vsi_ctx ctxt;
4104 enum ice_status status;
4107 /* do not allow modifying VLAN stripping when a port VLAN is configured
4108 * in this VSI
4109 */
4110 if (vsi->info.port_based_outer_vlan)
4113 memset(&ctxt, 0, sizeof(ctxt));
4115 ctxt.info.valid_sections =
4116 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);
4117 /* clear current outer VLAN strip settings */
4118 ctxt.info.outer_vlan_flags = vsi->info.outer_vlan_flags &
4119 ~(ICE_AQ_VSI_OUTER_VLAN_EMODE_M | ICE_AQ_VSI_OUTER_TAG_TYPE_M);
4120 ctxt.info.outer_vlan_flags |=
4121 (ICE_AQ_VSI_OUTER_VLAN_EMODE_SHOW_BOTH <<
4122 ICE_AQ_VSI_OUTER_VLAN_EMODE_S) |
4123 (ICE_AQ_VSI_OUTER_TAG_VLAN_8100 <<
4124 ICE_AQ_VSI_OUTER_TAG_TYPE_S);
4126 status = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
4128 PMD_DRV_LOG(ERR, "Update VSI failed to enable outer VLAN stripping");
4131 vsi->info.outer_vlan_flags = ctxt.info.outer_vlan_flags;
4138 ice_vsi_dis_outer_stripping(struct ice_vsi *vsi)
4140 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
4141 struct ice_vsi_ctx ctxt;
4142 enum ice_status status;
4145 if (vsi->info.port_based_outer_vlan)
4148 memset(&ctxt, 0, sizeof(ctxt));
4150 ctxt.info.valid_sections =
4151 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);
4152 /* clear current outer VLAN strip settings */
4153 ctxt.info.outer_vlan_flags = vsi->info.outer_vlan_flags &
4154 ~ICE_AQ_VSI_OUTER_VLAN_EMODE_M;
4155 ctxt.info.outer_vlan_flags |= ICE_AQ_VSI_OUTER_VLAN_EMODE_NOTHING <<
4156 ICE_AQ_VSI_OUTER_VLAN_EMODE_S;
4158 status = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
4160 PMD_DRV_LOG(ERR, "Update VSI failed to disable outer VLAN stripping");
4163 vsi->info.outer_vlan_flags = ctxt.info.outer_vlan_flags;
4170 ice_vsi_config_vlan_stripping(struct ice_vsi *vsi, bool ena)
4172 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
4175 if (ice_is_dvm_ena(hw)) {
4177 ret = ice_vsi_ena_outer_stripping(vsi);
4179 ret = ice_vsi_dis_outer_stripping(vsi);
4182 ret = ice_vsi_ena_inner_stripping(vsi);
4184 ret = ice_vsi_dis_inner_stripping(vsi);
4191 ice_vlan_offload_set(struct rte_eth_dev *dev, int mask)
4193 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4194 struct ice_vsi *vsi = pf->main_vsi;
4195 struct rte_eth_rxmode *rxmode;
4197 rxmode = &dev->data->dev_conf.rxmode;
4198 if (mask & ETH_VLAN_FILTER_MASK) {
4199 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
4200 ice_vsi_config_vlan_filter(vsi, true);
4202 ice_vsi_config_vlan_filter(vsi, false);
4205 if (mask & ETH_VLAN_STRIP_MASK) {
4206 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
4207 ice_vsi_config_vlan_stripping(vsi, true);
4209 ice_vsi_config_vlan_stripping(vsi, false);
4216 ice_get_rss_lut(struct ice_vsi *vsi, uint8_t *lut, uint16_t lut_size)
4218 struct ice_aq_get_set_rss_lut_params lut_params;
4219 struct ice_pf *pf = ICE_VSI_TO_PF(vsi);
4220 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
4226 if (pf->flags & ICE_FLAG_RSS_AQ_CAPABLE) {
4227 lut_params.vsi_handle = vsi->idx;
4228 lut_params.lut_size = lut_size;
4229 lut_params.lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF;
4230 lut_params.lut = lut;
4231 lut_params.global_lut_id = 0;
4232 ret = ice_aq_get_rss_lut(hw, &lut_params);
4234 PMD_DRV_LOG(ERR, "Failed to get RSS lookup table");
4238 uint32_t *lut_dw = (uint32_t *)lut;
4239 uint16_t i, lut_size_dw = lut_size / 4;
4241 for (i = 0; i < lut_size_dw; i++)
4242 lut_dw[i] = ICE_READ_REG(hw, PFQF_HLUT(i));
4249 ice_set_rss_lut(struct ice_vsi *vsi, uint8_t *lut, uint16_t lut_size)
4251 struct ice_aq_get_set_rss_lut_params lut_params;
4259 pf = ICE_VSI_TO_PF(vsi);
4260 hw = ICE_VSI_TO_HW(vsi);
4262 if (pf->flags & ICE_FLAG_RSS_AQ_CAPABLE) {
4263 lut_params.vsi_handle = vsi->idx;
4264 lut_params.lut_size = lut_size;
4265 lut_params.lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF;
4266 lut_params.lut = lut;
4267 lut_params.global_lut_id = 0;
4268 ret = ice_aq_set_rss_lut(hw, &lut_params);
4270 PMD_DRV_LOG(ERR, "Failed to set RSS lookup table");
4274 uint32_t *lut_dw = (uint32_t *)lut;
4275 uint16_t i, lut_size_dw = lut_size / 4;
4277 for (i = 0; i < lut_size_dw; i++)
4278 ICE_WRITE_REG(hw, PFQF_HLUT(i), lut_dw[i]);
4287 ice_rss_reta_update(struct rte_eth_dev *dev,
4288 struct rte_eth_rss_reta_entry64 *reta_conf,
4291 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4292 uint16_t i, lut_size = pf->hash_lut_size;
4293 uint16_t idx, shift;
4297 if (reta_size != ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128 &&
4298 reta_size != ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512 &&
4299 reta_size != ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K) {
4301 "The size of hash lookup table configured (%d)"
4302 "doesn't match the number hardware can "
4303 "supported (128, 512, 2048)",
4308 /* It MUST use the current LUT size to get the RSS lookup table,
4309 * otherwise it will fail with a -100 error code.
4310 */
4311 lut = rte_zmalloc(NULL, RTE_MAX(reta_size, lut_size), 0);
4313 PMD_DRV_LOG(ERR, "Failed to allocate memory");
4316 ret = ice_get_rss_lut(pf->main_vsi, lut, lut_size);
4320 for (i = 0; i < reta_size; i++) {
4321 idx = i / RTE_RETA_GROUP_SIZE;
4322 shift = i % RTE_RETA_GROUP_SIZE;
4323 if (reta_conf[idx].mask & (1ULL << shift))
4324 lut[i] = reta_conf[idx].reta[shift];
4326 ret = ice_set_rss_lut(pf->main_vsi, lut, reta_size);
4327 if (ret == 0 && lut_size != reta_size) {
4329 "The size of hash lookup table is changed from (%d) to (%d)",
4330 lut_size, reta_size);
4331 pf->hash_lut_size = reta_size;
4341 ice_rss_reta_query(struct rte_eth_dev *dev,
4342 struct rte_eth_rss_reta_entry64 *reta_conf,
4345 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4346 uint16_t i, lut_size = pf->hash_lut_size;
4347 uint16_t idx, shift;
4351 if (reta_size != lut_size) {
4353 "The size of hash lookup table configured (%d)"
4354 "doesn't match the number hardware can "
4356 reta_size, lut_size);
4360 lut = rte_zmalloc(NULL, reta_size, 0);
4362 PMD_DRV_LOG(ERR, "Failed to allocate memory");
4366 ret = ice_get_rss_lut(pf->main_vsi, lut, reta_size);
4370 for (i = 0; i < reta_size; i++) {
4371 idx = i / RTE_RETA_GROUP_SIZE;
4372 shift = i % RTE_RETA_GROUP_SIZE;
4373 if (reta_conf[idx].mask & (1ULL << shift))
4374 reta_conf[idx].reta[shift] = lut[i];
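/*
 * A minimal application-side sketch of exercising this query path through
 * the generic ethdev API; the port id and the 512-entry table size are
 * illustrative assumptions, and the block is guarded out of compilation.
 */
#ifdef ICE_USAGE_EXAMPLES
static void
example_query_reta(uint16_t example_port)
{
	struct rte_eth_rss_reta_entry64 reta_conf[512 / RTE_RETA_GROUP_SIZE];
	uint16_t i;

	memset(reta_conf, 0, sizeof(reta_conf));
	for (i = 0; i < 512 / RTE_RETA_GROUP_SIZE; i++)
		reta_conf[i].mask = UINT64_MAX; /* query all 64 entries per group */
	rte_eth_dev_rss_reta_query(example_port, reta_conf, 512);
}
#endif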
4384 ice_set_rss_key(struct ice_vsi *vsi, uint8_t *key, uint8_t key_len)
4386 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
4389 if (!key || key_len == 0) {
4390 PMD_DRV_LOG(DEBUG, "No key to be configured");
4392 } else if (key_len != (VSIQF_HKEY_MAX_INDEX + 1) *
4393 sizeof(uint32_t)) {
4394 PMD_DRV_LOG(ERR, "Invalid key length %u", key_len);
4398 struct ice_aqc_get_set_rss_keys *key_dw =
4399 (struct ice_aqc_get_set_rss_keys *)key;
4401 ret = ice_aq_set_rss_key(hw, vsi->idx, key_dw);
4403 PMD_DRV_LOG(ERR, "Failed to configure RSS key via AQ");
4411 ice_get_rss_key(struct ice_vsi *vsi, uint8_t *key, uint8_t *key_len)
4413 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
4416 if (!key || !key_len)
4419 ret = ice_aq_get_rss_key
4420 (hw, vsi->idx,
4421 (struct ice_aqc_get_set_rss_keys *)key);
4423 PMD_DRV_LOG(ERR, "Failed to get RSS key via AQ");
4426 *key_len = (VSIQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
4432 ice_rss_hash_update(struct rte_eth_dev *dev,
4433 struct rte_eth_rss_conf *rss_conf)
4435 enum ice_status status = ICE_SUCCESS;
4436 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4437 struct ice_vsi *vsi = pf->main_vsi;
4440 status = ice_set_rss_key(vsi, rss_conf->rss_key, rss_conf->rss_key_len);
4444 if (rss_conf->rss_hf == 0) {
4449 /* RSS hash configuration */
4450 ice_rss_hash_set(pf, rss_conf->rss_hf);
4456 ice_rss_hash_conf_get(struct rte_eth_dev *dev,
4457 struct rte_eth_rss_conf *rss_conf)
4459 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4460 struct ice_vsi *vsi = pf->main_vsi;
4462 ice_get_rss_key(vsi, rss_conf->rss_key,
4463 &rss_conf->rss_key_len);
4465 rss_conf->rss_hf = pf->rss_hf;
4470 ice_promisc_enable(struct rte_eth_dev *dev)
4472 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4473 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4474 struct ice_vsi *vsi = pf->main_vsi;
4475 enum ice_status status;
4479 pmask = ICE_PROMISC_UCAST_RX | ICE_PROMISC_UCAST_TX |
4480 ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX;
4482 status = ice_set_vsi_promisc(hw, vsi->idx, pmask, 0);
4484 case ICE_ERR_ALREADY_EXISTS:
4485 PMD_DRV_LOG(DEBUG, "Promisc mode has already been enabled");
4489 PMD_DRV_LOG(ERR, "Failed to enable promisc, err=%d", status);
4497 ice_promisc_disable(struct rte_eth_dev *dev)
4499 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4500 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4501 struct ice_vsi *vsi = pf->main_vsi;
4502 enum ice_status status;
4506 if (dev->data->all_multicast == 1)
4507 pmask = ICE_PROMISC_UCAST_RX | ICE_PROMISC_UCAST_TX;
4509 pmask = ICE_PROMISC_UCAST_RX | ICE_PROMISC_UCAST_TX |
4510 ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX;
4512 status = ice_clear_vsi_promisc(hw, vsi->idx, pmask, 0);
4513 if (status != ICE_SUCCESS) {
4514 PMD_DRV_LOG(ERR, "Failed to clear promisc, err=%d", status);
4522 ice_allmulti_enable(struct rte_eth_dev *dev)
4524 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4525 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4526 struct ice_vsi *vsi = pf->main_vsi;
4527 enum ice_status status;
4531 pmask = ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX;
4533 status = ice_set_vsi_promisc(hw, vsi->idx, pmask, 0);
4536 case ICE_ERR_ALREADY_EXISTS:
4537 PMD_DRV_LOG(DEBUG, "Allmulti has already been enabled");
4541 PMD_DRV_LOG(ERR, "Failed to enable allmulti, err=%d", status);
4549 ice_allmulti_disable(struct rte_eth_dev *dev)
4551 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4552 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4553 struct ice_vsi *vsi = pf->main_vsi;
4554 enum ice_status status;
4558 if (dev->data->promiscuous == 1)
4559 return 0; /* must remain in all_multicast mode */
4561 pmask = ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX;
4563 status = ice_clear_vsi_promisc(hw, vsi->idx, pmask, 0);
4564 if (status != ICE_SUCCESS) {
4565 PMD_DRV_LOG(ERR, "Failed to clear allmulti, err=%d", status);
4572 static int ice_rx_queue_intr_enable(struct rte_eth_dev *dev,
4575 struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
4576 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
4577 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4581 msix_intr = intr_handle->intr_vec[queue_id];
4583 val = GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
4584 GLINT_DYN_CTL_ITR_INDX_M;
4585 val &= ~GLINT_DYN_CTL_WB_ON_ITR_M;
4587 ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr), val);
4588 rte_intr_ack(&pci_dev->intr_handle);
4593 static int ice_rx_queue_intr_disable(struct rte_eth_dev *dev,
4596 struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
4597 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
4598 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4601 msix_intr = intr_handle->intr_vec[queue_id];
4603 ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr), GLINT_DYN_CTL_WB_ON_ITR_M);
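/*
 * A minimal sketch of the application-side pattern these two callbacks
 * serve, reached via rte_eth_dev_rx_intr_enable()/_disable(); the port and
 * queue ids are illustrative assumptions, and the block is guarded out of
 * compilation.
 */
#ifdef ICE_USAGE_EXAMPLES
static void
example_rx_intr_wait(uint16_t example_port, uint16_t example_queue)
{
	struct rte_epoll_event ev;

	/* register the queue's Rx interrupt with this thread's epoll fd */
	rte_eth_dev_rx_intr_ctl_q(example_port, example_queue,
				  RTE_EPOLL_PER_THREAD,
				  RTE_INTR_EVENT_ADD, NULL);
	/* arm the interrupt, sleep until traffic arrives, then disarm */
	rte_eth_dev_rx_intr_enable(example_port, example_queue);
	rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, -1);
	rte_eth_dev_rx_intr_disable(example_port, example_queue);
}
#endif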
4609 ice_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
4611 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4616 ver = hw->flash.orom.major;
4617 patch = hw->flash.orom.patch;
4618 build = hw->flash.orom.build;
4620 ret = snprintf(fw_version, fw_size,
4621 "%x.%02x 0x%08x %d.%d.%d",
4622 hw->flash.nvm.major,
4623 hw->flash.nvm.minor,
4624 hw->flash.nvm.eetrack,
4629 /* add the size of '\0' */
4630 ret += 1;
4631 if (fw_size < (size_t)ret)
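/*
 * Applications reach this via rte_eth_dev_fw_version_get(); a minimal
 * sketch (the port id is an illustrative assumption):
 *
 *	char fw_version[32];
 *
 *	if (rte_eth_dev_fw_version_get(port_id, fw_version,
 *				       sizeof(fw_version)) == 0)
 *		printf("firmware: %s\n", fw_version);
 */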
4638 ice_vsi_vlan_pvid_set(struct ice_vsi *vsi, struct ice_vsi_vlan_pvid_info *info)
4641 struct ice_vsi_ctx ctxt;
4642 uint8_t vlan_flags = 0;
4645 if (!vsi || !info) {
4646 PMD_DRV_LOG(ERR, "invalid parameters");
4651 vsi->info.port_based_inner_vlan = info->config.pvid;
4653 * If insert pvid is enabled, only tagged pkts are
4654 * allowed to be sent out.
4656 vlan_flags = ICE_AQ_VSI_INNER_VLAN_INSERT_PVID |
4657 ICE_AQ_VSI_INNER_VLAN_TX_MODE_ACCEPTUNTAGGED;
4659 vsi->info.port_based_inner_vlan = 0;
4660 if (info->config.reject.tagged == 0)
4661 vlan_flags |= ICE_AQ_VSI_INNER_VLAN_TX_MODE_ACCEPTTAGGED;
4663 if (info->config.reject.untagged == 0)
4664 vlan_flags |= ICE_AQ_VSI_INNER_VLAN_TX_MODE_ACCEPTUNTAGGED;
4666 vsi->info.inner_vlan_flags &= ~(ICE_AQ_VSI_INNER_VLAN_INSERT_PVID |
4667 ICE_AQ_VSI_INNER_VLAN_EMODE_M);
4668 vsi->info.inner_vlan_flags |= vlan_flags;
4669 memset(&ctxt, 0, sizeof(ctxt));
4670 rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
4671 ctxt.info.valid_sections =
4672 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID);
4673 ctxt.vsi_num = vsi->vsi_id;
4675 hw = ICE_VSI_TO_HW(vsi);
4676 ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
4677 if (ret != ICE_SUCCESS) {
4679 "update VSI for VLAN insert failed, err %d",
4684 vsi->info.valid_sections |=
4685 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID);
4691 ice_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
4693 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4694 struct ice_vsi *vsi = pf->main_vsi;
4695 struct rte_eth_dev_data *data = pf->dev_data;
4696 struct ice_vsi_vlan_pvid_info info;
4699 memset(&info, 0, sizeof(info));
4702 info.config.pvid = pvid;
4704 info.config.reject.tagged =
4705 data->dev_conf.txmode.hw_vlan_reject_tagged;
4706 info.config.reject.untagged =
4707 data->dev_conf.txmode.hw_vlan_reject_untagged;
4710 ret = ice_vsi_vlan_pvid_set(vsi, &info);
4712 PMD_DRV_LOG(ERR, "Failed to set pvid.");
4720 ice_get_eeprom_length(struct rte_eth_dev *dev)
4722 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4724 return hw->flash.flash_size;
4728 ice_get_eeprom(struct rte_eth_dev *dev,
4729 struct rte_dev_eeprom_info *eeprom)
4731 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4732 enum ice_status status = ICE_SUCCESS;
4733 uint8_t *data = eeprom->data;
4735 eeprom->magic = hw->vendor_id | (hw->device_id << 16);
4737 status = ice_acquire_nvm(hw, ICE_RES_READ);
4739 PMD_DRV_LOG(ERR, "acquire nvm failed.");
4743 status = ice_read_flat_nvm(hw, eeprom->offset, &eeprom->length,
4746 ice_release_nvm(hw);
4749 PMD_DRV_LOG(ERR, "EEPROM read failed.");
4757 ice_stat_update_32(struct ice_hw *hw,
4765 new_data = (uint64_t)ICE_READ_REG(hw, reg);
4769 if (new_data >= *offset)
4770 *stat = (uint64_t)(new_data - *offset);
4772 *stat = (uint64_t)((new_data +
4773 ((uint64_t)1 << ICE_32_BIT_WIDTH))
4778 ice_stat_update_40(struct ice_hw *hw,
4787 new_data = (uint64_t)ICE_READ_REG(hw, loreg);
4788 new_data |= (uint64_t)(ICE_READ_REG(hw, hireg) & ICE_8_BIT_MASK) <<
4794 if (new_data >= *offset)
4795 *stat = new_data - *offset;
4797 *stat = (uint64_t)((new_data +
4798 ((uint64_t)1 << ICE_40_BIT_WIDTH)) -
4801 *stat &= ICE_40_BIT_MASK;
4804 /* Get all the statistics of a VSI */
4806 ice_update_vsi_stats(struct ice_vsi *vsi)
4808 struct ice_eth_stats *oes = &vsi->eth_stats_offset;
4809 struct ice_eth_stats *nes = &vsi->eth_stats;
4810 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
4811 int idx = rte_le_to_cpu_16(vsi->vsi_id);
4813 ice_stat_update_40(hw, GLV_GORCH(idx), GLV_GORCL(idx),
4814 vsi->offset_loaded, &oes->rx_bytes,
4816 ice_stat_update_40(hw, GLV_UPRCH(idx), GLV_UPRCL(idx),
4817 vsi->offset_loaded, &oes->rx_unicast,
4819 ice_stat_update_40(hw, GLV_MPRCH(idx), GLV_MPRCL(idx),
4820 vsi->offset_loaded, &oes->rx_multicast,
4821 &nes->rx_multicast);
4822 ice_stat_update_40(hw, GLV_BPRCH(idx), GLV_BPRCL(idx),
4823 vsi->offset_loaded, &oes->rx_broadcast,
4824 &nes->rx_broadcast);
4825 /* enlarge the limitation when rx_bytes overflowed */
4826 if (vsi->offset_loaded) {
4827 if (ICE_RXTX_BYTES_LOW(vsi->old_rx_bytes) > nes->rx_bytes)
4828 nes->rx_bytes += (uint64_t)1 << ICE_40_BIT_WIDTH;
4829 nes->rx_bytes += ICE_RXTX_BYTES_HIGH(vsi->old_rx_bytes);
4831 vsi->old_rx_bytes = nes->rx_bytes;
4832 /* exclude CRC bytes */
4833 nes->rx_bytes -= (nes->rx_unicast + nes->rx_multicast +
4834 nes->rx_broadcast) * RTE_ETHER_CRC_LEN;
4836 ice_stat_update_32(hw, GLV_RDPC(idx), vsi->offset_loaded,
4837 &oes->rx_discards, &nes->rx_discards);
4838 /* GLV_REPC not supported */
4839 /* GLV_RMPC not supported */
4840 ice_stat_update_32(hw, GLSWID_RUPP(idx), vsi->offset_loaded,
4841 &oes->rx_unknown_protocol,
4842 &nes->rx_unknown_protocol);
4843 ice_stat_update_40(hw, GLV_GOTCH(idx), GLV_GOTCL(idx),
4844 vsi->offset_loaded, &oes->tx_bytes,
4846 ice_stat_update_40(hw, GLV_UPTCH(idx), GLV_UPTCL(idx),
4847 vsi->offset_loaded, &oes->tx_unicast,
4849 ice_stat_update_40(hw, GLV_MPTCH(idx), GLV_MPTCL(idx),
4850 vsi->offset_loaded, &oes->tx_multicast,
4851 &nes->tx_multicast);
4852 ice_stat_update_40(hw, GLV_BPTCH(idx), GLV_BPTCL(idx),
4853 vsi->offset_loaded, &oes->tx_broadcast,
4854 &nes->tx_broadcast);
4855 /* GLV_TDPC not supported */
4856 ice_stat_update_32(hw, GLV_TEPC(idx), vsi->offset_loaded,
4857 &oes->tx_errors, &nes->tx_errors);
4858 /* enlarge the limitation when tx_bytes overflowed */
4859 if (vsi->offset_loaded) {
4860 if (ICE_RXTX_BYTES_LOW(vsi->old_tx_bytes) > nes->tx_bytes)
4861 nes->tx_bytes += (uint64_t)1 << ICE_40_BIT_WIDTH;
4862 nes->tx_bytes += ICE_RXTX_BYTES_HIGH(vsi->old_tx_bytes);
4864 vsi->old_tx_bytes = nes->tx_bytes;
4865 vsi->offset_loaded = true;
4867 PMD_DRV_LOG(DEBUG, "************** VSI[%u] stats start **************",
4869 PMD_DRV_LOG(DEBUG, "rx_bytes: %"PRIu64"", nes->rx_bytes);
4870 PMD_DRV_LOG(DEBUG, "rx_unicast: %"PRIu64"", nes->rx_unicast);
4871 PMD_DRV_LOG(DEBUG, "rx_multicast: %"PRIu64"", nes->rx_multicast);
4872 PMD_DRV_LOG(DEBUG, "rx_broadcast: %"PRIu64"", nes->rx_broadcast);
4873 PMD_DRV_LOG(DEBUG, "rx_discards: %"PRIu64"", nes->rx_discards);
4874 PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
4875 nes->rx_unknown_protocol);
4876 PMD_DRV_LOG(DEBUG, "tx_bytes: %"PRIu64"", nes->tx_bytes);
4877 PMD_DRV_LOG(DEBUG, "tx_unicast: %"PRIu64"", nes->tx_unicast);
4878 PMD_DRV_LOG(DEBUG, "tx_multicast: %"PRIu64"", nes->tx_multicast);
4879 PMD_DRV_LOG(DEBUG, "tx_broadcast: %"PRIu64"", nes->tx_broadcast);
4880 PMD_DRV_LOG(DEBUG, "tx_discards: %"PRIu64"", nes->tx_discards);
4881 PMD_DRV_LOG(DEBUG, "tx_errors: %"PRIu64"", nes->tx_errors);
4882 PMD_DRV_LOG(DEBUG, "************** VSI[%u] stats end ****************",
4887 ice_read_stats_registers(struct ice_pf *pf, struct ice_hw *hw)
4889 struct ice_hw_port_stats *ns = &pf->stats; /* new stats */
4890 struct ice_hw_port_stats *os = &pf->stats_offset; /* old stats */
4892 /* Get statistics of struct ice_eth_stats */
4893 ice_stat_update_40(hw, GLPRT_GORCH(hw->port_info->lport),
4894 GLPRT_GORCL(hw->port_info->lport),
4895 pf->offset_loaded, &os->eth.rx_bytes,
4897 ice_stat_update_40(hw, GLPRT_UPRCH(hw->port_info->lport),
4898 GLPRT_UPRCL(hw->port_info->lport),
4899 pf->offset_loaded, &os->eth.rx_unicast,
4900 &ns->eth.rx_unicast);
4901 ice_stat_update_40(hw, GLPRT_MPRCH(hw->port_info->lport),
4902 GLPRT_MPRCL(hw->port_info->lport),
4903 pf->offset_loaded, &os->eth.rx_multicast,
4904 &ns->eth.rx_multicast);
4905 ice_stat_update_40(hw, GLPRT_BPRCH(hw->port_info->lport),
4906 GLPRT_BPRCL(hw->port_info->lport),
4907 pf->offset_loaded, &os->eth.rx_broadcast,
4908 &ns->eth.rx_broadcast);
4909 ice_stat_update_32(hw, PRTRPB_RDPC,
4910 pf->offset_loaded, &os->eth.rx_discards,
4911 &ns->eth.rx_discards);
4912 /* enlarge the limitation when rx_bytes overflowed */
4913 if (pf->offset_loaded) {
4914 if (ICE_RXTX_BYTES_LOW(pf->old_rx_bytes) > ns->eth.rx_bytes)
4915 ns->eth.rx_bytes += (uint64_t)1 << ICE_40_BIT_WIDTH;
4916 ns->eth.rx_bytes += ICE_RXTX_BYTES_HIGH(pf->old_rx_bytes);
4918 pf->old_rx_bytes = ns->eth.rx_bytes;
4920 /* Workaround: CRC size should not be included in byte statistics,
4921 * so subtract RTE_ETHER_CRC_LEN from the byte counter for each rx
4924 ns->eth.rx_bytes -= (ns->eth.rx_unicast + ns->eth.rx_multicast +
4925 ns->eth.rx_broadcast) * RTE_ETHER_CRC_LEN;
4927 /* GLPRT_REPC not supported */
4928 /* GLPRT_RMPC not supported */
4929 ice_stat_update_32(hw, GLSWID_RUPP(hw->port_info->lport),
4931 &os->eth.rx_unknown_protocol,
4932 &ns->eth.rx_unknown_protocol);
4933 ice_stat_update_40(hw, GLPRT_GOTCH(hw->port_info->lport),
4934 GLPRT_GOTCL(hw->port_info->lport),
4935 pf->offset_loaded, &os->eth.tx_bytes,
4937 ice_stat_update_40(hw, GLPRT_UPTCH(hw->port_info->lport),
4938 GLPRT_UPTCL(hw->port_info->lport),
4939 pf->offset_loaded, &os->eth.tx_unicast,
4940 &ns->eth.tx_unicast);
4941 ice_stat_update_40(hw, GLPRT_MPTCH(hw->port_info->lport),
4942 GLPRT_MPTCL(hw->port_info->lport),
4943 pf->offset_loaded, &os->eth.tx_multicast,
4944 &ns->eth.tx_multicast);
4945 ice_stat_update_40(hw, GLPRT_BPTCH(hw->port_info->lport),
4946 GLPRT_BPTCL(hw->port_info->lport),
4947 pf->offset_loaded, &os->eth.tx_broadcast,
4948 &ns->eth.tx_broadcast);
4949 /* enlarge the limitation when tx_bytes overflowed */
4950 if (pf->offset_loaded) {
4951 if (ICE_RXTX_BYTES_LOW(pf->old_tx_bytes) > ns->eth.tx_bytes)
4952 ns->eth.tx_bytes += (uint64_t)1 << ICE_40_BIT_WIDTH;
4953 ns->eth.tx_bytes += ICE_RXTX_BYTES_HIGH(pf->old_tx_bytes);
4955 pf->old_tx_bytes = ns->eth.tx_bytes;
4956 ns->eth.tx_bytes -= (ns->eth.tx_unicast + ns->eth.tx_multicast +
4957 ns->eth.tx_broadcast) * RTE_ETHER_CRC_LEN;
4959 /* GLPRT_TEPC not supported */
4961 /* additional port specific stats */
4962 ice_stat_update_32(hw, GLPRT_TDOLD(hw->port_info->lport),
4963 pf->offset_loaded, &os->tx_dropped_link_down,
4964 &ns->tx_dropped_link_down);
4965 ice_stat_update_32(hw, GLPRT_CRCERRS(hw->port_info->lport),
4966 pf->offset_loaded, &os->crc_errors,
4968 ice_stat_update_32(hw, GLPRT_ILLERRC(hw->port_info->lport),
4969 pf->offset_loaded, &os->illegal_bytes,
4970 &ns->illegal_bytes);
4971 /* GLPRT_ERRBC not supported */
4972 ice_stat_update_32(hw, GLPRT_MLFC(hw->port_info->lport),
4973 pf->offset_loaded, &os->mac_local_faults,
4974 &ns->mac_local_faults);
4975 ice_stat_update_32(hw, GLPRT_MRFC(hw->port_info->lport),
4976 pf->offset_loaded, &os->mac_remote_faults,
4977 &ns->mac_remote_faults);
4979 ice_stat_update_32(hw, GLPRT_RLEC(hw->port_info->lport),
4980 pf->offset_loaded, &os->rx_len_errors,
4981 &ns->rx_len_errors);
4983 ice_stat_update_32(hw, GLPRT_LXONRXC(hw->port_info->lport),
4984 pf->offset_loaded, &os->link_xon_rx,
4986 ice_stat_update_32(hw, GLPRT_LXOFFRXC(hw->port_info->lport),
4987 pf->offset_loaded, &os->link_xoff_rx,
4989 ice_stat_update_32(hw, GLPRT_LXONTXC(hw->port_info->lport),
4990 pf->offset_loaded, &os->link_xon_tx,
4992 ice_stat_update_32(hw, GLPRT_LXOFFTXC(hw->port_info->lport),
4993 pf->offset_loaded, &os->link_xoff_tx,
4995 ice_stat_update_40(hw, GLPRT_PRC64H(hw->port_info->lport),
4996 GLPRT_PRC64L(hw->port_info->lport),
4997 pf->offset_loaded, &os->rx_size_64,
4999 ice_stat_update_40(hw, GLPRT_PRC127H(hw->port_info->lport),
5000 GLPRT_PRC127L(hw->port_info->lport),
5001 pf->offset_loaded, &os->rx_size_127,
5003 ice_stat_update_40(hw, GLPRT_PRC255H(hw->port_info->lport),
5004 GLPRT_PRC255L(hw->port_info->lport),
5005 pf->offset_loaded, &os->rx_size_255,
5007 ice_stat_update_40(hw, GLPRT_PRC511H(hw->port_info->lport),
5008 GLPRT_PRC511L(hw->port_info->lport),
5009 pf->offset_loaded, &os->rx_size_511,
5011 ice_stat_update_40(hw, GLPRT_PRC1023H(hw->port_info->lport),
5012 GLPRT_PRC1023L(hw->port_info->lport),
5013 pf->offset_loaded, &os->rx_size_1023,
5015 ice_stat_update_40(hw, GLPRT_PRC1522H(hw->port_info->lport),
5016 GLPRT_PRC1522L(hw->port_info->lport),
5017 pf->offset_loaded, &os->rx_size_1522,
5019 ice_stat_update_40(hw, GLPRT_PRC9522H(hw->port_info->lport),
5020 GLPRT_PRC9522L(hw->port_info->lport),
5021 pf->offset_loaded, &os->rx_size_big,
5023 ice_stat_update_32(hw, GLPRT_RUC(hw->port_info->lport),
5024 pf->offset_loaded, &os->rx_undersize,
5026 ice_stat_update_32(hw, GLPRT_RFC(hw->port_info->lport),
5027 pf->offset_loaded, &os->rx_fragments,
5029 ice_stat_update_32(hw, GLPRT_ROC(hw->port_info->lport),
5030 pf->offset_loaded, &os->rx_oversize,
5032 ice_stat_update_32(hw, GLPRT_RJC(hw->port_info->lport),
5033 pf->offset_loaded, &os->rx_jabber,
5035 ice_stat_update_40(hw, GLPRT_PTC64H(hw->port_info->lport),
5036 GLPRT_PTC64L(hw->port_info->lport),
5037 pf->offset_loaded, &os->tx_size_64,
5039 ice_stat_update_40(hw, GLPRT_PTC127H(hw->port_info->lport),
5040 GLPRT_PTC127L(hw->port_info->lport),
5041 pf->offset_loaded, &os->tx_size_127,
5043 ice_stat_update_40(hw, GLPRT_PTC255H(hw->port_info->lport),
5044 GLPRT_PTC255L(hw->port_info->lport),
5045 pf->offset_loaded, &os->tx_size_255,
5047 ice_stat_update_40(hw, GLPRT_PTC511H(hw->port_info->lport),
5048 GLPRT_PTC511L(hw->port_info->lport),
5049 pf->offset_loaded, &os->tx_size_511,
5051 ice_stat_update_40(hw, GLPRT_PTC1023H(hw->port_info->lport),
5052 GLPRT_PTC1023L(hw->port_info->lport),
5053 pf->offset_loaded, &os->tx_size_1023,
5055 ice_stat_update_40(hw, GLPRT_PTC1522H(hw->port_info->lport),
5056 GLPRT_PTC1522L(hw->port_info->lport),
5057 pf->offset_loaded, &os->tx_size_1522,
5059 ice_stat_update_40(hw, GLPRT_PTC9522H(hw->port_info->lport),
5060 GLPRT_PTC9522L(hw->port_info->lport),
5061 pf->offset_loaded, &os->tx_size_big,
5064 /* GLPRT_MSPDC not supported */
5065 /* GLPRT_XEC not supported */
5067 pf->offset_loaded = true;
5070 ice_update_vsi_stats(pf->main_vsi);
5073 /* Get all statistics of a port */
5075 ice_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
5077 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5078 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5079 struct ice_hw_port_stats *ns = &pf->stats; /* new stats */
5081 /* call read registers - updates values, now write them to struct */
5082 ice_read_stats_registers(pf, hw);
5084 stats->ipackets = pf->main_vsi->eth_stats.rx_unicast +
5085 pf->main_vsi->eth_stats.rx_multicast +
5086 pf->main_vsi->eth_stats.rx_broadcast -
5087 pf->main_vsi->eth_stats.rx_discards;
5088 stats->opackets = ns->eth.tx_unicast +
5089 ns->eth.tx_multicast +
5090 ns->eth.tx_broadcast;
5091 stats->ibytes = pf->main_vsi->eth_stats.rx_bytes;
5092 stats->obytes = ns->eth.tx_bytes;
5093 stats->oerrors = ns->eth.tx_errors +
5094 pf->main_vsi->eth_stats.tx_errors;
5097 stats->imissed = ns->eth.rx_discards +
5098 pf->main_vsi->eth_stats.rx_discards;
5099 stats->ierrors = ns->crc_errors +
5101 ns->rx_oversize + ns->rx_fragments + ns->rx_jabber;
5103 PMD_DRV_LOG(DEBUG, "*************** PF stats start *****************");
5104 PMD_DRV_LOG(DEBUG, "rx_bytes: %"PRIu64"", ns->eth.rx_bytes);
5105 PMD_DRV_LOG(DEBUG, "rx_unicast: %"PRIu64"", ns->eth.rx_unicast);
5106 PMD_DRV_LOG(DEBUG, "rx_multicast:%"PRIu64"", ns->eth.rx_multicast);
5107 PMD_DRV_LOG(DEBUG, "rx_broadcast:%"PRIu64"", ns->eth.rx_broadcast);
5108 PMD_DRV_LOG(DEBUG, "rx_discards:%"PRIu64"", ns->eth.rx_discards);
5109 PMD_DRV_LOG(DEBUG, "vsi rx_discards:%"PRIu64"",
5110 pf->main_vsi->eth_stats.rx_discards);
5111 PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
5112 ns->eth.rx_unknown_protocol);
5113 PMD_DRV_LOG(DEBUG, "tx_bytes: %"PRIu64"", ns->eth.tx_bytes);
5114 PMD_DRV_LOG(DEBUG, "tx_unicast: %"PRIu64"", ns->eth.tx_unicast);
5115 PMD_DRV_LOG(DEBUG, "tx_multicast:%"PRIu64"", ns->eth.tx_multicast);
5116 PMD_DRV_LOG(DEBUG, "tx_broadcast:%"PRIu64"", ns->eth.tx_broadcast);
5117 PMD_DRV_LOG(DEBUG, "tx_discards:%"PRIu64"", ns->eth.tx_discards);
5118 PMD_DRV_LOG(DEBUG, "vsi tx_discards:%"PRIu64"",
5119 pf->main_vsi->eth_stats.tx_discards);
5120 PMD_DRV_LOG(DEBUG, "tx_errors: %"PRIu64"", ns->eth.tx_errors);
5122 PMD_DRV_LOG(DEBUG, "tx_dropped_link_down: %"PRIu64"",
5123 ns->tx_dropped_link_down);
5124 PMD_DRV_LOG(DEBUG, "crc_errors: %"PRIu64"", ns->crc_errors);
5125 PMD_DRV_LOG(DEBUG, "illegal_bytes: %"PRIu64"",
5127 PMD_DRV_LOG(DEBUG, "error_bytes: %"PRIu64"", ns->error_bytes);
5128 PMD_DRV_LOG(DEBUG, "mac_local_faults: %"PRIu64"",
5129 ns->mac_local_faults);
5130 PMD_DRV_LOG(DEBUG, "mac_remote_faults: %"PRIu64"",
5131 ns->mac_remote_faults);
5132 PMD_DRV_LOG(DEBUG, "link_xon_rx: %"PRIu64"", ns->link_xon_rx);
5133 PMD_DRV_LOG(DEBUG, "link_xoff_rx: %"PRIu64"", ns->link_xoff_rx);
5134 PMD_DRV_LOG(DEBUG, "link_xon_tx: %"PRIu64"", ns->link_xon_tx);
5135 PMD_DRV_LOG(DEBUG, "link_xoff_tx: %"PRIu64"", ns->link_xoff_tx);
5136 PMD_DRV_LOG(DEBUG, "rx_size_64: %"PRIu64"", ns->rx_size_64);
5137 PMD_DRV_LOG(DEBUG, "rx_size_127: %"PRIu64"", ns->rx_size_127);
5138 PMD_DRV_LOG(DEBUG, "rx_size_255: %"PRIu64"", ns->rx_size_255);
5139 PMD_DRV_LOG(DEBUG, "rx_size_511: %"PRIu64"", ns->rx_size_511);
5140 PMD_DRV_LOG(DEBUG, "rx_size_1023: %"PRIu64"", ns->rx_size_1023);
5141 PMD_DRV_LOG(DEBUG, "rx_size_1522: %"PRIu64"", ns->rx_size_1522);
5142 PMD_DRV_LOG(DEBUG, "rx_size_big: %"PRIu64"", ns->rx_size_big);
5143 PMD_DRV_LOG(DEBUG, "rx_undersize: %"PRIu64"", ns->rx_undersize);
5144 PMD_DRV_LOG(DEBUG, "rx_fragments: %"PRIu64"", ns->rx_fragments);
5145 PMD_DRV_LOG(DEBUG, "rx_oversize: %"PRIu64"", ns->rx_oversize);
5146 PMD_DRV_LOG(DEBUG, "rx_jabber: %"PRIu64"", ns->rx_jabber);
5147 PMD_DRV_LOG(DEBUG, "tx_size_64: %"PRIu64"", ns->tx_size_64);
5148 PMD_DRV_LOG(DEBUG, "tx_size_127: %"PRIu64"", ns->tx_size_127);
5149 PMD_DRV_LOG(DEBUG, "tx_size_255: %"PRIu64"", ns->tx_size_255);
5150 PMD_DRV_LOG(DEBUG, "tx_size_511: %"PRIu64"", ns->tx_size_511);
5151 PMD_DRV_LOG(DEBUG, "tx_size_1023: %"PRIu64"", ns->tx_size_1023);
5152 PMD_DRV_LOG(DEBUG, "tx_size_1522: %"PRIu64"", ns->tx_size_1522);
5153 PMD_DRV_LOG(DEBUG, "tx_size_big: %"PRIu64"", ns->tx_size_big);
5154 PMD_DRV_LOG(DEBUG, "rx_len_errors: %"PRIu64"", ns->rx_len_errors);
5155 PMD_DRV_LOG(DEBUG, "************* PF stats end ****************");
5159 /* Reset the statistics */
5161 ice_stats_reset(struct rte_eth_dev *dev)
5163 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5164 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5166 /* Mark PF and VSI stats to update the offset, aka "reset" */
5167 pf->offset_loaded = false;
5169 pf->main_vsi->offset_loaded = false;
5171 /* read the stats, reading current register values into offset */
5172 ice_read_stats_registers(pf, hw);
5178 ice_xstats_calc_num(void)
5182 num = ICE_NB_ETH_XSTATS + ICE_NB_HW_PORT_XSTATS;
5188 ice_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
5191 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5192 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5195 struct ice_hw_port_stats *hw_stats = &pf->stats;
5197 count = ice_xstats_calc_num();
5201 ice_read_stats_registers(pf, hw);
5208 /* Get stats from ice_eth_stats struct */
5209 for (i = 0; i < ICE_NB_ETH_XSTATS; i++) {
5210 xstats[count].value =
5211 *(uint64_t *)((char *)&hw_stats->eth +
5212 ice_stats_strings[i].offset);
5213 xstats[count].id = count;
5217 /* Get individiual stats from ice_hw_port struct */
5218 for (i = 0; i < ICE_NB_HW_PORT_XSTATS; i++) {
5219 xstats[count].value =
5220 *(uint64_t *)((char *)hw_stats +
5221 ice_hw_port_strings[i].offset);
5222 xstats[count].id = count;
5229 static int ice_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
5230 struct rte_eth_xstat_name *xstats_names,
5231 __rte_unused unsigned int limit)
5233 unsigned int count = 0;
5237 return ice_xstats_calc_num();
5239 /* Note: limit checked in rte_eth_xstats_names() */
5241 /* Get stats from ice_eth_stats struct */
5242 for (i = 0; i < ICE_NB_ETH_XSTATS; i++) {
5243 strlcpy(xstats_names[count].name, ice_stats_strings[i].name,
5244 sizeof(xstats_names[count].name));
5248 /* Get individiual stats from ice_hw_port struct */
5249 for (i = 0; i < ICE_NB_HW_PORT_XSTATS; i++) {
5250 strlcpy(xstats_names[count].name, ice_hw_port_strings[i].name,
5251 sizeof(xstats_names[count].name));
5259 ice_dev_flow_ops_get(struct rte_eth_dev *dev,
5260 const struct rte_flow_ops **ops)
5265 *ops = &ice_flow_ops;
5269 /* Add UDP tunneling port */
5271 ice_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
5272 struct rte_eth_udp_tunnel *udp_tunnel)
5275 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5277 if (udp_tunnel == NULL)
5280 switch (udp_tunnel->prot_type) {
5281 case RTE_TUNNEL_TYPE_VXLAN:
5282 ret = ice_create_tunnel(hw, TNL_VXLAN, udp_tunnel->udp_port);
5285 PMD_DRV_LOG(ERR, "Invalid tunnel type");
5293 /* Delete UDP tunneling port */
5295 ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
5296 struct rte_eth_udp_tunnel *udp_tunnel)
5299 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5301 if (udp_tunnel == NULL)
5304 switch (udp_tunnel->prot_type) {
5305 case RTE_TUNNEL_TYPE_VXLAN:
5306 ret = ice_destroy_tunnel(hw, udp_tunnel->udp_port, 0);
5309 PMD_DRV_LOG(ERR, "Invalid tunnel type");
5318 ice_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
5319 struct rte_pci_device *pci_dev)
5321 return rte_eth_dev_pci_generic_probe(pci_dev,
5322 sizeof(struct ice_adapter),
5327 ice_pci_remove(struct rte_pci_device *pci_dev)
5329 return rte_eth_dev_pci_generic_remove(pci_dev, ice_dev_uninit);
5332 static struct rte_pci_driver rte_ice_pmd = {
5333 .id_table = pci_id_ice_map,
5334 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
5335 .probe = ice_pci_probe,
5336 .remove = ice_pci_remove,
5340 * Driver initialization routine.
5341 * Invoked once at EAL init time.
5342 * Register itself as the [Poll Mode] Driver of PCI devices.
5344 RTE_PMD_REGISTER_PCI(net_ice, rte_ice_pmd);
5345 RTE_PMD_REGISTER_PCI_TABLE(net_ice, pci_id_ice_map);
5346 RTE_PMD_REGISTER_KMOD_DEP(net_ice, "* igb_uio | uio_pci_generic | vfio-pci");
5347 RTE_PMD_REGISTER_PARAM_STRING(net_ice,
5348 ICE_HW_DEBUG_MASK_ARG "=0xXXX"
5349 ICE_PROTO_XTR_ARG "=[queue:]<vlan|ipv4|ipv6|ipv6_flow|tcp|ip_offset>"
5350 ICE_SAFE_MODE_SUPPORT_ARG "=<0|1>"
5351 ICE_PIPELINE_MODE_SUPPORT_ARG "=<0|1>");
5353 RTE_LOG_REGISTER_SUFFIX(ice_logtype_init, init, NOTICE);
5354 RTE_LOG_REGISTER_SUFFIX(ice_logtype_driver, driver, NOTICE);
5355 #ifdef RTE_ETHDEV_DEBUG_RX
5356 RTE_LOG_REGISTER_SUFFIX(ice_logtype_rx, rx, DEBUG);
5358 #ifdef RTE_ETHDEV_DEBUG_TX
5359 RTE_LOG_REGISTER_SUFFIX(ice_logtype_tx, tx, DEBUG);