1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Intel Corporation
5 #include <rte_string_fns.h>
6 #include <ethdev_pci.h>
13 #include "base/ice_sched.h"
14 #include "base/ice_flow.h"
15 #include "base/ice_dcb.h"
16 #include "base/ice_common.h"
18 #include "rte_pmd_ice.h"
19 #include "ice_ethdev.h"
21 #include "ice_generic_flow.h"
24 #define ICE_SAFE_MODE_SUPPORT_ARG "safe-mode-support"
25 #define ICE_PIPELINE_MODE_SUPPORT_ARG "pipeline-mode-support"
26 #define ICE_PROTO_XTR_ARG "proto_xtr"
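/*
 * Illustrative usage only (not part of the driver): these keys are passed
 * as EAL device arguments on the application command line, e.g.
 *   dpdk-testpmd -a 0000:18:00.0,safe-mode-support=1,proto_xtr=vlan -- -i
 * The PCI address above is a placeholder.
 */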
28 static const char * const ice_valid_args[] = {
29 ICE_SAFE_MODE_SUPPORT_ARG,
30 ICE_PIPELINE_MODE_SUPPORT_ARG,
35 static const struct rte_mbuf_dynfield ice_proto_xtr_metadata_param = {
36 .name = "intel_pmd_dynfield_proto_xtr_metadata",
37 .size = sizeof(uint32_t),
38 .align = __alignof__(uint32_t),
42 struct proto_xtr_ol_flag {
43 const struct rte_mbuf_dynflag param;
48 static bool ice_proto_xtr_hw_support[PROTO_XTR_MAX];
50 static struct proto_xtr_ol_flag ice_proto_xtr_ol_flag_params[] = {
52 .param = { .name = "intel_pmd_dynflag_proto_xtr_vlan" },
53 .ol_flag = &rte_net_ice_dynflag_proto_xtr_vlan_mask },
55 .param = { .name = "intel_pmd_dynflag_proto_xtr_ipv4" },
56 .ol_flag = &rte_net_ice_dynflag_proto_xtr_ipv4_mask },
58 .param = { .name = "intel_pmd_dynflag_proto_xtr_ipv6" },
59 .ol_flag = &rte_net_ice_dynflag_proto_xtr_ipv6_mask },
60 [PROTO_XTR_IPV6_FLOW] = {
61 .param = { .name = "intel_pmd_dynflag_proto_xtr_ipv6_flow" },
62 .ol_flag = &rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask },
64 .param = { .name = "intel_pmd_dynflag_proto_xtr_tcp" },
65 .ol_flag = &rte_net_ice_dynflag_proto_xtr_tcp_mask },
66 [PROTO_XTR_IP_OFFSET] = {
67 .param = { .name = "intel_pmd_dynflag_proto_xtr_ip_offset" },
68 .ol_flag = &rte_net_ice_dynflag_proto_xtr_ip_offset_mask },
71 #define ICE_OS_DEFAULT_PKG_NAME "ICE OS Default Package"
72 #define ICE_COMMS_PKG_NAME "ICE COMMS Package"
73 #define ICE_MAX_RES_DESC_NUM 1024
75 static int ice_dev_configure(struct rte_eth_dev *dev);
76 static int ice_dev_start(struct rte_eth_dev *dev);
77 static int ice_dev_stop(struct rte_eth_dev *dev);
78 static int ice_dev_close(struct rte_eth_dev *dev);
79 static int ice_dev_reset(struct rte_eth_dev *dev);
80 static int ice_dev_info_get(struct rte_eth_dev *dev,
81 struct rte_eth_dev_info *dev_info);
82 static int ice_link_update(struct rte_eth_dev *dev,
83 int wait_to_complete);
84 static int ice_dev_set_link_up(struct rte_eth_dev *dev);
85 static int ice_dev_set_link_down(struct rte_eth_dev *dev);
87 static int ice_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
88 static int ice_vlan_offload_set(struct rte_eth_dev *dev, int mask);
89 static int ice_rss_reta_update(struct rte_eth_dev *dev,
90 struct rte_eth_rss_reta_entry64 *reta_conf,
92 static int ice_rss_reta_query(struct rte_eth_dev *dev,
93 struct rte_eth_rss_reta_entry64 *reta_conf,
95 static int ice_rss_hash_update(struct rte_eth_dev *dev,
96 struct rte_eth_rss_conf *rss_conf);
97 static int ice_rss_hash_conf_get(struct rte_eth_dev *dev,
98 struct rte_eth_rss_conf *rss_conf);
99 static int ice_promisc_enable(struct rte_eth_dev *dev);
100 static int ice_promisc_disable(struct rte_eth_dev *dev);
101 static int ice_allmulti_enable(struct rte_eth_dev *dev);
102 static int ice_allmulti_disable(struct rte_eth_dev *dev);
103 static int ice_vlan_filter_set(struct rte_eth_dev *dev,
106 static int ice_macaddr_set(struct rte_eth_dev *dev,
107 struct rte_ether_addr *mac_addr);
108 static int ice_macaddr_add(struct rte_eth_dev *dev,
109 struct rte_ether_addr *mac_addr,
110 __rte_unused uint32_t index,
112 static void ice_macaddr_remove(struct rte_eth_dev *dev, uint32_t index);
113 static int ice_rx_queue_intr_enable(struct rte_eth_dev *dev,
115 static int ice_rx_queue_intr_disable(struct rte_eth_dev *dev,
117 static int ice_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
119 static int ice_vlan_pvid_set(struct rte_eth_dev *dev,
120 uint16_t pvid, int on);
121 static int ice_get_eeprom_length(struct rte_eth_dev *dev);
122 static int ice_get_eeprom(struct rte_eth_dev *dev,
123 struct rte_dev_eeprom_info *eeprom);
124 static int ice_stats_get(struct rte_eth_dev *dev,
125 struct rte_eth_stats *stats);
126 static int ice_stats_reset(struct rte_eth_dev *dev);
127 static int ice_xstats_get(struct rte_eth_dev *dev,
128 struct rte_eth_xstat *xstats, unsigned int n);
129 static int ice_xstats_get_names(struct rte_eth_dev *dev,
130 struct rte_eth_xstat_name *xstats_names,
132 static int ice_dev_flow_ops_get(struct rte_eth_dev *dev,
133 const struct rte_flow_ops **ops);
134 static int ice_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
135 struct rte_eth_udp_tunnel *udp_tunnel);
136 static int ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
137 struct rte_eth_udp_tunnel *udp_tunnel);
139 static const struct rte_pci_id pci_id_ice_map[] = {
140 { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_BACKPLANE) },
141 { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_SFP) },
142 { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_10G_BASE_T) },
143 { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_1GBE) },
144 { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_QSFP) },
145 { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_BACKPLANE) },
146 { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP) },
147 { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP) },
148 { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810_XXV_BACKPLANE) },
149 { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810_XXV_QSFP) },
150 { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810_XXV_SFP) },
151 { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823C_BACKPLANE) },
152 { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823C_QSFP) },
153 { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823C_SFP) },
154 { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823C_10G_BASE_T) },
155 { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823C_SGMII) },
156 { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_BACKPLANE) },
157 { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_QSFP) },
158 { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_SFP) },
159 { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_10G_BASE_T) },
160 { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_SGMII) },
161 { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822L_BACKPLANE) },
162 { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822L_SFP) },
163 { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822L_10G_BASE_T) },
164 { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822L_SGMII) },
165 { .vendor_id = 0, /* sentinel */ },
168 static const struct eth_dev_ops ice_eth_dev_ops = {
169 .dev_configure = ice_dev_configure,
170 .dev_start = ice_dev_start,
171 .dev_stop = ice_dev_stop,
172 .dev_close = ice_dev_close,
173 .dev_reset = ice_dev_reset,
174 .dev_set_link_up = ice_dev_set_link_up,
175 .dev_set_link_down = ice_dev_set_link_down,
176 .rx_queue_start = ice_rx_queue_start,
177 .rx_queue_stop = ice_rx_queue_stop,
178 .tx_queue_start = ice_tx_queue_start,
179 .tx_queue_stop = ice_tx_queue_stop,
180 .rx_queue_setup = ice_rx_queue_setup,
181 .rx_queue_release = ice_rx_queue_release,
182 .tx_queue_setup = ice_tx_queue_setup,
183 .tx_queue_release = ice_tx_queue_release,
184 .dev_infos_get = ice_dev_info_get,
185 .dev_supported_ptypes_get = ice_dev_supported_ptypes_get,
186 .link_update = ice_link_update,
187 .mtu_set = ice_mtu_set,
188 .mac_addr_set = ice_macaddr_set,
189 .mac_addr_add = ice_macaddr_add,
190 .mac_addr_remove = ice_macaddr_remove,
191 .vlan_filter_set = ice_vlan_filter_set,
192 .vlan_offload_set = ice_vlan_offload_set,
193 .reta_update = ice_rss_reta_update,
194 .reta_query = ice_rss_reta_query,
195 .rss_hash_update = ice_rss_hash_update,
196 .rss_hash_conf_get = ice_rss_hash_conf_get,
197 .promiscuous_enable = ice_promisc_enable,
198 .promiscuous_disable = ice_promisc_disable,
199 .allmulticast_enable = ice_allmulti_enable,
200 .allmulticast_disable = ice_allmulti_disable,
201 .rx_queue_intr_enable = ice_rx_queue_intr_enable,
202 .rx_queue_intr_disable = ice_rx_queue_intr_disable,
203 .fw_version_get = ice_fw_version_get,
204 .vlan_pvid_set = ice_vlan_pvid_set,
205 .rxq_info_get = ice_rxq_info_get,
206 .txq_info_get = ice_txq_info_get,
207 .rx_burst_mode_get = ice_rx_burst_mode_get,
208 .tx_burst_mode_get = ice_tx_burst_mode_get,
209 .get_eeprom_length = ice_get_eeprom_length,
210 .get_eeprom = ice_get_eeprom,
211 .stats_get = ice_stats_get,
212 .stats_reset = ice_stats_reset,
213 .xstats_get = ice_xstats_get,
214 .xstats_get_names = ice_xstats_get_names,
215 .xstats_reset = ice_stats_reset,
216 .flow_ops_get = ice_dev_flow_ops_get,
217 .udp_tunnel_port_add = ice_dev_udp_tunnel_port_add,
218 .udp_tunnel_port_del = ice_dev_udp_tunnel_port_del,
219 .tx_done_cleanup = ice_tx_done_cleanup,
220 .get_monitor_addr = ice_get_monitor_addr,
223 /* Store statistics names and their offsets in the stats structure */
224 struct ice_xstats_name_off {
225 char name[RTE_ETH_XSTATS_NAME_SIZE];
229 static const struct ice_xstats_name_off ice_stats_strings[] = {
230 {"rx_unicast_packets", offsetof(struct ice_eth_stats, rx_unicast)},
231 {"rx_multicast_packets", offsetof(struct ice_eth_stats, rx_multicast)},
232 {"rx_broadcast_packets", offsetof(struct ice_eth_stats, rx_broadcast)},
233 {"rx_dropped_packets", offsetof(struct ice_eth_stats, rx_discards)},
234 {"rx_unknown_protocol_packets", offsetof(struct ice_eth_stats,
235 rx_unknown_protocol)},
236 {"tx_unicast_packets", offsetof(struct ice_eth_stats, tx_unicast)},
237 {"tx_multicast_packets", offsetof(struct ice_eth_stats, tx_multicast)},
238 {"tx_broadcast_packets", offsetof(struct ice_eth_stats, tx_broadcast)},
239 {"tx_dropped_packets", offsetof(struct ice_eth_stats, tx_discards)},
242 #define ICE_NB_ETH_XSTATS (sizeof(ice_stats_strings) / \
243 sizeof(ice_stats_strings[0]))
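/*
 * Sketch of how these name/offset tables are consumed (illustration only,
 * with a hypothetical 'eth_stats' of type struct ice_eth_stats): each xstat
 * value is read by adding the recorded offset to the stats base, assuming
 * every counter is a 64-bit field:
 *   uint64_t val = *(uint64_t *)((char *)&eth_stats +
 *                                ice_stats_strings[i].offset);
 */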
245 static const struct ice_xstats_name_off ice_hw_port_strings[] = {
246 {"tx_link_down_dropped", offsetof(struct ice_hw_port_stats,
247 tx_dropped_link_down)},
248 {"rx_crc_errors", offsetof(struct ice_hw_port_stats, crc_errors)},
249 {"rx_illegal_byte_errors", offsetof(struct ice_hw_port_stats,
251 {"rx_error_bytes", offsetof(struct ice_hw_port_stats, error_bytes)},
252 {"mac_local_errors", offsetof(struct ice_hw_port_stats,
254 {"mac_remote_errors", offsetof(struct ice_hw_port_stats,
256 {"rx_len_errors", offsetof(struct ice_hw_port_stats,
258 {"tx_xon_packets", offsetof(struct ice_hw_port_stats, link_xon_tx)},
259 {"rx_xon_packets", offsetof(struct ice_hw_port_stats, link_xon_rx)},
260 {"tx_xoff_packets", offsetof(struct ice_hw_port_stats, link_xoff_tx)},
261 {"rx_xoff_packets", offsetof(struct ice_hw_port_stats, link_xoff_rx)},
262 {"rx_size_64_packets", offsetof(struct ice_hw_port_stats, rx_size_64)},
263 {"rx_size_65_to_127_packets", offsetof(struct ice_hw_port_stats,
265 {"rx_size_128_to_255_packets", offsetof(struct ice_hw_port_stats,
267 {"rx_size_256_to_511_packets", offsetof(struct ice_hw_port_stats,
269 {"rx_size_512_to_1023_packets", offsetof(struct ice_hw_port_stats,
271 {"rx_size_1024_to_1522_packets", offsetof(struct ice_hw_port_stats,
273 {"rx_size_1523_to_max_packets", offsetof(struct ice_hw_port_stats,
275 {"rx_undersized_errors", offsetof(struct ice_hw_port_stats,
277 {"rx_oversize_errors", offsetof(struct ice_hw_port_stats,
279 {"rx_mac_short_pkt_dropped", offsetof(struct ice_hw_port_stats,
280 mac_short_pkt_dropped)},
281 {"rx_fragmented_errors", offsetof(struct ice_hw_port_stats,
283 {"rx_jabber_errors", offsetof(struct ice_hw_port_stats, rx_jabber)},
284 {"tx_size_64_packets", offsetof(struct ice_hw_port_stats, tx_size_64)},
285 {"tx_size_65_to_127_packets", offsetof(struct ice_hw_port_stats,
287 {"tx_size_128_to_255_packets", offsetof(struct ice_hw_port_stats,
289 {"tx_size_256_to_511_packets", offsetof(struct ice_hw_port_stats,
291 {"tx_size_512_to_1023_packets", offsetof(struct ice_hw_port_stats,
293 {"tx_size_1024_to_1522_packets", offsetof(struct ice_hw_port_stats,
295 {"tx_size_1523_to_max_packets", offsetof(struct ice_hw_port_stats,
299 #define ICE_NB_HW_PORT_XSTATS (sizeof(ice_hw_port_strings) / \
300 sizeof(ice_hw_port_strings[0]))
303 ice_init_controlq_parameter(struct ice_hw *hw)
305 /* fields for adminq */
306 hw->adminq.num_rq_entries = ICE_ADMINQ_LEN;
307 hw->adminq.num_sq_entries = ICE_ADMINQ_LEN;
308 hw->adminq.rq_buf_size = ICE_ADMINQ_BUF_SZ;
309 hw->adminq.sq_buf_size = ICE_ADMINQ_BUF_SZ;
311 /* fields for mailboxq; DPDK is used as the PF host */
312 hw->mailboxq.num_rq_entries = ICE_MAILBOXQ_LEN;
313 hw->mailboxq.num_sq_entries = ICE_MAILBOXQ_LEN;
314 hw->mailboxq.rq_buf_size = ICE_MAILBOXQ_BUF_SZ;
315 hw->mailboxq.sq_buf_size = ICE_MAILBOXQ_BUF_SZ;
319 lookup_proto_xtr_type(const char *xtr_name)
323 enum proto_xtr_type type;
325 { "vlan", PROTO_XTR_VLAN },
326 { "ipv4", PROTO_XTR_IPV4 },
327 { "ipv6", PROTO_XTR_IPV6 },
328 { "ipv6_flow", PROTO_XTR_IPV6_FLOW },
329 { "tcp", PROTO_XTR_TCP },
330 { "ip_offset", PROTO_XTR_IP_OFFSET },
334 for (i = 0; i < RTE_DIM(xtr_type_map); i++) {
335 if (strcmp(xtr_name, xtr_type_map[i].name) == 0)
336 return xtr_type_map[i].type;
343 * Parse an element; an element can be a single number/range or a '(' ')' group
344 * 1) A single number element is just a number, e.g. 9
345 * 2) A single range element is two numbers joined by a '-', e.g. 2-6
346 * 3) A group element combines multiple 1) or 2) with '( )', e.g. (0,2-4,6)
347 * Within a group element, '-' is the range separator and
348 * ',' separates single numbers/ranges.
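 *
 * A complete devargs value combining these forms might look like
 * (illustrative): proto_xtr='[(1,2-3,8-9):tcp,10-13:vlan]', mapping
 * queues 1,2,3,8,9 to TCP extraction and queues 10-13 to VLAN extraction.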
351 parse_queue_set(const char *input, int xtr_type, struct ice_devargs *devargs)
353 const char *str = input;
358 while (isblank(*str))
361 if (!isdigit(*str) && *str != '(')
364 /* process a single number or a single range of numbers */
367 idx = strtoul(str, &end, 10);
368 if (errno || end == NULL || idx >= ICE_MAX_QUEUE_NUM)
371 while (isblank(*end))
377 /* process single <number>-<number> */
380 while (isblank(*end))
386 idx = strtoul(end, &end, 10);
387 if (errno || end == NULL || idx >= ICE_MAX_QUEUE_NUM)
391 while (isblank(*end))
398 for (idx = RTE_MIN(min, max);
399 idx <= RTE_MAX(min, max); idx++)
400 devargs->proto_xtr[idx] = xtr_type;
405 /* process set within bracket */
407 while (isblank(*str))
412 min = ICE_MAX_QUEUE_NUM;
414 /* advance to the first digit */
415 while (isblank(*str))
420 /* get the digit value */
422 idx = strtoul(str, &end, 10);
423 if (errno || end == NULL || idx >= ICE_MAX_QUEUE_NUM)
426 /* advance to a separator: '-', ',' or ')' */
427 while (isblank(*end))
430 if (min == ICE_MAX_QUEUE_NUM)
432 else /* avoid continuous '-' */
434 } else if (*end == ',' || *end == ')') {
436 if (min == ICE_MAX_QUEUE_NUM)
439 for (idx = RTE_MIN(min, max);
440 idx <= RTE_MAX(min, max); idx++)
441 devargs->proto_xtr[idx] = xtr_type;
443 min = ICE_MAX_QUEUE_NUM;
449 } while (*end != ')' && *end != '\0');
455 parse_queue_proto_xtr(const char *queues, struct ice_devargs *devargs)
457 const char *queue_start;
462 while (isblank(*queues))
465 if (*queues != '[') {
466 xtr_type = lookup_proto_xtr_type(queues);
470 devargs->proto_xtr_dflt = xtr_type;
477 while (isblank(*queues))
482 queue_start = queues;
484 /* go across a complete bracket */
485 if (*queue_start == '(') {
486 queues += strcspn(queues, ")");
491 /* scan the separator ':' */
492 queues += strcspn(queues, ":");
493 if (*queues++ != ':')
495 while (isblank(*queues))
498 for (idx = 0; ; idx++) {
499 if (isblank(queues[idx]) ||
500 queues[idx] == ',' ||
501 queues[idx] == ']' ||
505 if (idx > sizeof(xtr_name) - 2)
508 xtr_name[idx] = queues[idx];
510 xtr_name[idx] = '\0';
511 xtr_type = lookup_proto_xtr_type(xtr_name);
517 while (isblank(*queues) || *queues == ',' || *queues == ']')
520 if (parse_queue_set(queue_start, xtr_type, devargs) < 0)
522 } while (*queues != '\0');
528 handle_proto_xtr_arg(__rte_unused const char *key, const char *value,
531 struct ice_devargs *devargs = extra_args;
533 if (value == NULL || extra_args == NULL)
536 if (parse_queue_proto_xtr(value, devargs) < 0) {
538 "The protocol extraction parameter is wrong: '%s'",
547 ice_check_proto_xtr_support(struct ice_hw *hw)
549 #define FLX_REG(val, fld, idx) \
550 (((val) & GLFLXP_RXDID_FLX_WRD_##idx##_##fld##_M) >> \
551 GLFLXP_RXDID_FLX_WRD_##idx##_##fld##_S)
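/*
 * For example, FLX_REG(v, PROT_MDID, 4) expands to
 * ((v & GLFLXP_RXDID_FLX_WRD_4_PROT_MDID_M) >>
 *  GLFLXP_RXDID_FLX_WRD_4_PROT_MDID_S),
 * i.e. it extracts the protocol MDID field from flex descriptor word 4.
 */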
558 [PROTO_XTR_VLAN] = { ICE_RXDID_COMMS_AUX_VLAN,
560 ICE_PROT_EVLAN_O, ICE_PROT_VLAN_O},
561 [PROTO_XTR_IPV4] = { ICE_RXDID_COMMS_AUX_IPV4,
563 ICE_PROT_IPV4_OF_OR_S,
564 ICE_PROT_IPV4_OF_OR_S },
565 [PROTO_XTR_IPV6] = { ICE_RXDID_COMMS_AUX_IPV6,
567 ICE_PROT_IPV6_OF_OR_S,
568 ICE_PROT_IPV6_OF_OR_S },
569 [PROTO_XTR_IPV6_FLOW] = { ICE_RXDID_COMMS_AUX_IPV6_FLOW,
571 ICE_PROT_IPV6_OF_OR_S,
572 ICE_PROT_IPV6_OF_OR_S },
573 [PROTO_XTR_TCP] = { ICE_RXDID_COMMS_AUX_TCP,
575 ICE_PROT_TCP_IL, ICE_PROT_ID_INVAL },
576 [PROTO_XTR_IP_OFFSET] = { ICE_RXDID_COMMS_AUX_IP_OFFSET,
578 ICE_PROT_IPV4_OF_OR_S,
579 ICE_PROT_IPV6_OF_OR_S },
583 for (i = 0; i < RTE_DIM(xtr_sets); i++) {
584 uint32_t rxdid = xtr_sets[i].rxdid;
587 if (xtr_sets[i].protid_0 != ICE_PROT_ID_INVAL) {
588 v = ICE_READ_REG(hw, GLFLXP_RXDID_FLX_WRD_4(rxdid));
590 if (FLX_REG(v, PROT_MDID, 4) == xtr_sets[i].protid_0 &&
591 FLX_REG(v, RXDID_OPCODE, 4) == xtr_sets[i].opcode)
592 ice_proto_xtr_hw_support[i] = true;
595 if (xtr_sets[i].protid_1 != ICE_PROT_ID_INVAL) {
596 v = ICE_READ_REG(hw, GLFLXP_RXDID_FLX_WRD_5(rxdid));
598 if (FLX_REG(v, PROT_MDID, 5) == xtr_sets[i].protid_1 &&
599 FLX_REG(v, RXDID_OPCODE, 5) == xtr_sets[i].opcode)
600 ice_proto_xtr_hw_support[i] = true;
606 ice_res_pool_init(struct ice_res_pool_info *pool, uint32_t base,
609 struct pool_entry *entry;
614 entry = rte_zmalloc(NULL, sizeof(*entry), 0);
617 "Failed to allocate memory for resource pool");
621 /* initialize the queue heap */
622 pool->num_free = num;
625 LIST_INIT(&pool->alloc_list);
626 LIST_INIT(&pool->free_list);
628 /* Initialize element */
632 LIST_INSERT_HEAD(&pool->free_list, entry, next);
637 ice_res_pool_alloc(struct ice_res_pool_info *pool,
640 struct pool_entry *entry, *valid_entry;
643 PMD_INIT_LOG(ERR, "Invalid parameter");
647 if (pool->num_free < num) {
648 PMD_INIT_LOG(ERR, "No resource. ask:%u, available:%u",
649 num, pool->num_free);
654 /* Look through the free list and find the best-fit entry */
655 LIST_FOREACH(entry, &pool->free_list, next) {
656 if (entry->len >= num) {
658 if (entry->len == num) {
663 valid_entry->len > entry->len)
668 /* No entry found to satisfy the request; return */
670 PMD_INIT_LOG(ERR, "No valid entry found");
674 * The entry has exactly the requested number of queues;
675 * remove it from the free_list.
677 if (valid_entry->len == num) {
678 LIST_REMOVE(valid_entry, next);
681 * The entry has more queues than requested;
682 * create a new entry for the alloc_list and adjust the
683 * queue base and length of the free_list entry.
685 entry = rte_zmalloc(NULL, sizeof(*entry), 0);
688 "Failed to allocate memory for "
692 entry->base = valid_entry->base;
694 valid_entry->base += num;
695 valid_entry->len -= num;
699 /* Insert it into alloc list, not sorted */
700 LIST_INSERT_HEAD(&pool->alloc_list, valid_entry, next);
702 pool->num_free -= valid_entry->len;
703 pool->num_alloc += valid_entry->len;
705 return valid_entry->base + pool->base;
709 ice_res_pool_destroy(struct ice_res_pool_info *pool)
711 struct pool_entry *entry, *next_entry;
716 for (entry = LIST_FIRST(&pool->alloc_list);
717 entry && (next_entry = LIST_NEXT(entry, next), 1);
718 entry = next_entry) {
719 LIST_REMOVE(entry, next);
723 for (entry = LIST_FIRST(&pool->free_list);
724 entry && (next_entry = LIST_NEXT(entry, next), 1);
725 entry = next_entry) {
726 LIST_REMOVE(entry, next);
733 LIST_INIT(&pool->alloc_list);
734 LIST_INIT(&pool->free_list);
738 ice_vsi_config_default_rss(struct ice_aqc_vsi_props *info)
740 /* Set VSI LUT selection */
741 info->q_opt_rss = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI &
742 ICE_AQ_VSI_Q_OPT_RSS_LUT_M;
743 /* Set Hash scheme */
744 info->q_opt_rss |= ICE_AQ_VSI_Q_OPT_RSS_TPLZ &
745 ICE_AQ_VSI_Q_OPT_RSS_HASH_M;
747 info->q_opt_tc = ICE_AQ_VSI_Q_OPT_TC_OVR_M;
750 static enum ice_status
751 ice_vsi_config_tc_queue_mapping(struct ice_vsi *vsi,
752 struct ice_aqc_vsi_props *info,
753 uint8_t enabled_tcmap)
755 uint16_t bsf, qp_idx;
757 /* Default is TC0 for now; multi-TC support needs to be added later.
758 * Configure TC and queue mapping parameters: for each enabled TC,
759 * allocate qpnum_per_tc queues to that traffic class.
761 if (enabled_tcmap != 0x01) {
762 PMD_INIT_LOG(ERR, "only TC0 is supported");
766 vsi->nb_qps = RTE_MIN(vsi->nb_qps, ICE_MAX_Q_PER_TC);
767 bsf = rte_bsf32(vsi->nb_qps);
768 /* Adjust the queue number to the actual number of queues that can be used */
769 vsi->nb_qps = 0x1 << bsf;
772 /* Set tc and queue mapping with VSI */
773 info->tc_mapping[0] = rte_cpu_to_le_16((qp_idx <<
774 ICE_AQ_VSI_TC_Q_OFFSET_S) |
775 (bsf << ICE_AQ_VSI_TC_Q_NUM_S));
777 /* Associate queue number with VSI */
778 info->mapping_flags |= rte_cpu_to_le_16(ICE_AQ_VSI_Q_MAP_CONTIG);
779 info->q_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
780 info->q_mapping[1] = rte_cpu_to_le_16(vsi->nb_qps);
781 info->valid_sections |=
782 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
783 /* Set info.ingress_table and info.egress_table
784 * for the UP translate table. For now, default to a 1:1 map:
785 * -- 0b 111 110 101 100 011 010 001 000 == 0xFAC688
787 #define ICE_TC_QUEUE_TABLE_DFLT 0x00FAC688
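/*
 * Each user priority (UP) occupies 3 bits, so UP n maps to value n in the
 * 1:1 table; e.g. (0xFAC688 >> (5 * 3)) & 0x7 == 5 for UP 5 (illustration).
 */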
788 info->ingress_table = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT);
789 info->egress_table = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT);
790 info->outer_up_table = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT);
795 ice_init_mac_address(struct rte_eth_dev *dev)
797 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
799 if (!rte_is_unicast_ether_addr
800 ((struct rte_ether_addr *)hw->port_info[0].mac.lan_addr)) {
801 PMD_INIT_LOG(ERR, "Invalid MAC address");
806 (struct rte_ether_addr *)hw->port_info[0].mac.lan_addr,
807 (struct rte_ether_addr *)hw->port_info[0].mac.perm_addr);
809 dev->data->mac_addrs =
810 rte_zmalloc(NULL, sizeof(struct rte_ether_addr) * ICE_NUM_MACADDR_MAX, 0);
811 if (!dev->data->mac_addrs) {
813 "Failed to allocate memory to store mac address");
816 /* store it to dev data */
818 (struct rte_ether_addr *)hw->port_info[0].mac.perm_addr,
819 &dev->data->mac_addrs[0]);
823 /* Find a specific MAC filter */
824 static struct ice_mac_filter *
825 ice_find_mac_filter(struct ice_vsi *vsi, struct rte_ether_addr *macaddr)
827 struct ice_mac_filter *f;
829 TAILQ_FOREACH(f, &vsi->mac_list, next) {
830 if (rte_is_same_ether_addr(macaddr, &f->mac_info.mac_addr))
838 ice_add_mac_filter(struct ice_vsi *vsi, struct rte_ether_addr *mac_addr)
840 struct ice_fltr_list_entry *m_list_itr = NULL;
841 struct ice_mac_filter *f;
842 struct LIST_HEAD_TYPE list_head;
843 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
846 /* If it's added and configured, return */
847 f = ice_find_mac_filter(vsi, mac_addr);
849 PMD_DRV_LOG(INFO, "This MAC filter already exists.");
853 INIT_LIST_HEAD(&list_head);
855 m_list_itr = (struct ice_fltr_list_entry *)
856 ice_malloc(hw, sizeof(*m_list_itr));
861 ice_memcpy(m_list_itr->fltr_info.l_data.mac.mac_addr,
862 mac_addr, ETH_ALEN, ICE_NONDMA_TO_NONDMA);
863 m_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
864 m_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
865 m_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_MAC;
866 m_list_itr->fltr_info.flag = ICE_FLTR_TX;
867 m_list_itr->fltr_info.vsi_handle = vsi->idx;
869 LIST_ADD(&m_list_itr->list_entry, &list_head);
872 ret = ice_add_mac(hw, &list_head);
873 if (ret != ICE_SUCCESS) {
874 PMD_DRV_LOG(ERR, "Failed to add MAC filter");
878 /* Add the mac addr into mac list */
879 f = rte_zmalloc(NULL, sizeof(*f), 0);
881 PMD_DRV_LOG(ERR, "failed to allocate memory");
885 rte_ether_addr_copy(mac_addr, &f->mac_info.mac_addr);
886 TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
892 rte_free(m_list_itr);
897 ice_remove_mac_filter(struct ice_vsi *vsi, struct rte_ether_addr *mac_addr)
899 struct ice_fltr_list_entry *m_list_itr = NULL;
900 struct ice_mac_filter *f;
901 struct LIST_HEAD_TYPE list_head;
902 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
905 /* Can't find it, return an error */
906 f = ice_find_mac_filter(vsi, mac_addr);
910 INIT_LIST_HEAD(&list_head);
912 m_list_itr = (struct ice_fltr_list_entry *)
913 ice_malloc(hw, sizeof(*m_list_itr));
918 ice_memcpy(m_list_itr->fltr_info.l_data.mac.mac_addr,
919 mac_addr, ETH_ALEN, ICE_NONDMA_TO_NONDMA);
920 m_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
921 m_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
922 m_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_MAC;
923 m_list_itr->fltr_info.flag = ICE_FLTR_TX;
924 m_list_itr->fltr_info.vsi_handle = vsi->idx;
926 LIST_ADD(&m_list_itr->list_entry, &list_head);
928 /* remove the mac filter */
929 ret = ice_remove_mac(hw, &list_head);
930 if (ret != ICE_SUCCESS) {
931 PMD_DRV_LOG(ERR, "Failed to remove MAC filter");
936 /* Remove the mac addr from mac list */
937 TAILQ_REMOVE(&vsi->mac_list, f, next);
943 rte_free(m_list_itr);
947 /* Find a specific VLAN filter */
948 static struct ice_vlan_filter *
949 ice_find_vlan_filter(struct ice_vsi *vsi, struct ice_vlan *vlan)
951 struct ice_vlan_filter *f;
953 TAILQ_FOREACH(f, &vsi->vlan_list, next) {
954 if (vlan->tpid == f->vlan_info.vlan.tpid &&
955 vlan->vid == f->vlan_info.vlan.vid)
963 ice_add_vlan_filter(struct ice_vsi *vsi, struct ice_vlan *vlan)
965 struct ice_fltr_list_entry *v_list_itr = NULL;
966 struct ice_vlan_filter *f;
967 struct LIST_HEAD_TYPE list_head;
971 if (!vsi || vlan->vid > RTE_ETHER_MAX_VLAN_ID)
974 hw = ICE_VSI_TO_HW(vsi);
976 /* If it's added and configured, return. */
977 f = ice_find_vlan_filter(vsi, vlan);
979 PMD_DRV_LOG(INFO, "This VLAN filter already exists.");
983 if (!vsi->vlan_anti_spoof_on && !vsi->vlan_filter_on)
986 INIT_LIST_HEAD(&list_head);
988 v_list_itr = (struct ice_fltr_list_entry *)
989 ice_malloc(hw, sizeof(*v_list_itr));
994 v_list_itr->fltr_info.l_data.vlan.vlan_id = vlan->vid;
995 v_list_itr->fltr_info.l_data.vlan.tpid = vlan->tpid;
996 v_list_itr->fltr_info.l_data.vlan.tpid_valid = true;
997 v_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
998 v_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
999 v_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
1000 v_list_itr->fltr_info.flag = ICE_FLTR_TX;
1001 v_list_itr->fltr_info.vsi_handle = vsi->idx;
1003 LIST_ADD(&v_list_itr->list_entry, &list_head);
1006 ret = ice_add_vlan(hw, &list_head);
1007 if (ret != ICE_SUCCESS) {
1008 PMD_DRV_LOG(ERR, "Failed to add VLAN filter");
1013 /* Add vlan into vlan list */
1014 f = rte_zmalloc(NULL, sizeof(*f), 0);
1016 PMD_DRV_LOG(ERR, "failed to allocate memory");
1020 f->vlan_info.vlan.tpid = vlan->tpid;
1021 f->vlan_info.vlan.vid = vlan->vid;
1022 TAILQ_INSERT_TAIL(&vsi->vlan_list, f, next);
1028 rte_free(v_list_itr);
1033 ice_remove_vlan_filter(struct ice_vsi *vsi, struct ice_vlan *vlan)
1035 struct ice_fltr_list_entry *v_list_itr = NULL;
1036 struct ice_vlan_filter *f;
1037 struct LIST_HEAD_TYPE list_head;
1041 if (!vsi || vlan->vid > RTE_ETHER_MAX_VLAN_ID)
1044 hw = ICE_VSI_TO_HW(vsi);
1046 /* Can't find it, return an error */
1047 f = ice_find_vlan_filter(vsi, vlan);
1051 INIT_LIST_HEAD(&list_head);
1053 v_list_itr = (struct ice_fltr_list_entry *)
1054 ice_malloc(hw, sizeof(*v_list_itr));
1060 v_list_itr->fltr_info.l_data.vlan.vlan_id = vlan->vid;
1061 v_list_itr->fltr_info.l_data.vlan.tpid = vlan->tpid;
1062 v_list_itr->fltr_info.l_data.vlan.tpid_valid = true;
1063 v_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
1064 v_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
1065 v_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
1066 v_list_itr->fltr_info.flag = ICE_FLTR_TX;
1067 v_list_itr->fltr_info.vsi_handle = vsi->idx;
1069 LIST_ADD(&v_list_itr->list_entry, &list_head);
1071 /* remove the vlan filter */
1072 ret = ice_remove_vlan(hw, &list_head);
1073 if (ret != ICE_SUCCESS) {
1074 PMD_DRV_LOG(ERR, "Failed to remove VLAN filter");
1079 /* Remove the vlan id from vlan list */
1080 TAILQ_REMOVE(&vsi->vlan_list, f, next);
1086 rte_free(v_list_itr);
1091 ice_remove_all_mac_vlan_filters(struct ice_vsi *vsi)
1093 struct ice_mac_filter *m_f;
1094 struct ice_vlan_filter *v_f;
1097 if (!vsi || !vsi->mac_num)
1100 TAILQ_FOREACH(m_f, &vsi->mac_list, next) {
1101 ret = ice_remove_mac_filter(vsi, &m_f->mac_info.mac_addr);
1102 if (ret != ICE_SUCCESS) {
1108 if (vsi->vlan_num == 0)
1111 TAILQ_FOREACH(v_f, &vsi->vlan_list, next) {
1112 ret = ice_remove_vlan_filter(vsi, &v_f->vlan_info.vlan);
1113 if (ret != ICE_SUCCESS) {
1125 ice_pf_enable_irq0(struct ice_hw *hw)
1127 /* reset the registers */
1128 ICE_WRITE_REG(hw, PFINT_OICR_ENA, 0);
1129 ICE_READ_REG(hw, PFINT_OICR);
1132 ICE_WRITE_REG(hw, PFINT_OICR_ENA,
1133 (uint32_t)(PFINT_OICR_ENA_INT_ENA_M &
1134 (~PFINT_OICR_LINK_STAT_CHANGE_M)));
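/* Route the 'other interrupt cause' (OICR) and the FW/AdminQ interrupt
 * to MSI-X vector 0 with ITR index 0, and enable both causes.
 */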
1136 ICE_WRITE_REG(hw, PFINT_OICR_CTL,
1137 (0 & PFINT_OICR_CTL_MSIX_INDX_M) |
1138 ((0 << PFINT_OICR_CTL_ITR_INDX_S) &
1139 PFINT_OICR_CTL_ITR_INDX_M) |
1140 PFINT_OICR_CTL_CAUSE_ENA_M);
1142 ICE_WRITE_REG(hw, PFINT_FW_CTL,
1143 (0 & PFINT_FW_CTL_MSIX_INDX_M) |
1144 ((0 << PFINT_FW_CTL_ITR_INDX_S) &
1145 PFINT_FW_CTL_ITR_INDX_M) |
1146 PFINT_FW_CTL_CAUSE_ENA_M);
1148 ICE_WRITE_REG(hw, PFINT_OICR_ENA, PFINT_OICR_ENA_INT_ENA_M);
1151 ICE_WRITE_REG(hw, GLINT_DYN_CTL(0),
1152 GLINT_DYN_CTL_INTENA_M |
1153 GLINT_DYN_CTL_CLEARPBA_M |
1154 GLINT_DYN_CTL_ITR_INDX_M);
1161 ice_pf_disable_irq0(struct ice_hw *hw)
1163 /* Disable all interrupt types */
1164 ICE_WRITE_REG(hw, GLINT_DYN_CTL(0), GLINT_DYN_CTL_WB_ON_ITR_M);
1170 ice_handle_aq_msg(struct rte_eth_dev *dev)
1172 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1173 struct ice_ctl_q_info *cq = &hw->adminq;
1174 struct ice_rq_event_info event;
1175 uint16_t pending, opcode;
1178 event.buf_len = ICE_AQ_MAX_BUF_LEN;
1179 event.msg_buf = rte_zmalloc(NULL, event.buf_len, 0);
1180 if (!event.msg_buf) {
1181 PMD_DRV_LOG(ERR, "Failed to allocate mem");
1187 ret = ice_clean_rq_elem(hw, cq, &event, &pending);
1189 if (ret != ICE_SUCCESS) {
1191 "Failed to read msg from AdminQ, "
1193 hw->adminq.sq_last_status);
1196 opcode = rte_le_to_cpu_16(event.desc.opcode);
1199 case ice_aqc_opc_get_link_status:
1200 ret = ice_link_update(dev, 0);
1202 rte_eth_dev_callback_process
1203 (dev, RTE_ETH_EVENT_INTR_LSC, NULL);
1206 PMD_DRV_LOG(DEBUG, "Request %u is not supported yet",
1211 rte_free(event.msg_buf);
1216 * Interrupt handler triggered by NIC for handling
1217 * specific interrupt.
1220 * Pointer to interrupt handle.
1222 * The address of the parameter (struct rte_eth_dev *) registered before.
1228 ice_interrupt_handler(void *param)
1230 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
1231 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1239 uint32_t int_fw_ctl;
1242 /* Disable interrupt */
1243 ice_pf_disable_irq0(hw);
1245 /* read out interrupt causes */
1246 oicr = ICE_READ_REG(hw, PFINT_OICR);
1248 int_fw_ctl = ICE_READ_REG(hw, PFINT_FW_CTL);
1251 /* No interrupt event indicated */
1252 if (!(oicr & PFINT_OICR_INTEVENT_M)) {
1253 PMD_DRV_LOG(INFO, "No interrupt event");
1258 if (int_fw_ctl & PFINT_FW_CTL_INTEVENT_M) {
1259 PMD_DRV_LOG(INFO, "FW_CTL: link state change event");
1260 ice_handle_aq_msg(dev);
1263 if (oicr & PFINT_OICR_LINK_STAT_CHANGE_M) {
1264 PMD_DRV_LOG(INFO, "OICR: link state change event");
1265 ret = ice_link_update(dev, 0);
1267 rte_eth_dev_callback_process
1268 (dev, RTE_ETH_EVENT_INTR_LSC, NULL);
1272 if (oicr & PFINT_OICR_MAL_DETECT_M) {
1273 PMD_DRV_LOG(WARNING, "OICR: MDD event");
1274 reg = ICE_READ_REG(hw, GL_MDET_TX_PQM);
1275 if (reg & GL_MDET_TX_PQM_VALID_M) {
1276 pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >>
1277 GL_MDET_TX_PQM_PF_NUM_S;
1278 event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >>
1279 GL_MDET_TX_PQM_MAL_TYPE_S;
1280 queue = (reg & GL_MDET_TX_PQM_QNUM_M) >>
1281 GL_MDET_TX_PQM_QNUM_S;
1283 PMD_DRV_LOG(WARNING, "Malicious Driver Detection event "
1284 "%d by PQM on TX queue %d PF# %d",
1285 event, queue, pf_num);
1288 reg = ICE_READ_REG(hw, GL_MDET_TX_TCLAN);
1289 if (reg & GL_MDET_TX_TCLAN_VALID_M) {
1290 pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >>
1291 GL_MDET_TX_TCLAN_PF_NUM_S;
1292 event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >>
1293 GL_MDET_TX_TCLAN_MAL_TYPE_S;
1294 queue = (reg & GL_MDET_TX_TCLAN_QNUM_M) >>
1295 GL_MDET_TX_TCLAN_QNUM_S;
1297 PMD_DRV_LOG(WARNING, "Malicious Driver Detection event "
1298 "%d by TCLAN on TX queue %d PF# %d",
1299 event, queue, pf_num);
1303 /* Enable interrupt */
1304 ice_pf_enable_irq0(hw);
1305 rte_intr_ack(dev->intr_handle);
1309 ice_init_proto_xtr(struct rte_eth_dev *dev)
1311 struct ice_adapter *ad =
1312 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1313 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1314 struct ice_hw *hw = ICE_PF_TO_HW(pf);
1315 const struct proto_xtr_ol_flag *ol_flag;
1316 bool proto_xtr_enable = false;
1320 pf->proto_xtr = rte_zmalloc(NULL, pf->lan_nb_qps, 0);
1321 if (unlikely(pf->proto_xtr == NULL)) {
1322 PMD_DRV_LOG(ERR, "No memory for setting up protocol extraction table");
1326 for (i = 0; i < pf->lan_nb_qps; i++) {
1327 pf->proto_xtr[i] = ad->devargs.proto_xtr[i] != PROTO_XTR_NONE ?
1328 ad->devargs.proto_xtr[i] :
1329 ad->devargs.proto_xtr_dflt;
1331 if (pf->proto_xtr[i] != PROTO_XTR_NONE) {
1332 uint8_t type = pf->proto_xtr[i];
1334 ice_proto_xtr_ol_flag_params[type].required = true;
1335 proto_xtr_enable = true;
1339 if (likely(!proto_xtr_enable))
1342 ice_check_proto_xtr_support(hw);
1344 offset = rte_mbuf_dynfield_register(&ice_proto_xtr_metadata_param);
1345 if (unlikely(offset == -1)) {
1347 "Protocol extraction metadata is disabled in mbuf with error %d",
1353 "Protocol extraction metadata offset in mbuf is: %d",
1355 rte_net_ice_dynfield_proto_xtr_metadata_offs = offset;
1357 for (i = 0; i < RTE_DIM(ice_proto_xtr_ol_flag_params); i++) {
1358 ol_flag = &ice_proto_xtr_ol_flag_params[i];
1360 if (!ol_flag->required)
1363 if (!ice_proto_xtr_hw_support[i]) {
1365 "Protocol extraction type %u is not supported in hardware",
1367 rte_net_ice_dynfield_proto_xtr_metadata_offs = -1;
1371 offset = rte_mbuf_dynflag_register(&ol_flag->param);
1372 if (unlikely(offset == -1)) {
1374 "Protocol extraction offload '%s' failed to register with error %d",
1375 ol_flag->param.name, -rte_errno);
1377 rte_net_ice_dynfield_proto_xtr_metadata_offs = -1;
1382 "Protocol extraction offload '%s' offset in mbuf is: %d",
1383 ol_flag->param.name, offset);
1384 *ol_flag->ol_flag = 1ULL << offset;
1388 /* Initialize SW parameters of PF */
1390 ice_pf_sw_init(struct rte_eth_dev *dev)
1392 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1393 struct ice_hw *hw = ICE_PF_TO_HW(pf);
1396 (uint16_t)RTE_MIN(hw->func_caps.common_cap.num_txq,
1397 hw->func_caps.common_cap.num_rxq);
1399 pf->lan_nb_qps = pf->lan_nb_qp_max;
1401 ice_init_proto_xtr(dev);
1403 if (hw->func_caps.fd_fltr_guar > 0 ||
1404 hw->func_caps.fd_fltr_best_effort > 0) {
1405 pf->flags |= ICE_FLAG_FDIR;
1406 pf->fdir_nb_qps = ICE_DEFAULT_QP_NUM_FDIR;
1407 pf->lan_nb_qps = pf->lan_nb_qp_max - pf->fdir_nb_qps;
1409 pf->fdir_nb_qps = 0;
1411 pf->fdir_qp_offset = 0;
1417 ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type)
1419 struct ice_hw *hw = ICE_PF_TO_HW(pf);
1420 struct ice_vsi *vsi = NULL;
1421 struct ice_vsi_ctx vsi_ctx;
1423 struct rte_ether_addr broadcast = {
1424 .addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff} };
1425 struct rte_ether_addr mac_addr;
1426 uint16_t max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
1427 uint8_t tc_bitmap = 0x1;
1430 /* hw->num_lports = 1 in NIC mode */
1431 vsi = rte_zmalloc(NULL, sizeof(struct ice_vsi), 0);
1435 vsi->idx = pf->next_vsi_idx;
1438 vsi->adapter = ICE_PF_TO_ADAPTER(pf);
1439 vsi->max_macaddrs = ICE_NUM_MACADDR_MAX;
1440 vsi->vlan_anti_spoof_on = 0;
1441 vsi->vlan_filter_on = 1;
1442 TAILQ_INIT(&vsi->mac_list);
1443 TAILQ_INIT(&vsi->vlan_list);
1445 /* Keep in sync with the ETH_RSS_RETA_SIZE_x maximum value definition */
1446 pf->hash_lut_size = hw->func_caps.common_cap.rss_table_size >
1447 ETH_RSS_RETA_SIZE_512 ? ETH_RSS_RETA_SIZE_512 :
1448 hw->func_caps.common_cap.rss_table_size;
1449 pf->flags |= ICE_FLAG_RSS_AQ_CAPABLE;
1451 memset(&vsi_ctx, 0, sizeof(vsi_ctx));
1454 vsi->nb_qps = pf->lan_nb_qps;
1455 vsi->base_queue = 1;
1456 ice_vsi_config_default_rss(&vsi_ctx.info);
1457 vsi_ctx.alloc_from_pool = true;
1458 vsi_ctx.flags = ICE_AQ_VSI_TYPE_PF;
1459 /* switch_id is queried by get_switch_config aq, which is done
1462 vsi_ctx.info.sw_id = hw->port_info->sw_id;
1463 vsi_ctx.info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
1464 /* Allow all untagged or tagged packets */
1465 vsi_ctx.info.inner_vlan_flags = ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL;
1466 vsi_ctx.info.inner_vlan_flags |= ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING;
1467 vsi_ctx.info.q_opt_rss = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF |
1468 ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
1469 if (ice_is_dvm_ena(hw)) {
1470 vsi_ctx.info.outer_vlan_flags =
1471 (ICE_AQ_VSI_OUTER_VLAN_TX_MODE_ALL <<
1472 ICE_AQ_VSI_OUTER_VLAN_TX_MODE_S) &
1473 ICE_AQ_VSI_OUTER_VLAN_TX_MODE_M;
1474 vsi_ctx.info.outer_vlan_flags |=
1475 (ICE_AQ_VSI_OUTER_TAG_VLAN_8100 <<
1476 ICE_AQ_VSI_OUTER_TAG_TYPE_S) &
1477 ICE_AQ_VSI_OUTER_TAG_TYPE_M;
1481 cfg = ICE_AQ_VSI_PROP_SECURITY_VALID |
1482 ICE_AQ_VSI_PROP_FLOW_DIR_VALID;
1483 vsi_ctx.info.valid_sections |= rte_cpu_to_le_16(cfg);
1484 cfg = ICE_AQ_VSI_FD_ENABLE;
1485 vsi_ctx.info.fd_options = rte_cpu_to_le_16(cfg);
1486 vsi_ctx.info.max_fd_fltr_dedicated =
1487 rte_cpu_to_le_16(hw->func_caps.fd_fltr_guar);
1488 vsi_ctx.info.max_fd_fltr_shared =
1489 rte_cpu_to_le_16(hw->func_caps.fd_fltr_best_effort);
1491 /* Enable VLAN/UP trip */
1492 ret = ice_vsi_config_tc_queue_mapping(vsi,
1497 "tc queue mapping with vsi failed, "
1505 vsi->nb_qps = pf->fdir_nb_qps;
1506 vsi->base_queue = ICE_FDIR_QUEUE_ID;
1507 vsi_ctx.alloc_from_pool = true;
1508 vsi_ctx.flags = ICE_AQ_VSI_TYPE_PF;
1510 cfg = ICE_AQ_VSI_PROP_FLOW_DIR_VALID;
1511 vsi_ctx.info.valid_sections |= rte_cpu_to_le_16(cfg);
1512 cfg = ICE_AQ_VSI_FD_PROG_ENABLE;
1513 vsi_ctx.info.fd_options = rte_cpu_to_le_16(cfg);
1514 vsi_ctx.info.sw_id = hw->port_info->sw_id;
1515 vsi_ctx.info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
1516 ret = ice_vsi_config_tc_queue_mapping(vsi,
1521 "tc queue mapping with vsi failed, "
1528 /* for other types of VSI */
1529 PMD_INIT_LOG(ERR, "other types of VSI not supported");
1533 /* VF has MSIX interrupt in VF range, don't allocate here */
1534 if (type == ICE_VSI_PF) {
1535 ret = ice_res_pool_alloc(&pf->msix_pool,
1536 RTE_MIN(vsi->nb_qps,
1537 RTE_MAX_RXTX_INTR_VEC_ID));
1539 PMD_INIT_LOG(ERR, "VSI MAIN %d get heap failed %d",
1542 vsi->msix_intr = ret;
1543 vsi->nb_msix = RTE_MIN(vsi->nb_qps, RTE_MAX_RXTX_INTR_VEC_ID);
1544 } else if (type == ICE_VSI_CTRL) {
1545 ret = ice_res_pool_alloc(&pf->msix_pool, 1);
1547 PMD_DRV_LOG(ERR, "VSI %d get heap failed %d",
1550 vsi->msix_intr = ret;
1556 ret = ice_add_vsi(hw, vsi->idx, &vsi_ctx, NULL);
1557 if (ret != ICE_SUCCESS) {
1558 PMD_INIT_LOG(ERR, "add vsi failed, err = %d", ret);
1561 /* store VSI information in the SW structure */
1562 vsi->vsi_id = vsi_ctx.vsi_num;
1563 vsi->info = vsi_ctx.info;
1564 pf->vsis_allocated = vsi_ctx.vsis_allocd;
1565 pf->vsis_unallocated = vsi_ctx.vsis_unallocated;
1567 if (type == ICE_VSI_PF) {
1568 /* MAC configuration */
1569 rte_ether_addr_copy((struct rte_ether_addr *)
1570 hw->port_info->mac.perm_addr,
1573 rte_ether_addr_copy(&pf->dev_addr, &mac_addr);
1574 ret = ice_add_mac_filter(vsi, &mac_addr);
1575 if (ret != ICE_SUCCESS)
1576 PMD_INIT_LOG(ERR, "Failed to add dflt MAC filter");
1578 rte_ether_addr_copy(&broadcast, &mac_addr);
1579 ret = ice_add_mac_filter(vsi, &mac_addr);
1580 if (ret != ICE_SUCCESS)
1581 PMD_INIT_LOG(ERR, "Failed to add MAC filter");
1584 /* At the beginning, only TC0. */
1585 /* What we need here is the maximum number of the TX queues.
1586 * Currently vsi->nb_qps holds it.
1587 * Correct this if that changes.
1589 max_txqs[0] = vsi->nb_qps;
1590 ret = ice_cfg_vsi_lan(hw->port_info, vsi->idx,
1591 tc_bitmap, max_txqs);
1592 if (ret != ICE_SUCCESS)
1593 PMD_INIT_LOG(ERR, "Failed to config vsi sched");
1603 ice_send_driver_ver(struct ice_hw *hw)
1605 struct ice_driver_ver dv;
1607 /* we don't have a driver version; use 0 as a dummy */
1611 dv.subbuild_ver = 0;
1612 strncpy((char *)dv.driver_string, "dpdk", sizeof(dv.driver_string));
1614 return ice_aq_send_driver_ver(hw, &dv, NULL);
1618 ice_pf_setup(struct ice_pf *pf)
1620 struct ice_hw *hw = ICE_PF_TO_HW(pf);
1621 struct ice_vsi *vsi;
1624 /* Clear all stats counters */
1625 pf->offset_loaded = false;
1626 memset(&pf->stats, 0, sizeof(struct ice_hw_port_stats));
1627 memset(&pf->stats_offset, 0, sizeof(struct ice_hw_port_stats));
1628 memset(&pf->internal_stats, 0, sizeof(struct ice_eth_stats));
1629 memset(&pf->internal_stats_offset, 0, sizeof(struct ice_eth_stats));
1631 /* force guaranteed filter pool for PF */
1632 ice_alloc_fd_guar_item(hw, &unused,
1633 hw->func_caps.fd_fltr_guar);
1634 /* force shared filter pool for PF */
1635 ice_alloc_fd_shrd_item(hw, &unused,
1636 hw->func_caps.fd_fltr_best_effort);
1638 vsi = ice_setup_vsi(pf, ICE_VSI_PF);
1640 PMD_INIT_LOG(ERR, "Failed to add vsi for PF");
1650 * Extract device serial number from PCIe Configuration Space and
1651 * determine the pkg file path according to the DSN.
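 * For example, with DSN high/low dwords 0x12345678/0x00000001 the file
 * searched for is "ice-1234567800000001.pkg" (per the snprintf below),
 * first under the updates path and then under the default firmware path.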
1653 #ifndef RTE_EXEC_ENV_WINDOWS
1655 ice_pkg_file_search_path(struct rte_pci_device *pci_dev, char *pkg_file)
1658 char opt_ddp_filename[ICE_MAX_PKG_FILENAME_SIZE];
1659 uint32_t dsn_low, dsn_high;
1660 memset(opt_ddp_filename, 0, ICE_MAX_PKG_FILENAME_SIZE);
1662 pos = rte_pci_find_ext_capability(pci_dev, RTE_PCI_EXT_CAP_ID_DSN);
1665 if (rte_pci_read_config(pci_dev, &dsn_low, 4, pos + 4) < 0) {
1666 PMD_INIT_LOG(ERR, "Failed to read pci config space\n");
1669 if (rte_pci_read_config(pci_dev, &dsn_high, 4, pos + 8) < 0) {
1670 PMD_INIT_LOG(ERR, "Failed to read pci config space\n");
1673 snprintf(opt_ddp_filename, ICE_MAX_PKG_FILENAME_SIZE,
1674 "ice-%08x%08x.pkg", dsn_high, dsn_low);
1676 PMD_INIT_LOG(ERR, "Failed to read device serial number\n");
1680 strncpy(pkg_file, ICE_PKG_FILE_SEARCH_PATH_UPDATES,
1681 ICE_MAX_PKG_FILENAME_SIZE);
1682 if (!ice_access(strcat(pkg_file, opt_ddp_filename), 0))
1685 strncpy(pkg_file, ICE_PKG_FILE_SEARCH_PATH_DEFAULT,
1686 ICE_MAX_PKG_FILENAME_SIZE);
1687 if (!ice_access(strcat(pkg_file, opt_ddp_filename), 0))
1691 strncpy(pkg_file, ICE_PKG_FILE_UPDATES, ICE_MAX_PKG_FILENAME_SIZE);
1692 if (!ice_access(pkg_file, 0))
1694 strncpy(pkg_file, ICE_PKG_FILE_DEFAULT, ICE_MAX_PKG_FILENAME_SIZE);
1700 ice_load_pkg_type(struct ice_hw *hw)
1702 enum ice_pkg_type package_type;
1704 /* store the activated package type (OS default or Comms) */
1705 if (!strncmp((char *)hw->active_pkg_name, ICE_OS_DEFAULT_PKG_NAME,
1707 package_type = ICE_PKG_TYPE_OS_DEFAULT;
1708 else if (!strncmp((char *)hw->active_pkg_name, ICE_COMMS_PKG_NAME,
1710 package_type = ICE_PKG_TYPE_COMMS;
1712 package_type = ICE_PKG_TYPE_UNKNOWN;
1714 PMD_INIT_LOG(NOTICE, "Active package is: %d.%d.%d.%d, %s (%s VLAN mode)",
1715 hw->active_pkg_ver.major, hw->active_pkg_ver.minor,
1716 hw->active_pkg_ver.update, hw->active_pkg_ver.draft,
1717 hw->active_pkg_name,
1718 ice_is_dvm_ena(hw) ? "double" : "single");
1720 return package_type;
1723 #ifndef RTE_EXEC_ENV_WINDOWS
1724 static int ice_load_pkg(struct rte_eth_dev *dev)
1726 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1727 char pkg_file[ICE_MAX_PKG_FILENAME_SIZE];
1733 struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
1734 struct ice_adapter *ad =
1735 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1737 err = ice_pkg_file_search_path(pci_dev, pkg_file);
1739 PMD_INIT_LOG(ERR, "failed to search file path\n");
1743 file = fopen(pkg_file, "rb");
1745 PMD_INIT_LOG(ERR, "failed to open file: %s\n", pkg_file);
1749 err = stat(pkg_file, &fstat);
1751 PMD_INIT_LOG(ERR, "failed to get file stats\n");
1756 buf_len = fstat.st_size;
1757 buf = rte_malloc(NULL, buf_len, 0);
1760 PMD_INIT_LOG(ERR, "failed to allocate buf of size %d for package\n",
1766 err = fread(buf, buf_len, 1, file);
1768 PMD_INIT_LOG(ERR, "failed to read package data\n");
1776 err = ice_copy_and_init_pkg(hw, buf, buf_len);
1778 PMD_INIT_LOG(ERR, "ice_copy_and_init_hw failed: %d\n", err);
1782 /* store the loaded pkg type info */
1783 ad->active_pkg_type = ice_load_pkg_type(hw);
1785 err = ice_init_hw_tbls(hw);
1787 PMD_INIT_LOG(ERR, "ice_init_hw_tbls failed: %d\n", err);
1788 goto fail_init_tbls;
1794 rte_free(hw->pkg_copy);
1802 ice_base_queue_get(struct ice_pf *pf)
1805 struct ice_hw *hw = ICE_PF_TO_HW(pf);
1807 reg = ICE_READ_REG(hw, PFLAN_RX_QALLOC);
1808 if (reg & PFLAN_RX_QALLOC_VALID_M) {
1809 pf->base_queue = reg & PFLAN_RX_QALLOC_FIRSTQ_M;
1811 PMD_INIT_LOG(WARNING, "Failed to get Rx base queue"
1817 parse_bool(const char *key, const char *value, void *args)
1819 int *i = (int *)args;
1823 num = strtoul(value, &end, 10);
1825 if (num != 0 && num != 1) {
1826 PMD_DRV_LOG(WARNING, "invalid value:\"%s\" for key:\"%s\", "
1827 "value must be 0 or 1",
1836 static int ice_parse_devargs(struct rte_eth_dev *dev)
1838 struct ice_adapter *ad =
1839 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1840 struct rte_devargs *devargs = dev->device->devargs;
1841 struct rte_kvargs *kvlist;
1844 if (devargs == NULL)
1847 kvlist = rte_kvargs_parse(devargs->args, ice_valid_args);
1848 if (kvlist == NULL) {
1849 PMD_INIT_LOG(ERR, "Invalid kvargs key\n");
1853 ad->devargs.proto_xtr_dflt = PROTO_XTR_NONE;
1854 memset(ad->devargs.proto_xtr, PROTO_XTR_NONE,
1855 sizeof(ad->devargs.proto_xtr));
1857 ret = rte_kvargs_process(kvlist, ICE_PROTO_XTR_ARG,
1858 &handle_proto_xtr_arg, &ad->devargs);
1862 ret = rte_kvargs_process(kvlist, ICE_SAFE_MODE_SUPPORT_ARG,
1863 &parse_bool, &ad->devargs.safe_mode_support);
1867 ret = rte_kvargs_process(kvlist, ICE_PIPELINE_MODE_SUPPORT_ARG,
1868 &parse_bool, &ad->devargs.pipe_mode_support);
1873 rte_kvargs_free(kvlist);
1877 /* Forward LLDP packets to the default VSI by setting switch rules */
1879 ice_vsi_config_sw_lldp(struct ice_vsi *vsi, bool on)
1881 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
1882 struct ice_fltr_list_entry *s_list_itr = NULL;
1883 struct LIST_HEAD_TYPE list_head;
1886 INIT_LIST_HEAD(&list_head);
1888 s_list_itr = (struct ice_fltr_list_entry *)
1889 ice_malloc(hw, sizeof(*s_list_itr));
1892 s_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_ETHERTYPE;
1893 s_list_itr->fltr_info.vsi_handle = vsi->idx;
1894 s_list_itr->fltr_info.l_data.ethertype_mac.ethertype =
1895 RTE_ETHER_TYPE_LLDP;
1896 s_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
1897 s_list_itr->fltr_info.flag = ICE_FLTR_RX;
1898 s_list_itr->fltr_info.src_id = ICE_SRC_ID_LPORT;
1899 LIST_ADD(&s_list_itr->list_entry, &list_head);
1901 ret = ice_add_eth_mac(hw, &list_head);
1903 ret = ice_remove_eth_mac(hw, &list_head);
1905 rte_free(s_list_itr);
1909 static enum ice_status
1910 ice_get_hw_res(struct ice_hw *hw, uint16_t res_type,
1911 uint16_t num, uint16_t desc_id,
1912 uint16_t *prof_buf, uint16_t *num_prof)
1914 struct ice_aqc_res_elem *resp_buf;
1917 bool res_shared = 1;
1918 struct ice_aq_desc aq_desc;
1919 struct ice_sq_cd *cd = NULL;
1920 struct ice_aqc_get_allocd_res_desc *cmd =
1921 &aq_desc.params.get_res_desc;
1923 buf_len = sizeof(*resp_buf) * num;
1924 resp_buf = ice_malloc(hw, buf_len);
1928 ice_fill_dflt_direct_cmd_desc(&aq_desc,
1929 ice_aqc_opc_get_allocd_res_desc);
1931 cmd->ops.cmd.res = CPU_TO_LE16(((res_type << ICE_AQC_RES_TYPE_S) &
1932 ICE_AQC_RES_TYPE_M) | (res_shared ?
1933 ICE_AQC_RES_TYPE_FLAG_SHARED : 0));
1934 cmd->ops.cmd.first_desc = CPU_TO_LE16(desc_id);
1936 ret = ice_aq_send_cmd(hw, &aq_desc, resp_buf, buf_len, cd);
1938 *num_prof = LE16_TO_CPU(cmd->ops.resp.num_desc);
1942 ice_memcpy(prof_buf, resp_buf, sizeof(*resp_buf) *
1943 (*num_prof), ICE_NONDMA_TO_NONDMA);
1950 ice_cleanup_resource(struct ice_hw *hw, uint16_t res_type)
1954 uint16_t prof_buf[ICE_MAX_RES_DESC_NUM];
1955 uint16_t first_desc = 1;
1956 uint16_t num_prof = 0;
1958 ret = ice_get_hw_res(hw, res_type, ICE_MAX_RES_DESC_NUM,
1959 first_desc, prof_buf, &num_prof);
1961 PMD_INIT_LOG(ERR, "Failed to get fxp resource");
1965 for (prof_id = 0; prof_id < num_prof; prof_id++) {
1966 ret = ice_free_hw_res(hw, res_type, 1, &prof_buf[prof_id]);
1968 PMD_INIT_LOG(ERR, "Failed to free fxp resource");
1976 ice_reset_fxp_resource(struct ice_hw *hw)
1980 ret = ice_cleanup_resource(hw, ICE_AQC_RES_TYPE_FD_PROF_BLDR_PROFID);
1982 PMD_INIT_LOG(ERR, "Failed to clean up fdir resource");
1986 ret = ice_cleanup_resource(hw, ICE_AQC_RES_TYPE_HASH_PROF_BLDR_PROFID);
1988 PMD_INIT_LOG(ERR, "Failed to clean up rss resource");
1996 ice_rss_ctx_init(struct ice_pf *pf)
1998 memset(&pf->hash_ctx, 0, sizeof(pf->hash_ctx));
2002 ice_get_supported_rxdid(struct ice_hw *hw)
2004 uint64_t supported_rxdid = 0; /* bitmap for supported RXDID */
2008 supported_rxdid |= BIT(ICE_RXDID_LEGACY_1);
2010 for (i = ICE_RXDID_FLEX_NIC; i < ICE_FLEX_DESC_RXDID_MAX_NUM; i++) {
2011 regval = ICE_READ_REG(hw, GLFLXP_RXDID_FLAGS(i, 0));
2012 if ((regval >> GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S)
2013 & GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M)
2014 supported_rxdid |= BIT(i);
2016 return supported_rxdid;
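/*
 * Sketch of how the returned bitmap is consumed elsewhere (illustration
 * only): a flexible descriptor ID 'rxdid' is usable iff its bit is set:
 *   if (pf->supported_rxdid & BIT(rxdid))
 *           ... rxdid may be programmed for the Rx queue ...
 */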
2020 ice_dev_init(struct rte_eth_dev *dev)
2022 struct rte_pci_device *pci_dev;
2023 struct rte_intr_handle *intr_handle;
2024 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2025 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2026 struct ice_adapter *ad =
2027 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2028 struct ice_vsi *vsi;
2031 dev->dev_ops = &ice_eth_dev_ops;
2032 dev->rx_queue_count = ice_rx_queue_count;
2033 dev->rx_descriptor_status = ice_rx_descriptor_status;
2034 dev->tx_descriptor_status = ice_tx_descriptor_status;
2035 dev->rx_pkt_burst = ice_recv_pkts;
2036 dev->tx_pkt_burst = ice_xmit_pkts;
2037 dev->tx_pkt_prepare = ice_prep_pkts;
2039 /* for secondary processes, we don't initialise any further as primary
2040 * has already done this work.
2042 if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2043 ice_set_rx_function(dev);
2044 ice_set_tx_function(dev);
2048 dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
2050 ice_set_default_ptype_table(dev);
2051 pci_dev = RTE_DEV_TO_PCI(dev->device);
2052 intr_handle = &pci_dev->intr_handle;
2054 pf->adapter = ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2055 pf->adapter->eth_dev = dev;
2056 pf->dev_data = dev->data;
2057 hw->back = pf->adapter;
2058 hw->hw_addr = (uint8_t *)pci_dev->mem_resource[0].addr;
2059 hw->vendor_id = pci_dev->id.vendor_id;
2060 hw->device_id = pci_dev->id.device_id;
2061 hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
2062 hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
2063 hw->bus.device = pci_dev->addr.devid;
2064 hw->bus.func = pci_dev->addr.function;
2066 ret = ice_parse_devargs(dev);
2068 PMD_INIT_LOG(ERR, "Failed to parse devargs");
2072 ice_init_controlq_parameter(hw);
2074 ret = ice_init_hw(hw);
2076 PMD_INIT_LOG(ERR, "Failed to initialize HW");
2080 #ifndef RTE_EXEC_ENV_WINDOWS
2081 ret = ice_load_pkg(dev);
2083 if (ad->devargs.safe_mode_support == 0) {
2084 PMD_INIT_LOG(ERR, "Failed to load the DDP package, "
2085 "Use safe-mode-support=1 to enter Safe Mode");
2089 PMD_INIT_LOG(WARNING, "Failed to load the DDP package, "
2090 "Entering Safe Mode");
2091 ad->is_safe_mode = 1;
2095 PMD_INIT_LOG(INFO, "FW %d.%d.%05d API %d.%d",
2096 hw->fw_maj_ver, hw->fw_min_ver, hw->fw_build,
2097 hw->api_maj_ver, hw->api_min_ver);
2099 ice_pf_sw_init(dev);
2100 ret = ice_init_mac_address(dev);
2102 PMD_INIT_LOG(ERR, "Failed to initialize mac address");
2106 ret = ice_res_pool_init(&pf->msix_pool, 1,
2107 hw->func_caps.common_cap.num_msix_vectors - 1);
2109 PMD_INIT_LOG(ERR, "Failed to init MSIX pool");
2110 goto err_msix_pool_init;
2113 ret = ice_pf_setup(pf);
2115 PMD_INIT_LOG(ERR, "Failed to setup PF");
2119 ret = ice_send_driver_ver(hw);
2121 PMD_INIT_LOG(ERR, "Failed to send driver version");
2127 ret = ice_aq_stop_lldp(hw, true, false, NULL);
2128 if (ret != ICE_SUCCESS)
2129 PMD_INIT_LOG(DEBUG, "lldp has already stopped\n");
2130 ret = ice_init_dcb(hw, true);
2131 if (ret != ICE_SUCCESS)
2132 PMD_INIT_LOG(DEBUG, "Failed to init DCB\n");
2133 /* Forward LLDP packets to default VSI */
2134 ret = ice_vsi_config_sw_lldp(vsi, true);
2135 if (ret != ICE_SUCCESS)
2136 PMD_INIT_LOG(DEBUG, "Failed to cfg lldp\n");
2137 /* register callback func to eal lib */
2138 rte_intr_callback_register(intr_handle,
2139 ice_interrupt_handler, dev);
2141 ice_pf_enable_irq0(hw);
2143 /* enable uio intr after callback register */
2144 rte_intr_enable(intr_handle);
2146 /* get base queue pairs index in the device */
2147 ice_base_queue_get(pf);
2149 /* Initialize RSS context for gtpu_eh */
2150 ice_rss_ctx_init(pf);
2152 if (!ad->is_safe_mode) {
2153 ret = ice_flow_init(ad);
2155 PMD_INIT_LOG(ERR, "Failed to initialize flow");
2160 ret = ice_reset_fxp_resource(hw);
2162 PMD_INIT_LOG(ERR, "Failed to reset fxp resource");
2166 pf->supported_rxdid = ice_get_supported_rxdid(hw);
2171 ice_res_pool_destroy(&pf->msix_pool);
2173 rte_free(dev->data->mac_addrs);
2174 dev->data->mac_addrs = NULL;
2176 ice_sched_cleanup_all(hw);
2177 rte_free(hw->port_info);
2178 ice_shutdown_all_ctrlq(hw);
2179 rte_free(pf->proto_xtr);
2185 ice_release_vsi(struct ice_vsi *vsi)
2188 struct ice_vsi_ctx vsi_ctx;
2189 enum ice_status ret;
2195 hw = ICE_VSI_TO_HW(vsi);
2197 ice_remove_all_mac_vlan_filters(vsi);
2199 memset(&vsi_ctx, 0, sizeof(vsi_ctx));
2201 vsi_ctx.vsi_num = vsi->vsi_id;
2202 vsi_ctx.info = vsi->info;
2203 ret = ice_free_vsi(hw, vsi->idx, &vsi_ctx, false, NULL);
2204 if (ret != ICE_SUCCESS) {
2205 PMD_INIT_LOG(ERR, "Failed to free vsi by aq, %u", vsi->vsi_id);
2209 rte_free(vsi->rss_lut);
2210 rte_free(vsi->rss_key);
2216 ice_vsi_disable_queues_intr(struct ice_vsi *vsi)
2218 struct rte_eth_dev *dev = vsi->adapter->eth_dev;
2219 struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
2220 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2221 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
2222 uint16_t msix_intr, i;
2224 /* disable interrupts and also clear all the existing config */
2225 for (i = 0; i < vsi->nb_qps; i++) {
2226 ICE_WRITE_REG(hw, QINT_TQCTL(vsi->base_queue + i), 0);
2227 ICE_WRITE_REG(hw, QINT_RQCTL(vsi->base_queue + i), 0);
2231 if (rte_intr_allow_others(intr_handle))
2233 for (i = 0; i < vsi->nb_msix; i++) {
2234 msix_intr = vsi->msix_intr + i;
2235 ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr),
2236 GLINT_DYN_CTL_WB_ON_ITR_M);
2240 ICE_WRITE_REG(hw, GLINT_DYN_CTL(0), GLINT_DYN_CTL_WB_ON_ITR_M);
2244 ice_dev_stop(struct rte_eth_dev *dev)
2246 struct rte_eth_dev_data *data = dev->data;
2247 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2248 struct ice_vsi *main_vsi = pf->main_vsi;
2249 struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
2250 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2253 /* avoid stopping again */
2254 if (pf->adapter_stopped)
2257 /* stop and clear all Rx queues */
2258 for (i = 0; i < data->nb_rx_queues; i++)
2259 ice_rx_queue_stop(dev, i);
2261 /* stop and clear all Tx queues */
2262 for (i = 0; i < data->nb_tx_queues; i++)
2263 ice_tx_queue_stop(dev, i);
2265 /* disable all queue interrupts */
2266 ice_vsi_disable_queues_intr(main_vsi);
2268 if (pf->init_link_up)
2269 ice_dev_set_link_up(dev);
2271 ice_dev_set_link_down(dev);
2273 /* Clean datapath event and queue/vec mapping */
2274 rte_intr_efd_disable(intr_handle);
2275 if (intr_handle->intr_vec) {
2276 rte_free(intr_handle->intr_vec);
2277 intr_handle->intr_vec = NULL;
2280 pf->adapter_stopped = true;
2281 dev->data->dev_started = 0;
2287 ice_dev_close(struct rte_eth_dev *dev)
2289 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2290 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2291 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2292 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2293 struct ice_adapter *ad =
2294 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2297 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2300 /* Since stop will bring the link down and trigger a link event,
2301 * disable the irq first so that the deallocation of port_info and
2302 * other resources does not crash the interrupt service thread
2305 ice_pf_disable_irq0(hw);
2307 ret = ice_dev_stop(dev);
2309 if (!ad->is_safe_mode)
2310 ice_flow_uninit(ad);
2312 /* release all queue resources */
2313 ice_free_queues(dev);
2315 ice_res_pool_destroy(&pf->msix_pool);
2316 ice_release_vsi(pf->main_vsi);
2317 ice_sched_cleanup_all(hw);
2318 ice_free_hw_tbls(hw);
2319 rte_free(hw->port_info);
2320 hw->port_info = NULL;
2321 ice_shutdown_all_ctrlq(hw);
2322 rte_free(pf->proto_xtr);
2323 pf->proto_xtr = NULL;
2325 /* disable uio intr before callback unregister */
2326 rte_intr_disable(intr_handle);
2328 /* unregister callback func from eal lib */
2329 rte_intr_callback_unregister(intr_handle,
2330 ice_interrupt_handler, dev);
2336 ice_dev_uninit(struct rte_eth_dev *dev)
2344 is_hash_cfg_valid(struct ice_rss_hash_cfg *cfg)
2346 return cfg->hash_flds != 0 && cfg->addl_hdrs != 0;
2350 hash_cfg_reset(struct ice_rss_hash_cfg *cfg)
2355 cfg->hdr_type = ICE_RSS_OUTER_HEADERS;
2359 ice_hash_moveout(struct ice_pf *pf, struct ice_rss_hash_cfg *cfg)
2361 enum ice_status status = ICE_SUCCESS;
2362 struct ice_hw *hw = ICE_PF_TO_HW(pf);
2363 struct ice_vsi *vsi = pf->main_vsi;
2365 if (!is_hash_cfg_valid(cfg))
2368 status = ice_rem_rss_cfg(hw, vsi->idx, cfg);
2369 if (status && status != ICE_ERR_DOES_NOT_EXIST) {
2371 "ice_rem_rss_cfg failed for VSI:%d, error:%d\n",
2380 ice_hash_moveback(struct ice_pf *pf, struct ice_rss_hash_cfg *cfg)
2382 enum ice_status status = ICE_SUCCESS;
2383 struct ice_hw *hw = ICE_PF_TO_HW(pf);
2384 struct ice_vsi *vsi = pf->main_vsi;
2386 if (!is_hash_cfg_valid(cfg))
2389 status = ice_add_rss_cfg(hw, vsi->idx, cfg);
2392 "ice_add_rss_cfg failed for VSI:%d, error:%d\n",
2401 ice_hash_remove(struct ice_pf *pf, struct ice_rss_hash_cfg *cfg)
2405 ret = ice_hash_moveout(pf, cfg);
2406 if (ret && (ret != -ENOENT))
2409 hash_cfg_reset(cfg);
2415 ice_add_rss_cfg_pre_gtpu(struct ice_pf *pf, struct ice_hash_gtpu_ctx *ctx,
2421 case ICE_HASH_GTPU_CTX_EH_IP:
2422 ret = ice_hash_remove(pf,
2423 &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_UDP]);
2424 if (ret && (ret != -ENOENT))
2427 ret = ice_hash_remove(pf,
2428 &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_TCP]);
2429 if (ret && (ret != -ENOENT))
2432 ret = ice_hash_remove(pf,
2433 &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP]);
2434 if (ret && (ret != -ENOENT))
2437 ret = ice_hash_remove(pf,
2438 &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_UDP]);
2439 if (ret && (ret != -ENOENT))
2442 ret = ice_hash_remove(pf,
2443 &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_TCP]);
2444 if (ret && (ret != -ENOENT))
2447 ret = ice_hash_remove(pf,
2448 &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP]);
2449 if (ret && (ret != -ENOENT))
2452 ret = ice_hash_remove(pf,
2453 &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_UDP]);
2454 if (ret && (ret != -ENOENT))
2457 ret = ice_hash_remove(pf,
2458 &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_TCP]);
2459 if (ret && (ret != -ENOENT))
2463 case ICE_HASH_GTPU_CTX_EH_IP_UDP:
2464 ret = ice_hash_remove(pf,
2465 &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_UDP]);
2466 if (ret && (ret != -ENOENT))
2469 ret = ice_hash_remove(pf,
2470 &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_UDP]);
2471 if (ret && (ret != -ENOENT))
2474 ret = ice_hash_moveout(pf,
2475 &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP]);
2476 if (ret && (ret != -ENOENT))
2479 ret = ice_hash_moveout(pf,
2480 &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_TCP]);
2481 if (ret && (ret != -ENOENT))
2484 ret = ice_hash_moveout(pf,
2485 &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP]);
2486 if (ret && (ret != -ENOENT))
2489 ret = ice_hash_moveout(pf,
2490 &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_TCP]);
2491 if (ret && (ret != -ENOENT))
2495 case ICE_HASH_GTPU_CTX_EH_IP_TCP:
2496 ret = ice_hash_remove(pf,
2497 &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_TCP]);
2498 if (ret && (ret != -ENOENT))
2501 ret = ice_hash_remove(pf,
2502 &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_TCP]);
2503 if (ret && (ret != -ENOENT))
2506 ret = ice_hash_moveout(pf,
2507 &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP]);
2508 if (ret && (ret != -ENOENT))
2511 ret = ice_hash_moveout(pf,
2512 &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_UDP]);
2513 if (ret && (ret != -ENOENT))
2516 ret = ice_hash_moveout(pf,
2517 &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP]);
2518 if (ret && (ret != -ENOENT))
2521 ret = ice_hash_moveout(pf,
2522 &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_UDP]);
2523 if (ret && (ret != -ENOENT))
2527 case ICE_HASH_GTPU_CTX_UP_IP:
2528 ret = ice_hash_remove(pf,
2529 &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_UDP]);
2530 if (ret && (ret != -ENOENT))
2533 ret = ice_hash_remove(pf,
2534 &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_TCP]);
2535 if (ret && (ret != -ENOENT))
2538 ret = ice_hash_moveout(pf,
2539 &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP]);
2540 if (ret && (ret != -ENOENT))
2543 ret = ice_hash_moveout(pf,
2544 &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_UDP]);
2545 if (ret && (ret != -ENOENT))
2548 ret = ice_hash_moveout(pf,
2549 &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_TCP]);
2550 if (ret && (ret != -ENOENT))
2554 case ICE_HASH_GTPU_CTX_UP_IP_UDP:
2555 case ICE_HASH_GTPU_CTX_UP_IP_TCP:
2556 ret = ice_hash_moveout(pf,
2557 &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP]);
2558 if (ret && (ret != -ENOENT))
2561 ret = ice_hash_moveout(pf,
2562 &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_UDP]);
2563 if (ret && (ret != -ENOENT))
2566 ret = ice_hash_moveout(pf,
2567 &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_TCP]);
2568 if (ret && (ret != -ENOENT))
2572 case ICE_HASH_GTPU_CTX_DW_IP:
2573 ret = ice_hash_remove(pf,
2574 &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_UDP]);
2575 if (ret && (ret != -ENOENT))
2578 ret = ice_hash_remove(pf,
2579 &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_TCP]);
2580 if (ret && (ret != -ENOENT))
2583 ret = ice_hash_moveout(pf,
2584 &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP]);
2585 if (ret && (ret != -ENOENT))
2588 ret = ice_hash_moveout(pf,
2589 &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_UDP]);
2590 if (ret && (ret != -ENOENT))
2593 ret = ice_hash_moveout(pf,
2594 &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_TCP]);
2595 if (ret && (ret != -ENOENT))
2599 case ICE_HASH_GTPU_CTX_DW_IP_UDP:
2600 case ICE_HASH_GTPU_CTX_DW_IP_TCP:
2601 ret = ice_hash_moveout(pf,
2602 &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP]);
2603 if (ret && (ret != -ENOENT))
2606 ret = ice_hash_moveout(pf,
2607 &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_UDP]);
2608 if (ret && (ret != -ENOENT))
2611 ret = ice_hash_moveout(pf,
2612 &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_TCP]);
2613 if (ret && (ret != -ENOENT))
2624 static u8 calc_gtpu_ctx_idx(uint32_t hdr)
2628 if (hdr & ICE_FLOW_SEG_HDR_GTPU_EH)
2630 else if (hdr & ICE_FLOW_SEG_HDR_GTPU_UP)
2632 else if (hdr & ICE_FLOW_SEG_HDR_GTPU_DWN)
2635 return ICE_HASH_GTPU_CTX_MAX;
2638 if (hdr & ICE_FLOW_SEG_HDR_UDP)
2640 else if (hdr & ICE_FLOW_SEG_HDR_TCP)
2643 if (hdr & (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6))
2644 return eh_idx * 3 + ip_idx;
2646 return ICE_HASH_GTPU_CTX_MAX;
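/*
 * Worked example for calc_gtpu_ctx_idx(), assuming the
 * ICE_HASH_GTPU_CTX_* enum is laid out EH_IP..DW_IP_TCP = 0..8 (the
 * order the switch statements above rely on):
 *
 *   hdr = GTPU_EH | IPV4 | UDP  -> eh_idx = 0, ip_idx = 1
 *                               -> 0 * 3 + 1 = ICE_HASH_GTPU_CTX_EH_IP_UDP
 *   hdr = GTPU_DWN | IPV6 | TCP -> eh_idx = 2, ip_idx = 2
 *                               -> 2 * 3 + 2 = ICE_HASH_GTPU_CTX_DW_IP_TCP
 *
 * Any header set without a GTPU segment or without an IPv4/IPv6
 * segment falls through to ICE_HASH_GTPU_CTX_MAX, i.e. no context
 * slot is tracked for it.
 */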
2650 ice_add_rss_cfg_pre(struct ice_pf *pf, uint32_t hdr)
2652 u8 gtpu_ctx_idx = calc_gtpu_ctx_idx(hdr);
2654 if (hdr & ICE_FLOW_SEG_HDR_IPV4)
2655 return ice_add_rss_cfg_pre_gtpu(pf, &pf->hash_ctx.gtpu4,
2657 else if (hdr & ICE_FLOW_SEG_HDR_IPV6)
2658 return ice_add_rss_cfg_pre_gtpu(pf, &pf->hash_ctx.gtpu6,
2665 ice_add_rss_cfg_post_gtpu(struct ice_pf *pf, struct ice_hash_gtpu_ctx *ctx,
2666 u8 ctx_idx, struct ice_rss_hash_cfg *cfg)
2670 if (ctx_idx < ICE_HASH_GTPU_CTX_MAX)
2671 ctx->ctx[ctx_idx] = *cfg;
2674 case ICE_HASH_GTPU_CTX_EH_IP:
2676 case ICE_HASH_GTPU_CTX_EH_IP_UDP:
2677 ret = ice_hash_moveback(pf,
2678 &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP]);
2679 if (ret && (ret != -ENOENT))
2682 ret = ice_hash_moveback(pf,
2683 &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_TCP]);
2684 if (ret && (ret != -ENOENT))
2687 ret = ice_hash_moveback(pf,
2688 &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP]);
2689 if (ret && (ret != -ENOENT))
2692 ret = ice_hash_moveback(pf,
2693 &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_TCP]);
2694 if (ret && (ret != -ENOENT))
2698 case ICE_HASH_GTPU_CTX_EH_IP_TCP:
2699 ret = ice_hash_moveback(pf,
2700 &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP]);
2701 if (ret && (ret != -ENOENT))
2704 ret = ice_hash_moveback(pf,
2705 &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_UDP]);
2706 if (ret && (ret != -ENOENT))
2709 ret = ice_hash_moveback(pf,
2710 &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP]);
2711 if (ret && (ret != -ENOENT))
2714 ret = ice_hash_moveback(pf,
2715 &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_UDP]);
2716 if (ret && (ret != -ENOENT))
2720 case ICE_HASH_GTPU_CTX_UP_IP:
2721 case ICE_HASH_GTPU_CTX_UP_IP_UDP:
2722 case ICE_HASH_GTPU_CTX_UP_IP_TCP:
2723 case ICE_HASH_GTPU_CTX_DW_IP:
2724 case ICE_HASH_GTPU_CTX_DW_IP_UDP:
2725 case ICE_HASH_GTPU_CTX_DW_IP_TCP:
2726 ret = ice_hash_moveback(pf,
2727 &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP]);
2728 if (ret && (ret != -ENOENT))
2731 ret = ice_hash_moveback(pf,
2732 &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_UDP]);
2733 if (ret && (ret != -ENOENT))
2736 ret = ice_hash_moveback(pf,
2737 &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_TCP]);
2738 if (ret && (ret != -ENOENT))
2750 ice_add_rss_cfg_post(struct ice_pf *pf, struct ice_rss_hash_cfg *cfg)
2752 u8 gtpu_ctx_idx = calc_gtpu_ctx_idx(cfg->addl_hdrs);
2754 if (cfg->addl_hdrs & ICE_FLOW_SEG_HDR_IPV4)
2755 return ice_add_rss_cfg_post_gtpu(pf, &pf->hash_ctx.gtpu4,
2757 else if (cfg->addl_hdrs & ICE_FLOW_SEG_HDR_IPV6)
2758 return ice_add_rss_cfg_post_gtpu(pf, &pf->hash_ctx.gtpu6,
2765 ice_rem_rss_cfg_post(struct ice_pf *pf, uint32_t hdr)
2767 u8 gtpu_ctx_idx = calc_gtpu_ctx_idx(hdr);
2769 if (gtpu_ctx_idx >= ICE_HASH_GTPU_CTX_MAX)
2772 if (hdr & ICE_FLOW_SEG_HDR_IPV4)
2773 hash_cfg_reset(&pf->hash_ctx.gtpu4.ctx[gtpu_ctx_idx]);
2774 else if (hdr & ICE_FLOW_SEG_HDR_IPV6)
2775 hash_cfg_reset(&pf->hash_ctx.gtpu6.ctx[gtpu_ctx_idx]);
2779 ice_rem_rss_cfg_wrap(struct ice_pf *pf, uint16_t vsi_id,
2780 struct ice_rss_hash_cfg *cfg)
2782 struct ice_hw *hw = ICE_PF_TO_HW(pf);
2785 ret = ice_rem_rss_cfg(hw, vsi_id, cfg);
2786 if (ret && ret != ICE_ERR_DOES_NOT_EXIST)
2787 PMD_DRV_LOG(ERR, "remove rss cfg failed\n");
2789 ice_rem_rss_cfg_post(pf, cfg->addl_hdrs);
2795 ice_add_rss_cfg_wrap(struct ice_pf *pf, uint16_t vsi_id,
2796 struct ice_rss_hash_cfg *cfg)
2798 struct ice_hw *hw = ICE_PF_TO_HW(pf);
2801 ret = ice_add_rss_cfg_pre(pf, cfg->addl_hdrs);
2803 PMD_DRV_LOG(ERR, "add rss cfg pre failed\n");
2805 ret = ice_add_rss_cfg(hw, vsi_id, cfg);
2807 PMD_DRV_LOG(ERR, "add rss cfg failed\n");
2809 ret = ice_add_rss_cfg_post(pf, cfg);
2811 PMD_DRV_LOG(ERR, "add rss cfg post failed\n");
2817 ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
2819 struct ice_hw *hw = ICE_PF_TO_HW(pf);
2820 struct ice_vsi *vsi = pf->main_vsi;
2821 struct ice_rss_hash_cfg cfg;
2824 #define ICE_RSS_HF_ALL ( \
2827 ETH_RSS_NONFRAG_IPV4_UDP | \
2828 ETH_RSS_NONFRAG_IPV6_UDP | \
2829 ETH_RSS_NONFRAG_IPV4_TCP | \
2830 ETH_RSS_NONFRAG_IPV6_TCP | \
2831 ETH_RSS_NONFRAG_IPV4_SCTP | \
2832 ETH_RSS_NONFRAG_IPV6_SCTP)
2834 ret = ice_rem_vsi_rss_cfg(hw, vsi->idx);
2836 PMD_DRV_LOG(ERR, "%s Remove rss vsi fail %d",
2840 cfg.hdr_type = ICE_RSS_OUTER_HEADERS;
2841 /* Configure RSS for IPv4 with src/dst addr as input set */
2842 if (rss_hf & ETH_RSS_IPV4) {
2843 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER;
2844 cfg.hash_flds = ICE_FLOW_HASH_IPV4;
2845 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
2847 PMD_DRV_LOG(ERR, "%s IPV4 rss flow fail %d",
2851 /* Configure RSS for IPv6 with src/dst addr as input set */
2852 if (rss_hf & ETH_RSS_IPV6) {
2853 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER;
2854 cfg.hash_flds = ICE_FLOW_HASH_IPV6;
2855 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
2857 PMD_DRV_LOG(ERR, "%s IPV6 rss flow fail %d",
2861 /* Configure RSS for udp4 with src/dst addr and port as input set */
2862 if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) {
2863 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4 |
2864 ICE_FLOW_SEG_HDR_IPV_OTHER;
2865 cfg.hash_flds = ICE_HASH_UDP_IPV4;
2866 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
2868 PMD_DRV_LOG(ERR, "%s UDP_IPV4 rss flow fail %d",
2872 /* Configure RSS for udp6 with src/dst addr and port as input set */
2873 if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) {
2874 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV6 |
2875 ICE_FLOW_SEG_HDR_IPV_OTHER;
2876 cfg.hash_flds = ICE_HASH_UDP_IPV6;
2877 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
2879 PMD_DRV_LOG(ERR, "%s UDP_IPV6 rss flow fail %d",
2883 /* Configure RSS for tcp4 with src/dst addr and port as input set */
2884 if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) {
2885 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4 |
2886 ICE_FLOW_SEG_HDR_IPV_OTHER;
2887 cfg.hash_flds = ICE_HASH_TCP_IPV4;
2888 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
2890 PMD_DRV_LOG(ERR, "%s TCP_IPV4 rss flow fail %d",
2894 /* Configure RSS for tcp6 with src/dst addr and port as input set */
2895 if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) {
2896 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6 |
2897 ICE_FLOW_SEG_HDR_IPV_OTHER;
2898 cfg.hash_flds = ICE_HASH_TCP_IPV6;
2899 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
2901 PMD_DRV_LOG(ERR, "%s TCP_IPV6 rss flow fail %d",
2905 /* Configure RSS for sctp4 with src/dst addr and port as input set */
2906 if (rss_hf & ETH_RSS_NONFRAG_IPV4_SCTP) {
2907 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4 |
2908 ICE_FLOW_SEG_HDR_IPV_OTHER;
2909 cfg.hash_flds = ICE_HASH_SCTP_IPV4;
2910 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
2912 PMD_DRV_LOG(ERR, "%s SCTP_IPV4 rss flow fail %d",
2916 /* Configure RSS for sctp6 with src/dst addr and port as input set */
2917 if (rss_hf & ETH_RSS_NONFRAG_IPV6_SCTP) {
2918 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV6 |
2919 ICE_FLOW_SEG_HDR_IPV_OTHER;
2920 cfg.hash_flds = ICE_HASH_SCTP_IPV6;
2921 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
2923 PMD_DRV_LOG(ERR, "%s SCTP_IPV6 rss flow fail %d",
2927 if (rss_hf & ETH_RSS_IPV4) {
2928 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_GTPU_IP | ICE_FLOW_SEG_HDR_IPV4 |
2929 ICE_FLOW_SEG_HDR_IPV_OTHER;
2930 cfg.hash_flds = ICE_FLOW_HASH_IPV4;
2931 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
2933 PMD_DRV_LOG(ERR, "%s GTPU_IPV4 rss flow fail %d",
2936 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_GTPU_EH | ICE_FLOW_SEG_HDR_IPV4 |
2937 ICE_FLOW_SEG_HDR_IPV_OTHER;
2938 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
2940 PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV4 rss flow fail %d",
2943 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_IPV4 |
2944 ICE_FLOW_SEG_HDR_IPV_OTHER;
2945 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
2947 PMD_DRV_LOG(ERR, "%s PPPoE_IPV4 rss flow fail %d",
2951 if (rss_hf & ETH_RSS_IPV6) {
2952 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_GTPU_IP | ICE_FLOW_SEG_HDR_IPV6 |
2953 ICE_FLOW_SEG_HDR_IPV_OTHER;
2954 cfg.hash_flds = ICE_FLOW_HASH_IPV6;
2955 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
2957 PMD_DRV_LOG(ERR, "%s GTPU_IPV6 rss flow fail %d",
2960 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_GTPU_EH | ICE_FLOW_SEG_HDR_IPV6 |
2961 ICE_FLOW_SEG_HDR_IPV_OTHER;
2962 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
2964 PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV6 rss flow fail %d",
2967 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_IPV6 |
2968 ICE_FLOW_SEG_HDR_IPV_OTHER;
2969 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
2971 PMD_DRV_LOG(ERR, "%s PPPoE_IPV6 rss flow fail %d",
2975 if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) {
2976 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_GTPU_IP | ICE_FLOW_SEG_HDR_UDP |
2977 ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER;
2978 cfg.hash_flds = ICE_HASH_UDP_IPV4;
2979 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
2981 PMD_DRV_LOG(ERR, "%s GTPU_IPV4_UDP rss flow fail %d",
2984 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_GTPU_EH | ICE_FLOW_SEG_HDR_UDP |
2985 ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER;
2986 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
2988 PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV4_UDP rss flow fail %d",
2991 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_UDP |
2992 ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER;
2993 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
2995 PMD_DRV_LOG(ERR, "%s PPPoE_IPV4_UDP rss flow fail %d",
2999 if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) {
3000 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_GTPU_IP | ICE_FLOW_SEG_HDR_UDP |
3001 ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER;
3002 cfg.hash_flds = ICE_HASH_UDP_IPV6;
3003 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
3005 PMD_DRV_LOG(ERR, "%s GTPU_IPV6_UDP rss flow fail %d",
3008 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_GTPU_EH | ICE_FLOW_SEG_HDR_UDP |
3009 ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER;
3010 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
3012 PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV6_UDP rss flow fail %d",
3015 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_UDP |
3016 ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER;
3017 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
3019 PMD_DRV_LOG(ERR, "%s PPPoE_IPV6_UDP rss flow fail %d",
3023 if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) {
3024 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_GTPU_IP | ICE_FLOW_SEG_HDR_TCP |
3025 ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER;
3026 cfg.hash_flds = ICE_HASH_TCP_IPV4;
3027 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
3029 PMD_DRV_LOG(ERR, "%s GTPU_IPV4_TCP rss flow fail %d",
3032 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_GTPU_EH | ICE_FLOW_SEG_HDR_TCP |
3033 ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER;
3034 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
3036 PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV4_TCP rss flow fail %d",
3039 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_TCP |
3040 ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER;
3041 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
3043 PMD_DRV_LOG(ERR, "%s PPPoE_IPV4_TCP rss flow fail %d",
3047 if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) {
3048 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_GTPU_IP | ICE_FLOW_SEG_HDR_TCP |
3049 ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER;
3050 cfg.hash_flds = ICE_HASH_TCP_IPV6;
3051 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
3053 PMD_DRV_LOG(ERR, "%s GTPU_IPV6_TCP rss flow fail %d",
3056 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_GTPU_EH | ICE_FLOW_SEG_HDR_TCP |
3057 ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER;
3058 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
3060 PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV6_TCP rss flow fail %d",
3063 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_TCP |
3064 ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER;
3065 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
3067 PMD_DRV_LOG(ERR, "%s PPPoE_IPV6_TCP rss flow fail %d",
3071 pf->rss_hf = rss_hf & ICE_RSS_HF_ALL;
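/*
 * Illustrative application-side sketch (not part of the driver): the
 * rss_hf bits consumed above come straight from the ethdev
 * configuration. A minimal setup exercising the IPv4 UDP/TCP paths
 * could look like this, assuming port_id names a bound ice port:
 *
 *	#include <rte_ethdev.h>
 *
 *	struct rte_eth_conf port_conf = {
 *		.rxmode = { .mq_mode = ETH_MQ_RX_RSS },
 *		.rx_adv_conf.rss_conf = {
 *			.rss_key = NULL,	// let the PMD pick a key
 *			.rss_hf = ETH_RSS_NONFRAG_IPV4_UDP |
 *				  ETH_RSS_NONFRAG_IPV4_TCP,
 *		},
 *	};
 *	ret = rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &port_conf);
 *
 * Bits outside ICE_RSS_HF_ALL are ignored; pf->rss_hf records only
 * what was actually programmed.
 */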
3074 static int ice_init_rss(struct ice_pf *pf)
3076 struct ice_hw *hw = ICE_PF_TO_HW(pf);
3077 struct ice_vsi *vsi = pf->main_vsi;
3078 struct rte_eth_dev *dev = pf->adapter->eth_dev;
3079 struct ice_aq_get_set_rss_lut_params lut_params;
3080 struct rte_eth_rss_conf *rss_conf;
3081 struct ice_aqc_get_set_rss_keys key;
3084 bool is_safe_mode = pf->adapter->is_safe_mode;
3087 rss_conf = &dev->data->dev_conf.rx_adv_conf.rss_conf;
3088 nb_q = dev->data->nb_rx_queues;
3089 vsi->rss_key_size = ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE;
3090 vsi->rss_lut_size = pf->hash_lut_size;
3093 PMD_DRV_LOG(WARNING,
3094 "RSS is not supported as rx queues number is zero\n");
3099 PMD_DRV_LOG(WARNING, "RSS is not supported in safe mode\n");
3103 if (!vsi->rss_key) {
3104 vsi->rss_key = rte_zmalloc(NULL,
3105 vsi->rss_key_size, 0);
3106 if (vsi->rss_key == NULL) {
3107 PMD_DRV_LOG(ERR, "Failed to allocate memory for rss_key");
3111 if (!vsi->rss_lut) {
3112 vsi->rss_lut = rte_zmalloc(NULL,
3113 vsi->rss_lut_size, 0);
3114 if (vsi->rss_lut == NULL) {
3115 PMD_DRV_LOG(ERR, "Failed to allocate memory for rss_lut");
3116 rte_free(vsi->rss_key);
3117 vsi->rss_key = NULL;
3121 /* configure RSS key */
3122 if (!rss_conf->rss_key) {
3123 /* Calculate the default hash key */
3124 for (i = 0; i < vsi->rss_key_size; i++)
3125 vsi->rss_key[i] = (uint8_t)rte_rand();
3127 rte_memcpy(vsi->rss_key, rss_conf->rss_key,
3128 RTE_MIN(rss_conf->rss_key_len,
3129 vsi->rss_key_size));
3131 rte_memcpy(key.standard_rss_key, vsi->rss_key, vsi->rss_key_size);
3132 ret = ice_aq_set_rss_key(hw, vsi->idx, &key);
3136 /* init RSS LUT table */
3137 for (i = 0; i < vsi->rss_lut_size; i++)
3138 vsi->rss_lut[i] = i % nb_q;
3140 lut_params.vsi_handle = vsi->idx;
3141 lut_params.lut_size = vsi->rss_lut_size;
3142 lut_params.lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF;
3143 lut_params.lut = vsi->rss_lut;
3144 lut_params.global_lut_id = 0;
3145 ret = ice_aq_set_rss_lut(hw, &lut_params);
3149 /* Enable the symmetric Toeplitz hash scheme in the VSI hash control register. */
3150 reg = ICE_READ_REG(hw, VSIQF_HASH_CTL(vsi->vsi_id));
3151 reg = (reg & (~VSIQF_HASH_CTL_HASH_SCHEME_M)) |
3152 (1 << VSIQF_HASH_CTL_HASH_SCHEME_S);
3153 ICE_WRITE_REG(hw, VSIQF_HASH_CTL(vsi->vsi_id), reg);
3155 /* RSS hash configuration */
3156 ice_rss_hash_set(pf, rss_conf->rss_hf);
3160 rte_free(vsi->rss_key);
3161 vsi->rss_key = NULL;
3162 rte_free(vsi->rss_lut);
3163 vsi->rss_lut = NULL;
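/*
 * Example of the default LUT written above: with lut_size = 512 and
 * nb_q = 4 Rx queues, lut[] becomes { 0, 1, 2, 3, 0, 1, 2, 3, ... },
 * spreading flows round-robin across the queues until the table is
 * overwritten via ice_rss_reta_update().
 */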
3168 ice_dev_configure(struct rte_eth_dev *dev)
3170 struct ice_adapter *ad =
3171 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
3172 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3175 /* Initialize to TRUE. If any of the Rx queues doesn't meet the
3176 * bulk allocation or vector Rx preconditions, we will reset it.
3178 ad->rx_bulk_alloc_allowed = true;
3179 ad->tx_simple_allowed = true;
3181 if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
3182 dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
3184 if (dev->data->nb_rx_queues) {
3185 ret = ice_init_rss(pf);
3187 PMD_DRV_LOG(ERR, "Failed to enable rss for PF");
3196 __vsi_queues_bind_intr(struct ice_vsi *vsi, uint16_t msix_vect,
3197 int base_queue, int nb_queue)
3199 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
3200 uint32_t val, val_tx;
3203 for (i = 0; i < nb_queue; i++) {
3205 val = (msix_vect & QINT_RQCTL_MSIX_INDX_M) |
3206 (0 << QINT_RQCTL_ITR_INDX_S) | QINT_RQCTL_CAUSE_ENA_M;
3207 val_tx = (msix_vect & QINT_TQCTL_MSIX_INDX_M) |
3208 (0 << QINT_TQCTL_ITR_INDX_S) | QINT_TQCTL_CAUSE_ENA_M;
3210 PMD_DRV_LOG(INFO, "queue %d is binding to vect %d",
3211 base_queue + i, msix_vect);
3212 /* set ITR0 value */
3213 ICE_WRITE_REG(hw, GLINT_ITR(0, msix_vect), 0x2);
3214 ICE_WRITE_REG(hw, QINT_RQCTL(base_queue + i), val);
3215 ICE_WRITE_REG(hw, QINT_TQCTL(base_queue + i), val_tx);
3220 ice_vsi_queues_bind_intr(struct ice_vsi *vsi)
3222 struct rte_eth_dev *dev = vsi->adapter->eth_dev;
3223 struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
3224 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
3225 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
3226 uint16_t msix_vect = vsi->msix_intr;
3227 uint16_t nb_msix = RTE_MIN(vsi->nb_msix, intr_handle->nb_efd);
3228 uint16_t queue_idx = 0;
3232 /* clear Rx/Tx queue interrupt */
3233 for (i = 0; i < vsi->nb_used_qps; i++) {
3234 ICE_WRITE_REG(hw, QINT_TQCTL(vsi->base_queue + i), 0);
3235 ICE_WRITE_REG(hw, QINT_RQCTL(vsi->base_queue + i), 0);
3238 /* PF bind interrupt */
3239 if (rte_intr_dp_is_en(intr_handle)) {
3244 for (i = 0; i < vsi->nb_used_qps; i++) {
3246 if (!rte_intr_allow_others(intr_handle))
3247 msix_vect = ICE_MISC_VEC_ID;
3249 /* uio: map all queues to one msix_vect */
3250 __vsi_queues_bind_intr(vsi, msix_vect,
3251 vsi->base_queue + i,
3252 vsi->nb_used_qps - i);
3254 for (; !!record && i < vsi->nb_used_qps; i++)
3255 intr_handle->intr_vec[queue_idx + i] =
3260 /* vfio 1:1 queue/msix_vect mapping */
3261 __vsi_queues_bind_intr(vsi, msix_vect,
3262 vsi->base_queue + i, 1);
3265 intr_handle->intr_vec[queue_idx + i] = msix_vect;
3273 ice_vsi_enable_queues_intr(struct ice_vsi *vsi)
3275 struct rte_eth_dev *dev = vsi->adapter->eth_dev;
3276 struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
3277 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
3278 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
3279 uint16_t msix_intr, i;
3281 if (rte_intr_allow_others(intr_handle))
3282 for (i = 0; i < vsi->nb_used_qps; i++) {
3283 msix_intr = vsi->msix_intr + i;
3284 ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr),
3285 GLINT_DYN_CTL_INTENA_M |
3286 GLINT_DYN_CTL_CLEARPBA_M |
3287 GLINT_DYN_CTL_ITR_INDX_M |
3288 GLINT_DYN_CTL_WB_ON_ITR_M);
3291 ICE_WRITE_REG(hw, GLINT_DYN_CTL(0),
3292 GLINT_DYN_CTL_INTENA_M |
3293 GLINT_DYN_CTL_CLEARPBA_M |
3294 GLINT_DYN_CTL_ITR_INDX_M |
3295 GLINT_DYN_CTL_WB_ON_ITR_M);
3299 ice_rxq_intr_setup(struct rte_eth_dev *dev)
3301 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3302 struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
3303 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
3304 struct ice_vsi *vsi = pf->main_vsi;
3305 uint32_t intr_vector = 0;
3307 rte_intr_disable(intr_handle);
3309 /* check and configure queue intr-vector mapping */
3310 if ((rte_intr_cap_multiple(intr_handle) ||
3311 !RTE_ETH_DEV_SRIOV(dev).active) &&
3312 dev->data->dev_conf.intr_conf.rxq != 0) {
3313 intr_vector = dev->data->nb_rx_queues;
3314 if (intr_vector > ICE_MAX_INTR_QUEUE_NUM) {
3315 PMD_DRV_LOG(ERR, "At most %d intr queues supported",
3316 ICE_MAX_INTR_QUEUE_NUM);
3319 if (rte_intr_efd_enable(intr_handle, intr_vector))
3323 if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
3324 intr_handle->intr_vec =
3325 rte_zmalloc(NULL, dev->data->nb_rx_queues * sizeof(int),
3327 if (!intr_handle->intr_vec) {
3329 "Failed to allocate %d rx_queues intr_vec",
3330 dev->data->nb_rx_queues);
3335 /* Map queues with MSIX interrupt */
3336 vsi->nb_used_qps = dev->data->nb_rx_queues;
3337 ice_vsi_queues_bind_intr(vsi);
3339 /* Enable interrupts for all the queues */
3340 ice_vsi_enable_queues_intr(vsi);
3342 rte_intr_enable(intr_handle);
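/*
 * Application-side sketch of the Rx interrupt path armed above (an
 * illustration; port_id and queue_id are assumed to refer to a
 * configured ice port). The efd/intr_vec plumbing only exists when
 * the application opted in before configuring the port:
 *
 *	struct rte_eth_conf conf = { 0 };
 *
 *	conf.intr_conf.rxq = 1;
 *	rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
 *	...
 *	rte_eth_dev_rx_intr_enable(port_id, queue_id);
 *	// block until traffic arrives, e.g. via rte_epoll_wait()
 *	rte_eth_dev_rx_intr_disable(port_id, queue_id);
 */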
3348 ice_get_init_link_status(struct rte_eth_dev *dev)
3350 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3351 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3352 bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false;
3353 struct ice_link_status link_status;
3356 ret = ice_aq_get_link_info(hw->port_info, enable_lse,
3357 &link_status, NULL);
3358 if (ret != ICE_SUCCESS) {
3359 PMD_DRV_LOG(ERR, "Failed to get link info");
3360 pf->init_link_up = false;
3364 if (link_status.link_info & ICE_AQ_LINK_UP)
3365 pf->init_link_up = true;
3369 ice_dev_start(struct rte_eth_dev *dev)
3371 struct rte_eth_dev_data *data = dev->data;
3372 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3373 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3374 struct ice_vsi *vsi = pf->main_vsi;
3375 uint16_t nb_rxq = 0;
3377 uint16_t max_frame_size;
3380 /* program Tx queues' context in hardware */
3381 for (nb_txq = 0; nb_txq < data->nb_tx_queues; nb_txq++) {
3382 ret = ice_tx_queue_start(dev, nb_txq);
3384 PMD_DRV_LOG(ERR, "fail to start Tx queue %u", nb_txq);
3389 /* program Rx queues' context in hardware */
3390 for (nb_rxq = 0; nb_rxq < data->nb_rx_queues; nb_rxq++) {
3391 ret = ice_rx_queue_start(dev, nb_rxq);
3393 PMD_DRV_LOG(ERR, "fail to start Rx queue %u", nb_rxq);
3398 ice_set_rx_function(dev);
3399 ice_set_tx_function(dev);
3401 mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
3402 ETH_VLAN_EXTEND_MASK;
3403 ret = ice_vlan_offload_set(dev, mask);
3405 PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
3409 /* enable Rx interrupts and map Rx queues to interrupt vectors */
3410 if (ice_rxq_intr_setup(dev))
3413 /* Enable receiving broadcast packets and transmitting packets */
3414 ret = ice_set_vsi_promisc(hw, vsi->idx,
3415 ICE_PROMISC_BCAST_RX | ICE_PROMISC_BCAST_TX |
3416 ICE_PROMISC_UCAST_TX | ICE_PROMISC_MCAST_TX,
3418 if (ret != ICE_SUCCESS)
3419 PMD_DRV_LOG(INFO, "fail to set vsi broadcast");
3421 ret = ice_aq_set_event_mask(hw, hw->port_info->lport,
3422 ((u16)(ICE_AQ_LINK_EVENT_LINK_FAULT |
3423 ICE_AQ_LINK_EVENT_PHY_TEMP_ALARM |
3424 ICE_AQ_LINK_EVENT_EXCESSIVE_ERRORS |
3425 ICE_AQ_LINK_EVENT_SIGNAL_DETECT |
3426 ICE_AQ_LINK_EVENT_AN_COMPLETED |
3427 ICE_AQ_LINK_EVENT_PORT_TX_SUSPENDED)),
3429 if (ret != ICE_SUCCESS)
3430 PMD_DRV_LOG(WARNING, "Failed to set phy mask");
3432 ice_get_init_link_status(dev);
3434 ice_dev_set_link_up(dev);
3436 /* Call the get_link_info aq command to enable/disable LSE */
3437 ice_link_update(dev, 0);
3439 pf->adapter_stopped = false;
3441 /* Set the max frame size to the default value */
3442 max_frame_size = pf->dev_data->dev_conf.rxmode.max_rx_pkt_len ?
3443 pf->dev_data->dev_conf.rxmode.max_rx_pkt_len :
3446 /* Write the max frame size to HW */
3447 ice_aq_set_mac_cfg(hw, max_frame_size, NULL);
3451 /* stop the started queues if we failed to start all of them */
3453 for (i = 0; i < nb_rxq; i++)
3454 ice_rx_queue_stop(dev, i);
3456 for (i = 0; i < nb_txq; i++)
3457 ice_tx_queue_stop(dev, i);
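/*
 * Minimal bring-up sketch from the application's side (illustrative;
 * port_id, port_conf and mb_pool, an mbuf mempool created beforehand,
 * are assumptions), showing when ice_dev_start() runs:
 *
 *	rte_eth_dev_configure(port_id, 1, 1, &port_conf);
 *	rte_eth_rx_queue_setup(port_id, 0, 1024,
 *			       rte_eth_dev_socket_id(port_id), NULL, mb_pool);
 *	rte_eth_tx_queue_setup(port_id, 0, 1024,
 *			       rte_eth_dev_socket_id(port_id), NULL);
 *	ret = rte_eth_dev_start(port_id);	// ends up in ice_dev_start()
 *
 * On any queue start failure the error path above unwinds only the
 * queues that were already started.
 */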
3463 ice_dev_reset(struct rte_eth_dev *dev)
3467 if (dev->data->sriov.active)
3470 ret = ice_dev_uninit(dev);
3472 PMD_INIT_LOG(ERR, "failed to uninit device, status = %d", ret);
3476 ret = ice_dev_init(dev);
3478 PMD_INIT_LOG(ERR, "failed to init device, status = %d", ret);
3486 ice_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
3488 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3489 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3490 struct ice_vsi *vsi = pf->main_vsi;
3491 struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
3492 bool is_safe_mode = pf->adapter->is_safe_mode;
3496 dev_info->min_rx_bufsize = ICE_BUF_SIZE_MIN;
3497 dev_info->max_rx_pktlen = ICE_FRAME_SIZE_MAX;
3498 dev_info->max_rx_queues = vsi->nb_qps;
3499 dev_info->max_tx_queues = vsi->nb_qps;
3500 dev_info->max_mac_addrs = vsi->max_macaddrs;
3501 dev_info->max_vfs = pci_dev->max_vfs;
3502 dev_info->max_mtu = dev_info->max_rx_pktlen - ICE_ETH_OVERHEAD;
3503 dev_info->min_mtu = RTE_ETHER_MIN_MTU;
3505 dev_info->rx_offload_capa =
3506 DEV_RX_OFFLOAD_VLAN_STRIP |
3507 DEV_RX_OFFLOAD_JUMBO_FRAME |
3508 DEV_RX_OFFLOAD_KEEP_CRC |
3509 DEV_RX_OFFLOAD_SCATTER |
3510 DEV_RX_OFFLOAD_VLAN_FILTER;
3511 dev_info->tx_offload_capa =
3512 DEV_TX_OFFLOAD_VLAN_INSERT |
3513 DEV_TX_OFFLOAD_TCP_TSO |
3514 DEV_TX_OFFLOAD_MULTI_SEGS |
3515 DEV_TX_OFFLOAD_MBUF_FAST_FREE;
3516 dev_info->flow_type_rss_offloads = 0;
3518 if (!is_safe_mode) {
3519 dev_info->rx_offload_capa |=
3520 DEV_RX_OFFLOAD_IPV4_CKSUM |
3521 DEV_RX_OFFLOAD_UDP_CKSUM |
3522 DEV_RX_OFFLOAD_TCP_CKSUM |
3523 DEV_RX_OFFLOAD_QINQ_STRIP |
3524 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
3525 DEV_RX_OFFLOAD_VLAN_EXTEND |
3526 DEV_RX_OFFLOAD_RSS_HASH;
3527 dev_info->tx_offload_capa |=
3528 DEV_TX_OFFLOAD_QINQ_INSERT |
3529 DEV_TX_OFFLOAD_IPV4_CKSUM |
3530 DEV_TX_OFFLOAD_UDP_CKSUM |
3531 DEV_TX_OFFLOAD_TCP_CKSUM |
3532 DEV_TX_OFFLOAD_SCTP_CKSUM |
3533 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
3534 DEV_TX_OFFLOAD_OUTER_UDP_CKSUM;
3535 dev_info->flow_type_rss_offloads |= ICE_RSS_OFFLOAD_ALL;
3538 dev_info->rx_queue_offload_capa = 0;
3539 dev_info->tx_queue_offload_capa = 0;
3541 dev_info->reta_size = pf->hash_lut_size;
3542 dev_info->hash_key_size = (VSIQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
3544 dev_info->default_rxconf = (struct rte_eth_rxconf) {
3546 .pthresh = ICE_DEFAULT_RX_PTHRESH,
3547 .hthresh = ICE_DEFAULT_RX_HTHRESH,
3548 .wthresh = ICE_DEFAULT_RX_WTHRESH,
3550 .rx_free_thresh = ICE_DEFAULT_RX_FREE_THRESH,
3555 dev_info->default_txconf = (struct rte_eth_txconf) {
3557 .pthresh = ICE_DEFAULT_TX_PTHRESH,
3558 .hthresh = ICE_DEFAULT_TX_HTHRESH,
3559 .wthresh = ICE_DEFAULT_TX_WTHRESH,
3561 .tx_free_thresh = ICE_DEFAULT_TX_FREE_THRESH,
3562 .tx_rs_thresh = ICE_DEFAULT_TX_RSBIT_THRESH,
3566 dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
3567 .nb_max = ICE_MAX_RING_DESC,
3568 .nb_min = ICE_MIN_RING_DESC,
3569 .nb_align = ICE_ALIGN_RING_DESC,
3572 dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
3573 .nb_max = ICE_MAX_RING_DESC,
3574 .nb_min = ICE_MIN_RING_DESC,
3575 .nb_align = ICE_ALIGN_RING_DESC,
3578 dev_info->speed_capa = ETH_LINK_SPEED_10M |
3579 ETH_LINK_SPEED_100M |
3581 ETH_LINK_SPEED_2_5G |
3583 ETH_LINK_SPEED_10G |
3584 ETH_LINK_SPEED_20G |
3587 phy_type_low = hw->port_info->phy.phy_type_low;
3588 phy_type_high = hw->port_info->phy.phy_type_high;
3590 if (ICE_PHY_TYPE_SUPPORT_50G(phy_type_low))
3591 dev_info->speed_capa |= ETH_LINK_SPEED_50G;
3593 if (ICE_PHY_TYPE_SUPPORT_100G_LOW(phy_type_low) ||
3594 ICE_PHY_TYPE_SUPPORT_100G_HIGH(phy_type_high))
3595 dev_info->speed_capa |= ETH_LINK_SPEED_100G;
3597 dev_info->nb_rx_queues = dev->data->nb_rx_queues;
3598 dev_info->nb_tx_queues = dev->data->nb_tx_queues;
3600 dev_info->default_rxportconf.burst_size = ICE_RX_MAX_BURST;
3601 dev_info->default_txportconf.burst_size = ICE_TX_MAX_BURST;
3602 dev_info->default_rxportconf.nb_queues = 1;
3603 dev_info->default_txportconf.nb_queues = 1;
3604 dev_info->default_rxportconf.ring_size = ICE_BUF_SIZE_MIN;
3605 dev_info->default_txportconf.ring_size = ICE_BUF_SIZE_MIN;
3611 ice_atomic_read_link_status(struct rte_eth_dev *dev,
3612 struct rte_eth_link *link)
3614 struct rte_eth_link *dst = link;
3615 struct rte_eth_link *src = &dev->data->dev_link;
3617 if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
3618 *(uint64_t *)src) == 0)
3625 ice_atomic_write_link_status(struct rte_eth_dev *dev,
3626 struct rte_eth_link *link)
3628 struct rte_eth_link *dst = &dev->data->dev_link;
3629 struct rte_eth_link *src = link;
3631 if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
3632 *(uint64_t *)src) == 0)
3639 ice_link_update(struct rte_eth_dev *dev, int wait_to_complete)
3641 #define CHECK_INTERVAL 100 /* 100ms */
3642 #define MAX_REPEAT_TIME 10 /* 1s (10 * 100ms) in total */
3643 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3644 struct ice_link_status link_status;
3645 struct rte_eth_link link, old;
3647 unsigned int rep_cnt = MAX_REPEAT_TIME;
3648 bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false;
3650 memset(&link, 0, sizeof(link));
3651 memset(&old, 0, sizeof(old));
3652 memset(&link_status, 0, sizeof(link_status));
3653 ice_atomic_read_link_status(dev, &old);
3656 /* Get link status information from hardware */
3657 status = ice_aq_get_link_info(hw->port_info, enable_lse,
3658 &link_status, NULL);
3659 if (status != ICE_SUCCESS) {
3660 link.link_speed = ETH_SPEED_NUM_100M;
3661 link.link_duplex = ETH_LINK_FULL_DUPLEX;
3662 PMD_DRV_LOG(ERR, "Failed to get link info");
3666 link.link_status = link_status.link_info & ICE_AQ_LINK_UP;
3667 if (!wait_to_complete || link.link_status)
3670 rte_delay_ms(CHECK_INTERVAL);
3671 } while (--rep_cnt);
3673 if (!link.link_status)
3676 /* Full-duplex operation at all supported speeds */
3677 link.link_duplex = ETH_LINK_FULL_DUPLEX;
3679 /* Parse the link status */
3680 switch (link_status.link_speed) {
3681 case ICE_AQ_LINK_SPEED_10MB:
3682 link.link_speed = ETH_SPEED_NUM_10M;
3684 case ICE_AQ_LINK_SPEED_100MB:
3685 link.link_speed = ETH_SPEED_NUM_100M;
3687 case ICE_AQ_LINK_SPEED_1000MB:
3688 link.link_speed = ETH_SPEED_NUM_1G;
3690 case ICE_AQ_LINK_SPEED_2500MB:
3691 link.link_speed = ETH_SPEED_NUM_2_5G;
3693 case ICE_AQ_LINK_SPEED_5GB:
3694 link.link_speed = ETH_SPEED_NUM_5G;
3696 case ICE_AQ_LINK_SPEED_10GB:
3697 link.link_speed = ETH_SPEED_NUM_10G;
3699 case ICE_AQ_LINK_SPEED_20GB:
3700 link.link_speed = ETH_SPEED_NUM_20G;
3702 case ICE_AQ_LINK_SPEED_25GB:
3703 link.link_speed = ETH_SPEED_NUM_25G;
3705 case ICE_AQ_LINK_SPEED_40GB:
3706 link.link_speed = ETH_SPEED_NUM_40G;
3708 case ICE_AQ_LINK_SPEED_50GB:
3709 link.link_speed = ETH_SPEED_NUM_50G;
3711 case ICE_AQ_LINK_SPEED_100GB:
3712 link.link_speed = ETH_SPEED_NUM_100G;
3714 case ICE_AQ_LINK_SPEED_UNKNOWN:
3715 PMD_DRV_LOG(ERR, "Unknown link speed");
3716 link.link_speed = ETH_SPEED_NUM_UNKNOWN;
3719 PMD_DRV_LOG(ERR, "No link speed");
3720 link.link_speed = ETH_SPEED_NUM_NONE;
3724 link.link_autoneg = !(dev->data->dev_conf.link_speeds &
3725 ETH_LINK_SPEED_FIXED);
3728 ice_atomic_write_link_status(dev, &link);
3729 if (link.link_status == old.link_status)
3735 /* Force the physical link state by getting the current PHY capabilities from
3736 * hardware and setting the PHY config based on the determined capabilities. If
3737 * link changes, link event will be triggered because both the Enable Automatic
3738 * Link Update and LESM Enable bits are set when setting the PHY capabilities.
3740 static enum ice_status
3741 ice_force_phys_link_state(struct ice_hw *hw, bool link_up)
3743 struct ice_aqc_set_phy_cfg_data cfg = { 0 };
3744 struct ice_aqc_get_phy_caps_data *pcaps;
3745 struct ice_port_info *pi;
3746 enum ice_status status;
3748 if (!hw || !hw->port_info)
3749 return ICE_ERR_PARAM;
3753 pcaps = (struct ice_aqc_get_phy_caps_data *)
3754 ice_malloc(hw, sizeof(*pcaps));
3756 return ICE_ERR_NO_MEMORY;
3758 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG,
3763 /* No change in link */
3764 if (link_up == !!(pcaps->caps & ICE_AQC_PHY_EN_LINK) &&
3765 link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP))
3768 cfg.phy_type_low = pcaps->phy_type_low;
3769 cfg.phy_type_high = pcaps->phy_type_high;
3770 cfg.caps = pcaps->caps | ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
3771 cfg.low_power_ctrl_an = pcaps->low_power_ctrl_an;
3772 cfg.eee_cap = pcaps->eee_cap;
3773 cfg.eeer_value = pcaps->eeer_value;
3774 cfg.link_fec_opt = pcaps->link_fec_options;
3776 cfg.caps |= ICE_AQ_PHY_ENA_LINK;
3778 cfg.caps &= ~ICE_AQ_PHY_ENA_LINK;
3780 status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL);
3783 ice_free(hw, pcaps);
3788 ice_dev_set_link_up(struct rte_eth_dev *dev)
3790 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3792 return ice_force_phys_link_state(hw, true);
3796 ice_dev_set_link_down(struct rte_eth_dev *dev)
3798 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3800 return ice_force_phys_link_state(hw, false);
3804 ice_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
3806 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3807 struct rte_eth_dev_data *dev_data = pf->dev_data;
3808 uint32_t frame_size = mtu + ICE_ETH_OVERHEAD;
3810 /* check if mtu is within the allowed range */
3811 if (mtu < RTE_ETHER_MIN_MTU || frame_size > ICE_FRAME_SIZE_MAX)
3814 /* MTU setting is forbidden if the port is started */
3815 if (dev_data->dev_started) {
3817 "port %d must be stopped before configuration",
3822 if (frame_size > ICE_ETH_MAX_LEN)
3823 dev_data->dev_conf.rxmode.offloads |=
3824 DEV_RX_OFFLOAD_JUMBO_FRAME;
3826 dev_data->dev_conf.rxmode.offloads &=
3827 ~DEV_RX_OFFLOAD_JUMBO_FRAME;
3829 dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
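/*
 * Worked example for the check above (assuming ICE_ETH_OVERHEAD counts
 * L2 header + CRC + two VLAN tags = 26 bytes, and ICE_ETH_MAX_LEN =
 * RTE_ETHER_MTU + ICE_ETH_OVERHEAD): rte_eth_dev_set_mtu(port_id, 1500)
 * yields frame_size = 1526, which does not exceed ICE_ETH_MAX_LEN, so
 * DEV_RX_OFFLOAD_JUMBO_FRAME stays cleared; an MTU of 9000 yields 9026
 * and enables the jumbo frame offload.
 */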
3834 static int ice_macaddr_set(struct rte_eth_dev *dev,
3835 struct rte_ether_addr *mac_addr)
3837 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3838 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3839 struct ice_vsi *vsi = pf->main_vsi;
3840 struct ice_mac_filter *f;
3844 if (!rte_is_valid_assigned_ether_addr(mac_addr)) {
3845 PMD_DRV_LOG(ERR, "Tried to set invalid MAC address.");
3849 TAILQ_FOREACH(f, &vsi->mac_list, next) {
3850 if (rte_is_same_ether_addr(&pf->dev_addr, &f->mac_info.mac_addr))
3855 PMD_DRV_LOG(ERR, "Failed to find filter for default mac");
3859 ret = ice_remove_mac_filter(vsi, &f->mac_info.mac_addr);
3860 if (ret != ICE_SUCCESS) {
3861 PMD_DRV_LOG(ERR, "Failed to delete mac filter");
3864 ret = ice_add_mac_filter(vsi, mac_addr);
3865 if (ret != ICE_SUCCESS) {
3866 PMD_DRV_LOG(ERR, "Failed to add mac filter");
3869 rte_ether_addr_copy(mac_addr, &pf->dev_addr);
3871 flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL;
3872 ret = ice_aq_manage_mac_write(hw, mac_addr->addr_bytes, flags, NULL);
3873 if (ret != ICE_SUCCESS)
3874 PMD_DRV_LOG(ERR, "Failed to set manage mac");
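/*
 * Application-side usage sketch (illustrative; port_id is an
 * assumption): replacing the default MAC goes through the generic
 * ethdev API, which lands in ice_macaddr_set() above:
 *
 *	struct rte_ether_addr new_mac =
 *		{ .addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 } };
 *
 *	ret = rte_eth_dev_default_mac_addr_set(port_id, &new_mac);
 *
 * The address must be a valid assigned unicast address or the driver
 * rejects it before touching the filter list.
 */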
3879 /* Add a MAC address, and update filters */
3881 ice_macaddr_add(struct rte_eth_dev *dev,
3882 struct rte_ether_addr *mac_addr,
3883 __rte_unused uint32_t index,
3884 __rte_unused uint32_t pool)
3886 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3887 struct ice_vsi *vsi = pf->main_vsi;
3890 ret = ice_add_mac_filter(vsi, mac_addr);
3891 if (ret != ICE_SUCCESS) {
3892 PMD_DRV_LOG(ERR, "Failed to add MAC filter");
3899 /* Remove a MAC address, and update filters */
3901 ice_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
3903 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3904 struct ice_vsi *vsi = pf->main_vsi;
3905 struct rte_eth_dev_data *data = dev->data;
3906 struct rte_ether_addr *macaddr;
3909 macaddr = &data->mac_addrs[index];
3910 ret = ice_remove_mac_filter(vsi, macaddr);
3912 PMD_DRV_LOG(ERR, "Failed to remove MAC filter");
3918 ice_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
3920 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3921 struct ice_vlan vlan = ICE_VLAN(RTE_ETHER_TYPE_VLAN, vlan_id);
3922 struct ice_vsi *vsi = pf->main_vsi;
3925 PMD_INIT_FUNC_TRACE();
3928 * VLAN 0 is the generic filter for untagged packets
3929 * and can't be removed or added by the user.
3935 ret = ice_add_vlan_filter(vsi, &vlan);
3937 PMD_DRV_LOG(ERR, "Failed to add vlan filter");
3941 ret = ice_remove_vlan_filter(vsi, &vlan);
3943 PMD_DRV_LOG(ERR, "Failed to remove vlan filter");
3951 /* In Single VLAN Mode (SVM), single VLAN filters via ICE_SW_LKUP_VLAN are
3952 * based on the inner VLAN ID, so the VLAN TPID (i.e. 0x8100 or 0x88a8)
3953 * doesn't matter. In Double VLAN Mode (DVM), outer/single VLAN filters via
3954 * ICE_SW_LKUP_VLAN are based on the outer/single VLAN ID + VLAN TPID.
3956 * For both modes add a VLAN 0 + no VLAN TPID filter to handle untagged traffic
3957 * when VLAN pruning is enabled. Also, this handles VLAN 0 priority tagged
3958 * traffic in SVM, since the VLAN TPID isn't part of filtering.
3960 * If DVM is enabled then an explicit VLAN 0 + VLAN TPID filter needs to be
3961 * added to allow VLAN 0 priority tagged traffic in DVM, since the VLAN TPID is
3962 * part of filtering.
3965 ice_vsi_add_vlan_zero(struct ice_vsi *vsi)
3967 struct ice_vlan vlan;
3970 vlan = ICE_VLAN(0, 0);
3971 err = ice_add_vlan_filter(vsi, &vlan);
3973 PMD_DRV_LOG(DEBUG, "Failed to add VLAN ID 0");
3977 /* in SVM both VLAN 0 filters are identical */
3978 if (!ice_is_dvm_ena(&vsi->adapter->hw))
3981 vlan = ICE_VLAN(RTE_ETHER_TYPE_VLAN, 0);
3982 err = ice_add_vlan_filter(vsi, &vlan);
3984 PMD_DRV_LOG(DEBUG, "Failed to add VLAN ID 0 in double VLAN mode");
3992 * Delete the VLAN 0 filters in the same manner that they were added in
3993 * ice_vsi_add_vlan_zero.
3996 ice_vsi_del_vlan_zero(struct ice_vsi *vsi)
3998 struct ice_vlan vlan;
4001 vlan = ICE_VLAN(0, 0);
4002 err = ice_remove_vlan_filter(vsi, &vlan);
4004 PMD_DRV_LOG(DEBUG, "Failed to remove VLAN ID 0");
4008 /* in SVM both VLAN 0 filters are identical */
4009 if (!ice_is_dvm_ena(&vsi->adapter->hw))
4012 vlan = ICE_VLAN(RTE_ETHER_TYPE_VLAN, 0);
4013 err = ice_remove_vlan_filter(vsi, &vlan);
4015 PMD_DRV_LOG(DEBUG, "Failed to remove VLAN ID 0 in double VLAN mode");
4022 /* Configure vlan filter on or off */
4024 ice_vsi_config_vlan_filter(struct ice_vsi *vsi, bool on)
4026 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
4027 struct ice_vsi_ctx ctxt;
4031 sw_flags2 = ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
4034 vsi->info.sw_flags2 |= sw_flags2;
4036 vsi->info.sw_flags2 &= ~sw_flags2;
4038 vsi->info.sw_id = hw->port_info->sw_id;
4039 (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
4040 ctxt.info.valid_sections =
4041 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_SW_VALID |
4042 ICE_AQ_VSI_PROP_SECURITY_VALID);
4043 ctxt.vsi_num = vsi->vsi_id;
4045 ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
4047 PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan rx pruning",
4048 on ? "enable" : "disable");
4051 vsi->info.valid_sections |=
4052 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_SW_VALID |
4053 ICE_AQ_VSI_PROP_SECURITY_VALID);
4056 /* consistent with other drivers, allow untagged packets when vlan filter is on */
4058 ret = ice_vsi_add_vlan_zero(vsi);
4060 ret = ice_vsi_del_vlan_zero(vsi);
4065 /* Manage VLAN stripping for the VSI for Rx */
4067 ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena)
4069 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
4070 struct ice_vsi_ctx ctxt;
4071 enum ice_status status;
4074 /* do not allow modifying VLAN stripping when a port VLAN is configured
4077 if (vsi->info.port_based_inner_vlan)
4080 memset(&ctxt, 0, sizeof(ctxt));
4083 /* Strip VLAN tag from Rx packet and put it in the desc */
4084 ctxt.info.inner_vlan_flags =
4085 ICE_AQ_VSI_INNER_VLAN_EMODE_STR_BOTH;
4087 /* Disable stripping. Leave tag in packet */
4088 ctxt.info.inner_vlan_flags =
4089 ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING;
4091 /* Allow all packets untagged/tagged */
4092 ctxt.info.inner_vlan_flags |= ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL;
4094 ctxt.info.valid_sections = rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID);
4096 status = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
4098 PMD_DRV_LOG(ERR, "Update VSI failed to %s vlan stripping",
4099 ena ? "enable" : "disable");
4102 vsi->info.inner_vlan_flags = ctxt.info.inner_vlan_flags;
4109 ice_vsi_ena_inner_stripping(struct ice_vsi *vsi)
4111 return ice_vsi_manage_vlan_stripping(vsi, true);
4115 ice_vsi_dis_inner_stripping(struct ice_vsi *vsi)
4117 return ice_vsi_manage_vlan_stripping(vsi, false);
4120 static int ice_vsi_ena_outer_stripping(struct ice_vsi *vsi)
4122 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
4123 struct ice_vsi_ctx ctxt;
4124 enum ice_status status;
4127 /* do not allow modifying VLAN stripping when a port VLAN is configured
4130 if (vsi->info.port_based_outer_vlan)
4133 memset(&ctxt, 0, sizeof(ctxt));
4135 ctxt.info.valid_sections =
4136 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);
4137 /* clear current outer VLAN strip settings */
4138 ctxt.info.outer_vlan_flags = vsi->info.outer_vlan_flags &
4139 ~(ICE_AQ_VSI_OUTER_VLAN_EMODE_M | ICE_AQ_VSI_OUTER_TAG_TYPE_M);
4140 ctxt.info.outer_vlan_flags |=
4141 (ICE_AQ_VSI_OUTER_VLAN_EMODE_SHOW_BOTH <<
4142 ICE_AQ_VSI_OUTER_VLAN_EMODE_S) |
4143 (ICE_AQ_VSI_OUTER_TAG_VLAN_8100 <<
4144 ICE_AQ_VSI_OUTER_TAG_TYPE_S);
4146 status = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
4148 PMD_DRV_LOG(ERR, "Update VSI failed to enable outer VLAN stripping");
4151 vsi->info.outer_vlan_flags = ctxt.info.outer_vlan_flags;
4158 ice_vsi_dis_outer_stripping(struct ice_vsi *vsi)
4160 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
4161 struct ice_vsi_ctx ctxt;
4162 enum ice_status status;
4165 if (vsi->info.port_based_outer_vlan)
4168 memset(&ctxt, 0, sizeof(ctxt));
4170 ctxt.info.valid_sections =
4171 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);
4172 /* clear current outer VLAN strip settings */
4173 ctxt.info.outer_vlan_flags = vsi->info.outer_vlan_flags &
4174 ~ICE_AQ_VSI_OUTER_VLAN_EMODE_M;
4175 ctxt.info.outer_vlan_flags |= ICE_AQ_VSI_OUTER_VLAN_EMODE_NOTHING <<
4176 ICE_AQ_VSI_OUTER_VLAN_EMODE_S;
4178 status = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
4180 PMD_DRV_LOG(ERR, "Update VSI failed to disable outer VLAN stripping");
4183 vsi->info.outer_vlan_flags = ctxt.info.outer_vlan_flags;
4190 ice_vsi_config_vlan_stripping(struct ice_vsi *vsi, bool ena)
4192 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
4195 if (ice_is_dvm_ena(hw)) {
4197 ret = ice_vsi_ena_outer_stripping(vsi);
4199 ret = ice_vsi_dis_outer_stripping(vsi);
4202 ret = ice_vsi_ena_inner_stripping(vsi);
4204 ret = ice_vsi_dis_inner_stripping(vsi);
4211 ice_vlan_offload_set(struct rte_eth_dev *dev, int mask)
4213 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4214 struct ice_vsi *vsi = pf->main_vsi;
4215 struct rte_eth_rxmode *rxmode;
4217 rxmode = &dev->data->dev_conf.rxmode;
4218 if (mask & ETH_VLAN_FILTER_MASK) {
4219 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
4220 ice_vsi_config_vlan_filter(vsi, true);
4222 ice_vsi_config_vlan_filter(vsi, false);
4225 if (mask & ETH_VLAN_STRIP_MASK) {
4226 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
4227 ice_vsi_config_vlan_stripping(vsi, true);
4229 ice_vsi_config_vlan_stripping(vsi, false);
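/*
 * Illustrative runtime toggle (application side): the generic ethdev
 * call below updates rxmode.offloads and then invokes
 * ice_vlan_offload_set() with a mask of the bits that changed
 * (a sketch; port_id is assumed valid and started):
 *
 *	int cur = rte_eth_dev_get_vlan_offload(port_id);
 *
 *	ret = rte_eth_dev_set_vlan_offload(port_id,
 *					   cur | ETH_VLAN_STRIP_OFFLOAD);
 */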
4236 ice_get_rss_lut(struct ice_vsi *vsi, uint8_t *lut, uint16_t lut_size)
4238 struct ice_aq_get_set_rss_lut_params lut_params;
4239 struct ice_pf *pf = ICE_VSI_TO_PF(vsi);
4240 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
4246 if (pf->flags & ICE_FLAG_RSS_AQ_CAPABLE) {
4247 lut_params.vsi_handle = vsi->idx;
4248 lut_params.lut_size = lut_size;
4249 lut_params.lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF;
4250 lut_params.lut = lut;
4251 lut_params.global_lut_id = 0;
4252 ret = ice_aq_get_rss_lut(hw, &lut_params);
4254 PMD_DRV_LOG(ERR, "Failed to get RSS lookup table");
4258 uint64_t *lut_dw = (uint64_t *)lut;
4259 uint16_t i, lut_size_dw = lut_size / 4;
4261 for (i = 0; i < lut_size_dw; i++)
4262 lut_dw[i] = ICE_READ_REG(hw, PFQF_HLUT(i));
4269 ice_set_rss_lut(struct ice_vsi *vsi, uint8_t *lut, uint16_t lut_size)
4271 struct ice_aq_get_set_rss_lut_params lut_params;
4279 pf = ICE_VSI_TO_PF(vsi);
4280 hw = ICE_VSI_TO_HW(vsi);
4282 if (pf->flags & ICE_FLAG_RSS_AQ_CAPABLE) {
4283 lut_params.vsi_handle = vsi->idx;
4284 lut_params.lut_size = lut_size;
4285 lut_params.lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF;
4286 lut_params.lut = lut;
4287 lut_params.global_lut_id = 0;
4288 ret = ice_aq_set_rss_lut(hw, &lut_params);
4290 PMD_DRV_LOG(ERR, "Failed to set RSS lookup table");
4294 uint64_t *lut_dw = (uint64_t *)lut;
4295 uint16_t i, lut_size_dw = lut_size / 4;
4297 for (i = 0; i < lut_size_dw; i++)
4298 ICE_WRITE_REG(hw, PFQF_HLUT(i), lut_dw[i]);
4307 ice_rss_reta_update(struct rte_eth_dev *dev,
4308 struct rte_eth_rss_reta_entry64 *reta_conf,
4311 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4312 uint16_t i, lut_size = pf->hash_lut_size;
4313 uint16_t idx, shift;
4317 if (reta_size != ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128 &&
4318 reta_size != ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512 &&
4319 reta_size != ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K) {
4321 "The size of hash lookup table configured (%d)"
4322 "doesn't match the number hardware can "
4323 "supported (128, 512, 2048)",
4328 /* It MUST use the current LUT size to get the RSS lookup table,
4329 * otherwise it will fail with a -100 error code.
4331 lut = rte_zmalloc(NULL, RTE_MAX(reta_size, lut_size), 0);
4333 PMD_DRV_LOG(ERR, "No memory can be allocated");
4336 ret = ice_get_rss_lut(pf->main_vsi, lut, lut_size);
4340 for (i = 0; i < reta_size; i++) {
4341 idx = i / RTE_RETA_GROUP_SIZE;
4342 shift = i % RTE_RETA_GROUP_SIZE;
4343 if (reta_conf[idx].mask & (1ULL << shift))
4344 lut[i] = reta_conf[idx].reta[shift];
4346 ret = ice_set_rss_lut(pf->main_vsi, lut, reta_size);
4347 if (ret == 0 && lut_size != reta_size) {
4349 "The size of hash lookup table is changed from (%d) to (%d)",
4350 lut_size, reta_size);
4351 pf->hash_lut_size = reta_size;
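/*
 * Illustrative RETA update from the application (a sketch; port_id is
 * an assumption): spread a 512-entry table across 4 queues. Each
 * rte_eth_rss_reta_entry64 covers RTE_RETA_GROUP_SIZE (64) entries,
 * gated by the per-entry mask bits checked above:
 *
 *	struct rte_eth_rss_reta_entry64 reta_conf[512 / RTE_RETA_GROUP_SIZE];
 *	uint16_t i;
 *
 *	memset(reta_conf, 0, sizeof(reta_conf));
 *	for (i = 0; i < 512; i++) {
 *		reta_conf[i / RTE_RETA_GROUP_SIZE].mask |=
 *			1ULL << (i % RTE_RETA_GROUP_SIZE);
 *		reta_conf[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] =
 *			i % 4;
 *	}
 *	ret = rte_eth_dev_rss_reta_update(port_id, reta_conf, 512);
 */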
4361 ice_rss_reta_query(struct rte_eth_dev *dev,
4362 struct rte_eth_rss_reta_entry64 *reta_conf,
4365 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4366 uint16_t i, lut_size = pf->hash_lut_size;
4367 uint16_t idx, shift;
4371 if (reta_size != lut_size) {
4373 "The size of hash lookup table configured (%d)"
4374 "doesn't match the number hardware can "
4376 reta_size, lut_size);
4380 lut = rte_zmalloc(NULL, reta_size, 0);
4382 PMD_DRV_LOG(ERR, "No memory can be allocated");
4386 ret = ice_get_rss_lut(pf->main_vsi, lut, reta_size);
4390 for (i = 0; i < reta_size; i++) {
4391 idx = i / RTE_RETA_GROUP_SIZE;
4392 shift = i % RTE_RETA_GROUP_SIZE;
4393 if (reta_conf[idx].mask & (1ULL << shift))
4394 reta_conf[idx].reta[shift] = lut[i];
4404 ice_set_rss_key(struct ice_vsi *vsi, uint8_t *key, uint8_t key_len)
4406 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
4409 if (!key || key_len == 0) {
4410 PMD_DRV_LOG(DEBUG, "No key to be configured");
4412 } else if (key_len != (VSIQF_HKEY_MAX_INDEX + 1) *
4414 PMD_DRV_LOG(ERR, "Invalid key length %u", key_len);
4418 struct ice_aqc_get_set_rss_keys *key_dw =
4419 (struct ice_aqc_get_set_rss_keys *)key;
4421 ret = ice_aq_set_rss_key(hw, vsi->idx, key_dw);
4423 PMD_DRV_LOG(ERR, "Failed to configure RSS key via AQ");
4431 ice_get_rss_key(struct ice_vsi *vsi, uint8_t *key, uint8_t *key_len)
4433 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
4436 if (!key || !key_len)
4439 ret = ice_aq_get_rss_key
4441 (struct ice_aqc_get_set_rss_keys *)key);
4443 PMD_DRV_LOG(ERR, "Failed to get RSS key via AQ");
4446 *key_len = (VSIQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
4452 ice_rss_hash_update(struct rte_eth_dev *dev,
4453 struct rte_eth_rss_conf *rss_conf)
4455 enum ice_status status = ICE_SUCCESS;
4456 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4457 struct ice_vsi *vsi = pf->main_vsi;
4460 status = ice_set_rss_key(vsi, rss_conf->rss_key, rss_conf->rss_key_len);
4464 if (rss_conf->rss_hf == 0) {
4469 /* RSS hash configuration */
4470 ice_rss_hash_set(pf, rss_conf->rss_hf);
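/*
 * Illustrative runtime rehash (application side, a sketch; port_id is
 * an assumption): changing the active hash fields after start goes
 * through the generic API, which lands in ice_rss_hash_update() above:
 *
 *	struct rte_eth_rss_conf conf = {
 *		.rss_key = NULL,	// keep the current key
 *		.rss_hf = ETH_RSS_NONFRAG_IPV6_TCP,
 *	};
 *
 *	ret = rte_eth_dev_rss_hash_update(port_id, &conf);
 */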
4476 ice_rss_hash_conf_get(struct rte_eth_dev *dev,
4477 struct rte_eth_rss_conf *rss_conf)
4479 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4480 struct ice_vsi *vsi = pf->main_vsi;
4482 ice_get_rss_key(vsi, rss_conf->rss_key,
4483 &rss_conf->rss_key_len);
4485 rss_conf->rss_hf = pf->rss_hf;
4490 ice_promisc_enable(struct rte_eth_dev *dev)
4492 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4493 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4494 struct ice_vsi *vsi = pf->main_vsi;
4495 enum ice_status status;
4499 pmask = ICE_PROMISC_UCAST_RX | ICE_PROMISC_UCAST_TX |
4500 ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX;
4502 status = ice_set_vsi_promisc(hw, vsi->idx, pmask, 0);
4504 case ICE_ERR_ALREADY_EXISTS:
4505 PMD_DRV_LOG(DEBUG, "Promisc mode has already been enabled");
4509 PMD_DRV_LOG(ERR, "Failed to enable promisc, err=%d", status);
4517 ice_promisc_disable(struct rte_eth_dev *dev)
4519 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4520 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4521 struct ice_vsi *vsi = pf->main_vsi;
4522 enum ice_status status;
4526 pmask = ICE_PROMISC_UCAST_RX | ICE_PROMISC_UCAST_TX |
4527 ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX;
4529 status = ice_clear_vsi_promisc(hw, vsi->idx, pmask, 0);
4530 if (status != ICE_SUCCESS) {
4531 PMD_DRV_LOG(ERR, "Failed to clear promisc, err=%d", status);
4539 ice_allmulti_enable(struct rte_eth_dev *dev)
4541 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4542 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4543 struct ice_vsi *vsi = pf->main_vsi;
4544 enum ice_status status;
4548 pmask = ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX;
4550 status = ice_set_vsi_promisc(hw, vsi->idx, pmask, 0);
4553 case ICE_ERR_ALREADY_EXISTS:
4554 PMD_DRV_LOG(DEBUG, "Allmulti has already been enabled");
4558 PMD_DRV_LOG(ERR, "Failed to enable allmulti, err=%d", status);
4566 ice_allmulti_disable(struct rte_eth_dev *dev)
4568 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4569 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4570 struct ice_vsi *vsi = pf->main_vsi;
4571 enum ice_status status;
4575 if (dev->data->promiscuous == 1)
4576 return 0; /* must remain in all_multicast mode */
4578 pmask = ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX;
4580 status = ice_clear_vsi_promisc(hw, vsi->idx, pmask, 0);
4581 if (status != ICE_SUCCESS) {
4582 PMD_DRV_LOG(ERR, "Failed to clear allmulti, err=%d", status);
4589 static int ice_rx_queue_intr_enable(struct rte_eth_dev *dev,
4592 struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
4593 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
4594 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4598 msix_intr = intr_handle->intr_vec[queue_id];
4600 val = GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
4601 GLINT_DYN_CTL_ITR_INDX_M;
4602 val &= ~GLINT_DYN_CTL_WB_ON_ITR_M;
4604 ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr), val);
4605 rte_intr_ack(&pci_dev->intr_handle);
4610 static int ice_rx_queue_intr_disable(struct rte_eth_dev *dev,
4613 struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
4614 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
4615 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4618 msix_intr = intr_handle->intr_vec[queue_id];
4620 ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr), GLINT_DYN_CTL_WB_ON_ITR_M);
4626 ice_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
4628 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4633 ver = hw->flash.orom.major;
4634 patch = hw->flash.orom.patch;
4635 build = hw->flash.orom.build;
4637 ret = snprintf(fw_version, fw_size,
4638 "%x.%02x 0x%08x %d.%d.%d",
4639 hw->flash.nvm.major,
4640 hw->flash.nvm.minor,
4641 hw->flash.nvm.eetrack,
4644 /* add the size of '\0' */
4646 if (fw_size < (u32)ret)
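/*
 * Usage sketch (illustrative; port_id is an assumption): the string
 * built above is what applications see through the generic API, and
 * a positive return value reports the buffer size actually needed:
 *
 *	char fw_ver[64];
 *
 *	ret = rte_eth_dev_fw_version_get(port_id, fw_ver, sizeof(fw_ver));
 *	if (ret == 0)
 *		printf("firmware: %s\n", fw_ver);
 *	else if (ret > 0)
 *		printf("need a %d-byte buffer\n", ret);
 */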
static int
ice_vsi_vlan_pvid_set(struct ice_vsi *vsi, struct ice_vsi_vlan_pvid_info *info)
{
	struct ice_hw *hw;
	struct ice_vsi_ctx ctxt;
	uint8_t vlan_flags = 0;
	int ret;

	if (!vsi || !info) {
		PMD_DRV_LOG(ERR, "invalid parameters");
		return -EINVAL;
	}

	if (info->on) {
		vsi->info.port_based_inner_vlan = info->config.pvid;
		/**
		 * If insert pvid is enabled, only tagged pkts are
		 * allowed to be sent out.
		 */
		vlan_flags = ICE_AQ_VSI_INNER_VLAN_INSERT_PVID |
			     ICE_AQ_VSI_INNER_VLAN_TX_MODE_ACCEPTUNTAGGED;
	} else {
		vsi->info.port_based_inner_vlan = 0;
		if (info->config.reject.tagged == 0)
			vlan_flags |= ICE_AQ_VSI_INNER_VLAN_TX_MODE_ACCEPTTAGGED;

		if (info->config.reject.untagged == 0)
			vlan_flags |= ICE_AQ_VSI_INNER_VLAN_TX_MODE_ACCEPTUNTAGGED;
	}
	vsi->info.inner_vlan_flags &= ~(ICE_AQ_VSI_INNER_VLAN_INSERT_PVID |
					ICE_AQ_VSI_INNER_VLAN_EMODE_M);
	vsi->info.inner_vlan_flags |= vlan_flags;
	memset(&ctxt, 0, sizeof(ctxt));
	rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
	ctxt.info.valid_sections =
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID);
	ctxt.vsi_num = vsi->vsi_id;

	hw = ICE_VSI_TO_HW(vsi);
	ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
	if (ret != ICE_SUCCESS) {
		PMD_DRV_LOG(ERR,
			    "update VSI for VLAN insert failed, err %d",
			    ret);
		return -EINVAL;
	}

	vsi->info.valid_sections |=
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID);

	return ret;
}
static int
ice_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_vsi *vsi = pf->main_vsi;
	struct rte_eth_dev_data *data = pf->dev_data;
	struct ice_vsi_vlan_pvid_info info;
	int ret;

	memset(&info, 0, sizeof(info));
	info.on = on;
	if (info.on) {
		info.config.pvid = pvid;
	} else {
		info.config.reject.tagged =
			data->dev_conf.txmode.hw_vlan_reject_tagged;
		info.config.reject.untagged =
			data->dev_conf.txmode.hw_vlan_reject_untagged;
	}

	ret = ice_vsi_vlan_pvid_set(vsi, &info);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Failed to set pvid.");
		return -EINVAL;
	}

	return 0;
}
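/*
 * Usage sketch (illustrative): the handler above is reached via the generic
 * ethdev call; "on" enables insertion of the given port VLAN id on Tx.
 *
 *	rte_eth_dev_set_vlan_pvid(port_id, 100, 1);	(insert PVID 100)
 *	rte_eth_dev_set_vlan_pvid(port_id, 0, 0);	(disable insertion)
 */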
static int
ice_get_eeprom_length(struct rte_eth_dev *dev)
{
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	return hw->flash.flash_size;
}
static int
ice_get_eeprom(struct rte_eth_dev *dev,
	       struct rte_dev_eeprom_info *eeprom)
{
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	enum ice_status status = ICE_SUCCESS;
	uint8_t *data = eeprom->data;

	eeprom->magic = hw->vendor_id | (hw->device_id << 16);

	status = ice_acquire_nvm(hw, ICE_RES_READ);
	if (status) {
		PMD_DRV_LOG(ERR, "acquire nvm failed.");
		return -EIO;
	}

	status = ice_read_flat_nvm(hw, eeprom->offset, &eeprom->length,
				   data, false);
	ice_release_nvm(hw);
	if (status) {
		PMD_DRV_LOG(ERR, "EEPROM read failed.");
		return -EIO;
	}

	return 0;
}
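/*
 * Usage sketch (illustrative): reading the first bytes of the NVM through
 * the generic API; offset + length must stay within the size reported by
 * rte_eth_dev_get_eeprom_length(). The buffer size is an assumption.
 *
 *	uint8_t buf[128];
 *	struct rte_dev_eeprom_info info = {
 *		.data = buf, .offset = 0, .length = sizeof(buf),
 *	};
 *
 *	rte_eth_dev_get_eeprom(port_id, &info);
 */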
static void
ice_stat_update_32(struct ice_hw *hw, uint32_t reg, bool offset_loaded,
		   uint64_t *offset, uint64_t *stat)
{
	uint64_t new_data;

	new_data = (uint64_t)ICE_READ_REG(hw, reg);
	if (!offset_loaded)
		*offset = new_data;
	if (new_data >= *offset)
		*stat = (uint64_t)(new_data - *offset);
	else
		*stat = (uint64_t)((new_data +
				    ((uint64_t)1 << ICE_32_BIT_WIDTH))
				   - *offset);
}
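/*
 * Worked example for the wrap handling above: with offset = 0xFFFFFF00 and
 * a fresh 32-bit read of 0x10, the counter has rolled over, so the delta is
 * (0x10 + 2^32) - 0xFFFFFF00 = 0x110 (272) rather than a huge underflowed
 * difference.
 */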
static void
ice_stat_update_40(struct ice_hw *hw, uint32_t hireg, uint32_t loreg,
		   bool offset_loaded, uint64_t *offset, uint64_t *stat)
{
	uint64_t new_data;

	new_data = (uint64_t)ICE_READ_REG(hw, loreg);
	new_data |= (uint64_t)(ICE_READ_REG(hw, hireg) & ICE_8_BIT_MASK) <<
		    ICE_32_BIT_WIDTH;
	if (!offset_loaded)
		*offset = new_data;
	if (new_data >= *offset)
		*stat = new_data - *offset;
	else
		*stat = (uint64_t)((new_data +
				    ((uint64_t)1 << ICE_40_BIT_WIDTH)) -
				   *offset);
	*stat &= ICE_40_BIT_MASK;
}
/* Get all the statistics of a VSI */
static void
ice_update_vsi_stats(struct ice_vsi *vsi)
{
	struct ice_eth_stats *oes = &vsi->eth_stats_offset;
	struct ice_eth_stats *nes = &vsi->eth_stats;
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	int idx = rte_le_to_cpu_16(vsi->vsi_id);

	ice_stat_update_40(hw, GLV_GORCH(idx), GLV_GORCL(idx),
			   vsi->offset_loaded, &oes->rx_bytes,
			   &nes->rx_bytes);
	ice_stat_update_40(hw, GLV_UPRCH(idx), GLV_UPRCL(idx),
			   vsi->offset_loaded, &oes->rx_unicast,
			   &nes->rx_unicast);
	ice_stat_update_40(hw, GLV_MPRCH(idx), GLV_MPRCL(idx),
			   vsi->offset_loaded, &oes->rx_multicast,
			   &nes->rx_multicast);
	ice_stat_update_40(hw, GLV_BPRCH(idx), GLV_BPRCL(idx),
			   vsi->offset_loaded, &oes->rx_broadcast,
			   &nes->rx_broadcast);
	/* enlarge the limitation when rx_bytes overflowed */
	if (vsi->offset_loaded) {
		if (ICE_RXTX_BYTES_LOW(vsi->old_rx_bytes) > nes->rx_bytes)
			nes->rx_bytes += (uint64_t)1 << ICE_40_BIT_WIDTH;
		nes->rx_bytes += ICE_RXTX_BYTES_HIGH(vsi->old_rx_bytes);
	}
	vsi->old_rx_bytes = nes->rx_bytes;
	/* exclude CRC bytes */
	nes->rx_bytes -= (nes->rx_unicast + nes->rx_multicast +
			  nes->rx_broadcast) * RTE_ETHER_CRC_LEN;
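	/*
	 * Note: the 40-bit hardware byte counters wrap after ~1.1 TB, so
	 * they are extended in software: old_rx_bytes holds the last 64-bit
	 * total, ICE_RXTX_BYTES_LOW() isolates its low 40 bits to detect a
	 * wrap, and ICE_RXTX_BYTES_HIGH() carries the accumulated upper
	 * bits forward into the new total.
	 */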
	ice_stat_update_32(hw, GLV_RDPC(idx), vsi->offset_loaded,
			   &oes->rx_discards, &nes->rx_discards);
	/* GLV_REPC not supported */
	/* GLV_RMPC not supported */
	ice_stat_update_32(hw, GLSWID_RUPP(idx), vsi->offset_loaded,
			   &oes->rx_unknown_protocol,
			   &nes->rx_unknown_protocol);
	ice_stat_update_40(hw, GLV_GOTCH(idx), GLV_GOTCL(idx),
			   vsi->offset_loaded, &oes->tx_bytes,
			   &nes->tx_bytes);
	ice_stat_update_40(hw, GLV_UPTCH(idx), GLV_UPTCL(idx),
			   vsi->offset_loaded, &oes->tx_unicast,
			   &nes->tx_unicast);
	ice_stat_update_40(hw, GLV_MPTCH(idx), GLV_MPTCL(idx),
			   vsi->offset_loaded, &oes->tx_multicast,
			   &nes->tx_multicast);
	ice_stat_update_40(hw, GLV_BPTCH(idx), GLV_BPTCL(idx),
			   vsi->offset_loaded, &oes->tx_broadcast,
			   &nes->tx_broadcast);
	/* GLV_TDPC not supported */
	ice_stat_update_32(hw, GLV_TEPC(idx), vsi->offset_loaded,
			   &oes->tx_errors, &nes->tx_errors);
	/* enlarge the limitation when tx_bytes overflowed */
	if (vsi->offset_loaded) {
		if (ICE_RXTX_BYTES_LOW(vsi->old_tx_bytes) > nes->tx_bytes)
			nes->tx_bytes += (uint64_t)1 << ICE_40_BIT_WIDTH;
		nes->tx_bytes += ICE_RXTX_BYTES_HIGH(vsi->old_tx_bytes);
	}
	vsi->old_tx_bytes = nes->tx_bytes;
	vsi->offset_loaded = true;
	PMD_DRV_LOG(DEBUG, "************** VSI[%u] stats start **************",
		    vsi->vsi_id);
	PMD_DRV_LOG(DEBUG, "rx_bytes: %"PRIu64"", nes->rx_bytes);
	PMD_DRV_LOG(DEBUG, "rx_unicast: %"PRIu64"", nes->rx_unicast);
	PMD_DRV_LOG(DEBUG, "rx_multicast: %"PRIu64"", nes->rx_multicast);
	PMD_DRV_LOG(DEBUG, "rx_broadcast: %"PRIu64"", nes->rx_broadcast);
	PMD_DRV_LOG(DEBUG, "rx_discards: %"PRIu64"", nes->rx_discards);
	PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
		    nes->rx_unknown_protocol);
	PMD_DRV_LOG(DEBUG, "tx_bytes: %"PRIu64"", nes->tx_bytes);
	PMD_DRV_LOG(DEBUG, "tx_unicast: %"PRIu64"", nes->tx_unicast);
	PMD_DRV_LOG(DEBUG, "tx_multicast: %"PRIu64"", nes->tx_multicast);
	PMD_DRV_LOG(DEBUG, "tx_broadcast: %"PRIu64"", nes->tx_broadcast);
	PMD_DRV_LOG(DEBUG, "tx_discards: %"PRIu64"", nes->tx_discards);
	PMD_DRV_LOG(DEBUG, "tx_errors: %"PRIu64"", nes->tx_errors);
	PMD_DRV_LOG(DEBUG, "************** VSI[%u] stats end ****************",
		    vsi->vsi_id);
}
static void
ice_read_stats_registers(struct ice_pf *pf, struct ice_hw *hw)
{
	struct ice_hw_port_stats *ns = &pf->stats; /* new stats */
	struct ice_hw_port_stats *os = &pf->stats_offset; /* old stats */

	/* Get statistics of struct ice_eth_stats */
	ice_stat_update_40(hw, GLPRT_GORCH(hw->port_info->lport),
			   GLPRT_GORCL(hw->port_info->lport),
			   pf->offset_loaded, &os->eth.rx_bytes,
			   &ns->eth.rx_bytes);
	ice_stat_update_40(hw, GLPRT_UPRCH(hw->port_info->lport),
			   GLPRT_UPRCL(hw->port_info->lport),
			   pf->offset_loaded, &os->eth.rx_unicast,
			   &ns->eth.rx_unicast);
	ice_stat_update_40(hw, GLPRT_MPRCH(hw->port_info->lport),
			   GLPRT_MPRCL(hw->port_info->lport),
			   pf->offset_loaded, &os->eth.rx_multicast,
			   &ns->eth.rx_multicast);
	ice_stat_update_40(hw, GLPRT_BPRCH(hw->port_info->lport),
			   GLPRT_BPRCL(hw->port_info->lport),
			   pf->offset_loaded, &os->eth.rx_broadcast,
			   &ns->eth.rx_broadcast);
	ice_stat_update_32(hw, PRTRPB_RDPC,
			   pf->offset_loaded, &os->eth.rx_discards,
			   &ns->eth.rx_discards);
	/* enlarge the limitation when rx_bytes overflowed */
	if (pf->offset_loaded) {
		if (ICE_RXTX_BYTES_LOW(pf->old_rx_bytes) > ns->eth.rx_bytes)
			ns->eth.rx_bytes += (uint64_t)1 << ICE_40_BIT_WIDTH;
		ns->eth.rx_bytes += ICE_RXTX_BYTES_HIGH(pf->old_rx_bytes);
	}
	pf->old_rx_bytes = ns->eth.rx_bytes;

	/* Workaround: CRC size should not be included in byte statistics,
	 * so subtract RTE_ETHER_CRC_LEN from the byte counter for each rx
	 * packet.
	 */
	ns->eth.rx_bytes -= (ns->eth.rx_unicast + ns->eth.rx_multicast +
			     ns->eth.rx_broadcast) * RTE_ETHER_CRC_LEN;
	/* GLPRT_REPC not supported */
	/* GLPRT_RMPC not supported */
	ice_stat_update_32(hw, GLSWID_RUPP(hw->port_info->lport),
			   pf->offset_loaded,
			   &os->eth.rx_unknown_protocol,
			   &ns->eth.rx_unknown_protocol);
	ice_stat_update_40(hw, GLPRT_GOTCH(hw->port_info->lport),
			   GLPRT_GOTCL(hw->port_info->lport),
			   pf->offset_loaded, &os->eth.tx_bytes,
			   &ns->eth.tx_bytes);
	ice_stat_update_40(hw, GLPRT_UPTCH(hw->port_info->lport),
			   GLPRT_UPTCL(hw->port_info->lport),
			   pf->offset_loaded, &os->eth.tx_unicast,
			   &ns->eth.tx_unicast);
	ice_stat_update_40(hw, GLPRT_MPTCH(hw->port_info->lport),
			   GLPRT_MPTCL(hw->port_info->lport),
			   pf->offset_loaded, &os->eth.tx_multicast,
			   &ns->eth.tx_multicast);
	ice_stat_update_40(hw, GLPRT_BPTCH(hw->port_info->lport),
			   GLPRT_BPTCL(hw->port_info->lport),
			   pf->offset_loaded, &os->eth.tx_broadcast,
			   &ns->eth.tx_broadcast);
	/* enlarge the limitation when tx_bytes overflowed */
	if (pf->offset_loaded) {
		if (ICE_RXTX_BYTES_LOW(pf->old_tx_bytes) > ns->eth.tx_bytes)
			ns->eth.tx_bytes += (uint64_t)1 << ICE_40_BIT_WIDTH;
		ns->eth.tx_bytes += ICE_RXTX_BYTES_HIGH(pf->old_tx_bytes);
	}
	pf->old_tx_bytes = ns->eth.tx_bytes;
	ns->eth.tx_bytes -= (ns->eth.tx_unicast + ns->eth.tx_multicast +
			     ns->eth.tx_broadcast) * RTE_ETHER_CRC_LEN;
	/* GLPRT_TEPC not supported */

	/* additional port specific stats */
	ice_stat_update_32(hw, GLPRT_TDOLD(hw->port_info->lport),
			   pf->offset_loaded, &os->tx_dropped_link_down,
			   &ns->tx_dropped_link_down);
	ice_stat_update_32(hw, GLPRT_CRCERRS(hw->port_info->lport),
			   pf->offset_loaded, &os->crc_errors,
			   &ns->crc_errors);
	ice_stat_update_32(hw, GLPRT_ILLERRC(hw->port_info->lport),
			   pf->offset_loaded, &os->illegal_bytes,
			   &ns->illegal_bytes);
	/* GLPRT_ERRBC not supported */
	ice_stat_update_32(hw, GLPRT_MLFC(hw->port_info->lport),
			   pf->offset_loaded, &os->mac_local_faults,
			   &ns->mac_local_faults);
	ice_stat_update_32(hw, GLPRT_MRFC(hw->port_info->lport),
			   pf->offset_loaded, &os->mac_remote_faults,
			   &ns->mac_remote_faults);

	ice_stat_update_32(hw, GLPRT_RLEC(hw->port_info->lport),
			   pf->offset_loaded, &os->rx_len_errors,
			   &ns->rx_len_errors);

	ice_stat_update_32(hw, GLPRT_LXONRXC(hw->port_info->lport),
			   pf->offset_loaded, &os->link_xon_rx,
			   &ns->link_xon_rx);
	ice_stat_update_32(hw, GLPRT_LXOFFRXC(hw->port_info->lport),
			   pf->offset_loaded, &os->link_xoff_rx,
			   &ns->link_xoff_rx);
	ice_stat_update_32(hw, GLPRT_LXONTXC(hw->port_info->lport),
			   pf->offset_loaded, &os->link_xon_tx,
			   &ns->link_xon_tx);
	ice_stat_update_32(hw, GLPRT_LXOFFTXC(hw->port_info->lport),
			   pf->offset_loaded, &os->link_xoff_tx,
			   &ns->link_xoff_tx);
	ice_stat_update_40(hw, GLPRT_PRC64H(hw->port_info->lport),
			   GLPRT_PRC64L(hw->port_info->lport),
			   pf->offset_loaded, &os->rx_size_64,
			   &ns->rx_size_64);
	ice_stat_update_40(hw, GLPRT_PRC127H(hw->port_info->lport),
			   GLPRT_PRC127L(hw->port_info->lport),
			   pf->offset_loaded, &os->rx_size_127,
			   &ns->rx_size_127);
	ice_stat_update_40(hw, GLPRT_PRC255H(hw->port_info->lport),
			   GLPRT_PRC255L(hw->port_info->lport),
			   pf->offset_loaded, &os->rx_size_255,
			   &ns->rx_size_255);
	ice_stat_update_40(hw, GLPRT_PRC511H(hw->port_info->lport),
			   GLPRT_PRC511L(hw->port_info->lport),
			   pf->offset_loaded, &os->rx_size_511,
			   &ns->rx_size_511);
	ice_stat_update_40(hw, GLPRT_PRC1023H(hw->port_info->lport),
			   GLPRT_PRC1023L(hw->port_info->lport),
			   pf->offset_loaded, &os->rx_size_1023,
			   &ns->rx_size_1023);
	ice_stat_update_40(hw, GLPRT_PRC1522H(hw->port_info->lport),
			   GLPRT_PRC1522L(hw->port_info->lport),
			   pf->offset_loaded, &os->rx_size_1522,
			   &ns->rx_size_1522);
	ice_stat_update_40(hw, GLPRT_PRC9522H(hw->port_info->lport),
			   GLPRT_PRC9522L(hw->port_info->lport),
			   pf->offset_loaded, &os->rx_size_big,
			   &ns->rx_size_big);
	ice_stat_update_32(hw, GLPRT_RUC(hw->port_info->lport),
			   pf->offset_loaded, &os->rx_undersize,
			   &ns->rx_undersize);
	ice_stat_update_32(hw, GLPRT_RFC(hw->port_info->lport),
			   pf->offset_loaded, &os->rx_fragments,
			   &ns->rx_fragments);
	ice_stat_update_32(hw, GLPRT_ROC(hw->port_info->lport),
			   pf->offset_loaded, &os->rx_oversize,
			   &ns->rx_oversize);
	ice_stat_update_32(hw, GLPRT_RJC(hw->port_info->lport),
			   pf->offset_loaded, &os->rx_jabber,
			   &ns->rx_jabber);
	ice_stat_update_40(hw, GLPRT_PTC64H(hw->port_info->lport),
			   GLPRT_PTC64L(hw->port_info->lport),
			   pf->offset_loaded, &os->tx_size_64,
			   &ns->tx_size_64);
	ice_stat_update_40(hw, GLPRT_PTC127H(hw->port_info->lport),
			   GLPRT_PTC127L(hw->port_info->lport),
			   pf->offset_loaded, &os->tx_size_127,
			   &ns->tx_size_127);
	ice_stat_update_40(hw, GLPRT_PTC255H(hw->port_info->lport),
			   GLPRT_PTC255L(hw->port_info->lport),
			   pf->offset_loaded, &os->tx_size_255,
			   &ns->tx_size_255);
	ice_stat_update_40(hw, GLPRT_PTC511H(hw->port_info->lport),
			   GLPRT_PTC511L(hw->port_info->lport),
			   pf->offset_loaded, &os->tx_size_511,
			   &ns->tx_size_511);
	ice_stat_update_40(hw, GLPRT_PTC1023H(hw->port_info->lport),
			   GLPRT_PTC1023L(hw->port_info->lport),
			   pf->offset_loaded, &os->tx_size_1023,
			   &ns->tx_size_1023);
	ice_stat_update_40(hw, GLPRT_PTC1522H(hw->port_info->lport),
			   GLPRT_PTC1522L(hw->port_info->lport),
			   pf->offset_loaded, &os->tx_size_1522,
			   &ns->tx_size_1522);
	ice_stat_update_40(hw, GLPRT_PTC9522H(hw->port_info->lport),
			   GLPRT_PTC9522L(hw->port_info->lport),
			   pf->offset_loaded, &os->tx_size_big,
			   &ns->tx_size_big);

	/* GLPRT_MSPDC not supported */
	/* GLPRT_XEC not supported */

	pf->offset_loaded = true;

	if (pf->main_vsi)
		ice_update_vsi_stats(pf->main_vsi);
}
/* Get all statistics of a port */
static int
ice_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_hw_port_stats *ns = &pf->stats; /* new stats */

	/* call read registers - updates values, now write them to struct */
	ice_read_stats_registers(pf, hw);

	stats->ipackets = pf->main_vsi->eth_stats.rx_unicast +
			  pf->main_vsi->eth_stats.rx_multicast +
			  pf->main_vsi->eth_stats.rx_broadcast -
			  pf->main_vsi->eth_stats.rx_discards;
	stats->opackets = ns->eth.tx_unicast +
			  ns->eth.tx_multicast +
			  ns->eth.tx_broadcast;
	stats->ibytes = pf->main_vsi->eth_stats.rx_bytes;
	stats->obytes = ns->eth.tx_bytes;
	stats->oerrors = ns->eth.tx_errors +
			 pf->main_vsi->eth_stats.tx_errors;

	/* Rx Errors */
	stats->imissed = ns->eth.rx_discards +
			 pf->main_vsi->eth_stats.rx_discards;
	stats->ierrors = ns->crc_errors +
			 ns->rx_undersize +
			 ns->rx_oversize + ns->rx_fragments + ns->rx_jabber;
	PMD_DRV_LOG(DEBUG, "*************** PF stats start *****************");
	PMD_DRV_LOG(DEBUG, "rx_bytes: %"PRIu64"", ns->eth.rx_bytes);
	PMD_DRV_LOG(DEBUG, "rx_unicast: %"PRIu64"", ns->eth.rx_unicast);
	PMD_DRV_LOG(DEBUG, "rx_multicast:%"PRIu64"", ns->eth.rx_multicast);
	PMD_DRV_LOG(DEBUG, "rx_broadcast:%"PRIu64"", ns->eth.rx_broadcast);
	PMD_DRV_LOG(DEBUG, "rx_discards:%"PRIu64"", ns->eth.rx_discards);
	PMD_DRV_LOG(DEBUG, "vsi rx_discards:%"PRIu64"",
		    pf->main_vsi->eth_stats.rx_discards);
	PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
		    ns->eth.rx_unknown_protocol);
	PMD_DRV_LOG(DEBUG, "tx_bytes: %"PRIu64"", ns->eth.tx_bytes);
	PMD_DRV_LOG(DEBUG, "tx_unicast: %"PRIu64"", ns->eth.tx_unicast);
	PMD_DRV_LOG(DEBUG, "tx_multicast:%"PRIu64"", ns->eth.tx_multicast);
	PMD_DRV_LOG(DEBUG, "tx_broadcast:%"PRIu64"", ns->eth.tx_broadcast);
	PMD_DRV_LOG(DEBUG, "tx_discards:%"PRIu64"", ns->eth.tx_discards);
	PMD_DRV_LOG(DEBUG, "vsi tx_discards:%"PRIu64"",
		    pf->main_vsi->eth_stats.tx_discards);
	PMD_DRV_LOG(DEBUG, "tx_errors: %"PRIu64"", ns->eth.tx_errors);

	PMD_DRV_LOG(DEBUG, "tx_dropped_link_down: %"PRIu64"",
		    ns->tx_dropped_link_down);
	PMD_DRV_LOG(DEBUG, "crc_errors: %"PRIu64"", ns->crc_errors);
	PMD_DRV_LOG(DEBUG, "illegal_bytes: %"PRIu64"",
		    ns->illegal_bytes);
	PMD_DRV_LOG(DEBUG, "error_bytes: %"PRIu64"", ns->error_bytes);
	PMD_DRV_LOG(DEBUG, "mac_local_faults: %"PRIu64"",
		    ns->mac_local_faults);
	PMD_DRV_LOG(DEBUG, "mac_remote_faults: %"PRIu64"",
		    ns->mac_remote_faults);
	PMD_DRV_LOG(DEBUG, "link_xon_rx: %"PRIu64"", ns->link_xon_rx);
	PMD_DRV_LOG(DEBUG, "link_xoff_rx: %"PRIu64"", ns->link_xoff_rx);
	PMD_DRV_LOG(DEBUG, "link_xon_tx: %"PRIu64"", ns->link_xon_tx);
	PMD_DRV_LOG(DEBUG, "link_xoff_tx: %"PRIu64"", ns->link_xoff_tx);
	PMD_DRV_LOG(DEBUG, "rx_size_64: %"PRIu64"", ns->rx_size_64);
	PMD_DRV_LOG(DEBUG, "rx_size_127: %"PRIu64"", ns->rx_size_127);
	PMD_DRV_LOG(DEBUG, "rx_size_255: %"PRIu64"", ns->rx_size_255);
	PMD_DRV_LOG(DEBUG, "rx_size_511: %"PRIu64"", ns->rx_size_511);
	PMD_DRV_LOG(DEBUG, "rx_size_1023: %"PRIu64"", ns->rx_size_1023);
	PMD_DRV_LOG(DEBUG, "rx_size_1522: %"PRIu64"", ns->rx_size_1522);
	PMD_DRV_LOG(DEBUG, "rx_size_big: %"PRIu64"", ns->rx_size_big);
	PMD_DRV_LOG(DEBUG, "rx_undersize: %"PRIu64"", ns->rx_undersize);
	PMD_DRV_LOG(DEBUG, "rx_fragments: %"PRIu64"", ns->rx_fragments);
	PMD_DRV_LOG(DEBUG, "rx_oversize: %"PRIu64"", ns->rx_oversize);
	PMD_DRV_LOG(DEBUG, "rx_jabber: %"PRIu64"", ns->rx_jabber);
	PMD_DRV_LOG(DEBUG, "tx_size_64: %"PRIu64"", ns->tx_size_64);
	PMD_DRV_LOG(DEBUG, "tx_size_127: %"PRIu64"", ns->tx_size_127);
	PMD_DRV_LOG(DEBUG, "tx_size_255: %"PRIu64"", ns->tx_size_255);
	PMD_DRV_LOG(DEBUG, "tx_size_511: %"PRIu64"", ns->tx_size_511);
	PMD_DRV_LOG(DEBUG, "tx_size_1023: %"PRIu64"", ns->tx_size_1023);
	PMD_DRV_LOG(DEBUG, "tx_size_1522: %"PRIu64"", ns->tx_size_1522);
	PMD_DRV_LOG(DEBUG, "tx_size_big: %"PRIu64"", ns->tx_size_big);
	PMD_DRV_LOG(DEBUG, "rx_len_errors: %"PRIu64"", ns->rx_len_errors);
	PMD_DRV_LOG(DEBUG, "************* PF stats end ****************");

	return 0;
}
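/*
 * Usage sketch (illustrative): the aggregates computed above are what an
 * application sees through the generic stats call.
 *
 *	struct rte_eth_stats st;
 *
 *	if (rte_eth_stats_get(port_id, &st) == 0)
 *		printf("rx %" PRIu64 " pkts / %" PRIu64 " bytes, %" PRIu64
 *		       " missed\n", st.ipackets, st.ibytes, st.imissed);
 */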
/* Reset the statistics */
static int
ice_stats_reset(struct rte_eth_dev *dev)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* Mark PF and VSI stats to update the offset, aka "reset" */
	pf->offset_loaded = false;
	if (pf->main_vsi)
		pf->main_vsi->offset_loaded = false;

	/* read the stats, reading current register values into offset */
	ice_read_stats_registers(pf, hw);

	return 0;
}
static uint32_t
ice_xstats_calc_num(void)
{
	return ICE_NB_ETH_XSTATS + ICE_NB_HW_PORT_XSTATS;
}
static int
ice_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
	       unsigned int n)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_hw_port_stats *hw_stats = &pf->stats;
	unsigned int i;
	unsigned int count;

	count = ice_xstats_calc_num();
	if (n < count)
		return count;

	ice_read_stats_registers(pf, hw);

	if (!xstats)
		return 0;

	count = 0;

	/* Get stats from ice_eth_stats struct */
	for (i = 0; i < ICE_NB_ETH_XSTATS; i++) {
		xstats[count].value =
			*(uint64_t *)((char *)&hw_stats->eth +
				      ice_stats_strings[i].offset);
		xstats[count].id = count;
		count++;
	}

	/* Get individual stats from ice_hw_port struct */
	for (i = 0; i < ICE_NB_HW_PORT_XSTATS; i++) {
		xstats[count].value =
			*(uint64_t *)((char *)hw_stats +
				      ice_hw_port_strings[i].offset);
		xstats[count].id = count;
		count++;
	}

	return count;
}
static int ice_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
				struct rte_eth_xstat_name *xstats_names,
				__rte_unused unsigned int limit)
{
	unsigned int count = 0;
	unsigned int i;

	if (!xstats_names)
		return ice_xstats_calc_num();

	/* Note: limit checked in rte_eth_xstats_names() */

	/* Get stats from ice_eth_stats struct */
	for (i = 0; i < ICE_NB_ETH_XSTATS; i++) {
		strlcpy(xstats_names[count].name, ice_stats_strings[i].name,
			sizeof(xstats_names[count].name));
		count++;
	}

	/* Get individual stats from ice_hw_port struct */
	for (i = 0; i < ICE_NB_HW_PORT_XSTATS; i++) {
		strlcpy(xstats_names[count].name, ice_hw_port_strings[i].name,
			sizeof(xstats_names[count].name));
		count++;
	}

	return count;
}
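/*
 * Usage sketch (illustrative): both handlers above follow the ethdev
 * two-pass convention - a call with a NULL array (or one that is too
 * small) returns the required count so the caller can size its buffers.
 *
 *	int n = rte_eth_xstats_get(port_id, NULL, 0);
 *	struct rte_eth_xstat *xs = malloc(n * sizeof(*xs));
 *
 *	if (xs != NULL && rte_eth_xstats_get(port_id, xs, n) == n)
 *		...match ids against rte_eth_xstats_get_names()...
 */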
static int
ice_dev_flow_ops_get(struct rte_eth_dev *dev,
		     const struct rte_flow_ops **ops)
{
	if (!dev)
		return -EINVAL;

	*ops = &ice_flow_ops;
	return 0;
}
/* Add UDP tunneling port */
static int
ice_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
			    struct rte_eth_udp_tunnel *udp_tunnel)
{
	int ret = 0;
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (udp_tunnel == NULL)
		return -EINVAL;

	switch (udp_tunnel->prot_type) {
	case RTE_TUNNEL_TYPE_VXLAN:
		ret = ice_create_tunnel(hw, TNL_VXLAN, udp_tunnel->udp_port);
		break;
	default:
		PMD_DRV_LOG(ERR, "Invalid tunnel type");
		ret = -EINVAL;
		break;
	}

	return ret;
}
/* Delete UDP tunneling port */
static int
ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
			    struct rte_eth_udp_tunnel *udp_tunnel)
{
	int ret = 0;
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (udp_tunnel == NULL)
		return -EINVAL;

	switch (udp_tunnel->prot_type) {
	case RTE_TUNNEL_TYPE_VXLAN:
		ret = ice_destroy_tunnel(hw, udp_tunnel->udp_port, 0);
		break;
	default:
		PMD_DRV_LOG(ERR, "Invalid tunnel type");
		ret = -EINVAL;
		break;
	}

	return ret;
}
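/*
 * Usage sketch (illustrative): registering the IANA-assigned VXLAN port so
 * the hardware parser recognizes the tunnel on this port.
 *
 *	struct rte_eth_udp_tunnel t = {
 *		.udp_port = 4789,
 *		.prot_type = RTE_TUNNEL_TYPE_VXLAN,
 *	};
 *
 *	rte_eth_dev_udp_tunnel_port_add(port_id, &t);
 */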
static int
ice_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	      struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
					     sizeof(struct ice_adapter),
					     ice_dev_init);
}

static int
ice_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, ice_dev_uninit);
}

static struct rte_pci_driver rte_ice_pmd = {
	.id_table = pci_id_ice_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = ice_pci_probe,
	.remove = ice_pci_remove,
};
/**
 * Driver initialization routine.
 * Invoked once at EAL init time.
 * Register itself as the [Poll Mode] Driver of PCI devices.
 */
RTE_PMD_REGISTER_PCI(net_ice, rte_ice_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_ice, pci_id_ice_map);
RTE_PMD_REGISTER_KMOD_DEP(net_ice, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(net_ice,
			      ICE_PROTO_XTR_ARG "=[queue:]<vlan|ipv4|ipv6|ipv6_flow|tcp|ip_offset>"
			      ICE_SAFE_MODE_SUPPORT_ARG "=<0|1>"
			      ICE_PIPELINE_MODE_SUPPORT_ARG "=<0|1>");
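/*
 * Devargs example (illustrative; the PCI address is a placeholder and the
 * proto_xtr queue-list syntax is abridged - see doc/guides/nics/ice.rst
 * for the full grammar):
 *
 *	dpdk-testpmd -a 18:00.0,safe-mode-support=1,proto_xtr='[0-3:vlan]'
 */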
RTE_LOG_REGISTER(ice_logtype_init, pmd.net.ice.init, NOTICE);
RTE_LOG_REGISTER(ice_logtype_driver, pmd.net.ice.driver, NOTICE);
#ifdef RTE_ETHDEV_DEBUG_RX
RTE_LOG_REGISTER(ice_logtype_rx, pmd.net.ice.rx, DEBUG);
#endif /* RTE_ETHDEV_DEBUG_RX */
#ifdef RTE_ETHDEV_DEBUG_TX
RTE_LOG_REGISTER(ice_logtype_tx, pmd.net.ice.tx, DEBUG);
#endif /* RTE_ETHDEV_DEBUG_TX */