/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation
 */

#include <rte_string_fns.h>
#include <rte_ethdev_pci.h>

#include <stdio.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>

#include "base/ice_sched.h"
#include "base/ice_flow.h"
#include "base/ice_dcb.h"
#include "base/ice_common.h"

#include "rte_pmd_ice.h"
#include "ice_ethdev.h"

#include "ice_generic_flow.h"
/* devargs */
#define ICE_SAFE_MODE_SUPPORT_ARG	"safe-mode-support"
#define ICE_PIPELINE_MODE_SUPPORT_ARG	"pipeline-mode-support"
#define ICE_FLOW_MARK_SUPPORT_ARG	"flow-mark-support"
#define ICE_PROTO_XTR_ARG		"proto_xtr"

static const char * const ice_valid_args[] = {
	ICE_SAFE_MODE_SUPPORT_ARG,
	ICE_PIPELINE_MODE_SUPPORT_ARG,
	ICE_FLOW_MARK_SUPPORT_ARG,
	ICE_PROTO_XTR_ARG,
	NULL
};
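/*
 * Example devargs (illustrative; the proto_xtr syntax matches what
 * parse_queue_proto_xtr() below accepts):
 *   -w 18:00.0,proto_xtr='[(1,2-3,8-9):tcp,10-13:vlan]'
 *   -w 18:00.0,safe-mode-support=1
 * The three *-support keys are boolean (0 or 1) and are handled by
 * parse_bool(); proto_xtr takes either a single default type or a
 * per-queue mapping.
 */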
static const struct rte_mbuf_dynfield ice_proto_xtr_metadata_param = {
	.name = "ice_dynfield_proto_xtr_metadata",
	.size = sizeof(uint32_t),
	.align = __alignof__(uint32_t),
	.flags = 0,
};

struct proto_xtr_ol_flag {
	const struct rte_mbuf_dynflag param;
	uint64_t *ol_flag;
	bool required;
};
static bool ice_proto_xtr_hw_support[PROTO_XTR_MAX];
static struct proto_xtr_ol_flag ice_proto_xtr_ol_flag_params[] = {
	[PROTO_XTR_VLAN] = {
		.param = { .name = "ice_dynflag_proto_xtr_vlan" },
		.ol_flag = &rte_net_ice_dynflag_proto_xtr_vlan_mask },
	[PROTO_XTR_IPV4] = {
		.param = { .name = "ice_dynflag_proto_xtr_ipv4" },
		.ol_flag = &rte_net_ice_dynflag_proto_xtr_ipv4_mask },
	[PROTO_XTR_IPV6] = {
		.param = { .name = "ice_dynflag_proto_xtr_ipv6" },
		.ol_flag = &rte_net_ice_dynflag_proto_xtr_ipv6_mask },
	[PROTO_XTR_IPV6_FLOW] = {
		.param = { .name = "ice_dynflag_proto_xtr_ipv6_flow" },
		.ol_flag = &rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask },
	[PROTO_XTR_TCP] = {
		.param = { .name = "ice_dynflag_proto_xtr_tcp" },
		.ol_flag = &rte_net_ice_dynflag_proto_xtr_tcp_mask },
	[PROTO_XTR_IP_OFFSET] = {
		.param = { .name = "ice_dynflag_proto_xtr_ip_offset" },
		.ol_flag = &rte_net_ice_dynflag_proto_xtr_ip_offset_mask },
};
#define ICE_DFLT_OUTER_TAG_TYPE	ICE_AQ_VSI_OUTER_TAG_VLAN_9100

#define ICE_OS_DEFAULT_PKG_NAME	"ICE OS Default Package"
#define ICE_COMMS_PKG_NAME	"ICE COMMS Package"
#define ICE_MAX_RES_DESC_NUM	1024
static int ice_dev_configure(struct rte_eth_dev *dev);
static int ice_dev_start(struct rte_eth_dev *dev);
static void ice_dev_stop(struct rte_eth_dev *dev);
static void ice_dev_close(struct rte_eth_dev *dev);
static int ice_dev_reset(struct rte_eth_dev *dev);
static int ice_dev_info_get(struct rte_eth_dev *dev,
			    struct rte_eth_dev_info *dev_info);
static int ice_link_update(struct rte_eth_dev *dev,
			   int wait_to_complete);
static int ice_dev_set_link_up(struct rte_eth_dev *dev);
static int ice_dev_set_link_down(struct rte_eth_dev *dev);

static int ice_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
static int ice_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static int ice_rss_reta_update(struct rte_eth_dev *dev,
			       struct rte_eth_rss_reta_entry64 *reta_conf,
			       uint16_t reta_size);
static int ice_rss_reta_query(struct rte_eth_dev *dev,
			      struct rte_eth_rss_reta_entry64 *reta_conf,
			      uint16_t reta_size);
static int ice_rss_hash_update(struct rte_eth_dev *dev,
			       struct rte_eth_rss_conf *rss_conf);
static int ice_rss_hash_conf_get(struct rte_eth_dev *dev,
				 struct rte_eth_rss_conf *rss_conf);
static int ice_promisc_enable(struct rte_eth_dev *dev);
static int ice_promisc_disable(struct rte_eth_dev *dev);
static int ice_allmulti_enable(struct rte_eth_dev *dev);
static int ice_allmulti_disable(struct rte_eth_dev *dev);
static int ice_vlan_filter_set(struct rte_eth_dev *dev,
			       uint16_t vlan_id,
			       int on);
static int ice_macaddr_set(struct rte_eth_dev *dev,
			   struct rte_ether_addr *mac_addr);
static int ice_macaddr_add(struct rte_eth_dev *dev,
			   struct rte_ether_addr *mac_addr,
			   __rte_unused uint32_t index,
			   uint32_t pool);
static void ice_macaddr_remove(struct rte_eth_dev *dev, uint32_t index);
static int ice_rx_queue_intr_enable(struct rte_eth_dev *dev,
				    uint16_t queue_id);
static int ice_rx_queue_intr_disable(struct rte_eth_dev *dev,
				     uint16_t queue_id);
static int ice_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
			      size_t fw_size);
static int ice_vlan_pvid_set(struct rte_eth_dev *dev,
			     uint16_t pvid, int on);
static int ice_get_eeprom_length(struct rte_eth_dev *dev);
static int ice_get_eeprom(struct rte_eth_dev *dev,
			  struct rte_dev_eeprom_info *eeprom);
static int ice_stats_get(struct rte_eth_dev *dev,
			 struct rte_eth_stats *stats);
static int ice_stats_reset(struct rte_eth_dev *dev);
static int ice_xstats_get(struct rte_eth_dev *dev,
			  struct rte_eth_xstat *xstats, unsigned int n);
static int ice_xstats_get_names(struct rte_eth_dev *dev,
				struct rte_eth_xstat_name *xstats_names,
				unsigned int limit);
static int ice_dev_filter_ctrl(struct rte_eth_dev *dev,
			       enum rte_filter_type filter_type,
			       enum rte_filter_op filter_op,
			       void *arg);
static int ice_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
			struct rte_eth_udp_tunnel *udp_tunnel);
static int ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
			struct rte_eth_udp_tunnel *udp_tunnel);
static const struct rte_pci_id pci_id_ice_map[] = {
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_BACKPLANE) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810_XXV_BACKPLANE) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810_XXV_QSFP) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810_XXV_SFP) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_BACKPLANE) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_QSFP) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_SFP) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_10G_BASE_T) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_SGMII) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_BACKPLANE) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_SFP) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_10G_BASE_T) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_1GBE) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_QSFP) },
	{ .vendor_id = 0, /* sentinel */ },
};
static const struct eth_dev_ops ice_eth_dev_ops = {
	.dev_configure = ice_dev_configure,
	.dev_start = ice_dev_start,
	.dev_stop = ice_dev_stop,
	.dev_close = ice_dev_close,
	.dev_reset = ice_dev_reset,
	.dev_set_link_up = ice_dev_set_link_up,
	.dev_set_link_down = ice_dev_set_link_down,
	.rx_queue_start = ice_rx_queue_start,
	.rx_queue_stop = ice_rx_queue_stop,
	.tx_queue_start = ice_tx_queue_start,
	.tx_queue_stop = ice_tx_queue_stop,
	.rx_queue_setup = ice_rx_queue_setup,
	.rx_queue_release = ice_rx_queue_release,
	.tx_queue_setup = ice_tx_queue_setup,
	.tx_queue_release = ice_tx_queue_release,
	.dev_infos_get = ice_dev_info_get,
	.dev_supported_ptypes_get = ice_dev_supported_ptypes_get,
	.link_update = ice_link_update,
	.mtu_set = ice_mtu_set,
	.mac_addr_set = ice_macaddr_set,
	.mac_addr_add = ice_macaddr_add,
	.mac_addr_remove = ice_macaddr_remove,
	.vlan_filter_set = ice_vlan_filter_set,
	.vlan_offload_set = ice_vlan_offload_set,
	.reta_update = ice_rss_reta_update,
	.reta_query = ice_rss_reta_query,
	.rss_hash_update = ice_rss_hash_update,
	.rss_hash_conf_get = ice_rss_hash_conf_get,
	.promiscuous_enable = ice_promisc_enable,
	.promiscuous_disable = ice_promisc_disable,
	.allmulticast_enable = ice_allmulti_enable,
	.allmulticast_disable = ice_allmulti_disable,
	.rx_queue_intr_enable = ice_rx_queue_intr_enable,
	.rx_queue_intr_disable = ice_rx_queue_intr_disable,
	.fw_version_get = ice_fw_version_get,
	.vlan_pvid_set = ice_vlan_pvid_set,
	.rxq_info_get = ice_rxq_info_get,
	.txq_info_get = ice_txq_info_get,
	.rx_burst_mode_get = ice_rx_burst_mode_get,
	.tx_burst_mode_get = ice_tx_burst_mode_get,
	.get_eeprom_length = ice_get_eeprom_length,
	.get_eeprom = ice_get_eeprom,
	.stats_get = ice_stats_get,
	.stats_reset = ice_stats_reset,
	.xstats_get = ice_xstats_get,
	.xstats_get_names = ice_xstats_get_names,
	.xstats_reset = ice_stats_reset,
	.filter_ctrl = ice_dev_filter_ctrl,
	.udp_tunnel_port_add = ice_dev_udp_tunnel_port_add,
	.udp_tunnel_port_del = ice_dev_udp_tunnel_port_del,
	.tx_done_cleanup = ice_tx_done_cleanup,
};
/* Store statistics names and their offsets in the stats structure */
struct ice_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned int offset;
};

static const struct ice_xstats_name_off ice_stats_strings[] = {
	{"rx_unicast_packets", offsetof(struct ice_eth_stats, rx_unicast)},
	{"rx_multicast_packets", offsetof(struct ice_eth_stats, rx_multicast)},
	{"rx_broadcast_packets", offsetof(struct ice_eth_stats, rx_broadcast)},
	{"rx_dropped_packets", offsetof(struct ice_eth_stats, rx_discards)},
	{"rx_unknown_protocol_packets", offsetof(struct ice_eth_stats,
		rx_unknown_protocol)},
	{"tx_unicast_packets", offsetof(struct ice_eth_stats, tx_unicast)},
	{"tx_multicast_packets", offsetof(struct ice_eth_stats, tx_multicast)},
	{"tx_broadcast_packets", offsetof(struct ice_eth_stats, tx_broadcast)},
	{"tx_dropped_packets", offsetof(struct ice_eth_stats, tx_discards)},
};

#define ICE_NB_ETH_XSTATS (sizeof(ice_stats_strings) / \
		sizeof(ice_stats_strings[0]))
static const struct ice_xstats_name_off ice_hw_port_strings[] = {
	{"tx_link_down_dropped", offsetof(struct ice_hw_port_stats,
		tx_dropped_link_down)},
	{"rx_crc_errors", offsetof(struct ice_hw_port_stats, crc_errors)},
	{"rx_illegal_byte_errors", offsetof(struct ice_hw_port_stats,
		illegal_bytes)},
	{"rx_error_bytes", offsetof(struct ice_hw_port_stats, error_bytes)},
	{"mac_local_errors", offsetof(struct ice_hw_port_stats,
		mac_local_faults)},
	{"mac_remote_errors", offsetof(struct ice_hw_port_stats,
		mac_remote_faults)},
	{"rx_len_errors", offsetof(struct ice_hw_port_stats,
		rx_len_errors)},
	{"tx_xon_packets", offsetof(struct ice_hw_port_stats, link_xon_tx)},
	{"rx_xon_packets", offsetof(struct ice_hw_port_stats, link_xon_rx)},
	{"tx_xoff_packets", offsetof(struct ice_hw_port_stats, link_xoff_tx)},
	{"rx_xoff_packets", offsetof(struct ice_hw_port_stats, link_xoff_rx)},
	{"rx_size_64_packets", offsetof(struct ice_hw_port_stats, rx_size_64)},
	{"rx_size_65_to_127_packets", offsetof(struct ice_hw_port_stats,
		rx_size_127)},
	{"rx_size_128_to_255_packets", offsetof(struct ice_hw_port_stats,
		rx_size_255)},
	{"rx_size_256_to_511_packets", offsetof(struct ice_hw_port_stats,
		rx_size_511)},
	{"rx_size_512_to_1023_packets", offsetof(struct ice_hw_port_stats,
		rx_size_1023)},
	{"rx_size_1024_to_1522_packets", offsetof(struct ice_hw_port_stats,
		rx_size_1522)},
	{"rx_size_1523_to_max_packets", offsetof(struct ice_hw_port_stats,
		rx_size_big)},
	{"rx_undersized_errors", offsetof(struct ice_hw_port_stats,
		rx_undersize)},
	{"rx_oversize_errors", offsetof(struct ice_hw_port_stats,
		rx_oversize)},
	{"rx_mac_short_pkt_dropped", offsetof(struct ice_hw_port_stats,
		mac_short_pkt_dropped)},
	{"rx_fragmented_errors", offsetof(struct ice_hw_port_stats,
		rx_fragments)},
	{"rx_jabber_errors", offsetof(struct ice_hw_port_stats, rx_jabber)},
	{"tx_size_64_packets", offsetof(struct ice_hw_port_stats, tx_size_64)},
	{"tx_size_65_to_127_packets", offsetof(struct ice_hw_port_stats,
		tx_size_127)},
	{"tx_size_128_to_255_packets", offsetof(struct ice_hw_port_stats,
		tx_size_255)},
	{"tx_size_256_to_511_packets", offsetof(struct ice_hw_port_stats,
		tx_size_511)},
	{"tx_size_512_to_1023_packets", offsetof(struct ice_hw_port_stats,
		tx_size_1023)},
	{"tx_size_1024_to_1522_packets", offsetof(struct ice_hw_port_stats,
		tx_size_1522)},
	{"tx_size_1523_to_max_packets", offsetof(struct ice_hw_port_stats,
		tx_size_big)},
};

#define ICE_NB_HW_PORT_XSTATS (sizeof(ice_hw_port_strings) / \
		sizeof(ice_hw_port_strings[0]))
static void
ice_init_controlq_parameter(struct ice_hw *hw)
{
	/* fields for adminq */
	hw->adminq.num_rq_entries = ICE_ADMINQ_LEN;
	hw->adminq.num_sq_entries = ICE_ADMINQ_LEN;
	hw->adminq.rq_buf_size = ICE_ADMINQ_BUF_SZ;
	hw->adminq.sq_buf_size = ICE_ADMINQ_BUF_SZ;

	/* fields for mailboxq; DPDK is used as the PF host */
	hw->mailboxq.num_rq_entries = ICE_MAILBOXQ_LEN;
	hw->mailboxq.num_sq_entries = ICE_MAILBOXQ_LEN;
	hw->mailboxq.rq_buf_size = ICE_MAILBOXQ_BUF_SZ;
	hw->mailboxq.sq_buf_size = ICE_MAILBOXQ_BUF_SZ;
}
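/*
 * Note: these queue depths and buffer sizes (ICE_ADMINQ_LEN etc., defined
 * in the PMD headers) are consumed when the control queues are created by
 * ice_init_hw(); ice_dev_init() below calls this function immediately
 * before ice_init_hw() for that reason.
 */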
static int
lookup_proto_xtr_type(const char *xtr_name)
{
	static struct {
		const char *name;
		enum proto_xtr_type type;
	} xtr_type_map[] = {
		{ "vlan",      PROTO_XTR_VLAN      },
		{ "ipv4",      PROTO_XTR_IPV4      },
		{ "ipv6",      PROTO_XTR_IPV6      },
		{ "ipv6_flow", PROTO_XTR_IPV6_FLOW },
		{ "tcp",       PROTO_XTR_TCP       },
		{ "ip_offset", PROTO_XTR_IP_OFFSET },
	};
	uint32_t i;

	for (i = 0; i < RTE_DIM(xtr_type_map); i++) {
		if (strcmp(xtr_name, xtr_type_map[i].name) == 0)
			return xtr_type_map[i].type;
	}

	return -1;
}
/**
 * Parse an element; the element can be a single number/range or a
 * '(' ')' group:
 * 1) A single number element, e.g. 9
 * 2) A single range element, two numbers separated by '-', e.g. 2-6
 * 3) A group element, which combines multiple 1) or 2) within '( )',
 *    e.g. (0,2-4,6)
 *    Within a group element, '-' is used as the range separator and
 *    ',' separates single numbers.
 */
static int
parse_queue_set(const char *input, int xtr_type, struct ice_devargs *devargs)
{
	const char *str = input;
	char *end = NULL;
	uint32_t min, max;
	uint32_t idx;

	while (isblank(*str))
		str++;

	if (!isdigit(*str) && *str != '(')
		return -1;

	/* process single number or single range of number */
	if (*str != '(') {
		errno = 0;
		idx = strtoul(str, &end, 10);
		if (errno || end == NULL || idx >= ICE_MAX_QUEUE_NUM)
			return -1;

		while (isblank(*end))
			end++;

		min = idx;
		max = idx;

		/* process single <number>-<number> */
		if (*end == '-') {
			end++;
			while (isblank(*end))
				end++;
			if (!isdigit(*end))
				return -1;

			errno = 0;
			idx = strtoul(end, &end, 10);
			if (errno || end == NULL || idx >= ICE_MAX_QUEUE_NUM)
				return -1;

			max = idx;
			while (isblank(*end))
				end++;
		}

		if (*end != ':')
			return -1;

		for (idx = RTE_MIN(min, max);
		     idx <= RTE_MAX(min, max); idx++)
			devargs->proto_xtr[idx] = xtr_type;

		return 0;
	}

	/* process set within bracket */
	str++;
	while (isblank(*str))
		str++;
	if (*str == '\0')
		return -1;

	min = ICE_MAX_QUEUE_NUM;
	do {
		/* go ahead to the first digit */
		while (isblank(*str))
			str++;
		if (!isdigit(*str))
			return -1;

		/* get the digit value */
		errno = 0;
		idx = strtoul(str, &end, 10);
		if (errno || end == NULL || idx >= ICE_MAX_QUEUE_NUM)
			return -1;

		/* go ahead to separator '-', ',' and ')' */
		while (isblank(*end))
			end++;
		if (*end == '-') {
			if (min == ICE_MAX_QUEUE_NUM)
				min = idx;
			else /* avoid continuous '-' */
				return -1;
		} else if (*end == ',' || *end == ')') {
			max = idx;
			if (min == ICE_MAX_QUEUE_NUM)
				min = idx;

			for (idx = RTE_MIN(min, max);
			     idx <= RTE_MAX(min, max); idx++)
				devargs->proto_xtr[idx] = xtr_type;

			min = ICE_MAX_QUEUE_NUM;
		} else {
			return -1;
		}

		str = end + 1;
	} while (*end != ')' && *end != '\0');

	return 0;
}
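/*
 * Worked example (illustrative): for the devargs element "(0,2-4):tcp",
 * parse_queue_proto_xtr() calls parse_queue_set() with the input pointing
 * at "(0,2-4)..." and xtr_type == PROTO_XTR_TCP; the bracket branch above
 * then assigns PROTO_XTR_TCP to devargs->proto_xtr[0] and to indexes
 * 2 through 4.
 */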
static int
parse_queue_proto_xtr(const char *queues, struct ice_devargs *devargs)
{
	const char *queue_start;
	uint32_t idx;
	int xtr_type;
	char xtr_name[32];

	while (isblank(*queues))
		queues++;

	if (*queues != '[') {
		xtr_type = lookup_proto_xtr_type(queues);
		if (xtr_type < 0)
			return -1;

		devargs->proto_xtr_dflt = xtr_type;

		return 0;
	}

	queues++;
	do {
		while (isblank(*queues))
			queues++;
		if (*queues == '\0')
			return -1;

		queue_start = queues;

		/* go across a complete bracket */
		if (*queue_start == '(') {
			queues += strcspn(queues, ")");
			if (*queues != ')')
				return -1;
		}

		/* scan the separator ':' */
		queues += strcspn(queues, ":");
		if (*queues++ != ':')
			return -1;
		while (isblank(*queues))
			queues++;

		for (idx = 0; ; idx++) {
			if (isblank(queues[idx]) ||
			    queues[idx] == ',' ||
			    queues[idx] == ']' ||
			    queues[idx] == '\0')
				break;

			if (idx > sizeof(xtr_name) - 2)
				return -1;

			xtr_name[idx] = queues[idx];
		}
		xtr_name[idx] = '\0';
		xtr_type = lookup_proto_xtr_type(xtr_name);
		if (xtr_type < 0)
			return -1;

		queues += idx;

		while (isblank(*queues) || *queues == ',' || *queues == ']')
			queues++;

		if (parse_queue_set(queue_start, xtr_type, devargs) < 0)
			return -1;
	} while (*queues != '\0');

	return 0;
}
static int
handle_proto_xtr_arg(__rte_unused const char *key, const char *value,
		     void *extra_args)
{
	struct ice_devargs *devargs = extra_args;

	if (value == NULL || extra_args == NULL)
		return -EINVAL;

	if (parse_queue_proto_xtr(value, devargs) < 0) {
		PMD_DRV_LOG(ERR,
			    "The protocol extraction parameter is invalid: '%s'",
			    value);
		return -1;
	}

	return 0;
}
static void
ice_check_proto_xtr_support(struct ice_hw *hw)
{
#define FLX_REG(val, fld, idx) \
	(((val) & GLFLXP_RXDID_FLX_WRD_##idx##_##fld##_M) >> \
	 GLFLXP_RXDID_FLX_WRD_##idx##_##fld##_S)
	static struct {
		uint32_t rxdid;
		uint8_t opcode;
		uint8_t protid_0;
		uint8_t protid_1;
	} xtr_sets[] = {
		[PROTO_XTR_VLAN] = { ICE_RXDID_COMMS_AUX_VLAN,
				     ICE_RX_OPC_EXTRACT,
				     ICE_PROT_EVLAN_O, ICE_PROT_VLAN_O},
		[PROTO_XTR_IPV4] = { ICE_RXDID_COMMS_AUX_IPV4,
				     ICE_RX_OPC_EXTRACT,
				     ICE_PROT_IPV4_OF_OR_S,
				     ICE_PROT_IPV4_OF_OR_S },
		[PROTO_XTR_IPV6] = { ICE_RXDID_COMMS_AUX_IPV6,
				     ICE_RX_OPC_EXTRACT,
				     ICE_PROT_IPV6_OF_OR_S,
				     ICE_PROT_IPV6_OF_OR_S },
		[PROTO_XTR_IPV6_FLOW] = { ICE_RXDID_COMMS_AUX_IPV6_FLOW,
					  ICE_RX_OPC_EXTRACT,
					  ICE_PROT_IPV6_OF_OR_S,
					  ICE_PROT_IPV6_OF_OR_S },
		[PROTO_XTR_TCP] = { ICE_RXDID_COMMS_AUX_TCP,
				    ICE_RX_OPC_EXTRACT,
				    ICE_PROT_TCP_IL, ICE_PROT_ID_INVAL },
		[PROTO_XTR_IP_OFFSET] = { ICE_RXDID_COMMS_AUX_IP_OFFSET,
					  ICE_RX_OPC_PROTID,
					  ICE_PROT_IPV4_OF_OR_S,
					  ICE_PROT_IPV6_OF_OR_S },
	};
	uint32_t i;

	for (i = 0; i < RTE_DIM(xtr_sets); i++) {
		uint32_t rxdid = xtr_sets[i].rxdid;
		uint32_t v;

		if (xtr_sets[i].protid_0 != ICE_PROT_ID_INVAL) {
			v = ICE_READ_REG(hw, GLFLXP_RXDID_FLX_WRD_4(rxdid));

			if (FLX_REG(v, PROT_MDID, 4) == xtr_sets[i].protid_0 &&
			    FLX_REG(v, RXDID_OPCODE, 4) == xtr_sets[i].opcode)
				ice_proto_xtr_hw_support[i] = true;
		}

		if (xtr_sets[i].protid_1 != ICE_PROT_ID_INVAL) {
			v = ICE_READ_REG(hw, GLFLXP_RXDID_FLX_WRD_5(rxdid));

			if (FLX_REG(v, PROT_MDID, 5) == xtr_sets[i].protid_1 &&
			    FLX_REG(v, RXDID_OPCODE, 5) == xtr_sets[i].opcode)
				ice_proto_xtr_hw_support[i] = true;
		}
	}
}
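/*
 * The probe above checks the flexible-descriptor programming installed by
 * the DDP package: an extraction type is usable only if flex words 4/5 of
 * its RXDID carry the expected protocol ID and opcode. For instance,
 * FLX_REG(v, PROT_MDID, 4) expands mechanically to
 * ((v & GLFLXP_RXDID_FLX_WRD_4_PROT_MDID_M) >>
 *  GLFLXP_RXDID_FLX_WRD_4_PROT_MDID_S).
 */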
static int
ice_res_pool_init(struct ice_res_pool_info *pool, uint32_t base,
		  uint32_t num)
{
	struct pool_entry *entry;

	if (!pool || !num)
		return -EINVAL;

	entry = rte_zmalloc(NULL, sizeof(*entry), 0);
	if (!entry) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory for resource pool");
		return -ENOMEM;
	}

	/* queue heap initialize */
	pool->num_free = num;
	pool->num_alloc = 0;
	pool->base = base;
	LIST_INIT(&pool->alloc_list);
	LIST_INIT(&pool->free_list);

	/* Initialize element */
	entry->base = 0;
	entry->len = num;

	LIST_INSERT_HEAD(&pool->free_list, entry, next);
	return 0;
}
static int
ice_res_pool_alloc(struct ice_res_pool_info *pool,
		   uint16_t num)
{
	struct pool_entry *entry, *valid_entry;

	if (!pool || !num) {
		PMD_INIT_LOG(ERR, "Invalid parameter");
		return -EINVAL;
	}

	if (pool->num_free < num) {
		PMD_INIT_LOG(ERR, "No resource. ask:%u, available:%u",
			     num, pool->num_free);
		return -ENOMEM;
	}

	valid_entry = NULL;
	/* Look up the free list and find the best-fit entry */
	LIST_FOREACH(entry, &pool->free_list, next) {
		if (entry->len >= num) {
			/* Find the best one */
			if (entry->len == num) {
				valid_entry = entry;
				break;
			}
			if (!valid_entry ||
			    valid_entry->len > entry->len)
				valid_entry = entry;
		}
	}

	/* No entry found to satisfy the request, return */
	if (!valid_entry) {
		PMD_INIT_LOG(ERR, "No valid entry found");
		return -ENOMEM;
	}
	/**
	 * The entry has exactly the requested number of queues;
	 * remove it from the free_list.
	 */
	if (valid_entry->len == num) {
		LIST_REMOVE(valid_entry, next);
	} else {
		/**
		 * The entry has more queues than requested; create a new
		 * entry for the alloc_list and subtract the allocated base
		 * and number from the free_list entry.
		 */
		entry = rte_zmalloc(NULL, sizeof(*entry), 0);
		if (!entry) {
			PMD_INIT_LOG(ERR,
				     "Failed to allocate memory for "
				     "resource pool");
			return -ENOMEM;
		}
		entry->base = valid_entry->base;
		entry->len = num;
		valid_entry->base += num;
		valid_entry->len -= num;
		valid_entry = entry;
	}

	/* Insert it into alloc list, not sorted */
	LIST_INSERT_HEAD(&pool->alloc_list, valid_entry, next);

	pool->num_free -= valid_entry->len;
	pool->num_alloc += valid_entry->len;

	return valid_entry->base + pool->base;
}
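/*
 * Usage sketch (illustrative): ice_dev_init() below seeds the MSI-X pool
 * with ice_res_pool_init(&pf->msix_pool, 1, num_msix_vectors - 1);
 * ice_setup_vsi() then grabs a contiguous block per VSI via
 * ice_res_pool_alloc() (the return value is the absolute base vector),
 * and ice_res_pool_destroy() releases everything on close.
 */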
static void
ice_res_pool_destroy(struct ice_res_pool_info *pool)
{
	struct pool_entry *entry, *next_entry;

	if (!pool)
		return;

	for (entry = LIST_FIRST(&pool->alloc_list);
	     entry && (next_entry = LIST_NEXT(entry, next), 1);
	     entry = next_entry) {
		LIST_REMOVE(entry, next);
		rte_free(entry);
	}

	for (entry = LIST_FIRST(&pool->free_list);
	     entry && (next_entry = LIST_NEXT(entry, next), 1);
	     entry = next_entry) {
		LIST_REMOVE(entry, next);
		rte_free(entry);
	}

	pool->num_free = 0;
	pool->num_alloc = 0;
	pool->base = 0;
	LIST_INIT(&pool->alloc_list);
	LIST_INIT(&pool->free_list);
}
static void
ice_vsi_config_default_rss(struct ice_aqc_vsi_props *info)
{
	/* Set VSI LUT selection */
	info->q_opt_rss = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI &
			  ICE_AQ_VSI_Q_OPT_RSS_LUT_M;
	/* Set Hash scheme */
	info->q_opt_rss |= ICE_AQ_VSI_Q_OPT_RSS_TPLZ &
			   ICE_AQ_VSI_Q_OPT_RSS_HASH_M;

	info->q_opt_tc = ICE_AQ_VSI_Q_OPT_TC_OVR_M;
}
static enum ice_status
ice_vsi_config_tc_queue_mapping(struct ice_vsi *vsi,
				struct ice_aqc_vsi_props *info,
				uint8_t enabled_tcmap)
{
	uint16_t bsf, qp_idx;

	/* Default to TC0 for now; multi-TC support needs to be added later.
	 * Configure TC and queue mapping parameters: for each enabled TC,
	 * allocate qpnum_per_tc queues to this traffic class.
	 */
	if (enabled_tcmap != 0x01) {
		PMD_INIT_LOG(ERR, "only TC0 is supported");
		return -ENOTSUP;
	}

	vsi->nb_qps = RTE_MIN(vsi->nb_qps, ICE_MAX_Q_PER_TC);
	bsf = rte_bsf32(vsi->nb_qps);
	/* Adjust the queue number to actual queues that can be applied */
	vsi->nb_qps = 0x1 << bsf;

	qp_idx = 0;
	/* Set tc and queue mapping with VSI */
	info->tc_mapping[0] = rte_cpu_to_le_16((qp_idx <<
						ICE_AQ_VSI_TC_Q_OFFSET_S) |
					       (bsf << ICE_AQ_VSI_TC_Q_NUM_S));

	/* Associate queue number with VSI */
	info->mapping_flags |= rte_cpu_to_le_16(ICE_AQ_VSI_Q_MAP_CONTIG);
	info->q_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
	info->q_mapping[1] = rte_cpu_to_le_16(vsi->nb_qps);
	info->valid_sections |=
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
	/* Set the info.ingress_table and info.egress_table
	 * for UP translate table. Now just set it to 1:1 map by default
	 * -- 0b 111 110 101 100 011 010 001 000 == 0xFAC688
	 */
#define ICE_TC_QUEUE_TABLE_DFLT 0x00FAC688
	info->ingress_table = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT);
	info->egress_table = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT);
	info->outer_up_table = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT);

	return 0;
}
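/*
 * Example (illustrative): with vsi->nb_qps == 16, bsf == 4, so
 * tc_mapping[0] encodes queue offset 0 with a queue count of 2^4, and
 * q_mapping[] associates queues base_queue..base_queue+15 with the VSI.
 */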
static int
ice_init_mac_address(struct rte_eth_dev *dev)
{
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (!rte_is_unicast_ether_addr
		((struct rte_ether_addr *)hw->port_info[0].mac.lan_addr)) {
		PMD_INIT_LOG(ERR, "Invalid MAC address");
		return -EINVAL;
	}

	rte_ether_addr_copy(
		(struct rte_ether_addr *)hw->port_info[0].mac.lan_addr,
		(struct rte_ether_addr *)hw->port_info[0].mac.perm_addr);

	dev->data->mac_addrs =
		rte_zmalloc(NULL, sizeof(struct rte_ether_addr), 0);
	if (!dev->data->mac_addrs) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory to store mac address");
		return -ENOMEM;
	}
	/* store it to dev data */
	rte_ether_addr_copy(
		(struct rte_ether_addr *)hw->port_info[0].mac.perm_addr,
		&dev->data->mac_addrs[0]);

	return 0;
}
/* Find a specific MAC filter */
static struct ice_mac_filter *
ice_find_mac_filter(struct ice_vsi *vsi, struct rte_ether_addr *macaddr)
{
	struct ice_mac_filter *f;

	TAILQ_FOREACH(f, &vsi->mac_list, next) {
		if (rte_is_same_ether_addr(macaddr, &f->mac_info.mac_addr))
			return f;
	}

	return NULL;
}
static int
ice_add_mac_filter(struct ice_vsi *vsi, struct rte_ether_addr *mac_addr)
{
	struct ice_fltr_list_entry *m_list_itr = NULL;
	struct ice_mac_filter *f;
	struct LIST_HEAD_TYPE list_head;
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	int ret = 0;

	/* If it's added and configured, return */
	f = ice_find_mac_filter(vsi, mac_addr);
	if (f) {
		PMD_DRV_LOG(INFO, "This MAC filter already exists.");
		return 0;
	}

	INIT_LIST_HEAD(&list_head);

	m_list_itr = (struct ice_fltr_list_entry *)
		ice_malloc(hw, sizeof(*m_list_itr));
	if (!m_list_itr) {
		ret = -ENOMEM;
		goto DONE;
	}
	ice_memcpy(m_list_itr->fltr_info.l_data.mac.mac_addr,
		   mac_addr, ETH_ALEN, ICE_NONDMA_TO_NONDMA);
	m_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
	m_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
	m_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_MAC;
	m_list_itr->fltr_info.flag = ICE_FLTR_TX;
	m_list_itr->fltr_info.vsi_handle = vsi->idx;

	LIST_ADD(&m_list_itr->list_entry, &list_head);

	/* Add the mac */
	ret = ice_add_mac(hw, &list_head);
	if (ret != ICE_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to add MAC filter");
		ret = -EINVAL;
		goto DONE;
	}
	/* Add the mac addr into mac list */
	f = rte_zmalloc(NULL, sizeof(*f), 0);
	if (!f) {
		PMD_DRV_LOG(ERR, "failed to allocate memory");
		ret = -ENOMEM;
		goto DONE;
	}
	rte_ether_addr_copy(mac_addr, &f->mac_info.mac_addr);
	TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
	vsi->mac_num++;

	ret = 0;

DONE:
	rte_free(m_list_itr);
	return ret;
}
static int
ice_remove_mac_filter(struct ice_vsi *vsi, struct rte_ether_addr *mac_addr)
{
	struct ice_fltr_list_entry *m_list_itr = NULL;
	struct ice_mac_filter *f;
	struct LIST_HEAD_TYPE list_head;
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	int ret = 0;

	/* Can't find it, return an error */
	f = ice_find_mac_filter(vsi, mac_addr);
	if (!f)
		return -EINVAL;

	INIT_LIST_HEAD(&list_head);

	m_list_itr = (struct ice_fltr_list_entry *)
		ice_malloc(hw, sizeof(*m_list_itr));
	if (!m_list_itr) {
		ret = -ENOMEM;
		goto DONE;
	}
	ice_memcpy(m_list_itr->fltr_info.l_data.mac.mac_addr,
		   mac_addr, ETH_ALEN, ICE_NONDMA_TO_NONDMA);
	m_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
	m_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
	m_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_MAC;
	m_list_itr->fltr_info.flag = ICE_FLTR_TX;
	m_list_itr->fltr_info.vsi_handle = vsi->idx;

	LIST_ADD(&m_list_itr->list_entry, &list_head);

	/* remove the mac filter */
	ret = ice_remove_mac(hw, &list_head);
	if (ret != ICE_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to remove MAC filter");
		ret = -EINVAL;
		goto DONE;
	}

	/* Remove the mac addr from mac list */
	TAILQ_REMOVE(&vsi->mac_list, f, next);
	rte_free(f);
	vsi->mac_num--;

	ret = 0;
DONE:
	rte_free(m_list_itr);
	return ret;
}
/* Find a specific VLAN filter */
static struct ice_vlan_filter *
ice_find_vlan_filter(struct ice_vsi *vsi, uint16_t vlan_id)
{
	struct ice_vlan_filter *f;

	TAILQ_FOREACH(f, &vsi->vlan_list, next) {
		if (vlan_id == f->vlan_info.vlan_id)
			return f;
	}

	return NULL;
}
static int
ice_add_vlan_filter(struct ice_vsi *vsi, uint16_t vlan_id)
{
	struct ice_fltr_list_entry *v_list_itr = NULL;
	struct ice_vlan_filter *f;
	struct LIST_HEAD_TYPE list_head;
	struct ice_hw *hw;
	int ret = 0;

	if (!vsi || vlan_id > RTE_ETHER_MAX_VLAN_ID)
		return -EINVAL;

	hw = ICE_VSI_TO_HW(vsi);

	/* If it's added and configured, return. */
	f = ice_find_vlan_filter(vsi, vlan_id);
	if (f) {
		PMD_DRV_LOG(INFO, "This VLAN filter already exists.");
		return 0;
	}

	if (!vsi->vlan_anti_spoof_on && !vsi->vlan_filter_on)
		return 0;

	INIT_LIST_HEAD(&list_head);

	v_list_itr = (struct ice_fltr_list_entry *)
		ice_malloc(hw, sizeof(*v_list_itr));
	if (!v_list_itr) {
		ret = -ENOMEM;
		goto DONE;
	}
	v_list_itr->fltr_info.l_data.vlan.vlan_id = vlan_id;
	v_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
	v_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
	v_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
	v_list_itr->fltr_info.flag = ICE_FLTR_TX;
	v_list_itr->fltr_info.vsi_handle = vsi->idx;

	LIST_ADD(&v_list_itr->list_entry, &list_head);

	/* Add the vlan */
	ret = ice_add_vlan(hw, &list_head);
	if (ret != ICE_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to add VLAN filter");
		ret = -EINVAL;
		goto DONE;
	}

	/* Add vlan into vlan list */
	f = rte_zmalloc(NULL, sizeof(*f), 0);
	if (!f) {
		PMD_DRV_LOG(ERR, "failed to allocate memory");
		ret = -ENOMEM;
		goto DONE;
	}
	f->vlan_info.vlan_id = vlan_id;
	TAILQ_INSERT_TAIL(&vsi->vlan_list, f, next);
	vsi->vlan_num++;

	ret = 0;

DONE:
	rte_free(v_list_itr);
	return ret;
}
static int
ice_remove_vlan_filter(struct ice_vsi *vsi, uint16_t vlan_id)
{
	struct ice_fltr_list_entry *v_list_itr = NULL;
	struct ice_vlan_filter *f;
	struct LIST_HEAD_TYPE list_head;
	struct ice_hw *hw;
	int ret = 0;

	/**
	 * Vlan 0 is the generic filter for untagged packets
	 * and can't be removed.
	 */
	if (!vsi || vlan_id == 0 || vlan_id > RTE_ETHER_MAX_VLAN_ID)
		return -EINVAL;

	hw = ICE_VSI_TO_HW(vsi);

	/* Can't find it, return an error */
	f = ice_find_vlan_filter(vsi, vlan_id);
	if (!f)
		return -EINVAL;

	INIT_LIST_HEAD(&list_head);

	v_list_itr = (struct ice_fltr_list_entry *)
		ice_malloc(hw, sizeof(*v_list_itr));
	if (!v_list_itr) {
		ret = -ENOMEM;
		goto DONE;
	}

	v_list_itr->fltr_info.l_data.vlan.vlan_id = vlan_id;
	v_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
	v_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
	v_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
	v_list_itr->fltr_info.flag = ICE_FLTR_TX;
	v_list_itr->fltr_info.vsi_handle = vsi->idx;

	LIST_ADD(&v_list_itr->list_entry, &list_head);

	/* remove the vlan filter */
	ret = ice_remove_vlan(hw, &list_head);
	if (ret != ICE_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to remove VLAN filter");
		ret = -EINVAL;
		goto DONE;
	}

	/* Remove the vlan id from vlan list */
	TAILQ_REMOVE(&vsi->vlan_list, f, next);
	rte_free(f);
	vsi->vlan_num--;

	ret = 0;
DONE:
	rte_free(v_list_itr);
	return ret;
}
static int
ice_remove_all_mac_vlan_filters(struct ice_vsi *vsi)
{
	struct ice_mac_filter *m_f;
	struct ice_vlan_filter *v_f;
	int ret = 0;

	if (!vsi || !vsi->mac_num)
		return -EINVAL;

	TAILQ_FOREACH(m_f, &vsi->mac_list, next) {
		ret = ice_remove_mac_filter(vsi, &m_f->mac_info.mac_addr);
		if (ret != ICE_SUCCESS) {
			ret = -EINVAL;
			goto DONE;
		}
	}

	if (vsi->vlan_num == 0)
		return 0;

	TAILQ_FOREACH(v_f, &vsi->vlan_list, next) {
		ret = ice_remove_vlan_filter(vsi, v_f->vlan_info.vlan_id);
		if (ret != ICE_SUCCESS) {
			ret = -EINVAL;
			goto DONE;
		}
	}

DONE:
	return ret;
}
static int
ice_vsi_config_qinq_insertion(struct ice_vsi *vsi, bool on)
{
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	struct ice_vsi_ctx ctxt;
	uint8_t qinq_flags = 0;
	int ret = 0;

	/* Check if it has been already on or off */
	if (vsi->info.valid_sections &
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID)) {
		if (on) {
			if ((vsi->info.outer_tag_flags &
			     ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST) ==
			    ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST)
				return 0; /* already on */
		} else {
			if (!(vsi->info.outer_tag_flags &
			      ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST))
				return 0; /* already off */
		}
	}

	if (on)
		qinq_flags = ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST;
	else
		qinq_flags = 0;
	/* clear global insertion and use per packet insertion */
	vsi->info.outer_tag_flags &= ~(ICE_AQ_VSI_OUTER_TAG_INSERT);
	vsi->info.outer_tag_flags &= ~(ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST);
	vsi->info.outer_tag_flags |= qinq_flags;
	/* use the default outer tag type (ICE_DFLT_OUTER_TAG_TYPE, 0x9100) */
	vsi->info.outer_tag_flags &= ~(ICE_AQ_VSI_OUTER_TAG_TYPE_M);
	vsi->info.outer_tag_flags |= ICE_DFLT_OUTER_TAG_TYPE <<
				     ICE_AQ_VSI_OUTER_TAG_TYPE_S;
	(void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
	ctxt.info.valid_sections =
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);
	ctxt.vsi_num = vsi->vsi_id;
	ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
	if (ret) {
		PMD_DRV_LOG(INFO,
			    "Update VSI failed to %s qinq insertion",
			    on ? "enable" : "disable");
		return -EINVAL;
	}

	vsi->info.valid_sections |=
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);

	return ret;
}
static int
ice_vsi_config_qinq_stripping(struct ice_vsi *vsi, bool on)
{
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	struct ice_vsi_ctx ctxt;
	uint8_t qinq_flags = 0;
	int ret = 0;

	/* Check if it has been already on or off */
	if (vsi->info.valid_sections &
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID)) {
		if (on) {
			if ((vsi->info.outer_tag_flags &
			     ICE_AQ_VSI_OUTER_TAG_MODE_M) ==
			    ICE_AQ_VSI_OUTER_TAG_COPY)
				return 0; /* already on */
		} else {
			if ((vsi->info.outer_tag_flags &
			     ICE_AQ_VSI_OUTER_TAG_MODE_M) ==
			    ICE_AQ_VSI_OUTER_TAG_NOTHING)
				return 0; /* already off */
		}
	}

	if (on)
		qinq_flags = ICE_AQ_VSI_OUTER_TAG_COPY;
	else
		qinq_flags = ICE_AQ_VSI_OUTER_TAG_NOTHING;
	vsi->info.outer_tag_flags &= ~(ICE_AQ_VSI_OUTER_TAG_MODE_M);
	vsi->info.outer_tag_flags |= qinq_flags;
	/* use the default outer tag type (ICE_DFLT_OUTER_TAG_TYPE, 0x9100) */
	vsi->info.outer_tag_flags &= ~(ICE_AQ_VSI_OUTER_TAG_TYPE_M);
	vsi->info.outer_tag_flags |= ICE_DFLT_OUTER_TAG_TYPE <<
				     ICE_AQ_VSI_OUTER_TAG_TYPE_S;
	(void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
	ctxt.info.valid_sections =
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);
	ctxt.vsi_num = vsi->vsi_id;
	ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
	if (ret) {
		PMD_DRV_LOG(INFO,
			    "Update VSI failed to %s qinq stripping",
			    on ? "enable" : "disable");
		return -EINVAL;
	}

	vsi->info.valid_sections |=
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);

	return ret;
}
static int
ice_vsi_config_double_vlan(struct ice_vsi *vsi, int on)
{
	int ret;

	ret = ice_vsi_config_qinq_stripping(vsi, on);
	if (ret)
		PMD_DRV_LOG(ERR, "Failed to set qinq stripping - %d", ret);

	ret = ice_vsi_config_qinq_insertion(vsi, on);
	if (ret)
		PMD_DRV_LOG(ERR, "Failed to set qinq insertion - %d", ret);

	return ret;
}
static void
ice_pf_enable_irq0(struct ice_hw *hw)
{
	/* reset the registers */
	ICE_WRITE_REG(hw, PFINT_OICR_ENA, 0);
	ICE_READ_REG(hw, PFINT_OICR);

#ifdef ICE_LSE_SUPPORT
	/* Enable registers to monitor LSE */
	ICE_WRITE_REG(hw, PFINT_OICR_ENA,
		      (uint32_t)(PFINT_OICR_ENA_INT_ENA_M &
				 (~PFINT_OICR_LINK_STAT_CHANGE_M)));

	ICE_WRITE_REG(hw, PFINT_OICR_CTL,
		      (0 & PFINT_OICR_CTL_MSIX_INDX_M) |
		      ((0 << PFINT_OICR_CTL_ITR_INDX_S) &
		       PFINT_OICR_CTL_ITR_INDX_M) |
		      PFINT_OICR_CTL_CAUSE_ENA_M);

	ICE_WRITE_REG(hw, PFINT_FW_CTL,
		      (0 & PFINT_FW_CTL_MSIX_INDX_M) |
		      ((0 << PFINT_FW_CTL_ITR_INDX_S) &
		       PFINT_FW_CTL_ITR_INDX_M) |
		      PFINT_FW_CTL_CAUSE_ENA_M);
#else
	ICE_WRITE_REG(hw, PFINT_OICR_ENA, PFINT_OICR_ENA_INT_ENA_M);
#endif

	ICE_WRITE_REG(hw, GLINT_DYN_CTL(0),
		      GLINT_DYN_CTL_INTENA_M |
		      GLINT_DYN_CTL_CLEARPBA_M |
		      GLINT_DYN_CTL_ITR_INDX_M);

	ice_flush(hw);
}
/* Disable IRQ0 */
static void
ice_pf_disable_irq0(struct ice_hw *hw)
{
	/* Disable all interrupt types */
	ICE_WRITE_REG(hw, GLINT_DYN_CTL(0), GLINT_DYN_CTL_WB_ON_ITR_M);
	ice_flush(hw);
}
static void
ice_handle_aq_msg(struct rte_eth_dev *dev)
{
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_ctl_q_info *cq = &hw->adminq;
	struct ice_rq_event_info event;
	uint16_t pending, opcode;
	int ret;

	event.buf_len = ICE_AQ_MAX_BUF_LEN;
	event.msg_buf = rte_zmalloc(NULL, event.buf_len, 0);
	if (!event.msg_buf) {
		PMD_DRV_LOG(ERR, "Failed to allocate mem");
		return;
	}

	pending = 1;
	while (pending) {
		ret = ice_clean_rq_elem(hw, cq, &event, &pending);

		if (ret != ICE_SUCCESS) {
			PMD_DRV_LOG(INFO,
				    "Failed to read msg from AdminQ, "
				    "adminq_err: %u",
				    hw->adminq.sq_last_status);
			break;
		}
		opcode = rte_le_to_cpu_16(event.desc.opcode);

		switch (opcode) {
		case ice_aqc_opc_get_link_status:
			ret = ice_link_update(dev, 0);
			if (!ret)
				rte_eth_dev_callback_process
					(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
			break;
		default:
			PMD_DRV_LOG(DEBUG, "Request %u is not supported yet",
				    opcode);
			break;
		}
	}
	rte_free(event.msg_buf);
}
/**
 * Interrupt handler triggered by NIC for handling
 * specific interrupt.
 *
 * @param handle
 *  Pointer to interrupt handle.
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
ice_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t oicr;
	uint32_t reg;
	uint8_t pf_num;
	uint8_t event;
	uint16_t queue;
	int ret;
#ifdef ICE_LSE_SUPPORT
	uint32_t int_fw_ctl;
#endif

	/* Disable interrupt */
	ice_pf_disable_irq0(hw);

	/* read out interrupt causes */
	oicr = ICE_READ_REG(hw, PFINT_OICR);
#ifdef ICE_LSE_SUPPORT
	int_fw_ctl = ICE_READ_REG(hw, PFINT_FW_CTL);
#endif

	/* No interrupt event indicated */
	if (!(oicr & PFINT_OICR_INTEVENT_M)) {
		PMD_DRV_LOG(INFO, "No interrupt event");
		goto done;
	}

#ifdef ICE_LSE_SUPPORT
	if (int_fw_ctl & PFINT_FW_CTL_INTEVENT_M) {
		PMD_DRV_LOG(INFO, "FW_CTL: link state change event");
		ice_handle_aq_msg(dev);
	}
#else
	if (oicr & PFINT_OICR_LINK_STAT_CHANGE_M) {
		PMD_DRV_LOG(INFO, "OICR: link state change event");
		ret = ice_link_update(dev, 0);
		if (!ret)
			rte_eth_dev_callback_process
				(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
	}
#endif

	if (oicr & PFINT_OICR_MAL_DETECT_M) {
		PMD_DRV_LOG(WARNING, "OICR: MDD event");
		reg = ICE_READ_REG(hw, GL_MDET_TX_PQM);
		if (reg & GL_MDET_TX_PQM_VALID_M) {
			pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >>
				 GL_MDET_TX_PQM_PF_NUM_S;
			event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >>
				GL_MDET_TX_PQM_MAL_TYPE_S;
			queue = (reg & GL_MDET_TX_PQM_QNUM_M) >>
				GL_MDET_TX_PQM_QNUM_S;

			PMD_DRV_LOG(WARNING, "Malicious Driver Detection event "
				    "%d by PQM on TX queue %d PF# %d",
				    event, queue, pf_num);
		}

		reg = ICE_READ_REG(hw, GL_MDET_TX_TCLAN);
		if (reg & GL_MDET_TX_TCLAN_VALID_M) {
			pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >>
				 GL_MDET_TX_TCLAN_PF_NUM_S;
			event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >>
				GL_MDET_TX_TCLAN_MAL_TYPE_S;
			queue = (reg & GL_MDET_TX_TCLAN_QNUM_M) >>
				GL_MDET_TX_TCLAN_QNUM_S;

			PMD_DRV_LOG(WARNING, "Malicious Driver Detection event "
				    "%d by TCLAN on TX queue %d PF# %d",
				    event, queue, pf_num);
		}
	}
done:
	/* Enable interrupt */
	ice_pf_enable_irq0(hw);
	rte_intr_ack(dev->intr_handle);
}
static void
ice_init_proto_xtr(struct rte_eth_dev *dev)
{
	struct ice_adapter *ad =
		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	const struct proto_xtr_ol_flag *ol_flag;
	bool proto_xtr_enable = false;
	int offset;
	uint16_t i;

	pf->proto_xtr = rte_zmalloc(NULL, pf->lan_nb_qps, 0);
	if (unlikely(pf->proto_xtr == NULL)) {
		PMD_DRV_LOG(ERR, "No memory for setting up protocol extraction table");
		return;
	}

	for (i = 0; i < pf->lan_nb_qps; i++) {
		pf->proto_xtr[i] = ad->devargs.proto_xtr[i] != PROTO_XTR_NONE ?
				   ad->devargs.proto_xtr[i] :
				   ad->devargs.proto_xtr_dflt;

		if (pf->proto_xtr[i] != PROTO_XTR_NONE) {
			uint8_t type = pf->proto_xtr[i];

			ice_proto_xtr_ol_flag_params[type].required = true;
			proto_xtr_enable = true;
		}
	}

	if (likely(!proto_xtr_enable))
		return;

	ice_check_proto_xtr_support(hw);

	offset = rte_mbuf_dynfield_register(&ice_proto_xtr_metadata_param);
	if (unlikely(offset == -1)) {
		PMD_DRV_LOG(ERR,
			    "Protocol extraction metadata is disabled in mbuf with error %d",
			    -rte_errno);
		return;
	}

	PMD_DRV_LOG(DEBUG,
		    "Protocol extraction metadata offset in mbuf is : %d",
		    offset);
	rte_net_ice_dynfield_proto_xtr_metadata_offs = offset;

	for (i = 0; i < RTE_DIM(ice_proto_xtr_ol_flag_params); i++) {
		ol_flag = &ice_proto_xtr_ol_flag_params[i];

		if (!ol_flag->required)
			continue;

		if (!ice_proto_xtr_hw_support[i]) {
			PMD_DRV_LOG(ERR,
				    "Protocol extraction type %u is not supported in hardware",
				    i);
			rte_net_ice_dynfield_proto_xtr_metadata_offs = -1;
			break;
		}

		offset = rte_mbuf_dynflag_register(&ol_flag->param);
		if (unlikely(offset == -1)) {
			PMD_DRV_LOG(ERR,
				    "Protocol extraction offload '%s' failed to register with error %d",
				    ol_flag->param.name, -rte_errno);

			rte_net_ice_dynfield_proto_xtr_metadata_offs = -1;
			break;
		}

		PMD_DRV_LOG(DEBUG,
			    "Protocol extraction offload '%s' offset in mbuf is : %d",
			    ol_flag->param.name, offset);
		*ol_flag->ol_flag = 1ULL << offset;
	}
}
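/*
 * Application-side sketch (illustrative) of consuming the metadata
 * registered above, using the standard mbuf dynfield/dynflag lookup API
 * with the names defined at the top of this file:
 *
 *	int offs = rte_mbuf_dynfield_lookup(
 *			"ice_dynfield_proto_xtr_metadata", NULL);
 *	int bit = rte_mbuf_dynflag_lookup(
 *			"ice_dynflag_proto_xtr_ipv4", NULL);
 *
 *	if (offs >= 0 && bit >= 0 && (mb->ol_flags & (1ULL << bit)))
 *		xtr = *RTE_MBUF_DYNFIELD(mb, offs, uint32_t *);
 */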
/* Initialize SW parameters of PF */
static int
ice_pf_sw_init(struct rte_eth_dev *dev)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_hw *hw = ICE_PF_TO_HW(pf);

	pf->lan_nb_qp_max =
		(uint16_t)RTE_MIN(hw->func_caps.common_cap.num_txq,
				  hw->func_caps.common_cap.num_rxq);

	pf->lan_nb_qps = pf->lan_nb_qp_max;

	ice_init_proto_xtr(dev);

	if (hw->func_caps.fd_fltr_guar > 0 ||
	    hw->func_caps.fd_fltr_best_effort > 0) {
		pf->flags |= ICE_FLAG_FDIR;
		pf->fdir_nb_qps = ICE_DEFAULT_QP_NUM_FDIR;
		pf->lan_nb_qps = pf->lan_nb_qp_max - pf->fdir_nb_qps;
	} else {
		pf->fdir_nb_qps = 0;
	}
	pf->fdir_qp_offset = 0;

	return 0;
}
static struct ice_vsi *
ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_vsi *vsi = NULL;
	struct ice_vsi_ctx vsi_ctx;
	int ret;
	struct rte_ether_addr broadcast = {
		.addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff} };
	struct rte_ether_addr mac_addr;
	uint16_t max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
	uint8_t tc_bitmap = 0x1;
	uint16_t cfg;

	/* hw->num_lports = 1 in NIC mode */
	vsi = rte_zmalloc(NULL, sizeof(struct ice_vsi), 0);
	if (!vsi)
		return NULL;

	vsi->idx = pf->next_vsi_idx;
	pf->next_vsi_idx++;
	vsi->type = type;
	vsi->adapter = ICE_PF_TO_ADAPTER(pf);
	vsi->max_macaddrs = ICE_NUM_MACADDR_MAX;
	vsi->vlan_anti_spoof_on = 0;
	vsi->vlan_filter_on = 1;
	TAILQ_INIT(&vsi->mac_list);
	TAILQ_INIT(&vsi->vlan_list);

	/* Be sync with ETH_RSS_RETA_SIZE_x maximum value definition */
	pf->hash_lut_size = hw->func_caps.common_cap.rss_table_size >
			ETH_RSS_RETA_SIZE_512 ? ETH_RSS_RETA_SIZE_512 :
			hw->func_caps.common_cap.rss_table_size;
	pf->flags |= ICE_FLAG_RSS_AQ_CAPABLE;

	memset(&vsi_ctx, 0, sizeof(vsi_ctx));
	switch (type) {
	case ICE_VSI_PF:
		vsi->nb_qps = pf->lan_nb_qps;
		vsi->base_queue = 1;
		ice_vsi_config_default_rss(&vsi_ctx.info);
		vsi_ctx.alloc_from_pool = true;
		vsi_ctx.flags = ICE_AQ_VSI_TYPE_PF;
		/* switch_id is queried by get_switch_config aq, which is done
		 * by ice_init_hw
		 */
		vsi_ctx.info.sw_id = hw->port_info->sw_id;
		vsi_ctx.info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
		/* Allow all untagged or tagged packets */
		vsi_ctx.info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL;
		vsi_ctx.info.vlan_flags |= ICE_AQ_VSI_VLAN_EMOD_NOTHING;
		vsi_ctx.info.q_opt_rss = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF |
					 ICE_AQ_VSI_Q_OPT_RSS_TPLZ;

		/* FDIR */
		cfg = ICE_AQ_VSI_PROP_SECURITY_VALID |
			ICE_AQ_VSI_PROP_FLOW_DIR_VALID;
		vsi_ctx.info.valid_sections |= rte_cpu_to_le_16(cfg);
		cfg = ICE_AQ_VSI_FD_ENABLE;
		vsi_ctx.info.fd_options = rte_cpu_to_le_16(cfg);
		vsi_ctx.info.max_fd_fltr_dedicated =
			rte_cpu_to_le_16(hw->func_caps.fd_fltr_guar);
		vsi_ctx.info.max_fd_fltr_shared =
			rte_cpu_to_le_16(hw->func_caps.fd_fltr_best_effort);

		/* Enable VLAN/UP trip */
		ret = ice_vsi_config_tc_queue_mapping(vsi,
						      &vsi_ctx.info,
						      ICE_DEFAULT_TCMAP);
		if (ret) {
			PMD_INIT_LOG(ERR,
				     "tc queue mapping with vsi failed, "
				     "err = %d",
				     ret);
			goto fail_mem;
		}

		break;
	case ICE_VSI_CTRL:
		vsi->nb_qps = pf->fdir_nb_qps;
		vsi->base_queue = ICE_FDIR_QUEUE_ID;
		vsi_ctx.alloc_from_pool = true;
		vsi_ctx.flags = ICE_AQ_VSI_TYPE_PF;

		cfg = ICE_AQ_VSI_PROP_FLOW_DIR_VALID;
		vsi_ctx.info.valid_sections |= rte_cpu_to_le_16(cfg);
		cfg = ICE_AQ_VSI_FD_PROG_ENABLE;
		vsi_ctx.info.fd_options = rte_cpu_to_le_16(cfg);
		vsi_ctx.info.sw_id = hw->port_info->sw_id;
		vsi_ctx.info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
		ret = ice_vsi_config_tc_queue_mapping(vsi,
						      &vsi_ctx.info,
						      ICE_DEFAULT_TCMAP);
		if (ret) {
			PMD_INIT_LOG(ERR,
				     "tc queue mapping with vsi failed, "
				     "err = %d",
				     ret);
			goto fail_mem;
		}
		break;
	default:
		/* for other types of VSI */
		PMD_INIT_LOG(ERR, "other types of VSI not supported");
		goto fail_mem;
	}

	/* VF has MSIX interrupt in VF range, don't allocate here */
	if (type == ICE_VSI_PF) {
		ret = ice_res_pool_alloc(&pf->msix_pool,
					 RTE_MIN(vsi->nb_qps,
						 RTE_MAX_RXTX_INTR_VEC_ID));
		if (ret < 0) {
			PMD_INIT_LOG(ERR, "VSI MAIN %d get heap failed %d",
				     vsi->vsi_id, ret);
		}
		vsi->msix_intr = ret;
		vsi->nb_msix = RTE_MIN(vsi->nb_qps, RTE_MAX_RXTX_INTR_VEC_ID);
	} else if (type == ICE_VSI_CTRL) {
		ret = ice_res_pool_alloc(&pf->msix_pool, 1);
		if (ret < 0) {
			PMD_DRV_LOG(ERR, "VSI %d get heap failed %d",
				    vsi->vsi_id, ret);
		}
		vsi->msix_intr = ret;
		vsi->nb_msix = 1;
	} else {
		vsi->msix_intr = 0;
		vsi->nb_msix = 0;
	}
	ret = ice_add_vsi(hw, vsi->idx, &vsi_ctx, NULL);
	if (ret != ICE_SUCCESS) {
		PMD_INIT_LOG(ERR, "add vsi failed, err = %d", ret);
		goto fail_mem;
	}
	/* store vsi information in the SW structure */
	vsi->vsi_id = vsi_ctx.vsi_num;
	vsi->info = vsi_ctx.info;
	pf->vsis_allocated = vsi_ctx.vsis_allocd;
	pf->vsis_unallocated = vsi_ctx.vsis_unallocated;

	if (type == ICE_VSI_PF) {
		/* MAC configuration */
		rte_ether_addr_copy((struct rte_ether_addr *)
					hw->port_info->mac.perm_addr,
				    &pf->dev_addr);

		rte_ether_addr_copy(&pf->dev_addr, &mac_addr);
		ret = ice_add_mac_filter(vsi, &mac_addr);
		if (ret != ICE_SUCCESS)
			PMD_INIT_LOG(ERR, "Failed to add dflt MAC filter");

		rte_ether_addr_copy(&broadcast, &mac_addr);
		ret = ice_add_mac_filter(vsi, &mac_addr);
		if (ret != ICE_SUCCESS)
			PMD_INIT_LOG(ERR, "Failed to add MAC filter");
	}

	/* At the beginning, only TC0. */
	/* What we need here is the maximum number of the TX queues.
	 * Currently vsi->nb_qps means it.
	 * Correct it if any change.
	 */
	max_txqs[0] = vsi->nb_qps;
	ret = ice_cfg_vsi_lan(hw->port_info, vsi->idx,
			      tc_bitmap, max_txqs);
	if (ret != ICE_SUCCESS)
		PMD_INIT_LOG(ERR, "Failed to config vsi sched");

	return vsi;
fail_mem:
	rte_free(vsi);
	pf->next_vsi_idx--;
	return NULL;
}
static int
ice_send_driver_ver(struct ice_hw *hw)
{
	struct ice_driver_ver dv;

	/* we don't have a driver version to report, use 0 as a dummy */
	dv.major_ver = 0;
	dv.minor_ver = 0;
	dv.build_ver = 0;
	dv.subbuild_ver = 0;
	strncpy((char *)dv.driver_string, "dpdk", sizeof(dv.driver_string));

	return ice_aq_send_driver_ver(hw, &dv, NULL);
}
static int
ice_pf_setup(struct ice_pf *pf)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_vsi *vsi;
	uint16_t unused;

	/* Clear all stats counters */
	pf->offset_loaded = false;
	memset(&pf->stats, 0, sizeof(struct ice_hw_port_stats));
	memset(&pf->stats_offset, 0, sizeof(struct ice_hw_port_stats));
	memset(&pf->internal_stats, 0, sizeof(struct ice_eth_stats));
	memset(&pf->internal_stats_offset, 0, sizeof(struct ice_eth_stats));

	/* force guaranteed filter pool for PF */
	ice_alloc_fd_guar_item(hw, &unused,
			       hw->func_caps.fd_fltr_guar);
	/* force shared filter pool for PF */
	ice_alloc_fd_shrd_item(hw, &unused,
			       hw->func_caps.fd_fltr_best_effort);

	vsi = ice_setup_vsi(pf, ICE_VSI_PF);
	if (!vsi) {
		PMD_INIT_LOG(ERR, "Failed to add vsi for PF");
		return -EINVAL;
	}

	pf->main_vsi = vsi;

	return 0;
}
/* PCIe configuration space setting */
#define PCI_CFG_SPACE_SIZE	256
#define PCI_CFG_SPACE_EXP_SIZE	4096
#define PCI_EXT_CAP_ID(header)	(int)((header) & 0x0000ffff)
#define PCI_EXT_CAP_NEXT(header) (((header) >> 20) & 0xffc)
#define PCI_EXT_CAP_ID_DSN	0x03

static int
ice_pci_find_next_ext_capability(struct rte_pci_device *dev, int cap)
{
	uint32_t header;
	int ttl;
	int pos = PCI_CFG_SPACE_SIZE;

	/* minimum 8 bytes per capability */
	ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;

	if (rte_pci_read_config(dev, &header, 4, pos) < 0) {
		PMD_INIT_LOG(ERR, "ice error reading extended capabilities\n");
		return -1;
	}

	/*
	 * If we have no capabilities, this is indicated by cap ID,
	 * cap version and next pointer all being 0.
	 */
	if (header == 0)
		return 0;

	while (ttl-- > 0) {
		if (PCI_EXT_CAP_ID(header) == cap)
			return pos;

		pos = PCI_EXT_CAP_NEXT(header);

		if (pos < PCI_CFG_SPACE_SIZE)
			break;

		if (rte_pci_read_config(dev, &header, 4, pos) < 0) {
			PMD_INIT_LOG(ERR, "ice error reading extended capabilities\n");
			return -1;
		}
	}

	return 0;
}
/*
 * Extract device serial number from PCIe Configuration Space and
 * determine the pkg file path according to the DSN.
 */
static int
ice_pkg_file_search_path(struct rte_pci_device *pci_dev, char *pkg_file)
{
	int pos;
	char opt_ddp_filename[ICE_MAX_PKG_FILENAME_SIZE];
	uint32_t dsn_low, dsn_high;
	memset(opt_ddp_filename, 0, ICE_MAX_PKG_FILENAME_SIZE);

	pos = ice_pci_find_next_ext_capability(pci_dev, PCI_EXT_CAP_ID_DSN);

	if (pos) {
		rte_pci_read_config(pci_dev, &dsn_low, 4, pos + 4);
		rte_pci_read_config(pci_dev, &dsn_high, 4, pos + 8);
		snprintf(opt_ddp_filename, ICE_MAX_PKG_FILENAME_SIZE,
			 "ice-%08x%08x.pkg", dsn_high, dsn_low);
	} else {
		PMD_INIT_LOG(ERR, "Failed to read device serial number\n");
		goto fail_dsn;
	}

	strncpy(pkg_file, ICE_PKG_FILE_SEARCH_PATH_UPDATES,
		ICE_MAX_PKG_FILENAME_SIZE);
	if (!access(strcat(pkg_file, opt_ddp_filename), 0))
		return 0;

	strncpy(pkg_file, ICE_PKG_FILE_SEARCH_PATH_DEFAULT,
		ICE_MAX_PKG_FILENAME_SIZE);
	if (!access(strcat(pkg_file, opt_ddp_filename), 0))
		return 0;

fail_dsn:
	strncpy(pkg_file, ICE_PKG_FILE_UPDATES, ICE_MAX_PKG_FILENAME_SIZE);
	if (!access(pkg_file, 0))
		return 0;
	strncpy(pkg_file, ICE_PKG_FILE_DEFAULT, ICE_MAX_PKG_FILENAME_SIZE);
	return 0;
}
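/*
 * Example (illustrative): for DSN 0x0123456789abcdef the first candidate
 * is "<ICE_PKG_FILE_SEARCH_PATH_UPDATES>ice-0123456789abcdef.pkg"; if no
 * DSN-specific package is accessible, the generic ICE_PKG_FILE_UPDATES
 * and ICE_PKG_FILE_DEFAULT paths serve as fallbacks.
 */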
static enum ice_pkg_type
ice_load_pkg_type(struct ice_hw *hw)
{
	enum ice_pkg_type package_type;

	/* store the activated package type (OS default or Comms) */
	if (!strncmp((char *)hw->active_pkg_name, ICE_OS_DEFAULT_PKG_NAME,
		     ICE_PKG_NAME_SIZE))
		package_type = ICE_PKG_TYPE_OS_DEFAULT;
	else if (!strncmp((char *)hw->active_pkg_name, ICE_COMMS_PKG_NAME,
			  ICE_PKG_NAME_SIZE))
		package_type = ICE_PKG_TYPE_COMMS;
	else
		package_type = ICE_PKG_TYPE_UNKNOWN;

	PMD_INIT_LOG(NOTICE, "Active package is: %d.%d.%d.%d, %s",
		     hw->active_pkg_ver.major, hw->active_pkg_ver.minor,
		     hw->active_pkg_ver.update, hw->active_pkg_ver.draft,
		     hw->active_pkg_name);

	return package_type;
}
static int ice_load_pkg(struct rte_eth_dev *dev)
{
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	char pkg_file[ICE_MAX_PKG_FILENAME_SIZE];
	int err;
	uint8_t *buf;
	int buf_len;
	FILE *file;
	struct stat fstat;
	struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
	struct ice_adapter *ad =
		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);

	ice_pkg_file_search_path(pci_dev, pkg_file);

	file = fopen(pkg_file, "rb");
	if (!file) {
		PMD_INIT_LOG(ERR, "failed to open file: %s\n", pkg_file);
		return -1;
	}

	err = stat(pkg_file, &fstat);
	if (err) {
		PMD_INIT_LOG(ERR, "failed to get file stats\n");
		fclose(file);
		return err;
	}

	buf_len = fstat.st_size;
	buf = rte_malloc(NULL, buf_len, 0);

	if (!buf) {
		PMD_INIT_LOG(ERR, "failed to allocate buf of size %d for package\n",
			     buf_len);
		fclose(file);
		return -1;
	}

	err = fread(buf, buf_len, 1, file);
	if (err != 1) {
		PMD_INIT_LOG(ERR, "failed to read package data\n");
		fclose(file);
		err = -1;
		goto fail_exit;
	}

	fclose(file);

	err = ice_copy_and_init_pkg(hw, buf, buf_len);
	if (err) {
		PMD_INIT_LOG(ERR, "ice_copy_and_init_hw failed: %d\n", err);
		goto fail_exit;
	}

	/* store the loaded pkg type info */
	ad->active_pkg_type = ice_load_pkg_type(hw);

	err = ice_init_hw_tbls(hw);
	if (err) {
		PMD_INIT_LOG(ERR, "ice_init_hw_tbls failed: %d\n", err);
		goto fail_init_tbls;
	}

	return 0;

fail_init_tbls:
	rte_free(hw->pkg_copy);
fail_exit:
	rte_free(buf);
	return err;
}
static void
ice_base_queue_get(struct ice_pf *pf)
{
	uint32_t reg;
	struct ice_hw *hw = ICE_PF_TO_HW(pf);

	reg = ICE_READ_REG(hw, PFLAN_RX_QALLOC);
	if (reg & PFLAN_RX_QALLOC_VALID_M) {
		pf->base_queue = reg & PFLAN_RX_QALLOC_FIRSTQ_M;
	} else {
		PMD_INIT_LOG(WARNING, "Failed to get Rx base queue"
					" index");
	}
}
static int
parse_bool(const char *key, const char *value, void *args)
{
	int *i = (int *)args;
	char *end;
	int num;

	num = strtoul(value, &end, 10);

	if (num != 0 && num != 1) {
		PMD_DRV_LOG(WARNING, "invalid value:\"%s\" for key:\"%s\", "
			"value must be 0 or 1",
			value, key);
		return -1;
	}

	*i = num;
	return 0;
}
static int ice_parse_devargs(struct rte_eth_dev *dev)
{
	struct ice_adapter *ad =
		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct rte_devargs *devargs = dev->device->devargs;
	struct rte_kvargs *kvlist;
	int ret;

	if (devargs == NULL)
		return 0;

	kvlist = rte_kvargs_parse(devargs->args, ice_valid_args);
	if (kvlist == NULL) {
		PMD_INIT_LOG(ERR, "Invalid kvargs key\n");
		return -EINVAL;
	}

	ad->devargs.proto_xtr_dflt = PROTO_XTR_NONE;
	memset(ad->devargs.proto_xtr, PROTO_XTR_NONE,
	       sizeof(ad->devargs.proto_xtr));

	ret = rte_kvargs_process(kvlist, ICE_PROTO_XTR_ARG,
				 &handle_proto_xtr_arg, &ad->devargs);
	if (ret)
		goto bail;

	ret = rte_kvargs_process(kvlist, ICE_SAFE_MODE_SUPPORT_ARG,
				 &parse_bool, &ad->devargs.safe_mode_support);
	if (ret)
		goto bail;

	ret = rte_kvargs_process(kvlist, ICE_PIPELINE_MODE_SUPPORT_ARG,
				 &parse_bool, &ad->devargs.pipe_mode_support);
	if (ret)
		goto bail;

	ret = rte_kvargs_process(kvlist, ICE_FLOW_MARK_SUPPORT_ARG,
				 &parse_bool, &ad->devargs.flow_mark_support);
	if (ret)
		goto bail;

bail:
	rte_kvargs_free(kvlist);
	return ret;
}
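/*
 * Example (illustrative): a device argument string combining all of the
 * keys handled above, as passed on the EAL command line:
 *   proto_xtr=vlan,safe-mode-support=1,pipeline-mode-support=1,flow-mark-support=1
 */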
/* Forward LLDP packets to the default VSI by setting switch rules */
static int
ice_vsi_config_sw_lldp(struct ice_vsi *vsi, bool on)
{
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	struct ice_fltr_list_entry *s_list_itr = NULL;
	struct LIST_HEAD_TYPE list_head;
	int ret = 0;

	INIT_LIST_HEAD(&list_head);

	s_list_itr = (struct ice_fltr_list_entry *)
			ice_malloc(hw, sizeof(*s_list_itr));
	if (!s_list_itr)
		return -ENOMEM;
	s_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_ETHERTYPE;
	s_list_itr->fltr_info.vsi_handle = vsi->idx;
	s_list_itr->fltr_info.l_data.ethertype_mac.ethertype =
			RTE_ETHER_TYPE_LLDP;
	s_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
	s_list_itr->fltr_info.flag = ICE_FLTR_RX;
	s_list_itr->fltr_info.src_id = ICE_SRC_ID_LPORT;
	LIST_ADD(&s_list_itr->list_entry, &list_head);
	if (on)
		ret = ice_add_eth_mac(hw, &list_head);
	else
		ret = ice_remove_eth_mac(hw, &list_head);

	rte_free(s_list_itr);
	return ret;
}
static enum ice_status
ice_get_hw_res(struct ice_hw *hw, uint16_t res_type,
	       uint16_t num, uint16_t desc_id,
	       uint16_t *prof_buf, uint16_t *num_prof)
{
	struct ice_aqc_get_allocd_res_desc_resp *resp_buf;
	int ret;
	uint16_t buf_len;
	bool res_shared = 1;
	struct ice_aq_desc aq_desc;
	struct ice_sq_cd *cd = NULL;
	struct ice_aqc_get_allocd_res_desc *cmd =
			&aq_desc.params.get_res_desc;

	buf_len = sizeof(resp_buf->elem) * num;
	resp_buf = ice_malloc(hw, buf_len);
	if (!resp_buf)
		return -ENOMEM;

	ice_fill_dflt_direct_cmd_desc(&aq_desc,
				      ice_aqc_opc_get_allocd_res_desc);

	cmd->ops.cmd.res = CPU_TO_LE16(((res_type << ICE_AQC_RES_TYPE_S) &
					ICE_AQC_RES_TYPE_M) | (res_shared ?
					ICE_AQC_RES_TYPE_FLAG_SHARED : 0));
	cmd->ops.cmd.first_desc = CPU_TO_LE16(desc_id);

	ret = ice_aq_send_cmd(hw, &aq_desc, resp_buf, buf_len, cd);
	if (!ret)
		*num_prof = LE16_TO_CPU(cmd->ops.resp.num_desc);
	else
		goto exit;

	ice_memcpy(prof_buf, resp_buf->elem, sizeof(resp_buf->elem) *
		   (*num_prof), ICE_NONDMA_TO_NONDMA);

exit:
	rte_free(resp_buf);
	return ret;
}
static int
ice_cleanup_resource(struct ice_hw *hw, uint16_t res_type)
{
	int ret;
	uint16_t prof_id;
	uint16_t prof_buf[ICE_MAX_RES_DESC_NUM];
	uint16_t first_desc = 1;
	uint16_t num_prof = 0;

	ret = ice_get_hw_res(hw, res_type, ICE_MAX_RES_DESC_NUM,
			     first_desc, prof_buf, &num_prof);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to get fxp resource");
		return ret;
	}

	for (prof_id = 0; prof_id < num_prof; prof_id++) {
		ret = ice_free_hw_res(hw, res_type, 1, &prof_buf[prof_id]);
		if (ret) {
			PMD_INIT_LOG(ERR, "Failed to free fxp resource");
			return ret;
		}
	}

	return 0;
}
static int
ice_reset_fxp_resource(struct ice_hw *hw)
{
	int ret;

	ret = ice_cleanup_resource(hw, ICE_AQC_RES_TYPE_FD_PROF_BLDR_PROFID);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to clean up fdir resource");
		return ret;
	}

	ret = ice_cleanup_resource(hw, ICE_AQC_RES_TYPE_HASH_PROF_BLDR_PROFID);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to clean up rss resource");
		return ret;
	}

	return 0;
}
static void
ice_rss_ctx_init(struct ice_pf *pf)
{
	ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4);
	ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6);

	ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_udp);
	ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_udp);

	ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_tcp);
	ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_tcp);
}
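/*
 * Each gtpu_hash_ctx slot caches the most recently applied packet-header /
 * hash-field / symmetry tuple for one GTPU-EH inner pattern (see
 * ice_add_rss_cfg_post() below), so resetting them here marks every
 * pattern as unconfigured.
 */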
static int
ice_dev_init(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev;
	struct rte_intr_handle *intr_handle;
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_adapter *ad =
		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct ice_vsi *vsi;
	int ret;

	dev->dev_ops = &ice_eth_dev_ops;
	dev->rx_queue_count = ice_rx_queue_count;
	dev->rx_descriptor_status = ice_rx_descriptor_status;
	dev->tx_descriptor_status = ice_tx_descriptor_status;
	dev->rx_pkt_burst = ice_recv_pkts;
	dev->tx_pkt_burst = ice_xmit_pkts;
	dev->tx_pkt_prepare = ice_prep_pkts;

	/* for secondary processes, we don't initialise any further as primary
	 * has already done this work.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		ice_set_rx_function(dev);
		ice_set_tx_function(dev);
		return 0;
	}

	ice_set_default_ptype_table(dev);
	pci_dev = RTE_DEV_TO_PCI(dev->device);
	intr_handle = &pci_dev->intr_handle;

	pf->adapter = ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	pf->adapter->eth_dev = dev;
	pf->dev_data = dev->data;
	hw->back = pf->adapter;
	hw->hw_addr = (uint8_t *)pci_dev->mem_resource[0].addr;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->device_id = pci_dev->id.device_id;
	hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
	hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
	hw->bus.device = pci_dev->addr.devid;
	hw->bus.func = pci_dev->addr.function;

	ret = ice_parse_devargs(dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to parse devargs");
		return -EINVAL;
	}

	ice_init_controlq_parameter(hw);

	ret = ice_init_hw(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to initialize HW");
		return -EINVAL;
	}

	ret = ice_load_pkg(dev);
	if (ret) {
		if (ad->devargs.safe_mode_support == 0) {
			PMD_INIT_LOG(ERR, "Failed to load the DDP package,"
					"Use safe-mode-support=1 to enter Safe Mode");
			return ret;
		}

		PMD_INIT_LOG(WARNING, "Failed to load the DDP package,"
					"Entering Safe Mode");
		ad->is_safe_mode = 1;
	}

	PMD_INIT_LOG(INFO, "FW %d.%d.%05d API %d.%d",
		     hw->fw_maj_ver, hw->fw_min_ver, hw->fw_build,
		     hw->api_maj_ver, hw->api_min_ver);

	ice_pf_sw_init(dev);
	ret = ice_init_mac_address(dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to initialize mac address");
		goto err_init_mac;
	}

	/* Pass the information to the rte_eth_dev_close() that it should also
	 * release the private port resources.
	 */
	dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;

	ret = ice_res_pool_init(&pf->msix_pool, 1,
				hw->func_caps.common_cap.num_msix_vectors - 1);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to init MSIX pool");
		goto err_msix_pool_init;
	}

	ret = ice_pf_setup(pf);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to setup PF");
		goto err_pf_setup;
	}

	ret = ice_send_driver_ver(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to send driver version");
		goto err_pf_setup;
	}

	vsi = pf->main_vsi;

	/* Disable double vlan by default */
	ice_vsi_config_double_vlan(vsi, false);

	ret = ice_aq_stop_lldp(hw, true, false, NULL);
	if (ret != ICE_SUCCESS)
		PMD_INIT_LOG(DEBUG, "lldp has already stopped\n");
	ret = ice_init_dcb(hw, true);
	if (ret != ICE_SUCCESS)
		PMD_INIT_LOG(DEBUG, "Failed to init DCB\n");
	/* Forward LLDP packets to default VSI */
	ret = ice_vsi_config_sw_lldp(vsi, true);
	if (ret != ICE_SUCCESS)
		PMD_INIT_LOG(DEBUG, "Failed to cfg lldp\n");

	/* register callback func to eal lib */
	rte_intr_callback_register(intr_handle,
				   ice_interrupt_handler, dev);

	ice_pf_enable_irq0(hw);

	/* enable uio intr after callback register */
	rte_intr_enable(intr_handle);

	/* get base queue pairs index in the device */
	ice_base_queue_get(pf);

	/* Initialize RSS context for gtpu_eh */
	ice_rss_ctx_init(pf);

	if (!ad->is_safe_mode) {
		ret = ice_flow_init(ad);
		if (ret) {
			PMD_INIT_LOG(ERR, "Failed to initialize flow");
			return ret;
		}
	}

	ret = ice_reset_fxp_resource(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to reset fxp resource");
		return ret;
	}

	return 0;

err_pf_setup:
	ice_res_pool_destroy(&pf->msix_pool);
err_msix_pool_init:
	rte_free(dev->data->mac_addrs);
	dev->data->mac_addrs = NULL;
err_init_mac:
	ice_sched_cleanup_all(hw);
	rte_free(hw->port_info);
	ice_shutdown_all_ctrlq(hw);
	rte_free(pf->proto_xtr);

	return ret;
}
static int
ice_release_vsi(struct ice_vsi *vsi)
{
	struct ice_hw *hw;
	struct ice_vsi_ctx vsi_ctx;
	enum ice_status ret;
	int error = 0;

	if (!vsi)
		return error;

	hw = ICE_VSI_TO_HW(vsi);

	ice_remove_all_mac_vlan_filters(vsi);

	memset(&vsi_ctx, 0, sizeof(vsi_ctx));

	vsi_ctx.vsi_num = vsi->vsi_id;
	vsi_ctx.info = vsi->info;
	ret = ice_free_vsi(hw, vsi->idx, &vsi_ctx, false, NULL);
	if (ret != ICE_SUCCESS) {
		PMD_INIT_LOG(ERR, "Failed to free vsi by aq, %u", vsi->vsi_id);
		error = -1;
	}

	rte_free(vsi->rss_lut);
	rte_free(vsi->rss_key);
	rte_free(vsi);
	return error;
}
static void
ice_vsi_disable_queues_intr(struct ice_vsi *vsi)
{
	struct rte_eth_dev *dev = vsi->adapter->eth_dev;
	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	uint16_t msix_intr, i;

	/* disable interrupts and clear all the existing config */
	for (i = 0; i < vsi->nb_qps; i++) {
		ICE_WRITE_REG(hw, QINT_TQCTL(vsi->base_queue + i), 0);
		ICE_WRITE_REG(hw, QINT_RQCTL(vsi->base_queue + i), 0);
		rte_wmb();
	}

	if (rte_intr_allow_others(intr_handle))
		/* vfio-pci */
		for (i = 0; i < vsi->nb_msix; i++) {
			msix_intr = vsi->msix_intr + i;
			ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr),
				      GLINT_DYN_CTL_WB_ON_ITR_M);
		}
	else
		/* igb_uio */
		ICE_WRITE_REG(hw, GLINT_DYN_CTL(0), GLINT_DYN_CTL_WB_ON_ITR_M);
}
static void
ice_dev_stop(struct rte_eth_dev *dev)
{
	struct rte_eth_dev_data *data = dev->data;
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_vsi *main_vsi = pf->main_vsi;
	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	uint16_t i;

	/* avoid stopping again */
	if (pf->adapter_stopped)
		return;

	/* stop and clear all Rx queues */
	for (i = 0; i < data->nb_rx_queues; i++)
		ice_rx_queue_stop(dev, i);

	/* stop and clear all Tx queues */
	for (i = 0; i < data->nb_tx_queues; i++)
		ice_tx_queue_stop(dev, i);

	/* disable all queue interrupts */
	ice_vsi_disable_queues_intr(main_vsi);

	if (pf->init_link_up)
		ice_dev_set_link_up(dev);
	else
		ice_dev_set_link_down(dev);

	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
	if (intr_handle->intr_vec) {
		rte_free(intr_handle->intr_vec);
		intr_handle->intr_vec = NULL;
	}

	pf->adapter_stopped = true;
}
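
/* Close the device: stop the datapath first, then release flows, queues,
 * the main VSI, scheduler nodes and control queues. IRQ0 is disabled up
 * front so that the link-down event raised by the stop cannot run the
 * interrupt service thread against resources being freed.
 */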
static void
ice_dev_close(struct rte_eth_dev *dev)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct ice_adapter *ad =
		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);

	/* Since stop will make link down, then the link event will be
	 * triggered, disable the irq firstly to avoid the port_info etc
	 * resources deallocation causing the interrupt service thread
	 * crash.
	 */
	ice_pf_disable_irq0(hw);

	ice_dev_stop(dev);

	if (!ad->is_safe_mode)
		ice_flow_uninit(ad);

	/* release all queue resource */
	ice_free_queues(dev);

	ice_res_pool_destroy(&pf->msix_pool);
	ice_release_vsi(pf->main_vsi);
	ice_sched_cleanup_all(hw);
	ice_free_hw_tbls(hw);
	rte_free(hw->port_info);
	hw->port_info = NULL;
	ice_shutdown_all_ctrlq(hw);
	rte_free(pf->proto_xtr);
	pf->proto_xtr = NULL;

	dev->dev_ops = NULL;
	dev->rx_pkt_burst = NULL;
	dev->tx_pkt_burst = NULL;

	rte_free(dev->data->mac_addrs);
	dev->data->mac_addrs = NULL;

	/* disable uio intr before callback unregister */
	rte_intr_disable(intr_handle);

	/* unregister callback func from eal lib */
	rte_intr_callback_unregister(intr_handle,
				     ice_interrupt_handler, dev);
}
static int
ice_dev_uninit(struct rte_eth_dev *dev)
{
	ice_dev_close(dev);

	return 0;
}
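
/* Record a GTPU RSS rule in the PF hash context after it has been
 * programmed. Inner GTPU_EH rules are cached per inner protocol
 * (IPv4/IPv6 x UDP/TCP); adding a plain inner IPv4/IPv6 rule resets the
 * more specific UDP/TCP contexts it supersedes. For GTPU uplink/downlink
 * rules, an IP rule removed earlier and marked as rotating is replayed
 * here so it stays in effect alongside the new rule.
 */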
static int
ice_add_rss_cfg_post(struct ice_pf *pf, uint32_t hdr, uint64_t fld, bool symm)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_vsi *vsi = pf->main_vsi;

	if (hdr & ICE_FLOW_SEG_HDR_GTPU_EH) {
		if ((hdr & ICE_FLOW_SEG_HDR_IPV4) &&
		    (hdr & ICE_FLOW_SEG_HDR_UDP)) {
			pf->gtpu_hash_ctx.ipv4_udp.pkt_hdr = hdr;
			pf->gtpu_hash_ctx.ipv4_udp.hash_fld = fld;
			pf->gtpu_hash_ctx.ipv4_udp.symm = symm;
		} else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) &&
			   (hdr & ICE_FLOW_SEG_HDR_UDP)) {
			pf->gtpu_hash_ctx.ipv6_udp.pkt_hdr = hdr;
			pf->gtpu_hash_ctx.ipv6_udp.hash_fld = fld;
			pf->gtpu_hash_ctx.ipv6_udp.symm = symm;
		} else if ((hdr & ICE_FLOW_SEG_HDR_IPV4) &&
			   (hdr & ICE_FLOW_SEG_HDR_TCP)) {
			pf->gtpu_hash_ctx.ipv4_tcp.pkt_hdr = hdr;
			pf->gtpu_hash_ctx.ipv4_tcp.hash_fld = fld;
			pf->gtpu_hash_ctx.ipv4_tcp.symm = symm;
		} else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) &&
			   (hdr & ICE_FLOW_SEG_HDR_TCP)) {
			pf->gtpu_hash_ctx.ipv6_tcp.pkt_hdr = hdr;
			pf->gtpu_hash_ctx.ipv6_tcp.hash_fld = fld;
			pf->gtpu_hash_ctx.ipv6_tcp.symm = symm;
		} else if (hdr & ICE_FLOW_SEG_HDR_IPV4) {
			pf->gtpu_hash_ctx.ipv4.pkt_hdr = hdr;
			pf->gtpu_hash_ctx.ipv4.hash_fld = fld;
			pf->gtpu_hash_ctx.ipv4.symm = symm;
			ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_udp);
			ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_tcp);
		} else if (hdr & ICE_FLOW_SEG_HDR_IPV6) {
			pf->gtpu_hash_ctx.ipv6.pkt_hdr = hdr;
			pf->gtpu_hash_ctx.ipv6.hash_fld = fld;
			pf->gtpu_hash_ctx.ipv6.symm = symm;
			ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_udp);
			ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_tcp);
		}
	}

	if (hdr & (ICE_FLOW_SEG_HDR_GTPU_DWN |
	    ICE_FLOW_SEG_HDR_GTPU_UP)) {
		if ((hdr & ICE_FLOW_SEG_HDR_IPV4) &&
		    (hdr & ICE_FLOW_SEG_HDR_UDP)) {
			if (ICE_HASH_CFG_IS_ROTATING(&pf->gtpu_hash_ctx.ipv4)) {
				ice_add_rss_cfg(hw, vsi->idx,
					pf->gtpu_hash_ctx.ipv4.hash_fld,
					pf->gtpu_hash_ctx.ipv4.pkt_hdr,
					pf->gtpu_hash_ctx.ipv4.symm);
				ICE_HASH_CFG_ROTATE_STOP(&pf->gtpu_hash_ctx.ipv4);
			}
		} else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) &&
			   (hdr & ICE_FLOW_SEG_HDR_UDP)) {
			if (ICE_HASH_CFG_IS_ROTATING(&pf->gtpu_hash_ctx.ipv6)) {
				ice_add_rss_cfg(hw, vsi->idx,
					pf->gtpu_hash_ctx.ipv6.hash_fld,
					pf->gtpu_hash_ctx.ipv6.pkt_hdr,
					pf->gtpu_hash_ctx.ipv6.symm);
				ICE_HASH_CFG_ROTATE_STOP(&pf->gtpu_hash_ctx.ipv6);
			}
		} else if ((hdr & ICE_FLOW_SEG_HDR_IPV4) &&
			   (hdr & ICE_FLOW_SEG_HDR_TCP)) {
			if (ICE_HASH_CFG_IS_ROTATING(&pf->gtpu_hash_ctx.ipv4)) {
				ice_add_rss_cfg(hw, vsi->idx,
					pf->gtpu_hash_ctx.ipv4.hash_fld,
					pf->gtpu_hash_ctx.ipv4.pkt_hdr,
					pf->gtpu_hash_ctx.ipv4.symm);
				ICE_HASH_CFG_ROTATE_STOP(&pf->gtpu_hash_ctx.ipv4);
			}
		} else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) &&
			   (hdr & ICE_FLOW_SEG_HDR_TCP)) {
			if (ICE_HASH_CFG_IS_ROTATING(&pf->gtpu_hash_ctx.ipv6)) {
				ice_add_rss_cfg(hw, vsi->idx,
					pf->gtpu_hash_ctx.ipv6.hash_fld,
					pf->gtpu_hash_ctx.ipv6.pkt_hdr,
					pf->gtpu_hash_ctx.ipv6.symm);
				ICE_HASH_CFG_ROTATE_STOP(&pf->gtpu_hash_ctx.ipv6);
			}
		}
	}

	return 0;
}
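
/* Prepare the GTPU hash context before a new RSS rule is programmed:
 * remove cached rules that would conflict with the incoming header set.
 * Plain IP rules removed here are marked as "rotating" so that
 * ice_add_rss_cfg_post() can restore them once the new rule is in place.
 */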
static int
ice_add_rss_cfg_pre(struct ice_pf *pf, uint32_t hdr)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_vsi *vsi = pf->main_vsi;

	if (hdr & (ICE_FLOW_SEG_HDR_GTPU_DWN |
	    ICE_FLOW_SEG_HDR_GTPU_UP)) {
		if ((hdr & ICE_FLOW_SEG_HDR_IPV4) &&
		    (hdr & ICE_FLOW_SEG_HDR_UDP)) {
			if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv4_udp)) {
				ice_rem_rss_cfg(hw, vsi->idx,
					pf->gtpu_hash_ctx.ipv4_udp.hash_fld,
					pf->gtpu_hash_ctx.ipv4_udp.pkt_hdr);
				ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_udp);
			}

			if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv4)) {
				ice_rem_rss_cfg(hw, vsi->idx,
					pf->gtpu_hash_ctx.ipv4.hash_fld,
					pf->gtpu_hash_ctx.ipv4.pkt_hdr);
				ICE_HASH_CFG_ROTATE_START(&pf->gtpu_hash_ctx.ipv4);
			}
		} else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) &&
			   (hdr & ICE_FLOW_SEG_HDR_UDP)) {
			if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv6_udp)) {
				ice_rem_rss_cfg(hw, vsi->idx,
					pf->gtpu_hash_ctx.ipv6_udp.hash_fld,
					pf->gtpu_hash_ctx.ipv6_udp.pkt_hdr);
				ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_udp);
			}

			if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv6)) {
				ice_rem_rss_cfg(hw, vsi->idx,
					pf->gtpu_hash_ctx.ipv6.hash_fld,
					pf->gtpu_hash_ctx.ipv6.pkt_hdr);
				ICE_HASH_CFG_ROTATE_START(&pf->gtpu_hash_ctx.ipv6);
			}
		} else if ((hdr & ICE_FLOW_SEG_HDR_IPV4) &&
			   (hdr & ICE_FLOW_SEG_HDR_TCP)) {
			if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv4_tcp)) {
				ice_rem_rss_cfg(hw, vsi->idx,
					pf->gtpu_hash_ctx.ipv4_tcp.hash_fld,
					pf->gtpu_hash_ctx.ipv4_tcp.pkt_hdr);
				ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_tcp);
			}

			if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv4)) {
				ice_rem_rss_cfg(hw, vsi->idx,
					pf->gtpu_hash_ctx.ipv4.hash_fld,
					pf->gtpu_hash_ctx.ipv4.pkt_hdr);
				ICE_HASH_CFG_ROTATE_START(&pf->gtpu_hash_ctx.ipv4);
			}
		} else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) &&
			   (hdr & ICE_FLOW_SEG_HDR_TCP)) {
			if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv6_tcp)) {
				ice_rem_rss_cfg(hw, vsi->idx,
					pf->gtpu_hash_ctx.ipv6_tcp.hash_fld,
					pf->gtpu_hash_ctx.ipv6_tcp.pkt_hdr);
				ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_tcp);
			}

			if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv6)) {
				ice_rem_rss_cfg(hw, vsi->idx,
					pf->gtpu_hash_ctx.ipv6.hash_fld,
					pf->gtpu_hash_ctx.ipv6.pkt_hdr);
				ICE_HASH_CFG_ROTATE_START(&pf->gtpu_hash_ctx.ipv6);
			}
		} else if (hdr & ICE_FLOW_SEG_HDR_IPV4) {
			if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv4)) {
				ice_rem_rss_cfg(hw, vsi->idx,
					pf->gtpu_hash_ctx.ipv4.hash_fld,
					pf->gtpu_hash_ctx.ipv4.pkt_hdr);
				ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4);
			}

			if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv4_udp)) {
				ice_rem_rss_cfg(hw, vsi->idx,
					pf->gtpu_hash_ctx.ipv4_udp.hash_fld,
					pf->gtpu_hash_ctx.ipv4_udp.pkt_hdr);
				ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_udp);
			}

			if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv4_tcp)) {
				ice_rem_rss_cfg(hw, vsi->idx,
					pf->gtpu_hash_ctx.ipv4_tcp.hash_fld,
					pf->gtpu_hash_ctx.ipv4_tcp.pkt_hdr);
				ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_tcp);
			}
		} else if (hdr & ICE_FLOW_SEG_HDR_IPV6) {
			if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv6)) {
				ice_rem_rss_cfg(hw, vsi->idx,
					pf->gtpu_hash_ctx.ipv6.hash_fld,
					pf->gtpu_hash_ctx.ipv6.pkt_hdr);
				ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6);
			}

			if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv6_udp)) {
				ice_rem_rss_cfg(hw, vsi->idx,
					pf->gtpu_hash_ctx.ipv6_udp.hash_fld,
					pf->gtpu_hash_ctx.ipv6_udp.pkt_hdr);
				ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_udp);
			}

			if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv6_tcp)) {
				ice_rem_rss_cfg(hw, vsi->idx,
					pf->gtpu_hash_ctx.ipv6_tcp.hash_fld,
					pf->gtpu_hash_ctx.ipv6_tcp.pkt_hdr);
				ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_tcp);
			}
		}
	}

	return 0;
}
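
/* Drop the cached GTPU hash context entry that corresponds to a rule
 * which has just been removed from hardware.
 */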
static int
ice_rem_rss_cfg_post(struct ice_pf *pf, uint32_t hdr)
{
	if (hdr & ICE_FLOW_SEG_HDR_GTPU_EH) {
		if ((hdr & ICE_FLOW_SEG_HDR_IPV4) &&
		    (hdr & ICE_FLOW_SEG_HDR_UDP)) {
			ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_udp);
		} else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) &&
			   (hdr & ICE_FLOW_SEG_HDR_UDP)) {
			ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_udp);
		} else if ((hdr & ICE_FLOW_SEG_HDR_IPV4) &&
			   (hdr & ICE_FLOW_SEG_HDR_TCP)) {
			ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_tcp);
		} else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) &&
			   (hdr & ICE_FLOW_SEG_HDR_TCP)) {
			ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_tcp);
		} else if (hdr & ICE_FLOW_SEG_HDR_IPV4) {
			ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4);
		} else if (hdr & ICE_FLOW_SEG_HDR_IPV6) {
			ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6);
		}
	}

	return 0;
}
static int
ice_rem_rss_cfg_wrap(struct ice_pf *pf, uint16_t vsi_id,
		     uint64_t fld, uint32_t hdr)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	int ret;

	ret = ice_rem_rss_cfg(hw, vsi_id, fld, hdr);
	if (ret && ret != ICE_ERR_DOES_NOT_EXIST)
		PMD_DRV_LOG(ERR, "remove rss cfg failed\n");

	ret = ice_rem_rss_cfg_post(pf, hdr);
	if (ret)
		PMD_DRV_LOG(ERR, "remove rss cfg post failed\n");

	return 0;
}
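
/* Program one RSS rule with GTPU context bookkeeping: run the pre-hook
 * (remove conflicting cached rules), add the rule itself, then the
 * post-hook (cache the new rule and replay rotating ones). Failures are
 * logged but do not abort the sequence; the wrapper is best-effort.
 */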
static int
ice_add_rss_cfg_wrap(struct ice_pf *pf, uint16_t vsi_id,
		     uint64_t fld, uint32_t hdr, bool symm)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	int ret;

	ret = ice_add_rss_cfg_pre(pf, hdr);
	if (ret)
		PMD_DRV_LOG(ERR, "add rss cfg pre failed\n");

	ret = ice_add_rss_cfg(hw, vsi_id, fld, hdr, symm);
	if (ret)
		PMD_DRV_LOG(ERR, "add rss cfg failed\n");

	ret = ice_add_rss_cfg_post(pf, hdr, fld, symm);
	if (ret)
		PMD_DRV_LOG(ERR, "add rss cfg post failed\n");

	return 0;
}
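
/* Translate the ethdev rss_hf bit mask into ice flow-segment RSS rules.
 * Each ETH_RSS_* bit expands to one plain rule plus GTPU (outer IP and
 * extension header) and, where applicable, PPPoE variants; for example
 * ETH_RSS_NONFRAG_IPV4_UDP installs rules for UDP/IPv4, GTPU_IP|UDP/IPv4,
 * GTPU_EH|UDP/IPv4 and PPPOE|UDP/IPv4, all hashing on src/dst address
 * and port.
 */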
static void
ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
{
	struct ice_vsi *vsi = pf->main_vsi;
	int ret;

	/* Configure RSS for IPv4 with src/dst addr as input set */
	if (rss_hf & ETH_RSS_IPV4) {
		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_FLOW_HASH_IPV4,
				      ICE_FLOW_SEG_HDR_IPV4 |
				      ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
		if (ret)
			PMD_DRV_LOG(ERR, "%s IPV4 rss flow fail %d",
				    __func__, ret);
	}

	/* Configure RSS for IPv6 with src/dst addr as input set */
	if (rss_hf & ETH_RSS_IPV6) {
		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_FLOW_HASH_IPV6,
				      ICE_FLOW_SEG_HDR_IPV6 |
				      ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
		if (ret)
			PMD_DRV_LOG(ERR, "%s IPV6 rss flow fail %d",
				    __func__, ret);
	}

	/* Configure RSS for udp4 with src/dst addr and port as input set */
	if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) {
		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_UDP_IPV4,
				      ICE_FLOW_SEG_HDR_UDP |
				      ICE_FLOW_SEG_HDR_IPV4 |
				      ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
		if (ret)
			PMD_DRV_LOG(ERR, "%s UDP_IPV4 rss flow fail %d",
				    __func__, ret);
	}

	/* Configure RSS for udp6 with src/dst addr and port as input set */
	if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) {
		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_UDP_IPV6,
				      ICE_FLOW_SEG_HDR_UDP |
				      ICE_FLOW_SEG_HDR_IPV6 |
				      ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
		if (ret)
			PMD_DRV_LOG(ERR, "%s UDP_IPV6 rss flow fail %d",
				    __func__, ret);
	}

	/* Configure RSS for tcp4 with src/dst addr and port as input set */
	if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) {
		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_TCP_IPV4,
				      ICE_FLOW_SEG_HDR_TCP |
				      ICE_FLOW_SEG_HDR_IPV4 |
				      ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
		if (ret)
			PMD_DRV_LOG(ERR, "%s TCP_IPV4 rss flow fail %d",
				    __func__, ret);
	}

	/* Configure RSS for tcp6 with src/dst addr and port as input set */
	if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) {
		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_TCP_IPV6,
				      ICE_FLOW_SEG_HDR_TCP |
				      ICE_FLOW_SEG_HDR_IPV6 |
				      ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
		if (ret)
			PMD_DRV_LOG(ERR, "%s TCP_IPV6 rss flow fail %d",
				    __func__, ret);
	}

	/* Configure RSS for sctp4 with src/dst addr and port as input set */
	if (rss_hf & ETH_RSS_NONFRAG_IPV4_SCTP) {
		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_FLOW_HASH_IPV4,
				      ICE_FLOW_SEG_HDR_SCTP |
				      ICE_FLOW_SEG_HDR_IPV4 |
				      ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
		if (ret)
			PMD_DRV_LOG(ERR, "%s SCTP_IPV4 rss flow fail %d",
				    __func__, ret);
	}

	/* Configure RSS for sctp6 with src/dst addr and port as input set */
	if (rss_hf & ETH_RSS_NONFRAG_IPV6_SCTP) {
		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_FLOW_HASH_IPV6,
				      ICE_FLOW_SEG_HDR_SCTP |
				      ICE_FLOW_SEG_HDR_IPV6 |
				      ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
		if (ret)
			PMD_DRV_LOG(ERR, "%s SCTP_IPV6 rss flow fail %d",
				    __func__, ret);
	}

	if (rss_hf & ETH_RSS_IPV4) {
		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_FLOW_HASH_IPV4,
				ICE_FLOW_SEG_HDR_GTPU_IP |
				ICE_FLOW_SEG_HDR_IPV4 |
				ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
		if (ret)
			PMD_DRV_LOG(ERR, "%s GTPU_IPV4 rss flow fail %d",
				    __func__, ret);

		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_FLOW_HASH_IPV4,
				ICE_FLOW_SEG_HDR_GTPU_EH |
				ICE_FLOW_SEG_HDR_IPV4 |
				ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
		if (ret)
			PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV4 rss flow fail %d",
				    __func__, ret);

		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_FLOW_HASH_IPV4,
				ICE_FLOW_SEG_HDR_PPPOE |
				ICE_FLOW_SEG_HDR_IPV4 |
				ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
		if (ret)
			PMD_DRV_LOG(ERR, "%s PPPoE_IPV4 rss flow fail %d",
				    __func__, ret);
	}

	if (rss_hf & ETH_RSS_IPV6) {
		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_FLOW_HASH_IPV6,
				ICE_FLOW_SEG_HDR_GTPU_IP |
				ICE_FLOW_SEG_HDR_IPV6 |
				ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
		if (ret)
			PMD_DRV_LOG(ERR, "%s GTPU_IPV6 rss flow fail %d",
				    __func__, ret);

		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_FLOW_HASH_IPV6,
				ICE_FLOW_SEG_HDR_GTPU_EH |
				ICE_FLOW_SEG_HDR_IPV6 |
				ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
		if (ret)
			PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV6 rss flow fail %d",
				    __func__, ret);

		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_FLOW_HASH_IPV6,
				ICE_FLOW_SEG_HDR_PPPOE |
				ICE_FLOW_SEG_HDR_IPV6 |
				ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
		if (ret)
			PMD_DRV_LOG(ERR, "%s PPPoE_IPV6 rss flow fail %d",
				    __func__, ret);
	}

	if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) {
		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_UDP_IPV4,
				ICE_FLOW_SEG_HDR_GTPU_IP |
				ICE_FLOW_SEG_HDR_UDP |
				ICE_FLOW_SEG_HDR_IPV4 |
				ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
		if (ret)
			PMD_DRV_LOG(ERR, "%s GTPU_IPV4_UDP rss flow fail %d",
				    __func__, ret);

		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_UDP_IPV4,
				ICE_FLOW_SEG_HDR_GTPU_EH |
				ICE_FLOW_SEG_HDR_UDP |
				ICE_FLOW_SEG_HDR_IPV4 |
				ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
		if (ret)
			PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV4_UDP rss flow fail %d",
				    __func__, ret);

		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_UDP_IPV4,
				ICE_FLOW_SEG_HDR_PPPOE |
				ICE_FLOW_SEG_HDR_UDP |
				ICE_FLOW_SEG_HDR_IPV4 |
				ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
		if (ret)
			PMD_DRV_LOG(ERR, "%s PPPoE_IPV4_UDP rss flow fail %d",
				    __func__, ret);
	}

	if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) {
		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_UDP_IPV6,
				ICE_FLOW_SEG_HDR_GTPU_IP |
				ICE_FLOW_SEG_HDR_UDP |
				ICE_FLOW_SEG_HDR_IPV6 |
				ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
		if (ret)
			PMD_DRV_LOG(ERR, "%s GTPU_IPV6_UDP rss flow fail %d",
				    __func__, ret);

		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_UDP_IPV6,
				ICE_FLOW_SEG_HDR_GTPU_EH |
				ICE_FLOW_SEG_HDR_UDP |
				ICE_FLOW_SEG_HDR_IPV6 |
				ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
		if (ret)
			PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV6_UDP rss flow fail %d",
				    __func__, ret);

		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_UDP_IPV6,
				ICE_FLOW_SEG_HDR_PPPOE |
				ICE_FLOW_SEG_HDR_UDP |
				ICE_FLOW_SEG_HDR_IPV6 |
				ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
		if (ret)
			PMD_DRV_LOG(ERR, "%s PPPoE_IPV6_UDP rss flow fail %d",
				    __func__, ret);
	}

	if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) {
		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_TCP_IPV4,
				ICE_FLOW_SEG_HDR_GTPU_IP |
				ICE_FLOW_SEG_HDR_TCP |
				ICE_FLOW_SEG_HDR_IPV4 |
				ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
		if (ret)
			PMD_DRV_LOG(ERR, "%s GTPU_IPV4_TCP rss flow fail %d",
				    __func__, ret);

		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_TCP_IPV4,
				ICE_FLOW_SEG_HDR_GTPU_EH |
				ICE_FLOW_SEG_HDR_TCP |
				ICE_FLOW_SEG_HDR_IPV4 |
				ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
		if (ret)
			PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV4_TCP rss flow fail %d",
				    __func__, ret);

		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_TCP_IPV4,
				ICE_FLOW_SEG_HDR_PPPOE |
				ICE_FLOW_SEG_HDR_TCP |
				ICE_FLOW_SEG_HDR_IPV4 |
				ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
		if (ret)
			PMD_DRV_LOG(ERR, "%s PPPoE_IPV4_TCP rss flow fail %d",
				    __func__, ret);
	}

	if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) {
		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_TCP_IPV6,
				ICE_FLOW_SEG_HDR_GTPU_IP |
				ICE_FLOW_SEG_HDR_TCP |
				ICE_FLOW_SEG_HDR_IPV6 |
				ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
		if (ret)
			PMD_DRV_LOG(ERR, "%s GTPU_IPV6_TCP rss flow fail %d",
				    __func__, ret);

		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_TCP_IPV6,
				ICE_FLOW_SEG_HDR_GTPU_EH |
				ICE_FLOW_SEG_HDR_TCP |
				ICE_FLOW_SEG_HDR_IPV6 |
				ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
		if (ret)
			PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV6_TCP rss flow fail %d",
				    __func__, ret);

		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_TCP_IPV6,
				ICE_FLOW_SEG_HDR_PPPOE |
				ICE_FLOW_SEG_HDR_TCP |
				ICE_FLOW_SEG_HDR_IPV6 |
				ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
		if (ret)
			PMD_DRV_LOG(ERR, "%s PPPoE_IPV6_TCP rss flow fail %d",
				    __func__, ret);
	}

	if (rss_hf & ETH_RSS_NONFRAG_IPV4_SCTP) {
		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_SCTP_IPV4,
				ICE_FLOW_SEG_HDR_GTPU_IP |
				ICE_FLOW_SEG_HDR_SCTP |
				ICE_FLOW_SEG_HDR_IPV4 |
				ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
		if (ret)
			PMD_DRV_LOG(ERR, "%s GTPU_IPV4_SCTP rss flow fail %d",
				    __func__, ret);

		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_SCTP_IPV4,
				ICE_FLOW_SEG_HDR_GTPU_EH |
				ICE_FLOW_SEG_HDR_SCTP |
				ICE_FLOW_SEG_HDR_IPV4 |
				ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
		if (ret)
			PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV4_SCTP rss flow fail %d",
				    __func__, ret);
	}

	if (rss_hf & ETH_RSS_NONFRAG_IPV6_SCTP) {
		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_SCTP_IPV6,
				ICE_FLOW_SEG_HDR_GTPU_IP |
				ICE_FLOW_SEG_HDR_SCTP |
				ICE_FLOW_SEG_HDR_IPV6 |
				ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
		if (ret)
			PMD_DRV_LOG(ERR, "%s GTPU_IPV6_SCTP rss flow fail %d",
				    __func__, ret);

		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_SCTP_IPV6,
				ICE_FLOW_SEG_HDR_GTPU_EH |
				ICE_FLOW_SEG_HDR_SCTP |
				ICE_FLOW_SEG_HDR_IPV6 |
				ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
		if (ret)
			PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV6_SCTP rss flow fail %d",
				    __func__, ret);
	}
}
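
/* Initialize RSS for the main VSI: allocate and program the hash key
 * (user-supplied or random) and a LUT that spreads all entries across
 * the configured Rx queues, switch the VSI to the symmetric Toeplitz
 * hash scheme, then install the per-protocol hash rules.
 */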
static int ice_init_rss(struct ice_pf *pf)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_vsi *vsi = pf->main_vsi;
	struct rte_eth_dev *dev = pf->adapter->eth_dev;
	struct rte_eth_rss_conf *rss_conf;
	struct ice_aqc_get_set_rss_keys key;
	uint16_t i, nb_q;
	int ret = 0;
	bool is_safe_mode = pf->adapter->is_safe_mode;
	uint32_t reg;

	rss_conf = &dev->data->dev_conf.rx_adv_conf.rss_conf;
	nb_q = dev->data->nb_rx_queues;
	vsi->rss_key_size = ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE;
	vsi->rss_lut_size = pf->hash_lut_size;

	if (is_safe_mode) {
		PMD_DRV_LOG(WARNING, "RSS is not supported in safe mode\n");
		return 0;
	}

	if (!vsi->rss_key) {
		vsi->rss_key = rte_zmalloc(NULL,
					   vsi->rss_key_size, 0);
		if (vsi->rss_key == NULL) {
			PMD_DRV_LOG(ERR, "Failed to allocate memory for rss_key");
			return -ENOMEM;
		}
	}
	if (!vsi->rss_lut) {
		vsi->rss_lut = rte_zmalloc(NULL,
					   vsi->rss_lut_size, 0);
		if (vsi->rss_lut == NULL) {
			PMD_DRV_LOG(ERR, "Failed to allocate memory for rss_lut");
			rte_free(vsi->rss_key);
			vsi->rss_key = NULL;
			return -ENOMEM;
		}
	}
	/* configure RSS key */
	if (!rss_conf->rss_key) {
		/* Calculate the default hash key */
		for (i = 0; i < vsi->rss_key_size; i++)
			vsi->rss_key[i] = (uint8_t)rte_rand();
	} else {
		rte_memcpy(vsi->rss_key, rss_conf->rss_key,
			   RTE_MIN(rss_conf->rss_key_len,
				   vsi->rss_key_size));
	}
	rte_memcpy(key.standard_rss_key, vsi->rss_key, vsi->rss_key_size);
	ret = ice_aq_set_rss_key(hw, vsi->idx, &key);
	if (ret)
		goto out;

	/* init RSS LUT table */
	for (i = 0; i < vsi->rss_lut_size; i++)
		vsi->rss_lut[i] = i % nb_q;

	ret = ice_aq_set_rss_lut(hw, vsi->idx,
				 ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF,
				 vsi->rss_lut, vsi->rss_lut_size);
	if (ret)
		goto out;

	/* Enable registers for symmetric_toeplitz function. */
	reg = ICE_READ_REG(hw, VSIQF_HASH_CTL(vsi->vsi_id));
	reg = (reg & (~VSIQF_HASH_CTL_HASH_SCHEME_M)) |
	      (1 << VSIQF_HASH_CTL_HASH_SCHEME_S);
	ICE_WRITE_REG(hw, VSIQF_HASH_CTL(vsi->vsi_id), reg);

	/* RSS hash configuration */
	ice_rss_hash_set(pf, rss_conf->rss_hf);

	return 0;
out:
	rte_free(vsi->rss_key);
	vsi->rss_key = NULL;
	rte_free(vsi->rss_lut);
	vsi->rss_lut = NULL;
	return -EINVAL;
}
static int
ice_dev_configure(struct rte_eth_dev *dev)
{
	struct ice_adapter *ad =
		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	int ret;

	/* Initialize to TRUE. If any of Rx queues doesn't meet the
	 * bulk allocation or vector Rx preconditions we will reset it.
	 */
	ad->rx_bulk_alloc_allowed = true;
	ad->tx_simple_allowed = true;

	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;

	ret = ice_init_rss(pf);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to enable rss for PF");
		return ret;
	}

	return 0;
}
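
/* Bind a contiguous range of queues to one MSI-X vector by programming
 * QINT_RQCTL/QINT_TQCTL with the vector index, ITR index 0 and the
 * cause-enable bit. For example, binding queues 0..3 to vector 2 writes
 * the same vector index into all four queue-interrupt control registers.
 */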
static void
__vsi_queues_bind_intr(struct ice_vsi *vsi, uint16_t msix_vect,
		       int base_queue, int nb_queue)
{
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	uint32_t val, val_tx;
	int i;

	for (i = 0; i < nb_queue; i++) {
		/* do actual bind */
		val = (msix_vect & QINT_RQCTL_MSIX_INDX_M) |
		      (0 << QINT_RQCTL_ITR_INDX_S) | QINT_RQCTL_CAUSE_ENA_M;
		val_tx = (msix_vect & QINT_TQCTL_MSIX_INDX_M) |
			 (0 << QINT_TQCTL_ITR_INDX_S) | QINT_TQCTL_CAUSE_ENA_M;

		PMD_DRV_LOG(INFO, "queue %d is binding to vect %d",
			    base_queue + i, msix_vect);
		/* set ITR0 value */
		ICE_WRITE_REG(hw, GLINT_ITR(0, msix_vect), 0x10);
		ICE_WRITE_REG(hw, QINT_RQCTL(base_queue + i), val);
		ICE_WRITE_REG(hw, QINT_TQCTL(base_queue + i), val_tx);
	}
}
static void
ice_vsi_queues_bind_intr(struct ice_vsi *vsi)
{
	struct rte_eth_dev *dev = vsi->adapter->eth_dev;
	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	uint16_t msix_vect = vsi->msix_intr;
	uint16_t nb_msix = RTE_MIN(vsi->nb_msix, intr_handle->nb_efd);
	uint16_t queue_idx = 0;
	int record = 0;
	int i;

	/* clear Rx/Tx queue interrupt */
	for (i = 0; i < vsi->nb_used_qps; i++) {
		ICE_WRITE_REG(hw, QINT_TQCTL(vsi->base_queue + i), 0);
		ICE_WRITE_REG(hw, QINT_RQCTL(vsi->base_queue + i), 0);
	}

	/* PF bind interrupt */
	if (rte_intr_dp_is_en(intr_handle)) {
		queue_idx = 0;
		record = 1;
	}

	for (i = 0; i < vsi->nb_used_qps; i++) {
		if (nb_msix <= 1) {
			if (!rte_intr_allow_others(intr_handle))
				msix_vect = ICE_MISC_VEC_ID;

			/* uio mapping all queue to one msix_vect */
			__vsi_queues_bind_intr(vsi, msix_vect,
					       vsi->base_queue + i,
					       vsi->nb_used_qps - i);

			for (; !!record && i < vsi->nb_used_qps; i++)
				intr_handle->intr_vec[queue_idx + i] =
					msix_vect;
			break;
		}

		/* vfio 1:1 queue/msix_vect mapping */
		__vsi_queues_bind_intr(vsi, msix_vect,
				       vsi->base_queue + i, 1);

		if (!!record)
			intr_handle->intr_vec[queue_idx + i] = msix_vect;

		msix_vect++;
		nb_msix--;
	}
}
static void
ice_vsi_enable_queues_intr(struct ice_vsi *vsi)
{
	struct rte_eth_dev *dev = vsi->adapter->eth_dev;
	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	uint16_t msix_intr, i;

	if (rte_intr_allow_others(intr_handle))
		for (i = 0; i < vsi->nb_used_qps; i++) {
			msix_intr = vsi->msix_intr + i;
			ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr),
				      GLINT_DYN_CTL_INTENA_M |
				      GLINT_DYN_CTL_CLEARPBA_M |
				      GLINT_DYN_CTL_ITR_INDX_M |
				      GLINT_DYN_CTL_WB_ON_ITR_M);
		}
	else
		ICE_WRITE_REG(hw, GLINT_DYN_CTL(0),
			      GLINT_DYN_CTL_INTENA_M |
			      GLINT_DYN_CTL_CLEARPBA_M |
			      GLINT_DYN_CTL_ITR_INDX_M |
			      GLINT_DYN_CTL_WB_ON_ITR_M);
}
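
/* Set up Rx queue interrupts: allocate event fds and the queue-to-vector
 * map when per-queue interrupts are requested, bind the queues to MSI-X
 * vectors and enable them. With vfio-pci this yields a 1:1 queue/vector
 * mapping; with uio all queues fall back to the misc vector.
 */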
static int
ice_rxq_intr_setup(struct rte_eth_dev *dev)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct ice_vsi *vsi = pf->main_vsi;
	uint32_t intr_vector = 0;

	rte_intr_disable(intr_handle);

	/* check and configure queue intr-vector mapping */
	if ((rte_intr_cap_multiple(intr_handle) ||
	     !RTE_ETH_DEV_SRIOV(dev).active) &&
	    dev->data->dev_conf.intr_conf.rxq != 0) {
		intr_vector = dev->data->nb_rx_queues;
		if (intr_vector > ICE_MAX_INTR_QUEUE_NUM) {
			PMD_DRV_LOG(ERR, "At most %d intr queues supported",
				    ICE_MAX_INTR_QUEUE_NUM);
			return -ENOTSUP;
		}
		if (rte_intr_efd_enable(intr_handle, intr_vector))
			return -1;
	}

	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
		intr_handle->intr_vec =
			rte_zmalloc(NULL, dev->data->nb_rx_queues * sizeof(int),
				    0);
		if (!intr_handle->intr_vec) {
			PMD_DRV_LOG(ERR,
				    "Failed to allocate %d rx_queues intr_vec",
				    dev->data->nb_rx_queues);
			return -ENOMEM;
		}
	}

	/* Map queues with MSIX interrupt */
	vsi->nb_used_qps = dev->data->nb_rx_queues;
	ice_vsi_queues_bind_intr(vsi);

	/* Enable interrupts for all the queues */
	ice_vsi_enable_queues_intr(vsi);

	rte_intr_enable(intr_handle);

	return 0;
}
static void
ice_get_init_link_status(struct rte_eth_dev *dev)
{
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false;
	struct ice_link_status link_status;
	int ret;

	ret = ice_aq_get_link_info(hw->port_info, enable_lse,
				   &link_status, NULL);
	if (ret != ICE_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to get link info");
		pf->init_link_up = false;
		return;
	}

	if (link_status.link_info & ICE_AQ_LINK_UP)
		pf->init_link_up = true;
}
static int
ice_dev_start(struct rte_eth_dev *dev)
{
	struct rte_eth_dev_data *data = dev->data;
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_vsi *vsi = pf->main_vsi;
	uint16_t nb_rxq = 0;
	uint16_t nb_txq, i;
	uint16_t max_frame_size;
	int mask, ret;

	/* program Tx queues' context in hardware */
	for (nb_txq = 0; nb_txq < data->nb_tx_queues; nb_txq++) {
		ret = ice_tx_queue_start(dev, nb_txq);
		if (ret) {
			PMD_DRV_LOG(ERR, "fail to start Tx queue %u", nb_txq);
			goto tx_err;
		}
	}

	/* program Rx queues' context in hardware*/
	for (nb_rxq = 0; nb_rxq < data->nb_rx_queues; nb_rxq++) {
		ret = ice_rx_queue_start(dev, nb_rxq);
		if (ret) {
			PMD_DRV_LOG(ERR, "fail to start Rx queue %u", nb_rxq);
			goto rx_err;
		}
	}

	ice_set_rx_function(dev);
	ice_set_tx_function(dev);

	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
	       ETH_VLAN_EXTEND_MASK;
	ret = ice_vlan_offload_set(dev, mask);
	if (ret) {
		PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
		goto rx_err;
	}

	/* enable Rx interrupt and mapping Rx queue to interrupt vector */
	if (ice_rxq_intr_setup(dev))
		return -EIO;

	/* Enable receiving broadcast packets and transmitting packets */
	ret = ice_set_vsi_promisc(hw, vsi->idx,
				  ICE_PROMISC_BCAST_RX | ICE_PROMISC_BCAST_TX |
				  ICE_PROMISC_UCAST_TX | ICE_PROMISC_MCAST_TX,
				  0);
	if (ret != ICE_SUCCESS)
		PMD_DRV_LOG(INFO, "fail to set vsi broadcast");

	ret = ice_aq_set_event_mask(hw, hw->port_info->lport,
				    ((u16)(ICE_AQ_LINK_EVENT_LINK_FAULT |
				     ICE_AQ_LINK_EVENT_PHY_TEMP_ALARM |
				     ICE_AQ_LINK_EVENT_EXCESSIVE_ERRORS |
				     ICE_AQ_LINK_EVENT_SIGNAL_DETECT |
				     ICE_AQ_LINK_EVENT_AN_COMPLETED |
				     ICE_AQ_LINK_EVENT_PORT_TX_SUSPENDED)),
				    NULL);
	if (ret != ICE_SUCCESS)
		PMD_DRV_LOG(WARNING, "Fail to set phy mask");

	ice_get_init_link_status(dev);

	ice_dev_set_link_up(dev);

	/* Call get_link_info aq command to enable/disable LSE */
	ice_link_update(dev, 0);

	pf->adapter_stopped = false;

	/* Set the max frame size to default value*/
	max_frame_size = pf->dev_data->dev_conf.rxmode.max_rx_pkt_len ?
		pf->dev_data->dev_conf.rxmode.max_rx_pkt_len :
		ICE_FRAME_SIZE_MAX;

	/* Set the max frame size to HW*/
	ice_aq_set_mac_cfg(hw, max_frame_size, NULL);

	return 0;

	/* stop the started queues if failed to start all queues */
rx_err:
	for (i = 0; i < nb_rxq; i++)
		ice_rx_queue_stop(dev, i);
tx_err:
	for (i = 0; i < nb_txq; i++)
		ice_tx_queue_stop(dev, i);

	return -EIO;
}
static int
ice_dev_reset(struct rte_eth_dev *dev)
{
	int ret;

	if (dev->data->sriov.active)
		return -ENOTSUP;

	ret = ice_dev_uninit(dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "failed to uninit device, status = %d", ret);
		return -ENXIO;
	}

	ret = ice_dev_init(dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "failed to init device, status = %d", ret);
		return -ENXIO;
	}

	return 0;
}
static int
ice_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_vsi *vsi = pf->main_vsi;
	struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
	bool is_safe_mode = pf->adapter->is_safe_mode;
	uint64_t phy_type_low;
	uint64_t phy_type_high;

	dev_info->min_rx_bufsize = ICE_BUF_SIZE_MIN;
	dev_info->max_rx_pktlen = ICE_FRAME_SIZE_MAX;
	dev_info->max_rx_queues = vsi->nb_qps;
	dev_info->max_tx_queues = vsi->nb_qps;
	dev_info->max_mac_addrs = vsi->max_macaddrs;
	dev_info->max_vfs = pci_dev->max_vfs;
	dev_info->max_mtu = dev_info->max_rx_pktlen - ICE_ETH_OVERHEAD;
	dev_info->min_mtu = RTE_ETHER_MIN_MTU;

	dev_info->rx_offload_capa =
		DEV_RX_OFFLOAD_VLAN_STRIP |
		DEV_RX_OFFLOAD_JUMBO_FRAME |
		DEV_RX_OFFLOAD_KEEP_CRC |
		DEV_RX_OFFLOAD_SCATTER |
		DEV_RX_OFFLOAD_VLAN_FILTER;
	dev_info->tx_offload_capa =
		DEV_TX_OFFLOAD_VLAN_INSERT |
		DEV_TX_OFFLOAD_TCP_TSO |
		DEV_TX_OFFLOAD_MULTI_SEGS |
		DEV_TX_OFFLOAD_MBUF_FAST_FREE;
	dev_info->flow_type_rss_offloads = 0;

	if (!is_safe_mode) {
		dev_info->rx_offload_capa |=
			DEV_RX_OFFLOAD_IPV4_CKSUM |
			DEV_RX_OFFLOAD_UDP_CKSUM |
			DEV_RX_OFFLOAD_TCP_CKSUM |
			DEV_RX_OFFLOAD_QINQ_STRIP |
			DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
			DEV_RX_OFFLOAD_VLAN_EXTEND |
			DEV_RX_OFFLOAD_RSS_HASH;
		dev_info->tx_offload_capa |=
			DEV_TX_OFFLOAD_QINQ_INSERT |
			DEV_TX_OFFLOAD_IPV4_CKSUM |
			DEV_TX_OFFLOAD_UDP_CKSUM |
			DEV_TX_OFFLOAD_TCP_CKSUM |
			DEV_TX_OFFLOAD_SCTP_CKSUM |
			DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
			DEV_TX_OFFLOAD_OUTER_UDP_CKSUM;
		dev_info->flow_type_rss_offloads |= ICE_RSS_OFFLOAD_ALL;
	}

	dev_info->rx_queue_offload_capa = 0;
	dev_info->tx_queue_offload_capa = 0;

	dev_info->reta_size = pf->hash_lut_size;
	dev_info->hash_key_size = (VSIQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_thresh = {
			.pthresh = ICE_DEFAULT_RX_PTHRESH,
			.hthresh = ICE_DEFAULT_RX_HTHRESH,
			.wthresh = ICE_DEFAULT_RX_WTHRESH,
		},
		.rx_free_thresh = ICE_DEFAULT_RX_FREE_THRESH,
		.rx_drop_en = 0,
		.offloads = 0,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_thresh = {
			.pthresh = ICE_DEFAULT_TX_PTHRESH,
			.hthresh = ICE_DEFAULT_TX_HTHRESH,
			.wthresh = ICE_DEFAULT_TX_WTHRESH,
		},
		.tx_free_thresh = ICE_DEFAULT_TX_FREE_THRESH,
		.tx_rs_thresh = ICE_DEFAULT_TX_RSBIT_THRESH,
		.offloads = 0,
	};

	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = ICE_MAX_RING_DESC,
		.nb_min = ICE_MIN_RING_DESC,
		.nb_align = ICE_ALIGN_RING_DESC,
	};

	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = ICE_MAX_RING_DESC,
		.nb_min = ICE_MIN_RING_DESC,
		.nb_align = ICE_ALIGN_RING_DESC,
	};

	dev_info->speed_capa = ETH_LINK_SPEED_10M |
			       ETH_LINK_SPEED_100M |
			       ETH_LINK_SPEED_1G |
			       ETH_LINK_SPEED_2_5G |
			       ETH_LINK_SPEED_5G |
			       ETH_LINK_SPEED_10G |
			       ETH_LINK_SPEED_20G |
			       ETH_LINK_SPEED_25G;

	phy_type_low = hw->port_info->phy.phy_type_low;
	phy_type_high = hw->port_info->phy.phy_type_high;

	if (ICE_PHY_TYPE_SUPPORT_50G(phy_type_low))
		dev_info->speed_capa |= ETH_LINK_SPEED_50G;

	if (ICE_PHY_TYPE_SUPPORT_100G_LOW(phy_type_low) ||
	    ICE_PHY_TYPE_SUPPORT_100G_HIGH(phy_type_high))
		dev_info->speed_capa |= ETH_LINK_SPEED_100G;

	dev_info->nb_rx_queues = dev->data->nb_rx_queues;
	dev_info->nb_tx_queues = dev->data->nb_tx_queues;

	dev_info->default_rxportconf.burst_size = ICE_RX_MAX_BURST;
	dev_info->default_txportconf.burst_size = ICE_TX_MAX_BURST;
	dev_info->default_rxportconf.nb_queues = 1;
	dev_info->default_txportconf.nb_queues = 1;
	dev_info->default_rxportconf.ring_size = ICE_BUF_SIZE_MIN;
	dev_info->default_txportconf.ring_size = ICE_BUF_SIZE_MIN;

	return 0;
}
static inline int
ice_atomic_read_link_status(struct rte_eth_dev *dev,
			    struct rte_eth_link *link)
{
	struct rte_eth_link *dst = link;
	struct rte_eth_link *src = &dev->data->dev_link;

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
				*(uint64_t *)src) == 0)
		return -1;

	return 0;
}

static inline int
ice_atomic_write_link_status(struct rte_eth_dev *dev,
			     struct rte_eth_link *link)
{
	struct rte_eth_link *dst = &dev->data->dev_link;
	struct rte_eth_link *src = link;

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
				*(uint64_t *)src) == 0)
		return -1;

	return 0;
}
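
/* Poll link status via the get-link-info AQ command. When
 * wait_to_complete is set, retry for up to MAX_REPEAT_TIME *
 * CHECK_INTERVAL ms (1 s) waiting for the link to come up, then map the
 * AQ speed encoding onto ETH_SPEED_NUM_* values. Returns -1 when the
 * reported status is unchanged from the cached one, 0 otherwise.
 */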
static int
ice_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
#define CHECK_INTERVAL 100  /* 100ms */
#define MAX_REPEAT_TIME 10  /* 1s (10 * 100ms) in total */
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_link_status link_status;
	struct rte_eth_link link, old;
	int status;
	unsigned int rep_cnt = MAX_REPEAT_TIME;
	bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false;

	memset(&link, 0, sizeof(link));
	memset(&old, 0, sizeof(old));
	memset(&link_status, 0, sizeof(link_status));
	ice_atomic_read_link_status(dev, &old);

	do {
		/* Get link status information from hardware */
		status = ice_aq_get_link_info(hw->port_info, enable_lse,
					      &link_status, NULL);
		if (status != ICE_SUCCESS) {
			link.link_speed = ETH_SPEED_NUM_100M;
			link.link_duplex = ETH_LINK_FULL_DUPLEX;
			PMD_DRV_LOG(ERR, "Failed to get link info");
			goto out;
		}

		link.link_status = link_status.link_info & ICE_AQ_LINK_UP;
		if (!wait_to_complete || link.link_status)
			break;

		rte_delay_ms(CHECK_INTERVAL);
	} while (--rep_cnt);

	if (!link.link_status)
		goto out;

	/* Full-duplex operation at all supported speeds */
	link.link_duplex = ETH_LINK_FULL_DUPLEX;

	/* Parse the link status */
	switch (link_status.link_speed) {
	case ICE_AQ_LINK_SPEED_10MB:
		link.link_speed = ETH_SPEED_NUM_10M;
		break;
	case ICE_AQ_LINK_SPEED_100MB:
		link.link_speed = ETH_SPEED_NUM_100M;
		break;
	case ICE_AQ_LINK_SPEED_1000MB:
		link.link_speed = ETH_SPEED_NUM_1G;
		break;
	case ICE_AQ_LINK_SPEED_2500MB:
		link.link_speed = ETH_SPEED_NUM_2_5G;
		break;
	case ICE_AQ_LINK_SPEED_5GB:
		link.link_speed = ETH_SPEED_NUM_5G;
		break;
	case ICE_AQ_LINK_SPEED_10GB:
		link.link_speed = ETH_SPEED_NUM_10G;
		break;
	case ICE_AQ_LINK_SPEED_20GB:
		link.link_speed = ETH_SPEED_NUM_20G;
		break;
	case ICE_AQ_LINK_SPEED_25GB:
		link.link_speed = ETH_SPEED_NUM_25G;
		break;
	case ICE_AQ_LINK_SPEED_40GB:
		link.link_speed = ETH_SPEED_NUM_40G;
		break;
	case ICE_AQ_LINK_SPEED_50GB:
		link.link_speed = ETH_SPEED_NUM_50G;
		break;
	case ICE_AQ_LINK_SPEED_100GB:
		link.link_speed = ETH_SPEED_NUM_100G;
		break;
	case ICE_AQ_LINK_SPEED_UNKNOWN:
		PMD_DRV_LOG(ERR, "Unknown link speed");
		link.link_speed = ETH_SPEED_NUM_UNKNOWN;
		break;
	default:
		PMD_DRV_LOG(ERR, "None link speed");
		link.link_speed = ETH_SPEED_NUM_NONE;
		break;
	}

	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
			      ETH_LINK_SPEED_FIXED);

out:
	ice_atomic_write_link_status(dev, &link);
	if (link.link_status == old.link_status)
		return -1;

	return 0;
}
/* Force the physical link state by getting the current PHY capabilities from
 * hardware and setting the PHY config based on the determined capabilities. If
 * link changes, link event will be triggered because both the Enable Automatic
 * Link Update and LESM Enable bits are set when setting the PHY capabilities.
 */
static enum ice_status
ice_force_phys_link_state(struct ice_hw *hw, bool link_up)
{
	struct ice_aqc_set_phy_cfg_data cfg = { 0 };
	struct ice_aqc_get_phy_caps_data *pcaps;
	struct ice_port_info *pi;
	enum ice_status status;

	if (!hw || !hw->port_info)
		return ICE_ERR_PARAM;

	pi = hw->port_info;

	pcaps = (struct ice_aqc_get_phy_caps_data *)
		ice_malloc(hw, sizeof(*pcaps));
	if (!pcaps)
		return ICE_ERR_NO_MEMORY;

	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
				     NULL);
	if (status)
		goto out;

	/* No change in link */
	if (link_up == !!(pcaps->caps & ICE_AQC_PHY_EN_LINK) &&
	    link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP))
		goto out;

	cfg.phy_type_low = pcaps->phy_type_low;
	cfg.phy_type_high = pcaps->phy_type_high;
	cfg.caps = pcaps->caps | ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
	cfg.low_power_ctrl_an = pcaps->low_power_ctrl_an;
	cfg.eee_cap = pcaps->eee_cap;
	cfg.eeer_value = pcaps->eeer_value;
	cfg.link_fec_opt = pcaps->link_fec_options;
	if (link_up)
		cfg.caps |= ICE_AQ_PHY_ENA_LINK;
	else
		cfg.caps &= ~ICE_AQ_PHY_ENA_LINK;

	status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL);

out:
	ice_free(hw, pcaps);
	return status;
}
static int
ice_dev_set_link_up(struct rte_eth_dev *dev)
{
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	return ice_force_phys_link_state(hw, true);
}

static int
ice_dev_set_link_down(struct rte_eth_dev *dev)
{
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	return ice_force_phys_link_state(hw, false);
}
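
/* Validate and apply a new MTU. The on-wire frame size is
 * mtu + ICE_ETH_OVERHEAD, where the overhead covers the Ethernet header,
 * CRC and two VLAN tags (26 bytes with those assumptions), so for
 * example a 1500-byte MTU corresponds to a 1526-byte frame. Frames above
 * RTE_ETHER_MAX_LEN additionally enable the jumbo-frame offload.
 */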
static int
ice_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct rte_eth_dev_data *dev_data = pf->dev_data;
	uint32_t frame_size = mtu + ICE_ETH_OVERHEAD;

	/* check if mtu is within the allowed range */
	if (mtu < RTE_ETHER_MIN_MTU || frame_size > ICE_FRAME_SIZE_MAX)
		return -EINVAL;

	/* mtu setting is forbidden if the port is started */
	if (dev_data->dev_started) {
		PMD_DRV_LOG(ERR,
			    "port %d must be stopped before configuration",
			    dev_data->port_id);
		return -EBUSY;
	}

	if (frame_size > RTE_ETHER_MAX_LEN)
		dev_data->dev_conf.rxmode.offloads |=
			DEV_RX_OFFLOAD_JUMBO_FRAME;
	else
		dev_data->dev_conf.rxmode.offloads &=
			~DEV_RX_OFFLOAD_JUMBO_FRAME;

	dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;

	return 0;
}
static int ice_macaddr_set(struct rte_eth_dev *dev,
			   struct rte_ether_addr *mac_addr)
{
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_vsi *vsi = pf->main_vsi;
	struct ice_mac_filter *f;
	uint8_t flags = 0;
	int ret;

	if (!rte_is_valid_assigned_ether_addr(mac_addr)) {
		PMD_DRV_LOG(ERR, "Tried to set invalid MAC address.");
		return -EINVAL;
	}

	TAILQ_FOREACH(f, &vsi->mac_list, next) {
		if (rte_is_same_ether_addr(&pf->dev_addr, &f->mac_info.mac_addr))
			break;
	}

	if (!f) {
		PMD_DRV_LOG(ERR, "Failed to find filter for default mac");
		return -EIO;
	}

	ret = ice_remove_mac_filter(vsi, &f->mac_info.mac_addr);
	if (ret != ICE_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to delete mac filter");
		return -EIO;
	}
	ret = ice_add_mac_filter(vsi, mac_addr);
	if (ret != ICE_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to add mac filter");
		return -EIO;
	}
	rte_ether_addr_copy(mac_addr, &pf->dev_addr);

	flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL;
	ret = ice_aq_manage_mac_write(hw, mac_addr->addr_bytes, flags, NULL);
	if (ret != ICE_SUCCESS)
		PMD_DRV_LOG(ERR, "Failed to set manage mac");

	return 0;
}
/* Add a MAC address, and update filters */
static int
ice_macaddr_add(struct rte_eth_dev *dev,
		struct rte_ether_addr *mac_addr,
		__rte_unused uint32_t index,
		__rte_unused uint32_t pool)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_vsi *vsi = pf->main_vsi;
	int ret;

	ret = ice_add_mac_filter(vsi, mac_addr);
	if (ret != ICE_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to add MAC filter");
		return -EINVAL;
	}

	return ICE_SUCCESS;
}

/* Remove a MAC address, and update filters */
static void
ice_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_vsi *vsi = pf->main_vsi;
	struct rte_eth_dev_data *data = dev->data;
	struct rte_ether_addr *macaddr;
	int ret;

	macaddr = &data->mac_addrs[index];
	ret = ice_remove_mac_filter(vsi, macaddr);
	if (ret)
		PMD_DRV_LOG(ERR, "Failed to remove MAC filter");
}
static int
ice_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_vsi *vsi = pf->main_vsi;
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (on) {
		ret = ice_add_vlan_filter(vsi, vlan_id);
		if (ret < 0) {
			PMD_DRV_LOG(ERR, "Failed to add vlan filter");
			return -EINVAL;
		}
	} else {
		ret = ice_remove_vlan_filter(vsi, vlan_id);
		if (ret < 0) {
			PMD_DRV_LOG(ERR, "Failed to remove vlan filter");
			return -EINVAL;
		}
	}

	return 0;
}
/* Configure vlan filter on or off */
static int
ice_vsi_config_vlan_filter(struct ice_vsi *vsi, bool on)
{
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	struct ice_vsi_ctx ctxt;
	uint8_t sec_flags, sw_flags2;
	int ret = 0;

	sec_flags = ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
		    ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S;
	sw_flags2 = ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;

	if (on) {
		vsi->info.sec_flags |= sec_flags;
		vsi->info.sw_flags2 |= sw_flags2;
	} else {
		vsi->info.sec_flags &= ~sec_flags;
		vsi->info.sw_flags2 &= ~sw_flags2;
	}
	vsi->info.sw_id = hw->port_info->sw_id;
	(void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
	ctxt.info.valid_sections =
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_SW_VALID |
				 ICE_AQ_VSI_PROP_SECURITY_VALID);
	ctxt.vsi_num = vsi->vsi_id;

	ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
	if (ret) {
		PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan rx pruning",
			    on ? "enable" : "disable");
		return -EINVAL;
	}

	vsi->info.valid_sections |=
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_SW_VALID |
				 ICE_AQ_VSI_PROP_SECURITY_VALID);

	/* to be consistent with other drivers, allow untagged packets
	 * when the vlan filter is on
	 */
	if (on)
		ret = ice_add_vlan_filter(vsi, 0);
	else
		ret = ice_remove_vlan_filter(vsi, 0);

	return 0;
}
static int
ice_vsi_config_vlan_stripping(struct ice_vsi *vsi, bool on)
{
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	struct ice_vsi_ctx ctxt;
	uint8_t vlan_flags;
	int ret = 0;

	/* Check if it has been already on or off */
	if (vsi->info.valid_sections &
	    rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID)) {
		if (on) {
			if ((vsi->info.vlan_flags &
			     ICE_AQ_VSI_VLAN_EMOD_M) ==
			    ICE_AQ_VSI_VLAN_EMOD_STR_BOTH)
				return 0; /* already on */
		} else {
			if ((vsi->info.vlan_flags &
			     ICE_AQ_VSI_VLAN_EMOD_M) ==
			    ICE_AQ_VSI_VLAN_EMOD_NOTHING)
				return 0; /* already off */
		}
	}

	if (on)
		vlan_flags = ICE_AQ_VSI_VLAN_EMOD_STR_BOTH;
	else
		vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING;
	vsi->info.vlan_flags &= ~(ICE_AQ_VSI_VLAN_EMOD_M);
	vsi->info.vlan_flags |= vlan_flags;
	(void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
	ctxt.info.valid_sections =
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID);
	ctxt.vsi_num = vsi->vsi_id;
	ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
	if (ret) {
		PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan stripping",
			    on ? "enable" : "disable");
		return -EINVAL;
	}

	vsi->info.valid_sections |=
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID);

	return ret;
}
static int
ice_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_vsi *vsi = pf->main_vsi;
	struct rte_eth_rxmode *rxmode;

	rxmode = &dev->data->dev_conf.rxmode;
	if (mask & ETH_VLAN_FILTER_MASK) {
		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
			ice_vsi_config_vlan_filter(vsi, true);
		else
			ice_vsi_config_vlan_filter(vsi, false);
	}

	if (mask & ETH_VLAN_STRIP_MASK) {
		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
			ice_vsi_config_vlan_stripping(vsi, true);
		else
			ice_vsi_config_vlan_stripping(vsi, false);
	}

	if (mask & ETH_VLAN_EXTEND_MASK) {
		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
			ice_vsi_config_double_vlan(vsi, true);
		else
			ice_vsi_config_double_vlan(vsi, false);
	}

	return 0;
}
static int
ice_get_rss_lut(struct ice_vsi *vsi, uint8_t *lut, uint16_t lut_size)
{
	struct ice_pf *pf = ICE_VSI_TO_PF(vsi);
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	int ret;

	if (!lut)
		return -EINVAL;

	if (pf->flags & ICE_FLAG_RSS_AQ_CAPABLE) {
		ret = ice_aq_get_rss_lut(hw, vsi->idx,
			ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF, lut, lut_size);
		if (ret) {
			PMD_DRV_LOG(ERR, "Failed to get RSS lookup table");
			return -EINVAL;
		}
	} else {
		uint64_t *lut_dw = (uint64_t *)lut;
		uint16_t i, lut_size_dw = lut_size / 4;

		for (i = 0; i < lut_size_dw; i++)
			lut_dw[i] = ICE_READ_REG(hw, PFQF_HLUT(i));
	}

	return 0;
}
static int
ice_set_rss_lut(struct ice_vsi *vsi, uint8_t *lut, uint16_t lut_size)
{
	struct ice_pf *pf;
	struct ice_hw *hw;
	int ret;

	if (!vsi || !lut)
		return -EINVAL;

	pf = ICE_VSI_TO_PF(vsi);
	hw = ICE_VSI_TO_HW(vsi);

	if (pf->flags & ICE_FLAG_RSS_AQ_CAPABLE) {
		ret = ice_aq_set_rss_lut(hw, vsi->idx,
			ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF, lut, lut_size);
		if (ret) {
			PMD_DRV_LOG(ERR, "Failed to set RSS lookup table");
			return -EINVAL;
		}
	} else {
		uint64_t *lut_dw = (uint64_t *)lut;
		uint16_t i, lut_size_dw = lut_size / 4;

		for (i = 0; i < lut_size_dw; i++)
			ICE_WRITE_REG(hw, PFQF_HLUT(i), lut_dw[i]);

		ice_flush(hw);
	}

	return 0;
}
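
/* Update the RSS redirection table. The new table may be 128, 512 or
 * 2048 entries; when the size differs from the current LUT, the PF LUT
 * size is switched accordingly. As an illustration (values assumed, not
 * taken from this file), testpmd's "port config 0 rss reta (0,5)" ends
 * up here with bit 0 of reta_conf[0].mask set and reta[0] = 5, steering
 * hash bucket 0 to Rx queue 5.
 */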
static int
ice_rss_reta_update(struct rte_eth_dev *dev,
		    struct rte_eth_rss_reta_entry64 *reta_conf,
		    uint16_t reta_size)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	uint16_t i, lut_size = pf->hash_lut_size;
	uint16_t idx, shift;
	uint8_t *lut;
	int ret;

	if (reta_size != ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128 &&
	    reta_size != ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512 &&
	    reta_size != ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K) {
		PMD_DRV_LOG(ERR,
			    "The size of hash lookup table configured (%d) "
			    "doesn't match the number hardware can "
			    "support (128, 512, 2048)",
			    reta_size);
		return -EINVAL;
	}

	/* It MUST use the current LUT size to get the RSS lookup table,
	 * otherwise it will fail with -100 error code.
	 */
	lut = rte_zmalloc(NULL, RTE_MAX(reta_size, lut_size), 0);
	if (!lut) {
		PMD_DRV_LOG(ERR, "No memory can be allocated");
		return -ENOMEM;
	}
	ret = ice_get_rss_lut(pf->main_vsi, lut, lut_size);
	if (ret)
		goto out;

	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		if (reta_conf[idx].mask & (1ULL << shift))
			lut[i] = reta_conf[idx].reta[shift];
	}
	ret = ice_set_rss_lut(pf->main_vsi, lut, reta_size);
	if (ret == 0 && lut_size != reta_size) {
		PMD_DRV_LOG(INFO,
			    "The size of hash lookup table is changed from (%d) to (%d)",
			    lut_size, reta_size);
		pf->hash_lut_size = reta_size;
	}

out:
	rte_free(lut);

	return ret;
}
static int
ice_rss_reta_query(struct rte_eth_dev *dev,
		   struct rte_eth_rss_reta_entry64 *reta_conf,
		   uint16_t reta_size)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	uint16_t i, lut_size = pf->hash_lut_size;
	uint16_t idx, shift;
	uint8_t *lut;
	int ret;

	if (reta_size != lut_size) {
		PMD_DRV_LOG(ERR,
			    "The size of hash lookup table configured (%d) "
			    "doesn't match the number hardware can "
			    "support (%d)",
			    reta_size, lut_size);
		return -EINVAL;
	}

	lut = rte_zmalloc(NULL, reta_size, 0);
	if (!lut) {
		PMD_DRV_LOG(ERR, "No memory can be allocated");
		return -ENOMEM;
	}

	ret = ice_get_rss_lut(pf->main_vsi, lut, reta_size);
	if (ret)
		goto out;

	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		if (reta_conf[idx].mask & (1ULL << shift))
			reta_conf[idx].reta[shift] = lut[i];
	}

out:
	rte_free(lut);

	return ret;
}
static int
ice_set_rss_key(struct ice_vsi *vsi, uint8_t *key, uint8_t key_len)
{
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	int ret = 0;

	if (!key || key_len == 0) {
		PMD_DRV_LOG(DEBUG, "No key to be configured");
		return 0;
	} else if (key_len != (VSIQF_HKEY_MAX_INDEX + 1) *
		   sizeof(uint32_t)) {
		PMD_DRV_LOG(ERR, "Invalid key length %u", key_len);
		return -EINVAL;
	}

	struct ice_aqc_get_set_rss_keys *key_dw =
		(struct ice_aqc_get_set_rss_keys *)key;

	ret = ice_aq_set_rss_key(hw, vsi->idx, key_dw);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to configure RSS key via AQ");
		ret = -EINVAL;
	}

	return ret;
}
static int
ice_get_rss_key(struct ice_vsi *vsi, uint8_t *key, uint8_t *key_len)
{
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	int ret;

	if (!key || !key_len)
		return -EINVAL;

	ret = ice_aq_get_rss_key
		(hw, vsi->idx,
		 (struct ice_aqc_get_set_rss_keys *)key);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to get RSS key via AQ");
		return -EINVAL;
	}
	*key_len = (VSIQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);

	return 0;
}
static int
ice_rss_hash_update(struct rte_eth_dev *dev,
		    struct rte_eth_rss_conf *rss_conf)
{
	enum ice_status status = ICE_SUCCESS;
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_vsi *vsi = pf->main_vsi;

	/* set hash key */
	status = ice_set_rss_key(vsi, rss_conf->rss_key, rss_conf->rss_key_len);
	if (status)
		return status;

	if (rss_conf->rss_hf == 0)
		return 0;

	/* RSS hash configuration */
	ice_rss_hash_set(pf, rss_conf->rss_hf);

	return 0;
}

static int
ice_rss_hash_conf_get(struct rte_eth_dev *dev,
		      struct rte_eth_rss_conf *rss_conf)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_vsi *vsi = pf->main_vsi;

	ice_get_rss_key(vsi, rss_conf->rss_key,
			&rss_conf->rss_key_len);

	/* TODO: default set to 0 as hf config is not supported now */
	rss_conf->rss_hf = 0;
	return 0;
}
static int
ice_promisc_enable(struct rte_eth_dev *dev)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_vsi *vsi = pf->main_vsi;
	enum ice_status status;
	uint8_t pmask;
	int ret = 0;

	pmask = ICE_PROMISC_UCAST_RX | ICE_PROMISC_UCAST_TX |
		ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX;

	status = ice_set_vsi_promisc(hw, vsi->idx, pmask, 0);
	switch (status) {
	case ICE_ERR_ALREADY_EXISTS:
		PMD_DRV_LOG(DEBUG, "Promisc mode has already been enabled");
	case ICE_SUCCESS:
		break;
	default:
		PMD_DRV_LOG(ERR, "Failed to enable promisc, err=%d", status);
		ret = -EAGAIN;
	}

	return ret;
}
static int
ice_promisc_disable(struct rte_eth_dev *dev)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_vsi *vsi = pf->main_vsi;
	enum ice_status status;
	uint8_t pmask;
	int ret = 0;

	pmask = ICE_PROMISC_UCAST_RX | ICE_PROMISC_UCAST_TX |
		ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX;

	status = ice_clear_vsi_promisc(hw, vsi->idx, pmask, 0);
	if (status != ICE_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to clear promisc, err=%d", status);
		ret = -EAGAIN;
	}

	return ret;
}
static int
ice_allmulti_enable(struct rte_eth_dev *dev)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_vsi *vsi = pf->main_vsi;
	enum ice_status status;
	uint8_t pmask;
	int ret = 0;

	pmask = ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX;

	status = ice_set_vsi_promisc(hw, vsi->idx, pmask, 0);
	switch (status) {
	case ICE_ERR_ALREADY_EXISTS:
		PMD_DRV_LOG(DEBUG, "Allmulti has already been enabled");
	case ICE_SUCCESS:
		break;
	default:
		PMD_DRV_LOG(ERR, "Failed to enable allmulti, err=%d", status);
		ret = -EAGAIN;
	}

	return ret;
}
static int
ice_allmulti_disable(struct rte_eth_dev *dev)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_vsi *vsi = pf->main_vsi;
	enum ice_status status;
	uint8_t pmask;
	int ret = 0;

	if (dev->data->promiscuous == 1)
		return 0; /* must remain in all_multicast mode */

	pmask = ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX;

	status = ice_clear_vsi_promisc(hw, vsi->idx, pmask, 0);
	if (status != ICE_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to clear allmulti, err=%d", status);
		ret = -EAGAIN;
	}

	return ret;
}
static int ice_rx_queue_intr_enable(struct rte_eth_dev *dev,
				    uint16_t queue_id)
{
	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t val;
	uint16_t msix_intr;

	msix_intr = intr_handle->intr_vec[queue_id];

	val = GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
	      GLINT_DYN_CTL_ITR_INDX_M;
	val &= ~GLINT_DYN_CTL_WB_ON_ITR_M;

	ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr), val);
	rte_intr_ack(&pci_dev->intr_handle);

	return 0;
}

static int ice_rx_queue_intr_disable(struct rte_eth_dev *dev,
				     uint16_t queue_id)
{
	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint16_t msix_intr;

	msix_intr = intr_handle->intr_vec[queue_id];

	ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr), GLINT_DYN_CTL_WB_ON_ITR_M);

	return 0;
}
static int
ice_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
{
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	u8 ver, patch;
	u16 build;
	int ret;

	ver = hw->nvm.orom.major;
	patch = hw->nvm.orom.patch;
	build = hw->nvm.orom.build;

	ret = snprintf(fw_version, fw_size,
		       "%x.%02x 0x%08x %d.%d.%d",
		       hw->nvm.major_ver,
		       hw->nvm.minor_ver,
		       hw->nvm.eetrack,
		       ver, build, patch);

	/* add the size of '\0' */
	ret += 1;
	if (fw_size < (u32)ret)
		return ret;
	else
		return 0;
}
static int
ice_vsi_vlan_pvid_set(struct ice_vsi *vsi, struct ice_vsi_vlan_pvid_info *info)
{
	struct ice_hw *hw;
	struct ice_vsi_ctx ctxt;
	uint8_t vlan_flags = 0;
	int ret;

	if (!vsi || !info) {
		PMD_DRV_LOG(ERR, "invalid parameters");
		return -EINVAL;
	}

	if (info->on) {
		vsi->info.pvid = info->config.pvid;
		/**
		 * If insert pvid is enabled, only tagged pkts are
		 * allowed to be sent out.
		 */
		vlan_flags = ICE_AQ_VSI_PVLAN_INSERT_PVID |
			     ICE_AQ_VSI_VLAN_MODE_UNTAGGED;
	} else {
		vsi->info.pvid = 0;
		if (info->config.reject.tagged == 0)
			vlan_flags |= ICE_AQ_VSI_VLAN_MODE_TAGGED;

		if (info->config.reject.untagged == 0)
			vlan_flags |= ICE_AQ_VSI_VLAN_MODE_UNTAGGED;
	}
	vsi->info.vlan_flags &= ~(ICE_AQ_VSI_PVLAN_INSERT_PVID |
				  ICE_AQ_VSI_VLAN_MODE_M);
	vsi->info.vlan_flags |= vlan_flags;
	memset(&ctxt, 0, sizeof(ctxt));
	rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
	ctxt.info.valid_sections =
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID);
	ctxt.vsi_num = vsi->vsi_id;

	hw = ICE_VSI_TO_HW(vsi);
	ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
	if (ret != ICE_SUCCESS) {
		PMD_DRV_LOG(ERR,
			    "update VSI for VLAN insert failed, err %d",
			    ret);
		return -EINVAL;
	}

	vsi->info.valid_sections |=
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID);

	return 0;
}
static int
ice_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_vsi *vsi = pf->main_vsi;
	struct rte_eth_dev_data *data = pf->dev_data;
	struct ice_vsi_vlan_pvid_info info;
	int ret;

	memset(&info, 0, sizeof(info));
	info.on = on;
	if (info.on) {
		info.config.pvid = pvid;
	} else {
		info.config.reject.tagged =
			data->dev_conf.txmode.hw_vlan_reject_tagged;
		info.config.reject.untagged =
			data->dev_conf.txmode.hw_vlan_reject_untagged;
	}

	ret = ice_vsi_vlan_pvid_set(vsi, &info);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Failed to set pvid.");
		return -EINVAL;
	}

	return 0;
}
static int
ice_get_eeprom_length(struct rte_eth_dev *dev)
{
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	return hw->nvm.flash_size;
}

static int
ice_get_eeprom(struct rte_eth_dev *dev,
	       struct rte_dev_eeprom_info *eeprom)
{
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	enum ice_status status = ICE_SUCCESS;
	uint8_t *data = eeprom->data;

	eeprom->magic = hw->vendor_id | (hw->device_id << 16);

	status = ice_acquire_nvm(hw, ICE_RES_READ);
	if (status) {
		PMD_DRV_LOG(ERR, "acquire nvm failed.");
		return -EIO;
	}

	status = ice_read_flat_nvm(hw, eeprom->offset, &eeprom->length,
				   data, false);

	ice_release_nvm(hw);

	if (status) {
		PMD_DRV_LOG(ERR, "EEPROM read failed.");
		return -EIO;
	}

	return 0;
}
static void
ice_stat_update_32(struct ice_hw *hw,
		   uint32_t reg,
		   bool offset_loaded,
		   uint64_t *offset,
		   uint64_t *stat)
{
	uint64_t new_data;

	new_data = (uint64_t)ICE_READ_REG(hw, reg);
	if (!offset_loaded)
		*offset = new_data;

	if (new_data >= *offset)
		*stat = (uint64_t)(new_data - *offset);
	else
		*stat = (uint64_t)((new_data +
				    ((uint64_t)1 << ICE_32_BIT_WIDTH))
				   - *offset);
}
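
/* Hardware statistics counters are 40 bits wide, split across a low
 * 32-bit register and the low 8 bits of a high register. Deltas are
 * computed against the snapshot taken at offset-load time; when the raw
 * value has wrapped, one full 2^40 period is added back. As a worked
 * example (values assumed): offset = 0xFFFFFFFFF0, new = 0x10, so the
 * delta is 0x10 + 2^40 - 0xFFFFFFFFF0 = 0x20.
 */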
static void
ice_stat_update_40(struct ice_hw *hw,
		   uint32_t hireg,
		   uint32_t loreg,
		   bool offset_loaded,
		   uint64_t *offset,
		   uint64_t *stat)
{
	uint64_t new_data;

	new_data = (uint64_t)ICE_READ_REG(hw, loreg);
	new_data |= (uint64_t)(ICE_READ_REG(hw, hireg) & ICE_8_BIT_MASK) <<
		    ICE_32_BIT_WIDTH;

	if (!offset_loaded)
		*offset = new_data;

	if (new_data >= *offset)
		*stat = new_data - *offset;
	else
		*stat = (uint64_t)((new_data +
				    ((uint64_t)1 << ICE_40_BIT_WIDTH)) -
				   *offset);

	*stat &= ICE_40_BIT_MASK;
}
/* Get all the statistics of a VSI */
static void
ice_update_vsi_stats(struct ice_vsi *vsi)
{
	struct ice_eth_stats *oes = &vsi->eth_stats_offset;
	struct ice_eth_stats *nes = &vsi->eth_stats;
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	int idx = rte_le_to_cpu_16(vsi->vsi_id);

	ice_stat_update_40(hw, GLV_GORCH(idx), GLV_GORCL(idx),
			   vsi->offset_loaded, &oes->rx_bytes,
			   &nes->rx_bytes);
	ice_stat_update_40(hw, GLV_UPRCH(idx), GLV_UPRCL(idx),
			   vsi->offset_loaded, &oes->rx_unicast,
			   &nes->rx_unicast);
	ice_stat_update_40(hw, GLV_MPRCH(idx), GLV_MPRCL(idx),
			   vsi->offset_loaded, &oes->rx_multicast,
			   &nes->rx_multicast);
	ice_stat_update_40(hw, GLV_BPRCH(idx), GLV_BPRCL(idx),
			   vsi->offset_loaded, &oes->rx_broadcast,
			   &nes->rx_broadcast);
	/* enlarge the limitation when rx_bytes overflowed */
	if (vsi->offset_loaded) {
		if (ICE_RXTX_BYTES_LOW(vsi->old_rx_bytes) > nes->rx_bytes)
			nes->rx_bytes += (uint64_t)1 << ICE_40_BIT_WIDTH;
		nes->rx_bytes += ICE_RXTX_BYTES_HIGH(vsi->old_rx_bytes);
	}
	vsi->old_rx_bytes = nes->rx_bytes;
	/* exclude CRC bytes */
	nes->rx_bytes -= (nes->rx_unicast + nes->rx_multicast +
			  nes->rx_broadcast) * RTE_ETHER_CRC_LEN;

	ice_stat_update_32(hw, GLV_RDPC(idx), vsi->offset_loaded,
			   &oes->rx_discards, &nes->rx_discards);
	/* GLV_REPC not supported */
	/* GLV_RMPC not supported */
	ice_stat_update_32(hw, GLSWID_RUPP(idx), vsi->offset_loaded,
			   &oes->rx_unknown_protocol,
			   &nes->rx_unknown_protocol);
	ice_stat_update_40(hw, GLV_GOTCH(idx), GLV_GOTCL(idx),
			   vsi->offset_loaded, &oes->tx_bytes,
			   &nes->tx_bytes);
	ice_stat_update_40(hw, GLV_UPTCH(idx), GLV_UPTCL(idx),
			   vsi->offset_loaded, &oes->tx_unicast,
			   &nes->tx_unicast);
	ice_stat_update_40(hw, GLV_MPTCH(idx), GLV_MPTCL(idx),
			   vsi->offset_loaded, &oes->tx_multicast,
			   &nes->tx_multicast);
	ice_stat_update_40(hw, GLV_BPTCH(idx), GLV_BPTCL(idx),
			   vsi->offset_loaded, &oes->tx_broadcast,
			   &nes->tx_broadcast);
	/* GLV_TDPC not supported */
	ice_stat_update_32(hw, GLV_TEPC(idx), vsi->offset_loaded,
			   &oes->tx_errors, &nes->tx_errors);
	/* enlarge the limitation when tx_bytes overflowed */
	if (vsi->offset_loaded) {
		if (ICE_RXTX_BYTES_LOW(vsi->old_tx_bytes) > nes->tx_bytes)
			nes->tx_bytes += (uint64_t)1 << ICE_40_BIT_WIDTH;
		nes->tx_bytes += ICE_RXTX_BYTES_HIGH(vsi->old_tx_bytes);
	}
	vsi->old_tx_bytes = nes->tx_bytes;
	vsi->offset_loaded = true;

	PMD_DRV_LOG(DEBUG, "************** VSI[%u] stats start **************",
		    vsi->vsi_id);
	PMD_DRV_LOG(DEBUG, "rx_bytes: %"PRIu64"", nes->rx_bytes);
	PMD_DRV_LOG(DEBUG, "rx_unicast: %"PRIu64"", nes->rx_unicast);
	PMD_DRV_LOG(DEBUG, "rx_multicast: %"PRIu64"", nes->rx_multicast);
	PMD_DRV_LOG(DEBUG, "rx_broadcast: %"PRIu64"", nes->rx_broadcast);
	PMD_DRV_LOG(DEBUG, "rx_discards: %"PRIu64"", nes->rx_discards);
	PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
		    nes->rx_unknown_protocol);
	PMD_DRV_LOG(DEBUG, "tx_bytes: %"PRIu64"", nes->tx_bytes);
	PMD_DRV_LOG(DEBUG, "tx_unicast: %"PRIu64"", nes->tx_unicast);
	PMD_DRV_LOG(DEBUG, "tx_multicast: %"PRIu64"", nes->tx_multicast);
	PMD_DRV_LOG(DEBUG, "tx_broadcast: %"PRIu64"", nes->tx_broadcast);
	PMD_DRV_LOG(DEBUG, "tx_discards: %"PRIu64"", nes->tx_discards);
	PMD_DRV_LOG(DEBUG, "tx_errors: %"PRIu64"", nes->tx_errors);
	PMD_DRV_LOG(DEBUG, "************** VSI[%u] stats end ****************",
		    vsi->vsi_id);
}
static void
ice_read_stats_registers(struct ice_pf *pf, struct ice_hw *hw)
{
	struct ice_hw_port_stats *ns = &pf->stats; /* new stats */
	struct ice_hw_port_stats *os = &pf->stats_offset; /* old stats */

	/* Get statistics of struct ice_eth_stats */
	ice_stat_update_40(hw, GLPRT_GORCH(hw->port_info->lport),
			   GLPRT_GORCL(hw->port_info->lport),
			   pf->offset_loaded, &os->eth.rx_bytes,
			   &ns->eth.rx_bytes);
	ice_stat_update_40(hw, GLPRT_UPRCH(hw->port_info->lport),
			   GLPRT_UPRCL(hw->port_info->lport),
			   pf->offset_loaded, &os->eth.rx_unicast,
			   &ns->eth.rx_unicast);
	ice_stat_update_40(hw, GLPRT_MPRCH(hw->port_info->lport),
			   GLPRT_MPRCL(hw->port_info->lport),
			   pf->offset_loaded, &os->eth.rx_multicast,
			   &ns->eth.rx_multicast);
	ice_stat_update_40(hw, GLPRT_BPRCH(hw->port_info->lport),
			   GLPRT_BPRCL(hw->port_info->lport),
			   pf->offset_loaded, &os->eth.rx_broadcast,
			   &ns->eth.rx_broadcast);
	ice_stat_update_32(hw, PRTRPB_RDPC,
			   pf->offset_loaded, &os->eth.rx_discards,
			   &ns->eth.rx_discards);
	/* enlarge the limitation when rx_bytes overflowed */
	if (pf->offset_loaded) {
		if (ICE_RXTX_BYTES_LOW(pf->old_rx_bytes) > ns->eth.rx_bytes)
			ns->eth.rx_bytes += (uint64_t)1 << ICE_40_BIT_WIDTH;
		ns->eth.rx_bytes += ICE_RXTX_BYTES_HIGH(pf->old_rx_bytes);
	}
	pf->old_rx_bytes = ns->eth.rx_bytes;

	/* Workaround: CRC size should not be included in byte statistics,
	 * so subtract RTE_ETHER_CRC_LEN from the byte counter for each rx
	 * packet.
	 */
	ns->eth.rx_bytes -= (ns->eth.rx_unicast + ns->eth.rx_multicast +
			     ns->eth.rx_broadcast) * RTE_ETHER_CRC_LEN;

	/* GLPRT_REPC not supported */
	/* GLPRT_RMPC not supported */
	ice_stat_update_32(hw, GLSWID_RUPP(hw->port_info->lport),
			   pf->offset_loaded,
			   &os->eth.rx_unknown_protocol,
			   &ns->eth.rx_unknown_protocol);
	ice_stat_update_40(hw, GLPRT_GOTCH(hw->port_info->lport),
			   GLPRT_GOTCL(hw->port_info->lport),
			   pf->offset_loaded, &os->eth.tx_bytes,
			   &ns->eth.tx_bytes);
	ice_stat_update_40(hw, GLPRT_UPTCH(hw->port_info->lport),
			   GLPRT_UPTCL(hw->port_info->lport),
			   pf->offset_loaded, &os->eth.tx_unicast,
			   &ns->eth.tx_unicast);
	ice_stat_update_40(hw, GLPRT_MPTCH(hw->port_info->lport),
			   GLPRT_MPTCL(hw->port_info->lport),
			   pf->offset_loaded, &os->eth.tx_multicast,
			   &ns->eth.tx_multicast);
	ice_stat_update_40(hw, GLPRT_BPTCH(hw->port_info->lport),
			   GLPRT_BPTCL(hw->port_info->lport),
			   pf->offset_loaded, &os->eth.tx_broadcast,
			   &ns->eth.tx_broadcast);
	/* enlarge the limitation when tx_bytes overflowed */
	if (pf->offset_loaded) {
		if (ICE_RXTX_BYTES_LOW(pf->old_tx_bytes) > ns->eth.tx_bytes)
			ns->eth.tx_bytes += (uint64_t)1 << ICE_40_BIT_WIDTH;
		ns->eth.tx_bytes += ICE_RXTX_BYTES_HIGH(pf->old_tx_bytes);
	}
	pf->old_tx_bytes = ns->eth.tx_bytes;
	ns->eth.tx_bytes -= (ns->eth.tx_unicast + ns->eth.tx_multicast +
			     ns->eth.tx_broadcast) * RTE_ETHER_CRC_LEN;

	/* GLPRT_TEPC not supported */

	/* additional port specific stats */
	ice_stat_update_32(hw, GLPRT_TDOLD(hw->port_info->lport),
			   pf->offset_loaded, &os->tx_dropped_link_down,
			   &ns->tx_dropped_link_down);
	ice_stat_update_32(hw, GLPRT_CRCERRS(hw->port_info->lport),
			   pf->offset_loaded, &os->crc_errors,
			   &ns->crc_errors);
	ice_stat_update_32(hw, GLPRT_ILLERRC(hw->port_info->lport),
			   pf->offset_loaded, &os->illegal_bytes,
			   &ns->illegal_bytes);
	/* GLPRT_ERRBC not supported */
	ice_stat_update_32(hw, GLPRT_MLFC(hw->port_info->lport),
			   pf->offset_loaded, &os->mac_local_faults,
			   &ns->mac_local_faults);
	ice_stat_update_32(hw, GLPRT_MRFC(hw->port_info->lport),
			   pf->offset_loaded, &os->mac_remote_faults,
			   &ns->mac_remote_faults);

	ice_stat_update_32(hw, GLPRT_RLEC(hw->port_info->lport),
			   pf->offset_loaded, &os->rx_len_errors,
			   &ns->rx_len_errors);

	ice_stat_update_32(hw, GLPRT_LXONRXC(hw->port_info->lport),
			   pf->offset_loaded, &os->link_xon_rx,
			   &ns->link_xon_rx);
	ice_stat_update_32(hw, GLPRT_LXOFFRXC(hw->port_info->lport),
			   pf->offset_loaded, &os->link_xoff_rx,
			   &ns->link_xoff_rx);
	ice_stat_update_32(hw, GLPRT_LXONTXC(hw->port_info->lport),
			   pf->offset_loaded, &os->link_xon_tx,
			   &ns->link_xon_tx);
	ice_stat_update_32(hw, GLPRT_LXOFFTXC(hw->port_info->lport),
			   pf->offset_loaded, &os->link_xoff_tx,
			   &ns->link_xoff_tx);
	ice_stat_update_40(hw, GLPRT_PRC64H(hw->port_info->lport),
			   GLPRT_PRC64L(hw->port_info->lport),
			   pf->offset_loaded, &os->rx_size_64,
			   &ns->rx_size_64);
	ice_stat_update_40(hw, GLPRT_PRC127H(hw->port_info->lport),
			   GLPRT_PRC127L(hw->port_info->lport),
			   pf->offset_loaded, &os->rx_size_127,
			   &ns->rx_size_127);
	ice_stat_update_40(hw, GLPRT_PRC255H(hw->port_info->lport),
			   GLPRT_PRC255L(hw->port_info->lport),
			   pf->offset_loaded, &os->rx_size_255,
			   &ns->rx_size_255);
	ice_stat_update_40(hw, GLPRT_PRC511H(hw->port_info->lport),
			   GLPRT_PRC511L(hw->port_info->lport),
			   pf->offset_loaded, &os->rx_size_511,
			   &ns->rx_size_511);
	ice_stat_update_40(hw, GLPRT_PRC1023H(hw->port_info->lport),
			   GLPRT_PRC1023L(hw->port_info->lport),
			   pf->offset_loaded, &os->rx_size_1023,
			   &ns->rx_size_1023);
	ice_stat_update_40(hw, GLPRT_PRC1522H(hw->port_info->lport),
			   GLPRT_PRC1522L(hw->port_info->lport),
			   pf->offset_loaded, &os->rx_size_1522,
			   &ns->rx_size_1522);
	ice_stat_update_40(hw, GLPRT_PRC9522H(hw->port_info->lport),
			   GLPRT_PRC9522L(hw->port_info->lport),
			   pf->offset_loaded, &os->rx_size_big,
			   &ns->rx_size_big);
	ice_stat_update_32(hw, GLPRT_RUC(hw->port_info->lport),
			   pf->offset_loaded, &os->rx_undersize,
			   &ns->rx_undersize);
	ice_stat_update_32(hw, GLPRT_RFC(hw->port_info->lport),
			   pf->offset_loaded, &os->rx_fragments,
			   &ns->rx_fragments);
	ice_stat_update_32(hw, GLPRT_ROC(hw->port_info->lport),
			   pf->offset_loaded, &os->rx_oversize,
			   &ns->rx_oversize);
	ice_stat_update_32(hw, GLPRT_RJC(hw->port_info->lport),
			   pf->offset_loaded, &os->rx_jabber,
			   &ns->rx_jabber);
	ice_stat_update_40(hw, GLPRT_PTC64H(hw->port_info->lport),
			   GLPRT_PTC64L(hw->port_info->lport),
			   pf->offset_loaded, &os->tx_size_64,
			   &ns->tx_size_64);
	ice_stat_update_40(hw, GLPRT_PTC127H(hw->port_info->lport),
			   GLPRT_PTC127L(hw->port_info->lport),
			   pf->offset_loaded, &os->tx_size_127,
			   &ns->tx_size_127);
	ice_stat_update_40(hw, GLPRT_PTC255H(hw->port_info->lport),
			   GLPRT_PTC255L(hw->port_info->lport),
			   pf->offset_loaded, &os->tx_size_255,
			   &ns->tx_size_255);
	ice_stat_update_40(hw, GLPRT_PTC511H(hw->port_info->lport),
			   GLPRT_PTC511L(hw->port_info->lport),
			   pf->offset_loaded, &os->tx_size_511,
			   &ns->tx_size_511);
	ice_stat_update_40(hw, GLPRT_PTC1023H(hw->port_info->lport),
			   GLPRT_PTC1023L(hw->port_info->lport),
			   pf->offset_loaded, &os->tx_size_1023,
			   &ns->tx_size_1023);
	ice_stat_update_40(hw, GLPRT_PTC1522H(hw->port_info->lport),
			   GLPRT_PTC1522L(hw->port_info->lport),
			   pf->offset_loaded, &os->tx_size_1522,
			   &ns->tx_size_1522);
	ice_stat_update_40(hw, GLPRT_PTC9522H(hw->port_info->lport),
			   GLPRT_PTC9522L(hw->port_info->lport),
			   pf->offset_loaded, &os->tx_size_big,
			   &ns->tx_size_big);

	/* GLPRT_MSPDC not supported */
	/* GLPRT_XEC not supported */

	pf->offset_loaded = true;

	if (pf->main_vsi)
		ice_update_vsi_stats(pf->main_vsi);
}

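/* Note: Rx packet/byte totals below come from the main VSI counters rather
 * than the port-level ones, and VSI rx_discards are subtracted from
 * ipackets so that dropped packets are reported through imissed instead.
 */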
/* Get all statistics of a port */
static int
ice_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_hw_port_stats *ns = &pf->stats; /* new stats */

	/* call read registers - updates values, now write them to struct */
	ice_read_stats_registers(pf, hw);

	stats->ipackets = pf->main_vsi->eth_stats.rx_unicast +
			  pf->main_vsi->eth_stats.rx_multicast +
			  pf->main_vsi->eth_stats.rx_broadcast -
			  pf->main_vsi->eth_stats.rx_discards;
	stats->opackets = ns->eth.tx_unicast +
			  ns->eth.tx_multicast +
			  ns->eth.tx_broadcast;
	stats->ibytes = pf->main_vsi->eth_stats.rx_bytes;
	stats->obytes = ns->eth.tx_bytes;
	stats->oerrors = ns->eth.tx_errors +
			 pf->main_vsi->eth_stats.tx_errors;

	/* Rx Errors */
	stats->imissed = ns->eth.rx_discards +
			 pf->main_vsi->eth_stats.rx_discards;
	stats->ierrors = ns->crc_errors +
			 ns->rx_undersize +
			 ns->rx_oversize + ns->rx_fragments + ns->rx_jabber;

	PMD_DRV_LOG(DEBUG, "*************** PF stats start *****************");
	PMD_DRV_LOG(DEBUG, "rx_bytes: %"PRIu64"", ns->eth.rx_bytes);
	PMD_DRV_LOG(DEBUG, "rx_unicast: %"PRIu64"", ns->eth.rx_unicast);
	PMD_DRV_LOG(DEBUG, "rx_multicast:%"PRIu64"", ns->eth.rx_multicast);
	PMD_DRV_LOG(DEBUG, "rx_broadcast:%"PRIu64"", ns->eth.rx_broadcast);
	PMD_DRV_LOG(DEBUG, "rx_discards:%"PRIu64"", ns->eth.rx_discards);
	PMD_DRV_LOG(DEBUG, "vsi rx_discards:%"PRIu64"",
		    pf->main_vsi->eth_stats.rx_discards);
	PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
		    ns->eth.rx_unknown_protocol);
	PMD_DRV_LOG(DEBUG, "tx_bytes: %"PRIu64"", ns->eth.tx_bytes);
	PMD_DRV_LOG(DEBUG, "tx_unicast: %"PRIu64"", ns->eth.tx_unicast);
	PMD_DRV_LOG(DEBUG, "tx_multicast:%"PRIu64"", ns->eth.tx_multicast);
	PMD_DRV_LOG(DEBUG, "tx_broadcast:%"PRIu64"", ns->eth.tx_broadcast);
	PMD_DRV_LOG(DEBUG, "tx_discards:%"PRIu64"", ns->eth.tx_discards);
	PMD_DRV_LOG(DEBUG, "vsi tx_discards:%"PRIu64"",
		    pf->main_vsi->eth_stats.tx_discards);
	PMD_DRV_LOG(DEBUG, "tx_errors: %"PRIu64"", ns->eth.tx_errors);

	PMD_DRV_LOG(DEBUG, "tx_dropped_link_down: %"PRIu64"",
		    ns->tx_dropped_link_down);
	PMD_DRV_LOG(DEBUG, "crc_errors: %"PRIu64"", ns->crc_errors);
	PMD_DRV_LOG(DEBUG, "illegal_bytes: %"PRIu64"",
		    ns->illegal_bytes);
	PMD_DRV_LOG(DEBUG, "error_bytes: %"PRIu64"", ns->error_bytes);
	PMD_DRV_LOG(DEBUG, "mac_local_faults: %"PRIu64"",
		    ns->mac_local_faults);
	PMD_DRV_LOG(DEBUG, "mac_remote_faults: %"PRIu64"",
		    ns->mac_remote_faults);
	PMD_DRV_LOG(DEBUG, "link_xon_rx: %"PRIu64"", ns->link_xon_rx);
	PMD_DRV_LOG(DEBUG, "link_xoff_rx: %"PRIu64"", ns->link_xoff_rx);
	PMD_DRV_LOG(DEBUG, "link_xon_tx: %"PRIu64"", ns->link_xon_tx);
	PMD_DRV_LOG(DEBUG, "link_xoff_tx: %"PRIu64"", ns->link_xoff_tx);
	PMD_DRV_LOG(DEBUG, "rx_size_64: %"PRIu64"", ns->rx_size_64);
	PMD_DRV_LOG(DEBUG, "rx_size_127: %"PRIu64"", ns->rx_size_127);
	PMD_DRV_LOG(DEBUG, "rx_size_255: %"PRIu64"", ns->rx_size_255);
	PMD_DRV_LOG(DEBUG, "rx_size_511: %"PRIu64"", ns->rx_size_511);
	PMD_DRV_LOG(DEBUG, "rx_size_1023: %"PRIu64"", ns->rx_size_1023);
	PMD_DRV_LOG(DEBUG, "rx_size_1522: %"PRIu64"", ns->rx_size_1522);
	PMD_DRV_LOG(DEBUG, "rx_size_big: %"PRIu64"", ns->rx_size_big);
	PMD_DRV_LOG(DEBUG, "rx_undersize: %"PRIu64"", ns->rx_undersize);
	PMD_DRV_LOG(DEBUG, "rx_fragments: %"PRIu64"", ns->rx_fragments);
	PMD_DRV_LOG(DEBUG, "rx_oversize: %"PRIu64"", ns->rx_oversize);
	PMD_DRV_LOG(DEBUG, "rx_jabber: %"PRIu64"", ns->rx_jabber);
	PMD_DRV_LOG(DEBUG, "tx_size_64: %"PRIu64"", ns->tx_size_64);
	PMD_DRV_LOG(DEBUG, "tx_size_127: %"PRIu64"", ns->tx_size_127);
	PMD_DRV_LOG(DEBUG, "tx_size_255: %"PRIu64"", ns->tx_size_255);
	PMD_DRV_LOG(DEBUG, "tx_size_511: %"PRIu64"", ns->tx_size_511);
	PMD_DRV_LOG(DEBUG, "tx_size_1023: %"PRIu64"", ns->tx_size_1023);
	PMD_DRV_LOG(DEBUG, "tx_size_1522: %"PRIu64"", ns->tx_size_1522);
	PMD_DRV_LOG(DEBUG, "tx_size_big: %"PRIu64"", ns->tx_size_big);
	PMD_DRV_LOG(DEBUG, "rx_len_errors: %"PRIu64"", ns->rx_len_errors);
	PMD_DRV_LOG(DEBUG, "************* PF stats end ****************");

	return 0;
}

/* Reset the statistics */
static int
ice_stats_reset(struct rte_eth_dev *dev)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* Mark PF and VSI stats to update the offset, aka "reset" */
	pf->offset_loaded = false;
	if (pf->main_vsi)
		pf->main_vsi->offset_loaded = false;

	/* read the stats, reading current register values into offset */
	ice_read_stats_registers(pf, hw);

	return 0;
}

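/* The number of xstats is fixed at build time: the generic ethernet
 * counters (ice_stats_strings) plus the port-level counters
 * (ice_hw_port_strings).
 */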
static uint32_t
ice_xstats_calc_num(void)
{
	uint32_t num;

	num = ICE_NB_ETH_XSTATS + ICE_NB_HW_PORT_XSTATS;

	return num;
}

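/* ethdev xstats_get callback. Per the ethdev contract, when the caller's
 * array is too small the required count is returned and nothing is written,
 * so applications can size the array with a first probing call.
 */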
static int
ice_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
	       unsigned int n)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	unsigned int i;
	unsigned int count;
	struct ice_hw_port_stats *hw_stats = &pf->stats;

	count = ice_xstats_calc_num();
	if (n < count)
		return count;

	ice_read_stats_registers(pf, hw);

	if (!xstats)
		return 0;

	count = 0;

	/* Get stats from ice_eth_stats struct */
	for (i = 0; i < ICE_NB_ETH_XSTATS; i++) {
		xstats[count].value =
			*(uint64_t *)((char *)&hw_stats->eth +
				      ice_stats_strings[i].offset);
		xstats[count].id = count;
		count++;
	}

	/* Get individual stats from ice_hw_port struct */
	for (i = 0; i < ICE_NB_HW_PORT_XSTATS; i++) {
		xstats[count].value =
			*(uint64_t *)((char *)hw_stats +
				      ice_hw_port_strings[i].offset);
		xstats[count].id = count;
		count++;
	}

	return count;
}

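/* ethdev xstats_get_names callback; a NULL xstats_names array is the
 * standard probe for the number of available names.
 */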
static int ice_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
				struct rte_eth_xstat_name *xstats_names,
				__rte_unused unsigned int limit)
{
	unsigned int count = 0;
	unsigned int i;

	if (!xstats_names)
		return ice_xstats_calc_num();

	/* Note: limit checked in rte_eth_xstats_names() */

	/* Get stats from ice_eth_stats struct */
	for (i = 0; i < ICE_NB_ETH_XSTATS; i++) {
		strlcpy(xstats_names[count].name, ice_stats_strings[i].name,
			sizeof(xstats_names[count].name));
		count++;
	}

	/* Get individual stats from ice_hw_port struct */
	for (i = 0; i < ICE_NB_HW_PORT_XSTATS; i++) {
		strlcpy(xstats_names[count].name, ice_hw_port_strings[i].name,
			sizeof(xstats_names[count].name));
		count++;
	}

	return count;
}

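/* Legacy filter_ctrl entry point. Only RTE_ETH_FILTER_GENERIC is handled,
 * returning the rte_flow ops table (ice_flow_ops) from the generic flow
 * framework; all other filter types are rejected.
 */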
static int
ice_dev_filter_ctrl(struct rte_eth_dev *dev,
		    enum rte_filter_type filter_type,
		    enum rte_filter_op filter_op,
		    void *arg)
{
	int ret = 0;

	if (!dev)
		return -EINVAL;

	switch (filter_type) {
	case RTE_ETH_FILTER_GENERIC:
		if (filter_op != RTE_ETH_FILTER_GET)
			return -EINVAL;
		*(const void **)arg = &ice_flow_ops;
		break;
	default:
		PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
			    filter_type);
		ret = -EINVAL;
		break;
	}

	return ret;
}

/* Add UDP tunneling port */
static int
ice_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
			    struct rte_eth_udp_tunnel *udp_tunnel)
{
	int ret = 0;
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (udp_tunnel == NULL)
		return -EINVAL;

	switch (udp_tunnel->prot_type) {
	case RTE_TUNNEL_TYPE_VXLAN:
		ret = ice_create_tunnel(hw, TNL_VXLAN, udp_tunnel->udp_port);
		break;
	default:
		PMD_DRV_LOG(ERR, "Invalid tunnel type");
		ret = -EINVAL;
		break;
	}

	return ret;
}

/* Delete UDP tunneling port */
static int
ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
			    struct rte_eth_udp_tunnel *udp_tunnel)
{
	int ret = 0;
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (udp_tunnel == NULL)
		return -EINVAL;

	switch (udp_tunnel->prot_type) {
	case RTE_TUNNEL_TYPE_VXLAN:
		ret = ice_destroy_tunnel(hw, udp_tunnel->udp_port, 0);
		break;
	default:
		PMD_DRV_LOG(ERR, "Invalid tunnel type");
		ret = -EINVAL;
		break;
	}

	return ret;
}

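/* PCI probe/remove glue: allocate one ice_adapter per device and run
 * ice_dev_init/ice_dev_uninit through the generic ethdev PCI helpers.
 */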
static int
ice_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	      struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
					     sizeof(struct ice_adapter),
					     ice_dev_init);
}

static int
ice_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, ice_dev_uninit);
}

static struct rte_pci_driver rte_ice_pmd = {
	.id_table = pci_id_ice_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = ice_pci_probe,
	.remove = ice_pci_remove,
};

/**
 * Driver initialization routine.
 * Invoked once at EAL init time.
 * Register itself as the [Poll Mode] Driver of PCI devices.
 */
RTE_PMD_REGISTER_PCI(net_ice, rte_ice_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_ice, pci_id_ice_map);
RTE_PMD_REGISTER_KMOD_DEP(net_ice, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(net_ice,
			      ICE_PROTO_XTR_ARG "=[queue:]<vlan|ipv4|ipv6|ipv6_flow|tcp|ip_offset>"
			      ICE_SAFE_MODE_SUPPORT_ARG "=<0|1>"
			      ICE_PIPELINE_MODE_SUPPORT_ARG "=<0|1>"
			      ICE_FLOW_MARK_SUPPORT_ARG "=<0|1>");
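
/* Example devargs (illustrative; the PCI address and queue indices are
 * placeholders) matching the parameter string above:
 *   -w 18:00.0,proto_xtr='[(1,2-3,8-9):tcp,10-13:vlan]',safe-mode-support=1
 * Each queue index or range is bound to one extraction type; the boolean
 * flags default to 0 when omitted.
 */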
RTE_LOG_REGISTER(ice_logtype_init, pmd.net.ice.init, NOTICE);
RTE_LOG_REGISTER(ice_logtype_driver, pmd.net.ice.driver, NOTICE);
#ifdef RTE_LIBRTE_ICE_DEBUG_RX
RTE_LOG_REGISTER(ice_logtype_rx, pmd.net.ice.rx, DEBUG);
#endif
#ifdef RTE_LIBRTE_ICE_DEBUG_TX
RTE_LOG_REGISTER(ice_logtype_tx, pmd.net.ice.tx, DEBUG);
#endif
#ifdef RTE_LIBRTE_ICE_DEBUG_TX_FREE
RTE_LOG_REGISTER(ice_logtype_tx_free, pmd.net.ice.tx_free, DEBUG);
#endif