/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation
 */

#include <rte_string_fns.h>
#include <rte_ethdev_pci.h>

#include <stdio.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>

#include "base/ice_sched.h"
#include "base/ice_flow.h"
#include "base/ice_dcb.h"
#include "base/ice_common.h"

#include "rte_pmd_ice.h"
#include "ice_ethdev.h"
#include "ice_rxtx.h"
#include "ice_generic_flow.h"

/* devargs */
#define ICE_SAFE_MODE_SUPPORT_ARG "safe-mode-support"
#define ICE_PIPELINE_MODE_SUPPORT_ARG "pipeline-mode-support"
#define ICE_PROTO_XTR_ARG         "proto_xtr"
static const char * const ice_valid_args[] = {
	ICE_SAFE_MODE_SUPPORT_ARG,
	ICE_PIPELINE_MODE_SUPPORT_ARG,
	ICE_PROTO_XTR_ARG,
	NULL
};
static const struct rte_mbuf_dynfield ice_proto_xtr_metadata_param = {
	.name = "ice_dynfield_proto_xtr_metadata",
	.size = sizeof(uint32_t),
	.align = __alignof__(uint32_t),
	.flags = 0,
};
struct proto_xtr_ol_flag {
	const struct rte_mbuf_dynflag param;
	uint64_t *ol_flag;
	bool required;
};

static bool ice_proto_xtr_hw_support[PROTO_XTR_MAX];
static struct proto_xtr_ol_flag ice_proto_xtr_ol_flag_params[] = {
	[PROTO_XTR_VLAN] = {
		.param = { .name = "ice_dynflag_proto_xtr_vlan" },
		.ol_flag = &rte_net_ice_dynflag_proto_xtr_vlan_mask },
	[PROTO_XTR_IPV4] = {
		.param = { .name = "ice_dynflag_proto_xtr_ipv4" },
		.ol_flag = &rte_net_ice_dynflag_proto_xtr_ipv4_mask },
	[PROTO_XTR_IPV6] = {
		.param = { .name = "ice_dynflag_proto_xtr_ipv6" },
		.ol_flag = &rte_net_ice_dynflag_proto_xtr_ipv6_mask },
	[PROTO_XTR_IPV6_FLOW] = {
		.param = { .name = "ice_dynflag_proto_xtr_ipv6_flow" },
		.ol_flag = &rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask },
	[PROTO_XTR_TCP] = {
		.param = { .name = "ice_dynflag_proto_xtr_tcp" },
		.ol_flag = &rte_net_ice_dynflag_proto_xtr_tcp_mask },
	[PROTO_XTR_IP_OFFSET] = {
		.param = { .name = "ice_dynflag_proto_xtr_ip_offset" },
		.ol_flag = &rte_net_ice_dynflag_proto_xtr_ip_offset_mask },
};
#define ICE_DFLT_OUTER_TAG_TYPE ICE_AQ_VSI_OUTER_TAG_VLAN_9100

#define ICE_OS_DEFAULT_PKG_NAME		"ICE OS Default Package"
#define ICE_COMMS_PKG_NAME		"ICE COMMS Package"
#define ICE_MAX_RES_DESC_NUM		1024
static int ice_dev_configure(struct rte_eth_dev *dev);
static int ice_dev_start(struct rte_eth_dev *dev);
static void ice_dev_stop(struct rte_eth_dev *dev);
static void ice_dev_close(struct rte_eth_dev *dev);
static int ice_dev_reset(struct rte_eth_dev *dev);
static int ice_dev_info_get(struct rte_eth_dev *dev,
			    struct rte_eth_dev_info *dev_info);
static int ice_link_update(struct rte_eth_dev *dev,
			   int wait_to_complete);
static int ice_dev_set_link_up(struct rte_eth_dev *dev);
static int ice_dev_set_link_down(struct rte_eth_dev *dev);

static int ice_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
static int ice_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static int ice_rss_reta_update(struct rte_eth_dev *dev,
			       struct rte_eth_rss_reta_entry64 *reta_conf,
			       uint16_t reta_size);
static int ice_rss_reta_query(struct rte_eth_dev *dev,
			      struct rte_eth_rss_reta_entry64 *reta_conf,
			      uint16_t reta_size);
static int ice_rss_hash_update(struct rte_eth_dev *dev,
			       struct rte_eth_rss_conf *rss_conf);
static int ice_rss_hash_conf_get(struct rte_eth_dev *dev,
				 struct rte_eth_rss_conf *rss_conf);
static int ice_promisc_enable(struct rte_eth_dev *dev);
static int ice_promisc_disable(struct rte_eth_dev *dev);
static int ice_allmulti_enable(struct rte_eth_dev *dev);
static int ice_allmulti_disable(struct rte_eth_dev *dev);
static int ice_vlan_filter_set(struct rte_eth_dev *dev,
			       uint16_t vlan_id,
			       int on);
static int ice_macaddr_set(struct rte_eth_dev *dev,
			   struct rte_ether_addr *mac_addr);
static int ice_macaddr_add(struct rte_eth_dev *dev,
			   struct rte_ether_addr *mac_addr,
			   __rte_unused uint32_t index,
			   uint32_t pool);
static void ice_macaddr_remove(struct rte_eth_dev *dev, uint32_t index);
static int ice_rx_queue_intr_enable(struct rte_eth_dev *dev,
				    uint16_t queue_id);
static int ice_rx_queue_intr_disable(struct rte_eth_dev *dev,
				     uint16_t queue_id);
static int ice_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
			      size_t fw_size);
static int ice_vlan_pvid_set(struct rte_eth_dev *dev,
			     uint16_t pvid, int on);
static int ice_get_eeprom_length(struct rte_eth_dev *dev);
static int ice_get_eeprom(struct rte_eth_dev *dev,
			  struct rte_dev_eeprom_info *eeprom);
static int ice_stats_get(struct rte_eth_dev *dev,
			 struct rte_eth_stats *stats);
static int ice_stats_reset(struct rte_eth_dev *dev);
static int ice_xstats_get(struct rte_eth_dev *dev,
			  struct rte_eth_xstat *xstats, unsigned int n);
static int ice_xstats_get_names(struct rte_eth_dev *dev,
				struct rte_eth_xstat_name *xstats_names,
				unsigned int limit);
static int ice_dev_filter_ctrl(struct rte_eth_dev *dev,
			       enum rte_filter_type filter_type,
			       enum rte_filter_op filter_op,
			       void *arg);
static int ice_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
				       struct rte_eth_udp_tunnel *udp_tunnel);
static int ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
				       struct rte_eth_udp_tunnel *udp_tunnel);
static const struct rte_pci_id pci_id_ice_map[] = {
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_BACKPLANE) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_SFP) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_10G_BASE_T) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_1GBE) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_QSFP) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_BACKPLANE) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810_XXV_BACKPLANE) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810_XXV_QSFP) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810_XXV_SFP) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_BACKPLANE) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_QSFP) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_SFP) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_10G_BASE_T) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_SGMII) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822L_BACKPLANE) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822L_SFP) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822L_10G_BASE_T) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822L_SGMII) },
	{ .vendor_id = 0, /* sentinel */ },
};
static const struct eth_dev_ops ice_eth_dev_ops = {
	.dev_configure                = ice_dev_configure,
	.dev_start                    = ice_dev_start,
	.dev_stop                     = ice_dev_stop,
	.dev_close                    = ice_dev_close,
	.dev_reset                    = ice_dev_reset,
	.dev_set_link_up              = ice_dev_set_link_up,
	.dev_set_link_down            = ice_dev_set_link_down,
	.rx_queue_start               = ice_rx_queue_start,
	.rx_queue_stop                = ice_rx_queue_stop,
	.tx_queue_start               = ice_tx_queue_start,
	.tx_queue_stop                = ice_tx_queue_stop,
	.rx_queue_setup               = ice_rx_queue_setup,
	.rx_queue_release             = ice_rx_queue_release,
	.tx_queue_setup               = ice_tx_queue_setup,
	.tx_queue_release             = ice_tx_queue_release,
	.dev_infos_get                = ice_dev_info_get,
	.dev_supported_ptypes_get     = ice_dev_supported_ptypes_get,
	.link_update                  = ice_link_update,
	.mtu_set                      = ice_mtu_set,
	.mac_addr_set                 = ice_macaddr_set,
	.mac_addr_add                 = ice_macaddr_add,
	.mac_addr_remove              = ice_macaddr_remove,
	.vlan_filter_set              = ice_vlan_filter_set,
	.vlan_offload_set             = ice_vlan_offload_set,
	.reta_update                  = ice_rss_reta_update,
	.reta_query                   = ice_rss_reta_query,
	.rss_hash_update              = ice_rss_hash_update,
	.rss_hash_conf_get            = ice_rss_hash_conf_get,
	.promiscuous_enable           = ice_promisc_enable,
	.promiscuous_disable          = ice_promisc_disable,
	.allmulticast_enable          = ice_allmulti_enable,
	.allmulticast_disable         = ice_allmulti_disable,
	.rx_queue_intr_enable         = ice_rx_queue_intr_enable,
	.rx_queue_intr_disable        = ice_rx_queue_intr_disable,
	.fw_version_get               = ice_fw_version_get,
	.vlan_pvid_set                = ice_vlan_pvid_set,
	.rxq_info_get                 = ice_rxq_info_get,
	.txq_info_get                 = ice_txq_info_get,
	.rx_burst_mode_get            = ice_rx_burst_mode_get,
	.tx_burst_mode_get            = ice_tx_burst_mode_get,
	.get_eeprom_length            = ice_get_eeprom_length,
	.get_eeprom                   = ice_get_eeprom,
	.stats_get                    = ice_stats_get,
	.stats_reset                  = ice_stats_reset,
	.xstats_get                   = ice_xstats_get,
	.xstats_get_names             = ice_xstats_get_names,
	.xstats_reset                 = ice_stats_reset,
	.filter_ctrl                  = ice_dev_filter_ctrl,
	.udp_tunnel_port_add          = ice_dev_udp_tunnel_port_add,
	.udp_tunnel_port_del          = ice_dev_udp_tunnel_port_del,
	.tx_done_cleanup              = ice_tx_done_cleanup,
};
/* store statistics names and their offsets in the stats structure */
struct ice_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned int offset;
};
static const struct ice_xstats_name_off ice_stats_strings[] = {
	{"rx_unicast_packets", offsetof(struct ice_eth_stats, rx_unicast)},
	{"rx_multicast_packets", offsetof(struct ice_eth_stats, rx_multicast)},
	{"rx_broadcast_packets", offsetof(struct ice_eth_stats, rx_broadcast)},
	{"rx_dropped_packets", offsetof(struct ice_eth_stats, rx_discards)},
	{"rx_unknown_protocol_packets", offsetof(struct ice_eth_stats,
		rx_unknown_protocol)},
	{"tx_unicast_packets", offsetof(struct ice_eth_stats, tx_unicast)},
	{"tx_multicast_packets", offsetof(struct ice_eth_stats, tx_multicast)},
	{"tx_broadcast_packets", offsetof(struct ice_eth_stats, tx_broadcast)},
	{"tx_dropped_packets", offsetof(struct ice_eth_stats, tx_discards)},
};

#define ICE_NB_ETH_XSTATS (sizeof(ice_stats_strings) / \
		sizeof(ice_stats_strings[0]))
static const struct ice_xstats_name_off ice_hw_port_strings[] = {
	{"tx_link_down_dropped", offsetof(struct ice_hw_port_stats,
		tx_dropped_link_down)},
	{"rx_crc_errors", offsetof(struct ice_hw_port_stats, crc_errors)},
	{"rx_illegal_byte_errors", offsetof(struct ice_hw_port_stats,
		illegal_bytes)},
	{"rx_error_bytes", offsetof(struct ice_hw_port_stats, error_bytes)},
	{"mac_local_errors", offsetof(struct ice_hw_port_stats,
		mac_local_faults)},
	{"mac_remote_errors", offsetof(struct ice_hw_port_stats,
		mac_remote_faults)},
	{"rx_len_errors", offsetof(struct ice_hw_port_stats,
		rx_len_errors)},
	{"tx_xon_packets", offsetof(struct ice_hw_port_stats, link_xon_tx)},
	{"rx_xon_packets", offsetof(struct ice_hw_port_stats, link_xon_rx)},
	{"tx_xoff_packets", offsetof(struct ice_hw_port_stats, link_xoff_tx)},
	{"rx_xoff_packets", offsetof(struct ice_hw_port_stats, link_xoff_rx)},
	{"rx_size_64_packets", offsetof(struct ice_hw_port_stats, rx_size_64)},
	{"rx_size_65_to_127_packets", offsetof(struct ice_hw_port_stats,
		rx_size_127)},
	{"rx_size_128_to_255_packets", offsetof(struct ice_hw_port_stats,
		rx_size_255)},
	{"rx_size_256_to_511_packets", offsetof(struct ice_hw_port_stats,
		rx_size_511)},
	{"rx_size_512_to_1023_packets", offsetof(struct ice_hw_port_stats,
		rx_size_1023)},
	{"rx_size_1024_to_1522_packets", offsetof(struct ice_hw_port_stats,
		rx_size_1522)},
	{"rx_size_1523_to_max_packets", offsetof(struct ice_hw_port_stats,
		rx_size_big)},
	{"rx_undersized_errors", offsetof(struct ice_hw_port_stats,
		rx_undersize)},
	{"rx_oversize_errors", offsetof(struct ice_hw_port_stats,
		rx_oversize)},
	{"rx_mac_short_pkt_dropped", offsetof(struct ice_hw_port_stats,
		mac_short_pkt_dropped)},
	{"rx_fragmented_errors", offsetof(struct ice_hw_port_stats,
		rx_fragments)},
	{"rx_jabber_errors", offsetof(struct ice_hw_port_stats, rx_jabber)},
	{"tx_size_64_packets", offsetof(struct ice_hw_port_stats, tx_size_64)},
	{"tx_size_65_to_127_packets", offsetof(struct ice_hw_port_stats,
		tx_size_127)},
	{"tx_size_128_to_255_packets", offsetof(struct ice_hw_port_stats,
		tx_size_255)},
	{"tx_size_256_to_511_packets", offsetof(struct ice_hw_port_stats,
		tx_size_511)},
	{"tx_size_512_to_1023_packets", offsetof(struct ice_hw_port_stats,
		tx_size_1023)},
	{"tx_size_1024_to_1522_packets", offsetof(struct ice_hw_port_stats,
		tx_size_1522)},
	{"tx_size_1523_to_max_packets", offsetof(struct ice_hw_port_stats,
		tx_size_big)},
};

#define ICE_NB_HW_PORT_XSTATS (sizeof(ice_hw_port_strings) / \
		sizeof(ice_hw_port_strings[0]))
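/*
 * Illustration (not part of the driver): the xstats callbacks walk these
 * name/offset tables and read each counter out of the stats structure,
 * along the lines of:
 *
 *	const struct ice_xstats_name_off *x = &ice_hw_port_strings[i];
 *	uint64_t value = *(uint64_t *)((char *)&pf->stats + x->offset);
 *
 * so every entry above must point at a uint64_t field of
 * struct ice_hw_port_stats.
 */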
static void
ice_init_controlq_parameter(struct ice_hw *hw)
{
	/* fields for adminq */
	hw->adminq.num_rq_entries = ICE_ADMINQ_LEN;
	hw->adminq.num_sq_entries = ICE_ADMINQ_LEN;
	hw->adminq.rq_buf_size = ICE_ADMINQ_BUF_SZ;
	hw->adminq.sq_buf_size = ICE_ADMINQ_BUF_SZ;

	/* fields for mailboxq, with DPDK acting as the PF host */
	hw->mailboxq.num_rq_entries = ICE_MAILBOXQ_LEN;
	hw->mailboxq.num_sq_entries = ICE_MAILBOXQ_LEN;
	hw->mailboxq.rq_buf_size = ICE_MAILBOXQ_BUF_SZ;
	hw->mailboxq.sq_buf_size = ICE_MAILBOXQ_BUF_SZ;
}
static int
lookup_proto_xtr_type(const char *xtr_name)
{
	static struct {
		const char *name;
		enum proto_xtr_type type;
	} xtr_type_map[] = {
		{ "vlan",      PROTO_XTR_VLAN      },
		{ "ipv4",      PROTO_XTR_IPV4      },
		{ "ipv6",      PROTO_XTR_IPV6      },
		{ "ipv6_flow", PROTO_XTR_IPV6_FLOW },
		{ "tcp",       PROTO_XTR_TCP       },
		{ "ip_offset", PROTO_XTR_IP_OFFSET },
	};
	uint32_t i;

	for (i = 0; i < RTE_DIM(xtr_type_map); i++) {
		if (strcmp(xtr_name, xtr_type_map[i].name) == 0)
			return xtr_type_map[i].type;
	}

	return -1;
}
/*
 * Parse an element. An element can be a single number, a single range,
 * or a '(' ')' group:
 * 1) A single number element is just one number, e.g. 9
 * 2) A single range element is two numbers separated by '-', e.g. 2-6
 * 3) A group element combines multiple 1) or 2) with '( )', e.g. (0,2-4,6)
 * Within a group element, '-' is used as the range separator and
 * ',' separates single numbers.
 */
static int
parse_queue_set(const char *input, int xtr_type, struct ice_devargs *devargs)
{
	const char *str = input;
	char *end = NULL;
	uint32_t min, max;
	uint32_t idx;

	while (isblank(*str))
		str++;

	if (!isdigit(*str) && *str != '(')
		return -1;

	/* process single number or single range of number */
	if (*str != '(') {
		errno = 0;
		idx = strtoul(str, &end, 10);
		if (errno || end == NULL || idx >= ICE_MAX_QUEUE_NUM)
			return -1;
		while (isblank(*end))
			end++;

		min = idx;
		max = idx;

		/* process single <number>-<number> */
		if (*end == '-') {
			end++;
			while (isblank(*end))
				end++;
			if (!isdigit(*end))
				return -1;
			errno = 0;
			idx = strtoul(end, &end, 10);
			if (errno || end == NULL || idx >= ICE_MAX_QUEUE_NUM)
				return -1;
			max = idx;
			while (isblank(*end))
				end++;
		}

		if (*end != ':')
			return -1;

		for (idx = RTE_MIN(min, max);
		     idx <= RTE_MAX(min, max); idx++)
			devargs->proto_xtr[idx] = xtr_type;

		return 0;
	}

	/* process set within bracket */
	str++;
	while (isblank(*str))
		str++;
	if (*str == '\0')
		return -1;

	min = ICE_MAX_QUEUE_NUM;
	do {
		/* go ahead to the first digit */
		while (isblank(*str))
			str++;
		if (!isdigit(*str))
			return -1;

		/* get the digit value */
		errno = 0;
		idx = strtoul(str, &end, 10);
		if (errno || end == NULL || idx >= ICE_MAX_QUEUE_NUM)
			return -1;

		/* go ahead to separator '-', ',' and ')' */
		while (isblank(*end))
			end++;
		if (*end == '-') {
			if (min == ICE_MAX_QUEUE_NUM)
				min = idx;
			else /* avoid continuous '-' */
				return -1;
		} else if (*end == ',' || *end == ')') {
			max = idx;
			if (min == ICE_MAX_QUEUE_NUM)
				min = idx;

			for (idx = RTE_MIN(min, max);
			     idx <= RTE_MAX(min, max); idx++)
				devargs->proto_xtr[idx] = xtr_type;

			min = ICE_MAX_QUEUE_NUM;
		} else {
			return -1;
		}

		str = end + 1;
	} while (*end != ')' && *end != '\0');

	return 0;
}
static int
parse_queue_proto_xtr(const char *queues, struct ice_devargs *devargs)
{
	const char *queue_start;
	uint32_t idx;
	int xtr_type;
	char xtr_name[32];

	while (isblank(*queues))
		queues++;

	if (*queues != '[') {
		xtr_type = lookup_proto_xtr_type(queues);
		if (xtr_type < 0)
			return -1;

		devargs->proto_xtr_dflt = xtr_type;

		return 0;
	}

	queues++;
	do {
		while (isblank(*queues))
			queues++;
		if (*queues == '\0')
			return -1;

		queue_start = queues;

		/* go across a complete bracket */
		if (*queue_start == '(') {
			queues += strcspn(queues, ")");
			if (*queues != ')')
				return -1;
		}

		/* scan the separator ':' */
		queues += strcspn(queues, ":");
		if (*queues++ != ':')
			return -1;
		while (isblank(*queues))
			queues++;

		for (idx = 0; ; idx++) {
			if (isblank(queues[idx]) ||
			    queues[idx] == ',' ||
			    queues[idx] == ']' ||
			    queues[idx] == '\0')
				break;

			if (idx > sizeof(xtr_name) - 2)
				return -1;

			xtr_name[idx] = queues[idx];
		}
		xtr_name[idx] = '\0';
		xtr_type = lookup_proto_xtr_type(xtr_name);
		if (xtr_type < 0)
			return -1;

		queues += idx;

		while (isblank(*queues) || *queues == ',' || *queues == ']')
			queues++;

		if (parse_queue_set(queue_start, xtr_type, devargs) < 0)
			return -1;
	} while (*queues != '\0');

	return 0;
}
static int
handle_proto_xtr_arg(__rte_unused const char *key, const char *value,
		     void *extra_args)
{
	struct ice_devargs *devargs = extra_args;

	if (value == NULL || extra_args == NULL)
		return -EINVAL;

	if (parse_queue_proto_xtr(value, devargs) < 0) {
		PMD_DRV_LOG(ERR,
			    "The protocol extraction parameter is invalid: '%s'",
			    value);
		return -1;
	}

	return 0;
}
static void
ice_check_proto_xtr_support(struct ice_hw *hw)
{
#define FLX_REG(val, fld, idx) \
	(((val) & GLFLXP_RXDID_FLX_WRD_##idx##_##fld##_M) >> \
	 GLFLXP_RXDID_FLX_WRD_##idx##_##fld##_S)
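/*
 * Example expansion (illustrative): FLX_REG(v, PROT_MDID, 4) becomes
 *   ((v & GLFLXP_RXDID_FLX_WRD_4_PROT_MDID_M) >>
 *    GLFLXP_RXDID_FLX_WRD_4_PROT_MDID_S)
 * i.e. it extracts the protocol MDID field of flexible descriptor
 * word 4 for the given RXDID.
 */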
	static struct {
		uint32_t rxdid;
		uint8_t opcode;
		uint8_t protid_0;
		uint8_t protid_1;
	} xtr_sets[] = {
		[PROTO_XTR_VLAN] = { ICE_RXDID_COMMS_AUX_VLAN,
				     ICE_RX_OPC_EXTRACT,
				     ICE_PROT_EVLAN_O, ICE_PROT_VLAN_O},
		[PROTO_XTR_IPV4] = { ICE_RXDID_COMMS_AUX_IPV4,
				     ICE_RX_OPC_EXTRACT,
				     ICE_PROT_IPV4_OF_OR_S,
				     ICE_PROT_IPV4_OF_OR_S },
		[PROTO_XTR_IPV6] = { ICE_RXDID_COMMS_AUX_IPV6,
				     ICE_RX_OPC_EXTRACT,
				     ICE_PROT_IPV6_OF_OR_S,
				     ICE_PROT_IPV6_OF_OR_S },
		[PROTO_XTR_IPV6_FLOW] = { ICE_RXDID_COMMS_AUX_IPV6_FLOW,
					  ICE_RX_OPC_EXTRACT,
					  ICE_PROT_IPV6_OF_OR_S,
					  ICE_PROT_IPV6_OF_OR_S },
		[PROTO_XTR_TCP] = { ICE_RXDID_COMMS_AUX_TCP,
				    ICE_RX_OPC_EXTRACT,
				    ICE_PROT_TCP_IL, ICE_PROT_ID_INVAL },
		[PROTO_XTR_IP_OFFSET] = { ICE_RXDID_COMMS_AUX_IP_OFFSET,
					  ICE_RX_OPC_PROTID,
					  ICE_PROT_IPV4_OF_OR_S,
					  ICE_PROT_IPV6_OF_OR_S },
	};
	uint32_t i;

	for (i = 0; i < RTE_DIM(xtr_sets); i++) {
		uint32_t rxdid = xtr_sets[i].rxdid;
		uint32_t v;

		if (xtr_sets[i].protid_0 != ICE_PROT_ID_INVAL) {
			v = ICE_READ_REG(hw, GLFLXP_RXDID_FLX_WRD_4(rxdid));

			if (FLX_REG(v, PROT_MDID, 4) == xtr_sets[i].protid_0 &&
			    FLX_REG(v, RXDID_OPCODE, 4) == xtr_sets[i].opcode)
				ice_proto_xtr_hw_support[i] = true;
		}

		if (xtr_sets[i].protid_1 != ICE_PROT_ID_INVAL) {
			v = ICE_READ_REG(hw, GLFLXP_RXDID_FLX_WRD_5(rxdid));

			if (FLX_REG(v, PROT_MDID, 5) == xtr_sets[i].protid_1 &&
			    FLX_REG(v, RXDID_OPCODE, 5) == xtr_sets[i].opcode)
				ice_proto_xtr_hw_support[i] = true;
		}
	}
}
static int
ice_res_pool_init(struct ice_res_pool_info *pool, uint32_t base,
		  uint32_t num)
{
	struct pool_entry *entry;

	if (!pool || !num) {
		PMD_INIT_LOG(ERR, "Invalid parameter");
		return -EINVAL;
	}

	entry = rte_zmalloc(NULL, sizeof(*entry), 0);
	if (!entry) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory for resource pool");
		return -ENOMEM;
	}

	/* initialize the queue heap */
	pool->num_free = num;
	pool->num_alloc = 0;
	pool->base = base;
	LIST_INIT(&pool->alloc_list);
	LIST_INIT(&pool->free_list);

	/* Initialize element */
	entry->base = 0;
	entry->len = num;

	LIST_INSERT_HEAD(&pool->free_list, entry, next);
	return 0;
}
static int
ice_res_pool_alloc(struct ice_res_pool_info *pool,
		   uint16_t num)
{
	struct pool_entry *entry, *valid_entry;

	if (!pool || !num) {
		PMD_INIT_LOG(ERR, "Invalid parameter");
		return -EINVAL;
	}

	if (pool->num_free < num) {
		PMD_INIT_LOG(ERR, "No resource. ask:%u, available:%u",
			     num, pool->num_free);
		return -ENOMEM;
	}

	valid_entry = NULL;
	/* Look through the free list and find the best-fit entry */
	LIST_FOREACH(entry, &pool->free_list, next) {
		if (entry->len >= num) {
			/* Found an exact fit */
			if (entry->len == num) {
				valid_entry = entry;
				break;
			}
			if (!valid_entry ||
			    valid_entry->len > entry->len)
				valid_entry = entry;
		}
	}

	/* No entry found to satisfy the request, return */
	if (!valid_entry) {
		PMD_INIT_LOG(ERR, "No valid entry found");
		return -ENOMEM;
	}

	/*
	 * The entry has exactly the requested number of queues;
	 * remove it from the free list.
	 */
	if (valid_entry->len == num) {
		LIST_REMOVE(valid_entry, next);
	} else {
		/*
		 * The entry has more queues than requested; create a
		 * new entry for the alloc list and subtract the
		 * allocated base and count from the free-list entry.
		 */
		entry = rte_zmalloc(NULL, sizeof(*entry), 0);
		if (!entry) {
			PMD_INIT_LOG(ERR,
				     "Failed to allocate memory for "
				     "resource pool");
			return -ENOMEM;
		}
		entry->base = valid_entry->base;
		entry->len = num;
		valid_entry->base += num;
		valid_entry->len -= num;
		valid_entry = entry;
	}

	/* Insert it into alloc list, not sorted */
	LIST_INSERT_HEAD(&pool->alloc_list, valid_entry, next);

	pool->num_free -= valid_entry->len;
	pool->num_alloc += valid_entry->len;

	return valid_entry->base + pool->base;
}
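/*
 * Usage sketch (illustrative): after ice_res_pool_init(&pool, 1, 16),
 * ice_res_pool_alloc(&pool, 4) splits the 16-entry free block, moves a
 * 4-entry block [0..3] to the alloc list, and returns 0 + pool->base,
 * i.e. 1. Callers treat a negative return value as failure.
 */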
static void
ice_res_pool_destroy(struct ice_res_pool_info *pool)
{
	struct pool_entry *entry, *next_entry;

	if (!pool)
		return;

	for (entry = LIST_FIRST(&pool->alloc_list);
	     entry && (next_entry = LIST_NEXT(entry, next), 1);
	     entry = next_entry) {
		LIST_REMOVE(entry, next);
		rte_free(entry);
	}

	for (entry = LIST_FIRST(&pool->free_list);
	     entry && (next_entry = LIST_NEXT(entry, next), 1);
	     entry = next_entry) {
		LIST_REMOVE(entry, next);
		rte_free(entry);
	}

	pool->num_free = 0;
	pool->num_alloc = 0;
	pool->base = 0;
	LIST_INIT(&pool->alloc_list);
	LIST_INIT(&pool->free_list);
}
static void
ice_vsi_config_default_rss(struct ice_aqc_vsi_props *info)
{
	/* Set VSI LUT selection */
	info->q_opt_rss = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI &
			  ICE_AQ_VSI_Q_OPT_RSS_LUT_M;
	/* Set Hash scheme */
	info->q_opt_rss |= ICE_AQ_VSI_Q_OPT_RSS_TPLZ &
			   ICE_AQ_VSI_Q_OPT_RSS_HASH_M;

	/* Enable TC */
	info->q_opt_tc = ICE_AQ_VSI_Q_OPT_TC_OVR_M;
}
static enum ice_status
ice_vsi_config_tc_queue_mapping(struct ice_vsi *vsi,
				struct ice_aqc_vsi_props *info,
				uint8_t enabled_tcmap)
{
	uint16_t bsf, qp_idx;

	/* Default is TC0 only for now; multi-TC support needs to be
	 * added later. Configure TC and queue mapping parameters: for
	 * each enabled TC, allocate qpnum_per_tc queues to that traffic.
	 */
	if (enabled_tcmap != 0x01) {
		PMD_INIT_LOG(ERR, "only TC0 is supported");
		return -ENOTSUP;
	}

	vsi->nb_qps = RTE_MIN(vsi->nb_qps, ICE_MAX_Q_PER_TC);
	bsf = rte_bsf32(vsi->nb_qps);
	/* Adjust the queue number to the actual queues that can be applied */
	vsi->nb_qps = 0x1 << bsf;

	qp_idx = 0;
	/* Set tc and queue mapping with VSI */
	info->tc_mapping[0] = rte_cpu_to_le_16((qp_idx <<
						ICE_AQ_VSI_TC_Q_OFFSET_S) |
					       (bsf << ICE_AQ_VSI_TC_Q_NUM_S));

	/* Associate queue number with VSI */
	info->mapping_flags |= rte_cpu_to_le_16(ICE_AQ_VSI_Q_MAP_CONTIG);
	info->q_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
	info->q_mapping[1] = rte_cpu_to_le_16(vsi->nb_qps);
	info->valid_sections |=
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
	/* Set the info.ingress_table and info.egress_table
	 * for UP translate table. Now just set it to 1:1 map by default
	 * -- 0b 111 110 101 100 011 010 001 000 == 0xFAC688
	 */
#define ICE_TC_QUEUE_TABLE_DFLT 0x00FAC688
	info->ingress_table  = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT);
	info->egress_table   = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT);
	info->outer_up_table = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT);
	return 0;
}
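/*
 * Note on the rounding in ice_vsi_config_tc_queue_mapping() (illustrative):
 * rte_bsf32() returns the index of the least significant set bit, so a
 * non-power-of-two queue count collapses to its lowest set bit, e.g.
 * vsi->nb_qps = 6 (0b110) gives bsf = 1 and nb_qps = 1 << 1 = 2.
 */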
static int
ice_init_mac_address(struct rte_eth_dev *dev)
{
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (!rte_is_unicast_ether_addr
		((struct rte_ether_addr *)hw->port_info[0].mac.lan_addr)) {
		PMD_INIT_LOG(ERR, "Invalid MAC address");
		return -EINVAL;
	}

	rte_ether_addr_copy(
		(struct rte_ether_addr *)hw->port_info[0].mac.lan_addr,
		(struct rte_ether_addr *)hw->port_info[0].mac.perm_addr);

	dev->data->mac_addrs =
		rte_zmalloc(NULL, sizeof(struct rte_ether_addr), 0);
	if (!dev->data->mac_addrs) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory to store mac address");
		return -ENOMEM;
	}
	/* store it in dev data */
	rte_ether_addr_copy(
		(struct rte_ether_addr *)hw->port_info[0].mac.perm_addr,
		&dev->data->mac_addrs[0]);
	return 0;
}
/* Look up a specific MAC filter */
static struct ice_mac_filter *
ice_find_mac_filter(struct ice_vsi *vsi, struct rte_ether_addr *macaddr)
{
	struct ice_mac_filter *f;

	TAILQ_FOREACH(f, &vsi->mac_list, next) {
		if (rte_is_same_ether_addr(macaddr, &f->mac_info.mac_addr))
			return f;
	}

	return NULL;
}
static int
ice_add_mac_filter(struct ice_vsi *vsi, struct rte_ether_addr *mac_addr)
{
	struct ice_fltr_list_entry *m_list_itr = NULL;
	struct ice_mac_filter *f;
	struct LIST_HEAD_TYPE list_head;
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	int ret = 0;

	/* If it's added and configured, return */
	f = ice_find_mac_filter(vsi, mac_addr);
	if (f) {
		PMD_DRV_LOG(INFO, "This MAC filter already exists.");
		return 0;
	}

	INIT_LIST_HEAD(&list_head);

	m_list_itr = (struct ice_fltr_list_entry *)
		ice_malloc(hw, sizeof(*m_list_itr));
	if (!m_list_itr) {
		ret = -ENOMEM;
		goto DONE;
	}
	ice_memcpy(m_list_itr->fltr_info.l_data.mac.mac_addr,
		   mac_addr, ETH_ALEN, ICE_NONDMA_TO_NONDMA);
	m_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
	m_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
	m_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_MAC;
	m_list_itr->fltr_info.flag = ICE_FLTR_TX;
	m_list_itr->fltr_info.vsi_handle = vsi->idx;

	LIST_ADD(&m_list_itr->list_entry, &list_head);

	/* Add the mac */
	ret = ice_add_mac(hw, &list_head);
	if (ret != ICE_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to add MAC filter");
		ret = -EINVAL;
		goto DONE;
	}
	/* Add the mac addr into mac list */
	f = rte_zmalloc(NULL, sizeof(*f), 0);
	if (!f) {
		PMD_DRV_LOG(ERR, "failed to allocate memory");
		ret = -ENOMEM;
		goto DONE;
	}
	rte_ether_addr_copy(mac_addr, &f->mac_info.mac_addr);
	TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
	vsi->mac_num++;

	ret = 0;

DONE:
	rte_free(m_list_itr);
	return ret;
}
static int
ice_remove_mac_filter(struct ice_vsi *vsi, struct rte_ether_addr *mac_addr)
{
	struct ice_fltr_list_entry *m_list_itr = NULL;
	struct ice_mac_filter *f;
	struct LIST_HEAD_TYPE list_head;
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	int ret = 0;

	/* Can't find it, return an error */
	f = ice_find_mac_filter(vsi, mac_addr);
	if (!f)
		return -EINVAL;

	INIT_LIST_HEAD(&list_head);

	m_list_itr = (struct ice_fltr_list_entry *)
		ice_malloc(hw, sizeof(*m_list_itr));
	if (!m_list_itr) {
		ret = -ENOMEM;
		goto DONE;
	}
	ice_memcpy(m_list_itr->fltr_info.l_data.mac.mac_addr,
		   mac_addr, ETH_ALEN, ICE_NONDMA_TO_NONDMA);
	m_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
	m_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
	m_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_MAC;
	m_list_itr->fltr_info.flag = ICE_FLTR_TX;
	m_list_itr->fltr_info.vsi_handle = vsi->idx;

	LIST_ADD(&m_list_itr->list_entry, &list_head);

	/* remove the mac filter */
	ret = ice_remove_mac(hw, &list_head);
	if (ret != ICE_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to remove MAC filter");
		ret = -EINVAL;
		goto DONE;
	}

	/* Remove the mac addr from mac list */
	TAILQ_REMOVE(&vsi->mac_list, f, next);
	rte_free(f);
	vsi->mac_num--;

	ret = 0;
DONE:
	rte_free(m_list_itr);
	return ret;
}
/* Look up a specific VLAN filter */
static struct ice_vlan_filter *
ice_find_vlan_filter(struct ice_vsi *vsi, uint16_t vlan_id)
{
	struct ice_vlan_filter *f;

	TAILQ_FOREACH(f, &vsi->vlan_list, next) {
		if (vlan_id == f->vlan_info.vlan_id)
			return f;
	}

	return NULL;
}
static int
ice_add_vlan_filter(struct ice_vsi *vsi, uint16_t vlan_id)
{
	struct ice_fltr_list_entry *v_list_itr = NULL;
	struct ice_vlan_filter *f;
	struct LIST_HEAD_TYPE list_head;
	struct ice_hw *hw;
	int ret = 0;

	if (!vsi || vlan_id > RTE_ETHER_MAX_VLAN_ID)
		return -EINVAL;

	hw = ICE_VSI_TO_HW(vsi);

	/* If it's added and configured, return. */
	f = ice_find_vlan_filter(vsi, vlan_id);
	if (f) {
		PMD_DRV_LOG(INFO, "This VLAN filter already exists.");
		return 0;
	}

	if (!vsi->vlan_anti_spoof_on && !vsi->vlan_filter_on)
		return 0;

	INIT_LIST_HEAD(&list_head);

	v_list_itr = (struct ice_fltr_list_entry *)
		      ice_malloc(hw, sizeof(*v_list_itr));
	if (!v_list_itr) {
		ret = -ENOMEM;
		goto DONE;
	}
	v_list_itr->fltr_info.l_data.vlan.vlan_id = vlan_id;
	v_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
	v_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
	v_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
	v_list_itr->fltr_info.flag = ICE_FLTR_TX;
	v_list_itr->fltr_info.vsi_handle = vsi->idx;

	LIST_ADD(&v_list_itr->list_entry, &list_head);

	/* Add the vlan */
	ret = ice_add_vlan(hw, &list_head);
	if (ret != ICE_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to add VLAN filter");
		ret = -EINVAL;
		goto DONE;
	}

	/* Add vlan into vlan list */
	f = rte_zmalloc(NULL, sizeof(*f), 0);
	if (!f) {
		PMD_DRV_LOG(ERR, "failed to allocate memory");
		ret = -ENOMEM;
		goto DONE;
	}
	f->vlan_info.vlan_id = vlan_id;
	TAILQ_INSERT_TAIL(&vsi->vlan_list, f, next);
	vsi->vlan_num++;

	ret = 0;

DONE:
	rte_free(v_list_itr);
	return ret;
}
static int
ice_remove_vlan_filter(struct ice_vsi *vsi, uint16_t vlan_id)
{
	struct ice_fltr_list_entry *v_list_itr = NULL;
	struct ice_vlan_filter *f;
	struct LIST_HEAD_TYPE list_head;
	struct ice_hw *hw;
	int ret = 0;

	/*
	 * VLAN 0 is the generic filter for untagged packets
	 * and can't be removed.
	 */
	if (!vsi || vlan_id == 0 || vlan_id > RTE_ETHER_MAX_VLAN_ID)
		return -EINVAL;

	hw = ICE_VSI_TO_HW(vsi);

	/* Can't find it, return an error */
	f = ice_find_vlan_filter(vsi, vlan_id);
	if (!f)
		return -EINVAL;

	INIT_LIST_HEAD(&list_head);

	v_list_itr = (struct ice_fltr_list_entry *)
		      ice_malloc(hw, sizeof(*v_list_itr));
	if (!v_list_itr) {
		ret = -ENOMEM;
		goto DONE;
	}

	v_list_itr->fltr_info.l_data.vlan.vlan_id = vlan_id;
	v_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
	v_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
	v_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
	v_list_itr->fltr_info.flag = ICE_FLTR_TX;
	v_list_itr->fltr_info.vsi_handle = vsi->idx;

	LIST_ADD(&v_list_itr->list_entry, &list_head);

	/* remove the vlan filter */
	ret = ice_remove_vlan(hw, &list_head);
	if (ret != ICE_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to remove VLAN filter");
		ret = -EINVAL;
		goto DONE;
	}

	/* Remove the vlan id from vlan list */
	TAILQ_REMOVE(&vsi->vlan_list, f, next);
	rte_free(f);
	vsi->vlan_num--;

	ret = 0;
DONE:
	rte_free(v_list_itr);
	return ret;
}
static int
ice_remove_all_mac_vlan_filters(struct ice_vsi *vsi)
{
	struct ice_mac_filter *m_f;
	struct ice_vlan_filter *v_f;
	int ret = 0;

	if (!vsi || !vsi->mac_num)
		return -EINVAL;

	TAILQ_FOREACH(m_f, &vsi->mac_list, next) {
		ret = ice_remove_mac_filter(vsi, &m_f->mac_info.mac_addr);
		if (ret != ICE_SUCCESS) {
			ret = -EINVAL;
			goto DONE;
		}
	}

	if (vsi->vlan_num == 0)
		return 0;

	TAILQ_FOREACH(v_f, &vsi->vlan_list, next) {
		ret = ice_remove_vlan_filter(vsi, v_f->vlan_info.vlan_id);
		if (ret != ICE_SUCCESS) {
			ret = -EINVAL;
			goto DONE;
		}
	}

DONE:
	return ret;
}
static int
ice_vsi_config_qinq_insertion(struct ice_vsi *vsi, bool on)
{
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	struct ice_vsi_ctx ctxt;
	uint8_t qinq_flags;
	int ret = 0;

	/* Check if it is already on or off */
	if (vsi->info.valid_sections &
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID)) {
		if (on) {
			if ((vsi->info.outer_tag_flags &
			     ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST) ==
			    ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST)
				return 0; /* already on */
		} else {
			if (!(vsi->info.outer_tag_flags &
			      ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST))
				return 0; /* already off */
		}
	}

	if (on)
		qinq_flags = ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST;
	else
		qinq_flags = 0;
	/* clear global insertion and use per packet insertion */
	vsi->info.outer_tag_flags &= ~(ICE_AQ_VSI_OUTER_TAG_INSERT);
	vsi->info.outer_tag_flags &= ~(ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST);
	vsi->info.outer_tag_flags |= qinq_flags;
	/* use default outer vlan type 0x9100 */
	vsi->info.outer_tag_flags &= ~(ICE_AQ_VSI_OUTER_TAG_TYPE_M);
	vsi->info.outer_tag_flags |= ICE_DFLT_OUTER_TAG_TYPE <<
				     ICE_AQ_VSI_OUTER_TAG_TYPE_S;
	(void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
	ctxt.info.valid_sections =
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);
	ctxt.vsi_num = vsi->vsi_id;
	ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
	if (ret) {
		PMD_DRV_LOG(INFO,
			    "Update VSI failed to %s qinq insertion",
			    on ? "enable" : "disable");
		return -EINVAL;
	}

	vsi->info.valid_sections |=
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);

	return ret;
}
static int
ice_vsi_config_qinq_stripping(struct ice_vsi *vsi, bool on)
{
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	struct ice_vsi_ctx ctxt;
	uint8_t qinq_flags;
	int ret = 0;

	/* Check if it is already on or off */
	if (vsi->info.valid_sections &
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID)) {
		if (on) {
			if ((vsi->info.outer_tag_flags &
			     ICE_AQ_VSI_OUTER_TAG_MODE_M) ==
			    ICE_AQ_VSI_OUTER_TAG_COPY)
				return 0; /* already on */
		} else {
			if ((vsi->info.outer_tag_flags &
			     ICE_AQ_VSI_OUTER_TAG_MODE_M) ==
			    ICE_AQ_VSI_OUTER_TAG_NOTHING)
				return 0; /* already off */
		}
	}

	if (on)
		qinq_flags = ICE_AQ_VSI_OUTER_TAG_COPY;
	else
		qinq_flags = ICE_AQ_VSI_OUTER_TAG_NOTHING;
	vsi->info.outer_tag_flags &= ~(ICE_AQ_VSI_OUTER_TAG_MODE_M);
	vsi->info.outer_tag_flags |= qinq_flags;
	/* use default outer vlan type 0x9100 */
	vsi->info.outer_tag_flags &= ~(ICE_AQ_VSI_OUTER_TAG_TYPE_M);
	vsi->info.outer_tag_flags |= ICE_DFLT_OUTER_TAG_TYPE <<
				     ICE_AQ_VSI_OUTER_TAG_TYPE_S;
	(void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
	ctxt.info.valid_sections =
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);
	ctxt.vsi_num = vsi->vsi_id;
	ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
	if (ret) {
		PMD_DRV_LOG(INFO,
			    "Update VSI failed to %s qinq stripping",
			    on ? "enable" : "disable");
		return -EINVAL;
	}

	vsi->info.valid_sections |=
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);

	return ret;
}
static int
ice_vsi_config_double_vlan(struct ice_vsi *vsi, int on)
{
	int ret;

	ret = ice_vsi_config_qinq_stripping(vsi, on);
	if (ret)
		PMD_DRV_LOG(ERR, "Failed to set qinq stripping - %d", ret);

	ret = ice_vsi_config_qinq_insertion(vsi, on);
	if (ret)
		PMD_DRV_LOG(ERR, "Failed to set qinq insertion - %d", ret);

	return ret;
}
static void
ice_pf_enable_irq0(struct ice_hw *hw)
{
	/* reset the registers */
	ICE_WRITE_REG(hw, PFINT_OICR_ENA, 0);
	ICE_READ_REG(hw, PFINT_OICR);

#ifdef ICE_LSE_SUPPORT
	/* Enable all interrupt types except LINK_STAT_CHANGE */
	ICE_WRITE_REG(hw, PFINT_OICR_ENA,
		      (uint32_t)(PFINT_OICR_ENA_INT_ENA_M &
				 (~PFINT_OICR_LINK_STAT_CHANGE_M)));

	ICE_WRITE_REG(hw, PFINT_OICR_CTL,
		      (0 & PFINT_OICR_CTL_MSIX_INDX_M) |
		      ((0 << PFINT_OICR_CTL_ITR_INDX_S) &
		       PFINT_OICR_CTL_ITR_INDX_M) |
		      PFINT_OICR_CTL_CAUSE_ENA_M);

	/* enable FW related interrupts */
	ICE_WRITE_REG(hw, PFINT_FW_CTL,
		      (0 & PFINT_FW_CTL_MSIX_INDX_M) |
		      ((0 << PFINT_FW_CTL_ITR_INDX_S) &
		       PFINT_FW_CTL_ITR_INDX_M) |
		      PFINT_FW_CTL_CAUSE_ENA_M);
#else
	ICE_WRITE_REG(hw, PFINT_OICR_ENA, PFINT_OICR_ENA_INT_ENA_M);
#endif

	/* Enable interrupt vector 0 */
	ICE_WRITE_REG(hw, GLINT_DYN_CTL(0),
		      GLINT_DYN_CTL_INTENA_M |
		      GLINT_DYN_CTL_CLEARPBA_M |
		      GLINT_DYN_CTL_ITR_INDX_M);
}
static void
ice_pf_disable_irq0(struct ice_hw *hw)
{
	/* Disable all interrupt types */
	ICE_WRITE_REG(hw, GLINT_DYN_CTL(0), GLINT_DYN_CTL_WB_ON_ITR_M);
}
static void
ice_handle_aq_msg(struct rte_eth_dev *dev)
{
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_ctl_q_info *cq = &hw->adminq;
	struct ice_rq_event_info event;
	uint16_t pending, opcode;
	int ret;

	event.buf_len = ICE_AQ_MAX_BUF_LEN;
	event.msg_buf = rte_zmalloc(NULL, event.buf_len, 0);
	if (!event.msg_buf) {
		PMD_DRV_LOG(ERR, "Failed to allocate mem");
		return;
	}

	pending = 1;
	while (pending) {
		ret = ice_clean_rq_elem(hw, cq, &event, &pending);

		if (ret != ICE_SUCCESS) {
			PMD_DRV_LOG(INFO,
				    "Failed to read msg from AdminQ, "
				    "adminq_err: %u",
				    hw->adminq.sq_last_status);
			break;
		}
		opcode = rte_le_to_cpu_16(event.desc.opcode);

		switch (opcode) {
		case ice_aqc_opc_get_link_status:
			ret = ice_link_update(dev, 0);
			if (!ret)
				rte_eth_dev_callback_process
					(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
			break;
		default:
			PMD_DRV_LOG(DEBUG, "Request %u is not supported yet",
				    opcode);
			break;
		}
	}
	rte_free(event.msg_buf);
}
/**
 * Interrupt handler triggered by the NIC for handling a
 * specific interrupt.
 *
 * @param param
 *  The address of the parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
ice_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t oicr;
	uint32_t reg;
	uint8_t pf_num;
	uint8_t event;
	uint16_t queue;
	int ret;
#ifdef ICE_LSE_SUPPORT
	uint32_t int_fw_ctl;
#endif

	/* Disable interrupt */
	ice_pf_disable_irq0(hw);

	/* read out interrupt causes */
	oicr = ICE_READ_REG(hw, PFINT_OICR);
#ifdef ICE_LSE_SUPPORT
	int_fw_ctl = ICE_READ_REG(hw, PFINT_FW_CTL);
#endif

	/* No interrupt event indicated */
	if (!(oicr & PFINT_OICR_INTEVENT_M)) {
		PMD_DRV_LOG(INFO, "No interrupt event");
		goto done;
	}

#ifdef ICE_LSE_SUPPORT
	if (int_fw_ctl & PFINT_FW_CTL_INTEVENT_M) {
		PMD_DRV_LOG(INFO, "FW_CTL: link state change event");
		ice_handle_aq_msg(dev);
	}
#else
	if (oicr & PFINT_OICR_LINK_STAT_CHANGE_M) {
		PMD_DRV_LOG(INFO, "OICR: link state change event");
		ret = ice_link_update(dev, 0);
		if (!ret)
			rte_eth_dev_callback_process
				(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
	}
#endif

	if (oicr & PFINT_OICR_MAL_DETECT_M) {
		PMD_DRV_LOG(WARNING, "OICR: MDD event");
		reg = ICE_READ_REG(hw, GL_MDET_TX_PQM);
		if (reg & GL_MDET_TX_PQM_VALID_M) {
			pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >>
				 GL_MDET_TX_PQM_PF_NUM_S;
			event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >>
				GL_MDET_TX_PQM_MAL_TYPE_S;
			queue = (reg & GL_MDET_TX_PQM_QNUM_M) >>
				GL_MDET_TX_PQM_QNUM_S;

			PMD_DRV_LOG(WARNING, "Malicious Driver Detection event "
				    "%d by PQM on TX queue %d PF# %d",
				    event, queue, pf_num);
		}

		reg = ICE_READ_REG(hw, GL_MDET_TX_TCLAN);
		if (reg & GL_MDET_TX_TCLAN_VALID_M) {
			pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >>
				 GL_MDET_TX_TCLAN_PF_NUM_S;
			event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >>
				GL_MDET_TX_TCLAN_MAL_TYPE_S;
			queue = (reg & GL_MDET_TX_TCLAN_QNUM_M) >>
				GL_MDET_TX_TCLAN_QNUM_S;

			PMD_DRV_LOG(WARNING, "Malicious Driver Detection event "
				    "%d by TCLAN on TX queue %d PF# %d",
				    event, queue, pf_num);
		}
	}
done:
	/* Enable interrupt */
	ice_pf_enable_irq0(hw);
	rte_intr_ack(dev->intr_handle);
}
static void
ice_init_proto_xtr(struct rte_eth_dev *dev)
{
	struct ice_adapter *ad =
		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	const struct proto_xtr_ol_flag *ol_flag;
	bool proto_xtr_enable = false;
	int offset;
	uint16_t i;

	pf->proto_xtr = rte_zmalloc(NULL, pf->lan_nb_qps, 0);
	if (unlikely(pf->proto_xtr == NULL)) {
		PMD_DRV_LOG(ERR, "No memory for setting up protocol extraction table");
		return;
	}

	for (i = 0; i < pf->lan_nb_qps; i++) {
		pf->proto_xtr[i] = ad->devargs.proto_xtr[i] != PROTO_XTR_NONE ?
				   ad->devargs.proto_xtr[i] :
				   ad->devargs.proto_xtr_dflt;

		if (pf->proto_xtr[i] != PROTO_XTR_NONE) {
			uint8_t type = pf->proto_xtr[i];

			ice_proto_xtr_ol_flag_params[type].required = true;
			proto_xtr_enable = true;
		}
	}

	if (likely(!proto_xtr_enable))
		return;

	ice_check_proto_xtr_support(hw);

	offset = rte_mbuf_dynfield_register(&ice_proto_xtr_metadata_param);
	if (unlikely(offset == -1)) {
		PMD_DRV_LOG(ERR,
			    "Protocol extraction metadata is disabled in mbuf with error %d",
			    -rte_errno);
		return;
	}

	PMD_DRV_LOG(DEBUG,
		    "Protocol extraction metadata offset in mbuf is: %d",
		    offset);
	rte_net_ice_dynfield_proto_xtr_metadata_offs = offset;

	for (i = 0; i < RTE_DIM(ice_proto_xtr_ol_flag_params); i++) {
		ol_flag = &ice_proto_xtr_ol_flag_params[i];

		if (!ol_flag->required)
			continue;

		if (!ice_proto_xtr_hw_support[i]) {
			PMD_DRV_LOG(ERR,
				    "Protocol extraction type %u is not supported in hardware",
				    i);
			rte_net_ice_dynfield_proto_xtr_metadata_offs = -1;
			break;
		}

		offset = rte_mbuf_dynflag_register(&ol_flag->param);
		if (unlikely(offset == -1)) {
			PMD_DRV_LOG(ERR,
				    "Protocol extraction offload '%s' failed to register with error %d",
				    ol_flag->param.name, -rte_errno);

			rte_net_ice_dynfield_proto_xtr_metadata_offs = -1;
			break;
		}

		PMD_DRV_LOG(DEBUG,
			    "Protocol extraction offload '%s' offset in mbuf is: %d",
			    ol_flag->param.name, offset);
		*ol_flag->ol_flag = 1ULL << offset;
	}
}
/* Initialize SW parameters of PF */
static int
ice_pf_sw_init(struct rte_eth_dev *dev)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_hw *hw = ICE_PF_TO_HW(pf);

	pf->lan_nb_qp_max =
		(uint16_t)RTE_MIN(hw->func_caps.common_cap.num_txq,
				  hw->func_caps.common_cap.num_rxq);

	pf->lan_nb_qps = pf->lan_nb_qp_max;

	ice_init_proto_xtr(dev);

	if (hw->func_caps.fd_fltr_guar > 0 ||
	    hw->func_caps.fd_fltr_best_effort > 0) {
		pf->flags |= ICE_FLAG_FDIR;
		pf->fdir_nb_qps = ICE_DEFAULT_QP_NUM_FDIR;
		pf->lan_nb_qps = pf->lan_nb_qp_max - pf->fdir_nb_qps;
	} else {
		pf->fdir_nb_qps = 0;
	}
	pf->fdir_qp_offset = 0;

	return 0;
}
static struct ice_vsi *
ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_vsi *vsi = NULL;
	struct ice_vsi_ctx vsi_ctx;
	int ret;
	struct rte_ether_addr broadcast = {
		.addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff} };
	struct rte_ether_addr mac_addr;
	uint16_t max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
	uint8_t tc_bitmap = 0x1;
	uint16_t cfg;

	/* hw->num_lports = 1 in NIC mode */
	vsi = rte_zmalloc(NULL, sizeof(struct ice_vsi), 0);
	if (!vsi)
		return NULL;

	vsi->idx = pf->next_vsi_idx;
	pf->next_vsi_idx++;
	vsi->type = type;
	vsi->adapter = ICE_PF_TO_ADAPTER(pf);
	vsi->max_macaddrs = ICE_NUM_MACADDR_MAX;
	vsi->vlan_anti_spoof_on = 0;
	vsi->vlan_filter_on = 1;
	TAILQ_INIT(&vsi->mac_list);
	TAILQ_INIT(&vsi->vlan_list);

	/* Keep in sync with the ETH_RSS_RETA_SIZE_x maximum value definition */
	pf->hash_lut_size = hw->func_caps.common_cap.rss_table_size >
			ETH_RSS_RETA_SIZE_512 ? ETH_RSS_RETA_SIZE_512 :
			hw->func_caps.common_cap.rss_table_size;
	pf->flags |= ICE_FLAG_RSS_AQ_CAPABLE;

	memset(&vsi_ctx, 0, sizeof(vsi_ctx));
	switch (type) {
	case ICE_VSI_PF:
		vsi->nb_qps = pf->lan_nb_qps;
		vsi->base_queue = 1;
		ice_vsi_config_default_rss(&vsi_ctx.info);
		vsi_ctx.alloc_from_pool = true;
		vsi_ctx.flags = ICE_AQ_VSI_TYPE_PF;
		/* switch_id is queried by get_switch_config aq, which is done
		 * by ice_init_hw
		 */
		vsi_ctx.info.sw_id = hw->port_info->sw_id;
		vsi_ctx.info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
		/* Allow all untagged or tagged packets */
		vsi_ctx.info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL;
		vsi_ctx.info.vlan_flags |= ICE_AQ_VSI_VLAN_EMOD_NOTHING;
		vsi_ctx.info.q_opt_rss = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF |
					 ICE_AQ_VSI_Q_OPT_RSS_TPLZ;

		/* FDIR */
		cfg = ICE_AQ_VSI_PROP_SECURITY_VALID |
			ICE_AQ_VSI_PROP_FLOW_DIR_VALID;
		vsi_ctx.info.valid_sections |= rte_cpu_to_le_16(cfg);
		cfg = ICE_AQ_VSI_FD_ENABLE;
		vsi_ctx.info.fd_options = rte_cpu_to_le_16(cfg);
		vsi_ctx.info.max_fd_fltr_dedicated =
			rte_cpu_to_le_16(hw->func_caps.fd_fltr_guar);
		vsi_ctx.info.max_fd_fltr_shared =
			rte_cpu_to_le_16(hw->func_caps.fd_fltr_best_effort);

		/* Enable VLAN/UP trip */
		ret = ice_vsi_config_tc_queue_mapping(vsi,
						      &vsi_ctx.info,
						      ICE_DEFAULT_TCMAP);
		if (ret) {
			PMD_INIT_LOG(ERR,
				     "tc queue mapping with vsi failed, "
				     "err = %d",
				     ret);
			goto fail_mem;
		}

		break;
	case ICE_VSI_CTRL:
		vsi->nb_qps = pf->fdir_nb_qps;
		vsi->base_queue = ICE_FDIR_QUEUE_ID;
		vsi_ctx.alloc_from_pool = true;
		vsi_ctx.flags = ICE_AQ_VSI_TYPE_PF;

		cfg = ICE_AQ_VSI_PROP_FLOW_DIR_VALID;
		vsi_ctx.info.valid_sections |= rte_cpu_to_le_16(cfg);
		cfg = ICE_AQ_VSI_FD_PROG_ENABLE;
		vsi_ctx.info.fd_options = rte_cpu_to_le_16(cfg);
		vsi_ctx.info.sw_id = hw->port_info->sw_id;
		vsi_ctx.info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
		ret = ice_vsi_config_tc_queue_mapping(vsi,
						      &vsi_ctx.info,
						      ICE_DEFAULT_TCMAP);
		if (ret) {
			PMD_INIT_LOG(ERR,
				     "tc queue mapping with vsi failed, "
				     "err = %d",
				     ret);
			goto fail_mem;
		}
		break;
	default:
		/* for other types of VSI */
		PMD_INIT_LOG(ERR, "other types of VSI not supported");
		goto fail_mem;
	}

	/* VF has MSIX interrupt in VF range, don't allocate here */
	if (type == ICE_VSI_PF) {
		ret = ice_res_pool_alloc(&pf->msix_pool,
					 RTE_MIN(vsi->nb_qps,
						 RTE_MAX_RXTX_INTR_VEC_ID));
		if (ret < 0) {
			PMD_INIT_LOG(ERR, "VSI MAIN %d get heap failed %d",
				     vsi->vsi_id, ret);
		}
		vsi->msix_intr = ret;
		vsi->nb_msix = RTE_MIN(vsi->nb_qps, RTE_MAX_RXTX_INTR_VEC_ID);
	} else if (type == ICE_VSI_CTRL) {
		ret = ice_res_pool_alloc(&pf->msix_pool, 1);
		if (ret < 0) {
			PMD_DRV_LOG(ERR, "VSI %d get heap failed %d",
				    vsi->vsi_id, ret);
		}
		vsi->msix_intr = ret;
		vsi->nb_msix = 1;
	} else {
		vsi->msix_intr = 0;
		vsi->nb_msix = 0;
	}
	ret = ice_add_vsi(hw, vsi->idx, &vsi_ctx, NULL);
	if (ret != ICE_SUCCESS) {
		PMD_INIT_LOG(ERR, "add vsi failed, err = %d", ret);
		goto fail_mem;
	}
	/* store VSI information in the SW structure */
	vsi->vsi_id = vsi_ctx.vsi_num;
	vsi->info = vsi_ctx.info;
	pf->vsis_allocated = vsi_ctx.vsis_allocd;
	pf->vsis_unallocated = vsi_ctx.vsis_unallocated;

	if (type == ICE_VSI_PF) {
		/* MAC configuration */
		rte_ether_addr_copy((struct rte_ether_addr *)
					hw->port_info->mac.perm_addr,
				    &pf->dev_addr);

		rte_ether_addr_copy(&pf->dev_addr, &mac_addr);
		ret = ice_add_mac_filter(vsi, &mac_addr);
		if (ret != ICE_SUCCESS)
			PMD_INIT_LOG(ERR, "Failed to add dflt MAC filter");

		rte_ether_addr_copy(&broadcast, &mac_addr);
		ret = ice_add_mac_filter(vsi, &mac_addr);
		if (ret != ICE_SUCCESS)
			PMD_INIT_LOG(ERR, "Failed to add MAC filter");
	}

	/* At the beginning, only TC0. */
	/* What we need here is the maximum number of the TX queues.
	 * Currently vsi->nb_qps means it.
	 * Correct it if any change.
	 */
	max_txqs[0] = vsi->nb_qps;
	ret = ice_cfg_vsi_lan(hw->port_info, vsi->idx,
			      tc_bitmap, max_txqs);
	if (ret != ICE_SUCCESS)
		PMD_INIT_LOG(ERR, "Failed to config vsi sched");

	return vsi;
fail_mem:
	rte_free(vsi);
	pf->next_vsi_idx--;
	return NULL;
}
static int
ice_send_driver_ver(struct ice_hw *hw)
{
	struct ice_driver_ver dv;

	/* we don't have a driver version, use 0 as a dummy */
	dv.major_ver = 0;
	dv.minor_ver = 0;
	dv.build_ver = 0;
	dv.subbuild_ver = 0;
	strncpy((char *)dv.driver_string, "dpdk", sizeof(dv.driver_string));

	return ice_aq_send_driver_ver(hw, &dv, NULL);
}
static int
ice_pf_setup(struct ice_pf *pf)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_vsi *vsi;
	uint16_t unused;

	/* Clear all stats counters */
	pf->offset_loaded = false;
	memset(&pf->stats, 0, sizeof(struct ice_hw_port_stats));
	memset(&pf->stats_offset, 0, sizeof(struct ice_hw_port_stats));
	memset(&pf->internal_stats, 0, sizeof(struct ice_eth_stats));
	memset(&pf->internal_stats_offset, 0, sizeof(struct ice_eth_stats));

	/* force guaranteed filter pool for PF */
	ice_alloc_fd_guar_item(hw, &unused,
			       hw->func_caps.fd_fltr_guar);
	/* force shared filter pool for PF */
	ice_alloc_fd_shrd_item(hw, &unused,
			       hw->func_caps.fd_fltr_best_effort);

	vsi = ice_setup_vsi(pf, ICE_VSI_PF);
	if (!vsi) {
		PMD_INIT_LOG(ERR, "Failed to add vsi for PF");
		return -EINVAL;
	}

	pf->main_vsi = vsi;

	return 0;
}
/* PCIe configuration space setting */
#define PCI_CFG_SPACE_SIZE	256
#define PCI_CFG_SPACE_EXP_SIZE	4096
#define PCI_EXT_CAP_ID(header)		(int)((header) & 0x0000ffff)
#define PCI_EXT_CAP_NEXT(header)	(((header) >> 20) & 0xffc)
#define PCI_EXT_CAP_ID_DSN		0x03

static int
ice_pci_find_next_ext_capability(struct rte_pci_device *dev, int cap)
{
	uint32_t header;
	int ttl;
	int pos = PCI_CFG_SPACE_SIZE;

	/* minimum 8 bytes per capability */
	ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;

	if (rte_pci_read_config(dev, &header, 4, pos) < 0) {
		PMD_INIT_LOG(ERR, "ice error reading extended capabilities\n");
		return -1;
	}

	/*
	 * If we have no capabilities, this is indicated by cap ID,
	 * cap version and next pointer all being 0.
	 */
	if (header == 0)
		return 0;

	while (ttl-- > 0) {
		if (PCI_EXT_CAP_ID(header) == cap)
			return pos;

		pos = PCI_EXT_CAP_NEXT(header);

		if (pos < PCI_CFG_SPACE_SIZE)
			break;

		if (rte_pci_read_config(dev, &header, 4, pos) < 0) {
			PMD_INIT_LOG(ERR, "ice error reading extended capabilities\n");
			return -1;
		}
	}

	return 0;
}
/*
 * Extract the device serial number from PCIe Configuration Space and
 * determine the pkg file path according to the DSN.
 */
static int
ice_pkg_file_search_path(struct rte_pci_device *pci_dev, char *pkg_file)
{
	int pos;
	char opt_ddp_filename[ICE_MAX_PKG_FILENAME_SIZE];
	uint32_t dsn_low, dsn_high;
	memset(opt_ddp_filename, 0, ICE_MAX_PKG_FILENAME_SIZE);

	pos = ice_pci_find_next_ext_capability(pci_dev, PCI_EXT_CAP_ID_DSN);

	if (pos) {
		rte_pci_read_config(pci_dev, &dsn_low, 4, pos + 4);
		rte_pci_read_config(pci_dev, &dsn_high, 4, pos + 8);
		snprintf(opt_ddp_filename, ICE_MAX_PKG_FILENAME_SIZE,
			 "ice-%08x%08x.pkg", dsn_high, dsn_low);
	} else {
		PMD_INIT_LOG(ERR, "Failed to read device serial number\n");
		goto fail_dsn;
	}

	strncpy(pkg_file, ICE_PKG_FILE_SEARCH_PATH_UPDATES,
		ICE_MAX_PKG_FILENAME_SIZE);
	if (!access(strcat(pkg_file, opt_ddp_filename), 0))
		return 0;

	strncpy(pkg_file, ICE_PKG_FILE_SEARCH_PATH_DEFAULT,
		ICE_MAX_PKG_FILENAME_SIZE);
	if (!access(strcat(pkg_file, opt_ddp_filename), 0))
		return 0;

fail_dsn:
	strncpy(pkg_file, ICE_PKG_FILE_UPDATES, ICE_MAX_PKG_FILENAME_SIZE);
	if (!access(pkg_file, 0))
		return 0;
	strncpy(pkg_file, ICE_PKG_FILE_DEFAULT, ICE_MAX_PKG_FILENAME_SIZE);
	return 0;
}
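/*
 * Worked example (illustrative): if the DSN capability reads back
 * dsn_high = 0x12345678 and dsn_low = 0x9abcdef0, the code above looks
 * for "ice-123456789abcdef0.pkg" under ICE_PKG_FILE_SEARCH_PATH_UPDATES
 * and ICE_PKG_FILE_SEARCH_PATH_DEFAULT, then falls back to the generic
 * ICE_PKG_FILE_UPDATES / ICE_PKG_FILE_DEFAULT package files.
 */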
static enum ice_pkg_type
ice_load_pkg_type(struct ice_hw *hw)
{
	enum ice_pkg_type package_type;

	/* store the activated package type (OS default or Comms) */
	if (!strncmp((char *)hw->active_pkg_name, ICE_OS_DEFAULT_PKG_NAME,
		     ICE_PKG_NAME_SIZE))
		package_type = ICE_PKG_TYPE_OS_DEFAULT;
	else if (!strncmp((char *)hw->active_pkg_name, ICE_COMMS_PKG_NAME,
			  ICE_PKG_NAME_SIZE))
		package_type = ICE_PKG_TYPE_COMMS;
	else
		package_type = ICE_PKG_TYPE_UNKNOWN;

	PMD_INIT_LOG(NOTICE, "Active package is: %d.%d.%d.%d, %s",
		     hw->active_pkg_ver.major, hw->active_pkg_ver.minor,
		     hw->active_pkg_ver.update, hw->active_pkg_ver.draft,
		     hw->active_pkg_name);

	return package_type;
}
static int ice_load_pkg(struct rte_eth_dev *dev)
{
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	char pkg_file[ICE_MAX_PKG_FILENAME_SIZE];
	int err;
	uint8_t *buf;
	int buf_len;
	FILE *file;
	struct stat fstat;
	struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
	struct ice_adapter *ad =
		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);

	ice_pkg_file_search_path(pci_dev, pkg_file);

	file = fopen(pkg_file, "rb");
	if (!file) {
		PMD_INIT_LOG(ERR, "failed to open file: %s\n", pkg_file);
		return -1;
	}

	err = stat(pkg_file, &fstat);
	if (err) {
		PMD_INIT_LOG(ERR, "failed to get file stats\n");
		fclose(file);
		return err;
	}

	buf_len = fstat.st_size;
	buf = rte_malloc(NULL, buf_len, 0);

	if (!buf) {
		PMD_INIT_LOG(ERR, "failed to allocate buf of size %d for package\n",
			     buf_len);
		fclose(file);
		return -1;
	}

	err = fread(buf, buf_len, 1, file);
	if (err != 1) {
		PMD_INIT_LOG(ERR, "failed to read package data\n");
		fclose(file);
		err = -1;
		goto fail_exit;
	}

	fclose(file);

	err = ice_copy_and_init_pkg(hw, buf, buf_len);
	if (err) {
		PMD_INIT_LOG(ERR, "ice_copy_and_init_hw failed: %d\n", err);
		goto fail_exit;
	}

	/* store the loaded pkg type info */
	ad->active_pkg_type = ice_load_pkg_type(hw);

	err = ice_init_hw_tbls(hw);
	if (err) {
		PMD_INIT_LOG(ERR, "ice_init_hw_tbls failed: %d\n", err);
		goto fail_init_tbls;
	}

	return 0;

fail_init_tbls:
	rte_free(hw->pkg_copy);
fail_exit:
	rte_free(buf);
	return err;
}
static void
ice_base_queue_get(struct ice_pf *pf)
{
	uint32_t reg;
	struct ice_hw *hw = ICE_PF_TO_HW(pf);

	reg = ICE_READ_REG(hw, PFLAN_RX_QALLOC);
	if (reg & PFLAN_RX_QALLOC_VALID_M) {
		pf->base_queue = reg & PFLAN_RX_QALLOC_FIRSTQ_M;
	} else {
		PMD_INIT_LOG(WARNING,
			     "Failed to get Rx base queue index");
	}
}
static int
parse_bool(const char *key, const char *value, void *args)
{
	int *i = (int *)args;
	char *end;
	int num;

	num = strtoul(value, &end, 10);

	if (num != 0 && num != 1) {
		PMD_DRV_LOG(WARNING, "invalid value:\"%s\" for key:\"%s\", "
			    "value must be 0 or 1",
			    value, key);
		return -1;
	}

	*i = num;
	return 0;
}
static int ice_parse_devargs(struct rte_eth_dev *dev)
{
	struct ice_adapter *ad =
		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct rte_devargs *devargs = dev->device->devargs;
	struct rte_kvargs *kvlist;
	int ret;

	if (devargs == NULL)
		return 0;

	kvlist = rte_kvargs_parse(devargs->args, ice_valid_args);
	if (kvlist == NULL) {
		PMD_INIT_LOG(ERR, "Invalid kvargs key\n");
		return -EINVAL;
	}

	ad->devargs.proto_xtr_dflt = PROTO_XTR_NONE;
	memset(ad->devargs.proto_xtr, PROTO_XTR_NONE,
	       sizeof(ad->devargs.proto_xtr));

	ret = rte_kvargs_process(kvlist, ICE_PROTO_XTR_ARG,
				 &handle_proto_xtr_arg, &ad->devargs);
	if (ret)
		goto bail;

	ret = rte_kvargs_process(kvlist, ICE_SAFE_MODE_SUPPORT_ARG,
				 &parse_bool, &ad->devargs.safe_mode_support);
	if (ret)
		goto bail;

	ret = rte_kvargs_process(kvlist, ICE_PIPELINE_MODE_SUPPORT_ARG,
				 &parse_bool, &ad->devargs.pipe_mode_support);
	if (ret)
		goto bail;

bail:
	rte_kvargs_free(kvlist);
	return ret;
}
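/*
 * For illustration, the three keys handled above can be combined in one
 * EAL device argument, e.g.:
 *   -w 0000:18:00.0,safe-mode-support=1,pipeline-mode-support=1,proto_xtr=vlan
 * safe-mode-support and pipeline-mode-support only accept 0 or 1 (see
 * parse_bool()), while proto_xtr takes a type name or a queue map.
 */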
/* Forward LLDP packets to the default VSI by setting switch rules */
static int
ice_vsi_config_sw_lldp(struct ice_vsi *vsi, bool on)
{
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	struct ice_fltr_list_entry *s_list_itr = NULL;
	struct LIST_HEAD_TYPE list_head;
	int ret = 0;

	INIT_LIST_HEAD(&list_head);

	s_list_itr = (struct ice_fltr_list_entry *)
			ice_malloc(hw, sizeof(*s_list_itr));
	if (!s_list_itr)
		return -ENOMEM;
	s_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_ETHERTYPE;
	s_list_itr->fltr_info.vsi_handle = vsi->idx;
	s_list_itr->fltr_info.l_data.ethertype_mac.ethertype =
			RTE_ETHER_TYPE_LLDP;
	s_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
	s_list_itr->fltr_info.flag = ICE_FLTR_RX;
	s_list_itr->fltr_info.src_id = ICE_SRC_ID_LPORT;
	LIST_ADD(&s_list_itr->list_entry, &list_head);
	if (on)
		ret = ice_add_eth_mac(hw, &list_head);
	else
		ret = ice_remove_eth_mac(hw, &list_head);

	rte_free(s_list_itr);
	return ret;
}
static enum ice_status
ice_get_hw_res(struct ice_hw *hw, uint16_t res_type,
	       uint16_t num, uint16_t desc_id,
	       uint16_t *prof_buf, uint16_t *num_prof)
{
	struct ice_aqc_res_elem *resp_buf;
	int ret;
	uint16_t buf_len;
	bool res_shared = 1;
	struct ice_aq_desc aq_desc;
	struct ice_sq_cd *cd = NULL;
	struct ice_aqc_get_allocd_res_desc *cmd =
			&aq_desc.params.get_res_desc;

	buf_len = sizeof(*resp_buf) * num;
	resp_buf = ice_malloc(hw, buf_len);
	if (!resp_buf)
		return -ENOMEM;

	ice_fill_dflt_direct_cmd_desc(&aq_desc,
				      ice_aqc_opc_get_allocd_res_desc);

	cmd->ops.cmd.res = CPU_TO_LE16(((res_type << ICE_AQC_RES_TYPE_S) &
					ICE_AQC_RES_TYPE_M) | (res_shared ?
					ICE_AQC_RES_TYPE_FLAG_SHARED : 0));
	cmd->ops.cmd.first_desc = CPU_TO_LE16(desc_id);

	ret = ice_aq_send_cmd(hw, &aq_desc, resp_buf, buf_len, cd);
	if (!ret)
		*num_prof = LE16_TO_CPU(cmd->ops.resp.num_desc);
	else
		goto exit;

	ice_memcpy(prof_buf, resp_buf, sizeof(*resp_buf) *
			(*num_prof), ICE_NONDMA_TO_NONDMA);

exit:
	rte_free(resp_buf);
	return ret;
}
static int
ice_cleanup_resource(struct ice_hw *hw, uint16_t res_type)
{
	int ret;
	uint16_t prof_id;
	uint16_t prof_buf[ICE_MAX_RES_DESC_NUM];
	uint16_t first_desc = 1;
	uint16_t num_prof = 0;

	ret = ice_get_hw_res(hw, res_type, ICE_MAX_RES_DESC_NUM,
			     first_desc, prof_buf, &num_prof);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to get fxp resource");
		return ret;
	}

	for (prof_id = 0; prof_id < num_prof; prof_id++) {
		ret = ice_free_hw_res(hw, res_type, 1, &prof_buf[prof_id]);
		if (ret) {
			PMD_INIT_LOG(ERR, "Failed to free fxp resource");
			return ret;
		}
	}
	return 0;
}
static int
ice_reset_fxp_resource(struct ice_hw *hw)
{
	int ret;

	ret = ice_cleanup_resource(hw, ICE_AQC_RES_TYPE_FD_PROF_BLDR_PROFID);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to clean up fdir resource");
		return ret;
	}

	ret = ice_cleanup_resource(hw, ICE_AQC_RES_TYPE_HASH_PROF_BLDR_PROFID);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to clean up rss resource");
		return ret;
	}

	return 0;
}
static void
ice_rss_ctx_init(struct ice_pf *pf)
{
	ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4);
	ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6);

	ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_udp);
	ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_udp);

	ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_tcp);
	ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_tcp);
}
static uint64_t
ice_get_supported_rxdid(struct ice_hw *hw)
{
	uint64_t supported_rxdid = 0; /* bitmap for supported RXDID */
	uint32_t regval;
	int i;

	supported_rxdid |= BIT(ICE_RXDID_LEGACY_1);

	for (i = ICE_RXDID_FLEX_NIC; i < ICE_FLEX_DESC_RXDID_MAX_NUM; i++) {
		regval = ICE_READ_REG(hw, GLFLXP_RXDID_FLAGS(i, 0));
		if ((regval >> GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S)
			& GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M)
			supported_rxdid |= BIT(i);
	}
	return supported_rxdid;
}
static int
ice_dev_init(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev;
	struct rte_intr_handle *intr_handle;
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_adapter *ad =
		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct ice_vsi *vsi;
	int ret;

	dev->dev_ops = &ice_eth_dev_ops;
	dev->rx_queue_count = ice_rx_queue_count;
	dev->rx_descriptor_status = ice_rx_descriptor_status;
	dev->tx_descriptor_status = ice_tx_descriptor_status;
	dev->rx_pkt_burst = ice_recv_pkts;
	dev->tx_pkt_burst = ice_xmit_pkts;
	dev->tx_pkt_prepare = ice_prep_pkts;

	/* for secondary processes, we don't initialise any further as primary
	 * has already done this work.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		ice_set_rx_function(dev);
		ice_set_tx_function(dev);
		return 0;
	}

	ice_set_default_ptype_table(dev);
	pci_dev = RTE_DEV_TO_PCI(dev->device);
	intr_handle = &pci_dev->intr_handle;

	pf->adapter = ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	pf->adapter->eth_dev = dev;
	pf->dev_data = dev->data;
	hw->back = pf->adapter;
	hw->hw_addr = (uint8_t *)pci_dev->mem_resource[0].addr;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->device_id = pci_dev->id.device_id;
	hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
	hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
	hw->bus.device = pci_dev->addr.devid;
	hw->bus.func = pci_dev->addr.function;

	ret = ice_parse_devargs(dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to parse devargs");
		return -EINVAL;
	}

	ice_init_controlq_parameter(hw);

	ret = ice_init_hw(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to initialize HW");
		return -EINVAL;
	}

	ret = ice_load_pkg(dev);
	if (ret) {
		if (ad->devargs.safe_mode_support == 0) {
			PMD_INIT_LOG(ERR, "Failed to load the DDP package, "
				     "use safe-mode-support=1 to enter Safe Mode");
			return ret;
		}

		PMD_INIT_LOG(WARNING, "Failed to load the DDP package, "
			     "entering Safe Mode");
		ad->is_safe_mode = 1;
	}

	PMD_INIT_LOG(INFO, "FW %d.%d.%05d API %d.%d",
		     hw->fw_maj_ver, hw->fw_min_ver, hw->fw_build,
		     hw->api_maj_ver, hw->api_min_ver);

	ice_pf_sw_init(dev);
	ret = ice_init_mac_address(dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to initialize mac address");
		goto err_init_mac;
	}

	/* Pass the information to the rte_eth_dev_close() that it should also
	 * release the private port resources.
	 */
	dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;

	ret = ice_res_pool_init(&pf->msix_pool, 1,
				hw->func_caps.common_cap.num_msix_vectors - 1);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to init MSIX pool");
		goto err_msix_pool_init;
	}

	ret = ice_pf_setup(pf);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to setup PF");
		goto err_pf_setup;
	}

	ret = ice_send_driver_ver(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to send driver version");
		goto err_pf_setup;
	}

	vsi = pf->main_vsi;

	/* Disable double vlan by default */
	ice_vsi_config_double_vlan(vsi, false);

	ret = ice_aq_stop_lldp(hw, true, false, NULL);
	if (ret != ICE_SUCCESS)
		PMD_INIT_LOG(DEBUG, "lldp has already stopped\n");
	ret = ice_init_dcb(hw, true);
	if (ret != ICE_SUCCESS)
		PMD_INIT_LOG(DEBUG, "Failed to init DCB\n");
	/* Forward LLDP packets to default VSI */
	ret = ice_vsi_config_sw_lldp(vsi, true);
	if (ret != ICE_SUCCESS)
		PMD_INIT_LOG(DEBUG, "Failed to cfg lldp\n");

	/* register callback func to eal lib */
	rte_intr_callback_register(intr_handle,
				   ice_interrupt_handler, dev);

	ice_pf_enable_irq0(hw);

	/* enable uio intr after callback register */
	rte_intr_enable(intr_handle);

	/* get base queue pairs index in the device */
	ice_base_queue_get(pf);

	/* Initialize RSS context for gtpu_eh */
	ice_rss_ctx_init(pf);

	if (!ad->is_safe_mode) {
		ret = ice_flow_init(ad);
		if (ret) {
			PMD_INIT_LOG(ERR, "Failed to initialize flow");
			return ret;
		}
	}

	ret = ice_reset_fxp_resource(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to reset fxp resource");
		return ret;
	}

	pf->supported_rxdid = ice_get_supported_rxdid(hw);

	return 0;

err_pf_setup:
	ice_res_pool_destroy(&pf->msix_pool);
err_msix_pool_init:
	rte_free(dev->data->mac_addrs);
	dev->data->mac_addrs = NULL;
err_init_mac:
	ice_sched_cleanup_all(hw);
	rte_free(hw->port_info);
	ice_shutdown_all_ctrlq(hw);
	rte_free(pf->proto_xtr);

	return ret;
}
static int
ice_release_vsi(struct ice_vsi *vsi)
{
	struct ice_hw *hw;
	struct ice_vsi_ctx vsi_ctx;
	enum ice_status ret;
	int error = 0;

	if (!vsi)
		return error;

	hw = ICE_VSI_TO_HW(vsi);

	ice_remove_all_mac_vlan_filters(vsi);

	memset(&vsi_ctx, 0, sizeof(vsi_ctx));

	vsi_ctx.vsi_num = vsi->vsi_id;
	vsi_ctx.info = vsi->info;
	ret = ice_free_vsi(hw, vsi->idx, &vsi_ctx, false, NULL);
	if (ret != ICE_SUCCESS) {
		PMD_INIT_LOG(ERR, "Failed to free vsi by aq, %u", vsi->vsi_id);
		error = -1;
	}

	rte_free(vsi->rss_lut);
	rte_free(vsi->rss_key);
	rte_free(vsi);
	return error;
}
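/* Queue-interrupt teardown helper. Writing 0 to QINT_TQCTL/QINT_RQCTL
 * detaches each queue from its MSI-X cause, and GLINT_DYN_CTL is left with
 * only WB_ON_ITR set so the hardware can keep writing back completed
 * descriptors without raising interrupts (polled mode). This mirrors the
 * enable path in ice_vsi_enable_queues_intr() further below; see the
 * datasheet for the exact WB_ON_ITR semantics.
 */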
static void
ice_vsi_disable_queues_intr(struct ice_vsi *vsi)
{
	struct rte_eth_dev *dev = vsi->adapter->eth_dev;
	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	uint16_t msix_intr, i;

	/* disable interrupts and clear all existing queue/vector config */
	for (i = 0; i < vsi->nb_qps; i++) {
		ICE_WRITE_REG(hw, QINT_TQCTL(vsi->base_queue + i), 0);
		ICE_WRITE_REG(hw, QINT_RQCTL(vsi->base_queue + i), 0);
	}

	if (rte_intr_allow_others(intr_handle))
		/* vfio-pci: one vector per queue */
		for (i = 0; i < vsi->nb_msix; i++) {
			msix_intr = vsi->msix_intr + i;
			ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr),
				      GLINT_DYN_CTL_WB_ON_ITR_M);
		}
	else
		/* igb_uio: everything rides on vector 0 */
		ICE_WRITE_REG(hw, GLINT_DYN_CTL(0), GLINT_DYN_CTL_WB_ON_ITR_M);
}
static void
ice_dev_stop(struct rte_eth_dev *dev)
{
	struct rte_eth_dev_data *data = dev->data;
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_vsi *main_vsi = pf->main_vsi;
	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	uint16_t i;

	/* avoid stopping again */
	if (pf->adapter_stopped)
		return;

	/* stop and clear all Rx queues */
	for (i = 0; i < data->nb_rx_queues; i++)
		ice_rx_queue_stop(dev, i);

	/* stop and clear all Tx queues */
	for (i = 0; i < data->nb_tx_queues; i++)
		ice_tx_queue_stop(dev, i);

	/* disable all queue interrupts */
	ice_vsi_disable_queues_intr(main_vsi);

	if (pf->init_link_up)
		ice_dev_set_link_up(dev);
	else
		ice_dev_set_link_down(dev);

	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
	if (intr_handle->intr_vec) {
		rte_free(intr_handle->intr_vec);
		intr_handle->intr_vec = NULL;
	}

	pf->adapter_stopped = true;
}
static void
ice_dev_close(struct rte_eth_dev *dev)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct ice_adapter *ad =
		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);

	/* Stopping the port brings the link down, which triggers a link
	 * event; disable IRQ0 first so the interrupt service thread does
	 * not touch port_info and other resources while they are freed.
	 */
	ice_pf_disable_irq0(hw);

	ice_dev_stop(dev);

	if (!ad->is_safe_mode)
		ice_flow_uninit(ad);

	/* release all queue resources */
	ice_free_queues(dev);

	ice_res_pool_destroy(&pf->msix_pool);
	ice_release_vsi(pf->main_vsi);
	ice_sched_cleanup_all(hw);
	ice_free_hw_tbls(hw);
	rte_free(hw->port_info);
	hw->port_info = NULL;
	ice_shutdown_all_ctrlq(hw);
	rte_free(pf->proto_xtr);
	pf->proto_xtr = NULL;

	dev->dev_ops = NULL;
	dev->rx_pkt_burst = NULL;
	dev->tx_pkt_burst = NULL;

	rte_free(dev->data->mac_addrs);
	dev->data->mac_addrs = NULL;

	/* disable uio intr before callback unregister */
	rte_intr_disable(intr_handle);

	/* unregister callback func from eal lib */
	rte_intr_callback_unregister(intr_handle,
				     ice_interrupt_handler, dev);
}
static int
ice_dev_uninit(struct rte_eth_dev *dev)
{
	ice_dev_close(dev);

	return 0;
}
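/* GTPU RSS context bookkeeping
 *
 * The pre/post helpers below keep pf->gtpu_hash_ctx in sync with the RSS
 * rules actually programmed in hardware. As far as the state macros show:
 *
 *  - ice_add_rss_cfg_post() records a GTPU_EH rule's packet header, hash
 *    field and symmetry in the matching inner-protocol context, and for
 *    GTPU_DWN/UP rules re-installs any plain IPv4/IPv6 rule that was
 *    temporarily removed (a context left in the "rotating" state).
 *  - ice_add_rss_cfg_pre() removes rules that would conflict with the one
 *    about to be added, resetting their contexts, and marks the plain
 *    IPv4/IPv6 contexts as rotating (ICE_HASH_CFG_ROTATE_START) so the
 *    post hook can restore them.
 *  - ice_rem_rss_cfg_post() simply clears the context of a removed rule.
 */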
static int
ice_add_rss_cfg_post(struct ice_pf *pf, uint32_t hdr, uint64_t fld, bool symm)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_vsi *vsi = pf->main_vsi;

	if (hdr & ICE_FLOW_SEG_HDR_GTPU_EH) {
		if ((hdr & ICE_FLOW_SEG_HDR_IPV4) &&
		    (hdr & ICE_FLOW_SEG_HDR_UDP)) {
			pf->gtpu_hash_ctx.ipv4_udp.pkt_hdr = hdr;
			pf->gtpu_hash_ctx.ipv4_udp.hash_fld = fld;
			pf->gtpu_hash_ctx.ipv4_udp.symm = symm;
		} else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) &&
			   (hdr & ICE_FLOW_SEG_HDR_UDP)) {
			pf->gtpu_hash_ctx.ipv6_udp.pkt_hdr = hdr;
			pf->gtpu_hash_ctx.ipv6_udp.hash_fld = fld;
			pf->gtpu_hash_ctx.ipv6_udp.symm = symm;
		} else if ((hdr & ICE_FLOW_SEG_HDR_IPV4) &&
			   (hdr & ICE_FLOW_SEG_HDR_TCP)) {
			pf->gtpu_hash_ctx.ipv4_tcp.pkt_hdr = hdr;
			pf->gtpu_hash_ctx.ipv4_tcp.hash_fld = fld;
			pf->gtpu_hash_ctx.ipv4_tcp.symm = symm;
		} else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) &&
			   (hdr & ICE_FLOW_SEG_HDR_TCP)) {
			pf->gtpu_hash_ctx.ipv6_tcp.pkt_hdr = hdr;
			pf->gtpu_hash_ctx.ipv6_tcp.hash_fld = fld;
			pf->gtpu_hash_ctx.ipv6_tcp.symm = symm;
		} else if (hdr & ICE_FLOW_SEG_HDR_IPV4) {
			pf->gtpu_hash_ctx.ipv4.pkt_hdr = hdr;
			pf->gtpu_hash_ctx.ipv4.hash_fld = fld;
			pf->gtpu_hash_ctx.ipv4.symm = symm;
			ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_udp);
			ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_tcp);
		} else if (hdr & ICE_FLOW_SEG_HDR_IPV6) {
			pf->gtpu_hash_ctx.ipv6.pkt_hdr = hdr;
			pf->gtpu_hash_ctx.ipv6.hash_fld = fld;
			pf->gtpu_hash_ctx.ipv6.symm = symm;
			ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_udp);
			ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_tcp);
		}
	}

	if (hdr & (ICE_FLOW_SEG_HDR_GTPU_DWN |
		   ICE_FLOW_SEG_HDR_GTPU_UP)) {
		if ((hdr & ICE_FLOW_SEG_HDR_IPV4) &&
		    (hdr & ICE_FLOW_SEG_HDR_UDP)) {
			if (ICE_HASH_CFG_IS_ROTATING(&pf->gtpu_hash_ctx.ipv4)) {
				ice_add_rss_cfg(hw, vsi->idx,
						pf->gtpu_hash_ctx.ipv4.hash_fld,
						pf->gtpu_hash_ctx.ipv4.pkt_hdr,
						pf->gtpu_hash_ctx.ipv4.symm);
				ICE_HASH_CFG_ROTATE_STOP(&pf->gtpu_hash_ctx.ipv4);
			}
		} else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) &&
			   (hdr & ICE_FLOW_SEG_HDR_UDP)) {
			if (ICE_HASH_CFG_IS_ROTATING(&pf->gtpu_hash_ctx.ipv6)) {
				ice_add_rss_cfg(hw, vsi->idx,
						pf->gtpu_hash_ctx.ipv6.hash_fld,
						pf->gtpu_hash_ctx.ipv6.pkt_hdr,
						pf->gtpu_hash_ctx.ipv6.symm);
				ICE_HASH_CFG_ROTATE_STOP(&pf->gtpu_hash_ctx.ipv6);
			}
		} else if ((hdr & ICE_FLOW_SEG_HDR_IPV4) &&
			   (hdr & ICE_FLOW_SEG_HDR_TCP)) {
			if (ICE_HASH_CFG_IS_ROTATING(&pf->gtpu_hash_ctx.ipv4)) {
				ice_add_rss_cfg(hw, vsi->idx,
						pf->gtpu_hash_ctx.ipv4.hash_fld,
						pf->gtpu_hash_ctx.ipv4.pkt_hdr,
						pf->gtpu_hash_ctx.ipv4.symm);
				ICE_HASH_CFG_ROTATE_STOP(&pf->gtpu_hash_ctx.ipv4);
			}
		} else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) &&
			   (hdr & ICE_FLOW_SEG_HDR_TCP)) {
			if (ICE_HASH_CFG_IS_ROTATING(&pf->gtpu_hash_ctx.ipv6)) {
				ice_add_rss_cfg(hw, vsi->idx,
						pf->gtpu_hash_ctx.ipv6.hash_fld,
						pf->gtpu_hash_ctx.ipv6.pkt_hdr,
						pf->gtpu_hash_ctx.ipv6.symm);
				ICE_HASH_CFG_ROTATE_STOP(&pf->gtpu_hash_ctx.ipv6);
			}
		}
	}

	return 0;
}
static int
ice_add_rss_cfg_pre(struct ice_pf *pf, uint32_t hdr)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_vsi *vsi = pf->main_vsi;

	if (hdr & (ICE_FLOW_SEG_HDR_GTPU_DWN |
		   ICE_FLOW_SEG_HDR_GTPU_UP)) {
		if ((hdr & ICE_FLOW_SEG_HDR_IPV4) &&
		    (hdr & ICE_FLOW_SEG_HDR_UDP)) {
			if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv4_udp)) {
				ice_rem_rss_cfg(hw, vsi->idx,
					pf->gtpu_hash_ctx.ipv4_udp.hash_fld,
					pf->gtpu_hash_ctx.ipv4_udp.pkt_hdr);
				ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_udp);
			}

			if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv4)) {
				ice_rem_rss_cfg(hw, vsi->idx,
					pf->gtpu_hash_ctx.ipv4.hash_fld,
					pf->gtpu_hash_ctx.ipv4.pkt_hdr);
				ICE_HASH_CFG_ROTATE_START(&pf->gtpu_hash_ctx.ipv4);
			}
		} else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) &&
			   (hdr & ICE_FLOW_SEG_HDR_UDP)) {
			if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv6_udp)) {
				ice_rem_rss_cfg(hw, vsi->idx,
					pf->gtpu_hash_ctx.ipv6_udp.hash_fld,
					pf->gtpu_hash_ctx.ipv6_udp.pkt_hdr);
				ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_udp);
			}

			if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv6)) {
				ice_rem_rss_cfg(hw, vsi->idx,
					pf->gtpu_hash_ctx.ipv6.hash_fld,
					pf->gtpu_hash_ctx.ipv6.pkt_hdr);
				ICE_HASH_CFG_ROTATE_START(&pf->gtpu_hash_ctx.ipv6);
			}
		} else if ((hdr & ICE_FLOW_SEG_HDR_IPV4) &&
			   (hdr & ICE_FLOW_SEG_HDR_TCP)) {
			if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv4_tcp)) {
				ice_rem_rss_cfg(hw, vsi->idx,
					pf->gtpu_hash_ctx.ipv4_tcp.hash_fld,
					pf->gtpu_hash_ctx.ipv4_tcp.pkt_hdr);
				ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_tcp);
			}

			if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv4)) {
				ice_rem_rss_cfg(hw, vsi->idx,
					pf->gtpu_hash_ctx.ipv4.hash_fld,
					pf->gtpu_hash_ctx.ipv4.pkt_hdr);
				ICE_HASH_CFG_ROTATE_START(&pf->gtpu_hash_ctx.ipv4);
			}
		} else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) &&
			   (hdr & ICE_FLOW_SEG_HDR_TCP)) {
			if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv6_tcp)) {
				ice_rem_rss_cfg(hw, vsi->idx,
					pf->gtpu_hash_ctx.ipv6_tcp.hash_fld,
					pf->gtpu_hash_ctx.ipv6_tcp.pkt_hdr);
				ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_tcp);
			}

			if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv6)) {
				ice_rem_rss_cfg(hw, vsi->idx,
					pf->gtpu_hash_ctx.ipv6.hash_fld,
					pf->gtpu_hash_ctx.ipv6.pkt_hdr);
				ICE_HASH_CFG_ROTATE_START(&pf->gtpu_hash_ctx.ipv6);
			}
		} else if (hdr & ICE_FLOW_SEG_HDR_IPV4) {
			if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv4)) {
				ice_rem_rss_cfg(hw, vsi->idx,
					pf->gtpu_hash_ctx.ipv4.hash_fld,
					pf->gtpu_hash_ctx.ipv4.pkt_hdr);
				ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4);
			}

			if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv4_udp)) {
				ice_rem_rss_cfg(hw, vsi->idx,
					pf->gtpu_hash_ctx.ipv4_udp.hash_fld,
					pf->gtpu_hash_ctx.ipv4_udp.pkt_hdr);
				ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_udp);
			}

			if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv4_tcp)) {
				ice_rem_rss_cfg(hw, vsi->idx,
					pf->gtpu_hash_ctx.ipv4_tcp.hash_fld,
					pf->gtpu_hash_ctx.ipv4_tcp.pkt_hdr);
				ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_tcp);
			}
		} else if (hdr & ICE_FLOW_SEG_HDR_IPV6) {
			if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv6)) {
				ice_rem_rss_cfg(hw, vsi->idx,
					pf->gtpu_hash_ctx.ipv6.hash_fld,
					pf->gtpu_hash_ctx.ipv6.pkt_hdr);
				ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6);
			}

			if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv6_udp)) {
				ice_rem_rss_cfg(hw, vsi->idx,
					pf->gtpu_hash_ctx.ipv6_udp.hash_fld,
					pf->gtpu_hash_ctx.ipv6_udp.pkt_hdr);
				ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_udp);
			}

			if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv6_tcp)) {
				ice_rem_rss_cfg(hw, vsi->idx,
					pf->gtpu_hash_ctx.ipv6_tcp.hash_fld,
					pf->gtpu_hash_ctx.ipv6_tcp.pkt_hdr);
				ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_tcp);
			}
		}
	}

	return 0;
}
static int
ice_rem_rss_cfg_post(struct ice_pf *pf, uint32_t hdr)
{
	if (hdr & ICE_FLOW_SEG_HDR_GTPU_EH) {
		if ((hdr & ICE_FLOW_SEG_HDR_IPV4) &&
		    (hdr & ICE_FLOW_SEG_HDR_UDP)) {
			ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_udp);
		} else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) &&
			   (hdr & ICE_FLOW_SEG_HDR_UDP)) {
			ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_udp);
		} else if ((hdr & ICE_FLOW_SEG_HDR_IPV4) &&
			   (hdr & ICE_FLOW_SEG_HDR_TCP)) {
			ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_tcp);
		} else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) &&
			   (hdr & ICE_FLOW_SEG_HDR_TCP)) {
			ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_tcp);
		} else if (hdr & ICE_FLOW_SEG_HDR_IPV4) {
			ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4);
		} else if (hdr & ICE_FLOW_SEG_HDR_IPV6) {
			ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6);
		}
	}

	return 0;
}
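/* The _wrap helpers below funnel every RSS rule add/remove through the
 * pre/post bookkeeping above. Failures are logged but not propagated:
 * in particular, a missing rule on remove (ICE_ERR_DOES_NOT_EXIST) is
 * not treated as an error.
 */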
static int
ice_rem_rss_cfg_wrap(struct ice_pf *pf, uint16_t vsi_id,
		     uint64_t fld, uint32_t hdr)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	int ret;

	ret = ice_rem_rss_cfg(hw, vsi_id, fld, hdr);
	if (ret && ret != ICE_ERR_DOES_NOT_EXIST)
		PMD_DRV_LOG(ERR, "remove rss cfg failed\n");

	ret = ice_rem_rss_cfg_post(pf, hdr);
	if (ret)
		PMD_DRV_LOG(ERR, "remove rss cfg post failed\n");

	return 0;
}
static int
ice_add_rss_cfg_wrap(struct ice_pf *pf, uint16_t vsi_id,
		     uint64_t fld, uint32_t hdr, bool symm)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	int ret;

	ret = ice_add_rss_cfg_pre(pf, hdr);
	if (ret)
		PMD_DRV_LOG(ERR, "add rss cfg pre failed\n");

	ret = ice_add_rss_cfg(hw, vsi_id, fld, hdr, symm);
	if (ret)
		PMD_DRV_LOG(ERR, "add rss cfg failed\n");

	ret = ice_add_rss_cfg_post(pf, hdr, fld, symm);
	if (ret)
		PMD_DRV_LOG(ERR, "add rss cfg post failed\n");

	return 0;
}
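/* Translate the ETH_RSS_* bits in rss_hf into ice flow-segment rules. For
 * each enabled protocol the plain (outer) rule is added first, then GTPU_IP,
 * GTPU_EH and PPPoE variants of the same hash so tunnelled and encapsulated
 * traffic is spread the same way. Each add is independent; a failure is
 * logged with the flow name and does not stop the remaining configuration.
 */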
static void
ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
{
	struct ice_vsi *vsi = pf->main_vsi;
	int ret;

	/* Configure RSS for IPv4 with src/dst addr as input set */
	if (rss_hf & ETH_RSS_IPV4) {
		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_FLOW_HASH_IPV4,
				      ICE_FLOW_SEG_HDR_IPV4 |
				      ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
		if (ret)
			PMD_DRV_LOG(ERR, "%s IPV4 rss flow fail %d",
				    __func__, ret);
	}

	/* Configure RSS for IPv6 with src/dst addr as input set */
	if (rss_hf & ETH_RSS_IPV6) {
		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_FLOW_HASH_IPV6,
				      ICE_FLOW_SEG_HDR_IPV6 |
				      ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
		if (ret)
			PMD_DRV_LOG(ERR, "%s IPV6 rss flow fail %d",
				    __func__, ret);
	}

	/* Configure RSS for udp4 with src/dst addr and port as input set */
	if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) {
		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_UDP_IPV4,
				      ICE_FLOW_SEG_HDR_UDP |
				      ICE_FLOW_SEG_HDR_IPV4 |
				      ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
		if (ret)
			PMD_DRV_LOG(ERR, "%s UDP_IPV4 rss flow fail %d",
				    __func__, ret);
	}

	/* Configure RSS for udp6 with src/dst addr and port as input set */
	if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) {
		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_UDP_IPV6,
				      ICE_FLOW_SEG_HDR_UDP |
				      ICE_FLOW_SEG_HDR_IPV6 |
				      ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
		if (ret)
			PMD_DRV_LOG(ERR, "%s UDP_IPV6 rss flow fail %d",
				    __func__, ret);
	}

	/* Configure RSS for tcp4 with src/dst addr and port as input set */
	if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) {
		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_TCP_IPV4,
				      ICE_FLOW_SEG_HDR_TCP |
				      ICE_FLOW_SEG_HDR_IPV4 |
				      ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
		if (ret)
			PMD_DRV_LOG(ERR, "%s TCP_IPV4 rss flow fail %d",
				    __func__, ret);
	}

	/* Configure RSS for tcp6 with src/dst addr and port as input set */
	if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) {
		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_TCP_IPV6,
				      ICE_FLOW_SEG_HDR_TCP |
				      ICE_FLOW_SEG_HDR_IPV6 |
				      ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
		if (ret)
			PMD_DRV_LOG(ERR, "%s TCP_IPV6 rss flow fail %d",
				    __func__, ret);
	}

	/* Configure RSS for sctp4 with src/dst addr as input set */
	if (rss_hf & ETH_RSS_NONFRAG_IPV4_SCTP) {
		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_FLOW_HASH_IPV4,
				      ICE_FLOW_SEG_HDR_SCTP |
				      ICE_FLOW_SEG_HDR_IPV4 |
				      ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
		if (ret)
			PMD_DRV_LOG(ERR, "%s SCTP_IPV4 rss flow fail %d",
				    __func__, ret);
	}

	/* Configure RSS for sctp6 with src/dst addr as input set */
	if (rss_hf & ETH_RSS_NONFRAG_IPV6_SCTP) {
		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_FLOW_HASH_IPV6,
				      ICE_FLOW_SEG_HDR_SCTP |
				      ICE_FLOW_SEG_HDR_IPV6 |
				      ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
		if (ret)
			PMD_DRV_LOG(ERR, "%s SCTP_IPV6 rss flow fail %d",
				    __func__, ret);
	}

	if (rss_hf & ETH_RSS_IPV4) {
		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_FLOW_HASH_IPV4,
				ICE_FLOW_SEG_HDR_GTPU_IP |
				ICE_FLOW_SEG_HDR_IPV4 |
				ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
		if (ret)
			PMD_DRV_LOG(ERR, "%s GTPU_IPV4 rss flow fail %d",
				    __func__, ret);

		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_FLOW_HASH_IPV4,
				ICE_FLOW_SEG_HDR_GTPU_EH |
				ICE_FLOW_SEG_HDR_IPV4 |
				ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
		if (ret)
			PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV4 rss flow fail %d",
				    __func__, ret);

		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_FLOW_HASH_IPV4,
				ICE_FLOW_SEG_HDR_PPPOE |
				ICE_FLOW_SEG_HDR_IPV4 |
				ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
		if (ret)
			PMD_DRV_LOG(ERR, "%s PPPoE_IPV4 rss flow fail %d",
				    __func__, ret);
	}

	if (rss_hf & ETH_RSS_IPV6) {
		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_FLOW_HASH_IPV6,
				ICE_FLOW_SEG_HDR_GTPU_IP |
				ICE_FLOW_SEG_HDR_IPV6 |
				ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
		if (ret)
			PMD_DRV_LOG(ERR, "%s GTPU_IPV6 rss flow fail %d",
				    __func__, ret);

		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_FLOW_HASH_IPV6,
				ICE_FLOW_SEG_HDR_GTPU_EH |
				ICE_FLOW_SEG_HDR_IPV6 |
				ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
		if (ret)
			PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV6 rss flow fail %d",
				    __func__, ret);

		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_FLOW_HASH_IPV6,
				ICE_FLOW_SEG_HDR_PPPOE |
				ICE_FLOW_SEG_HDR_IPV6 |
				ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
		if (ret)
			PMD_DRV_LOG(ERR, "%s PPPoE_IPV6 rss flow fail %d",
				    __func__, ret);
	}

	if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) {
		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_UDP_IPV4,
				ICE_FLOW_SEG_HDR_GTPU_IP |
				ICE_FLOW_SEG_HDR_UDP |
				ICE_FLOW_SEG_HDR_IPV4 |
				ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
		if (ret)
			PMD_DRV_LOG(ERR, "%s GTPU_IPV4_UDP rss flow fail %d",
				    __func__, ret);

		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_UDP_IPV4,
				ICE_FLOW_SEG_HDR_GTPU_EH |
				ICE_FLOW_SEG_HDR_UDP |
				ICE_FLOW_SEG_HDR_IPV4 |
				ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
		if (ret)
			PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV4_UDP rss flow fail %d",
				    __func__, ret);

		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_UDP_IPV4,
				ICE_FLOW_SEG_HDR_PPPOE |
				ICE_FLOW_SEG_HDR_UDP |
				ICE_FLOW_SEG_HDR_IPV4 |
				ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
		if (ret)
			PMD_DRV_LOG(ERR, "%s PPPoE_IPV4_UDP rss flow fail %d",
				    __func__, ret);
	}

	if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) {
		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_UDP_IPV6,
				ICE_FLOW_SEG_HDR_GTPU_IP |
				ICE_FLOW_SEG_HDR_UDP |
				ICE_FLOW_SEG_HDR_IPV6 |
				ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
		if (ret)
			PMD_DRV_LOG(ERR, "%s GTPU_IPV6_UDP rss flow fail %d",
				    __func__, ret);

		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_UDP_IPV6,
				ICE_FLOW_SEG_HDR_GTPU_EH |
				ICE_FLOW_SEG_HDR_UDP |
				ICE_FLOW_SEG_HDR_IPV6 |
				ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
		if (ret)
			PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV6_UDP rss flow fail %d",
				    __func__, ret);

		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_UDP_IPV6,
				ICE_FLOW_SEG_HDR_PPPOE |
				ICE_FLOW_SEG_HDR_UDP |
				ICE_FLOW_SEG_HDR_IPV6 |
				ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
		if (ret)
			PMD_DRV_LOG(ERR, "%s PPPoE_IPV6_UDP rss flow fail %d",
				    __func__, ret);
	}

	if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) {
		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_TCP_IPV4,
				ICE_FLOW_SEG_HDR_GTPU_IP |
				ICE_FLOW_SEG_HDR_TCP |
				ICE_FLOW_SEG_HDR_IPV4 |
				ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
		if (ret)
			PMD_DRV_LOG(ERR, "%s GTPU_IPV4_TCP rss flow fail %d",
				    __func__, ret);

		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_TCP_IPV4,
				ICE_FLOW_SEG_HDR_GTPU_EH |
				ICE_FLOW_SEG_HDR_TCP |
				ICE_FLOW_SEG_HDR_IPV4 |
				ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
		if (ret)
			PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV4_TCP rss flow fail %d",
				    __func__, ret);

		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_TCP_IPV4,
				ICE_FLOW_SEG_HDR_PPPOE |
				ICE_FLOW_SEG_HDR_TCP |
				ICE_FLOW_SEG_HDR_IPV4 |
				ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
		if (ret)
			PMD_DRV_LOG(ERR, "%s PPPoE_IPV4_TCP rss flow fail %d",
				    __func__, ret);
	}

	if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) {
		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_TCP_IPV6,
				ICE_FLOW_SEG_HDR_GTPU_IP |
				ICE_FLOW_SEG_HDR_TCP |
				ICE_FLOW_SEG_HDR_IPV6 |
				ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
		if (ret)
			PMD_DRV_LOG(ERR, "%s GTPU_IPV6_TCP rss flow fail %d",
				    __func__, ret);

		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_TCP_IPV6,
				ICE_FLOW_SEG_HDR_GTPU_EH |
				ICE_FLOW_SEG_HDR_TCP |
				ICE_FLOW_SEG_HDR_IPV6 |
				ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
		if (ret)
			PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV6_TCP rss flow fail %d",
				    __func__, ret);

		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_TCP_IPV6,
				ICE_FLOW_SEG_HDR_PPPOE |
				ICE_FLOW_SEG_HDR_TCP |
				ICE_FLOW_SEG_HDR_IPV6 |
				ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
		if (ret)
			PMD_DRV_LOG(ERR, "%s PPPoE_IPV6_TCP rss flow fail %d",
				    __func__, ret);
	}

	if (rss_hf & ETH_RSS_NONFRAG_IPV4_SCTP) {
		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_SCTP_IPV4,
				ICE_FLOW_SEG_HDR_GTPU_IP |
				ICE_FLOW_SEG_HDR_SCTP |
				ICE_FLOW_SEG_HDR_IPV4 |
				ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
		if (ret)
			PMD_DRV_LOG(ERR, "%s GTPU_IPV4_SCTP rss flow fail %d",
				    __func__, ret);

		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_SCTP_IPV4,
				ICE_FLOW_SEG_HDR_GTPU_EH |
				ICE_FLOW_SEG_HDR_SCTP |
				ICE_FLOW_SEG_HDR_IPV4 |
				ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
		if (ret)
			PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV4_SCTP rss flow fail %d",
				    __func__, ret);
	}

	if (rss_hf & ETH_RSS_NONFRAG_IPV6_SCTP) {
		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_SCTP_IPV6,
				ICE_FLOW_SEG_HDR_GTPU_IP |
				ICE_FLOW_SEG_HDR_SCTP |
				ICE_FLOW_SEG_HDR_IPV6 |
				ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
		if (ret)
			PMD_DRV_LOG(ERR, "%s GTPU_IPV6_SCTP rss flow fail %d",
				    __func__, ret);

		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_SCTP_IPV6,
				ICE_FLOW_SEG_HDR_GTPU_EH |
				ICE_FLOW_SEG_HDR_SCTP |
				ICE_FLOW_SEG_HDR_IPV6 |
				ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
		if (ret)
			PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV6_SCTP rss flow fail %d",
				    __func__, ret);
	}
}
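/* One-time RSS initialization at configure time. As a sketch of the
 * defaults chosen below: when no key is supplied, a random key of
 * vsi->rss_key_size bytes is generated, and the LUT is filled round-robin,
 * e.g. with 4 Rx queues
 *   lut[i] = i % 4  ->  0, 1, 2, 3, 0, 1, ...
 * so flows hash evenly across queues until the application overrides the
 * table via rte_eth_dev_rss_reta_update().
 */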
static int ice_init_rss(struct ice_pf *pf)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_vsi *vsi = pf->main_vsi;
	struct rte_eth_dev *dev = pf->adapter->eth_dev;
	struct rte_eth_rss_conf *rss_conf;
	struct ice_aqc_get_set_rss_keys key;
	uint16_t i, nb_q;
	int ret = 0;
	bool is_safe_mode = pf->adapter->is_safe_mode;
	uint32_t reg;

	rss_conf = &dev->data->dev_conf.rx_adv_conf.rss_conf;
	nb_q = dev->data->nb_rx_queues;
	vsi->rss_key_size = ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE;
	vsi->rss_lut_size = pf->hash_lut_size;

	if (is_safe_mode) {
		PMD_DRV_LOG(WARNING, "RSS is not supported in safe mode\n");
		return 0;
	}

	if (!vsi->rss_key) {
		vsi->rss_key = rte_zmalloc(NULL,
					   vsi->rss_key_size, 0);
		if (vsi->rss_key == NULL) {
			PMD_DRV_LOG(ERR, "Failed to allocate memory for rss_key");
			return -ENOMEM;
		}
	}
	if (!vsi->rss_lut) {
		vsi->rss_lut = rte_zmalloc(NULL,
					   vsi->rss_lut_size, 0);
		if (vsi->rss_lut == NULL) {
			PMD_DRV_LOG(ERR, "Failed to allocate memory for rss_lut");
			rte_free(vsi->rss_key);
			vsi->rss_key = NULL;
			return -ENOMEM;
		}
	}
	/* configure RSS key */
	if (!rss_conf->rss_key) {
		/* Calculate the default hash key (fill exactly
		 * rss_key_size bytes; the previous bound wrote one byte
		 * past the buffer).
		 */
		for (i = 0; i < vsi->rss_key_size; i++)
			vsi->rss_key[i] = (uint8_t)rte_rand();
	} else {
		rte_memcpy(vsi->rss_key, rss_conf->rss_key,
			   RTE_MIN(rss_conf->rss_key_len,
				   vsi->rss_key_size));
	}
	rte_memcpy(key.standard_rss_key, vsi->rss_key, vsi->rss_key_size);
	ret = ice_aq_set_rss_key(hw, vsi->idx, &key);
	if (ret)
		goto out;

	/* init RSS LUT table */
	for (i = 0; i < vsi->rss_lut_size; i++)
		vsi->rss_lut[i] = i % nb_q;

	ret = ice_aq_set_rss_lut(hw, vsi->idx,
				 ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF,
				 vsi->rss_lut, vsi->rss_lut_size);
	if (ret)
		goto out;

	/* Enable registers for symmetric_toeplitz function. */
	reg = ICE_READ_REG(hw, VSIQF_HASH_CTL(vsi->vsi_id));
	reg = (reg & (~VSIQF_HASH_CTL_HASH_SCHEME_M)) |
	      (1 << VSIQF_HASH_CTL_HASH_SCHEME_S);
	ICE_WRITE_REG(hw, VSIQF_HASH_CTL(vsi->vsi_id), reg);

	/* RSS hash configuration */
	ice_rss_hash_set(pf, rss_conf->rss_hf);

	return 0;
out:
	rte_free(vsi->rss_key);
	vsi->rss_key = NULL;
	rte_free(vsi->rss_lut);
	vsi->rss_lut = NULL;
	return -EINVAL;
}
static int
ice_dev_configure(struct rte_eth_dev *dev)
{
	struct ice_adapter *ad =
		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	int ret;

	/* Initialize to TRUE. If any Rx queue doesn't meet the bulk
	 * allocation or vector Rx preconditions, it will be reset.
	 */
	ad->rx_bulk_alloc_allowed = true;
	ad->tx_simple_allowed = true;

	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;

	ret = ice_init_rss(pf);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to enable rss for PF");
		return ret;
	}

	return 0;
}
static void
__vsi_queues_bind_intr(struct ice_vsi *vsi, uint16_t msix_vect,
		       int base_queue, int nb_queue)
{
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	uint32_t val, val_tx;
	int i;

	for (i = 0; i < nb_queue; i++) {
		val = (msix_vect & QINT_RQCTL_MSIX_INDX_M) |
		      (0 << QINT_RQCTL_ITR_INDX_S) | QINT_RQCTL_CAUSE_ENA_M;
		val_tx = (msix_vect & QINT_TQCTL_MSIX_INDX_M) |
			 (0 << QINT_TQCTL_ITR_INDX_S) | QINT_TQCTL_CAUSE_ENA_M;

		PMD_DRV_LOG(INFO, "queue %d is binding to vect %d",
			    base_queue + i, msix_vect);

		/* set ITR0 value */
		ICE_WRITE_REG(hw, GLINT_ITR(0, msix_vect), 0x10);
		ICE_WRITE_REG(hw, QINT_RQCTL(base_queue + i), val);
		ICE_WRITE_REG(hw, QINT_TQCTL(base_queue + i), val_tx);
	}
}
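/* Bind all of a VSI's queues to MSI-X vectors. Two layouts are handled
 * below: with a single usable vector (e.g. igb_uio, or no separate
 * datapath vectors) every queue is tied to one vector in a single call;
 * with vfio-pci and enough event fds each queue gets its own vector, 1:1.
 * When datapath interrupts are enabled, the chosen vector is also recorded
 * in intr_handle->intr_vec[] for the ethdev Rx-interrupt API.
 */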
static void
ice_vsi_queues_bind_intr(struct ice_vsi *vsi)
{
	struct rte_eth_dev *dev = vsi->adapter->eth_dev;
	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	uint16_t msix_vect = vsi->msix_intr;
	uint16_t nb_msix = RTE_MIN(vsi->nb_msix, intr_handle->nb_efd);
	uint16_t queue_idx = 0;
	int record = 0;
	int i;

	/* clear Rx/Tx queue interrupt */
	for (i = 0; i < vsi->nb_used_qps; i++) {
		ICE_WRITE_REG(hw, QINT_TQCTL(vsi->base_queue + i), 0);
		ICE_WRITE_REG(hw, QINT_RQCTL(vsi->base_queue + i), 0);
	}

	/* PF bind interrupt */
	if (rte_intr_dp_is_en(intr_handle)) {
		queue_idx = 0;
		record = 1;
	}

	for (i = 0; i < vsi->nb_used_qps; i++) {
		if (nb_msix <= 1) {
			if (!rte_intr_allow_others(intr_handle))
				msix_vect = ICE_MISC_VEC_ID;

			/* uio mapping all queue to one msix_vect */
			__vsi_queues_bind_intr(vsi, msix_vect,
					       vsi->base_queue + i,
					       vsi->nb_used_qps - i);

			for (; !!record && i < vsi->nb_used_qps; i++)
				intr_handle->intr_vec[queue_idx + i] =
					msix_vect;
			break;
		}

		/* vfio 1:1 queue/msix_vect mapping */
		__vsi_queues_bind_intr(vsi, msix_vect,
				       vsi->base_queue + i, 1);

		if (!!record)
			intr_handle->intr_vec[queue_idx + i] = msix_vect;

		msix_vect++;
		nb_msix--;
	}
}
static void
ice_vsi_enable_queues_intr(struct ice_vsi *vsi)
{
	struct rte_eth_dev *dev = vsi->adapter->eth_dev;
	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	uint16_t msix_intr, i;

	if (rte_intr_allow_others(intr_handle)) {
		for (i = 0; i < vsi->nb_used_qps; i++) {
			msix_intr = vsi->msix_intr + i;
			ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr),
				      GLINT_DYN_CTL_INTENA_M |
				      GLINT_DYN_CTL_CLEARPBA_M |
				      GLINT_DYN_CTL_ITR_INDX_M |
				      GLINT_DYN_CTL_WB_ON_ITR_M);
		}
	} else {
		ICE_WRITE_REG(hw, GLINT_DYN_CTL(0),
			      GLINT_DYN_CTL_INTENA_M |
			      GLINT_DYN_CTL_CLEARPBA_M |
			      GLINT_DYN_CTL_ITR_INDX_M |
			      GLINT_DYN_CTL_WB_ON_ITR_M);
	}
}
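/* Rx-interrupt plumbing for the rte_eth_dev_rx_intr_*() API: size the
 * event fds to the number of Rx queues, allocate the queue-to-vector map,
 * then bind and unmask everything. Called from ice_dev_start().
 */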
static int
ice_rxq_intr_setup(struct rte_eth_dev *dev)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct ice_vsi *vsi = pf->main_vsi;
	uint32_t intr_vector = 0;

	rte_intr_disable(intr_handle);

	/* check and configure queue intr-vector mapping */
	if ((rte_intr_cap_multiple(intr_handle) ||
	     !RTE_ETH_DEV_SRIOV(dev).active) &&
	    dev->data->dev_conf.intr_conf.rxq != 0) {
		intr_vector = dev->data->nb_rx_queues;
		if (intr_vector > ICE_MAX_INTR_QUEUE_NUM) {
			PMD_DRV_LOG(ERR, "At most %d intr queues supported",
				    ICE_MAX_INTR_QUEUE_NUM);
			return -ENOTSUP;
		}
		if (rte_intr_efd_enable(intr_handle, intr_vector))
			return -1;
	}

	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
		intr_handle->intr_vec =
			rte_zmalloc(NULL, dev->data->nb_rx_queues * sizeof(int),
				    0);
		if (!intr_handle->intr_vec) {
			PMD_DRV_LOG(ERR,
				    "Failed to allocate %d rx_queues intr_vec",
				    dev->data->nb_rx_queues);
			return -ENOMEM;
		}
	}

	/* Map queues with MSIX interrupt */
	vsi->nb_used_qps = dev->data->nb_rx_queues;
	ice_vsi_queues_bind_intr(vsi);

	/* Enable interrupts for all the queues */
	ice_vsi_enable_queues_intr(vsi);

	rte_intr_enable(intr_handle);

	return 0;
}
static void
ice_get_init_link_status(struct rte_eth_dev *dev)
{
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false;
	struct ice_link_status link_status;
	int ret;

	ret = ice_aq_get_link_info(hw->port_info, enable_lse,
				   &link_status, NULL);
	if (ret != ICE_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to get link info");
		pf->init_link_up = false;
		return;
	}

	if (link_status.link_info & ICE_AQ_LINK_UP)
		pf->init_link_up = true;
}
static int
ice_dev_start(struct rte_eth_dev *dev)
{
	struct rte_eth_dev_data *data = dev->data;
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_vsi *vsi = pf->main_vsi;
	uint16_t nb_rxq = 0;
	uint16_t nb_txq, i;
	uint16_t max_frame_size;
	int mask, ret;

	/* program Tx queues' context in hardware */
	for (nb_txq = 0; nb_txq < data->nb_tx_queues; nb_txq++) {
		ret = ice_tx_queue_start(dev, nb_txq);
		if (ret) {
			PMD_DRV_LOG(ERR, "fail to start Tx queue %u", nb_txq);
			goto tx_err;
		}
	}

	/* program Rx queues' context in hardware */
	for (nb_rxq = 0; nb_rxq < data->nb_rx_queues; nb_rxq++) {
		ret = ice_rx_queue_start(dev, nb_rxq);
		if (ret) {
			PMD_DRV_LOG(ERR, "fail to start Rx queue %u", nb_rxq);
			goto rx_err;
		}
	}

	ice_set_rx_function(dev);
	ice_set_tx_function(dev);

	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
			ETH_VLAN_EXTEND_MASK;
	ret = ice_vlan_offload_set(dev, mask);
	if (ret) {
		PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
		goto rx_err;
	}

	/* enable Rx interrupt and map Rx queues to interrupt vectors */
	if (ice_rxq_intr_setup(dev))
		return -EIO;

	/* Enable receiving broadcast packets and transmitting packets */
	ret = ice_set_vsi_promisc(hw, vsi->idx,
				  ICE_PROMISC_BCAST_RX | ICE_PROMISC_BCAST_TX |
				  ICE_PROMISC_UCAST_TX | ICE_PROMISC_MCAST_TX,
				  0);
	if (ret != ICE_SUCCESS)
		PMD_DRV_LOG(INFO, "fail to set vsi broadcast");

	ret = ice_aq_set_event_mask(hw, hw->port_info->lport,
				    ((u16)(ICE_AQ_LINK_EVENT_LINK_FAULT |
				     ICE_AQ_LINK_EVENT_PHY_TEMP_ALARM |
				     ICE_AQ_LINK_EVENT_EXCESSIVE_ERRORS |
				     ICE_AQ_LINK_EVENT_SIGNAL_DETECT |
				     ICE_AQ_LINK_EVENT_AN_COMPLETED |
				     ICE_AQ_LINK_EVENT_PORT_TX_SUSPENDED)),
				     NULL);
	if (ret != ICE_SUCCESS)
		PMD_DRV_LOG(WARNING, "Fail to set phy mask");

	ice_get_init_link_status(dev);

	ice_dev_set_link_up(dev);

	/* Call get_link_info aq command to enable/disable LSE */
	ice_link_update(dev, 0);

	pf->adapter_stopped = false;

	/* Set the max frame size to the default value */
	max_frame_size = pf->dev_data->dev_conf.rxmode.max_rx_pkt_len ?
		pf->dev_data->dev_conf.rxmode.max_rx_pkt_len :
		ICE_FRAME_SIZE_MAX;

	/* Set the max frame size to HW */
	ice_aq_set_mac_cfg(hw, max_frame_size, NULL);

	return 0;

	/* stop the started queues if failed to start all queues */
rx_err:
	for (i = 0; i < nb_rxq; i++)
		ice_rx_queue_stop(dev, i);
tx_err:
	for (i = 0; i < nb_txq; i++)
		ice_tx_queue_stop(dev, i);

	return -EIO;
}
static int
ice_dev_reset(struct rte_eth_dev *dev)
{
	int ret;

	if (dev->data->sriov.active)
		return -ENOTSUP;

	ret = ice_dev_uninit(dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "failed to uninit device, status = %d", ret);
		return -ENXIO;
	}

	ret = ice_dev_init(dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "failed to init device, status = %d", ret);
		return -ENXIO;
	}

	return 0;
}
static int
ice_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_vsi *vsi = pf->main_vsi;
	struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
	bool is_safe_mode = pf->adapter->is_safe_mode;
	u64 phy_type_low;
	u64 phy_type_high;

	dev_info->min_rx_bufsize = ICE_BUF_SIZE_MIN;
	dev_info->max_rx_pktlen = ICE_FRAME_SIZE_MAX;
	dev_info->max_rx_queues = vsi->nb_qps;
	dev_info->max_tx_queues = vsi->nb_qps;
	dev_info->max_mac_addrs = vsi->max_macaddrs;
	dev_info->max_vfs = pci_dev->max_vfs;
	dev_info->max_mtu = dev_info->max_rx_pktlen - ICE_ETH_OVERHEAD;
	dev_info->min_mtu = RTE_ETHER_MIN_MTU;

	dev_info->rx_offload_capa =
		DEV_RX_OFFLOAD_VLAN_STRIP |
		DEV_RX_OFFLOAD_JUMBO_FRAME |
		DEV_RX_OFFLOAD_KEEP_CRC |
		DEV_RX_OFFLOAD_SCATTER |
		DEV_RX_OFFLOAD_VLAN_FILTER;
	dev_info->tx_offload_capa =
		DEV_TX_OFFLOAD_VLAN_INSERT |
		DEV_TX_OFFLOAD_TCP_TSO |
		DEV_TX_OFFLOAD_MULTI_SEGS |
		DEV_TX_OFFLOAD_MBUF_FAST_FREE;
	dev_info->flow_type_rss_offloads = 0;

	if (!is_safe_mode) {
		dev_info->rx_offload_capa |=
			DEV_RX_OFFLOAD_IPV4_CKSUM |
			DEV_RX_OFFLOAD_UDP_CKSUM |
			DEV_RX_OFFLOAD_TCP_CKSUM |
			DEV_RX_OFFLOAD_QINQ_STRIP |
			DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
			DEV_RX_OFFLOAD_VLAN_EXTEND |
			DEV_RX_OFFLOAD_RSS_HASH;
		dev_info->tx_offload_capa |=
			DEV_TX_OFFLOAD_QINQ_INSERT |
			DEV_TX_OFFLOAD_IPV4_CKSUM |
			DEV_TX_OFFLOAD_UDP_CKSUM |
			DEV_TX_OFFLOAD_TCP_CKSUM |
			DEV_TX_OFFLOAD_SCTP_CKSUM |
			DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
			DEV_TX_OFFLOAD_OUTER_UDP_CKSUM;
		dev_info->flow_type_rss_offloads |= ICE_RSS_OFFLOAD_ALL;
	}

	dev_info->rx_queue_offload_capa = 0;
	dev_info->tx_queue_offload_capa = 0;

	dev_info->reta_size = pf->hash_lut_size;
	dev_info->hash_key_size = (VSIQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_thresh = {
			.pthresh = ICE_DEFAULT_RX_PTHRESH,
			.hthresh = ICE_DEFAULT_RX_HTHRESH,
			.wthresh = ICE_DEFAULT_RX_WTHRESH,
		},
		.rx_free_thresh = ICE_DEFAULT_RX_FREE_THRESH,
		.rx_drop_en = 0,
		.offloads = 0,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_thresh = {
			.pthresh = ICE_DEFAULT_TX_PTHRESH,
			.hthresh = ICE_DEFAULT_TX_HTHRESH,
			.wthresh = ICE_DEFAULT_TX_WTHRESH,
		},
		.tx_free_thresh = ICE_DEFAULT_TX_FREE_THRESH,
		.tx_rs_thresh = ICE_DEFAULT_TX_RSBIT_THRESH,
		.offloads = 0,
	};

	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = ICE_MAX_RING_DESC,
		.nb_min = ICE_MIN_RING_DESC,
		.nb_align = ICE_ALIGN_RING_DESC,
	};

	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = ICE_MAX_RING_DESC,
		.nb_min = ICE_MIN_RING_DESC,
		.nb_align = ICE_ALIGN_RING_DESC,
	};

	dev_info->speed_capa = ETH_LINK_SPEED_10M |
			       ETH_LINK_SPEED_100M |
			       ETH_LINK_SPEED_1G |
			       ETH_LINK_SPEED_2_5G |
			       ETH_LINK_SPEED_5G |
			       ETH_LINK_SPEED_10G |
			       ETH_LINK_SPEED_20G |
			       ETH_LINK_SPEED_25G;

	phy_type_low = hw->port_info->phy.phy_type_low;
	phy_type_high = hw->port_info->phy.phy_type_high;

	if (ICE_PHY_TYPE_SUPPORT_50G(phy_type_low))
		dev_info->speed_capa |= ETH_LINK_SPEED_50G;

	if (ICE_PHY_TYPE_SUPPORT_100G_LOW(phy_type_low) ||
	    ICE_PHY_TYPE_SUPPORT_100G_HIGH(phy_type_high))
		dev_info->speed_capa |= ETH_LINK_SPEED_100G;

	dev_info->nb_rx_queues = dev->data->nb_rx_queues;
	dev_info->nb_tx_queues = dev->data->nb_tx_queues;

	dev_info->default_rxportconf.burst_size = ICE_RX_MAX_BURST;
	dev_info->default_txportconf.burst_size = ICE_TX_MAX_BURST;
	dev_info->default_rxportconf.nb_queues = 1;
	dev_info->default_txportconf.nb_queues = 1;
	dev_info->default_rxportconf.ring_size = ICE_BUF_SIZE_MIN;
	dev_info->default_txportconf.ring_size = ICE_BUF_SIZE_MIN;

	return 0;
}
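/* The two helpers below copy struct rte_eth_link to/from dev->data
 * atomically by treating it as a single 64-bit value; this relies on the
 * structure being 8 bytes and 8-byte aligned, which the ethdev layer
 * guarantees. rte_atomic64_cmpset() is used purely to get an atomic
 * 64-bit load/store; its return value is intentionally ignored.
 */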
static void
ice_atomic_read_link_status(struct rte_eth_dev *dev,
			    struct rte_eth_link *link)
{
	struct rte_eth_link *dst = link;
	struct rte_eth_link *src = &dev->data->dev_link;

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
				*(uint64_t *)src) == 0)
		return;
}

static void
ice_atomic_write_link_status(struct rte_eth_dev *dev,
			     struct rte_eth_link *link)
{
	struct rte_eth_link *dst = &dev->data->dev_link;
	struct rte_eth_link *src = link;

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
				*(uint64_t *)src) == 0)
		return;
}
static int
ice_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
#define CHECK_INTERVAL 100  /* 100ms */
#define MAX_REPEAT_TIME 10  /* 1s (10 * 100ms) in total */
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_link_status link_status;
	struct rte_eth_link link, old;
	int status;
	unsigned int rep_cnt = MAX_REPEAT_TIME;
	bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false;

	memset(&link, 0, sizeof(link));
	memset(&old, 0, sizeof(old));
	memset(&link_status, 0, sizeof(link_status));
	ice_atomic_read_link_status(dev, &old);

	do {
		/* Get link status information from hardware */
		status = ice_aq_get_link_info(hw->port_info, enable_lse,
					      &link_status, NULL);
		if (status != ICE_SUCCESS) {
			link.link_speed = ETH_SPEED_NUM_100M;
			link.link_duplex = ETH_LINK_FULL_DUPLEX;
			PMD_DRV_LOG(ERR, "Failed to get link info");
			goto out;
		}

		link.link_status = link_status.link_info & ICE_AQ_LINK_UP;
		if (!wait_to_complete || link.link_status)
			break;

		rte_delay_ms(CHECK_INTERVAL);
	} while (--rep_cnt);

	if (!link.link_status)
		goto out;

	/* Full-duplex operation at all supported speeds */
	link.link_duplex = ETH_LINK_FULL_DUPLEX;

	/* Parse the link status */
	switch (link_status.link_speed) {
	case ICE_AQ_LINK_SPEED_10MB:
		link.link_speed = ETH_SPEED_NUM_10M;
		break;
	case ICE_AQ_LINK_SPEED_100MB:
		link.link_speed = ETH_SPEED_NUM_100M;
		break;
	case ICE_AQ_LINK_SPEED_1000MB:
		link.link_speed = ETH_SPEED_NUM_1G;
		break;
	case ICE_AQ_LINK_SPEED_2500MB:
		link.link_speed = ETH_SPEED_NUM_2_5G;
		break;
	case ICE_AQ_LINK_SPEED_5GB:
		link.link_speed = ETH_SPEED_NUM_5G;
		break;
	case ICE_AQ_LINK_SPEED_10GB:
		link.link_speed = ETH_SPEED_NUM_10G;
		break;
	case ICE_AQ_LINK_SPEED_20GB:
		link.link_speed = ETH_SPEED_NUM_20G;
		break;
	case ICE_AQ_LINK_SPEED_25GB:
		link.link_speed = ETH_SPEED_NUM_25G;
		break;
	case ICE_AQ_LINK_SPEED_40GB:
		link.link_speed = ETH_SPEED_NUM_40G;
		break;
	case ICE_AQ_LINK_SPEED_50GB:
		link.link_speed = ETH_SPEED_NUM_50G;
		break;
	case ICE_AQ_LINK_SPEED_100GB:
		link.link_speed = ETH_SPEED_NUM_100G;
		break;
	case ICE_AQ_LINK_SPEED_UNKNOWN:
		PMD_DRV_LOG(ERR, "Unknown link speed");
		link.link_speed = ETH_SPEED_NUM_UNKNOWN;
		break;
	default:
		PMD_DRV_LOG(ERR, "No valid link speed reported");
		link.link_speed = ETH_SPEED_NUM_NONE;
		break;
	}

	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
			      ETH_LINK_SPEED_FIXED);

out:
	ice_atomic_write_link_status(dev, &link);
	if (link.link_status == old.link_status)
		return -1;

	return 0;
}
/* Force the physical link state by getting the current PHY capabilities from
 * hardware and setting the PHY config based on the determined capabilities. If
 * the link changes, a link event will be triggered because both the Enable
 * Automatic Link Update and LESM Enable bits are set when setting the PHY
 * capabilities.
 */
static enum ice_status
ice_force_phys_link_state(struct ice_hw *hw, bool link_up)
{
	struct ice_aqc_set_phy_cfg_data cfg = { 0 };
	struct ice_aqc_get_phy_caps_data *pcaps;
	struct ice_port_info *pi;
	enum ice_status status;

	if (!hw || !hw->port_info)
		return ICE_ERR_PARAM;

	pi = hw->port_info;

	pcaps = (struct ice_aqc_get_phy_caps_data *)
		ice_malloc(hw, sizeof(*pcaps));
	if (!pcaps)
		return ICE_ERR_NO_MEMORY;

	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
				     NULL);
	if (status)
		goto out;

	/* No change in link */
	if (link_up == !!(pcaps->caps & ICE_AQC_PHY_EN_LINK) &&
	    link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP))
		goto out;

	cfg.phy_type_low = pcaps->phy_type_low;
	cfg.phy_type_high = pcaps->phy_type_high;
	cfg.caps = pcaps->caps | ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
	cfg.low_power_ctrl_an = pcaps->low_power_ctrl_an;
	cfg.eee_cap = pcaps->eee_cap;
	cfg.eeer_value = pcaps->eeer_value;
	cfg.link_fec_opt = pcaps->link_fec_options;
	if (link_up)
		cfg.caps |= ICE_AQ_PHY_ENA_LINK;
	else
		cfg.caps &= ~ICE_AQ_PHY_ENA_LINK;

	status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL);

out:
	ice_free(hw, pcaps);
	return status;
}
static int
ice_dev_set_link_up(struct rte_eth_dev *dev)
{
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	return ice_force_phys_link_state(hw, true);
}

static int
ice_dev_set_link_down(struct rte_eth_dev *dev)
{
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	return ice_force_phys_link_state(hw, false);
}
static int
ice_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct rte_eth_dev_data *dev_data = pf->dev_data;
	uint32_t frame_size = mtu + ICE_ETH_OVERHEAD;

	/* check if mtu is within the allowed range */
	if (mtu < RTE_ETHER_MIN_MTU || frame_size > ICE_FRAME_SIZE_MAX)
		return -EINVAL;

	/* mtu setting is forbidden if the port is started */
	if (dev_data->dev_started) {
		PMD_DRV_LOG(ERR,
			    "port %d must be stopped before configuration",
			    dev_data->port_id);
		return -EBUSY;
	}

	if (frame_size > RTE_ETHER_MAX_LEN)
		dev_data->dev_conf.rxmode.offloads |=
			DEV_RX_OFFLOAD_JUMBO_FRAME;
	else
		dev_data->dev_conf.rxmode.offloads &=
			~DEV_RX_OFFLOAD_JUMBO_FRAME;

	dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;

	return 0;
}
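/* Note on the jumbo toggle above: frame_size counts the full on-wire frame,
 * i.e. the MTU plus ICE_ETH_OVERHEAD (which in this driver covers the L2
 * header, CRC and VLAN tag space), so DEV_RX_OFFLOAD_JUMBO_FRAME flips
 * based on the resulting frame length rather than on the raw MTU value.
 */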
static int ice_macaddr_set(struct rte_eth_dev *dev,
			   struct rte_ether_addr *mac_addr)
{
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_vsi *vsi = pf->main_vsi;
	struct ice_mac_filter *f;
	uint8_t flags = 0;
	int ret;

	if (!rte_is_valid_assigned_ether_addr(mac_addr)) {
		PMD_DRV_LOG(ERR, "Tried to set invalid MAC address.");
		return -EINVAL;
	}

	TAILQ_FOREACH(f, &vsi->mac_list, next) {
		if (rte_is_same_ether_addr(&pf->dev_addr, &f->mac_info.mac_addr))
			break;
	}

	if (!f) {
		PMD_DRV_LOG(ERR, "Failed to find filter for default mac");
		return -EIO;
	}

	ret = ice_remove_mac_filter(vsi, &f->mac_info.mac_addr);
	if (ret != ICE_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to delete mac filter");
		return -EIO;
	}
	ret = ice_add_mac_filter(vsi, mac_addr);
	if (ret != ICE_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to add mac filter");
		return -EIO;
	}
	rte_ether_addr_copy(mac_addr, &pf->dev_addr);

	flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL;
	ret = ice_aq_manage_mac_write(hw, mac_addr->addr_bytes, flags, NULL);
	if (ret != ICE_SUCCESS)
		PMD_DRV_LOG(ERR, "Failed to set manage mac");

	return 0;
}
/* Add a MAC address, and update filters */
static int
ice_macaddr_add(struct rte_eth_dev *dev,
		struct rte_ether_addr *mac_addr,
		__rte_unused uint32_t index,
		__rte_unused uint32_t pool)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_vsi *vsi = pf->main_vsi;
	int ret;

	ret = ice_add_mac_filter(vsi, mac_addr);
	if (ret != ICE_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to add MAC filter");
		return -EINVAL;
	}

	return ICE_SUCCESS;
}

/* Remove a MAC address, and update filters */
static void
ice_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_vsi *vsi = pf->main_vsi;
	struct rte_eth_dev_data *data = dev->data;
	struct rte_ether_addr *macaddr;
	int ret;

	macaddr = &data->mac_addrs[index];
	ret = ice_remove_mac_filter(vsi, macaddr);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to remove MAC filter");
		return;
	}
}
static int
ice_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_vsi *vsi = pf->main_vsi;
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (on) {
		ret = ice_add_vlan_filter(vsi, vlan_id);
		if (ret < 0) {
			PMD_DRV_LOG(ERR, "Failed to add vlan filter");
			return -EINVAL;
		}
	} else {
		ret = ice_remove_vlan_filter(vsi, vlan_id);
		if (ret < 0) {
			PMD_DRV_LOG(ERR, "Failed to remove vlan filter");
			return -EINVAL;
		}
	}

	return 0;
}
/* Configure vlan filter on or off */
static int
ice_vsi_config_vlan_filter(struct ice_vsi *vsi, bool on)
{
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	struct ice_vsi_ctx ctxt;
	uint8_t sec_flags, sw_flags2;
	int ret = 0;

	sec_flags = ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
		    ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S;
	sw_flags2 = ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;

	if (on) {
		vsi->info.sec_flags |= sec_flags;
		vsi->info.sw_flags2 |= sw_flags2;
	} else {
		vsi->info.sec_flags &= ~sec_flags;
		vsi->info.sw_flags2 &= ~sw_flags2;
	}
	vsi->info.sw_id = hw->port_info->sw_id;
	(void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
	ctxt.info.valid_sections =
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_SW_VALID |
				 ICE_AQ_VSI_PROP_SECURITY_VALID);
	ctxt.vsi_num = vsi->vsi_id;

	ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
	if (ret) {
		PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan rx pruning",
			    on ? "enable" : "disable");
		return -EINVAL;
	}

	vsi->info.valid_sections |=
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_SW_VALID |
				 ICE_AQ_VSI_PROP_SECURITY_VALID);

	/* consistent with other drivers: allow untagged packets when the
	 * vlan filter is on
	 */
	if (on)
		ret = ice_add_vlan_filter(vsi, 0);
	else
		ret = ice_remove_vlan_filter(vsi, 0);

	return 0;
}
static int
ice_vsi_config_vlan_stripping(struct ice_vsi *vsi, bool on)
{
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	struct ice_vsi_ctx ctxt;
	uint8_t vlan_flags;
	int ret = 0;

	/* Check if it has been already on or off */
	if (vsi->info.valid_sections &
	    rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID)) {
		if (on) {
			if ((vsi->info.vlan_flags &
			     ICE_AQ_VSI_VLAN_EMOD_M) ==
			    ICE_AQ_VSI_VLAN_EMOD_STR_BOTH)
				return 0; /* already on */
		} else {
			if ((vsi->info.vlan_flags &
			     ICE_AQ_VSI_VLAN_EMOD_M) ==
			    ICE_AQ_VSI_VLAN_EMOD_NOTHING)
				return 0; /* already off */
		}
	}

	if (on)
		vlan_flags = ICE_AQ_VSI_VLAN_EMOD_STR_BOTH;
	else
		vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING;
	vsi->info.vlan_flags &= ~(ICE_AQ_VSI_VLAN_EMOD_M);
	vsi->info.vlan_flags |= vlan_flags;
	(void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
	ctxt.info.valid_sections =
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID);
	ctxt.vsi_num = vsi->vsi_id;
	ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
	if (ret) {
		PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan stripping",
			    on ? "enable" : "disable");
		return -EINVAL;
	}

	vsi->info.valid_sections |=
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID);

	return ret;
}
static int
ice_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_vsi *vsi = pf->main_vsi;
	struct rte_eth_rxmode *rxmode;

	rxmode = &dev->data->dev_conf.rxmode;
	if (mask & ETH_VLAN_FILTER_MASK) {
		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
			ice_vsi_config_vlan_filter(vsi, true);
		else
			ice_vsi_config_vlan_filter(vsi, false);
	}

	if (mask & ETH_VLAN_STRIP_MASK) {
		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
			ice_vsi_config_vlan_stripping(vsi, true);
		else
			ice_vsi_config_vlan_stripping(vsi, false);
	}

	if (mask & ETH_VLAN_EXTEND_MASK) {
		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
			ice_vsi_config_double_vlan(vsi, true);
		else
			ice_vsi_config_double_vlan(vsi, false);
	}

	return 0;
}
static int
ice_get_rss_lut(struct ice_vsi *vsi, uint8_t *lut, uint16_t lut_size)
{
	struct ice_pf *pf = ICE_VSI_TO_PF(vsi);
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	int ret;

	if (!lut)
		return -EINVAL;

	if (pf->flags & ICE_FLAG_RSS_AQ_CAPABLE) {
		ret = ice_aq_get_rss_lut(hw, vsi->idx,
			ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF, lut, lut_size);
		if (ret) {
			PMD_DRV_LOG(ERR, "Failed to get RSS lookup table");
			return -EINVAL;
		}
	} else {
		/* each 32-bit PFQF_HLUT register packs 4 one-byte entries,
		 * so iterate in 32-bit (not 64-bit) strides
		 */
		uint32_t *lut_dw = (uint32_t *)lut;
		uint16_t i, lut_size_dw = lut_size / 4;

		for (i = 0; i < lut_size_dw; i++)
			lut_dw[i] = ICE_READ_REG(hw, PFQF_HLUT(i));
	}

	return 0;
}
static int
ice_set_rss_lut(struct ice_vsi *vsi, uint8_t *lut, uint16_t lut_size)
{
	struct ice_pf *pf;
	struct ice_hw *hw;
	int ret;

	if (!vsi || !lut)
		return -EINVAL;

	pf = ICE_VSI_TO_PF(vsi);
	hw = ICE_VSI_TO_HW(vsi);

	if (pf->flags & ICE_FLAG_RSS_AQ_CAPABLE) {
		ret = ice_aq_set_rss_lut(hw, vsi->idx,
			ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF, lut, lut_size);
		if (ret) {
			PMD_DRV_LOG(ERR, "Failed to set RSS lookup table");
			return -EINVAL;
		}
	} else {
		/* see ice_get_rss_lut(): 4 LUT entries per 32-bit register */
		uint32_t *lut_dw = (uint32_t *)lut;
		uint16_t i, lut_size_dw = lut_size / 4;

		for (i = 0; i < lut_size_dw; i++)
			ICE_WRITE_REG(hw, PFQF_HLUT(i), lut_dw[i]);
	}

	return 0;
}
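/* LUT access path note: when the PF lacks ICE_FLAG_RSS_AQ_CAPABLE the LUT
 * is read/written directly through the PFQF_HLUT registers instead of the
 * admin queue; each 32-bit register holds four one-byte queue indices,
 * hence the lut_size / 4 register count in the helpers above.
 */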
static int
ice_rss_reta_update(struct rte_eth_dev *dev,
		    struct rte_eth_rss_reta_entry64 *reta_conf,
		    uint16_t reta_size)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	uint16_t i, lut_size = pf->hash_lut_size;
	uint16_t idx, shift;
	uint8_t *lut;
	int ret;

	if (reta_size != ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128 &&
	    reta_size != ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512 &&
	    reta_size != ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K) {
		PMD_DRV_LOG(ERR,
			    "The size of the configured hash lookup table (%d) "
			    "doesn't match what the hardware supports "
			    "(128, 512, 2048)",
			    reta_size);
		return -EINVAL;
	}

	/* It MUST use the current LUT size to get the RSS lookup table,
	 * otherwise it will fail with a -100 error code.
	 */
	lut = rte_zmalloc(NULL, RTE_MAX(reta_size, lut_size), 0);
	if (!lut) {
		PMD_DRV_LOG(ERR, "No memory can be allocated");
		return -ENOMEM;
	}
	ret = ice_get_rss_lut(pf->main_vsi, lut, lut_size);
	if (ret)
		goto out;

	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		if (reta_conf[idx].mask & (1ULL << shift))
			lut[i] = reta_conf[idx].reta[shift];
	}
	ret = ice_set_rss_lut(pf->main_vsi, lut, reta_size);
	if (ret == 0 && lut_size != reta_size) {
		PMD_DRV_LOG(INFO,
			    "The size of hash lookup table is changed from (%d) to (%d)",
			    lut_size, reta_size);
		pf->hash_lut_size = reta_size;
	}

out:
	rte_free(lut);

	return ret;
}
static int
ice_rss_reta_query(struct rte_eth_dev *dev,
		   struct rte_eth_rss_reta_entry64 *reta_conf,
		   uint16_t reta_size)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	uint16_t i, lut_size = pf->hash_lut_size;
	uint16_t idx, shift;
	uint8_t *lut;
	int ret;

	if (reta_size != lut_size) {
		PMD_DRV_LOG(ERR,
			    "The size of the configured hash lookup table (%d) "
			    "doesn't match what the hardware supports (%d)",
			    reta_size, lut_size);
		return -EINVAL;
	}

	lut = rte_zmalloc(NULL, reta_size, 0);
	if (!lut) {
		PMD_DRV_LOG(ERR, "No memory can be allocated");
		return -ENOMEM;
	}

	ret = ice_get_rss_lut(pf->main_vsi, lut, reta_size);
	if (ret)
		goto out;

	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		if (reta_conf[idx].mask & (1ULL << shift))
			reta_conf[idx].reta[shift] = lut[i];
	}

out:
	rte_free(lut);

	return ret;
}
static int
ice_set_rss_key(struct ice_vsi *vsi, uint8_t *key, uint8_t key_len)
{
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	int ret = 0;

	if (!key || key_len == 0) {
		PMD_DRV_LOG(DEBUG, "No key to be configured");
		return 0;
	} else if (key_len != (VSIQF_HKEY_MAX_INDEX + 1) *
		   sizeof(uint32_t)) {
		PMD_DRV_LOG(ERR, "Invalid key length %u", key_len);
		return -EINVAL;
	}

	struct ice_aqc_get_set_rss_keys *key_dw =
		(struct ice_aqc_get_set_rss_keys *)key;

	ret = ice_aq_set_rss_key(hw, vsi->idx, key_dw);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to configure RSS key via AQ");
		ret = -EINVAL;
	}

	return ret;
}
static int
ice_get_rss_key(struct ice_vsi *vsi, uint8_t *key, uint8_t *key_len)
{
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	int ret;

	if (!key || !key_len)
		return -EINVAL;

	ret = ice_aq_get_rss_key
		(hw, vsi->idx,
		 (struct ice_aqc_get_set_rss_keys *)key);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to get RSS key via AQ");
		return -EINVAL;
	}
	*key_len = (VSIQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);

	return 0;
}
static int
ice_rss_hash_update(struct rte_eth_dev *dev,
		    struct rte_eth_rss_conf *rss_conf)
{
	enum ice_status status = ICE_SUCCESS;
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_vsi *vsi = pf->main_vsi;

	/* set hash key */
	status = ice_set_rss_key(vsi, rss_conf->rss_key, rss_conf->rss_key_len);
	if (status)
		return status;

	if (rss_conf->rss_hf == 0)
		return 0;

	/* RSS hash configuration */
	ice_rss_hash_set(pf, rss_conf->rss_hf);

	return 0;
}
static int
ice_rss_hash_conf_get(struct rte_eth_dev *dev,
		      struct rte_eth_rss_conf *rss_conf)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_vsi *vsi = pf->main_vsi;

	ice_get_rss_key(vsi, rss_conf->rss_key,
			&rss_conf->rss_key_len);

	/* TODO: default set to 0 as hf config is not supported now */
	rss_conf->rss_hf = 0;
	return 0;
}
static int
ice_promisc_enable(struct rte_eth_dev *dev)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_vsi *vsi = pf->main_vsi;
	enum ice_status status;
	uint8_t pmask;
	int ret = 0;

	pmask = ICE_PROMISC_UCAST_RX | ICE_PROMISC_UCAST_TX |
		ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX;

	status = ice_set_vsi_promisc(hw, vsi->idx, pmask, 0);
	switch (status) {
	case ICE_ERR_ALREADY_EXISTS:
		PMD_DRV_LOG(DEBUG, "Promisc mode has already been enabled");
		/* fall-through: already enabled is not an error */
	case ICE_SUCCESS:
		break;
	default:
		PMD_DRV_LOG(ERR, "Failed to enable promisc, err=%d", status);
		ret = -EAGAIN;
	}

	return ret;
}
static int
ice_promisc_disable(struct rte_eth_dev *dev)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_vsi *vsi = pf->main_vsi;
	enum ice_status status;
	uint8_t pmask;
	int ret = 0;

	pmask = ICE_PROMISC_UCAST_RX | ICE_PROMISC_UCAST_TX |
		ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX;

	status = ice_clear_vsi_promisc(hw, vsi->idx, pmask, 0);
	if (status != ICE_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to clear promisc, err=%d", status);
		ret = -EAGAIN;
	}

	return ret;
}
static int
ice_allmulti_enable(struct rte_eth_dev *dev)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_vsi *vsi = pf->main_vsi;
	enum ice_status status;
	uint8_t pmask;
	int ret = 0;

	pmask = ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX;

	status = ice_set_vsi_promisc(hw, vsi->idx, pmask, 0);
	switch (status) {
	case ICE_ERR_ALREADY_EXISTS:
		PMD_DRV_LOG(DEBUG, "Allmulti has already been enabled");
		/* fall-through: already enabled is not an error */
	case ICE_SUCCESS:
		break;
	default:
		PMD_DRV_LOG(ERR, "Failed to enable allmulti, err=%d", status);
		ret = -EAGAIN;
	}

	return ret;
}
static int
ice_allmulti_disable(struct rte_eth_dev *dev)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_vsi *vsi = pf->main_vsi;
	enum ice_status status;
	uint8_t pmask;
	int ret = 0;

	if (dev->data->promiscuous == 1)
		return 0; /* must remain in all_multicast mode */

	pmask = ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX;

	status = ice_clear_vsi_promisc(hw, vsi->idx, pmask, 0);
	if (status != ICE_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to clear allmulti, err=%d", status);
		ret = -EAGAIN;
	}

	return ret;
}
static int ice_rx_queue_intr_enable(struct rte_eth_dev *dev,
				    uint16_t queue_id)
{
	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t val;
	uint16_t msix_intr;

	msix_intr = intr_handle->intr_vec[queue_id];

	val = GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
	      GLINT_DYN_CTL_ITR_INDX_M;
	val &= ~GLINT_DYN_CTL_WB_ON_ITR_M;

	ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr), val);
	rte_intr_ack(&pci_dev->intr_handle);

	return 0;
}
static int ice_rx_queue_intr_disable(struct rte_eth_dev *dev,
				     uint16_t queue_id)
{
	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint16_t msix_intr;

	msix_intr = intr_handle->intr_vec[queue_id];

	ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr), GLINT_DYN_CTL_WB_ON_ITR_M);

	return 0;
}
static int
ice_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
{
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	u8 ver, patch;
	u16 build;
	int ret;

	ver = hw->flash.orom.major;
	patch = hw->flash.orom.patch;
	build = hw->flash.orom.build;

	ret = snprintf(fw_version, fw_size,
		       "%x.%02x 0x%08x %d.%d.%d",
		       hw->flash.nvm.major,
		       hw->flash.nvm.minor,
		       hw->flash.nvm.eetrack,
		       ver, build, patch);

	/* add the size of '\0' */
	ret += 1;
	if (fw_size < (u32)ret)
		return ret;
	else
		return 0;
}
static int
ice_vsi_vlan_pvid_set(struct ice_vsi *vsi, struct ice_vsi_vlan_pvid_info *info)
{
	struct ice_hw *hw;
	struct ice_vsi_ctx ctxt;
	uint8_t vlan_flags = 0;
	int ret;

	if (!vsi || !info) {
		PMD_DRV_LOG(ERR, "invalid parameters");
		return -EINVAL;
	}

	if (info->on) {
		vsi->info.pvid = info->config.pvid;
		/**
		 * If insert pvid is enabled, only tagged pkts are
		 * allowed to be sent out.
		 */
		vlan_flags = ICE_AQ_VSI_PVLAN_INSERT_PVID |
			     ICE_AQ_VSI_VLAN_MODE_UNTAGGED;
	} else {
		vsi->info.pvid = 0;
		if (info->config.reject.tagged == 0)
			vlan_flags |= ICE_AQ_VSI_VLAN_MODE_TAGGED;

		if (info->config.reject.untagged == 0)
			vlan_flags |= ICE_AQ_VSI_VLAN_MODE_UNTAGGED;
	}
	vsi->info.vlan_flags &= ~(ICE_AQ_VSI_PVLAN_INSERT_PVID |
				  ICE_AQ_VSI_VLAN_MODE_M);
	vsi->info.vlan_flags |= vlan_flags;
	memset(&ctxt, 0, sizeof(ctxt));
	rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
	ctxt.info.valid_sections =
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID);
	ctxt.vsi_num = vsi->vsi_id;

	hw = ICE_VSI_TO_HW(vsi);
	ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
	if (ret != ICE_SUCCESS) {
		PMD_DRV_LOG(ERR,
			    "update VSI for VLAN insert failed, err %d",
			    ret);
		return -EINVAL;
	}

	vsi->info.valid_sections |=
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID);

	return 0;
}
static int
ice_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_vsi *vsi = pf->main_vsi;
	struct rte_eth_dev_data *data = pf->dev_data;
	struct ice_vsi_vlan_pvid_info info;
	int ret;

	memset(&info, 0, sizeof(info));
	info.on = on;
	if (info.on) {
		info.config.pvid = pvid;
	} else {
		info.config.reject.tagged =
			data->dev_conf.txmode.hw_vlan_reject_tagged;
		info.config.reject.untagged =
			data->dev_conf.txmode.hw_vlan_reject_untagged;
	}

	ret = ice_vsi_vlan_pvid_set(vsi, &info);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Failed to set pvid.");
		return -EINVAL;
	}

	return 0;
}
static int
ice_get_eeprom_length(struct rte_eth_dev *dev)
{
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	return hw->flash.flash_size;
}
static int
ice_get_eeprom(struct rte_eth_dev *dev,
	       struct rte_dev_eeprom_info *eeprom)
{
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	enum ice_status status = ICE_SUCCESS;
	uint8_t *data = eeprom->data;

	eeprom->magic = hw->vendor_id | (hw->device_id << 16);

	status = ice_acquire_nvm(hw, ICE_RES_READ);
	if (status) {
		PMD_DRV_LOG(ERR, "acquire nvm failed.");
		return -EIO;
	}

	status = ice_read_flat_nvm(hw, eeprom->offset, &eeprom->length,
				   data, false);

	ice_release_nvm(hw);

	if (status) {
		PMD_DRV_LOG(ERR, "EEPROM read failed.");
		return -EIO;
	}

	return 0;
}
static void
ice_stat_update_32(struct ice_hw *hw, uint32_t reg, bool offset_loaded,
		   uint64_t *offset, uint64_t *stat)
{
	uint64_t new_data = (uint64_t)ICE_READ_REG(hw, reg);

	if (!offset_loaded)
		*offset = new_data;
	if (new_data >= *offset)
		*stat = (uint64_t)(new_data - *offset);
	else
		*stat = (uint64_t)((new_data +
				    ((uint64_t)1 << ICE_32_BIT_WIDTH)) - *offset);
}
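/* Rollover sketch (hypothetical values): the hardware counter is 32 bits
 * wide, so with *offset = 0xFFFFFF00 and a fresh read of 0x00000010 the
 * else-branch computes (0x10 + (1ULL << 32)) - 0xFFFFFF00 = 0x110, i.e.
 * 272 events since the last snapshot despite the wrap.
 */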
static void
ice_stat_update_40(struct ice_hw *hw, uint32_t hireg, uint32_t loreg,
		   bool offset_loaded, uint64_t *offset, uint64_t *stat)
{
	uint64_t new_data = (uint64_t)ICE_READ_REG(hw, loreg);

	new_data |= (uint64_t)(ICE_READ_REG(hw, hireg) & ICE_8_BIT_MASK) <<
		    ICE_32_BIT_WIDTH;
	if (!offset_loaded)
		*offset = new_data;
	if (new_data >= *offset)
		*stat = new_data - *offset;
	else
		*stat = (uint64_t)((new_data +
				    ((uint64_t)1 << ICE_40_BIT_WIDTH)) - *offset);
	*stat &= ICE_40_BIT_MASK;
}
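/* Both helpers implement the PMD's offset-snapshot pattern: the hardware
 * counters are never cleared, so a software "reset" just records the current
 * raw value in *offset and later reads report the delta. The 40-bit variant
 * assembles a split counter, low 32 bits from loreg and the top 8 bits from
 * hireg, and masks the result back into the 40-bit range.
 */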
/* Get all the statistics of a VSI */
static void
ice_update_vsi_stats(struct ice_vsi *vsi)
{
	struct ice_eth_stats *oes = &vsi->eth_stats_offset;
	struct ice_eth_stats *nes = &vsi->eth_stats;
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	int idx = rte_le_to_cpu_16(vsi->vsi_id);

	ice_stat_update_40(hw, GLV_GORCH(idx), GLV_GORCL(idx),
			   vsi->offset_loaded, &oes->rx_bytes,
			   &nes->rx_bytes);
	ice_stat_update_40(hw, GLV_UPRCH(idx), GLV_UPRCL(idx),
			   vsi->offset_loaded, &oes->rx_unicast,
			   &nes->rx_unicast);
	ice_stat_update_40(hw, GLV_MPRCH(idx), GLV_MPRCL(idx),
			   vsi->offset_loaded, &oes->rx_multicast,
			   &nes->rx_multicast);
	ice_stat_update_40(hw, GLV_BPRCH(idx), GLV_BPRCL(idx),
			   vsi->offset_loaded, &oes->rx_broadcast,
			   &nes->rx_broadcast);
	/* enlarge the limitation when rx_bytes overflowed */
	if (vsi->offset_loaded) {
		if (ICE_RXTX_BYTES_LOW(vsi->old_rx_bytes) > nes->rx_bytes)
			nes->rx_bytes += (uint64_t)1 << ICE_40_BIT_WIDTH;
		nes->rx_bytes += ICE_RXTX_BYTES_HIGH(vsi->old_rx_bytes);
	}
	vsi->old_rx_bytes = nes->rx_bytes;
	/* exclude CRC bytes */
	nes->rx_bytes -= (nes->rx_unicast + nes->rx_multicast +
			  nes->rx_broadcast) * RTE_ETHER_CRC_LEN;

	ice_stat_update_32(hw, GLV_RDPC(idx), vsi->offset_loaded,
			   &oes->rx_discards, &nes->rx_discards);
	/* GLV_REPC not supported */
	/* GLV_RMPC not supported */
	ice_stat_update_32(hw, GLSWID_RUPP(idx), vsi->offset_loaded,
			   &oes->rx_unknown_protocol,
			   &nes->rx_unknown_protocol);
	ice_stat_update_40(hw, GLV_GOTCH(idx), GLV_GOTCL(idx),
			   vsi->offset_loaded, &oes->tx_bytes,
			   &nes->tx_bytes);
	ice_stat_update_40(hw, GLV_UPTCH(idx), GLV_UPTCL(idx),
			   vsi->offset_loaded, &oes->tx_unicast,
			   &nes->tx_unicast);
	ice_stat_update_40(hw, GLV_MPTCH(idx), GLV_MPTCL(idx),
			   vsi->offset_loaded, &oes->tx_multicast,
			   &nes->tx_multicast);
	ice_stat_update_40(hw, GLV_BPTCH(idx), GLV_BPTCL(idx),
			   vsi->offset_loaded, &oes->tx_broadcast,
			   &nes->tx_broadcast);
	/* GLV_TDPC not supported */
	ice_stat_update_32(hw, GLV_TEPC(idx), vsi->offset_loaded,
			   &oes->tx_errors, &nes->tx_errors);
	/* enlarge the limitation when tx_bytes overflowed */
	if (vsi->offset_loaded) {
		if (ICE_RXTX_BYTES_LOW(vsi->old_tx_bytes) > nes->tx_bytes)
			nes->tx_bytes += (uint64_t)1 << ICE_40_BIT_WIDTH;
		nes->tx_bytes += ICE_RXTX_BYTES_HIGH(vsi->old_tx_bytes);
	}
	vsi->old_tx_bytes = nes->tx_bytes;
	vsi->offset_loaded = true;

	PMD_DRV_LOG(DEBUG, "************** VSI[%u] stats start **************",
		    vsi->vsi_id);
	PMD_DRV_LOG(DEBUG, "rx_bytes: %"PRIu64"", nes->rx_bytes);
	PMD_DRV_LOG(DEBUG, "rx_unicast: %"PRIu64"", nes->rx_unicast);
	PMD_DRV_LOG(DEBUG, "rx_multicast: %"PRIu64"", nes->rx_multicast);
	PMD_DRV_LOG(DEBUG, "rx_broadcast: %"PRIu64"", nes->rx_broadcast);
	PMD_DRV_LOG(DEBUG, "rx_discards: %"PRIu64"", nes->rx_discards);
	PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
		    nes->rx_unknown_protocol);
	PMD_DRV_LOG(DEBUG, "tx_bytes: %"PRIu64"", nes->tx_bytes);
	PMD_DRV_LOG(DEBUG, "tx_unicast: %"PRIu64"", nes->tx_unicast);
	PMD_DRV_LOG(DEBUG, "tx_multicast: %"PRIu64"", nes->tx_multicast);
	PMD_DRV_LOG(DEBUG, "tx_broadcast: %"PRIu64"", nes->tx_broadcast);
	PMD_DRV_LOG(DEBUG, "tx_discards: %"PRIu64"", nes->tx_discards);
	PMD_DRV_LOG(DEBUG, "tx_errors: %"PRIu64"", nes->tx_errors);
	PMD_DRV_LOG(DEBUG, "************** VSI[%u] stats end ****************",
		    vsi->vsi_id);
}
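/* Overflow note for the 40-bit byte counters above: old_rx_bytes and
 * old_tx_bytes carry the previous 64-bit reading, and the
 * ICE_RXTX_BYTES_LOW()/_HIGH() macros are assumed to split such a value at
 * bit 40. Hypothetical example: if old_rx_bytes was (1ULL << 40) + 0x100 and
 * the masked hardware counter now reads 0x80, the low part wrapped
 * (0x100 > 0x80), so 1ULL << 40 is added and the saved high part
 * (1ULL << 40) restored, yielding (2ULL << 40) + 0x80 and keeping the byte
 * counts monotonic beyond the 40-bit hardware width.
 */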
static void
ice_read_stats_registers(struct ice_pf *pf, struct ice_hw *hw)
{
	struct ice_hw_port_stats *ns = &pf->stats; /* new stats */
	struct ice_hw_port_stats *os = &pf->stats_offset; /* old stats */

	/* Get statistics of struct ice_eth_stats */
	ice_stat_update_40(hw, GLPRT_GORCH(hw->port_info->lport),
			   GLPRT_GORCL(hw->port_info->lport),
			   pf->offset_loaded, &os->eth.rx_bytes,
			   &ns->eth.rx_bytes);
	ice_stat_update_40(hw, GLPRT_UPRCH(hw->port_info->lport),
			   GLPRT_UPRCL(hw->port_info->lport),
			   pf->offset_loaded, &os->eth.rx_unicast,
			   &ns->eth.rx_unicast);
	ice_stat_update_40(hw, GLPRT_MPRCH(hw->port_info->lport),
			   GLPRT_MPRCL(hw->port_info->lport),
			   pf->offset_loaded, &os->eth.rx_multicast,
			   &ns->eth.rx_multicast);
	ice_stat_update_40(hw, GLPRT_BPRCH(hw->port_info->lport),
			   GLPRT_BPRCL(hw->port_info->lport),
			   pf->offset_loaded, &os->eth.rx_broadcast,
			   &ns->eth.rx_broadcast);
	ice_stat_update_32(hw, PRTRPB_RDPC,
			   pf->offset_loaded, &os->eth.rx_discards,
			   &ns->eth.rx_discards);
	/* enlarge the limitation when rx_bytes overflowed */
	if (pf->offset_loaded) {
		if (ICE_RXTX_BYTES_LOW(pf->old_rx_bytes) > ns->eth.rx_bytes)
			ns->eth.rx_bytes += (uint64_t)1 << ICE_40_BIT_WIDTH;
		ns->eth.rx_bytes += ICE_RXTX_BYTES_HIGH(pf->old_rx_bytes);
	}
	pf->old_rx_bytes = ns->eth.rx_bytes;

	/* Workaround: CRC size should not be included in byte statistics,
	 * so subtract RTE_ETHER_CRC_LEN from the byte counter for each rx
	 * packet.
	 */
	ns->eth.rx_bytes -= (ns->eth.rx_unicast + ns->eth.rx_multicast +
			     ns->eth.rx_broadcast) * RTE_ETHER_CRC_LEN;
	/* GLPRT_REPC not supported */
	/* GLPRT_RMPC not supported */
	ice_stat_update_32(hw, GLSWID_RUPP(hw->port_info->lport),
			   pf->offset_loaded,
			   &os->eth.rx_unknown_protocol,
			   &ns->eth.rx_unknown_protocol);
	ice_stat_update_40(hw, GLPRT_GOTCH(hw->port_info->lport),
			   GLPRT_GOTCL(hw->port_info->lport),
			   pf->offset_loaded, &os->eth.tx_bytes,
			   &ns->eth.tx_bytes);
	ice_stat_update_40(hw, GLPRT_UPTCH(hw->port_info->lport),
			   GLPRT_UPTCL(hw->port_info->lport),
			   pf->offset_loaded, &os->eth.tx_unicast,
			   &ns->eth.tx_unicast);
	ice_stat_update_40(hw, GLPRT_MPTCH(hw->port_info->lport),
			   GLPRT_MPTCL(hw->port_info->lport),
			   pf->offset_loaded, &os->eth.tx_multicast,
			   &ns->eth.tx_multicast);
	ice_stat_update_40(hw, GLPRT_BPTCH(hw->port_info->lport),
			   GLPRT_BPTCL(hw->port_info->lport),
			   pf->offset_loaded, &os->eth.tx_broadcast,
			   &ns->eth.tx_broadcast);
	/* enlarge the limitation when tx_bytes overflowed */
	if (pf->offset_loaded) {
		if (ICE_RXTX_BYTES_LOW(pf->old_tx_bytes) > ns->eth.tx_bytes)
			ns->eth.tx_bytes += (uint64_t)1 << ICE_40_BIT_WIDTH;
		ns->eth.tx_bytes += ICE_RXTX_BYTES_HIGH(pf->old_tx_bytes);
	}
	pf->old_tx_bytes = ns->eth.tx_bytes;
	/* exclude CRC bytes from the tx byte counter as well */
	ns->eth.tx_bytes -= (ns->eth.tx_unicast + ns->eth.tx_multicast +
			     ns->eth.tx_broadcast) * RTE_ETHER_CRC_LEN;

	/* GLPRT_TEPC not supported */
	/* additional port specific stats */
	ice_stat_update_32(hw, GLPRT_TDOLD(hw->port_info->lport),
			   pf->offset_loaded, &os->tx_dropped_link_down,
			   &ns->tx_dropped_link_down);
	ice_stat_update_32(hw, GLPRT_CRCERRS(hw->port_info->lport),
			   pf->offset_loaded, &os->crc_errors,
			   &ns->crc_errors);
	ice_stat_update_32(hw, GLPRT_ILLERRC(hw->port_info->lport),
			   pf->offset_loaded, &os->illegal_bytes,
			   &ns->illegal_bytes);
	/* GLPRT_ERRBC not supported */
	ice_stat_update_32(hw, GLPRT_MLFC(hw->port_info->lport),
			   pf->offset_loaded, &os->mac_local_faults,
			   &ns->mac_local_faults);
	ice_stat_update_32(hw, GLPRT_MRFC(hw->port_info->lport),
			   pf->offset_loaded, &os->mac_remote_faults,
			   &ns->mac_remote_faults);

	ice_stat_update_32(hw, GLPRT_RLEC(hw->port_info->lport),
			   pf->offset_loaded, &os->rx_len_errors,
			   &ns->rx_len_errors);

	ice_stat_update_32(hw, GLPRT_LXONRXC(hw->port_info->lport),
			   pf->offset_loaded, &os->link_xon_rx,
			   &ns->link_xon_rx);
	ice_stat_update_32(hw, GLPRT_LXOFFRXC(hw->port_info->lport),
			   pf->offset_loaded, &os->link_xoff_rx,
			   &ns->link_xoff_rx);
	ice_stat_update_32(hw, GLPRT_LXONTXC(hw->port_info->lport),
			   pf->offset_loaded, &os->link_xon_tx,
			   &ns->link_xon_tx);
	ice_stat_update_32(hw, GLPRT_LXOFFTXC(hw->port_info->lport),
			   pf->offset_loaded, &os->link_xoff_tx,
			   &ns->link_xoff_tx);
	ice_stat_update_40(hw, GLPRT_PRC64H(hw->port_info->lport),
			   GLPRT_PRC64L(hw->port_info->lport),
			   pf->offset_loaded, &os->rx_size_64,
			   &ns->rx_size_64);
	ice_stat_update_40(hw, GLPRT_PRC127H(hw->port_info->lport),
			   GLPRT_PRC127L(hw->port_info->lport),
			   pf->offset_loaded, &os->rx_size_127,
			   &ns->rx_size_127);
	ice_stat_update_40(hw, GLPRT_PRC255H(hw->port_info->lport),
			   GLPRT_PRC255L(hw->port_info->lport),
			   pf->offset_loaded, &os->rx_size_255,
			   &ns->rx_size_255);
	ice_stat_update_40(hw, GLPRT_PRC511H(hw->port_info->lport),
			   GLPRT_PRC511L(hw->port_info->lport),
			   pf->offset_loaded, &os->rx_size_511,
			   &ns->rx_size_511);
	ice_stat_update_40(hw, GLPRT_PRC1023H(hw->port_info->lport),
			   GLPRT_PRC1023L(hw->port_info->lport),
			   pf->offset_loaded, &os->rx_size_1023,
			   &ns->rx_size_1023);
	ice_stat_update_40(hw, GLPRT_PRC1522H(hw->port_info->lport),
			   GLPRT_PRC1522L(hw->port_info->lport),
			   pf->offset_loaded, &os->rx_size_1522,
			   &ns->rx_size_1522);
	ice_stat_update_40(hw, GLPRT_PRC9522H(hw->port_info->lport),
			   GLPRT_PRC9522L(hw->port_info->lport),
			   pf->offset_loaded, &os->rx_size_big,
			   &ns->rx_size_big);
	ice_stat_update_32(hw, GLPRT_RUC(hw->port_info->lport),
			   pf->offset_loaded, &os->rx_undersize,
			   &ns->rx_undersize);
	ice_stat_update_32(hw, GLPRT_RFC(hw->port_info->lport),
			   pf->offset_loaded, &os->rx_fragments,
			   &ns->rx_fragments);
	ice_stat_update_32(hw, GLPRT_ROC(hw->port_info->lport),
			   pf->offset_loaded, &os->rx_oversize,
			   &ns->rx_oversize);
	ice_stat_update_32(hw, GLPRT_RJC(hw->port_info->lport),
			   pf->offset_loaded, &os->rx_jabber,
			   &ns->rx_jabber);
	ice_stat_update_40(hw, GLPRT_PTC64H(hw->port_info->lport),
			   GLPRT_PTC64L(hw->port_info->lport),
			   pf->offset_loaded, &os->tx_size_64,
			   &ns->tx_size_64);
	ice_stat_update_40(hw, GLPRT_PTC127H(hw->port_info->lport),
			   GLPRT_PTC127L(hw->port_info->lport),
			   pf->offset_loaded, &os->tx_size_127,
			   &ns->tx_size_127);
	ice_stat_update_40(hw, GLPRT_PTC255H(hw->port_info->lport),
			   GLPRT_PTC255L(hw->port_info->lport),
			   pf->offset_loaded, &os->tx_size_255,
			   &ns->tx_size_255);
	ice_stat_update_40(hw, GLPRT_PTC511H(hw->port_info->lport),
			   GLPRT_PTC511L(hw->port_info->lport),
			   pf->offset_loaded, &os->tx_size_511,
			   &ns->tx_size_511);
	ice_stat_update_40(hw, GLPRT_PTC1023H(hw->port_info->lport),
			   GLPRT_PTC1023L(hw->port_info->lport),
			   pf->offset_loaded, &os->tx_size_1023,
			   &ns->tx_size_1023);
	ice_stat_update_40(hw, GLPRT_PTC1522H(hw->port_info->lport),
			   GLPRT_PTC1522L(hw->port_info->lport),
			   pf->offset_loaded, &os->tx_size_1522,
			   &ns->tx_size_1522);
	ice_stat_update_40(hw, GLPRT_PTC9522H(hw->port_info->lport),
			   GLPRT_PTC9522L(hw->port_info->lport),
			   pf->offset_loaded, &os->tx_size_big,
			   &ns->tx_size_big);
	/* GLPRT_MSPDC not supported */
	/* GLPRT_XEC not supported */

	pf->offset_loaded = true;

	if (pf->main_vsi)
		ice_update_vsi_stats(pf->main_vsi);
}
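/* All GLPRT_* registers above are indexed by physical port (lport), whereas
 * the GLV_* registers read in ice_update_vsi_stats() are per-VSI, so the PF
 * ends up with both a port-wide and a main-VSI view of the same traffic.
 */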
/* Get all statistics of a port */
static int
ice_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_hw_port_stats *ns = &pf->stats; /* new stats */

	/* call read registers - updates values, now write them to struct */
	ice_read_stats_registers(pf, hw);

	stats->ipackets = pf->main_vsi->eth_stats.rx_unicast +
			  pf->main_vsi->eth_stats.rx_multicast +
			  pf->main_vsi->eth_stats.rx_broadcast -
			  pf->main_vsi->eth_stats.rx_discards;
	stats->opackets = ns->eth.tx_unicast +
			  ns->eth.tx_multicast +
			  ns->eth.tx_broadcast;
	stats->ibytes = pf->main_vsi->eth_stats.rx_bytes;
	stats->obytes = ns->eth.tx_bytes;
	stats->oerrors = ns->eth.tx_errors +
			 pf->main_vsi->eth_stats.tx_errors;

	/* Rx Errors */
	stats->imissed = ns->eth.rx_discards +
			 pf->main_vsi->eth_stats.rx_discards;
	stats->ierrors = ns->crc_errors +
			 ns->rx_undersize +
			 ns->rx_oversize + ns->rx_fragments + ns->rx_jabber;

	PMD_DRV_LOG(DEBUG, "*************** PF stats start *****************");
	PMD_DRV_LOG(DEBUG, "rx_bytes: %"PRIu64"", ns->eth.rx_bytes);
	PMD_DRV_LOG(DEBUG, "rx_unicast: %"PRIu64"", ns->eth.rx_unicast);
	PMD_DRV_LOG(DEBUG, "rx_multicast:%"PRIu64"", ns->eth.rx_multicast);
	PMD_DRV_LOG(DEBUG, "rx_broadcast:%"PRIu64"", ns->eth.rx_broadcast);
	PMD_DRV_LOG(DEBUG, "rx_discards:%"PRIu64"", ns->eth.rx_discards);
	PMD_DRV_LOG(DEBUG, "vsi rx_discards:%"PRIu64"",
		    pf->main_vsi->eth_stats.rx_discards);
	PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
		    ns->eth.rx_unknown_protocol);
	PMD_DRV_LOG(DEBUG, "tx_bytes: %"PRIu64"", ns->eth.tx_bytes);
	PMD_DRV_LOG(DEBUG, "tx_unicast: %"PRIu64"", ns->eth.tx_unicast);
	PMD_DRV_LOG(DEBUG, "tx_multicast:%"PRIu64"", ns->eth.tx_multicast);
	PMD_DRV_LOG(DEBUG, "tx_broadcast:%"PRIu64"", ns->eth.tx_broadcast);
	PMD_DRV_LOG(DEBUG, "tx_discards:%"PRIu64"", ns->eth.tx_discards);
	PMD_DRV_LOG(DEBUG, "vsi tx_discards:%"PRIu64"",
		    pf->main_vsi->eth_stats.tx_discards);
	PMD_DRV_LOG(DEBUG, "tx_errors: %"PRIu64"", ns->eth.tx_errors);
	PMD_DRV_LOG(DEBUG, "tx_dropped_link_down: %"PRIu64"",
		    ns->tx_dropped_link_down);
	PMD_DRV_LOG(DEBUG, "crc_errors: %"PRIu64"", ns->crc_errors);
	PMD_DRV_LOG(DEBUG, "illegal_bytes: %"PRIu64"",
		    ns->illegal_bytes);
	PMD_DRV_LOG(DEBUG, "error_bytes: %"PRIu64"", ns->error_bytes);
	PMD_DRV_LOG(DEBUG, "mac_local_faults: %"PRIu64"",
		    ns->mac_local_faults);
	PMD_DRV_LOG(DEBUG, "mac_remote_faults: %"PRIu64"",
		    ns->mac_remote_faults);
	PMD_DRV_LOG(DEBUG, "link_xon_rx: %"PRIu64"", ns->link_xon_rx);
	PMD_DRV_LOG(DEBUG, "link_xoff_rx: %"PRIu64"", ns->link_xoff_rx);
	PMD_DRV_LOG(DEBUG, "link_xon_tx: %"PRIu64"", ns->link_xon_tx);
	PMD_DRV_LOG(DEBUG, "link_xoff_tx: %"PRIu64"", ns->link_xoff_tx);
	PMD_DRV_LOG(DEBUG, "rx_size_64: %"PRIu64"", ns->rx_size_64);
	PMD_DRV_LOG(DEBUG, "rx_size_127: %"PRIu64"", ns->rx_size_127);
	PMD_DRV_LOG(DEBUG, "rx_size_255: %"PRIu64"", ns->rx_size_255);
	PMD_DRV_LOG(DEBUG, "rx_size_511: %"PRIu64"", ns->rx_size_511);
	PMD_DRV_LOG(DEBUG, "rx_size_1023: %"PRIu64"", ns->rx_size_1023);
	PMD_DRV_LOG(DEBUG, "rx_size_1522: %"PRIu64"", ns->rx_size_1522);
	PMD_DRV_LOG(DEBUG, "rx_size_big: %"PRIu64"", ns->rx_size_big);
	PMD_DRV_LOG(DEBUG, "rx_undersize: %"PRIu64"", ns->rx_undersize);
	PMD_DRV_LOG(DEBUG, "rx_fragments: %"PRIu64"", ns->rx_fragments);
	PMD_DRV_LOG(DEBUG, "rx_oversize: %"PRIu64"", ns->rx_oversize);
	PMD_DRV_LOG(DEBUG, "rx_jabber: %"PRIu64"", ns->rx_jabber);
	PMD_DRV_LOG(DEBUG, "tx_size_64: %"PRIu64"", ns->tx_size_64);
	PMD_DRV_LOG(DEBUG, "tx_size_127: %"PRIu64"", ns->tx_size_127);
	PMD_DRV_LOG(DEBUG, "tx_size_255: %"PRIu64"", ns->tx_size_255);
	PMD_DRV_LOG(DEBUG, "tx_size_511: %"PRIu64"", ns->tx_size_511);
	PMD_DRV_LOG(DEBUG, "tx_size_1023: %"PRIu64"", ns->tx_size_1023);
	PMD_DRV_LOG(DEBUG, "tx_size_1522: %"PRIu64"", ns->tx_size_1522);
	PMD_DRV_LOG(DEBUG, "tx_size_big: %"PRIu64"", ns->tx_size_big);
	PMD_DRV_LOG(DEBUG, "rx_len_errors: %"PRIu64"", ns->rx_len_errors);
	PMD_DRV_LOG(DEBUG, "************* PF stats end ****************");

	return 0;
}
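/* Usage sketch (application side, illustrative):
 *
 *	struct rte_eth_stats stats;
 *
 *	if (rte_eth_stats_get(port_id, &stats) == 0)
 *		printf("rx %" PRIu64 " pkts / %" PRIu64 " bytes\n",
 *		       stats.ipackets, stats.ibytes);
 */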
/* Reset the statistics */
static int
ice_stats_reset(struct rte_eth_dev *dev)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* Mark PF and VSI stats to update the offset, aka "reset" */
	pf->offset_loaded = false;
	if (pf->main_vsi)
		pf->main_vsi->offset_loaded = false;

	/* read the stats, reading current register values into offset */
	ice_read_stats_registers(pf, hw);

	return 0;
}
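/* Application side this is rte_eth_stats_reset(port_id). Note that only the
 * software offsets move; the hardware registers themselves keep counting,
 * which is exactly what the offset-snapshot helpers above rely on.
 */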
static unsigned int
ice_xstats_calc_num(void)
{
	unsigned int num = ICE_NB_ETH_XSTATS + ICE_NB_HW_PORT_XSTATS;

	return num;
}
static int
ice_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
	       unsigned int n)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	unsigned int i;
	unsigned int count;
	struct ice_hw_port_stats *hw_stats = &pf->stats;

	count = ice_xstats_calc_num();
	if (n < count)
		return count;

	ice_read_stats_registers(pf, hw);
	if (!xstats)
		return 0;

	count = 0;

	/* Get stats from ice_eth_stats struct */
	for (i = 0; i < ICE_NB_ETH_XSTATS; i++) {
		xstats[count].value =
			*(uint64_t *)((char *)&hw_stats->eth +
				      ice_stats_strings[i].offset);
		xstats[count].id = count;
		count++;
	}

	/* Get individual stats from ice_hw_port struct */
	for (i = 0; i < ICE_NB_HW_PORT_XSTATS; i++) {
		xstats[count].value =
			*(uint64_t *)((char *)hw_stats +
				      ice_hw_port_strings[i].offset);
		xstats[count].id = count;
		count++;
	}

	return count;
}
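/* Usage sketch (application side, illustrative) of the usual two-call
 * pattern these callbacks serve:
 *
 *	int n = rte_eth_xstats_get(port_id, NULL, 0);
 *	struct rte_eth_xstat *xs = malloc(n * sizeof(*xs));
 *
 *	rte_eth_xstats_get(port_id, xs, n);
 *
 * The first call only reports how many entries are needed; the second fills
 * them, matched by index to the names from ice_xstats_get_names() below.
 */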
static int ice_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
				struct rte_eth_xstat_name *xstats_names,
				__rte_unused unsigned int limit)
{
	unsigned int count = 0;
	unsigned int i;

	if (!xstats_names)
		return ice_xstats_calc_num();

	/* Note: limit checked in rte_eth_xstats_names() */

	/* Get stats from ice_eth_stats struct */
	for (i = 0; i < ICE_NB_ETH_XSTATS; i++) {
		strlcpy(xstats_names[count].name, ice_stats_strings[i].name,
			sizeof(xstats_names[count].name));
		count++;
	}

	/* Get individual stats from ice_hw_port struct */
	for (i = 0; i < ICE_NB_HW_PORT_XSTATS; i++) {
		strlcpy(xstats_names[count].name, ice_hw_port_strings[i].name,
			sizeof(xstats_names[count].name));
		count++;
	}

	return count;
}
static int
ice_dev_filter_ctrl(struct rte_eth_dev *dev,
		    enum rte_filter_type filter_type,
		    enum rte_filter_op filter_op,
		    void *arg)
{
	int ret = 0;

	if (!dev)
		return -EINVAL;

	switch (filter_type) {
	case RTE_ETH_FILTER_GENERIC:
		if (filter_op != RTE_ETH_FILTER_GET)
			return -EINVAL;
		*(const void **)arg = &ice_flow_ops;
		break;
	default:
		PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
			    filter_type);
		ret = -EINVAL;
		break;
	}

	return ret;
}
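/* RTE_ETH_FILTER_GENERIC is how the rte_flow library discovers a PMD's flow
 * ops: rte_flow_create()/rte_flow_validate()/rte_flow_destroy() on this port
 * all dispatch through the ice_flow_ops table returned above.
 */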
/* Add UDP tunneling port */
static int
ice_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
			    struct rte_eth_udp_tunnel *udp_tunnel)
{
	int ret = 0;
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (udp_tunnel == NULL)
		return -EINVAL;

	switch (udp_tunnel->prot_type) {
	case RTE_TUNNEL_TYPE_VXLAN:
		ret = ice_create_tunnel(hw, TNL_VXLAN, udp_tunnel->udp_port);
		break;
	default:
		PMD_DRV_LOG(ERR, "Invalid tunnel type");
		ret = -EINVAL;
		break;
	}

	return ret;
}
/* Delete UDP tunneling port */
static int
ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
			    struct rte_eth_udp_tunnel *udp_tunnel)
{
	int ret = 0;
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (udp_tunnel == NULL)
		return -EINVAL;

	switch (udp_tunnel->prot_type) {
	case RTE_TUNNEL_TYPE_VXLAN:
		ret = ice_destroy_tunnel(hw, udp_tunnel->udp_port, 0);
		break;
	default:
		PMD_DRV_LOG(ERR, "Invalid tunnel type");
		ret = -EINVAL;
		break;
	}

	return ret;
}
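/* Usage sketch (application side, illustrative):
 *
 *	struct rte_eth_udp_tunnel tunnel = {
 *		.udp_port = 4789,
 *		.prot_type = RTE_TUNNEL_TYPE_VXLAN,
 *	};
 *
 *	rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);
 *
 * 4789 is the IANA-assigned VXLAN port; any other tunnel type is rejected
 * by the switch statements above.
 */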
static int
ice_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	      struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
					     sizeof(struct ice_adapter),
					     ice_dev_init);
}

static int
ice_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, ice_dev_uninit);
}
static struct rte_pci_driver rte_ice_pmd = {
	.id_table = pci_id_ice_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = ice_pci_probe,
	.remove = ice_pci_remove,
};
/**
 * Driver initialization routine.
 * Invoked once at EAL init time.
 * Register itself as the [Poll Mode] Driver of PCI devices.
 */
RTE_PMD_REGISTER_PCI(net_ice, rte_ice_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_ice, pci_id_ice_map);
RTE_PMD_REGISTER_KMOD_DEP(net_ice, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(net_ice,
			      ICE_PROTO_XTR_ARG "=[queue:]<vlan|ipv4|ipv6|ipv6_flow|tcp|ip_offset>"
			      ICE_SAFE_MODE_SUPPORT_ARG "=<0|1>"
			      ICE_PIPELINE_MODE_SUPPORT_ARG "=<0|1>");
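/* Devargs sketch (illustrative queue/protocol mapping):
 *
 *	-a 0000:18:00.0,proto_xtr='[(1,2-3):tcp,4:vlan]',safe-mode-support=1
 *
 * would extract TCP metadata on queues 1-3, VLAN metadata on queue 4, and
 * fall back to safe mode if the DDP package cannot be loaded.
 */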
RTE_LOG_REGISTER(ice_logtype_init, pmd.net.ice.init, NOTICE);
RTE_LOG_REGISTER(ice_logtype_driver, pmd.net.ice.driver, NOTICE);
#ifdef RTE_LIBRTE_ICE_DEBUG_RX
RTE_LOG_REGISTER(ice_logtype_rx, pmd.net.ice.rx, DEBUG);
#endif
#ifdef RTE_LIBRTE_ICE_DEBUG_TX
RTE_LOG_REGISTER(ice_logtype_tx, pmd.net.ice.tx, DEBUG);
#endif
#ifdef RTE_LIBRTE_ICE_DEBUG_TX_FREE
RTE_LOG_REGISTER(ice_logtype_tx_free, pmd.net.ice.tx_free, DEBUG);
#endif