/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation
 */
#include <rte_string_fns.h>
#include <rte_ethdev_pci.h>

#include <stdio.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>

#include "base/ice_sched.h"
#include "base/ice_flow.h"
#include "base/ice_dcb.h"
#include "base/ice_common.h"
#include "base/ice_adminq_cmd.h"

#include "rte_pmd_ice.h"
#include "ice_ethdev.h"
#include "ice_rxtx.h"
#include "ice_generic_flow.h"
#define ICE_SAFE_MODE_SUPPORT_ARG "safe-mode-support"
#define ICE_PIPELINE_MODE_SUPPORT_ARG "pipeline-mode-support"
#define ICE_FLOW_MARK_SUPPORT_ARG "flow-mark-support"
#define ICE_PROTO_XTR_ARG "proto_xtr"
static const char * const ice_valid_args[] = {
	ICE_SAFE_MODE_SUPPORT_ARG,
	ICE_PIPELINE_MODE_SUPPORT_ARG,
	ICE_FLOW_MARK_SUPPORT_ARG,
	ICE_PROTO_XTR_ARG,
	NULL
};
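/*
 * Illustrative EAL usage of the keys above (device address and queue IDs
 * are examples only; see doc/guides/nics/ice.rst for details):
 *   -w 18:00.0,safe-mode-support=1
 *   -w 18:00.0,proto_xtr='[(1,2-3,8-9):tcp,10-13:vlan]'
 */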
static const struct rte_mbuf_dynfield ice_proto_xtr_metadata_param = {
	.name = "ice_dynfield_proto_xtr_metadata",
	.size = sizeof(uint32_t),
	.align = __alignof__(uint32_t),
	.flags = 0,
};

struct proto_xtr_ol_flag {
	const struct rte_mbuf_dynflag param;
	uint64_t *ol_flag;
	bool required;
};
static struct proto_xtr_ol_flag ice_proto_xtr_ol_flag_params[] = {
	[PROTO_XTR_VLAN] = {
		.param = { .name = "ice_dynflag_proto_xtr_vlan" },
		.ol_flag = &rte_net_ice_dynflag_proto_xtr_vlan_mask },
	[PROTO_XTR_IPV4] = {
		.param = { .name = "ice_dynflag_proto_xtr_ipv4" },
		.ol_flag = &rte_net_ice_dynflag_proto_xtr_ipv4_mask },
	[PROTO_XTR_IPV6] = {
		.param = { .name = "ice_dynflag_proto_xtr_ipv6" },
		.ol_flag = &rte_net_ice_dynflag_proto_xtr_ipv6_mask },
	[PROTO_XTR_IPV6_FLOW] = {
		.param = { .name = "ice_dynflag_proto_xtr_ipv6_flow" },
		.ol_flag = &rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask },
	[PROTO_XTR_TCP] = {
		.param = { .name = "ice_dynflag_proto_xtr_tcp" },
		.ol_flag = &rte_net_ice_dynflag_proto_xtr_tcp_mask },
};
#define ICE_DFLT_OUTER_TAG_TYPE ICE_AQ_VSI_OUTER_TAG_VLAN_9100

#define ICE_OS_DEFAULT_PKG_NAME "ICE OS Default Package"
#define ICE_COMMS_PKG_NAME      "ICE COMMS Package"
#define ICE_MAX_RES_DESC_NUM    1024
static int ice_dev_configure(struct rte_eth_dev *dev);
static int ice_dev_start(struct rte_eth_dev *dev);
static void ice_dev_stop(struct rte_eth_dev *dev);
static void ice_dev_close(struct rte_eth_dev *dev);
static int ice_dev_reset(struct rte_eth_dev *dev);
static int ice_dev_info_get(struct rte_eth_dev *dev,
			    struct rte_eth_dev_info *dev_info);
static int ice_link_update(struct rte_eth_dev *dev,
			   int wait_to_complete);
static int ice_dev_set_link_up(struct rte_eth_dev *dev);
static int ice_dev_set_link_down(struct rte_eth_dev *dev);

static int ice_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
static int ice_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static int ice_rss_reta_update(struct rte_eth_dev *dev,
			       struct rte_eth_rss_reta_entry64 *reta_conf,
			       uint16_t reta_size);
static int ice_rss_reta_query(struct rte_eth_dev *dev,
			      struct rte_eth_rss_reta_entry64 *reta_conf,
			      uint16_t reta_size);
static int ice_rss_hash_update(struct rte_eth_dev *dev,
			       struct rte_eth_rss_conf *rss_conf);
static int ice_rss_hash_conf_get(struct rte_eth_dev *dev,
				 struct rte_eth_rss_conf *rss_conf);
static int ice_promisc_enable(struct rte_eth_dev *dev);
static int ice_promisc_disable(struct rte_eth_dev *dev);
static int ice_allmulti_enable(struct rte_eth_dev *dev);
static int ice_allmulti_disable(struct rte_eth_dev *dev);
static int ice_vlan_filter_set(struct rte_eth_dev *dev,
			       uint16_t vlan_id,
			       int on);
static int ice_macaddr_set(struct rte_eth_dev *dev,
			   struct rte_ether_addr *mac_addr);
static int ice_macaddr_add(struct rte_eth_dev *dev,
			   struct rte_ether_addr *mac_addr,
			   __rte_unused uint32_t index,
			   uint32_t pool);
static void ice_macaddr_remove(struct rte_eth_dev *dev, uint32_t index);
static int ice_rx_queue_intr_enable(struct rte_eth_dev *dev,
				    uint16_t queue_id);
static int ice_rx_queue_intr_disable(struct rte_eth_dev *dev,
				     uint16_t queue_id);
static int ice_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
			      size_t fw_size);
static int ice_vlan_pvid_set(struct rte_eth_dev *dev,
			     uint16_t pvid, int on);
static int ice_get_eeprom_length(struct rte_eth_dev *dev);
static int ice_get_eeprom(struct rte_eth_dev *dev,
			  struct rte_dev_eeprom_info *eeprom);
static int ice_stats_get(struct rte_eth_dev *dev,
			 struct rte_eth_stats *stats);
static int ice_stats_reset(struct rte_eth_dev *dev);
static int ice_xstats_get(struct rte_eth_dev *dev,
			  struct rte_eth_xstat *xstats, unsigned int n);
static int ice_xstats_get_names(struct rte_eth_dev *dev,
				struct rte_eth_xstat_name *xstats_names,
				unsigned int limit);
static int ice_dev_filter_ctrl(struct rte_eth_dev *dev,
			       enum rte_filter_type filter_type,
			       enum rte_filter_op filter_op,
			       void *arg);
static int ice_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
			struct rte_eth_udp_tunnel *udp_tunnel);
static int ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
			struct rte_eth_udp_tunnel *udp_tunnel);
static const struct rte_pci_id pci_id_ice_map[] = {
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_BACKPLANE) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810_XXV_BACKPLANE) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810_XXV_QSFP) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810_XXV_SFP) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_BACKPLANE) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_QSFP) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_SFP) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_10G_BASE_T) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_SGMII) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_BACKPLANE) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_SFP) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_10G_BASE_T) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_1GBE) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_QSFP) },
	{ .vendor_id = 0, /* sentinel */ },
};
static const struct eth_dev_ops ice_eth_dev_ops = {
	.dev_configure                = ice_dev_configure,
	.dev_start                    = ice_dev_start,
	.dev_stop                     = ice_dev_stop,
	.dev_close                    = ice_dev_close,
	.dev_reset                    = ice_dev_reset,
	.dev_set_link_up              = ice_dev_set_link_up,
	.dev_set_link_down            = ice_dev_set_link_down,
	.rx_queue_start               = ice_rx_queue_start,
	.rx_queue_stop                = ice_rx_queue_stop,
	.tx_queue_start               = ice_tx_queue_start,
	.tx_queue_stop                = ice_tx_queue_stop,
	.rx_queue_setup               = ice_rx_queue_setup,
	.rx_queue_release             = ice_rx_queue_release,
	.tx_queue_setup               = ice_tx_queue_setup,
	.tx_queue_release             = ice_tx_queue_release,
	.dev_infos_get                = ice_dev_info_get,
	.dev_supported_ptypes_get     = ice_dev_supported_ptypes_get,
	.link_update                  = ice_link_update,
	.mtu_set                      = ice_mtu_set,
	.mac_addr_set                 = ice_macaddr_set,
	.mac_addr_add                 = ice_macaddr_add,
	.mac_addr_remove              = ice_macaddr_remove,
	.vlan_filter_set              = ice_vlan_filter_set,
	.vlan_offload_set             = ice_vlan_offload_set,
	.reta_update                  = ice_rss_reta_update,
	.reta_query                   = ice_rss_reta_query,
	.rss_hash_update              = ice_rss_hash_update,
	.rss_hash_conf_get            = ice_rss_hash_conf_get,
	.promiscuous_enable           = ice_promisc_enable,
	.promiscuous_disable          = ice_promisc_disable,
	.allmulticast_enable          = ice_allmulti_enable,
	.allmulticast_disable         = ice_allmulti_disable,
	.rx_queue_intr_enable         = ice_rx_queue_intr_enable,
	.rx_queue_intr_disable        = ice_rx_queue_intr_disable,
	.fw_version_get               = ice_fw_version_get,
	.vlan_pvid_set                = ice_vlan_pvid_set,
	.rxq_info_get                 = ice_rxq_info_get,
	.txq_info_get                 = ice_txq_info_get,
	.rx_burst_mode_get            = ice_rx_burst_mode_get,
	.tx_burst_mode_get            = ice_tx_burst_mode_get,
	.get_eeprom_length            = ice_get_eeprom_length,
	.get_eeprom                   = ice_get_eeprom,
	.rx_queue_count               = ice_rx_queue_count,
	.rx_descriptor_status         = ice_rx_descriptor_status,
	.tx_descriptor_status         = ice_tx_descriptor_status,
	.stats_get                    = ice_stats_get,
	.stats_reset                  = ice_stats_reset,
	.xstats_get                   = ice_xstats_get,
	.xstats_get_names             = ice_xstats_get_names,
	.xstats_reset                 = ice_stats_reset,
	.filter_ctrl                  = ice_dev_filter_ctrl,
	.udp_tunnel_port_add          = ice_dev_udp_tunnel_port_add,
	.udp_tunnel_port_del          = ice_dev_udp_tunnel_port_del,
	.tx_done_cleanup              = ice_tx_done_cleanup,
};
/* store statistics names and their offsets in the stats structure */
struct ice_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned int offset;
};

static const struct ice_xstats_name_off ice_stats_strings[] = {
	{"rx_unicast_packets", offsetof(struct ice_eth_stats, rx_unicast)},
	{"rx_multicast_packets", offsetof(struct ice_eth_stats, rx_multicast)},
	{"rx_broadcast_packets", offsetof(struct ice_eth_stats, rx_broadcast)},
	{"rx_dropped_packets", offsetof(struct ice_eth_stats, rx_discards)},
	{"rx_unknown_protocol_packets", offsetof(struct ice_eth_stats,
		rx_unknown_protocol)},
	{"tx_unicast_packets", offsetof(struct ice_eth_stats, tx_unicast)},
	{"tx_multicast_packets", offsetof(struct ice_eth_stats, tx_multicast)},
	{"tx_broadcast_packets", offsetof(struct ice_eth_stats, tx_broadcast)},
	{"tx_dropped_packets", offsetof(struct ice_eth_stats, tx_discards)},
};

#define ICE_NB_ETH_XSTATS (sizeof(ice_stats_strings) / \
		sizeof(ice_stats_strings[0]))
static const struct ice_xstats_name_off ice_hw_port_strings[] = {
	{"tx_link_down_dropped", offsetof(struct ice_hw_port_stats,
		tx_dropped_link_down)},
	{"rx_crc_errors", offsetof(struct ice_hw_port_stats, crc_errors)},
	{"rx_illegal_byte_errors", offsetof(struct ice_hw_port_stats,
		illegal_bytes)},
	{"rx_error_bytes", offsetof(struct ice_hw_port_stats, error_bytes)},
	{"mac_local_errors", offsetof(struct ice_hw_port_stats,
		mac_local_faults)},
	{"mac_remote_errors", offsetof(struct ice_hw_port_stats,
		mac_remote_faults)},
	{"rx_len_errors", offsetof(struct ice_hw_port_stats,
		rx_len_errors)},
	{"tx_xon_packets", offsetof(struct ice_hw_port_stats, link_xon_tx)},
	{"rx_xon_packets", offsetof(struct ice_hw_port_stats, link_xon_rx)},
	{"tx_xoff_packets", offsetof(struct ice_hw_port_stats, link_xoff_tx)},
	{"rx_xoff_packets", offsetof(struct ice_hw_port_stats, link_xoff_rx)},
	{"rx_size_64_packets", offsetof(struct ice_hw_port_stats, rx_size_64)},
	{"rx_size_65_to_127_packets", offsetof(struct ice_hw_port_stats,
		rx_size_127)},
	{"rx_size_128_to_255_packets", offsetof(struct ice_hw_port_stats,
		rx_size_255)},
	{"rx_size_256_to_511_packets", offsetof(struct ice_hw_port_stats,
		rx_size_511)},
	{"rx_size_512_to_1023_packets", offsetof(struct ice_hw_port_stats,
		rx_size_1023)},
	{"rx_size_1024_to_1522_packets", offsetof(struct ice_hw_port_stats,
		rx_size_1522)},
	{"rx_size_1523_to_max_packets", offsetof(struct ice_hw_port_stats,
		rx_size_big)},
	{"rx_undersized_errors", offsetof(struct ice_hw_port_stats,
		rx_undersize)},
	{"rx_oversize_errors", offsetof(struct ice_hw_port_stats,
		rx_oversize)},
	{"rx_mac_short_pkt_dropped", offsetof(struct ice_hw_port_stats,
		mac_short_pkt_dropped)},
	{"rx_fragmented_errors", offsetof(struct ice_hw_port_stats,
		rx_fragments)},
	{"rx_jabber_errors", offsetof(struct ice_hw_port_stats, rx_jabber)},
	{"tx_size_64_packets", offsetof(struct ice_hw_port_stats, tx_size_64)},
	{"tx_size_65_to_127_packets", offsetof(struct ice_hw_port_stats,
		tx_size_127)},
	{"tx_size_128_to_255_packets", offsetof(struct ice_hw_port_stats,
		tx_size_255)},
	{"tx_size_256_to_511_packets", offsetof(struct ice_hw_port_stats,
		tx_size_511)},
	{"tx_size_512_to_1023_packets", offsetof(struct ice_hw_port_stats,
		tx_size_1023)},
	{"tx_size_1024_to_1522_packets", offsetof(struct ice_hw_port_stats,
		tx_size_1522)},
	{"tx_size_1523_to_max_packets", offsetof(struct ice_hw_port_stats,
		tx_size_big)},
};

#define ICE_NB_HW_PORT_XSTATS (sizeof(ice_hw_port_strings) / \
		sizeof(ice_hw_port_strings[0]))
static void
ice_init_controlq_parameter(struct ice_hw *hw)
{
	/* fields for adminq */
	hw->adminq.num_rq_entries = ICE_ADMINQ_LEN;
	hw->adminq.num_sq_entries = ICE_ADMINQ_LEN;
	hw->adminq.rq_buf_size = ICE_ADMINQ_BUF_SZ;
	hw->adminq.sq_buf_size = ICE_ADMINQ_BUF_SZ;

	/* fields for mailboxq, DPDK is used as the PF host */
	hw->mailboxq.num_rq_entries = ICE_MAILBOXQ_LEN;
	hw->mailboxq.num_sq_entries = ICE_MAILBOXQ_LEN;
	hw->mailboxq.rq_buf_size = ICE_MAILBOXQ_BUF_SZ;
	hw->mailboxq.sq_buf_size = ICE_MAILBOXQ_BUF_SZ;
}
static int
lookup_proto_xtr_type(const char *xtr_name)
{
	static struct {
		const char *name;
		enum proto_xtr_type type;
	} xtr_type_map[] = {
		{ "vlan",      PROTO_XTR_VLAN      },
		{ "ipv4",      PROTO_XTR_IPV4      },
		{ "ipv6",      PROTO_XTR_IPV6      },
		{ "ipv6_flow", PROTO_XTR_IPV6_FLOW },
		{ "tcp",       PROTO_XTR_TCP       },
	};
	uint32_t i;

	for (i = 0; i < RTE_DIM(xtr_type_map); i++) {
		if (strcmp(xtr_name, xtr_type_map[i].name) == 0)
			return xtr_type_map[i].type;
	}

	return -1;
}
/*
 * Parse an element. An element can be a single number, a range, or a
 * '(' ')' group:
 * 1) A single number element is just a plain number, e.g. 9
 * 2) A single range element is two numbers with a '-' between, e.g. 2-6
 * 3) A group element combines multiple 1) or 2) inside '( )', e.g. (0,2-4,6)
 * Within a group element, '-' is used as the range separator and
 * ',' separates single numbers.
 */
static int
parse_queue_set(const char *input, int xtr_type, struct ice_devargs *devargs)
{
	const char *str = input;
	char *end = NULL;
	uint32_t min, max;
	uint32_t idx;

	while (isblank(*str))
		str++;

	if (!isdigit(*str) && *str != '(')
		return -1;

	/* process single number or single range of number */
	if (*str != '(') {
		errno = 0;
		idx = strtoul(str, &end, 10);
		if (errno || end == NULL || idx >= ICE_MAX_QUEUE_NUM)
			return -1;

		while (isblank(*end))
			end++;

		min = idx;
		max = idx;

		/* process single <number>-<number> */
		if (*end == '-') {
			end++;
			while (isblank(*end))
				end++;
			if (!isdigit(*end))
				return -1;

			errno = 0;
			idx = strtoul(end, &end, 10);
			if (errno || end == NULL || idx >= ICE_MAX_QUEUE_NUM)
				return -1;

			max = idx;
			while (isblank(*end))
				end++;
		}

		if (*end != ':')
			return -1;

		for (idx = RTE_MIN(min, max);
		     idx <= RTE_MAX(min, max); idx++)
			devargs->proto_xtr[idx] = xtr_type;

		return 0;
	}

	/* process set within bracket */
	str++;
	while (isblank(*str))
		str++;
	if (*str == '\0')
		return -1;

	min = ICE_MAX_QUEUE_NUM;
	do {
		/* go ahead to the first digit */
		while (isblank(*str))
			str++;
		if (!isdigit(*str))
			return -1;

		/* get the digit value */
		errno = 0;
		idx = strtoul(str, &end, 10);
		if (errno || end == NULL || idx >= ICE_MAX_QUEUE_NUM)
			return -1;

		/* go ahead to separator '-', ',' and ')' */
		while (isblank(*end))
			end++;
		if (*end == '-') {
			if (min == ICE_MAX_QUEUE_NUM)
				min = idx;
			else /* avoid continuous '-' */
				return -1;
		} else if (*end == ',' || *end == ')') {
			max = idx;
			if (min == ICE_MAX_QUEUE_NUM)
				min = idx;

			for (idx = RTE_MIN(min, max);
			     idx <= RTE_MAX(min, max); idx++)
				devargs->proto_xtr[idx] = xtr_type;

			min = ICE_MAX_QUEUE_NUM;
		} else {
			return -1;
		}

		str = end + 1;
	} while (*end != ')' && *end != '\0');

	return 0;
}
static int
parse_queue_proto_xtr(const char *queues, struct ice_devargs *devargs)
{
	const char *queue_start;
	uint32_t idx;
	int xtr_type;
	char xtr_name[32];

	while (isblank(*queues))
		queues++;

	if (*queues != '[') {
		xtr_type = lookup_proto_xtr_type(queues);
		if (xtr_type < 0)
			return -1;

		devargs->proto_xtr_dflt = xtr_type;

		return 0;
	}

	queues++;
	do {
		while (isblank(*queues))
			queues++;
		if (*queues == '\0')
			return -1;

		queue_start = queues;

		/* go across a complete bracket */
		if (*queue_start == '(') {
			queues += strcspn(queues, ")");
			if (*queues != ')')
				return -1;
		}

		/* scan the separator ':' */
		queues += strcspn(queues, ":");
		if (*queues++ != ':')
			return -1;
		while (isblank(*queues))
			queues++;

		for (idx = 0; ; idx++) {
			if (isblank(queues[idx]) ||
			    queues[idx] == ',' ||
			    queues[idx] == ']' ||
			    queues[idx] == '\0')
				break;

			if (idx > sizeof(xtr_name) - 2)
				return -1;

			xtr_name[idx] = queues[idx];
		}
		xtr_name[idx] = '\0';
		xtr_type = lookup_proto_xtr_type(xtr_name);
		if (xtr_type < 0)
			return -1;

		queues += idx;

		while (isblank(*queues) || *queues == ',' || *queues == ']')
			queues++;

		if (parse_queue_set(queue_start, xtr_type, devargs) < 0)
			return -1;
	} while (*queues != '\0');

	return 0;
}
static int
handle_proto_xtr_arg(__rte_unused const char *key, const char *value,
		     void *extra_args)
{
	struct ice_devargs *devargs = extra_args;

	if (value == NULL || extra_args == NULL)
		return -EINVAL;

	if (parse_queue_proto_xtr(value, devargs) < 0) {
		PMD_DRV_LOG(ERR,
			    "Invalid protocol extraction parameter: '%s'",
			    value);
		return -1;
	}

	return 0;
}
static bool
ice_proto_xtr_support(struct ice_hw *hw)
{
#define FLX_REG(val, fld, idx) \
	(((val) & GLFLXP_RXDID_FLX_WRD_##idx##_##fld##_M) >> \
	 GLFLXP_RXDID_FLX_WRD_##idx##_##fld##_S)
	static struct {
		uint32_t rxdid;
		uint16_t protid_0;
		uint16_t protid_1;
	} xtr_sets[] = {
		{ ICE_RXDID_COMMS_AUX_VLAN, ICE_PROT_EVLAN_O, ICE_PROT_VLAN_O },
		{ ICE_RXDID_COMMS_AUX_IPV4, ICE_PROT_IPV4_OF_OR_S,
		  ICE_PROT_IPV4_OF_OR_S },
		{ ICE_RXDID_COMMS_AUX_IPV6, ICE_PROT_IPV6_OF_OR_S,
		  ICE_PROT_IPV6_OF_OR_S },
		{ ICE_RXDID_COMMS_AUX_IPV6_FLOW, ICE_PROT_IPV6_OF_OR_S,
		  ICE_PROT_IPV6_OF_OR_S },
		{ ICE_RXDID_COMMS_AUX_TCP, ICE_PROT_TCP_IL, ICE_PROT_ID_INVAL },
	};
	uint32_t i;

	for (i = 0; i < RTE_DIM(xtr_sets); i++) {
		uint32_t rxdid = xtr_sets[i].rxdid;
		uint32_t v;

		if (xtr_sets[i].protid_0 != ICE_PROT_ID_INVAL) {
			v = ICE_READ_REG(hw, GLFLXP_RXDID_FLX_WRD_4(rxdid));

			if (FLX_REG(v, PROT_MDID, 4) != xtr_sets[i].protid_0 ||
			    FLX_REG(v, RXDID_OPCODE, 4) != ICE_RX_OPC_EXTRACT)
				return false;
		}

		if (xtr_sets[i].protid_1 != ICE_PROT_ID_INVAL) {
			v = ICE_READ_REG(hw, GLFLXP_RXDID_FLX_WRD_5(rxdid));

			if (FLX_REG(v, PROT_MDID, 5) != xtr_sets[i].protid_1 ||
			    FLX_REG(v, RXDID_OPCODE, 5) != ICE_RX_OPC_EXTRACT)
				return false;
		}
	}

	return true;
}
static int
ice_res_pool_init(struct ice_res_pool_info *pool, uint32_t base,
		  uint32_t num)
{
	struct pool_entry *entry;

	if (!pool || !num)
		return -EINVAL;

	entry = rte_zmalloc(NULL, sizeof(*entry), 0);
	if (!entry) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory for resource pool");
		return -ENOMEM;
	}

	/* queue heap initialize */
	pool->num_free = num;
	pool->num_alloc = 0;
	pool->base = base;
	LIST_INIT(&pool->alloc_list);
	LIST_INIT(&pool->free_list);

	/* Initialize element */
	entry->base = 0;
	entry->len = num;

	LIST_INSERT_HEAD(&pool->free_list, entry, next);
	return 0;
}
static int
ice_res_pool_alloc(struct ice_res_pool_info *pool,
		   uint16_t num)
{
	struct pool_entry *entry, *valid_entry;

	if (!pool || !num) {
		PMD_INIT_LOG(ERR, "Invalid parameter");
		return -EINVAL;
	}

	if (pool->num_free < num) {
		PMD_INIT_LOG(ERR, "No resource. ask:%u, available:%u",
			     num, pool->num_free);
		return -ENOMEM;
	}

	valid_entry = NULL;
	/* Look up the free list and find the best-fit entry */
	LIST_FOREACH(entry, &pool->free_list, next) {
		if (entry->len >= num) {
			/* Find the best one */
			if (entry->len == num) {
				valid_entry = entry;
				break;
			}
			if (!valid_entry ||
			    valid_entry->len > entry->len)
				valid_entry = entry;
		}
	}

	/* No entry found to satisfy the request, return */
	if (!valid_entry) {
		PMD_INIT_LOG(ERR, "No valid entry found");
		return -ENOMEM;
	}
	/**
	 * The entry has exactly the requested number of queues,
	 * remove it from the free list.
	 */
	if (valid_entry->len == num) {
		LIST_REMOVE(valid_entry, next);
	} else {
		/**
		 * The entry has more queues than requested, create a
		 * new entry for the alloc list and reduce the base and
		 * length of the free-list entry accordingly.
		 */
		entry = rte_zmalloc(NULL, sizeof(*entry), 0);
		if (!entry) {
			PMD_INIT_LOG(ERR,
				     "Failed to allocate memory for "
				     "resource pool");
			return -ENOMEM;
		}
		entry->base = valid_entry->base;
		entry->len = num;
		valid_entry->base += num;
		valid_entry->len -= num;
		valid_entry = entry;
	}

	/* Insert it into alloc list, not sorted */
	LIST_INSERT_HEAD(&pool->alloc_list, valid_entry, next);

	pool->num_free -= valid_entry->len;
	pool->num_alloc += valid_entry->len;

	return valid_entry->base + pool->base;
}
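/*
 * Usage sketch (from this file): ice_dev_init() seeds pf->msix_pool with
 * the PF's MSI-X range via ice_res_pool_init(&pf->msix_pool, 1, n - 1),
 * and ice_setup_vsi() later carves per-VSI vector blocks out of it with
 * ice_res_pool_alloc(); the return value is the absolute base vector.
 */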
static void
ice_res_pool_destroy(struct ice_res_pool_info *pool)
{
	struct pool_entry *entry, *next_entry;

	if (!pool)
		return;

	for (entry = LIST_FIRST(&pool->alloc_list);
	     entry && (next_entry = LIST_NEXT(entry, next), 1);
	     entry = next_entry) {
		LIST_REMOVE(entry, next);
		rte_free(entry);
	}

	for (entry = LIST_FIRST(&pool->free_list);
	     entry && (next_entry = LIST_NEXT(entry, next), 1);
	     entry = next_entry) {
		LIST_REMOVE(entry, next);
		rte_free(entry);
	}

	pool->num_free = 0;
	pool->num_alloc = 0;
	pool->base = 0;
	LIST_INIT(&pool->alloc_list);
	LIST_INIT(&pool->free_list);
}
static void
ice_vsi_config_default_rss(struct ice_aqc_vsi_props *info)
{
	/* Set VSI LUT selection */
	info->q_opt_rss = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI &
			  ICE_AQ_VSI_Q_OPT_RSS_LUT_M;
	/* Set Hash scheme */
	info->q_opt_rss |= ICE_AQ_VSI_Q_OPT_RSS_TPLZ &
			   ICE_AQ_VSI_Q_OPT_RSS_HASH_M;

	/* Enable TC */
	info->q_opt_tc = ICE_AQ_VSI_Q_OPT_TC_OVR_M;
}
static enum ice_status
ice_vsi_config_tc_queue_mapping(struct ice_vsi *vsi,
				struct ice_aqc_vsi_props *info,
				uint8_t enabled_tcmap)
{
	uint16_t bsf, qp_idx;

	/* default tc 0 now. Multi-TC support needs to be added later.
	 * Configure TC and queue mapping parameters, for enabled TC,
	 * allocate qpnum_per_tc queues to this traffic.
	 */
	if (enabled_tcmap != 0x01) {
		PMD_INIT_LOG(ERR, "only TC0 is supported");
		return -ENOTSUP;
	}

	vsi->nb_qps = RTE_MIN(vsi->nb_qps, ICE_MAX_Q_PER_TC);
	bsf = rte_bsf32(vsi->nb_qps);
	/* Adjust the queue number to actual queues that can be applied */
	vsi->nb_qps = 0x1 << bsf;

	qp_idx = 0;
	/* Set tc and queue mapping with VSI */
	info->tc_mapping[0] = rte_cpu_to_le_16((qp_idx <<
						ICE_AQ_VSI_TC_Q_OFFSET_S) |
					       (bsf << ICE_AQ_VSI_TC_Q_NUM_S));

	/* Associate queue number with VSI */
	info->mapping_flags |= rte_cpu_to_le_16(ICE_AQ_VSI_Q_MAP_CONTIG);
	info->q_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
	info->q_mapping[1] = rte_cpu_to_le_16(vsi->nb_qps);
	info->valid_sections |=
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
	/* Set the info.ingress_table and info.egress_table
	 * for UP translate table. Now just set it to 1:1 map by default
	 * -- 0b 111 110 101 100 011 010 001 000 == 0xFAC688
	 */
#define ICE_TC_QUEUE_TABLE_DFLT 0x00FAC688
	info->ingress_table  = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT);
	info->egress_table   = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT);
	info->outer_up_table = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT);
	return 0;
}
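/*
 * Worked expansion of the default 1:1 UP translation constant above:
 * each user priority k occupies a 3-bit field and maps to itself, so the
 * value is the sum of (k << (3 * k)) for k = 0..7, i.e.
 * (7 << 21) + (6 << 18) + ... + (1 << 3) + 0 = 16434824 = 0x00FAC688.
 */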
static int
ice_init_mac_address(struct rte_eth_dev *dev)
{
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (!rte_is_unicast_ether_addr
		((struct rte_ether_addr *)hw->port_info[0].mac.lan_addr)) {
		PMD_INIT_LOG(ERR, "Invalid MAC address");
		return -EINVAL;
	}

	rte_ether_addr_copy(
		(struct rte_ether_addr *)hw->port_info[0].mac.lan_addr,
		(struct rte_ether_addr *)hw->port_info[0].mac.perm_addr);

	dev->data->mac_addrs =
		rte_zmalloc(NULL, sizeof(struct rte_ether_addr), 0);
	if (!dev->data->mac_addrs) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory to store mac address");
		return -ENOMEM;
	}
	/* store it to dev data */
	rte_ether_addr_copy(
		(struct rte_ether_addr *)hw->port_info[0].mac.perm_addr,
		&dev->data->mac_addrs[0]);
	return 0;
}
/* Find out specific MAC filter */
static struct ice_mac_filter *
ice_find_mac_filter(struct ice_vsi *vsi, struct rte_ether_addr *macaddr)
{
	struct ice_mac_filter *f;

	TAILQ_FOREACH(f, &vsi->mac_list, next) {
		if (rte_is_same_ether_addr(macaddr, &f->mac_info.mac_addr))
			return f;
	}

	return NULL;
}
static int
ice_add_mac_filter(struct ice_vsi *vsi, struct rte_ether_addr *mac_addr)
{
	struct ice_fltr_list_entry *m_list_itr = NULL;
	struct ice_mac_filter *f;
	struct LIST_HEAD_TYPE list_head;
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	int ret = 0;

	/* If it's added and configured, return */
	f = ice_find_mac_filter(vsi, mac_addr);
	if (f) {
		PMD_DRV_LOG(INFO, "This MAC filter already exists.");
		return 0;
	}

	INIT_LIST_HEAD(&list_head);

	m_list_itr = (struct ice_fltr_list_entry *)
		ice_malloc(hw, sizeof(*m_list_itr));
	if (!m_list_itr) {
		ret = -ENOMEM;
		goto DONE;
	}
	ice_memcpy(m_list_itr->fltr_info.l_data.mac.mac_addr,
		   mac_addr, ETH_ALEN, ICE_NONDMA_TO_NONDMA);
	m_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
	m_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
	m_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_MAC;
	m_list_itr->fltr_info.flag = ICE_FLTR_TX;
	m_list_itr->fltr_info.vsi_handle = vsi->idx;

	LIST_ADD(&m_list_itr->list_entry, &list_head);

	/* Add the mac */
	ret = ice_add_mac(hw, &list_head);
	if (ret != ICE_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to add MAC filter");
		ret = -EINVAL;
		goto DONE;
	}
	/* Add the mac addr into mac list */
	f = rte_zmalloc(NULL, sizeof(*f), 0);
	if (!f) {
		PMD_DRV_LOG(ERR, "failed to allocate memory");
		ret = -ENOMEM;
		goto DONE;
	}
	rte_ether_addr_copy(mac_addr, &f->mac_info.mac_addr);
	TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
	vsi->mac_num++;

	ret = 0;

DONE:
	rte_free(m_list_itr);
	return ret;
}
static int
ice_remove_mac_filter(struct ice_vsi *vsi, struct rte_ether_addr *mac_addr)
{
	struct ice_fltr_list_entry *m_list_itr = NULL;
	struct ice_mac_filter *f;
	struct LIST_HEAD_TYPE list_head;
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	int ret = 0;

	/* Can't find it, return an error */
	f = ice_find_mac_filter(vsi, mac_addr);
	if (!f)
		return -EINVAL;

	INIT_LIST_HEAD(&list_head);

	m_list_itr = (struct ice_fltr_list_entry *)
		ice_malloc(hw, sizeof(*m_list_itr));
	if (!m_list_itr) {
		ret = -ENOMEM;
		goto DONE;
	}
	ice_memcpy(m_list_itr->fltr_info.l_data.mac.mac_addr,
		   mac_addr, ETH_ALEN, ICE_NONDMA_TO_NONDMA);
	m_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
	m_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
	m_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_MAC;
	m_list_itr->fltr_info.flag = ICE_FLTR_TX;
	m_list_itr->fltr_info.vsi_handle = vsi->idx;

	LIST_ADD(&m_list_itr->list_entry, &list_head);

	/* remove the mac filter */
	ret = ice_remove_mac(hw, &list_head);
	if (ret != ICE_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to remove MAC filter");
		ret = -EINVAL;
		goto DONE;
	}

	/* Remove the mac addr from mac list */
	TAILQ_REMOVE(&vsi->mac_list, f, next);
	rte_free(f);
	vsi->mac_num--;

	ret = 0;
DONE:
	rte_free(m_list_itr);
	return ret;
}
/* Find out specific VLAN filter */
static struct ice_vlan_filter *
ice_find_vlan_filter(struct ice_vsi *vsi, uint16_t vlan_id)
{
	struct ice_vlan_filter *f;

	TAILQ_FOREACH(f, &vsi->vlan_list, next) {
		if (vlan_id == f->vlan_info.vlan_id)
			return f;
	}

	return NULL;
}
static int
ice_add_vlan_filter(struct ice_vsi *vsi, uint16_t vlan_id)
{
	struct ice_fltr_list_entry *v_list_itr = NULL;
	struct ice_vlan_filter *f;
	struct LIST_HEAD_TYPE list_head;
	struct ice_hw *hw;
	int ret = 0;

	if (!vsi || vlan_id > RTE_ETHER_MAX_VLAN_ID)
		return -EINVAL;

	hw = ICE_VSI_TO_HW(vsi);

	/* If it's added and configured, return. */
	f = ice_find_vlan_filter(vsi, vlan_id);
	if (f) {
		PMD_DRV_LOG(INFO, "This VLAN filter already exists.");
		return 0;
	}

	if (!vsi->vlan_anti_spoof_on && !vsi->vlan_filter_on)
		return 0;

	INIT_LIST_HEAD(&list_head);

	v_list_itr = (struct ice_fltr_list_entry *)
		      ice_malloc(hw, sizeof(*v_list_itr));
	if (!v_list_itr) {
		ret = -ENOMEM;
		goto DONE;
	}
	v_list_itr->fltr_info.l_data.vlan.vlan_id = vlan_id;
	v_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
	v_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
	v_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
	v_list_itr->fltr_info.flag = ICE_FLTR_TX;
	v_list_itr->fltr_info.vsi_handle = vsi->idx;

	LIST_ADD(&v_list_itr->list_entry, &list_head);

	/* Add the vlan */
	ret = ice_add_vlan(hw, &list_head);
	if (ret != ICE_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to add VLAN filter");
		ret = -EINVAL;
		goto DONE;
	}

	/* Add vlan into vlan list */
	f = rte_zmalloc(NULL, sizeof(*f), 0);
	if (!f) {
		PMD_DRV_LOG(ERR, "failed to allocate memory");
		ret = -ENOMEM;
		goto DONE;
	}
	f->vlan_info.vlan_id = vlan_id;
	TAILQ_INSERT_TAIL(&vsi->vlan_list, f, next);
	vsi->vlan_num++;

	ret = 0;

DONE:
	rte_free(v_list_itr);
	return ret;
}
static int
ice_remove_vlan_filter(struct ice_vsi *vsi, uint16_t vlan_id)
{
	struct ice_fltr_list_entry *v_list_itr = NULL;
	struct ice_vlan_filter *f;
	struct LIST_HEAD_TYPE list_head;
	struct ice_hw *hw;
	int ret = 0;

	/**
	 * Vlan 0 is the generic filter for untagged packets
	 * and can't be removed.
	 */
	if (!vsi || vlan_id == 0 || vlan_id > RTE_ETHER_MAX_VLAN_ID)
		return -EINVAL;

	hw = ICE_VSI_TO_HW(vsi);

	/* Can't find it, return an error */
	f = ice_find_vlan_filter(vsi, vlan_id);
	if (!f)
		return -EINVAL;

	INIT_LIST_HEAD(&list_head);

	v_list_itr = (struct ice_fltr_list_entry *)
		      ice_malloc(hw, sizeof(*v_list_itr));
	if (!v_list_itr) {
		ret = -ENOMEM;
		goto DONE;
	}

	v_list_itr->fltr_info.l_data.vlan.vlan_id = vlan_id;
	v_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
	v_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
	v_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
	v_list_itr->fltr_info.flag = ICE_FLTR_TX;
	v_list_itr->fltr_info.vsi_handle = vsi->idx;

	LIST_ADD(&v_list_itr->list_entry, &list_head);

	/* remove the vlan filter */
	ret = ice_remove_vlan(hw, &list_head);
	if (ret != ICE_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to remove VLAN filter");
		ret = -EINVAL;
		goto DONE;
	}

	/* Remove the vlan id from vlan list */
	TAILQ_REMOVE(&vsi->vlan_list, f, next);
	rte_free(f);
	vsi->vlan_num--;

	ret = 0;
DONE:
	rte_free(v_list_itr);
	return ret;
}
static int
ice_remove_all_mac_vlan_filters(struct ice_vsi *vsi)
{
	struct ice_mac_filter *m_f;
	struct ice_vlan_filter *v_f;
	int ret = 0;

	if (!vsi || !vsi->mac_num)
		return -EINVAL;

	TAILQ_FOREACH(m_f, &vsi->mac_list, next) {
		ret = ice_remove_mac_filter(vsi, &m_f->mac_info.mac_addr);
		if (ret != ICE_SUCCESS) {
			ret = -EINVAL;
			goto DONE;
		}
	}

	if (vsi->vlan_num == 0)
		return 0;

	TAILQ_FOREACH(v_f, &vsi->vlan_list, next) {
		ret = ice_remove_vlan_filter(vsi, v_f->vlan_info.vlan_id);
		if (ret != ICE_SUCCESS) {
			ret = -EINVAL;
			goto DONE;
		}
	}

DONE:
	return ret;
}
static int
ice_vsi_config_qinq_insertion(struct ice_vsi *vsi, bool on)
{
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	struct ice_vsi_ctx ctxt;
	uint8_t qinq_flags;
	int ret = 0;

	/* Check if it has been already on or off */
	if (vsi->info.valid_sections &
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID)) {
		if (on) {
			if ((vsi->info.outer_tag_flags &
			     ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST) ==
			    ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST)
				return 0; /* already on */
		} else {
			if (!(vsi->info.outer_tag_flags &
			      ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST))
				return 0; /* already off */
		}
	}

	if (on)
		qinq_flags = ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST;
	else
		qinq_flags = 0;
	/* clear global insertion and use per packet insertion */
	vsi->info.outer_tag_flags &= ~(ICE_AQ_VSI_OUTER_TAG_INSERT);
	vsi->info.outer_tag_flags &= ~(ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST);
	vsi->info.outer_tag_flags |= qinq_flags;
	/* use the default outer tag type 0x9100 (ICE_DFLT_OUTER_TAG_TYPE) */
	vsi->info.outer_tag_flags &= ~(ICE_AQ_VSI_OUTER_TAG_TYPE_M);
	vsi->info.outer_tag_flags |= ICE_DFLT_OUTER_TAG_TYPE <<
				     ICE_AQ_VSI_OUTER_TAG_TYPE_S;
	(void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
	ctxt.info.valid_sections =
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);
	ctxt.vsi_num = vsi->vsi_id;
	ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
	if (ret) {
		PMD_DRV_LOG(INFO,
			    "Update VSI failed to %s qinq insertion",
			    on ? "enable" : "disable");
		return -EINVAL;
	}

	vsi->info.valid_sections |=
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);

	return 0;
}
static int
ice_vsi_config_qinq_stripping(struct ice_vsi *vsi, bool on)
{
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	struct ice_vsi_ctx ctxt;
	uint8_t qinq_flags;
	int ret = 0;

	/* Check if it has been already on or off */
	if (vsi->info.valid_sections &
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID)) {
		if (on) {
			if ((vsi->info.outer_tag_flags &
			     ICE_AQ_VSI_OUTER_TAG_MODE_M) ==
			    ICE_AQ_VSI_OUTER_TAG_COPY)
				return 0; /* already on */
		} else {
			if ((vsi->info.outer_tag_flags &
			     ICE_AQ_VSI_OUTER_TAG_MODE_M) ==
			    ICE_AQ_VSI_OUTER_TAG_NOTHING)
				return 0; /* already off */
		}
	}

	if (on)
		qinq_flags = ICE_AQ_VSI_OUTER_TAG_COPY;
	else
		qinq_flags = ICE_AQ_VSI_OUTER_TAG_NOTHING;
	vsi->info.outer_tag_flags &= ~(ICE_AQ_VSI_OUTER_TAG_MODE_M);
	vsi->info.outer_tag_flags |= qinq_flags;
	/* use the default outer tag type 0x9100 (ICE_DFLT_OUTER_TAG_TYPE) */
	vsi->info.outer_tag_flags &= ~(ICE_AQ_VSI_OUTER_TAG_TYPE_M);
	vsi->info.outer_tag_flags |= ICE_DFLT_OUTER_TAG_TYPE <<
				     ICE_AQ_VSI_OUTER_TAG_TYPE_S;
	(void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
	ctxt.info.valid_sections =
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);
	ctxt.vsi_num = vsi->vsi_id;
	ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
	if (ret) {
		PMD_DRV_LOG(INFO,
			    "Update VSI failed to %s qinq stripping",
			    on ? "enable" : "disable");
		return -EINVAL;
	}

	vsi->info.valid_sections |=
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);

	return 0;
}
static int
ice_vsi_config_double_vlan(struct ice_vsi *vsi, int on)
{
	int ret;

	ret = ice_vsi_config_qinq_stripping(vsi, on);
	if (ret)
		PMD_DRV_LOG(ERR, "Fail to set qinq stripping - %d", ret);

	ret = ice_vsi_config_qinq_insertion(vsi, on);
	if (ret)
		PMD_DRV_LOG(ERR, "Fail to set qinq insertion - %d", ret);

	return ret;
}
/* Enable IRQ0 */
static void
ice_pf_enable_irq0(struct ice_hw *hw)
{
	/* reset the registers */
	ICE_WRITE_REG(hw, PFINT_OICR_ENA, 0);
	ICE_READ_REG(hw, PFINT_OICR);

#ifdef ICE_LSE_SPT
	ICE_WRITE_REG(hw, PFINT_OICR_ENA,
		      (uint32_t)(PFINT_OICR_ENA_INT_ENA_M &
				 (~PFINT_OICR_LINK_STAT_CHANGE_M)));

	ICE_WRITE_REG(hw, PFINT_OICR_CTL,
		      (0 & PFINT_OICR_CTL_MSIX_INDX_M) |
		      ((0 << PFINT_OICR_CTL_ITR_INDX_S) &
		       PFINT_OICR_CTL_ITR_INDX_M) |
		      PFINT_OICR_CTL_CAUSE_ENA_M);

	ICE_WRITE_REG(hw, PFINT_FW_CTL,
		      (0 & PFINT_FW_CTL_MSIX_INDX_M) |
		      ((0 << PFINT_FW_CTL_ITR_INDX_S) &
		       PFINT_FW_CTL_ITR_INDX_M) |
		      PFINT_FW_CTL_CAUSE_ENA_M);
#else
	ICE_WRITE_REG(hw, PFINT_OICR_ENA, PFINT_OICR_ENA_INT_ENA_M);
#endif

	ICE_WRITE_REG(hw, GLINT_DYN_CTL(0),
		      GLINT_DYN_CTL_INTENA_M |
		      GLINT_DYN_CTL_CLEARPBA_M |
		      GLINT_DYN_CTL_ITR_INDX_M);

	ice_flush(hw);
}
/* Disable IRQ0 */
static void
ice_pf_disable_irq0(struct ice_hw *hw)
{
	/* Disable all interrupt types */
	ICE_WRITE_REG(hw, GLINT_DYN_CTL(0), GLINT_DYN_CTL_WB_ON_ITR_M);
	ice_flush(hw);
}
static void
ice_handle_aq_msg(struct rte_eth_dev *dev)
{
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_ctl_q_info *cq = &hw->adminq;
	struct ice_rq_event_info event;
	uint16_t pending, opcode;
	int ret;

	event.buf_len = ICE_AQ_MAX_BUF_LEN;
	event.msg_buf = rte_zmalloc(NULL, event.buf_len, 0);
	if (!event.msg_buf) {
		PMD_DRV_LOG(ERR, "Failed to allocate mem");
		return;
	}

	pending = 1;
	while (pending) {
		ret = ice_clean_rq_elem(hw, cq, &event, &pending);

		if (ret != ICE_SUCCESS) {
			PMD_DRV_LOG(INFO,
				    "Failed to read msg from AdminQ, "
				    "adminq_err: %u",
				    hw->adminq.sq_last_status);
			break;
		}
		opcode = rte_le_to_cpu_16(event.desc.opcode);

		switch (opcode) {
		case ice_aqc_opc_get_link_status:
			ret = ice_link_update(dev, 0);
			if (!ret)
				_rte_eth_dev_callback_process
					(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
			break;
		default:
			PMD_DRV_LOG(DEBUG, "Request %u is not supported yet",
				    opcode);
			break;
		}
	}
	rte_free(event.msg_buf);
}
/**
 * Interrupt handler triggered by NIC for handling
 * specific interrupt.
 *
 * @param handle
 *  Pointer to interrupt handle.
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
ice_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t oicr;
	uint32_t reg;
	uint8_t pf_num;
	uint8_t event;
	uint16_t queue;
	int ret;
#ifdef ICE_LSE_SPT
	uint32_t int_fw_ctl;
#endif

	/* Disable interrupt */
	ice_pf_disable_irq0(hw);

	/* read out interrupt causes */
	oicr = ICE_READ_REG(hw, PFINT_OICR);
#ifdef ICE_LSE_SPT
	int_fw_ctl = ICE_READ_REG(hw, PFINT_FW_CTL);
#endif

	/* No interrupt event indicated */
	if (!(oicr & PFINT_OICR_INTEVENT_M)) {
		PMD_DRV_LOG(INFO, "No interrupt event");
		goto done;
	}

#ifdef ICE_LSE_SPT
	if (int_fw_ctl & PFINT_FW_CTL_INTEVENT_M) {
		PMD_DRV_LOG(INFO, "FW_CTL: link state change event");
		ice_handle_aq_msg(dev);
	}
#else
	if (oicr & PFINT_OICR_LINK_STAT_CHANGE_M) {
		PMD_DRV_LOG(INFO, "OICR: link state change event");
		ret = ice_link_update(dev, 0);
		if (!ret)
			_rte_eth_dev_callback_process
				(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
	}
#endif

	if (oicr & PFINT_OICR_MAL_DETECT_M) {
		PMD_DRV_LOG(WARNING, "OICR: MDD event");
		reg = ICE_READ_REG(hw, GL_MDET_TX_PQM);
		if (reg & GL_MDET_TX_PQM_VALID_M) {
			pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >>
				 GL_MDET_TX_PQM_PF_NUM_S;
			event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >>
				GL_MDET_TX_PQM_MAL_TYPE_S;
			queue = (reg & GL_MDET_TX_PQM_QNUM_M) >>
				GL_MDET_TX_PQM_QNUM_S;

			PMD_DRV_LOG(WARNING, "Malicious Driver Detection event "
				    "%d by PQM on TX queue %d PF# %d",
				    event, queue, pf_num);
		}

		reg = ICE_READ_REG(hw, GL_MDET_TX_TCLAN);
		if (reg & GL_MDET_TX_TCLAN_VALID_M) {
			pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >>
				 GL_MDET_TX_TCLAN_PF_NUM_S;
			event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >>
				GL_MDET_TX_TCLAN_MAL_TYPE_S;
			queue = (reg & GL_MDET_TX_TCLAN_QNUM_M) >>
				GL_MDET_TX_TCLAN_QNUM_S;

			PMD_DRV_LOG(WARNING, "Malicious Driver Detection event "
				    "%d by TCLAN on TX queue %d PF# %d",
				    event, queue, pf_num);
		}
	}
done:
	/* Enable interrupt */
	ice_pf_enable_irq0(hw);
	rte_intr_ack(dev->intr_handle);
}
static void
ice_init_proto_xtr(struct rte_eth_dev *dev)
{
	struct ice_adapter *ad =
			ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	const struct proto_xtr_ol_flag *ol_flag;
	bool proto_xtr_enable = false;
	int offset;
	uint16_t i;

	if (!ice_proto_xtr_support(hw)) {
		PMD_DRV_LOG(NOTICE, "Protocol extraction is not supported");
		return;
	}

	pf->proto_xtr = rte_zmalloc(NULL, pf->lan_nb_qps, 0);
	if (unlikely(pf->proto_xtr == NULL)) {
		PMD_DRV_LOG(ERR, "No memory for setting up protocol extraction table");
		return;
	}

	for (i = 0; i < pf->lan_nb_qps; i++) {
		pf->proto_xtr[i] = ad->devargs.proto_xtr[i] != PROTO_XTR_NONE ?
				   ad->devargs.proto_xtr[i] :
				   ad->devargs.proto_xtr_dflt;

		if (pf->proto_xtr[i] != PROTO_XTR_NONE) {
			uint8_t type = pf->proto_xtr[i];

			ice_proto_xtr_ol_flag_params[type].required = true;
			proto_xtr_enable = true;
		}
	}

	if (likely(!proto_xtr_enable))
		return;

	offset = rte_mbuf_dynfield_register(&ice_proto_xtr_metadata_param);
	if (unlikely(offset == -1)) {
		PMD_DRV_LOG(ERR,
			    "Protocol extraction metadata is disabled in mbuf with error %d",
			    -rte_errno);
		return;
	}

	PMD_DRV_LOG(DEBUG,
		    "Protocol extraction metadata offset in mbuf is : %d",
		    offset);
	rte_net_ice_dynfield_proto_xtr_metadata_offs = offset;

	for (i = 0; i < RTE_DIM(ice_proto_xtr_ol_flag_params); i++) {
		ol_flag = &ice_proto_xtr_ol_flag_params[i];

		if (!ol_flag->required)
			continue;

		offset = rte_mbuf_dynflag_register(&ol_flag->param);
		if (unlikely(offset == -1)) {
			PMD_DRV_LOG(ERR,
				    "Protocol extraction offload '%s' failed to register with error %d",
				    ol_flag->param.name, -rte_errno);

			rte_net_ice_dynfield_proto_xtr_metadata_offs = -1;
			break;
		}

		PMD_DRV_LOG(DEBUG,
			    "Protocol extraction offload '%s' offset in mbuf is : %d",
			    ol_flag->param.name, offset);
		*ol_flag->ol_flag = 1ULL << offset;
	}
}
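/*
 * Application-side sketch (illustrative, based on the helpers declared in
 * rte_pmd_ice.h): once the dynfield and dynflags registered above are in
 * place, an application can read the extracted data per mbuf, e.g.:
 *
 *   if (rte_net_ice_dynf_proto_xtr_metadata_avail() &&
 *       (mb->ol_flags & rte_net_ice_dynflag_proto_xtr_tcp_mask)) {
 *       uint32_t meta = rte_net_ice_dynf_proto_xtr_metadata_get(mb);
 *       ...
 *   }
 */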
/* Initialize SW parameters of PF */
static int
ice_pf_sw_init(struct rte_eth_dev *dev)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_hw *hw = ICE_PF_TO_HW(pf);

	pf->lan_nb_qp_max =
		(uint16_t)RTE_MIN(hw->func_caps.common_cap.num_txq,
				  hw->func_caps.common_cap.num_rxq);

	pf->lan_nb_qps = pf->lan_nb_qp_max;

	ice_init_proto_xtr(dev);

	if (hw->func_caps.fd_fltr_guar > 0 ||
	    hw->func_caps.fd_fltr_best_effort > 0) {
		pf->flags |= ICE_FLAG_FDIR;
		pf->fdir_nb_qps = ICE_DEFAULT_QP_NUM_FDIR;
		pf->lan_nb_qps = pf->lan_nb_qp_max - pf->fdir_nb_qps;
	} else {
		pf->fdir_nb_qps = 0;
	}
	pf->fdir_qp_offset = 0;

	return 0;
}
static struct ice_vsi *
ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_vsi *vsi = NULL;
	struct ice_vsi_ctx vsi_ctx;
	int ret;
	struct rte_ether_addr broadcast = {
		.addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff} };
	struct rte_ether_addr mac_addr;
	uint16_t max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
	uint8_t tc_bitmap = 0x1;
	uint16_t cfg;

	/* hw->num_lports = 1 in NIC mode */
	vsi = rte_zmalloc(NULL, sizeof(struct ice_vsi), 0);
	if (!vsi)
		return NULL;

	vsi->idx = pf->next_vsi_idx;
	pf->next_vsi_idx++;
	vsi->type = type;
	vsi->adapter = ICE_PF_TO_ADAPTER(pf);
	vsi->max_macaddrs = ICE_NUM_MACADDR_MAX;
	vsi->vlan_anti_spoof_on = 0;
	vsi->vlan_filter_on = 1;
	TAILQ_INIT(&vsi->mac_list);
	TAILQ_INIT(&vsi->vlan_list);

	/* Keep in sync with the ETH_RSS_RETA_SIZE_x maximum value definition */
	pf->hash_lut_size = hw->func_caps.common_cap.rss_table_size >
			ETH_RSS_RETA_SIZE_512 ? ETH_RSS_RETA_SIZE_512 :
			hw->func_caps.common_cap.rss_table_size;
	pf->flags |= ICE_FLAG_RSS_AQ_CAPABLE;

	memset(&vsi_ctx, 0, sizeof(vsi_ctx));
	switch (type) {
	case ICE_VSI_PF:
		vsi->nb_qps = pf->lan_nb_qps;
		vsi->base_queue = 1;
		ice_vsi_config_default_rss(&vsi_ctx.info);
		vsi_ctx.alloc_from_pool = true;
		vsi_ctx.flags = ICE_AQ_VSI_TYPE_PF;
		/* switch_id is queried by get_switch_config aq, which is done
		 * by ice_init_hw
		 */
		vsi_ctx.info.sw_id = hw->port_info->sw_id;
		vsi_ctx.info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
		/* Allow all untagged or tagged packets */
		vsi_ctx.info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL;
		vsi_ctx.info.vlan_flags |= ICE_AQ_VSI_VLAN_EMOD_NOTHING;
		vsi_ctx.info.q_opt_rss = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF |
					 ICE_AQ_VSI_Q_OPT_RSS_TPLZ;

		/* FDIR */
		cfg = ICE_AQ_VSI_PROP_SECURITY_VALID |
			ICE_AQ_VSI_PROP_FLOW_DIR_VALID;
		vsi_ctx.info.valid_sections |= rte_cpu_to_le_16(cfg);
		cfg = ICE_AQ_VSI_FD_ENABLE;
		vsi_ctx.info.fd_options = rte_cpu_to_le_16(cfg);
		vsi_ctx.info.max_fd_fltr_dedicated =
			rte_cpu_to_le_16(hw->func_caps.fd_fltr_guar);
		vsi_ctx.info.max_fd_fltr_shared =
			rte_cpu_to_le_16(hw->func_caps.fd_fltr_best_effort);

		/* Enable VLAN/UP trip */
		ret = ice_vsi_config_tc_queue_mapping(vsi,
						      &vsi_ctx.info,
						      ICE_DEFAULT_TCMAP);
		if (ret) {
			PMD_INIT_LOG(ERR,
				     "tc queue mapping with vsi failed, "
				     "err = %d",
				     ret);
			goto fail_mem;
		}

		break;
	case ICE_VSI_CTRL:
		vsi->nb_qps = pf->fdir_nb_qps;
		vsi->base_queue = ICE_FDIR_QUEUE_ID;
		vsi_ctx.alloc_from_pool = true;
		vsi_ctx.flags = ICE_AQ_VSI_TYPE_PF;

		cfg = ICE_AQ_VSI_PROP_FLOW_DIR_VALID;
		vsi_ctx.info.valid_sections |= rte_cpu_to_le_16(cfg);
		cfg = ICE_AQ_VSI_FD_PROG_ENABLE;
		vsi_ctx.info.fd_options = rte_cpu_to_le_16(cfg);
		vsi_ctx.info.sw_id = hw->port_info->sw_id;
		vsi_ctx.info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
		ret = ice_vsi_config_tc_queue_mapping(vsi,
						      &vsi_ctx.info,
						      ICE_DEFAULT_TCMAP);
		if (ret) {
			PMD_INIT_LOG(ERR,
				     "tc queue mapping with vsi failed, "
				     "err = %d",
				     ret);
			goto fail_mem;
		}
		break;
	default:
		/* for other types of VSI */
		PMD_INIT_LOG(ERR, "other types of VSI not supported");
		goto fail_mem;
	}

	/* VF has MSIX interrupt in VF range, don't allocate here */
	if (type == ICE_VSI_PF) {
		ret = ice_res_pool_alloc(&pf->msix_pool,
					 RTE_MIN(vsi->nb_qps,
						 RTE_MAX_RXTX_INTR_VEC_ID));
		if (ret < 0) {
			PMD_INIT_LOG(ERR, "VSI MAIN %d get heap failed %d",
				     vsi->vsi_id, ret);
		}
		vsi->msix_intr = ret;
		vsi->nb_msix = RTE_MIN(vsi->nb_qps, RTE_MAX_RXTX_INTR_VEC_ID);
	} else if (type == ICE_VSI_CTRL) {
		ret = ice_res_pool_alloc(&pf->msix_pool, 1);
		if (ret < 0) {
			PMD_DRV_LOG(ERR, "VSI %d get heap failed %d",
				    vsi->vsi_id, ret);
		}
		vsi->msix_intr = ret;
		vsi->nb_msix = 1;
	} else {
		vsi->msix_intr = 0;
		vsi->nb_msix = 0;
	}
	ret = ice_add_vsi(hw, vsi->idx, &vsi_ctx, NULL);
	if (ret != ICE_SUCCESS) {
		PMD_INIT_LOG(ERR, "add vsi failed, err = %d", ret);
		goto fail_mem;
	}
	/* store vsi information in the SW structure */
	vsi->vsi_id = vsi_ctx.vsi_num;
	vsi->info = vsi_ctx.info;
	pf->vsis_allocated = vsi_ctx.vsis_allocd;
	pf->vsis_unallocated = vsi_ctx.vsis_unallocated;

	if (type == ICE_VSI_PF) {
		/* MAC configuration */
		rte_ether_addr_copy((struct rte_ether_addr *)
					hw->port_info->mac.perm_addr,
				    &pf->dev_addr);

		rte_ether_addr_copy(&pf->dev_addr, &mac_addr);
		ret = ice_add_mac_filter(vsi, &mac_addr);
		if (ret != ICE_SUCCESS)
			PMD_INIT_LOG(ERR, "Failed to add dflt MAC filter");

		rte_ether_addr_copy(&broadcast, &mac_addr);
		ret = ice_add_mac_filter(vsi, &mac_addr);
		if (ret != ICE_SUCCESS)
			PMD_INIT_LOG(ERR, "Failed to add MAC filter");
	}

	/* At the beginning, only TC0. */
	/* What we need here is the maximum number of the TX queues.
	 * Currently vsi->nb_qps means it.
	 * Correct it if any change.
	 */
	max_txqs[0] = vsi->nb_qps;
	ret = ice_cfg_vsi_lan(hw->port_info, vsi->idx,
			      tc_bitmap, max_txqs);
	if (ret != ICE_SUCCESS)
		PMD_INIT_LOG(ERR, "Failed to config vsi sched");

	return vsi;
fail_mem:
	rte_free(vsi);
	pf->next_vsi_idx--;
	return NULL;
}
static int
ice_send_driver_ver(struct ice_hw *hw)
{
	struct ice_driver_ver dv;

	/* we don't have a driver version yet, use 0 as a dummy */
	dv.major_ver = 0;
	dv.minor_ver = 0;
	dv.build_ver = 0;
	dv.subbuild_ver = 0;
	strncpy((char *)dv.driver_string, "dpdk", sizeof(dv.driver_string));

	return ice_aq_send_driver_ver(hw, &dv, NULL);
}
static int
ice_pf_setup(struct ice_pf *pf)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_vsi *vsi;
	uint16_t unused;

	/* Clear all stats counters */
	pf->offset_loaded = false;
	memset(&pf->stats, 0, sizeof(struct ice_hw_port_stats));
	memset(&pf->stats_offset, 0, sizeof(struct ice_hw_port_stats));
	memset(&pf->internal_stats, 0, sizeof(struct ice_eth_stats));
	memset(&pf->internal_stats_offset, 0, sizeof(struct ice_eth_stats));

	/* force guaranteed filter pool for PF */
	ice_alloc_fd_guar_item(hw, &unused,
			       hw->func_caps.fd_fltr_guar);
	/* force shared filter pool for PF */
	ice_alloc_fd_shrd_item(hw, &unused,
			       hw->func_caps.fd_fltr_best_effort);

	vsi = ice_setup_vsi(pf, ICE_VSI_PF);
	if (!vsi) {
		PMD_INIT_LOG(ERR, "Failed to add vsi for PF");
		return -EINVAL;
	}

	pf->main_vsi = vsi;

	return 0;
}
/* PCIe configuration space setting */
#define PCI_CFG_SPACE_SIZE	256
#define PCI_CFG_SPACE_EXP_SIZE	4096
#define PCI_EXT_CAP_ID(header)		(int)((header) & 0x0000ffff)
#define PCI_EXT_CAP_NEXT(header)	(((header) >> 20) & 0xffc)
#define PCI_EXT_CAP_ID_DSN		0x03
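/*
 * Worked example of the decode macros above (header value illustrative):
 * for an extended capability header of 0x14010003, PCI_EXT_CAP_ID()
 * yields 0x0003 (the Device Serial Number capability) and
 * PCI_EXT_CAP_NEXT() yields 0x140, the offset of the next capability.
 */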
static int
ice_pci_find_next_ext_capability(struct rte_pci_device *dev, int cap)
{
	uint32_t header;
	int ttl;
	int pos = PCI_CFG_SPACE_SIZE;

	/* minimum 8 bytes per capability */
	ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;

	if (rte_pci_read_config(dev, &header, 4, pos) < 0) {
		PMD_INIT_LOG(ERR, "ice error reading extended capabilities\n");
		return -1;
	}

	/*
	 * If we have no capabilities, this is indicated by cap ID,
	 * cap version and next pointer all being 0.
	 */
	if (header == 0)
		return 0;

	while (ttl-- > 0) {
		if (PCI_EXT_CAP_ID(header) == cap)
			return pos;

		pos = PCI_EXT_CAP_NEXT(header);

		if (pos < PCI_CFG_SPACE_SIZE)
			break;

		if (rte_pci_read_config(dev, &header, 4, pos) < 0) {
			PMD_INIT_LOG(ERR, "ice error reading extended capabilities\n");
			return -1;
		}
	}

	return 0;
}
/*
 * Extract device serial number from PCIe Configuration Space and
 * determine the pkg file path according to the DSN.
 */
static int
ice_pkg_file_search_path(struct rte_pci_device *pci_dev, char *pkg_file)
{
	int pos;
	char opt_ddp_filename[ICE_MAX_PKG_FILENAME_SIZE];
	uint32_t dsn_low, dsn_high;
	memset(opt_ddp_filename, 0, ICE_MAX_PKG_FILENAME_SIZE);

	pos = ice_pci_find_next_ext_capability(pci_dev, PCI_EXT_CAP_ID_DSN);

	if (pos) {
		rte_pci_read_config(pci_dev, &dsn_low, 4, pos + 4);
		rte_pci_read_config(pci_dev, &dsn_high, 4, pos + 8);
		snprintf(opt_ddp_filename, ICE_MAX_PKG_FILENAME_SIZE,
			 "ice-%08x%08x.pkg", dsn_high, dsn_low);
	} else {
		PMD_INIT_LOG(ERR, "Failed to read device serial number\n");
		goto fail_dsn;
	}

	strncpy(pkg_file, ICE_PKG_FILE_SEARCH_PATH_UPDATES,
		ICE_MAX_PKG_FILENAME_SIZE);
	if (!access(strcat(pkg_file, opt_ddp_filename), 0))
		return 0;

	strncpy(pkg_file, ICE_PKG_FILE_SEARCH_PATH_DEFAULT,
		ICE_MAX_PKG_FILENAME_SIZE);
	if (!access(strcat(pkg_file, opt_ddp_filename), 0))
		return 0;

fail_dsn:
	strncpy(pkg_file, ICE_PKG_FILE_UPDATES, ICE_MAX_PKG_FILENAME_SIZE);
	if (!access(pkg_file, 0))
		return 0;
	strncpy(pkg_file, ICE_PKG_FILE_DEFAULT, ICE_MAX_PKG_FILENAME_SIZE);
	return 0;
}
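/*
 * Search-order sketch (paths per the ICE_PKG_FILE_* macros defined in
 * ice_ethdev.h): for DSN 0x0123456789abcdef the driver first tries a
 * DSN-specific "ice-0123456789abcdef.pkg" in the updates search path and
 * then in the default search path, and falls back to the generic
 * "ice.pkg" in both locations if neither DSN-specific file is readable.
 */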
static enum ice_pkg_type
ice_load_pkg_type(struct ice_hw *hw)
{
	enum ice_pkg_type package_type;

	/* store the activated package type (OS default or Comms) */
	if (!strncmp((char *)hw->active_pkg_name, ICE_OS_DEFAULT_PKG_NAME,
		     ICE_PKG_NAME_SIZE))
		package_type = ICE_PKG_TYPE_OS_DEFAULT;
	else if (!strncmp((char *)hw->active_pkg_name, ICE_COMMS_PKG_NAME,
			  ICE_PKG_NAME_SIZE))
		package_type = ICE_PKG_TYPE_COMMS;
	else
		package_type = ICE_PKG_TYPE_UNKNOWN;

	PMD_INIT_LOG(NOTICE, "Active package is: %d.%d.%d.%d, %s",
		     hw->active_pkg_ver.major, hw->active_pkg_ver.minor,
		     hw->active_pkg_ver.update, hw->active_pkg_ver.draft,
		     hw->active_pkg_name);

	return package_type;
}
static int ice_load_pkg(struct rte_eth_dev *dev)
{
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	char pkg_file[ICE_MAX_PKG_FILENAME_SIZE];
	int err;
	uint8_t *buf;
	int buf_len;
	FILE *file;
	struct stat fstat;
	struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
	struct ice_adapter *ad =
		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);

	ice_pkg_file_search_path(pci_dev, pkg_file);

	file = fopen(pkg_file, "rb");
	if (!file) {
		PMD_INIT_LOG(ERR, "failed to open file: %s\n", pkg_file);
		return -1;
	}

	err = stat(pkg_file, &fstat);
	if (err) {
		PMD_INIT_LOG(ERR, "failed to get file stats\n");
		fclose(file);
		return err;
	}

	buf_len = fstat.st_size;
	buf = rte_malloc(NULL, buf_len, 0);

	if (!buf) {
		PMD_INIT_LOG(ERR, "failed to allocate buf of size %d for package\n",
			     buf_len);
		fclose(file);
		return -1;
	}

	err = fread(buf, buf_len, 1, file);
	if (err != 1) {
		PMD_INIT_LOG(ERR, "failed to read package data\n");
		fclose(file);
		err = -1;
		goto fail_exit;
	}

	fclose(file);

	err = ice_copy_and_init_pkg(hw, buf, buf_len);
	if (err) {
		PMD_INIT_LOG(ERR, "ice_copy_and_init_hw failed: %d\n", err);
		goto fail_exit;
	}

	/* store the loaded pkg type info */
	ad->active_pkg_type = ice_load_pkg_type(hw);

	err = ice_init_hw_tbls(hw);
	if (err) {
		PMD_INIT_LOG(ERR, "ice_init_hw_tbls failed: %d\n", err);
		goto fail_init_tbls;
	}

	return 0;

fail_init_tbls:
	rte_free(hw->pkg_copy);
fail_exit:
	rte_free(buf);
	return err;
}
static void
ice_base_queue_get(struct ice_pf *pf)
{
	uint32_t reg;
	struct ice_hw *hw = ICE_PF_TO_HW(pf);

	reg = ICE_READ_REG(hw, PFLAN_RX_QALLOC);
	if (reg & PFLAN_RX_QALLOC_VALID_M) {
		pf->base_queue = reg & PFLAN_RX_QALLOC_FIRSTQ_M;
	} else {
		PMD_INIT_LOG(WARNING,
			     "Failed to get Rx base queue index");
	}
}
static int
parse_bool(const char *key, const char *value, void *args)
{
	int *i = (int *)args;
	char *end;
	int num;

	num = strtoul(value, &end, 10);

	if (num != 0 && num != 1) {
		PMD_DRV_LOG(WARNING, "invalid value:\"%s\" for key:\"%s\", "
			"value must be 0 or 1",
			value, key);
		return -1;
	}

	*i = num;
	return 0;
}
static int ice_parse_devargs(struct rte_eth_dev *dev)
{
	struct ice_adapter *ad =
		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct rte_devargs *devargs = dev->device->devargs;
	struct rte_kvargs *kvlist;
	int ret;

	if (devargs == NULL)
		return 0;

	kvlist = rte_kvargs_parse(devargs->args, ice_valid_args);
	if (kvlist == NULL) {
		PMD_INIT_LOG(ERR, "Invalid kvargs key\n");
		return -EINVAL;
	}

	ad->devargs.proto_xtr_dflt = PROTO_XTR_NONE;
	memset(ad->devargs.proto_xtr, PROTO_XTR_NONE,
	       sizeof(ad->devargs.proto_xtr));

	ret = rte_kvargs_process(kvlist, ICE_PROTO_XTR_ARG,
				 &handle_proto_xtr_arg, &ad->devargs);
	if (ret)
		goto bail;

	ret = rte_kvargs_process(kvlist, ICE_SAFE_MODE_SUPPORT_ARG,
				 &parse_bool, &ad->devargs.safe_mode_support);
	if (ret)
		goto bail;

	ret = rte_kvargs_process(kvlist, ICE_PIPELINE_MODE_SUPPORT_ARG,
				 &parse_bool, &ad->devargs.pipe_mode_support);
	if (ret)
		goto bail;

	ret = rte_kvargs_process(kvlist, ICE_FLOW_MARK_SUPPORT_ARG,
				 &parse_bool, &ad->devargs.flow_mark_support);
	if (ret)
		goto bail;

bail:
	rte_kvargs_free(kvlist);
	return ret;
}
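/*
 * Illustrative EAL devargs string combining all the keys handled above
 * (device address and queue IDs are examples only):
 *   -w 0000:18:00.0,safe-mode-support=1,pipeline-mode-support=1,\
 *      flow-mark-support=1,proto_xtr='[(1,2-3,8-9):tcp,10-13:vlan]'
 */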
/* Forward LLDP packets to default VSI by setting switch rules */
static int
ice_vsi_config_sw_lldp(struct ice_vsi *vsi, bool on)
{
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	struct ice_fltr_list_entry *s_list_itr = NULL;
	struct LIST_HEAD_TYPE list_head;
	int ret = 0;

	INIT_LIST_HEAD(&list_head);

	s_list_itr = (struct ice_fltr_list_entry *)
			ice_malloc(hw, sizeof(*s_list_itr));
	if (!s_list_itr)
		return -ENOMEM;
	s_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_ETHERTYPE;
	s_list_itr->fltr_info.vsi_handle = vsi->idx;
	s_list_itr->fltr_info.l_data.ethertype_mac.ethertype =
			RTE_ETHER_TYPE_LLDP;
	s_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
	s_list_itr->fltr_info.flag = ICE_FLTR_RX;
	s_list_itr->fltr_info.src_id = ICE_SRC_ID_LPORT;
	LIST_ADD(&s_list_itr->list_entry, &list_head);
	if (on)
		ret = ice_add_eth_mac(hw, &list_head);
	else
		ret = ice_remove_eth_mac(hw, &list_head);

	rte_free(s_list_itr);
	return ret;
}
static enum ice_status
ice_get_hw_res(struct ice_hw *hw, uint16_t res_type,
		uint16_t num, uint16_t desc_id,
		uint16_t *prof_buf, uint16_t *num_prof)
{
	struct ice_aqc_get_allocd_res_desc_resp *resp_buf;
	int ret;
	uint16_t buf_len;
	bool res_shared = 1;
	struct ice_aq_desc aq_desc;
	struct ice_sq_cd *cd = NULL;
	struct ice_aqc_get_allocd_res_desc *cmd =
			&aq_desc.params.get_res_desc;

	buf_len = sizeof(resp_buf->elem) * num;
	resp_buf = ice_malloc(hw, buf_len);
	if (!resp_buf)
		return -ENOMEM;

	ice_fill_dflt_direct_cmd_desc(&aq_desc,
			ice_aqc_opc_get_allocd_res_desc);

	cmd->ops.cmd.res = CPU_TO_LE16(((res_type << ICE_AQC_RES_TYPE_S) &
				ICE_AQC_RES_TYPE_M) | (res_shared ?
				ICE_AQC_RES_TYPE_FLAG_SHARED : 0));
	cmd->ops.cmd.first_desc = CPU_TO_LE16(desc_id);

	ret = ice_aq_send_cmd(hw, &aq_desc, resp_buf, buf_len, cd);
	if (!ret)
		*num_prof = LE16_TO_CPU(cmd->ops.resp.num_desc);
	else
		goto exit;

	ice_memcpy(prof_buf, resp_buf->elem, sizeof(resp_buf->elem) *
			(*num_prof), ICE_NONDMA_TO_NONDMA);

exit:
	rte_free(resp_buf);
	return ret;
}
static int
ice_cleanup_resource(struct ice_hw *hw, uint16_t res_type)
{
	int ret;
	uint16_t prof_id;
	uint16_t prof_buf[ICE_MAX_RES_DESC_NUM];
	uint16_t first_desc = 1;
	uint16_t num_prof = 0;

	ret = ice_get_hw_res(hw, res_type, ICE_MAX_RES_DESC_NUM,
			first_desc, prof_buf, &num_prof);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to get fxp resource");
		return ret;
	}

	for (prof_id = 0; prof_id < num_prof; prof_id++) {
		ret = ice_free_hw_res(hw, res_type, 1, &prof_buf[prof_id]);
		if (ret) {
			PMD_INIT_LOG(ERR, "Failed to free fxp resource");
			return ret;
		}
	}
	return 0;
}

static int
ice_reset_fxp_resource(struct ice_hw *hw)
{
	int ret;

	ret = ice_cleanup_resource(hw, ICE_AQC_RES_TYPE_FD_PROF_BLDR_PROFID);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to clean up fdir resource");
		return ret;
	}

	ret = ice_cleanup_resource(hw, ICE_AQC_RES_TYPE_HASH_PROF_BLDR_PROFID);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to clean up rss resource");
		return ret;
	}

	return 0;
}
static void
ice_rss_ctx_init(struct ice_pf *pf)
{
	ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4);
	ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6);

	ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_udp);
	ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_udp);

	ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_tcp);
	ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_tcp);
}
static int
ice_dev_init(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev;
	struct rte_intr_handle *intr_handle;
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_adapter *ad =
		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct ice_vsi *vsi;
	int ret;

	dev->dev_ops = &ice_eth_dev_ops;
	dev->rx_pkt_burst = ice_recv_pkts;
	dev->tx_pkt_burst = ice_xmit_pkts;
	dev->tx_pkt_prepare = ice_prep_pkts;

	/* for secondary processes, we don't initialise any further as primary
	 * has already done this work.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		ice_set_rx_function(dev);
		ice_set_tx_function(dev);
		return 0;
	}

	ice_set_default_ptype_table(dev);
	pci_dev = RTE_DEV_TO_PCI(dev->device);
	intr_handle = &pci_dev->intr_handle;

	pf->adapter = ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	pf->adapter->eth_dev = dev;
	pf->dev_data = dev->data;
	hw->back = pf->adapter;
	hw->hw_addr = (uint8_t *)pci_dev->mem_resource[0].addr;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->device_id = pci_dev->id.device_id;
	hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
	hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
	hw->bus.device = pci_dev->addr.devid;
	hw->bus.func = pci_dev->addr.function;

	ret = ice_parse_devargs(dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to parse devargs");
		return -EINVAL;
	}

	ice_init_controlq_parameter(hw);

	ret = ice_init_hw(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to initialize HW");
		return -EINVAL;
	}

	ret = ice_load_pkg(dev);
	if (ret) {
		if (ad->devargs.safe_mode_support == 0) {
			PMD_INIT_LOG(ERR, "Failed to load the DDP package,"
					"Use safe-mode-support=1 to enter Safe Mode");
			return ret;
		}

		PMD_INIT_LOG(WARNING, "Failed to load the DDP package,"
					"Entering Safe Mode");
		ad->is_safe_mode = 1;
	}

	PMD_INIT_LOG(INFO, "FW %d.%d.%05d API %d.%d",
		     hw->fw_maj_ver, hw->fw_min_ver, hw->fw_build,
		     hw->api_maj_ver, hw->api_min_ver);

	ice_pf_sw_init(dev);
	ret = ice_init_mac_address(dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to initialize mac address");
		goto err_init_mac;
	}

	/* Pass the information to the rte_eth_dev_close() that it should also
	 * release the private port resources.
	 */
	dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;

	ret = ice_res_pool_init(&pf->msix_pool, 1,
				hw->func_caps.common_cap.num_msix_vectors - 1);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to init MSIX pool");
		goto err_msix_pool_init;
	}

	ret = ice_pf_setup(pf);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to setup PF");
		goto err_pf_setup;
	}

	ret = ice_send_driver_ver(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to send driver version");
		goto err_pf_setup;
	}

	vsi = pf->main_vsi;

	/* Disable double vlan by default */
	ice_vsi_config_double_vlan(vsi, false);

	ret = ice_aq_stop_lldp(hw, true, false, NULL);
	if (ret != ICE_SUCCESS)
		PMD_INIT_LOG(DEBUG, "lldp has already stopped\n");
	ret = ice_init_dcb(hw, true);
	if (ret != ICE_SUCCESS)
		PMD_INIT_LOG(DEBUG, "Failed to init DCB\n");
	/* Forward LLDP packets to default VSI */
	ret = ice_vsi_config_sw_lldp(vsi, true);
	if (ret != ICE_SUCCESS)
		PMD_INIT_LOG(DEBUG, "Failed to cfg lldp\n");

	/* register callback func to eal lib */
	rte_intr_callback_register(intr_handle,
				   ice_interrupt_handler, dev);

	ice_pf_enable_irq0(hw);

	/* enable uio intr after callback register */
	rte_intr_enable(intr_handle);

	/* get the base queue pair index in the device */
	ice_base_queue_get(pf);

	/* Initialize RSS context for gtpu_eh */
	ice_rss_ctx_init(pf);

	if (!ad->is_safe_mode) {
		ret = ice_flow_init(ad);
		if (ret) {
			PMD_INIT_LOG(ERR, "Failed to initialize flow");
			return ret;
		}
	}

	ret = ice_reset_fxp_resource(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to reset fxp resource");
		return ret;
	}

	return 0;

err_pf_setup:
	ice_res_pool_destroy(&pf->msix_pool);
err_msix_pool_init:
	rte_free(dev->data->mac_addrs);
	dev->data->mac_addrs = NULL;
err_init_mac:
	ice_sched_cleanup_all(hw);
	rte_free(hw->port_info);
	ice_shutdown_all_ctrlq(hw);
	rte_free(pf->proto_xtr);

	return ret;
}
static int
ice_release_vsi(struct ice_vsi *vsi)
{
	struct ice_hw *hw;
	struct ice_vsi_ctx vsi_ctx;
	enum ice_status ret;
	int error = 0;

	if (!vsi)
		return error;

	hw = ICE_VSI_TO_HW(vsi);

	ice_remove_all_mac_vlan_filters(vsi);

	memset(&vsi_ctx, 0, sizeof(vsi_ctx));

	vsi_ctx.vsi_num = vsi->vsi_id;
	vsi_ctx.info = vsi->info;
	ret = ice_free_vsi(hw, vsi->idx, &vsi_ctx, false, NULL);
	if (ret != ICE_SUCCESS) {
		PMD_INIT_LOG(ERR, "Failed to free vsi by aq, %u", vsi->vsi_id);
		error = -1;
	}

	rte_free(vsi->rss_lut);
	rte_free(vsi->rss_key);
	rte_free(vsi);
	return error;
}
static void
ice_vsi_disable_queues_intr(struct ice_vsi *vsi)
{
	struct rte_eth_dev *dev = vsi->adapter->eth_dev;
	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	uint16_t msix_intr, i;

	/* disable interrupts and also clear all the existing config */
	for (i = 0; i < vsi->nb_qps; i++) {
		ICE_WRITE_REG(hw, QINT_TQCTL(vsi->base_queue + i), 0);
		ICE_WRITE_REG(hw, QINT_RQCTL(vsi->base_queue + i), 0);
		rte_wmb();
	}

	if (rte_intr_allow_others(intr_handle))
		/* vfio-pci */
		for (i = 0; i < vsi->nb_msix; i++) {
			msix_intr = vsi->msix_intr + i;
			ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr),
				      GLINT_DYN_CTL_WB_ON_ITR_M);
		}
	else
		/* igb_uio */
		ICE_WRITE_REG(hw, GLINT_DYN_CTL(0), GLINT_DYN_CTL_WB_ON_ITR_M);
}
2353 ice_dev_stop(struct rte_eth_dev *dev)
2355 struct rte_eth_dev_data *data = dev->data;
2356 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2357 struct ice_vsi *main_vsi = pf->main_vsi;
2358 struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
2359 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2362 /* avoid stopping again */
2363 if (pf->adapter_stopped)
2366 /* stop and clear all Rx queues */
2367 for (i = 0; i < data->nb_rx_queues; i++)
2368 ice_rx_queue_stop(dev, i);
2370 /* stop and clear all Tx queues */
2371 for (i = 0; i < data->nb_tx_queues; i++)
2372 ice_tx_queue_stop(dev, i);
2374 /* disable all queue interrupts */
2375 ice_vsi_disable_queues_intr(main_vsi);
2377 if (pf->init_link_up)
2378 ice_dev_set_link_up(dev);
2379 else
2380 ice_dev_set_link_down(dev);
2382 /* Clean datapath event and queue/vec mapping */
2383 rte_intr_efd_disable(intr_handle);
2384 if (intr_handle->intr_vec) {
2385 rte_free(intr_handle->intr_vec);
2386 intr_handle->intr_vec = NULL;
2389 pf->adapter_stopped = true;
2393 ice_dev_close(struct rte_eth_dev *dev)
2395 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2396 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2397 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2398 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2399 struct ice_adapter *ad =
2400 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2402 /* Since stop will make link down, then the link event will be
2403 * triggered; disable the irq first to avoid the port_info and other
2404 * resource deallocation crashing the interrupt service thread.
2407 ice_pf_disable_irq0(hw);
2411 if (!ad->is_safe_mode)
2412 ice_flow_uninit(ad);
2414 /* release all queue resource */
2415 ice_free_queues(dev);
2417 ice_res_pool_destroy(&pf->msix_pool);
2418 ice_release_vsi(pf->main_vsi);
2419 ice_sched_cleanup_all(hw);
2420 ice_free_hw_tbls(hw);
2421 rte_free(hw->port_info);
2422 hw->port_info = NULL;
2423 ice_shutdown_all_ctrlq(hw);
2424 rte_free(pf->proto_xtr);
2425 pf->proto_xtr = NULL;
2427 dev->dev_ops = NULL;
2428 dev->rx_pkt_burst = NULL;
2429 dev->tx_pkt_burst = NULL;
2431 rte_free(dev->data->mac_addrs);
2432 dev->data->mac_addrs = NULL;
2434 /* disable uio intr before callback unregister */
2435 rte_intr_disable(intr_handle);
2437 /* unregister callback func from eal lib */
2438 rte_intr_callback_unregister(intr_handle,
2439 ice_interrupt_handler, dev);
2443 ice_dev_uninit(struct rte_eth_dev *dev)
2451 ice_add_rss_cfg_post(struct ice_pf *pf, uint32_t hdr, uint64_t fld, bool symm)
2453 struct ice_hw *hw = ICE_PF_TO_HW(pf);
2454 struct ice_vsi *vsi = pf->main_vsi;
2456 if (hdr & ICE_FLOW_SEG_HDR_GTPU_EH) {
2457 if ((hdr & ICE_FLOW_SEG_HDR_IPV4) &&
2458 (hdr & ICE_FLOW_SEG_HDR_UDP)) {
2459 pf->gtpu_hash_ctx.ipv4_udp.pkt_hdr = hdr;
2460 pf->gtpu_hash_ctx.ipv4_udp.hash_fld = fld;
2461 pf->gtpu_hash_ctx.ipv4_udp.symm = symm;
2462 } else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) &&
2463 (hdr & ICE_FLOW_SEG_HDR_UDP)) {
2464 pf->gtpu_hash_ctx.ipv6_udp.pkt_hdr = hdr;
2465 pf->gtpu_hash_ctx.ipv6_udp.hash_fld = fld;
2466 pf->gtpu_hash_ctx.ipv6_udp.symm = symm;
2467 } else if ((hdr & ICE_FLOW_SEG_HDR_IPV4) &&
2468 (hdr & ICE_FLOW_SEG_HDR_TCP)) {
2469 pf->gtpu_hash_ctx.ipv4_tcp.pkt_hdr = hdr;
2470 pf->gtpu_hash_ctx.ipv4_tcp.hash_fld = fld;
2471 pf->gtpu_hash_ctx.ipv4_tcp.symm = symm;
2472 } else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) &&
2473 (hdr & ICE_FLOW_SEG_HDR_TCP)) {
2474 pf->gtpu_hash_ctx.ipv6_tcp.pkt_hdr = hdr;
2475 pf->gtpu_hash_ctx.ipv6_tcp.hash_fld = fld;
2476 pf->gtpu_hash_ctx.ipv6_tcp.symm = symm;
2477 } else if (hdr & ICE_FLOW_SEG_HDR_IPV4) {
2478 pf->gtpu_hash_ctx.ipv4.pkt_hdr = hdr;
2479 pf->gtpu_hash_ctx.ipv4.hash_fld = fld;
2480 pf->gtpu_hash_ctx.ipv4.symm = symm;
2481 ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_udp);
2482 ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_tcp);
2483 } else if (hdr & ICE_FLOW_SEG_HDR_IPV6) {
2484 pf->gtpu_hash_ctx.ipv6.pkt_hdr = hdr;
2485 pf->gtpu_hash_ctx.ipv6.hash_fld = fld;
2486 pf->gtpu_hash_ctx.ipv6.symm = symm;
2487 ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_udp);
2488 ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_tcp);
2492 if (hdr & (ICE_FLOW_SEG_HDR_GTPU_DWN |
2493 ICE_FLOW_SEG_HDR_GTPU_UP)) {
2494 if ((hdr & ICE_FLOW_SEG_HDR_IPV4) &&
2495 (hdr & ICE_FLOW_SEG_HDR_UDP)) {
2496 if (ICE_HASH_CFG_IS_ROTATING(&pf->gtpu_hash_ctx.ipv4)) {
2497 ice_add_rss_cfg(hw, vsi->idx,
2498 pf->gtpu_hash_ctx.ipv4.hash_fld,
2499 pf->gtpu_hash_ctx.ipv4.pkt_hdr,
2500 pf->gtpu_hash_ctx.ipv4.symm);
2501 ICE_HASH_CFG_ROTATE_STOP(&pf->gtpu_hash_ctx.ipv4);
2503 } else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) &&
2504 (hdr & ICE_FLOW_SEG_HDR_UDP)) {
2505 if (ICE_HASH_CFG_IS_ROTATING(&pf->gtpu_hash_ctx.ipv6)) {
2506 ice_add_rss_cfg(hw, vsi->idx,
2507 pf->gtpu_hash_ctx.ipv6.hash_fld,
2508 pf->gtpu_hash_ctx.ipv6.pkt_hdr,
2509 pf->gtpu_hash_ctx.ipv6.symm);
2510 ICE_HASH_CFG_ROTATE_STOP(&pf->gtpu_hash_ctx.ipv6);
2512 } else if ((hdr & ICE_FLOW_SEG_HDR_IPV4) &&
2513 (hdr & ICE_FLOW_SEG_HDR_TCP)) {
2514 if (ICE_HASH_CFG_IS_ROTATING(&pf->gtpu_hash_ctx.ipv4)) {
2515 ice_add_rss_cfg(hw, vsi->idx,
2516 pf->gtpu_hash_ctx.ipv4.hash_fld,
2517 pf->gtpu_hash_ctx.ipv4.pkt_hdr,
2518 pf->gtpu_hash_ctx.ipv4.symm);
2519 ICE_HASH_CFG_ROTATE_STOP(&pf->gtpu_hash_ctx.ipv4);
2521 } else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) &&
2522 (hdr & ICE_FLOW_SEG_HDR_TCP)) {
2523 if (ICE_HASH_CFG_IS_ROTATING(&pf->gtpu_hash_ctx.ipv6)) {
2524 ice_add_rss_cfg(hw, vsi->idx,
2525 pf->gtpu_hash_ctx.ipv6.hash_fld,
2526 pf->gtpu_hash_ctx.ipv6.pkt_hdr,
2527 pf->gtpu_hash_ctx.ipv6.symm);
2528 ICE_HASH_CFG_ROTATE_STOP(&pf->gtpu_hash_ctx.ipv6);
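/* GTPU hash-context bookkeeping: this appears to work around a
 * hardware restriction where an outer GTPU rule conflicts with a more
 * specific GTPU_EH/UP/DWN rule for the same pattern. When a DWN/UP
 * rule is added, _pre (below) removes the cached outer rule and marks
 * it ROTATING, and _post (above) replays the saved outer config and
 * stops the rotation.
 */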
2537 ice_add_rss_cfg_pre(struct ice_pf *pf, uint32_t hdr)
2539 struct ice_hw *hw = ICE_PF_TO_HW(pf);
2540 struct ice_vsi *vsi = pf->main_vsi;
2542 if (hdr & (ICE_FLOW_SEG_HDR_GTPU_DWN |
2543 ICE_FLOW_SEG_HDR_GTPU_UP)) {
2544 if ((hdr & ICE_FLOW_SEG_HDR_IPV4) &&
2545 (hdr & ICE_FLOW_SEG_HDR_UDP)) {
2546 if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv4_udp)) {
2547 ice_rem_rss_cfg(hw, vsi->idx,
2548 pf->gtpu_hash_ctx.ipv4_udp.hash_fld,
2549 pf->gtpu_hash_ctx.ipv4_udp.pkt_hdr);
2550 ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_udp);
2553 if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv4)) {
2554 ice_rem_rss_cfg(hw, vsi->idx,
2555 pf->gtpu_hash_ctx.ipv4.hash_fld,
2556 pf->gtpu_hash_ctx.ipv4.pkt_hdr);
2557 ICE_HASH_CFG_ROTATE_START(&pf->gtpu_hash_ctx.ipv4);
2559 } else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) &&
2560 (hdr & ICE_FLOW_SEG_HDR_UDP)) {
2561 if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv6_udp)) {
2562 ice_rem_rss_cfg(hw, vsi->idx,
2563 pf->gtpu_hash_ctx.ipv6_udp.hash_fld,
2564 pf->gtpu_hash_ctx.ipv6_udp.pkt_hdr);
2565 ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_udp);
2568 if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv6)) {
2569 ice_rem_rss_cfg(hw, vsi->idx,
2570 pf->gtpu_hash_ctx.ipv6.hash_fld,
2571 pf->gtpu_hash_ctx.ipv6.pkt_hdr);
2572 ICE_HASH_CFG_ROTATE_START(&pf->gtpu_hash_ctx.ipv6);
2574 } else if ((hdr & ICE_FLOW_SEG_HDR_IPV4) &&
2575 (hdr & ICE_FLOW_SEG_HDR_TCP)) {
2576 if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv4_tcp)) {
2577 ice_rem_rss_cfg(hw, vsi->idx,
2578 pf->gtpu_hash_ctx.ipv4_tcp.hash_fld,
2579 pf->gtpu_hash_ctx.ipv4_tcp.pkt_hdr);
2580 ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_tcp);
2583 if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv4)) {
2584 ice_rem_rss_cfg(hw, vsi->idx,
2585 pf->gtpu_hash_ctx.ipv4.hash_fld,
2586 pf->gtpu_hash_ctx.ipv4.pkt_hdr);
2587 ICE_HASH_CFG_ROTATE_START(&pf->gtpu_hash_ctx.ipv4);
2589 } else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) &&
2590 (hdr & ICE_FLOW_SEG_HDR_TCP)) {
2591 if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv6_tcp)) {
2592 ice_rem_rss_cfg(hw, vsi->idx,
2593 pf->gtpu_hash_ctx.ipv6_tcp.hash_fld,
2594 pf->gtpu_hash_ctx.ipv6_tcp.pkt_hdr);
2595 ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_tcp);
2598 if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv6)) {
2599 ice_rem_rss_cfg(hw, vsi->idx,
2600 pf->gtpu_hash_ctx.ipv6.hash_fld,
2601 pf->gtpu_hash_ctx.ipv6.pkt_hdr);
2602 ICE_HASH_CFG_ROTATE_START(&pf->gtpu_hash_ctx.ipv6);
2604 } else if (hdr & ICE_FLOW_SEG_HDR_IPV4) {
2605 if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv4)) {
2606 ice_rem_rss_cfg(hw, vsi->idx,
2607 pf->gtpu_hash_ctx.ipv4.hash_fld,
2608 pf->gtpu_hash_ctx.ipv4.pkt_hdr);
2609 ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4);
2612 if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv4_udp)) {
2613 ice_rem_rss_cfg(hw, vsi->idx,
2614 pf->gtpu_hash_ctx.ipv4_udp.hash_fld,
2615 pf->gtpu_hash_ctx.ipv4_udp.pkt_hdr);
2616 ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_udp);
2619 if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv4_tcp)) {
2620 ice_rem_rss_cfg(hw, vsi->idx,
2621 pf->gtpu_hash_ctx.ipv4_tcp.hash_fld,
2622 pf->gtpu_hash_ctx.ipv4_tcp.pkt_hdr);
2623 ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_tcp);
2625 } else if (hdr & ICE_FLOW_SEG_HDR_IPV6) {
2626 if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv6)) {
2627 ice_rem_rss_cfg(hw, vsi->idx,
2628 pf->gtpu_hash_ctx.ipv6.hash_fld,
2629 pf->gtpu_hash_ctx.ipv6.pkt_hdr);
2630 ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6);
2633 if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv6_udp)) {
2634 ice_rem_rss_cfg(hw, vsi->idx,
2635 pf->gtpu_hash_ctx.ipv6_udp.hash_fld,
2636 pf->gtpu_hash_ctx.ipv6_udp.pkt_hdr);
2637 ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_udp);
2640 if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv6_tcp)) {
2641 ice_rem_rss_cfg(hw, vsi->idx,
2642 pf->gtpu_hash_ctx.ipv6_tcp.hash_fld,
2643 pf->gtpu_hash_ctx.ipv6_tcp.pkt_hdr);
2644 ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_tcp);
2653 ice_rem_rss_cfg_post(struct ice_pf *pf, uint32_t hdr)
2655 if (hdr & ICE_FLOW_SEG_HDR_GTPU_EH) {
2656 if ((hdr & ICE_FLOW_SEG_HDR_IPV4) &&
2657 (hdr & ICE_FLOW_SEG_HDR_UDP)) {
2658 ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_udp);
2659 } else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) &&
2660 (hdr & ICE_FLOW_SEG_HDR_UDP)) {
2661 ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_udp);
2662 } else if ((hdr & ICE_FLOW_SEG_HDR_IPV4) &&
2663 (hdr & ICE_FLOW_SEG_HDR_TCP)) {
2664 ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_tcp);
2665 } else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) &&
2666 (hdr & ICE_FLOW_SEG_HDR_TCP)) {
2667 ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_tcp);
2668 } else if (hdr & ICE_FLOW_SEG_HDR_IPV4) {
2669 ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4);
2670 } else if (hdr & ICE_FLOW_SEG_HDR_IPV6) {
2671 ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6);
2679 ice_rem_rss_cfg_wrap(struct ice_pf *pf, uint16_t vsi_id,
2680 uint64_t fld, uint32_t hdr)
2682 struct ice_hw *hw = ICE_PF_TO_HW(pf);
2685 ret = ice_rem_rss_cfg(hw, vsi_id, fld, hdr);
2686 if (ret && ret != ICE_ERR_DOES_NOT_EXIST)
2687 PMD_DRV_LOG(ERR, "remove rss cfg failed");
2689 ret = ice_rem_rss_cfg_post(pf, hdr);
2691 PMD_DRV_LOG(ERR, "remove rss cfg post failed");
2697 ice_add_rss_cfg_wrap(struct ice_pf *pf, uint16_t vsi_id,
2698 uint64_t fld, uint32_t hdr, bool symm)
2700 struct ice_hw *hw = ICE_PF_TO_HW(pf);
2703 ret = ice_add_rss_cfg_pre(pf, hdr);
2705 PMD_DRV_LOG(ERR, "add rss cfg pre failed");
2707 ret = ice_add_rss_cfg(hw, vsi_id, fld, hdr, symm);
2709 PMD_DRV_LOG(ERR, "add rss cfg failed");
2711 ret = ice_add_rss_cfg_post(pf, hdr, fld, symm);
2713 PMD_DRV_LOG(ERR, "add rss cfg post failed");
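/* The wrapper sequence is: _pre tears down any cached config that
 * would conflict with the new rule, ice_add_rss_cfg() programs the
 * hardware, and _post records the new rule in the GTPU hash context
 * so it can be replayed later.
 */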
2719 ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
2721 struct ice_vsi *vsi = pf->main_vsi;
2724 /* Configure RSS for IPv4 with src/dst addr as input set */
2725 if (rss_hf & ETH_RSS_IPV4) {
2726 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_FLOW_HASH_IPV4,
2727 ICE_FLOW_SEG_HDR_IPV4 |
2728 ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
2730 PMD_DRV_LOG(ERR, "%s IPV4 rss flow fail %d",
2734 /* Configure RSS for IPv6 with src/dst addr as input set */
2735 if (rss_hf & ETH_RSS_IPV6) {
2736 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_FLOW_HASH_IPV6,
2737 ICE_FLOW_SEG_HDR_IPV6 |
2738 ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
2740 PMD_DRV_LOG(ERR, "%s IPV6 rss flow fail %d",
2744 /* Configure RSS for udp4 with src/dst addr and port as input set */
2745 if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) {
2746 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_UDP_IPV4,
2747 ICE_FLOW_SEG_HDR_UDP |
2748 ICE_FLOW_SEG_HDR_IPV4 |
2749 ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
2751 PMD_DRV_LOG(ERR, "%s UDP_IPV4 rss flow fail %d",
2755 /* Configure RSS for udp6 with src/dst addr and port as input set */
2756 if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) {
2757 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_UDP_IPV6,
2758 ICE_FLOW_SEG_HDR_UDP |
2759 ICE_FLOW_SEG_HDR_IPV6 |
2760 ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
2762 PMD_DRV_LOG(ERR, "%s UDP_IPV6 rss flow fail %d",
2766 /* Configure RSS for tcp4 with src/dst addr and port as input set */
2767 if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) {
2768 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_TCP_IPV4,
2769 ICE_FLOW_SEG_HDR_TCP |
2770 ICE_FLOW_SEG_HDR_IPV4 |
2771 ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
2773 PMD_DRV_LOG(ERR, "%s TCP_IPV4 rss flow fail %d",
2777 /* Configure RSS for tcp6 with src/dst addr and port as input set */
2778 if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) {
2779 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_TCP_IPV6,
2780 ICE_FLOW_SEG_HDR_TCP |
2781 ICE_FLOW_SEG_HDR_IPV6 |
2782 ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
2784 PMD_DRV_LOG(ERR, "%s TCP_IPV6 rss flow fail %d",
2788 /* Configure RSS for sctp4 with src/dst addr as input set */
2789 if (rss_hf & ETH_RSS_NONFRAG_IPV4_SCTP) {
2790 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_FLOW_HASH_IPV4,
2791 ICE_FLOW_SEG_HDR_SCTP |
2792 ICE_FLOW_SEG_HDR_IPV4 |
2793 ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
2795 PMD_DRV_LOG(ERR, "%s SCTP_IPV4 rss flow fail %d",
2799 /* Configure RSS for sctp6 with src/dst addr as input set */
2800 if (rss_hf & ETH_RSS_NONFRAG_IPV6_SCTP) {
2801 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_FLOW_HASH_IPV6,
2802 ICE_FLOW_SEG_HDR_SCTP |
2803 ICE_FLOW_SEG_HDR_IPV6 |
2804 ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
2806 PMD_DRV_LOG(ERR, "%s SCTP_IPV6 rss flow fail %d",
2810 if (rss_hf & ETH_RSS_IPV4) {
2811 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_FLOW_HASH_IPV4,
2812 ICE_FLOW_SEG_HDR_GTPU_IP |
2813 ICE_FLOW_SEG_HDR_IPV4 |
2814 ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
2816 PMD_DRV_LOG(ERR, "%s GTPU_IPV4 rss flow fail %d",
2819 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_FLOW_HASH_IPV4,
2820 ICE_FLOW_SEG_HDR_GTPU_EH |
2821 ICE_FLOW_SEG_HDR_IPV4 |
2822 ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
2824 PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV4 rss flow fail %d",
2827 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_FLOW_HASH_IPV4,
2828 ICE_FLOW_SEG_HDR_PPPOE |
2829 ICE_FLOW_SEG_HDR_IPV4 |
2830 ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
2832 PMD_DRV_LOG(ERR, "%s PPPoE_IPV4 rss flow fail %d",
2836 if (rss_hf & ETH_RSS_IPV6) {
2837 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_FLOW_HASH_IPV6,
2838 ICE_FLOW_SEG_HDR_GTPU_IP |
2839 ICE_FLOW_SEG_HDR_IPV6 |
2840 ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
2842 PMD_DRV_LOG(ERR, "%s GTPU_IPV6 rss flow fail %d",
2845 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_FLOW_HASH_IPV6,
2846 ICE_FLOW_SEG_HDR_GTPU_EH |
2847 ICE_FLOW_SEG_HDR_IPV6 |
2848 ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
2850 PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV6 rss flow fail %d",
2853 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_FLOW_HASH_IPV6,
2854 ICE_FLOW_SEG_HDR_PPPOE |
2855 ICE_FLOW_SEG_HDR_IPV6 |
2856 ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
2858 PMD_DRV_LOG(ERR, "%s PPPoE_IPV6 rss flow fail %d",
2862 if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) {
2863 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_UDP_IPV4,
2864 ICE_FLOW_SEG_HDR_GTPU_IP |
2865 ICE_FLOW_SEG_HDR_UDP |
2866 ICE_FLOW_SEG_HDR_IPV4 |
2867 ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
2869 PMD_DRV_LOG(ERR, "%s GTPU_IPV4_UDP rss flow fail %d",
2872 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_UDP_IPV4,
2873 ICE_FLOW_SEG_HDR_GTPU_EH |
2874 ICE_FLOW_SEG_HDR_UDP |
2875 ICE_FLOW_SEG_HDR_IPV4 |
2876 ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
2878 PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV4_UDP rss flow fail %d",
2881 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_UDP_IPV4,
2882 ICE_FLOW_SEG_HDR_PPPOE |
2883 ICE_FLOW_SEG_HDR_UDP |
2884 ICE_FLOW_SEG_HDR_IPV4 |
2885 ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
2887 PMD_DRV_LOG(ERR, "%s PPPoE_IPV4_UDP rss flow fail %d",
2891 if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) {
2892 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_UDP_IPV6,
2893 ICE_FLOW_SEG_HDR_GTPU_IP |
2894 ICE_FLOW_SEG_HDR_UDP |
2895 ICE_FLOW_SEG_HDR_IPV6 |
2896 ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
2898 PMD_DRV_LOG(ERR, "%s GTPU_IPV6_UDP rss flow fail %d",
2901 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_UDP_IPV6,
2902 ICE_FLOW_SEG_HDR_GTPU_EH |
2903 ICE_FLOW_SEG_HDR_UDP |
2904 ICE_FLOW_SEG_HDR_IPV6 |
2905 ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
2907 PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV6_UDP rss flow fail %d",
2910 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_UDP_IPV6,
2911 ICE_FLOW_SEG_HDR_PPPOE |
2912 ICE_FLOW_SEG_HDR_UDP |
2913 ICE_FLOW_SEG_HDR_IPV6 |
2914 ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
2916 PMD_DRV_LOG(ERR, "%s PPPoE_IPV6_UDP rss flow fail %d",
2920 if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) {
2921 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_TCP_IPV4,
2922 ICE_FLOW_SEG_HDR_GTPU_IP |
2923 ICE_FLOW_SEG_HDR_TCP |
2924 ICE_FLOW_SEG_HDR_IPV4 |
2925 ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
2927 PMD_DRV_LOG(ERR, "%s GTPU_IPV4_TCP rss flow fail %d",
2930 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_TCP_IPV4,
2931 ICE_FLOW_SEG_HDR_GTPU_EH |
2932 ICE_FLOW_SEG_HDR_TCP |
2933 ICE_FLOW_SEG_HDR_IPV4 |
2934 ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
2936 PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV4_TCP rss flow fail %d",
2939 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_TCP_IPV4,
2940 ICE_FLOW_SEG_HDR_PPPOE |
2941 ICE_FLOW_SEG_HDR_TCP |
2942 ICE_FLOW_SEG_HDR_IPV4 |
2943 ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
2945 PMD_DRV_LOG(ERR, "%s PPPoE_IPV4_TCP rss flow fail %d",
2949 if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) {
2950 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_TCP_IPV6,
2951 ICE_FLOW_SEG_HDR_GTPU_IP |
2952 ICE_FLOW_SEG_HDR_TCP |
2953 ICE_FLOW_SEG_HDR_IPV6 |
2954 ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
2956 PMD_DRV_LOG(ERR, "%s GTPU_IPV6_TCP rss flow fail %d",
2959 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_TCP_IPV6,
2960 ICE_FLOW_SEG_HDR_GTPU_EH |
2961 ICE_FLOW_SEG_HDR_TCP |
2962 ICE_FLOW_SEG_HDR_IPV6 |
2963 ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
2965 PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV6_TCP rss flow fail %d",
2968 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_TCP_IPV6,
2969 ICE_FLOW_SEG_HDR_PPPOE |
2970 ICE_FLOW_SEG_HDR_TCP |
2971 ICE_FLOW_SEG_HDR_IPV6 |
2972 ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
2974 PMD_DRV_LOG(ERR, "%s PPPoE_IPV6_TCP rss flow fail %d",
2978 if (rss_hf & ETH_RSS_NONFRAG_IPV4_SCTP) {
2979 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_SCTP_IPV4,
2980 ICE_FLOW_SEG_HDR_GTPU_IP |
2981 ICE_FLOW_SEG_HDR_SCTP |
2982 ICE_FLOW_SEG_HDR_IPV4 |
2983 ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
2985 PMD_DRV_LOG(ERR, "%s GTPU_IPV4_SCTP rss flow fail %d",
2988 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_SCTP_IPV4,
2989 ICE_FLOW_SEG_HDR_GTPU_EH |
2990 ICE_FLOW_SEG_HDR_SCTP |
2991 ICE_FLOW_SEG_HDR_IPV4 |
2992 ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
2994 PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV4_SCTP rss flow fail %d",
2998 if (rss_hf & ETH_RSS_NONFRAG_IPV6_SCTP) {
2999 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_SCTP_IPV6,
3000 ICE_FLOW_SEG_HDR_GTPU_IP |
3001 ICE_FLOW_SEG_HDR_SCTP |
3002 ICE_FLOW_SEG_HDR_IPV6 |
3003 ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
3005 PMD_DRV_LOG(ERR, "%s GTPU_IPV6_SCTP rss flow fail %d",
3008 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_SCTP_IPV6,
3009 ICE_FLOW_SEG_HDR_GTPU_EH |
3010 ICE_FLOW_SEG_HDR_SCTP |
3011 ICE_FLOW_SEG_HDR_IPV6 |
3012 ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
3014 PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV6_SCTP rss flow fail %d",
3019 static int ice_init_rss(struct ice_pf *pf)
3021 struct ice_hw *hw = ICE_PF_TO_HW(pf);
3022 struct ice_vsi *vsi = pf->main_vsi;
3023 struct rte_eth_dev *dev = pf->adapter->eth_dev;
3024 struct rte_eth_rss_conf *rss_conf;
3025 struct ice_aqc_get_set_rss_keys key;
3028 bool is_safe_mode = pf->adapter->is_safe_mode;
3031 rss_conf = &dev->data->dev_conf.rx_adv_conf.rss_conf;
3032 nb_q = dev->data->nb_rx_queues;
3033 vsi->rss_key_size = ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE;
3034 vsi->rss_lut_size = pf->hash_lut_size;
3037 PMD_DRV_LOG(WARNING, "RSS is not supported in safe mode");
3041 if (!vsi->rss_key) {
3042 vsi->rss_key = rte_zmalloc(NULL,
3043 vsi->rss_key_size, 0);
3044 if (vsi->rss_key == NULL) {
3045 PMD_DRV_LOG(ERR, "Failed to allocate memory for rss_key");
3049 if (!vsi->rss_lut) {
3050 vsi->rss_lut = rte_zmalloc(NULL,
3051 vsi->rss_lut_size, 0);
3052 if (vsi->rss_lut == NULL) {
3053 PMD_DRV_LOG(ERR, "Failed to allocate memory for rss_lut");
3054 rte_free(vsi->rss_key);
3055 vsi->rss_key = NULL;
3059 /* configure RSS key */
3060 if (!rss_conf->rss_key) {
3061 /* Calculate the default hash key */
3062 for (i = 0; i < vsi->rss_key_size; i++)
3063 vsi->rss_key[i] = (uint8_t)rte_rand();
3065 rte_memcpy(vsi->rss_key, rss_conf->rss_key,
3066 RTE_MIN(rss_conf->rss_key_len,
3067 vsi->rss_key_size));
3069 rte_memcpy(key.standard_rss_key, vsi->rss_key, vsi->rss_key_size);
3070 ret = ice_aq_set_rss_key(hw, vsi->idx, &key);
3074 /* init RSS LUT table */
3075 for (i = 0; i < vsi->rss_lut_size; i++)
3076 vsi->rss_lut[i] = i % nb_q;
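/* Round-robin example: with a 512-entry LUT and 6 Rx queues the
 * table reads 0,1,2,3,4,5,0,1,... so flows hash evenly across all
 * configured queues.
 */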
3078 ret = ice_aq_set_rss_lut(hw, vsi->idx,
3079 ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF,
3080 vsi->rss_lut, vsi->rss_lut_size);
3084 /* Select the symmetric Toeplitz hash scheme in the VSI hash control register. */
3085 reg = ICE_READ_REG(hw, VSIQF_HASH_CTL(vsi->vsi_id));
3086 reg = (reg & (~VSIQF_HASH_CTL_HASH_SCHEME_M)) |
3087 (1 << VSIQF_HASH_CTL_HASH_SCHEME_S);
3088 ICE_WRITE_REG(hw, VSIQF_HASH_CTL(vsi->vsi_id), reg);
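/* Read-modify-write: the HASH_SCHEME field is cleared first, then set
 * to scheme 1 (symmetric Toeplitz), leaving the other bits of
 * VSIQF_HASH_CTL untouched.
 */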
3090 /* RSS hash configuration */
3091 ice_rss_hash_set(pf, rss_conf->rss_hf);
3095 rte_free(vsi->rss_key);
3096 vsi->rss_key = NULL;
3097 rte_free(vsi->rss_lut);
3098 vsi->rss_lut = NULL;
3103 ice_dev_configure(struct rte_eth_dev *dev)
3105 struct ice_adapter *ad =
3106 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
3107 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3110 /* Initialize to TRUE. If any Rx queue doesn't meet the bulk
3111 * allocation or vector Rx preconditions, we will reset it.
3113 ad->rx_bulk_alloc_allowed = true;
3114 ad->tx_simple_allowed = true;
3116 if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
3117 dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
3119 ret = ice_init_rss(pf);
3121 PMD_DRV_LOG(ERR, "Failed to enable rss for PF");
3129 __vsi_queues_bind_intr(struct ice_vsi *vsi, uint16_t msix_vect,
3130 int base_queue, int nb_queue)
3132 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
3133 uint32_t val, val_tx;
3136 for (i = 0; i < nb_queue; i++) {
3138 val = (msix_vect & QINT_RQCTL_MSIX_INDX_M) |
3139 (0 << QINT_RQCTL_ITR_INDX_S) | QINT_RQCTL_CAUSE_ENA_M;
3140 val_tx = (msix_vect & QINT_TQCTL_MSIX_INDX_M) |
3141 (0 << QINT_TQCTL_ITR_INDX_S) | QINT_TQCTL_CAUSE_ENA_M;
3143 PMD_DRV_LOG(INFO, "queue %d is binding to vect %d",
3144 base_queue + i, msix_vect);
3145 /* set ITR0 value */
3146 ICE_WRITE_REG(hw, GLINT_ITR(0, msix_vect), 0x10);
3147 ICE_WRITE_REG(hw, QINT_RQCTL(base_queue + i), val);
3148 ICE_WRITE_REG(hw, QINT_TQCTL(base_queue + i), val_tx);
3153 ice_vsi_queues_bind_intr(struct ice_vsi *vsi)
3155 struct rte_eth_dev *dev = vsi->adapter->eth_dev;
3156 struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
3157 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
3158 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
3159 uint16_t msix_vect = vsi->msix_intr;
3160 uint16_t nb_msix = RTE_MIN(vsi->nb_msix, intr_handle->nb_efd);
3161 uint16_t queue_idx = 0;
3165 /* clear Rx/Tx queue interrupt */
3166 for (i = 0; i < vsi->nb_used_qps; i++) {
3167 ICE_WRITE_REG(hw, QINT_TQCTL(vsi->base_queue + i), 0);
3168 ICE_WRITE_REG(hw, QINT_RQCTL(vsi->base_queue + i), 0);
3171 /* PF bind interrupt */
3172 if (rte_intr_dp_is_en(intr_handle)) {
3177 for (i = 0; i < vsi->nb_used_qps; i++) {
3179 if (!rte_intr_allow_others(intr_handle))
3180 msix_vect = ICE_MISC_VEC_ID;
3182 /* UIO: map all queues to one msix_vect */
3183 __vsi_queues_bind_intr(vsi, msix_vect,
3184 vsi->base_queue + i,
3185 vsi->nb_used_qps - i);
3187 for (; !!record && i < vsi->nb_used_qps; i++)
3188 intr_handle->intr_vec[queue_idx + i] =
3193 /* vfio 1:1 queue/msix_vect mapping */
3194 __vsi_queues_bind_intr(vsi, msix_vect,
3195 vsi->base_queue + i, 1);
3198 intr_handle->intr_vec[queue_idx + i] = msix_vect;
3206 ice_vsi_enable_queues_intr(struct ice_vsi *vsi)
3208 struct rte_eth_dev *dev = vsi->adapter->eth_dev;
3209 struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
3210 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
3211 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
3212 uint16_t msix_intr, i;
3214 if (rte_intr_allow_others(intr_handle))
3215 for (i = 0; i < vsi->nb_used_qps; i++) {
3216 msix_intr = vsi->msix_intr + i;
3217 ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr),
3218 GLINT_DYN_CTL_INTENA_M |
3219 GLINT_DYN_CTL_CLEARPBA_M |
3220 GLINT_DYN_CTL_ITR_INDX_M |
3221 GLINT_DYN_CTL_WB_ON_ITR_M);
3224 ICE_WRITE_REG(hw, GLINT_DYN_CTL(0),
3225 GLINT_DYN_CTL_INTENA_M |
3226 GLINT_DYN_CTL_CLEARPBA_M |
3227 GLINT_DYN_CTL_ITR_INDX_M |
3228 GLINT_DYN_CTL_WB_ON_ITR_M);
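/* INTENA arms the vector and CLEARPBA clears a stale pending-bit;
 * writing the ITR index field as all ones (index 3) appears to select
 * the no-ITR slot so the interrupt is not throttled.
 */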
3232 ice_rxq_intr_setup(struct rte_eth_dev *dev)
3234 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3235 struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
3236 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
3237 struct ice_vsi *vsi = pf->main_vsi;
3238 uint32_t intr_vector = 0;
3240 rte_intr_disable(intr_handle);
3242 /* check and configure queue intr-vector mapping */
3243 if ((rte_intr_cap_multiple(intr_handle) ||
3244 !RTE_ETH_DEV_SRIOV(dev).active) &&
3245 dev->data->dev_conf.intr_conf.rxq != 0) {
3246 intr_vector = dev->data->nb_rx_queues;
3247 if (intr_vector > ICE_MAX_INTR_QUEUE_NUM) {
3248 PMD_DRV_LOG(ERR, "At most %d intr queues supported",
3249 ICE_MAX_INTR_QUEUE_NUM);
3252 if (rte_intr_efd_enable(intr_handle, intr_vector))
3256 if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
3257 intr_handle->intr_vec =
3258 rte_zmalloc(NULL, dev->data->nb_rx_queues * sizeof(int),
3260 if (!intr_handle->intr_vec) {
3262 "Failed to allocate %d rx_queues intr_vec",
3263 dev->data->nb_rx_queues);
3268 /* Map queues with MSIX interrupt */
3269 vsi->nb_used_qps = dev->data->nb_rx_queues;
3270 ice_vsi_queues_bind_intr(vsi);
3272 /* Enable interrupts for all the queues */
3273 ice_vsi_enable_queues_intr(vsi);
3275 rte_intr_enable(intr_handle);
3281 ice_get_init_link_status(struct rte_eth_dev *dev)
3283 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3284 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3285 bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false;
3286 struct ice_link_status link_status;
3289 ret = ice_aq_get_link_info(hw->port_info, enable_lse,
3290 &link_status, NULL);
3291 if (ret != ICE_SUCCESS) {
3292 PMD_DRV_LOG(ERR, "Failed to get link info");
3293 pf->init_link_up = false;
3297 if (link_status.link_info & ICE_AQ_LINK_UP)
3298 pf->init_link_up = true;
3302 ice_dev_start(struct rte_eth_dev *dev)
3304 struct rte_eth_dev_data *data = dev->data;
3305 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3306 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3307 struct ice_vsi *vsi = pf->main_vsi;
3308 uint16_t nb_rxq = 0;
3310 uint16_t max_frame_size;
3313 /* program Tx queues' context in hardware */
3314 for (nb_txq = 0; nb_txq < data->nb_tx_queues; nb_txq++) {
3315 ret = ice_tx_queue_start(dev, nb_txq);
3317 PMD_DRV_LOG(ERR, "fail to start Tx queue %u", nb_txq);
3322 /* program Rx queues' context in hardware */
3323 for (nb_rxq = 0; nb_rxq < data->nb_rx_queues; nb_rxq++) {
3324 ret = ice_rx_queue_start(dev, nb_rxq);
3326 PMD_DRV_LOG(ERR, "fail to start Rx queue %u", nb_rxq);
3331 ice_set_rx_function(dev);
3332 ice_set_tx_function(dev);
3334 mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
3335 ETH_VLAN_EXTEND_MASK;
3336 ret = ice_vlan_offload_set(dev, mask);
3338 PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
3342 /* enable Rx interrupt and map Rx queues to interrupt vectors */
3343 if (ice_rxq_intr_setup(dev))
3346 /* Enable receiving broadcast packets and transmitting packets */
3347 ret = ice_set_vsi_promisc(hw, vsi->idx,
3348 ICE_PROMISC_BCAST_RX | ICE_PROMISC_BCAST_TX |
3349 ICE_PROMISC_UCAST_TX | ICE_PROMISC_MCAST_TX,
3351 if (ret != ICE_SUCCESS)
3352 PMD_DRV_LOG(INFO, "fail to set vsi broadcast");
3354 ret = ice_aq_set_event_mask(hw, hw->port_info->lport,
3355 ((u16)(ICE_AQ_LINK_EVENT_LINK_FAULT |
3356 ICE_AQ_LINK_EVENT_PHY_TEMP_ALARM |
3357 ICE_AQ_LINK_EVENT_EXCESSIVE_ERRORS |
3358 ICE_AQ_LINK_EVENT_SIGNAL_DETECT |
3359 ICE_AQ_LINK_EVENT_AN_COMPLETED |
3360 ICE_AQ_LINK_EVENT_PORT_TX_SUSPENDED)),
3362 if (ret != ICE_SUCCESS)
3363 PMD_DRV_LOG(WARNING, "Fail to set phy mask");
3365 ice_get_init_link_status(dev);
3367 ice_dev_set_link_up(dev);
3369 /* Call the get_link_info aq command to enable/disable LSE */
3370 ice_link_update(dev, 0);
3372 pf->adapter_stopped = false;
3374 /* Set the max frame size to the default value */
3375 max_frame_size = pf->dev_data->dev_conf.rxmode.max_rx_pkt_len ?
3376 pf->dev_data->dev_conf.rxmode.max_rx_pkt_len :
3379 /* Program the max frame size into HW */
3380 ice_aq_set_mac_cfg(hw, max_frame_size, NULL);
3384 /* stop the started queues if we failed to start all queues */
3386 for (i = 0; i < nb_rxq; i++)
3387 ice_rx_queue_stop(dev, i);
3389 for (i = 0; i < nb_txq; i++)
3390 ice_tx_queue_stop(dev, i);
3396 ice_dev_reset(struct rte_eth_dev *dev)
3400 if (dev->data->sriov.active)
3403 ret = ice_dev_uninit(dev);
3405 PMD_INIT_LOG(ERR, "failed to uninit device, status = %d", ret);
3409 ret = ice_dev_init(dev);
3411 PMD_INIT_LOG(ERR, "failed to init device, status = %d", ret);
3419 ice_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
3421 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3422 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3423 struct ice_vsi *vsi = pf->main_vsi;
3424 struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
3425 bool is_safe_mode = pf->adapter->is_safe_mode;
3429 dev_info->min_rx_bufsize = ICE_BUF_SIZE_MIN;
3430 dev_info->max_rx_pktlen = ICE_FRAME_SIZE_MAX;
3431 dev_info->max_rx_queues = vsi->nb_qps;
3432 dev_info->max_tx_queues = vsi->nb_qps;
3433 dev_info->max_mac_addrs = vsi->max_macaddrs;
3434 dev_info->max_vfs = pci_dev->max_vfs;
3435 dev_info->max_mtu = dev_info->max_rx_pktlen - ICE_ETH_OVERHEAD;
3436 dev_info->min_mtu = RTE_ETHER_MIN_MTU;
3438 dev_info->rx_offload_capa =
3439 DEV_RX_OFFLOAD_VLAN_STRIP |
3440 DEV_RX_OFFLOAD_JUMBO_FRAME |
3441 DEV_RX_OFFLOAD_KEEP_CRC |
3442 DEV_RX_OFFLOAD_SCATTER |
3443 DEV_RX_OFFLOAD_VLAN_FILTER;
3444 dev_info->tx_offload_capa =
3445 DEV_TX_OFFLOAD_VLAN_INSERT |
3446 DEV_TX_OFFLOAD_TCP_TSO |
3447 DEV_TX_OFFLOAD_MULTI_SEGS |
3448 DEV_TX_OFFLOAD_MBUF_FAST_FREE;
3449 dev_info->flow_type_rss_offloads = 0;
3451 if (!is_safe_mode) {
3452 dev_info->rx_offload_capa |=
3453 DEV_RX_OFFLOAD_IPV4_CKSUM |
3454 DEV_RX_OFFLOAD_UDP_CKSUM |
3455 DEV_RX_OFFLOAD_TCP_CKSUM |
3456 DEV_RX_OFFLOAD_QINQ_STRIP |
3457 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
3458 DEV_RX_OFFLOAD_VLAN_EXTEND |
3459 DEV_RX_OFFLOAD_RSS_HASH;
3460 dev_info->tx_offload_capa |=
3461 DEV_TX_OFFLOAD_QINQ_INSERT |
3462 DEV_TX_OFFLOAD_IPV4_CKSUM |
3463 DEV_TX_OFFLOAD_UDP_CKSUM |
3464 DEV_TX_OFFLOAD_TCP_CKSUM |
3465 DEV_TX_OFFLOAD_SCTP_CKSUM |
3466 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
3467 DEV_TX_OFFLOAD_OUTER_UDP_CKSUM;
3468 dev_info->flow_type_rss_offloads |= ICE_RSS_OFFLOAD_ALL;
3471 dev_info->rx_queue_offload_capa = 0;
3472 dev_info->tx_queue_offload_capa = 0;
3474 dev_info->reta_size = pf->hash_lut_size;
3475 dev_info->hash_key_size = (VSIQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
3477 dev_info->default_rxconf = (struct rte_eth_rxconf) {
3479 .pthresh = ICE_DEFAULT_RX_PTHRESH,
3480 .hthresh = ICE_DEFAULT_RX_HTHRESH,
3481 .wthresh = ICE_DEFAULT_RX_WTHRESH,
3483 .rx_free_thresh = ICE_DEFAULT_RX_FREE_THRESH,
3488 dev_info->default_txconf = (struct rte_eth_txconf) {
3490 .pthresh = ICE_DEFAULT_TX_PTHRESH,
3491 .hthresh = ICE_DEFAULT_TX_HTHRESH,
3492 .wthresh = ICE_DEFAULT_TX_WTHRESH,
3494 .tx_free_thresh = ICE_DEFAULT_TX_FREE_THRESH,
3495 .tx_rs_thresh = ICE_DEFAULT_TX_RSBIT_THRESH,
3499 dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
3500 .nb_max = ICE_MAX_RING_DESC,
3501 .nb_min = ICE_MIN_RING_DESC,
3502 .nb_align = ICE_ALIGN_RING_DESC,
3505 dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
3506 .nb_max = ICE_MAX_RING_DESC,
3507 .nb_min = ICE_MIN_RING_DESC,
3508 .nb_align = ICE_ALIGN_RING_DESC,
3511 dev_info->speed_capa = ETH_LINK_SPEED_10M |
3512 ETH_LINK_SPEED_100M |
3514 ETH_LINK_SPEED_2_5G |
3516 ETH_LINK_SPEED_10G |
3517 ETH_LINK_SPEED_20G |
3520 phy_type_low = hw->port_info->phy.phy_type_low;
3521 phy_type_high = hw->port_info->phy.phy_type_high;
3523 if (ICE_PHY_TYPE_SUPPORT_50G(phy_type_low))
3524 dev_info->speed_capa |= ETH_LINK_SPEED_50G;
3526 if (ICE_PHY_TYPE_SUPPORT_100G_LOW(phy_type_low) ||
3527 ICE_PHY_TYPE_SUPPORT_100G_HIGH(phy_type_high))
3528 dev_info->speed_capa |= ETH_LINK_SPEED_100G;
3530 dev_info->nb_rx_queues = dev->data->nb_rx_queues;
3531 dev_info->nb_tx_queues = dev->data->nb_tx_queues;
3533 dev_info->default_rxportconf.burst_size = ICE_RX_MAX_BURST;
3534 dev_info->default_txportconf.burst_size = ICE_TX_MAX_BURST;
3535 dev_info->default_rxportconf.nb_queues = 1;
3536 dev_info->default_txportconf.nb_queues = 1;
3537 dev_info->default_rxportconf.ring_size = ICE_BUF_SIZE_MIN;
3538 dev_info->default_txportconf.ring_size = ICE_BUF_SIZE_MIN;
3544 ice_atomic_read_link_status(struct rte_eth_dev *dev,
3545 struct rte_eth_link *link)
3547 struct rte_eth_link *dst = link;
3548 struct rte_eth_link *src = &dev->data->dev_link;
3550 if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
3551 *(uint64_t *)src) == 0)
3558 ice_atomic_write_link_status(struct rte_eth_dev *dev,
3559 struct rte_eth_link *link)
3561 struct rte_eth_link *dst = &dev->data->dev_link;
3562 struct rte_eth_link *src = link;
3564 if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
3565 *(uint64_t *)src) == 0)
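/* struct rte_eth_link is packed into 64 bits, so a single
 * rte_atomic64_cmpset() copies the whole link status atomically; the
 * helpers return -1 when a concurrent writer changed *dst between the
 * read and the compare-and-set.
 */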
3572 ice_link_update(struct rte_eth_dev *dev, int wait_to_complete)
3574 #define CHECK_INTERVAL 100 /* 100ms */
3575 #define MAX_REPEAT_TIME 10 /* 1s (10 * 100ms) in total */
3576 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3577 struct ice_link_status link_status;
3578 struct rte_eth_link link, old;
3580 unsigned int rep_cnt = MAX_REPEAT_TIME;
3581 bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false;
3583 memset(&link, 0, sizeof(link));
3584 memset(&old, 0, sizeof(old));
3585 memset(&link_status, 0, sizeof(link_status));
3586 ice_atomic_read_link_status(dev, &old);
3589 /* Get link status information from hardware */
3590 status = ice_aq_get_link_info(hw->port_info, enable_lse,
3591 &link_status, NULL);
3592 if (status != ICE_SUCCESS) {
3593 link.link_speed = ETH_SPEED_NUM_100M;
3594 link.link_duplex = ETH_LINK_FULL_DUPLEX;
3595 PMD_DRV_LOG(ERR, "Failed to get link info");
3599 link.link_status = link_status.link_info & ICE_AQ_LINK_UP;
3600 if (!wait_to_complete || link.link_status)
3603 rte_delay_ms(CHECK_INTERVAL);
3604 } while (--rep_cnt);
3606 if (!link.link_status)
3609 /* Full-duplex operation at all supported speeds */
3610 link.link_duplex = ETH_LINK_FULL_DUPLEX;
3612 /* Parse the link status */
3613 switch (link_status.link_speed) {
3614 case ICE_AQ_LINK_SPEED_10MB:
3615 link.link_speed = ETH_SPEED_NUM_10M;
3617 case ICE_AQ_LINK_SPEED_100MB:
3618 link.link_speed = ETH_SPEED_NUM_100M;
3620 case ICE_AQ_LINK_SPEED_1000MB:
3621 link.link_speed = ETH_SPEED_NUM_1G;
3623 case ICE_AQ_LINK_SPEED_2500MB:
3624 link.link_speed = ETH_SPEED_NUM_2_5G;
3626 case ICE_AQ_LINK_SPEED_5GB:
3627 link.link_speed = ETH_SPEED_NUM_5G;
3629 case ICE_AQ_LINK_SPEED_10GB:
3630 link.link_speed = ETH_SPEED_NUM_10G;
3632 case ICE_AQ_LINK_SPEED_20GB:
3633 link.link_speed = ETH_SPEED_NUM_20G;
3635 case ICE_AQ_LINK_SPEED_25GB:
3636 link.link_speed = ETH_SPEED_NUM_25G;
3638 case ICE_AQ_LINK_SPEED_40GB:
3639 link.link_speed = ETH_SPEED_NUM_40G;
3641 case ICE_AQ_LINK_SPEED_50GB:
3642 link.link_speed = ETH_SPEED_NUM_50G;
3644 case ICE_AQ_LINK_SPEED_100GB:
3645 link.link_speed = ETH_SPEED_NUM_100G;
3647 case ICE_AQ_LINK_SPEED_UNKNOWN:
3649 PMD_DRV_LOG(ERR, "Unknown link speed");
3650 link.link_speed = ETH_SPEED_NUM_NONE;
3654 link.link_autoneg = !(dev->data->dev_conf.link_speeds &
3655 ETH_LINK_SPEED_FIXED);
3658 ice_atomic_write_link_status(dev, &link);
3659 if (link.link_status == old.link_status)
3665 /* Force the physical link state by getting the current PHY capabilities from
3666 * hardware and setting the PHY config based on the determined capabilities. If
3667 * link changes, link event will be triggered because both the Enable Automatic
3668 * Link Update and LESM Enable bits are set when setting the PHY capabilities.
3670 static enum ice_status
3671 ice_force_phys_link_state(struct ice_hw *hw, bool link_up)
3673 struct ice_aqc_set_phy_cfg_data cfg = { 0 };
3674 struct ice_aqc_get_phy_caps_data *pcaps;
3675 struct ice_port_info *pi;
3676 enum ice_status status;
3678 if (!hw || !hw->port_info)
3679 return ICE_ERR_PARAM;
3683 pcaps = (struct ice_aqc_get_phy_caps_data *)
3684 ice_malloc(hw, sizeof(*pcaps));
3686 return ICE_ERR_NO_MEMORY;
3688 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
3693 /* No change in link */
3694 if (link_up == !!(pcaps->caps & ICE_AQC_PHY_EN_LINK) &&
3695 link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP))
3698 cfg.phy_type_low = pcaps->phy_type_low;
3699 cfg.phy_type_high = pcaps->phy_type_high;
3700 cfg.caps = pcaps->caps | ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
3701 cfg.low_power_ctrl_an = pcaps->low_power_ctrl_an;
3702 cfg.eee_cap = pcaps->eee_cap;
3703 cfg.eeer_value = pcaps->eeer_value;
3704 cfg.link_fec_opt = pcaps->link_fec_options;
3706 cfg.caps |= ICE_AQ_PHY_ENA_LINK;
3708 cfg.caps &= ~ICE_AQ_PHY_ENA_LINK;
3710 status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL);
3713 ice_free(hw, pcaps);
3718 ice_dev_set_link_up(struct rte_eth_dev *dev)
3720 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3722 return ice_force_phys_link_state(hw, true);
3726 ice_dev_set_link_down(struct rte_eth_dev *dev)
3728 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3730 return ice_force_phys_link_state(hw, false);
3734 ice_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
3736 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3737 struct rte_eth_dev_data *dev_data = pf->dev_data;
3738 uint32_t frame_size = mtu + ICE_ETH_OVERHEAD;
3740 /* check if mtu is within the allowed range */
3741 if (mtu < RTE_ETHER_MIN_MTU || frame_size > ICE_FRAME_SIZE_MAX)
3744 /* mtu setting is forbidden if port is started */
3745 if (dev_data->dev_started) {
3747 "port %d must be stopped before configuration",
3752 if (frame_size > RTE_ETHER_MAX_LEN)
3753 dev_data->dev_conf.rxmode.offloads |=
3754 DEV_RX_OFFLOAD_JUMBO_FRAME;
3756 dev_data->dev_conf.rxmode.offloads &=
3757 ~DEV_RX_OFFLOAD_JUMBO_FRAME;
3759 dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
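/* Worked example, assuming the usual ICE_ETH_OVERHEAD of 26 bytes
 * (14-byte Ethernet header + 4-byte CRC + 2 x 4-byte VLAN tags): an
 * MTU of 1500 gives frame_size 1526, which exceeds RTE_ETHER_MAX_LEN
 * (1518) and therefore turns on the jumbo frame offload.
 */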
3764 static int ice_macaddr_set(struct rte_eth_dev *dev,
3765 struct rte_ether_addr *mac_addr)
3767 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3768 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3769 struct ice_vsi *vsi = pf->main_vsi;
3770 struct ice_mac_filter *f;
3774 if (!rte_is_valid_assigned_ether_addr(mac_addr)) {
3775 PMD_DRV_LOG(ERR, "Tried to set invalid MAC address.");
3779 TAILQ_FOREACH(f, &vsi->mac_list, next) {
3780 if (rte_is_same_ether_addr(&pf->dev_addr, &f->mac_info.mac_addr))
3785 PMD_DRV_LOG(ERR, "Failed to find filter for default mac");
3789 ret = ice_remove_mac_filter(vsi, &f->mac_info.mac_addr);
3790 if (ret != ICE_SUCCESS) {
3791 PMD_DRV_LOG(ERR, "Failed to delete mac filter");
3794 ret = ice_add_mac_filter(vsi, mac_addr);
3795 if (ret != ICE_SUCCESS) {
3796 PMD_DRV_LOG(ERR, "Failed to add mac filter");
3799 rte_ether_addr_copy(mac_addr, &pf->dev_addr);
3801 flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL;
3802 ret = ice_aq_manage_mac_write(hw, mac_addr->addr_bytes, flags, NULL);
3803 if (ret != ICE_SUCCESS)
3804 PMD_DRV_LOG(ERR, "Failed to set manage mac");
3809 /* Add a MAC address, and update filters */
3811 ice_macaddr_add(struct rte_eth_dev *dev,
3812 struct rte_ether_addr *mac_addr,
3813 __rte_unused uint32_t index,
3814 __rte_unused uint32_t pool)
3816 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3817 struct ice_vsi *vsi = pf->main_vsi;
3820 ret = ice_add_mac_filter(vsi, mac_addr);
3821 if (ret != ICE_SUCCESS) {
3822 PMD_DRV_LOG(ERR, "Failed to add MAC filter");
3829 /* Remove a MAC address, and update filters */
3831 ice_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
3833 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3834 struct ice_vsi *vsi = pf->main_vsi;
3835 struct rte_eth_dev_data *data = dev->data;
3836 struct rte_ether_addr *macaddr;
3839 macaddr = &data->mac_addrs[index];
3840 ret = ice_remove_mac_filter(vsi, macaddr);
3842 PMD_DRV_LOG(ERR, "Failed to remove MAC filter");
3848 ice_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
3850 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3851 struct ice_vsi *vsi = pf->main_vsi;
3854 PMD_INIT_FUNC_TRACE();
3857 ret = ice_add_vlan_filter(vsi, vlan_id);
3859 PMD_DRV_LOG(ERR, "Failed to add vlan filter");
3863 ret = ice_remove_vlan_filter(vsi, vlan_id);
3865 PMD_DRV_LOG(ERR, "Failed to remove vlan filter");
3873 /* Configure vlan filter on or off */
3875 ice_vsi_config_vlan_filter(struct ice_vsi *vsi, bool on)
3877 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
3878 struct ice_vsi_ctx ctxt;
3879 uint8_t sec_flags, sw_flags2;
3882 sec_flags = ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
3883 ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S;
3884 sw_flags2 = ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
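/* sec_flags carries the Tx-side VLAN pruning enable and sw_flags2 the
 * Rx-side one; both are toggled together below so packets whose VLAN
 * is not on the VSI's list are filtered in both directions.
 */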
3887 vsi->info.sec_flags |= sec_flags;
3888 vsi->info.sw_flags2 |= sw_flags2;
3890 vsi->info.sec_flags &= ~sec_flags;
3891 vsi->info.sw_flags2 &= ~sw_flags2;
3893 vsi->info.sw_id = hw->port_info->sw_id;
3894 (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
3895 ctxt.info.valid_sections =
3896 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_SW_VALID |
3897 ICE_AQ_VSI_PROP_SECURITY_VALID);
3898 ctxt.vsi_num = vsi->vsi_id;
3900 ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
3902 PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan rx pruning",
3903 on ? "enable" : "disable");
3906 vsi->info.valid_sections |=
3907 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_SW_VALID |
3908 ICE_AQ_VSI_PROP_SECURITY_VALID);
3911 /* to be consistent with other drivers, allow untagged packets when the vlan filter is on */
3913 ret = ice_add_vlan_filter(vsi, 0);
3915 ret = ice_remove_vlan_filter(vsi, 0);
3921 ice_vsi_config_vlan_stripping(struct ice_vsi *vsi, bool on)
3923 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
3924 struct ice_vsi_ctx ctxt;
3928 /* Check if stripping is already on or off */
3929 if (vsi->info.valid_sections &
3930 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID)) {
3932 if ((vsi->info.vlan_flags &
3933 ICE_AQ_VSI_VLAN_EMOD_M) ==
3934 ICE_AQ_VSI_VLAN_EMOD_STR_BOTH)
3935 return 0; /* already on */
3937 if ((vsi->info.vlan_flags &
3938 ICE_AQ_VSI_VLAN_EMOD_M) ==
3939 ICE_AQ_VSI_VLAN_EMOD_NOTHING)
3940 return 0; /* already off */
3945 vlan_flags = ICE_AQ_VSI_VLAN_EMOD_STR_BOTH;
3947 vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING;
3948 vsi->info.vlan_flags &= ~(ICE_AQ_VSI_VLAN_EMOD_M);
3949 vsi->info.vlan_flags |= vlan_flags;
3950 (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
3951 ctxt.info.valid_sections =
3952 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID);
3953 ctxt.vsi_num = vsi->vsi_id;
3954 ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
3956 PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan stripping",
3957 on ? "enable" : "disable");
3961 vsi->info.valid_sections |=
3962 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID);
3968 ice_vlan_offload_set(struct rte_eth_dev *dev, int mask)
3970 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3971 struct ice_vsi *vsi = pf->main_vsi;
3972 struct rte_eth_rxmode *rxmode;
3974 rxmode = &dev->data->dev_conf.rxmode;
3975 if (mask & ETH_VLAN_FILTER_MASK) {
3976 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
3977 ice_vsi_config_vlan_filter(vsi, true);
3979 ice_vsi_config_vlan_filter(vsi, false);
3982 if (mask & ETH_VLAN_STRIP_MASK) {
3983 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
3984 ice_vsi_config_vlan_stripping(vsi, true);
3986 ice_vsi_config_vlan_stripping(vsi, false);
3989 if (mask & ETH_VLAN_EXTEND_MASK) {
3990 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
3991 ice_vsi_config_double_vlan(vsi, true);
3993 ice_vsi_config_double_vlan(vsi, false);
4000 ice_get_rss_lut(struct ice_vsi *vsi, uint8_t *lut, uint16_t lut_size)
4002 struct ice_pf *pf = ICE_VSI_TO_PF(vsi);
4003 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
4009 if (pf->flags & ICE_FLAG_RSS_AQ_CAPABLE) {
4010 ret = ice_aq_get_rss_lut(hw, vsi->idx,
4011 ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF, lut, lut_size);
4013 PMD_DRV_LOG(ERR, "Failed to get RSS lookup table");
4017 uint32_t *lut_dw = (uint32_t *)lut;
4018 uint16_t i, lut_size_dw = lut_size / 4;
4020 for (i = 0; i < lut_size_dw; i++)
4021 lut_dw[i] = ICE_READ_REG(hw, PFQF_HLUT(i));
4028 ice_set_rss_lut(struct ice_vsi *vsi, uint8_t *lut, uint16_t lut_size)
4037 pf = ICE_VSI_TO_PF(vsi);
4038 hw = ICE_VSI_TO_HW(vsi);
4040 if (pf->flags & ICE_FLAG_RSS_AQ_CAPABLE) {
4041 ret = ice_aq_set_rss_lut(hw, vsi->idx,
4042 ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF, lut, lut_size);
4044 PMD_DRV_LOG(ERR, "Failed to set RSS lookup table");
4048 uint32_t *lut_dw = (uint32_t *)lut;
4049 uint16_t i, lut_size_dw = lut_size / 4;
4051 for (i = 0; i < lut_size_dw; i++)
4052 ICE_WRITE_REG(hw, PFQF_HLUT(i), lut_dw[i]);
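/* Each 32-bit PFQF_HLUT register packs four one-byte LUT entries,
 * hence lut_size / 4 register writes (and 32-bit strides) for the
 * fallback non-AQ path.
 */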
4061 ice_rss_reta_update(struct rte_eth_dev *dev,
4062 struct rte_eth_rss_reta_entry64 *reta_conf,
4065 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4066 uint16_t i, lut_size = pf->hash_lut_size;
4067 uint16_t idx, shift;
4071 if (reta_size != ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128 &&
4072 reta_size != ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512 &&
4073 reta_size != ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K) {
4075 "The size of hash lookup table configured (%d)"
4076 "doesn't match the number hardware can "
4077 "supported (128, 512, 2048)",
4082 /* It MUST use the current LUT size to get the RSS lookup table,
4083 * otherwise it will fail with error code -100.
4085 lut = rte_zmalloc(NULL, RTE_MAX(reta_size, lut_size), 0);
4087 PMD_DRV_LOG(ERR, "Failed to allocate memory");
4090 ret = ice_get_rss_lut(pf->main_vsi, lut, lut_size);
4094 for (i = 0; i < reta_size; i++) {
4095 idx = i / RTE_RETA_GROUP_SIZE;
4096 shift = i % RTE_RETA_GROUP_SIZE;
4097 if (reta_conf[idx].mask & (1ULL << shift))
4098 lut[i] = reta_conf[idx].reta[shift];
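/* Example: for RETA entry i = 130, idx = 130 / 64 = 2 and
 * shift = 130 % 64 = 2, so the entry is updated only when bit 2 of
 * reta_conf[2].mask is set.
 */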
4100 ret = ice_set_rss_lut(pf->main_vsi, lut, reta_size);
4101 if (ret == 0 && lut_size != reta_size) {
4103 "The size of hash lookup table is changed from (%d) to (%d)",
4104 lut_size, reta_size);
4105 pf->hash_lut_size = reta_size;
4115 ice_rss_reta_query(struct rte_eth_dev *dev,
4116 struct rte_eth_rss_reta_entry64 *reta_conf,
4119 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4120 uint16_t i, lut_size = pf->hash_lut_size;
4121 uint16_t idx, shift;
4125 if (reta_size != lut_size) {
4127 "The size of hash lookup table configured (%d)"
4128 "doesn't match the number hardware can "
4130 reta_size, lut_size);
4134 lut = rte_zmalloc(NULL, reta_size, 0);
4136 PMD_DRV_LOG(ERR, "Failed to allocate memory");
4140 ret = ice_get_rss_lut(pf->main_vsi, lut, reta_size);
4144 for (i = 0; i < reta_size; i++) {
4145 idx = i / RTE_RETA_GROUP_SIZE;
4146 shift = i % RTE_RETA_GROUP_SIZE;
4147 if (reta_conf[idx].mask & (1ULL << shift))
4148 reta_conf[idx].reta[shift] = lut[i];
4158 ice_set_rss_key(struct ice_vsi *vsi, uint8_t *key, uint8_t key_len)
4160 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
4163 if (!key || key_len == 0) {
4164 PMD_DRV_LOG(DEBUG, "No key to be configured");
4166 } else if (key_len != (VSIQF_HKEY_MAX_INDEX + 1) *
4168 PMD_DRV_LOG(ERR, "Invalid key length %u", key_len);
4172 struct ice_aqc_get_set_rss_keys *key_dw =
4173 (struct ice_aqc_get_set_rss_keys *)key;
4175 ret = ice_aq_set_rss_key(hw, vsi->idx, key_dw);
4177 PMD_DRV_LOG(ERR, "Failed to configure RSS key via AQ");
4185 ice_get_rss_key(struct ice_vsi *vsi, uint8_t *key, uint8_t *key_len)
4187 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
4190 if (!key || !key_len)
4193 ret = ice_aq_get_rss_key
4195 (struct ice_aqc_get_set_rss_keys *)key);
4197 PMD_DRV_LOG(ERR, "Failed to get RSS key via AQ");
4200 *key_len = (VSIQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
4206 ice_rss_hash_update(struct rte_eth_dev *dev,
4207 struct rte_eth_rss_conf *rss_conf)
4209 enum ice_status status = ICE_SUCCESS;
4210 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4211 struct ice_vsi *vsi = pf->main_vsi;
4214 status = ice_set_rss_key(vsi, rss_conf->rss_key, rss_conf->rss_key_len);
4218 if (rss_conf->rss_hf == 0)
4221 /* RSS hash configuration */
4222 ice_rss_hash_set(pf, rss_conf->rss_hf);
4228 ice_rss_hash_conf_get(struct rte_eth_dev *dev,
4229 struct rte_eth_rss_conf *rss_conf)
4231 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4232 struct ice_vsi *vsi = pf->main_vsi;
4234 ice_get_rss_key(vsi, rss_conf->rss_key,
4235 &rss_conf->rss_key_len);
4237 /* TODO: default to 0 since querying the hf config is not supported yet */
4238 rss_conf->rss_hf = 0;
4243 ice_promisc_enable(struct rte_eth_dev *dev)
4245 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4246 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4247 struct ice_vsi *vsi = pf->main_vsi;
4248 enum ice_status status;
4252 pmask = ICE_PROMISC_UCAST_RX | ICE_PROMISC_UCAST_TX |
4253 ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX;
4255 status = ice_set_vsi_promisc(hw, vsi->idx, pmask, 0);
4257 case ICE_ERR_ALREADY_EXISTS:
4258 PMD_DRV_LOG(DEBUG, "Promisc mode has already been enabled");
4262 PMD_DRV_LOG(ERR, "Failed to enable promisc, err=%d", status);
4270 ice_promisc_disable(struct rte_eth_dev *dev)
4272 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4273 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4274 struct ice_vsi *vsi = pf->main_vsi;
4275 enum ice_status status;
4279 pmask = ICE_PROMISC_UCAST_RX | ICE_PROMISC_UCAST_TX |
4280 ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX;
4282 status = ice_clear_vsi_promisc(hw, vsi->idx, pmask, 0);
4283 if (status != ICE_SUCCESS) {
4284 PMD_DRV_LOG(ERR, "Failed to clear promisc, err=%d", status);
4292 ice_allmulti_enable(struct rte_eth_dev *dev)
4294 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4295 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4296 struct ice_vsi *vsi = pf->main_vsi;
4297 enum ice_status status;
4301 pmask = ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX;
4303 status = ice_set_vsi_promisc(hw, vsi->idx, pmask, 0);
4306 case ICE_ERR_ALREADY_EXISTS:
4307 PMD_DRV_LOG(DEBUG, "Allmulti has already been enabled");
4311 PMD_DRV_LOG(ERR, "Failed to enable allmulti, err=%d", status);
4319 ice_allmulti_disable(struct rte_eth_dev *dev)
4321 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4322 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4323 struct ice_vsi *vsi = pf->main_vsi;
4324 enum ice_status status;
4328 if (dev->data->promiscuous == 1)
4329 return 0; /* must remain in all_multicast mode */
4331 pmask = ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX;
4333 status = ice_clear_vsi_promisc(hw, vsi->idx, pmask, 0);
4334 if (status != ICE_SUCCESS) {
4335 PMD_DRV_LOG(ERR, "Failed to clear allmulti, err=%d", status);
4342 static int ice_rx_queue_intr_enable(struct rte_eth_dev *dev,
4345 struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
4346 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
4347 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4351 msix_intr = intr_handle->intr_vec[queue_id];
4353 val = GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
4354 GLINT_DYN_CTL_ITR_INDX_M;
4355 val &= ~GLINT_DYN_CTL_WB_ON_ITR_M;
4357 ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr), val);
4358 rte_intr_ack(&pci_dev->intr_handle);
4363 static int ice_rx_queue_intr_disable(struct rte_eth_dev *dev,
4366 struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
4367 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
4368 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4371 msix_intr = intr_handle->intr_vec[queue_id];
4373 ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr), GLINT_DYN_CTL_WB_ON_ITR_M);
4379 ice_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
4381 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4386 ver = hw->nvm.orom.major;
4387 patch = hw->nvm.orom.patch;
4388 build = hw->nvm.orom.build;
4390 ret = snprintf(fw_version, fw_size,
4391 "%d.%d 0x%08x %d.%d.%d",
4397 /* add the size of '\0' */
4399 if (fw_size < (u32)ret)
4406 ice_vsi_vlan_pvid_set(struct ice_vsi *vsi, struct ice_vsi_vlan_pvid_info *info)
4409 struct ice_vsi_ctx ctxt;
4410 uint8_t vlan_flags = 0;
4413 if (!vsi || !info) {
4414 PMD_DRV_LOG(ERR, "invalid parameters");
4419 vsi->info.pvid = info->config.pvid;
4421 * If insert pvid is enabled, only tagged pkts are
4422 * allowed to be sent out.
4424 vlan_flags = ICE_AQ_VSI_PVLAN_INSERT_PVID |
4425 ICE_AQ_VSI_VLAN_MODE_UNTAGGED;
4428 if (info->config.reject.tagged == 0)
4429 vlan_flags |= ICE_AQ_VSI_VLAN_MODE_TAGGED;
4431 if (info->config.reject.untagged == 0)
4432 vlan_flags |= ICE_AQ_VSI_VLAN_MODE_UNTAGGED;
4434 vsi->info.vlan_flags &= ~(ICE_AQ_VSI_PVLAN_INSERT_PVID |
4435 ICE_AQ_VSI_VLAN_MODE_M);
4436 vsi->info.vlan_flags |= vlan_flags;
4437 memset(&ctxt, 0, sizeof(ctxt));
4438 rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
4439 ctxt.info.valid_sections =
4440 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID);
4441 ctxt.vsi_num = vsi->vsi_id;
4443 hw = ICE_VSI_TO_HW(vsi);
4444 ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
4445 if (ret != ICE_SUCCESS) {
4447 "update VSI for VLAN insert failed, err %d",
4452 vsi->info.valid_sections |=
4453 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID);
4459 ice_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
4461 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4462 struct ice_vsi *vsi = pf->main_vsi;
4463 struct rte_eth_dev_data *data = pf->dev_data;
4464 struct ice_vsi_vlan_pvid_info info;
4467 memset(&info, 0, sizeof(info));
4470 info.config.pvid = pvid;
4472 info.config.reject.tagged =
4473 data->dev_conf.txmode.hw_vlan_reject_tagged;
4474 info.config.reject.untagged =
4475 data->dev_conf.txmode.hw_vlan_reject_untagged;
4478 ret = ice_vsi_vlan_pvid_set(vsi, &info);
4480 PMD_DRV_LOG(ERR, "Failed to set pvid.");
4488 ice_get_eeprom_length(struct rte_eth_dev *dev)
4490 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4492 return hw->nvm.flash_size;
4496 ice_get_eeprom(struct rte_eth_dev *dev,
4497 struct rte_dev_eeprom_info *eeprom)
4499 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4500 enum ice_status status = ICE_SUCCESS;
4501 uint8_t *data = eeprom->data;
4503 eeprom->magic = hw->vendor_id | (hw->device_id << 16);
4505 status = ice_acquire_nvm(hw, ICE_RES_READ);
4507 PMD_DRV_LOG(ERR, "acquire nvm failed.");
4511 status = ice_read_flat_nvm(hw, eeprom->offset, &eeprom->length,
4514 ice_release_nvm(hw);
4517 PMD_DRV_LOG(ERR, "EEPROM read failed.");
4525 ice_stat_update_32(struct ice_hw *hw,
4533 new_data = (uint64_t)ICE_READ_REG(hw, reg);
4537 if (new_data >= *offset)
4538 *stat = (uint64_t)(new_data - *offset);
4540 *stat = (uint64_t)((new_data +
4541 ((uint64_t)1 << ICE_32_BIT_WIDTH))
4546 ice_stat_update_40(struct ice_hw *hw,
4555 new_data = (uint64_t)ICE_READ_REG(hw, loreg);
4556 new_data |= (uint64_t)(ICE_READ_REG(hw, hireg) & ICE_8_BIT_MASK) <<
4562 if (new_data >= *offset)
4563 *stat = new_data - *offset;
4565 *stat = (uint64_t)((new_data +
4566 ((uint64_t)1 << ICE_40_BIT_WIDTH)) -
4569 *stat &= ICE_40_BIT_MASK;
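/* Wrap example: if *offset is 0xFFFFFFFF00 and the 40-bit counter now
 * reads 0x100, the counter rolled over, so the delta is
 * (0x100 + 2^40) - 0xFFFFFFFF00 = 0x200 (512) before masking back to
 * 40 bits.
 */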
4572 /* Get all the statistics of a VSI */
4574 ice_update_vsi_stats(struct ice_vsi *vsi)
4576 struct ice_eth_stats *oes = &vsi->eth_stats_offset;
4577 struct ice_eth_stats *nes = &vsi->eth_stats;
4578 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
4579 int idx = rte_le_to_cpu_16(vsi->vsi_id);
4581 ice_stat_update_40(hw, GLV_GORCH(idx), GLV_GORCL(idx),
4582 vsi->offset_loaded, &oes->rx_bytes,
4584 ice_stat_update_40(hw, GLV_UPRCH(idx), GLV_UPRCL(idx),
4585 vsi->offset_loaded, &oes->rx_unicast,
4587 ice_stat_update_40(hw, GLV_MPRCH(idx), GLV_MPRCL(idx),
4588 vsi->offset_loaded, &oes->rx_multicast,
4589 &nes->rx_multicast);
4590 ice_stat_update_40(hw, GLV_BPRCH(idx), GLV_BPRCL(idx),
4591 vsi->offset_loaded, &oes->rx_broadcast,
4592 &nes->rx_broadcast);
4593 /* account for 40-bit hardware counter wraparound in rx_bytes */
4594 if (vsi->offset_loaded) {
4595 if (ICE_RXTX_BYTES_LOW(vsi->old_rx_bytes) > nes->rx_bytes)
4596 nes->rx_bytes += (uint64_t)1 << ICE_40_BIT_WIDTH;
4597 nes->rx_bytes += ICE_RXTX_BYTES_HIGH(vsi->old_rx_bytes);
4599 vsi->old_rx_bytes = nes->rx_bytes;
4600 /* exclude CRC bytes */
4601 nes->rx_bytes -= (nes->rx_unicast + nes->rx_multicast +
4602 nes->rx_broadcast) * RTE_ETHER_CRC_LEN;
4604 ice_stat_update_32(hw, GLV_RDPC(idx), vsi->offset_loaded,
4605 &oes->rx_discards, &nes->rx_discards);
4606 /* GLV_REPC not supported */
4607 /* GLV_RMPC not supported */
4608 ice_stat_update_32(hw, GLSWID_RUPP(idx), vsi->offset_loaded,
4609 &oes->rx_unknown_protocol,
4610 &nes->rx_unknown_protocol);
4611 ice_stat_update_40(hw, GLV_GOTCH(idx), GLV_GOTCL(idx),
4612 vsi->offset_loaded, &oes->tx_bytes,
4614 ice_stat_update_40(hw, GLV_UPTCH(idx), GLV_UPTCL(idx),
4615 vsi->offset_loaded, &oes->tx_unicast,
4617 ice_stat_update_40(hw, GLV_MPTCH(idx), GLV_MPTCL(idx),
4618 vsi->offset_loaded, &oes->tx_multicast,
4619 &nes->tx_multicast);
4620 ice_stat_update_40(hw, GLV_BPTCH(idx), GLV_BPTCL(idx),
4621 vsi->offset_loaded, &oes->tx_broadcast,
4622 &nes->tx_broadcast);
4623 /* GLV_TDPC not supported */
4624 ice_stat_update_32(hw, GLV_TEPC(idx), vsi->offset_loaded,
4625 &oes->tx_errors, &nes->tx_errors);
4626 /* enlarge the limitation when tx_bytes overflowed */
4627 if (vsi->offset_loaded) {
4628 if (ICE_RXTX_BYTES_LOW(vsi->old_tx_bytes) > nes->tx_bytes)
4629 nes->tx_bytes += (uint64_t)1 << ICE_40_BIT_WIDTH;
4630 nes->tx_bytes += ICE_RXTX_BYTES_HIGH(vsi->old_tx_bytes);
4632 vsi->old_tx_bytes = nes->tx_bytes;
4633 vsi->offset_loaded = true;
4635 PMD_DRV_LOG(DEBUG, "************** VSI[%u] stats start **************",
4637 PMD_DRV_LOG(DEBUG, "rx_bytes: %"PRIu64"", nes->rx_bytes);
4638 PMD_DRV_LOG(DEBUG, "rx_unicast: %"PRIu64"", nes->rx_unicast);
4639 PMD_DRV_LOG(DEBUG, "rx_multicast: %"PRIu64"", nes->rx_multicast);
4640 PMD_DRV_LOG(DEBUG, "rx_broadcast: %"PRIu64"", nes->rx_broadcast);
4641 PMD_DRV_LOG(DEBUG, "rx_discards: %"PRIu64"", nes->rx_discards);
4642 PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
4643 nes->rx_unknown_protocol);
4644 PMD_DRV_LOG(DEBUG, "tx_bytes: %"PRIu64"", nes->tx_bytes);
4645 PMD_DRV_LOG(DEBUG, "tx_unicast: %"PRIu64"", nes->tx_unicast);
4646 PMD_DRV_LOG(DEBUG, "tx_multicast: %"PRIu64"", nes->tx_multicast);
4647 PMD_DRV_LOG(DEBUG, "tx_broadcast: %"PRIu64"", nes->tx_broadcast);
4648 PMD_DRV_LOG(DEBUG, "tx_discards: %"PRIu64"", nes->tx_discards);
4649 PMD_DRV_LOG(DEBUG, "tx_errors: %"PRIu64"", nes->tx_errors);
4650 PMD_DRV_LOG(DEBUG, "************** VSI[%u] stats end ****************",
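/* Snapshot all port-level (GLPRT_*) counters into pf->stats, then refresh
 * the main VSI counters. Byte counters are additionally widened in
 * software (old_rx_bytes/old_tx_bytes) because the 40-bit hardware
 * counters wrap quickly at high line rates.
 */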
static void
ice_read_stats_registers(struct ice_pf *pf, struct ice_hw *hw)
{
	struct ice_hw_port_stats *ns = &pf->stats; /* new stats */
	struct ice_hw_port_stats *os = &pf->stats_offset; /* old stats */

	/* Get statistics of struct ice_eth_stats */
	ice_stat_update_40(hw, GLPRT_GORCH(hw->port_info->lport),
			   GLPRT_GORCL(hw->port_info->lport),
			   pf->offset_loaded, &os->eth.rx_bytes,
			   &ns->eth.rx_bytes);
	ice_stat_update_40(hw, GLPRT_UPRCH(hw->port_info->lport),
			   GLPRT_UPRCL(hw->port_info->lport),
			   pf->offset_loaded, &os->eth.rx_unicast,
			   &ns->eth.rx_unicast);
	ice_stat_update_40(hw, GLPRT_MPRCH(hw->port_info->lport),
			   GLPRT_MPRCL(hw->port_info->lport),
			   pf->offset_loaded, &os->eth.rx_multicast,
			   &ns->eth.rx_multicast);
	ice_stat_update_40(hw, GLPRT_BPRCH(hw->port_info->lport),
			   GLPRT_BPRCL(hw->port_info->lport),
			   pf->offset_loaded, &os->eth.rx_broadcast,
			   &ns->eth.rx_broadcast);
	ice_stat_update_32(hw, PRTRPB_RDPC,
			   pf->offset_loaded, &os->eth.rx_discards,
			   &ns->eth.rx_discards);
	/* enlarge the limitation when rx_bytes overflowed */
	if (pf->offset_loaded) {
		if (ICE_RXTX_BYTES_LOW(pf->old_rx_bytes) > ns->eth.rx_bytes)
			ns->eth.rx_bytes += (uint64_t)1 << ICE_40_BIT_WIDTH;
		ns->eth.rx_bytes += ICE_RXTX_BYTES_HIGH(pf->old_rx_bytes);
	}
	pf->old_rx_bytes = ns->eth.rx_bytes;

	/* Workaround: CRC size should not be included in byte statistics,
	 * so subtract RTE_ETHER_CRC_LEN from the byte counter for each rx
	 * packet.
	 */
	ns->eth.rx_bytes -= (ns->eth.rx_unicast + ns->eth.rx_multicast +
			     ns->eth.rx_broadcast) * RTE_ETHER_CRC_LEN;

	/* GLPRT_REPC not supported */
	/* GLPRT_RMPC not supported */
	ice_stat_update_32(hw, GLSWID_RUPP(hw->port_info->lport),
			   pf->offset_loaded,
			   &os->eth.rx_unknown_protocol,
			   &ns->eth.rx_unknown_protocol);
	ice_stat_update_40(hw, GLPRT_GOTCH(hw->port_info->lport),
			   GLPRT_GOTCL(hw->port_info->lport),
			   pf->offset_loaded, &os->eth.tx_bytes,
			   &ns->eth.tx_bytes);
	ice_stat_update_40(hw, GLPRT_UPTCH(hw->port_info->lport),
			   GLPRT_UPTCL(hw->port_info->lport),
			   pf->offset_loaded, &os->eth.tx_unicast,
			   &ns->eth.tx_unicast);
	ice_stat_update_40(hw, GLPRT_MPTCH(hw->port_info->lport),
			   GLPRT_MPTCL(hw->port_info->lport),
			   pf->offset_loaded, &os->eth.tx_multicast,
			   &ns->eth.tx_multicast);
	ice_stat_update_40(hw, GLPRT_BPTCH(hw->port_info->lport),
			   GLPRT_BPTCL(hw->port_info->lport),
			   pf->offset_loaded, &os->eth.tx_broadcast,
			   &ns->eth.tx_broadcast);
	/* enlarge the limitation when tx_bytes overflowed */
	if (pf->offset_loaded) {
		if (ICE_RXTX_BYTES_LOW(pf->old_tx_bytes) > ns->eth.tx_bytes)
			ns->eth.tx_bytes += (uint64_t)1 << ICE_40_BIT_WIDTH;
		ns->eth.tx_bytes += ICE_RXTX_BYTES_HIGH(pf->old_tx_bytes);
	}
	pf->old_tx_bytes = ns->eth.tx_bytes;
	ns->eth.tx_bytes -= (ns->eth.tx_unicast + ns->eth.tx_multicast +
			     ns->eth.tx_broadcast) * RTE_ETHER_CRC_LEN;

	/* GLPRT_TEPC not supported */

	/* additional port specific stats */
	ice_stat_update_32(hw, GLPRT_TDOLD(hw->port_info->lport),
			   pf->offset_loaded, &os->tx_dropped_link_down,
			   &ns->tx_dropped_link_down);
	ice_stat_update_32(hw, GLPRT_CRCERRS(hw->port_info->lport),
			   pf->offset_loaded, &os->crc_errors,
			   &ns->crc_errors);
	ice_stat_update_32(hw, GLPRT_ILLERRC(hw->port_info->lport),
			   pf->offset_loaded, &os->illegal_bytes,
			   &ns->illegal_bytes);
	/* GLPRT_ERRBC not supported */
	ice_stat_update_32(hw, GLPRT_MLFC(hw->port_info->lport),
			   pf->offset_loaded, &os->mac_local_faults,
			   &ns->mac_local_faults);
	ice_stat_update_32(hw, GLPRT_MRFC(hw->port_info->lport),
			   pf->offset_loaded, &os->mac_remote_faults,
			   &ns->mac_remote_faults);

	ice_stat_update_32(hw, GLPRT_RLEC(hw->port_info->lport),
			   pf->offset_loaded, &os->rx_len_errors,
			   &ns->rx_len_errors);

	ice_stat_update_32(hw, GLPRT_LXONRXC(hw->port_info->lport),
			   pf->offset_loaded, &os->link_xon_rx,
			   &ns->link_xon_rx);
	ice_stat_update_32(hw, GLPRT_LXOFFRXC(hw->port_info->lport),
			   pf->offset_loaded, &os->link_xoff_rx,
			   &ns->link_xoff_rx);
	ice_stat_update_32(hw, GLPRT_LXONTXC(hw->port_info->lport),
			   pf->offset_loaded, &os->link_xon_tx,
			   &ns->link_xon_tx);
	ice_stat_update_32(hw, GLPRT_LXOFFTXC(hw->port_info->lport),
			   pf->offset_loaded, &os->link_xoff_tx,
			   &ns->link_xoff_tx);
	ice_stat_update_40(hw, GLPRT_PRC64H(hw->port_info->lport),
			   GLPRT_PRC64L(hw->port_info->lport),
			   pf->offset_loaded, &os->rx_size_64,
			   &ns->rx_size_64);
	ice_stat_update_40(hw, GLPRT_PRC127H(hw->port_info->lport),
			   GLPRT_PRC127L(hw->port_info->lport),
			   pf->offset_loaded, &os->rx_size_127,
			   &ns->rx_size_127);
	ice_stat_update_40(hw, GLPRT_PRC255H(hw->port_info->lport),
			   GLPRT_PRC255L(hw->port_info->lport),
			   pf->offset_loaded, &os->rx_size_255,
			   &ns->rx_size_255);
	ice_stat_update_40(hw, GLPRT_PRC511H(hw->port_info->lport),
			   GLPRT_PRC511L(hw->port_info->lport),
			   pf->offset_loaded, &os->rx_size_511,
			   &ns->rx_size_511);
	ice_stat_update_40(hw, GLPRT_PRC1023H(hw->port_info->lport),
			   GLPRT_PRC1023L(hw->port_info->lport),
			   pf->offset_loaded, &os->rx_size_1023,
			   &ns->rx_size_1023);
	ice_stat_update_40(hw, GLPRT_PRC1522H(hw->port_info->lport),
			   GLPRT_PRC1522L(hw->port_info->lport),
			   pf->offset_loaded, &os->rx_size_1522,
			   &ns->rx_size_1522);
	ice_stat_update_40(hw, GLPRT_PRC9522H(hw->port_info->lport),
			   GLPRT_PRC9522L(hw->port_info->lport),
			   pf->offset_loaded, &os->rx_size_big,
			   &ns->rx_size_big);
	ice_stat_update_32(hw, GLPRT_RUC(hw->port_info->lport),
			   pf->offset_loaded, &os->rx_undersize,
			   &ns->rx_undersize);
	ice_stat_update_32(hw, GLPRT_RFC(hw->port_info->lport),
			   pf->offset_loaded, &os->rx_fragments,
			   &ns->rx_fragments);
	ice_stat_update_32(hw, GLPRT_ROC(hw->port_info->lport),
			   pf->offset_loaded, &os->rx_oversize,
			   &ns->rx_oversize);
	ice_stat_update_32(hw, GLPRT_RJC(hw->port_info->lport),
			   pf->offset_loaded, &os->rx_jabber,
			   &ns->rx_jabber);
	ice_stat_update_40(hw, GLPRT_PTC64H(hw->port_info->lport),
			   GLPRT_PTC64L(hw->port_info->lport),
			   pf->offset_loaded, &os->tx_size_64,
			   &ns->tx_size_64);
	ice_stat_update_40(hw, GLPRT_PTC127H(hw->port_info->lport),
			   GLPRT_PTC127L(hw->port_info->lport),
			   pf->offset_loaded, &os->tx_size_127,
			   &ns->tx_size_127);
	ice_stat_update_40(hw, GLPRT_PTC255H(hw->port_info->lport),
			   GLPRT_PTC255L(hw->port_info->lport),
			   pf->offset_loaded, &os->tx_size_255,
			   &ns->tx_size_255);
	ice_stat_update_40(hw, GLPRT_PTC511H(hw->port_info->lport),
			   GLPRT_PTC511L(hw->port_info->lport),
			   pf->offset_loaded, &os->tx_size_511,
			   &ns->tx_size_511);
	ice_stat_update_40(hw, GLPRT_PTC1023H(hw->port_info->lport),
			   GLPRT_PTC1023L(hw->port_info->lport),
			   pf->offset_loaded, &os->tx_size_1023,
			   &ns->tx_size_1023);
	ice_stat_update_40(hw, GLPRT_PTC1522H(hw->port_info->lport),
			   GLPRT_PTC1522L(hw->port_info->lport),
			   pf->offset_loaded, &os->tx_size_1522,
			   &ns->tx_size_1522);
	ice_stat_update_40(hw, GLPRT_PTC9522H(hw->port_info->lport),
			   GLPRT_PTC9522L(hw->port_info->lport),
			   pf->offset_loaded, &os->tx_size_big,
			   &ns->tx_size_big);

	/* GLPRT_MSPDC not supported */
	/* GLPRT_XEC not supported */

	pf->offset_loaded = true;

	if (pf->main_vsi)
		ice_update_vsi_stats(pf->main_vsi);
}
/* Get all statistics of a port */
static int
ice_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_hw_port_stats *ns = &pf->stats; /* new stats */

	/* call read registers - updates values, now write them to struct */
	ice_read_stats_registers(pf, hw);

	stats->ipackets = pf->main_vsi->eth_stats.rx_unicast +
			  pf->main_vsi->eth_stats.rx_multicast +
			  pf->main_vsi->eth_stats.rx_broadcast -
			  pf->main_vsi->eth_stats.rx_discards;
	stats->opackets = ns->eth.tx_unicast +
			  ns->eth.tx_multicast +
			  ns->eth.tx_broadcast;
	stats->ibytes = pf->main_vsi->eth_stats.rx_bytes;
	stats->obytes = ns->eth.tx_bytes;
	stats->oerrors = ns->eth.tx_errors +
			 pf->main_vsi->eth_stats.tx_errors;

	/* Rx Errors */
	stats->imissed = ns->eth.rx_discards +
			 pf->main_vsi->eth_stats.rx_discards;
	stats->ierrors = ns->crc_errors +
			 ns->rx_undersize +
			 ns->rx_oversize + ns->rx_fragments + ns->rx_jabber;

	PMD_DRV_LOG(DEBUG, "*************** PF stats start *****************");
	PMD_DRV_LOG(DEBUG, "rx_bytes: %"PRIu64"", ns->eth.rx_bytes);
	PMD_DRV_LOG(DEBUG, "rx_unicast: %"PRIu64"", ns->eth.rx_unicast);
	PMD_DRV_LOG(DEBUG, "rx_multicast:%"PRIu64"", ns->eth.rx_multicast);
	PMD_DRV_LOG(DEBUG, "rx_broadcast:%"PRIu64"", ns->eth.rx_broadcast);
	PMD_DRV_LOG(DEBUG, "rx_discards:%"PRIu64"", ns->eth.rx_discards);
	PMD_DRV_LOG(DEBUG, "vsi rx_discards:%"PRIu64"",
		    pf->main_vsi->eth_stats.rx_discards);
	PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
		    ns->eth.rx_unknown_protocol);
	PMD_DRV_LOG(DEBUG, "tx_bytes: %"PRIu64"", ns->eth.tx_bytes);
	PMD_DRV_LOG(DEBUG, "tx_unicast: %"PRIu64"", ns->eth.tx_unicast);
	PMD_DRV_LOG(DEBUG, "tx_multicast:%"PRIu64"", ns->eth.tx_multicast);
	PMD_DRV_LOG(DEBUG, "tx_broadcast:%"PRIu64"", ns->eth.tx_broadcast);
	PMD_DRV_LOG(DEBUG, "tx_discards:%"PRIu64"", ns->eth.tx_discards);
	PMD_DRV_LOG(DEBUG, "vsi tx_discards:%"PRIu64"",
		    pf->main_vsi->eth_stats.tx_discards);
	PMD_DRV_LOG(DEBUG, "tx_errors: %"PRIu64"", ns->eth.tx_errors);

	PMD_DRV_LOG(DEBUG, "tx_dropped_link_down: %"PRIu64"",
		    ns->tx_dropped_link_down);
	PMD_DRV_LOG(DEBUG, "crc_errors: %"PRIu64"", ns->crc_errors);
	PMD_DRV_LOG(DEBUG, "illegal_bytes: %"PRIu64"",
		    ns->illegal_bytes);
	PMD_DRV_LOG(DEBUG, "error_bytes: %"PRIu64"", ns->error_bytes);
	PMD_DRV_LOG(DEBUG, "mac_local_faults: %"PRIu64"",
		    ns->mac_local_faults);
	PMD_DRV_LOG(DEBUG, "mac_remote_faults: %"PRIu64"",
		    ns->mac_remote_faults);
	PMD_DRV_LOG(DEBUG, "link_xon_rx: %"PRIu64"", ns->link_xon_rx);
	PMD_DRV_LOG(DEBUG, "link_xoff_rx: %"PRIu64"", ns->link_xoff_rx);
	PMD_DRV_LOG(DEBUG, "link_xon_tx: %"PRIu64"", ns->link_xon_tx);
	PMD_DRV_LOG(DEBUG, "link_xoff_tx: %"PRIu64"", ns->link_xoff_tx);
	PMD_DRV_LOG(DEBUG, "rx_size_64: %"PRIu64"", ns->rx_size_64);
	PMD_DRV_LOG(DEBUG, "rx_size_127: %"PRIu64"", ns->rx_size_127);
	PMD_DRV_LOG(DEBUG, "rx_size_255: %"PRIu64"", ns->rx_size_255);
	PMD_DRV_LOG(DEBUG, "rx_size_511: %"PRIu64"", ns->rx_size_511);
	PMD_DRV_LOG(DEBUG, "rx_size_1023: %"PRIu64"", ns->rx_size_1023);
	PMD_DRV_LOG(DEBUG, "rx_size_1522: %"PRIu64"", ns->rx_size_1522);
	PMD_DRV_LOG(DEBUG, "rx_size_big: %"PRIu64"", ns->rx_size_big);
	PMD_DRV_LOG(DEBUG, "rx_undersize: %"PRIu64"", ns->rx_undersize);
	PMD_DRV_LOG(DEBUG, "rx_fragments: %"PRIu64"", ns->rx_fragments);
	PMD_DRV_LOG(DEBUG, "rx_oversize: %"PRIu64"", ns->rx_oversize);
	PMD_DRV_LOG(DEBUG, "rx_jabber: %"PRIu64"", ns->rx_jabber);
	PMD_DRV_LOG(DEBUG, "tx_size_64: %"PRIu64"", ns->tx_size_64);
	PMD_DRV_LOG(DEBUG, "tx_size_127: %"PRIu64"", ns->tx_size_127);
	PMD_DRV_LOG(DEBUG, "tx_size_255: %"PRIu64"", ns->tx_size_255);
	PMD_DRV_LOG(DEBUG, "tx_size_511: %"PRIu64"", ns->tx_size_511);
	PMD_DRV_LOG(DEBUG, "tx_size_1023: %"PRIu64"", ns->tx_size_1023);
	PMD_DRV_LOG(DEBUG, "tx_size_1522: %"PRIu64"", ns->tx_size_1522);
	PMD_DRV_LOG(DEBUG, "tx_size_big: %"PRIu64"", ns->tx_size_big);
	PMD_DRV_LOG(DEBUG, "rx_len_errors: %"PRIu64"", ns->rx_len_errors);
	PMD_DRV_LOG(DEBUG, "************* PF stats end ****************");

	return 0;
}
/* Reset the statistics */
static int
ice_stats_reset(struct rte_eth_dev *dev)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* Mark PF and VSI stats to update the offset, aka "reset" */
	pf->offset_loaded = false;
	if (pf->main_vsi)
		pf->main_vsi->offset_loaded = false;

	/* read the stats, reading current register values into offset */
	ice_read_stats_registers(pf, hw);

	return 0;
}
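/* Number of extended stats: the ethdev-level counters plus the per-port
 * hardware counters exposed by this PMD.
 */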
static uint32_t
ice_xstats_calc_num(void)
{
	uint32_t num;

	num = ICE_NB_ETH_XSTATS + ICE_NB_HW_PORT_XSTATS;

	return num;
}
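/* Fill the xstats array by offsetting into the stats structs: the
 * ice_stats_strings/ice_hw_port_strings tables pair each counter name
 * with its byte offset into ice_eth_stats/ice_hw_port_stats.
 *
 * Application-side usage sketch (standard ethdev API, not part of this
 * driver):
 *   int n = rte_eth_xstats_get(port_id, NULL, 0);  - query the count
 *   then allocate n entries and call rte_eth_xstats_get(port_id, xs, n).
 */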
static int
ice_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
	       unsigned int n)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	unsigned int i;
	unsigned int count;
	struct ice_hw_port_stats *hw_stats = &pf->stats;

	count = ice_xstats_calc_num();
	if (n < count)
		return count;

	ice_read_stats_registers(pf, hw);

	if (!xstats)
		return 0;

	count = 0;

	/* Get stats from ice_eth_stats struct */
	for (i = 0; i < ICE_NB_ETH_XSTATS; i++) {
		xstats[count].value =
			*(uint64_t *)((char *)&hw_stats->eth +
				      ice_stats_strings[i].offset);
		xstats[count].id = count;
		count++;
	}

	/* Get individual stats from ice_hw_port struct */
	for (i = 0; i < ICE_NB_HW_PORT_XSTATS; i++) {
		xstats[count].value =
			*(uint64_t *)((char *)hw_stats +
				      ice_hw_port_strings[i].offset);
		xstats[count].id = count;
		count++;
	}

	return count;
}
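/* Names are emitted in the same order as the values in ice_xstats_get(),
 * so the same index refers to the same counter in both arrays.
 */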
static int ice_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
				struct rte_eth_xstat_name *xstats_names,
				__rte_unused unsigned int limit)
{
	unsigned int count = 0;
	unsigned int i;

	if (!xstats_names)
		return ice_xstats_calc_num();

	/* Note: limit checked in rte_eth_xstats_names() */

	/* Get stats from ice_eth_stats struct */
	for (i = 0; i < ICE_NB_ETH_XSTATS; i++) {
		strlcpy(xstats_names[count].name, ice_stats_strings[i].name,
			sizeof(xstats_names[count].name));
		count++;
	}

	/* Get individual stats from ice_hw_port struct */
	for (i = 0; i < ICE_NB_HW_PORT_XSTATS; i++) {
		strlcpy(xstats_names[count].name, ice_hw_port_strings[i].name,
			sizeof(xstats_names[count].name));
		count++;
	}

	return count;
}
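/* Only the generic filter type is supported: the request hands back a
 * pointer to the driver's rte_flow ops table.
 */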
static int
ice_dev_filter_ctrl(struct rte_eth_dev *dev,
		    enum rte_filter_type filter_type,
		    enum rte_filter_op filter_op,
		    void *arg)
{
	int ret = 0;

	if (!dev)
		return -EINVAL;

	switch (filter_type) {
	case RTE_ETH_FILTER_GENERIC:
		if (filter_op != RTE_ETH_FILTER_GET)
			return -EINVAL;
		*(const void **)arg = &ice_flow_ops;
		break;
	default:
		PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
			    filter_type);
		ret = -EINVAL;
		break;
	}

	return ret;
}
/* Add UDP tunneling port */
static int
ice_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
			    struct rte_eth_udp_tunnel *udp_tunnel)
{
	int ret = 0;
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (udp_tunnel == NULL)
		return -EINVAL;

	switch (udp_tunnel->prot_type) {
	case RTE_TUNNEL_TYPE_VXLAN:
		ret = ice_create_tunnel(hw, TNL_VXLAN, udp_tunnel->udp_port);
		break;
	default:
		PMD_DRV_LOG(ERR, "Invalid tunnel type");
		ret = -EINVAL;
		break;
	}

	return ret;
}
/* Delete UDP tunneling port */
static int
ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
			    struct rte_eth_udp_tunnel *udp_tunnel)
{
	int ret = 0;
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (udp_tunnel == NULL)
		return -EINVAL;

	switch (udp_tunnel->prot_type) {
	case RTE_TUNNEL_TYPE_VXLAN:
		ret = ice_destroy_tunnel(hw, udp_tunnel->udp_port, 0);
		break;
	default:
		PMD_DRV_LOG(ERR, "Invalid tunnel type");
		ret = -EINVAL;
		break;
	}

	return ret;
}
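/* PCI glue: defer to the generic ethdev PCI helpers, which allocate the
 * port private data and invoke ice_dev_init()/ice_dev_uninit().
 */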
static int
ice_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	      struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
					     sizeof(struct ice_adapter),
					     ice_dev_init);
}

static int
ice_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, ice_dev_uninit);
}
static struct rte_pci_driver rte_ice_pmd = {
	.id_table = pci_id_ice_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = ice_pci_probe,
	.remove = ice_pci_remove,
};
/*
 * Driver initialization routine.
 * Invoked once at EAL init time.
 * Register itself as the [Poll Mode] Driver of PCI devices.
 */
RTE_PMD_REGISTER_PCI(net_ice, rte_ice_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_ice, pci_id_ice_map);
RTE_PMD_REGISTER_KMOD_DEP(net_ice, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(net_ice,
			      ICE_PROTO_XTR_ARG "=[queue:]<vlan|ipv4|ipv6|ipv6_flow|tcp>"
			      ICE_SAFE_MODE_SUPPORT_ARG "=<0|1>"
			      ICE_PIPELINE_MODE_SUPPORT_ARG "=<0|1>"
			      ICE_FLOW_MARK_SUPPORT_ARG "=<0|1>");
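/* Example devargs (illustrative only; the PCI address is hypothetical):
 *   -w 18:00.0,proto_xtr='[(1,2-3):ipv6,4:tcp]',safe-mode-support=1
 * requests IPv6/TCP metadata extraction on the listed Rx queues and
 * enables safe mode on that port.
 */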
RTE_LOG_REGISTER(ice_logtype_init, pmd.net.ice.init, NOTICE);
RTE_LOG_REGISTER(ice_logtype_driver, pmd.net.ice.driver, NOTICE);
#ifdef RTE_LIBRTE_ICE_DEBUG_RX
RTE_LOG_REGISTER(ice_logtype_rx, pmd.net.ice.rx, DEBUG);
#endif
#ifdef RTE_LIBRTE_ICE_DEBUG_TX
RTE_LOG_REGISTER(ice_logtype_tx, pmd.net.ice.tx, DEBUG);
#endif
#ifdef RTE_LIBRTE_ICE_DEBUG_TX_FREE
RTE_LOG_REGISTER(ice_logtype_tx_free, pmd.net.ice.tx_free, DEBUG);
#endif