/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation
 */
#include <rte_string_fns.h>
#include <rte_ethdev_pci.h>
#include "base/ice_sched.h"
#include "base/ice_flow.h"
#include "base/ice_dcb.h"
#include "base/ice_common.h"
#include "ice_ethdev.h"
#include "ice_rxtx.h"
#include "ice_generic_flow.h"
#define ICE_SAFE_MODE_SUPPORT_ARG "safe-mode-support"
#define ICE_PIPELINE_MODE_SUPPORT_ARG "pipeline-mode-support"
#define ICE_PROTO_XTR_ARG         "proto_xtr"
static const char * const ice_valid_args[] = {
	ICE_SAFE_MODE_SUPPORT_ARG,
	ICE_PIPELINE_MODE_SUPPORT_ARG,
	ICE_PROTO_XTR_ARG,
	NULL
};
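
/*
 * Illustrative devargs usage (the PCI address and queue IDs below are
 * made-up examples, not defaults):
 *   -w 18:00.0,proto_xtr=[(1,2-3):tcp,8-9:vlan],safe-mode-support=1
 * selects TCP field extraction on queues 1-3, VLAN extraction on
 * queues 8-9, and allows the PMD to come up in safe mode if the DDP
 * package cannot be loaded.
 */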
#define ICE_DFLT_OUTER_TAG_TYPE ICE_AQ_VSI_OUTER_TAG_VLAN_9100
/* DDP package search path */
#define ICE_PKG_FILE_DEFAULT "/lib/firmware/intel/ice/ddp/ice.pkg"
#define ICE_PKG_FILE_UPDATES "/lib/firmware/updates/intel/ice/ddp/ice.pkg"
#define ICE_PKG_FILE_SEARCH_PATH_DEFAULT "/lib/firmware/intel/ice/ddp/"
#define ICE_PKG_FILE_SEARCH_PATH_UPDATES "/lib/firmware/updates/intel/ice/ddp/"

#define ICE_OS_DEFAULT_PKG_NAME		"ICE OS Default Package"
#define ICE_COMMS_PKG_NAME		"ICE COMMS Package"
#define ICE_MAX_PKG_FILENAME_SIZE	256
#define ICE_MAX_RES_DESC_NUM		1024
int ice_logtype_init;
int ice_logtype_driver;
#ifdef RTE_LIBRTE_ICE_DEBUG_RX
int ice_logtype_rx;
#endif
#ifdef RTE_LIBRTE_ICE_DEBUG_TX
int ice_logtype_tx;
#endif
#ifdef RTE_LIBRTE_ICE_DEBUG_TX_FREE
int ice_logtype_tx_free;
#endif
static int ice_dev_configure(struct rte_eth_dev *dev);
static int ice_dev_start(struct rte_eth_dev *dev);
static void ice_dev_stop(struct rte_eth_dev *dev);
static void ice_dev_close(struct rte_eth_dev *dev);
static int ice_dev_reset(struct rte_eth_dev *dev);
static int ice_dev_info_get(struct rte_eth_dev *dev,
			    struct rte_eth_dev_info *dev_info);
static int ice_link_update(struct rte_eth_dev *dev,
			   int wait_to_complete);
static int ice_dev_set_link_up(struct rte_eth_dev *dev);
static int ice_dev_set_link_down(struct rte_eth_dev *dev);

static int ice_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
static int ice_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static int ice_vlan_tpid_set(struct rte_eth_dev *dev,
			     enum rte_vlan_type vlan_type,
			     uint16_t tpid);
static int ice_rss_reta_update(struct rte_eth_dev *dev,
			       struct rte_eth_rss_reta_entry64 *reta_conf,
			       uint16_t reta_size);
static int ice_rss_reta_query(struct rte_eth_dev *dev,
			      struct rte_eth_rss_reta_entry64 *reta_conf,
			      uint16_t reta_size);
static int ice_rss_hash_update(struct rte_eth_dev *dev,
			       struct rte_eth_rss_conf *rss_conf);
static int ice_rss_hash_conf_get(struct rte_eth_dev *dev,
				 struct rte_eth_rss_conf *rss_conf);
static int ice_promisc_enable(struct rte_eth_dev *dev);
static int ice_promisc_disable(struct rte_eth_dev *dev);
static int ice_allmulti_enable(struct rte_eth_dev *dev);
static int ice_allmulti_disable(struct rte_eth_dev *dev);
static int ice_vlan_filter_set(struct rte_eth_dev *dev,
			       uint16_t vlan_id,
			       int on);
static int ice_macaddr_set(struct rte_eth_dev *dev,
			   struct rte_ether_addr *mac_addr);
static int ice_macaddr_add(struct rte_eth_dev *dev,
			   struct rte_ether_addr *mac_addr,
			   __rte_unused uint32_t index,
			   uint32_t pool);
static void ice_macaddr_remove(struct rte_eth_dev *dev, uint32_t index);
static int ice_rx_queue_intr_enable(struct rte_eth_dev *dev,
				    uint16_t queue_id);
static int ice_rx_queue_intr_disable(struct rte_eth_dev *dev,
				     uint16_t queue_id);
static int ice_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
			      size_t fw_size);
static int ice_vlan_pvid_set(struct rte_eth_dev *dev,
			     uint16_t pvid, int on);
static int ice_get_eeprom_length(struct rte_eth_dev *dev);
static int ice_get_eeprom(struct rte_eth_dev *dev,
			  struct rte_dev_eeprom_info *eeprom);
static int ice_stats_get(struct rte_eth_dev *dev,
			 struct rte_eth_stats *stats);
static int ice_stats_reset(struct rte_eth_dev *dev);
static int ice_xstats_get(struct rte_eth_dev *dev,
			  struct rte_eth_xstat *xstats, unsigned int n);
static int ice_xstats_get_names(struct rte_eth_dev *dev,
				struct rte_eth_xstat_name *xstats_names,
				unsigned int limit);
static int ice_dev_filter_ctrl(struct rte_eth_dev *dev,
			       enum rte_filter_type filter_type,
			       enum rte_filter_op filter_op,
			       void *arg);
static int ice_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
			struct rte_eth_udp_tunnel *udp_tunnel);
static int ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
			struct rte_eth_udp_tunnel *udp_tunnel);
static const struct rte_pci_id pci_id_ice_map[] = {
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_BACKPLANE) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810_XXV_BACKPLANE) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810_XXV_QSFP) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810_XXV_SFP) },
	{ .vendor_id = 0, /* sentinel */ },
};
static const struct eth_dev_ops ice_eth_dev_ops = {
	.dev_configure                = ice_dev_configure,
	.dev_start                    = ice_dev_start,
	.dev_stop                     = ice_dev_stop,
	.dev_close                    = ice_dev_close,
	.dev_reset                    = ice_dev_reset,
	.dev_set_link_up              = ice_dev_set_link_up,
	.dev_set_link_down            = ice_dev_set_link_down,
	.rx_queue_start               = ice_rx_queue_start,
	.rx_queue_stop                = ice_rx_queue_stop,
	.tx_queue_start               = ice_tx_queue_start,
	.tx_queue_stop                = ice_tx_queue_stop,
	.rx_queue_setup               = ice_rx_queue_setup,
	.rx_queue_release             = ice_rx_queue_release,
	.tx_queue_setup               = ice_tx_queue_setup,
	.tx_queue_release             = ice_tx_queue_release,
	.dev_infos_get                = ice_dev_info_get,
	.dev_supported_ptypes_get     = ice_dev_supported_ptypes_get,
	.link_update                  = ice_link_update,
	.mtu_set                      = ice_mtu_set,
	.mac_addr_set                 = ice_macaddr_set,
	.mac_addr_add                 = ice_macaddr_add,
	.mac_addr_remove              = ice_macaddr_remove,
	.vlan_filter_set              = ice_vlan_filter_set,
	.vlan_offload_set             = ice_vlan_offload_set,
	.vlan_tpid_set                = ice_vlan_tpid_set,
	.reta_update                  = ice_rss_reta_update,
	.reta_query                   = ice_rss_reta_query,
	.rss_hash_update              = ice_rss_hash_update,
	.rss_hash_conf_get            = ice_rss_hash_conf_get,
	.promiscuous_enable           = ice_promisc_enable,
	.promiscuous_disable          = ice_promisc_disable,
	.allmulticast_enable          = ice_allmulti_enable,
	.allmulticast_disable         = ice_allmulti_disable,
	.rx_queue_intr_enable         = ice_rx_queue_intr_enable,
	.rx_queue_intr_disable        = ice_rx_queue_intr_disable,
	.fw_version_get               = ice_fw_version_get,
	.vlan_pvid_set                = ice_vlan_pvid_set,
	.rxq_info_get                 = ice_rxq_info_get,
	.txq_info_get                 = ice_txq_info_get,
	.rx_burst_mode_get            = ice_rx_burst_mode_get,
	.tx_burst_mode_get            = ice_tx_burst_mode_get,
	.get_eeprom_length            = ice_get_eeprom_length,
	.get_eeprom                   = ice_get_eeprom,
	.rx_queue_count               = ice_rx_queue_count,
	.rx_descriptor_status         = ice_rx_descriptor_status,
	.tx_descriptor_status         = ice_tx_descriptor_status,
	.stats_get                    = ice_stats_get,
	.stats_reset                  = ice_stats_reset,
	.xstats_get                   = ice_xstats_get,
	.xstats_get_names             = ice_xstats_get_names,
	.xstats_reset                 = ice_stats_reset,
	.filter_ctrl                  = ice_dev_filter_ctrl,
	.udp_tunnel_port_add          = ice_dev_udp_tunnel_port_add,
	.udp_tunnel_port_del          = ice_dev_udp_tunnel_port_del,
};
/* store statistics names and their offsets in the stats structure */
struct ice_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned int offset;
};

static const struct ice_xstats_name_off ice_stats_strings[] = {
	{"rx_unicast_packets", offsetof(struct ice_eth_stats, rx_unicast)},
	{"rx_multicast_packets", offsetof(struct ice_eth_stats, rx_multicast)},
	{"rx_broadcast_packets", offsetof(struct ice_eth_stats, rx_broadcast)},
	{"rx_dropped_packets", offsetof(struct ice_eth_stats, rx_discards)},
	{"rx_unknown_protocol_packets", offsetof(struct ice_eth_stats,
		rx_unknown_protocol)},
	{"tx_unicast_packets", offsetof(struct ice_eth_stats, tx_unicast)},
	{"tx_multicast_packets", offsetof(struct ice_eth_stats, tx_multicast)},
	{"tx_broadcast_packets", offsetof(struct ice_eth_stats, tx_broadcast)},
	{"tx_dropped_packets", offsetof(struct ice_eth_stats, tx_discards)},
};

#define ICE_NB_ETH_XSTATS (sizeof(ice_stats_strings) / \
		sizeof(ice_stats_strings[0]))
static const struct ice_xstats_name_off ice_hw_port_strings[] = {
	{"tx_link_down_dropped", offsetof(struct ice_hw_port_stats,
		tx_dropped_link_down)},
	{"rx_crc_errors", offsetof(struct ice_hw_port_stats, crc_errors)},
	{"rx_illegal_byte_errors", offsetof(struct ice_hw_port_stats,
		illegal_bytes)},
	{"rx_error_bytes", offsetof(struct ice_hw_port_stats, error_bytes)},
	{"mac_local_errors", offsetof(struct ice_hw_port_stats,
		mac_local_faults)},
	{"mac_remote_errors", offsetof(struct ice_hw_port_stats,
		mac_remote_faults)},
	{"rx_len_errors", offsetof(struct ice_hw_port_stats,
		rx_len_errors)},
	{"tx_xon_packets", offsetof(struct ice_hw_port_stats, link_xon_tx)},
	{"rx_xon_packets", offsetof(struct ice_hw_port_stats, link_xon_rx)},
	{"tx_xoff_packets", offsetof(struct ice_hw_port_stats, link_xoff_tx)},
	{"rx_xoff_packets", offsetof(struct ice_hw_port_stats, link_xoff_rx)},
	{"rx_size_64_packets", offsetof(struct ice_hw_port_stats, rx_size_64)},
	{"rx_size_65_to_127_packets", offsetof(struct ice_hw_port_stats,
		rx_size_127)},
	{"rx_size_128_to_255_packets", offsetof(struct ice_hw_port_stats,
		rx_size_255)},
	{"rx_size_256_to_511_packets", offsetof(struct ice_hw_port_stats,
		rx_size_511)},
	{"rx_size_512_to_1023_packets", offsetof(struct ice_hw_port_stats,
		rx_size_1023)},
	{"rx_size_1024_to_1522_packets", offsetof(struct ice_hw_port_stats,
		rx_size_1522)},
	{"rx_size_1523_to_max_packets", offsetof(struct ice_hw_port_stats,
		rx_size_big)},
	{"rx_undersized_errors", offsetof(struct ice_hw_port_stats,
		rx_undersize)},
	{"rx_oversize_errors", offsetof(struct ice_hw_port_stats,
		rx_oversize)},
	{"rx_mac_short_pkt_dropped", offsetof(struct ice_hw_port_stats,
		mac_short_pkt_dropped)},
	{"rx_fragmented_errors", offsetof(struct ice_hw_port_stats,
		rx_fragments)},
	{"rx_jabber_errors", offsetof(struct ice_hw_port_stats, rx_jabber)},
	{"tx_size_64_packets", offsetof(struct ice_hw_port_stats, tx_size_64)},
	{"tx_size_65_to_127_packets", offsetof(struct ice_hw_port_stats,
		tx_size_127)},
	{"tx_size_128_to_255_packets", offsetof(struct ice_hw_port_stats,
		tx_size_255)},
	{"tx_size_256_to_511_packets", offsetof(struct ice_hw_port_stats,
		tx_size_511)},
	{"tx_size_512_to_1023_packets", offsetof(struct ice_hw_port_stats,
		tx_size_1023)},
	{"tx_size_1024_to_1522_packets", offsetof(struct ice_hw_port_stats,
		tx_size_1522)},
	{"tx_size_1523_to_max_packets", offsetof(struct ice_hw_port_stats,
		tx_size_big)},
};

#define ICE_NB_HW_PORT_XSTATS (sizeof(ice_hw_port_strings) / \
		sizeof(ice_hw_port_strings[0]))
static void
ice_init_controlq_parameter(struct ice_hw *hw)
{
	/* fields for adminq */
	hw->adminq.num_rq_entries = ICE_ADMINQ_LEN;
	hw->adminq.num_sq_entries = ICE_ADMINQ_LEN;
	hw->adminq.rq_buf_size = ICE_ADMINQ_BUF_SZ;
	hw->adminq.sq_buf_size = ICE_ADMINQ_BUF_SZ;

	/* fields for mailboxq, DPDK acts as the PF host */
	hw->mailboxq.num_rq_entries = ICE_MAILBOXQ_LEN;
	hw->mailboxq.num_sq_entries = ICE_MAILBOXQ_LEN;
	hw->mailboxq.rq_buf_size = ICE_MAILBOXQ_BUF_SZ;
	hw->mailboxq.sq_buf_size = ICE_MAILBOXQ_BUF_SZ;
}
static int
lookup_proto_xtr_type(const char *xtr_name)
{
	static struct {
		const char *name;
		enum proto_xtr_type type;
	} xtr_type_map[] = {
		{ "vlan",      PROTO_XTR_VLAN      },
		{ "ipv4",      PROTO_XTR_IPV4      },
		{ "ipv6",      PROTO_XTR_IPV6      },
		{ "ipv6_flow", PROTO_XTR_IPV6_FLOW },
		{ "tcp",       PROTO_XTR_TCP       },
	};
	uint32_t i;

	for (i = 0; i < RTE_DIM(xtr_type_map); i++) {
		if (strcmp(xtr_name, xtr_type_map[i].name) == 0)
			return xtr_type_map[i].type;
	}

	return -1;
}
/*
 * Parse elem; the elem could be a single number/range or a '(' ')' group:
 * 1) A single number elem, just a simple digit, e.g. 9
 * 2) A single range elem, two digits with a '-' between, e.g. 2-6
 * 3) A group elem, combining multiple 1) or 2) with '( )', e.g. (0,2-4,6)
 *    Within a group elem, '-' is used as the range separator and
 *    ',' separates single numbers.
 */
static int
parse_queue_set(const char *input, int xtr_type, struct ice_devargs *devargs)
{
	const char *str = input;
	char *end = NULL;
	uint32_t min, max;
	uint32_t idx;

	while (isblank(*str))
		str++;

	if (!isdigit(*str) && *str != '(')
		return -1;

	/* process single number or single range of number */
	if (*str != '(') {
		errno = 0;
		idx = strtoul(str, &end, 10);
		if (errno || end == NULL || idx >= ICE_MAX_QUEUE_NUM)
			return -1;

		while (isblank(*end))
			end++;

		min = idx;
		max = idx;

		/* process single <number>-<number> */
		if (*end == '-') {
			end++;
			while (isblank(*end))
				end++;
			if (!isdigit(*end))
				return -1;

			errno = 0;
			idx = strtoul(end, &end, 10);
			if (errno || end == NULL || idx >= ICE_MAX_QUEUE_NUM)
				return -1;

			max = idx;
			while (isblank(*end))
				end++;
		}

		if (*end != ':')
			return -1;

		for (idx = RTE_MIN(min, max);
		     idx <= RTE_MAX(min, max); idx++)
			devargs->proto_xtr[idx] = xtr_type;

		return 0;
	}

	/* process set within bracket */
	str++;
	while (isblank(*str))
		str++;
	if (*str == '\0')
		return -1;

	min = ICE_MAX_QUEUE_NUM;
	do {
		/* go ahead to the first digit */
		while (isblank(*str))
			str++;
		if (!isdigit(*str))
			return -1;

		/* get the digit value */
		errno = 0;
		idx = strtoul(str, &end, 10);
		if (errno || end == NULL || idx >= ICE_MAX_QUEUE_NUM)
			return -1;

		/* go ahead to separator '-', ',' and ')' */
		while (isblank(*end))
			end++;
		if (*end == '-') {
			if (min == ICE_MAX_QUEUE_NUM)
				min = idx;
			else /* avoid continuous '-' */
				return -1;
		} else if (*end == ',' || *end == ')') {
			max = idx;
			if (min == ICE_MAX_QUEUE_NUM)
				min = idx;

			for (idx = RTE_MIN(min, max);
			     idx <= RTE_MAX(min, max); idx++)
				devargs->proto_xtr[idx] = xtr_type;

			min = ICE_MAX_QUEUE_NUM;
		} else {
			return -1;
		}

		str = end + 1;
	} while (*end != ')' && *end != '\0');

	return 0;
}
static int
parse_queue_proto_xtr(const char *queues, struct ice_devargs *devargs)
{
	const char *queue_start;
	uint32_t idx;
	int xtr_type;
	char xtr_name[32];

	while (isblank(*queues))
		queues++;

	if (*queues != '[') {
		xtr_type = lookup_proto_xtr_type(queues);
		if (xtr_type < 0)
			return -1;

		devargs->proto_xtr_dflt = xtr_type;

		return 0;
	}

	queues++;
	do {
		while (isblank(*queues))
			queues++;
		if (*queues == '\0')
			return -1;

		queue_start = queues;

		/* go across a complete bracket */
		if (*queue_start == '(') {
			queues += strcspn(queues, ")");
			if (*queues != ')')
				return -1;
		}

		/* scan the separator ':' */
		queues += strcspn(queues, ":");
		if (*queues++ != ':')
			return -1;
		while (isblank(*queues))
			queues++;

		for (idx = 0; ; idx++) {
			if (isblank(queues[idx]) ||
			    queues[idx] == ',' ||
			    queues[idx] == ']' ||
			    queues[idx] == '\0')
				break;

			if (idx > sizeof(xtr_name) - 2)
				return -1;

			xtr_name[idx] = queues[idx];
		}
		xtr_name[idx] = '\0';
		xtr_type = lookup_proto_xtr_type(xtr_name);
		if (xtr_type < 0)
			return -1;

		queues += idx;

		while (isblank(*queues) || *queues == ',' || *queues == ']')
			queues++;

		if (parse_queue_set(queue_start, xtr_type, devargs) < 0)
			return -1;
	} while (*queues != '\0');

	return 0;
}
static int
handle_proto_xtr_arg(__rte_unused const char *key, const char *value,
		     void *extra_args)
{
	struct ice_devargs *devargs = extra_args;

	if (value == NULL || extra_args == NULL)
		return -EINVAL;

	if (parse_queue_proto_xtr(value, devargs) < 0) {
		PMD_DRV_LOG(ERR,
			    "The protocol extraction parameter is wrong: '%s'",
			    value);
		return -1;
	}

	return 0;
}
static bool
ice_proto_xtr_support(struct ice_hw *hw)
{
#define FLX_REG(val, fld, idx) \
	(((val) & GLFLXP_RXDID_FLX_WRD_##idx##_##fld##_M) >> \
	 GLFLXP_RXDID_FLX_WRD_##idx##_##fld##_S)
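	/*
	 * For example, FLX_REG(v, PROT_MDID, 4) expands to
	 * ((v & GLFLXP_RXDID_FLX_WRD_4_PROT_MDID_M) >>
	 *  GLFLXP_RXDID_FLX_WRD_4_PROT_MDID_S),
	 * i.e. the protocol ID programmed into flexible descriptor word 4.
	 */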
	static struct {
		uint32_t rxdid;
		uint16_t protid_0;
		uint16_t protid_1;
	} xtr_sets[] = {
		{ ICE_RXDID_COMMS_AUX_VLAN, ICE_PROT_EVLAN_O, ICE_PROT_VLAN_O },
		{ ICE_RXDID_COMMS_AUX_IPV4, ICE_PROT_IPV4_OF_OR_S,
		  ICE_PROT_IPV4_OF_OR_S },
		{ ICE_RXDID_COMMS_AUX_IPV6, ICE_PROT_IPV6_OF_OR_S,
		  ICE_PROT_IPV6_OF_OR_S },
		{ ICE_RXDID_COMMS_AUX_IPV6_FLOW, ICE_PROT_IPV6_OF_OR_S,
		  ICE_PROT_IPV6_OF_OR_S },
		{ ICE_RXDID_COMMS_AUX_TCP, ICE_PROT_TCP_IL, ICE_PROT_ID_INVAL },
	};
	uint32_t i;

	for (i = 0; i < RTE_DIM(xtr_sets); i++) {
		uint32_t rxdid = xtr_sets[i].rxdid;
		uint32_t v;

		if (xtr_sets[i].protid_0 != ICE_PROT_ID_INVAL) {
			v = ICE_READ_REG(hw, GLFLXP_RXDID_FLX_WRD_4(rxdid));

			if (FLX_REG(v, PROT_MDID, 4) != xtr_sets[i].protid_0 ||
			    FLX_REG(v, RXDID_OPCODE, 4) != ICE_RX_OPC_EXTRACT)
				return false;
		}

		if (xtr_sets[i].protid_1 != ICE_PROT_ID_INVAL) {
			v = ICE_READ_REG(hw, GLFLXP_RXDID_FLX_WRD_5(rxdid));

			if (FLX_REG(v, PROT_MDID, 5) != xtr_sets[i].protid_1 ||
			    FLX_REG(v, RXDID_OPCODE, 5) != ICE_RX_OPC_EXTRACT)
				return false;
		}
	}

	return true;
}
static int
ice_res_pool_init(struct ice_res_pool_info *pool, uint32_t base,
		  uint32_t num)
{
	struct pool_entry *entry;

	if (!pool || !num)
		return -EINVAL;

	entry = rte_zmalloc(NULL, sizeof(*entry), 0);
	if (entry == NULL) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory for resource pool");
		return -ENOMEM;
	}

	/* queue heap initialize */
	pool->num_free = num;
	pool->num_alloc = 0;
	pool->base = base;
	LIST_INIT(&pool->alloc_list);
	LIST_INIT(&pool->free_list);

	/* Initialize element */
	entry->base = 0;
	entry->len = num;

	LIST_INSERT_HEAD(&pool->free_list, entry, next);
	return 0;
}
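
/*
 * Usage sketch (taken from the PF init path later in this file): the
 * MSI-X pool is created once with
 * ice_res_pool_init(&pf->msix_pool, 1, num_msix_vectors - 1) and
 * interrupt vectors are then carved out per VSI via
 * ice_res_pool_alloc(&pf->msix_pool, n).
 */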
static int
ice_res_pool_alloc(struct ice_res_pool_info *pool,
		   uint16_t num)
{
	struct pool_entry *entry, *valid_entry;

	if (!pool || !num) {
		PMD_INIT_LOG(ERR, "Invalid parameter");
		return -EINVAL;
	}

	if (pool->num_free < num) {
		PMD_INIT_LOG(ERR, "No resource. ask:%u, available:%u",
			     num, pool->num_free);
		return -ENOMEM;
	}

	valid_entry = NULL;
	/* Look up the free list and find the best-fit entry */
	LIST_FOREACH(entry, &pool->free_list, next) {
		if (entry->len >= num) {
			/* Find the best one */
			if (entry->len == num) {
				valid_entry = entry;
				break;
			}
			if (!valid_entry ||
			    valid_entry->len > entry->len)
				valid_entry = entry;
		}
	}

	/* No entry found to satisfy the request, return */
	if (!valid_entry) {
		PMD_INIT_LOG(ERR, "No valid entry found");
		return -ENOMEM;
	}
	/**
	 * The entry has exactly the requested number of queues;
	 * remove it from the free list.
	 */
	if (valid_entry->len == num) {
		LIST_REMOVE(valid_entry, next);
	} else {
		/**
		 * The entry has more queues than requested; create a new
		 * entry for the alloc_list and subtract the allocated base
		 * and length from the free_list entry.
		 */
		entry = rte_zmalloc(NULL, sizeof(*entry), 0);
		if (entry == NULL) {
			PMD_INIT_LOG(ERR,
				     "Failed to allocate memory for "
				     "resource pool");
			return -ENOMEM;
		}
		entry->base = valid_entry->base;
		entry->len = num;
		valid_entry->base += num;
		valid_entry->len -= num;
		valid_entry = entry;
	}

	/* Insert it into alloc list, not sorted */
	LIST_INSERT_HEAD(&pool->alloc_list, valid_entry, next);

	pool->num_free -= valid_entry->len;
	pool->num_alloc += valid_entry->len;

	return valid_entry->base + pool->base;
}
static void
ice_res_pool_destroy(struct ice_res_pool_info *pool)
{
	struct pool_entry *entry, *next_entry;

	if (!pool)
		return;

	for (entry = LIST_FIRST(&pool->alloc_list);
	     entry && (next_entry = LIST_NEXT(entry, next), 1);
	     entry = next_entry) {
		LIST_REMOVE(entry, next);
		rte_free(entry);
	}

	for (entry = LIST_FIRST(&pool->free_list);
	     entry && (next_entry = LIST_NEXT(entry, next), 1);
	     entry = next_entry) {
		LIST_REMOVE(entry, next);
		rte_free(entry);
	}

	pool->num_free = 0;
	pool->num_alloc = 0;
	pool->base = 0;
	LIST_INIT(&pool->alloc_list);
	LIST_INIT(&pool->free_list);
}
static void
ice_vsi_config_default_rss(struct ice_aqc_vsi_props *info)
{
	/* Set VSI LUT selection */
	info->q_opt_rss = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI &
			  ICE_AQ_VSI_Q_OPT_RSS_LUT_M;
	/* Set Hash scheme */
	info->q_opt_rss |= ICE_AQ_VSI_Q_OPT_RSS_TPLZ &
			   ICE_AQ_VSI_Q_OPT_RSS_HASH_M;

	/* enable TC */
	info->q_opt_tc = ICE_AQ_VSI_Q_OPT_TC_OVR_M;
}
static enum ice_status
ice_vsi_config_tc_queue_mapping(struct ice_vsi *vsi,
				struct ice_aqc_vsi_props *info,
				uint8_t enabled_tcmap)
{
	uint16_t bsf, qp_idx;

	/* Default is TC0 for now; multi-TC support needs to be added later.
	 * Configure TC and queue mapping parameters; for each enabled TC,
	 * allocate qpnum_per_tc queues to this traffic class.
	 */
	if (enabled_tcmap != 0x01) {
		PMD_INIT_LOG(ERR, "only TC0 is supported");
		return -ENOTSUP;
	}

	vsi->nb_qps = RTE_MIN(vsi->nb_qps, ICE_MAX_Q_PER_TC);
	bsf = rte_bsf32(vsi->nb_qps);
	/* Adjust the queue number to actual queues that can be applied */
	vsi->nb_qps = 0x1 << bsf;

	qp_idx = 0;
	/* Set tc and queue mapping with VSI */
	info->tc_mapping[0] = rte_cpu_to_le_16((qp_idx <<
						ICE_AQ_VSI_TC_Q_OFFSET_S) |
					       (bsf << ICE_AQ_VSI_TC_Q_NUM_S));

	/* Associate queue number with VSI */
	info->mapping_flags |= rte_cpu_to_le_16(ICE_AQ_VSI_Q_MAP_CONTIG);
	info->q_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
	info->q_mapping[1] = rte_cpu_to_le_16(vsi->nb_qps);
	info->valid_sections |=
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
	/* Set the info.ingress_table and info.egress_table
	 * for the UP translate table. For now just set it to a 1:1 map
	 * by default -- 0b 111 110 101 100 011 010 001 000 == 0xFAC688
	 */
#define ICE_TC_QUEUE_TABLE_DFLT 0x00FAC688
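	/*
	 * Decoding example: user priority 2 occupies bits 8:6 of
	 * 0x00FAC688, which hold 010b = 2, i.e. each 3-bit field maps a
	 * user priority back to itself (the 1:1 map described above).
	 */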
	info->ingress_table  = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT);
	info->egress_table   = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT);
	info->outer_up_table = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT);
	return 0;
}
static int
ice_init_mac_address(struct rte_eth_dev *dev)
{
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (!rte_is_unicast_ether_addr
		((struct rte_ether_addr *)hw->port_info[0].mac.lan_addr)) {
		PMD_INIT_LOG(ERR, "Invalid MAC address");
		return -EINVAL;
	}

	rte_ether_addr_copy(
		(struct rte_ether_addr *)hw->port_info[0].mac.lan_addr,
		(struct rte_ether_addr *)hw->port_info[0].mac.perm_addr);

	dev->data->mac_addrs =
		rte_zmalloc(NULL, sizeof(struct rte_ether_addr), 0);
	if (!dev->data->mac_addrs) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory to store mac address");
		return -ENOMEM;
	}
	/* store it to dev data */
	rte_ether_addr_copy(
		(struct rte_ether_addr *)hw->port_info[0].mac.perm_addr,
		&dev->data->mac_addrs[0]);
	return 0;
}
/* Find out specific MAC filter */
static struct ice_mac_filter *
ice_find_mac_filter(struct ice_vsi *vsi, struct rte_ether_addr *macaddr)
{
	struct ice_mac_filter *f;

	TAILQ_FOREACH(f, &vsi->mac_list, next) {
		if (rte_is_same_ether_addr(macaddr, &f->mac_info.mac_addr))
			return f;
	}

	return NULL;
}
static int
ice_add_mac_filter(struct ice_vsi *vsi, struct rte_ether_addr *mac_addr)
{
	struct ice_fltr_list_entry *m_list_itr = NULL;
	struct ice_mac_filter *f;
	struct LIST_HEAD_TYPE list_head;
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	int ret = 0;

	/* If it's added and configured, return */
	f = ice_find_mac_filter(vsi, mac_addr);
	if (f) {
		PMD_DRV_LOG(INFO, "This MAC filter already exists.");
		return 0;
	}

	INIT_LIST_HEAD(&list_head);

	m_list_itr = (struct ice_fltr_list_entry *)
		ice_malloc(hw, sizeof(*m_list_itr));
	if (!m_list_itr) {
		ret = -ENOMEM;
		goto DONE;
	}
	ice_memcpy(m_list_itr->fltr_info.l_data.mac.mac_addr,
		   mac_addr, ETH_ALEN, ICE_NONDMA_TO_NONDMA);
	m_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
	m_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
	m_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_MAC;
	m_list_itr->fltr_info.flag = ICE_FLTR_TX;
	m_list_itr->fltr_info.vsi_handle = vsi->idx;

	LIST_ADD(&m_list_itr->list_entry, &list_head);

	/* Add the mac */
	ret = ice_add_mac(hw, &list_head);
	if (ret != ICE_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to add MAC filter");
		ret = -EINVAL;
		goto DONE;
	}
	/* Add the mac addr into mac list */
	f = rte_zmalloc(NULL, sizeof(*f), 0);
	if (!f) {
		PMD_DRV_LOG(ERR, "failed to allocate memory");
		ret = -ENOMEM;
		goto DONE;
	}
	rte_memcpy(&f->mac_info.mac_addr, mac_addr, ETH_ADDR_LEN);
	TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
	vsi->mac_num++;

	ret = 0;

DONE:
	rte_free(m_list_itr);
	return ret;
}
static int
ice_remove_mac_filter(struct ice_vsi *vsi, struct rte_ether_addr *mac_addr)
{
	struct ice_fltr_list_entry *m_list_itr = NULL;
	struct ice_mac_filter *f;
	struct LIST_HEAD_TYPE list_head;
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	int ret = 0;

	/* Can't find it, return an error */
	f = ice_find_mac_filter(vsi, mac_addr);
	if (!f)
		return -EINVAL;

	INIT_LIST_HEAD(&list_head);

	m_list_itr = (struct ice_fltr_list_entry *)
		ice_malloc(hw, sizeof(*m_list_itr));
	if (!m_list_itr) {
		ret = -ENOMEM;
		goto DONE;
	}
	ice_memcpy(m_list_itr->fltr_info.l_data.mac.mac_addr,
		   mac_addr, ETH_ALEN, ICE_NONDMA_TO_NONDMA);
	m_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
	m_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
	m_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_MAC;
	m_list_itr->fltr_info.flag = ICE_FLTR_TX;
	m_list_itr->fltr_info.vsi_handle = vsi->idx;

	LIST_ADD(&m_list_itr->list_entry, &list_head);

	/* remove the mac filter */
	ret = ice_remove_mac(hw, &list_head);
	if (ret != ICE_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to remove MAC filter");
		ret = -EINVAL;
		goto DONE;
	}

	/* Remove the mac addr from mac list */
	TAILQ_REMOVE(&vsi->mac_list, f, next);
	rte_free(f);
	vsi->mac_num--;

	ret = 0;
DONE:
	rte_free(m_list_itr);
	return ret;
}
/* Find out specific VLAN filter */
static struct ice_vlan_filter *
ice_find_vlan_filter(struct ice_vsi *vsi, uint16_t vlan_id)
{
	struct ice_vlan_filter *f;

	TAILQ_FOREACH(f, &vsi->vlan_list, next) {
		if (vlan_id == f->vlan_info.vlan_id)
			return f;
	}

	return NULL;
}
static int
ice_add_vlan_filter(struct ice_vsi *vsi, uint16_t vlan_id)
{
	struct ice_fltr_list_entry *v_list_itr = NULL;
	struct ice_vlan_filter *f;
	struct LIST_HEAD_TYPE list_head;
	struct ice_hw *hw;
	int ret = 0;

	if (!vsi || vlan_id > RTE_ETHER_MAX_VLAN_ID)
		return -EINVAL;

	hw = ICE_VSI_TO_HW(vsi);

	/* If it's added and configured, return. */
	f = ice_find_vlan_filter(vsi, vlan_id);
	if (f) {
		PMD_DRV_LOG(INFO, "This VLAN filter already exists.");
		return 0;
	}

	if (!vsi->vlan_anti_spoof_on && !vsi->vlan_filter_on)
		return 0;

	INIT_LIST_HEAD(&list_head);

	v_list_itr = (struct ice_fltr_list_entry *)
		      ice_malloc(hw, sizeof(*v_list_itr));
	if (!v_list_itr) {
		ret = -ENOMEM;
		goto DONE;
	}
	v_list_itr->fltr_info.l_data.vlan.vlan_id = vlan_id;
	v_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
	v_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
	v_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
	v_list_itr->fltr_info.flag = ICE_FLTR_TX;
	v_list_itr->fltr_info.vsi_handle = vsi->idx;

	LIST_ADD(&v_list_itr->list_entry, &list_head);

	/* Add the vlan */
	ret = ice_add_vlan(hw, &list_head);
	if (ret != ICE_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to add VLAN filter");
		ret = -EINVAL;
		goto DONE;
	}

	/* Add vlan into vlan list */
	f = rte_zmalloc(NULL, sizeof(*f), 0);
	if (!f) {
		PMD_DRV_LOG(ERR, "failed to allocate memory");
		ret = -ENOMEM;
		goto DONE;
	}
	f->vlan_info.vlan_id = vlan_id;
	TAILQ_INSERT_TAIL(&vsi->vlan_list, f, next);
	vsi->vlan_num++;

	ret = 0;

DONE:
	rte_free(v_list_itr);
	return ret;
}
static int
ice_remove_vlan_filter(struct ice_vsi *vsi, uint16_t vlan_id)
{
	struct ice_fltr_list_entry *v_list_itr = NULL;
	struct ice_vlan_filter *f;
	struct LIST_HEAD_TYPE list_head;
	struct ice_hw *hw;
	int ret = 0;

	/**
	 * VLAN 0 is the generic filter for untagged packets
	 * and can't be removed.
	 */
	if (!vsi || vlan_id == 0 || vlan_id > RTE_ETHER_MAX_VLAN_ID)
		return -EINVAL;

	hw = ICE_VSI_TO_HW(vsi);

	/* Can't find it, return an error */
	f = ice_find_vlan_filter(vsi, vlan_id);
	if (!f)
		return -EINVAL;

	INIT_LIST_HEAD(&list_head);

	v_list_itr = (struct ice_fltr_list_entry *)
		      ice_malloc(hw, sizeof(*v_list_itr));
	if (!v_list_itr) {
		ret = -ENOMEM;
		goto DONE;
	}

	v_list_itr->fltr_info.l_data.vlan.vlan_id = vlan_id;
	v_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
	v_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
	v_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
	v_list_itr->fltr_info.flag = ICE_FLTR_TX;
	v_list_itr->fltr_info.vsi_handle = vsi->idx;

	LIST_ADD(&v_list_itr->list_entry, &list_head);

	/* remove the vlan filter */
	ret = ice_remove_vlan(hw, &list_head);
	if (ret != ICE_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to remove VLAN filter");
		ret = -EINVAL;
		goto DONE;
	}

	/* Remove the vlan id from vlan list */
	TAILQ_REMOVE(&vsi->vlan_list, f, next);
	rte_free(f);
	vsi->vlan_num--;

	ret = 0;
DONE:
	rte_free(v_list_itr);
	return ret;
}
static int
ice_remove_all_mac_vlan_filters(struct ice_vsi *vsi)
{
	struct ice_mac_filter *m_f;
	struct ice_vlan_filter *v_f;
	int ret = 0;

	if (!vsi || !vsi->mac_num)
		return -EINVAL;

	TAILQ_FOREACH(m_f, &vsi->mac_list, next) {
		ret = ice_remove_mac_filter(vsi, &m_f->mac_info.mac_addr);
		if (ret != ICE_SUCCESS) {
			ret = -EINVAL;
			goto DONE;
		}
	}

	if (vsi->vlan_num == 0)
		return 0;

	TAILQ_FOREACH(v_f, &vsi->vlan_list, next) {
		ret = ice_remove_vlan_filter(vsi, v_f->vlan_info.vlan_id);
		if (ret != ICE_SUCCESS) {
			ret = -EINVAL;
			goto DONE;
		}
	}

DONE:
	return ret;
}
static int
ice_vsi_config_qinq_insertion(struct ice_vsi *vsi, bool on)
{
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	struct ice_vsi_ctx ctxt;
	uint8_t qinq_flags;
	int ret = 0;

	/* Check if it has been already on or off */
	if (vsi->info.valid_sections &
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID)) {
		if (on) {
			if ((vsi->info.outer_tag_flags &
			     ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST) ==
			    ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST)
				return 0; /* already on */
		} else {
			if (!(vsi->info.outer_tag_flags &
			      ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST))
				return 0; /* already off */
		}
	}

	if (on)
		qinq_flags = ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST;
	else
		qinq_flags = 0;
	/* clear global insertion and use per packet insertion */
	vsi->info.outer_tag_flags &= ~(ICE_AQ_VSI_OUTER_TAG_INSERT);
	vsi->info.outer_tag_flags &= ~(ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST);
	vsi->info.outer_tag_flags |= qinq_flags;
	/* use the default outer tag type (ICE_DFLT_OUTER_TAG_TYPE, 0x9100) */
	vsi->info.outer_tag_flags &= ~(ICE_AQ_VSI_OUTER_TAG_TYPE_M);
	vsi->info.outer_tag_flags |= ICE_DFLT_OUTER_TAG_TYPE <<
				     ICE_AQ_VSI_OUTER_TAG_TYPE_S;
	(void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
	ctxt.info.valid_sections =
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);
	ctxt.vsi_num = vsi->vsi_id;
	ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
	if (ret) {
		PMD_DRV_LOG(INFO,
			    "Update VSI failed to %s qinq insertion",
			    on ? "enable" : "disable");
		return -EINVAL;
	}

	vsi->info.valid_sections |=
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);

	return ret;
}
static int
ice_vsi_config_qinq_stripping(struct ice_vsi *vsi, bool on)
{
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	struct ice_vsi_ctx ctxt;
	uint8_t qinq_flags;
	int ret = 0;

	/* Check if it has been already on or off */
	if (vsi->info.valid_sections &
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID)) {
		if (on) {
			if ((vsi->info.outer_tag_flags &
			     ICE_AQ_VSI_OUTER_TAG_MODE_M) ==
			    ICE_AQ_VSI_OUTER_TAG_COPY)
				return 0; /* already on */
		} else {
			if ((vsi->info.outer_tag_flags &
			     ICE_AQ_VSI_OUTER_TAG_MODE_M) ==
			    ICE_AQ_VSI_OUTER_TAG_NOTHING)
				return 0; /* already off */
		}
	}

	if (on)
		qinq_flags = ICE_AQ_VSI_OUTER_TAG_COPY;
	else
		qinq_flags = ICE_AQ_VSI_OUTER_TAG_NOTHING;
	vsi->info.outer_tag_flags &= ~(ICE_AQ_VSI_OUTER_TAG_MODE_M);
	vsi->info.outer_tag_flags |= qinq_flags;
	/* use the default outer tag type (ICE_DFLT_OUTER_TAG_TYPE, 0x9100) */
	vsi->info.outer_tag_flags &= ~(ICE_AQ_VSI_OUTER_TAG_TYPE_M);
	vsi->info.outer_tag_flags |= ICE_DFLT_OUTER_TAG_TYPE <<
				     ICE_AQ_VSI_OUTER_TAG_TYPE_S;
	(void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
	ctxt.info.valid_sections =
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);
	ctxt.vsi_num = vsi->vsi_id;
	ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
	if (ret) {
		PMD_DRV_LOG(INFO,
			    "Update VSI failed to %s qinq stripping",
			    on ? "enable" : "disable");
		return -EINVAL;
	}

	vsi->info.valid_sections |=
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);

	return ret;
}
static int
ice_vsi_config_double_vlan(struct ice_vsi *vsi, int on)
{
	int ret;

	ret = ice_vsi_config_qinq_stripping(vsi, on);
	if (ret)
		PMD_DRV_LOG(ERR, "Fail to set qinq stripping - %d", ret);

	ret = ice_vsi_config_qinq_insertion(vsi, on);
	if (ret)
		PMD_DRV_LOG(ERR, "Fail to set qinq insertion - %d", ret);

	return ret;
}
static void
ice_pf_enable_irq0(struct ice_hw *hw)
{
	/* reset the registers */
	ICE_WRITE_REG(hw, PFINT_OICR_ENA, 0);
	ICE_READ_REG(hw, PFINT_OICR);

#ifdef ICE_LSE_SUPPORT
	/* Enable all interrupt types except LSC */
	ICE_WRITE_REG(hw, PFINT_OICR_ENA,
		      (uint32_t)(PFINT_OICR_ENA_INT_ENA_M &
				 (~PFINT_OICR_LINK_STAT_CHANGE_M)));

	ICE_WRITE_REG(hw, PFINT_OICR_CTL,
		      (0 & PFINT_OICR_CTL_MSIX_INDX_M) |
		      ((0 << PFINT_OICR_CTL_ITR_INDX_S) &
		       PFINT_OICR_CTL_ITR_INDX_M) |
		      PFINT_OICR_CTL_CAUSE_ENA_M);

	ICE_WRITE_REG(hw, PFINT_FW_CTL,
		      (0 & PFINT_FW_CTL_MSIX_INDX_M) |
		      ((0 << PFINT_FW_CTL_ITR_INDX_S) &
		       PFINT_FW_CTL_ITR_INDX_M) |
		      PFINT_FW_CTL_CAUSE_ENA_M);
#else
	/* Enable all interrupt types */
	ICE_WRITE_REG(hw, PFINT_OICR_ENA, PFINT_OICR_ENA_INT_ENA_M);
#endif

	ICE_WRITE_REG(hw, GLINT_DYN_CTL(0),
		      GLINT_DYN_CTL_INTENA_M |
		      GLINT_DYN_CTL_CLEARPBA_M |
		      GLINT_DYN_CTL_ITR_INDX_M);

	ice_flush(hw);
}
static void
ice_pf_disable_irq0(struct ice_hw *hw)
{
	/* Disable all interrupt types */
	ICE_WRITE_REG(hw, GLINT_DYN_CTL(0), GLINT_DYN_CTL_WB_ON_ITR_M);
	ice_flush(hw);
}
static void
ice_handle_aq_msg(struct rte_eth_dev *dev)
{
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_ctl_q_info *cq = &hw->adminq;
	struct ice_rq_event_info event;
	uint16_t pending, opcode;
	int ret;

	event.buf_len = ICE_AQ_MAX_BUF_LEN;
	event.msg_buf = rte_zmalloc(NULL, event.buf_len, 0);
	if (!event.msg_buf) {
		PMD_DRV_LOG(ERR, "Failed to allocate mem");
		return;
	}

	pending = 1;
	while (pending) {
		ret = ice_clean_rq_elem(hw, cq, &event, &pending);

		if (ret != ICE_SUCCESS) {
			PMD_DRV_LOG(INFO,
				    "Failed to read msg from AdminQ, "
				    "adminq_err: %u",
				    hw->adminq.sq_last_status);
			break;
		}
		opcode = rte_le_to_cpu_16(event.desc.opcode);

		switch (opcode) {
		case ice_aqc_opc_get_link_status:
			ret = ice_link_update(dev, 0);
			if (!ret)
				_rte_eth_dev_callback_process
					(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
			break;
		default:
			PMD_DRV_LOG(DEBUG, "Request %u is not supported yet",
				    opcode);
			break;
		}
	}
	rte_free(event.msg_buf);
}
/**
 * Interrupt handler triggered by NIC for handling
 * specific interrupt.
 *
 * @param handle
 *  Pointer to interrupt handle.
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
ice_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t oicr;
	uint32_t reg;
	uint8_t pf_num;
	uint8_t event;
	uint16_t queue;
	int ret;
#ifdef ICE_LSE_SUPPORT
	uint32_t int_fw_ctl;
#endif

	/* Disable interrupt */
	ice_pf_disable_irq0(hw);

	/* read out interrupt causes */
	oicr = ICE_READ_REG(hw, PFINT_OICR);
#ifdef ICE_LSE_SUPPORT
	int_fw_ctl = ICE_READ_REG(hw, PFINT_FW_CTL);
#endif

	/* No interrupt event indicated */
	if (!(oicr & PFINT_OICR_INTEVENT_M)) {
		PMD_DRV_LOG(INFO, "No interrupt event");
		goto done;
	}

#ifdef ICE_LSE_SUPPORT
	if (int_fw_ctl & PFINT_FW_CTL_INTEVENT_M) {
		PMD_DRV_LOG(INFO, "FW_CTL: link state change event");
		ice_handle_aq_msg(dev);
	}
#else
	if (oicr & PFINT_OICR_LINK_STAT_CHANGE_M) {
		PMD_DRV_LOG(INFO, "OICR: link state change event");
		ret = ice_link_update(dev, 0);
		if (!ret)
			_rte_eth_dev_callback_process
				(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
	}
#endif

	if (oicr & PFINT_OICR_MAL_DETECT_M) {
		PMD_DRV_LOG(WARNING, "OICR: MDD event");
		reg = ICE_READ_REG(hw, GL_MDET_TX_PQM);
		if (reg & GL_MDET_TX_PQM_VALID_M) {
			pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >>
				 GL_MDET_TX_PQM_PF_NUM_S;
			event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >>
				GL_MDET_TX_PQM_MAL_TYPE_S;
			queue = (reg & GL_MDET_TX_PQM_QNUM_M) >>
				GL_MDET_TX_PQM_QNUM_S;

			PMD_DRV_LOG(WARNING, "Malicious Driver Detection event "
				    "%d by PQM on TX queue %d PF# %d",
				    event, queue, pf_num);
		}

		reg = ICE_READ_REG(hw, GL_MDET_TX_TCLAN);
		if (reg & GL_MDET_TX_TCLAN_VALID_M) {
			pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >>
				 GL_MDET_TX_TCLAN_PF_NUM_S;
			event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >>
				GL_MDET_TX_TCLAN_MAL_TYPE_S;
			queue = (reg & GL_MDET_TX_TCLAN_QNUM_M) >>
				GL_MDET_TX_TCLAN_QNUM_S;

			PMD_DRV_LOG(WARNING, "Malicious Driver Detection event "
				    "%d by TCLAN on TX queue %d PF# %d",
				    event, queue, pf_num);
		}
	}
done:
	/* Enable interrupt */
	ice_pf_enable_irq0(hw);
	rte_intr_ack(dev->intr_handle);
}
static void
ice_init_proto_xtr(struct rte_eth_dev *dev)
{
	struct ice_adapter *ad =
		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	uint16_t i;

	if (!ice_proto_xtr_support(hw)) {
		PMD_DRV_LOG(NOTICE, "Protocol extraction is not supported");
		return;
	}

	pf->proto_xtr = rte_zmalloc(NULL, pf->lan_nb_qps, 0);
	if (unlikely(pf->proto_xtr == NULL)) {
		PMD_DRV_LOG(ERR, "No memory for setting up protocol extraction table");
		return;
	}

	for (i = 0; i < pf->lan_nb_qps; i++)
		pf->proto_xtr[i] = ad->devargs.proto_xtr[i] != PROTO_XTR_NONE ?
				   ad->devargs.proto_xtr[i] :
				   ad->devargs.proto_xtr_dflt;
}
/* Initialize SW parameters of PF */
static int
ice_pf_sw_init(struct rte_eth_dev *dev)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_hw *hw = ICE_PF_TO_HW(pf);

	pf->lan_nb_qp_max =
		(uint16_t)RTE_MIN(hw->func_caps.common_cap.num_txq,
				  hw->func_caps.common_cap.num_rxq);

	pf->lan_nb_qps = pf->lan_nb_qp_max;

	ice_init_proto_xtr(dev);

	if (hw->func_caps.fd_fltr_guar > 0 ||
	    hw->func_caps.fd_fltr_best_effort > 0) {
		pf->flags |= ICE_FLAG_FDIR;
		pf->fdir_nb_qps = ICE_DEFAULT_QP_NUM_FDIR;
		pf->lan_nb_qps = pf->lan_nb_qp_max - pf->fdir_nb_qps;
	} else {
		pf->fdir_nb_qps = 0;
	}
	pf->fdir_qp_offset = 0;

	return 0;
}
static struct ice_vsi *
ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_vsi *vsi = NULL;
	struct ice_vsi_ctx vsi_ctx;
	int ret;
	struct rte_ether_addr broadcast = {
		.addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff} };
	struct rte_ether_addr mac_addr;
	uint16_t max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
	uint8_t tc_bitmap = 0x1;
	uint16_t cfg;

	/* hw->num_lports = 1 in NIC mode */
	vsi = rte_zmalloc(NULL, sizeof(struct ice_vsi), 0);
	if (!vsi)
		return NULL;

	vsi->idx = pf->next_vsi_idx;
	pf->next_vsi_idx++;
	vsi->type = type;
	vsi->adapter = ICE_PF_TO_ADAPTER(pf);
	vsi->max_macaddrs = ICE_NUM_MACADDR_MAX;
	vsi->vlan_anti_spoof_on = 0;
	vsi->vlan_filter_on = 1;
	TAILQ_INIT(&vsi->mac_list);
	TAILQ_INIT(&vsi->vlan_list);

	/* Be sync with ETH_RSS_RETA_SIZE_x maximum value definition */
	pf->hash_lut_size = hw->func_caps.common_cap.rss_table_size >
			ETH_RSS_RETA_SIZE_512 ? ETH_RSS_RETA_SIZE_512 :
			hw->func_caps.common_cap.rss_table_size;
	pf->flags |= ICE_FLAG_RSS_AQ_CAPABLE;

	memset(&vsi_ctx, 0, sizeof(vsi_ctx));
	switch (type) {
	case ICE_VSI_PF:
		vsi->nb_qps = pf->lan_nb_qps;
		vsi->base_queue = 1;
		ice_vsi_config_default_rss(&vsi_ctx.info);
		vsi_ctx.alloc_from_pool = true;
		vsi_ctx.flags = ICE_AQ_VSI_TYPE_PF;
		/* switch_id is queried by get_switch_config aq, which is done
		 * by ice_init_hw
		 */
		vsi_ctx.info.sw_id = hw->port_info->sw_id;
		vsi_ctx.info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
		/* Allow all untagged or tagged packets */
		vsi_ctx.info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL;
		vsi_ctx.info.vlan_flags |= ICE_AQ_VSI_VLAN_EMOD_NOTHING;
		vsi_ctx.info.q_opt_rss = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF |
					 ICE_AQ_VSI_Q_OPT_RSS_TPLZ;

		/* FDIR */
		cfg = ICE_AQ_VSI_PROP_SECURITY_VALID |
			ICE_AQ_VSI_PROP_FLOW_DIR_VALID;
		vsi_ctx.info.valid_sections |= rte_cpu_to_le_16(cfg);
		cfg = ICE_AQ_VSI_FD_ENABLE | ICE_AQ_VSI_FD_PROG_ENABLE;
		vsi_ctx.info.fd_options = rte_cpu_to_le_16(cfg);
		vsi_ctx.info.max_fd_fltr_dedicated =
			rte_cpu_to_le_16(hw->func_caps.fd_fltr_guar);
		vsi_ctx.info.max_fd_fltr_shared =
			rte_cpu_to_le_16(hw->func_caps.fd_fltr_best_effort);

		/* Enable VLAN/UP trip */
		ret = ice_vsi_config_tc_queue_mapping(vsi,
						      &vsi_ctx.info,
						      ICE_DEFAULT_TCMAP);
		if (ret) {
			PMD_INIT_LOG(ERR,
				     "tc queue mapping with vsi failed, "
				     "err = %d",
				     ret);
			goto fail_mem;
		}

		break;
	case ICE_VSI_CTRL:
		vsi->nb_qps = pf->fdir_nb_qps;
		vsi->base_queue = ICE_FDIR_QUEUE_ID;
		vsi_ctx.alloc_from_pool = true;
		vsi_ctx.flags = ICE_AQ_VSI_TYPE_PF;

		cfg = ICE_AQ_VSI_PROP_FLOW_DIR_VALID;
		vsi_ctx.info.valid_sections |= rte_cpu_to_le_16(cfg);
		cfg = ICE_AQ_VSI_FD_ENABLE | ICE_AQ_VSI_FD_PROG_ENABLE;
		vsi_ctx.info.fd_options = rte_cpu_to_le_16(cfg);
		vsi_ctx.info.sw_id = hw->port_info->sw_id;
		ret = ice_vsi_config_tc_queue_mapping(vsi,
						      &vsi_ctx.info,
						      ICE_DEFAULT_TCMAP);
		if (ret) {
			PMD_INIT_LOG(ERR,
				     "tc queue mapping with vsi failed, "
				     "err = %d",
				     ret);
			goto fail_mem;
		}
		break;
	default:
		/* for other types of VSI */
		PMD_INIT_LOG(ERR, "other types of VSI not supported");
		goto fail_mem;
	}

	/* VF has MSIX interrupt in VF range, don't allocate here */
	if (type == ICE_VSI_PF) {
		ret = ice_res_pool_alloc(&pf->msix_pool,
					 RTE_MIN(vsi->nb_qps,
						 RTE_MAX_RXTX_INTR_VEC_ID));
		if (ret < 0) {
			PMD_INIT_LOG(ERR, "VSI MAIN %d get heap failed %d",
				     vsi->vsi_id, ret);
		}
		vsi->msix_intr = ret;
		vsi->nb_msix = RTE_MIN(vsi->nb_qps, RTE_MAX_RXTX_INTR_VEC_ID);
	} else if (type == ICE_VSI_CTRL) {
		ret = ice_res_pool_alloc(&pf->msix_pool, 1);
		if (ret < 0) {
			PMD_DRV_LOG(ERR, "VSI %d get heap failed %d",
				    vsi->vsi_id, ret);
		}
		vsi->msix_intr = ret;
		vsi->nb_msix = 1;
	} else {
		vsi->msix_intr = 0;
		vsi->nb_msix = 0;
	}
	ret = ice_add_vsi(hw, vsi->idx, &vsi_ctx, NULL);
	if (ret != ICE_SUCCESS) {
		PMD_INIT_LOG(ERR, "add vsi failed, err = %d", ret);
		goto fail_mem;
	}
	/* store vsi information in SW structure */
	vsi->vsi_id = vsi_ctx.vsi_num;
	vsi->info = vsi_ctx.info;
	pf->vsis_allocated = vsi_ctx.vsis_allocd;
	pf->vsis_unallocated = vsi_ctx.vsis_unallocated;

	if (type == ICE_VSI_PF) {
		/* MAC configuration */
		rte_memcpy(pf->dev_addr.addr_bytes,
			   hw->port_info->mac.perm_addr,
			   ETH_ADDR_LEN);

		rte_memcpy(&mac_addr, &pf->dev_addr, RTE_ETHER_ADDR_LEN);
		ret = ice_add_mac_filter(vsi, &mac_addr);
		if (ret != ICE_SUCCESS)
			PMD_INIT_LOG(ERR, "Failed to add dflt MAC filter");

		rte_memcpy(&mac_addr, &broadcast, RTE_ETHER_ADDR_LEN);
		ret = ice_add_mac_filter(vsi, &mac_addr);
		if (ret != ICE_SUCCESS)
			PMD_INIT_LOG(ERR, "Failed to add MAC filter");
	}

	/* At the beginning, only TC0. */
	/* What we need here is the maximum number of the TX queues.
	 * Currently vsi->nb_qps means it.
	 * Correct it if any change.
	 */
	max_txqs[0] = vsi->nb_qps;
	ret = ice_cfg_vsi_lan(hw->port_info, vsi->idx,
			      tc_bitmap, max_txqs);
	if (ret != ICE_SUCCESS)
		PMD_INIT_LOG(ERR, "Failed to config vsi sched");

	return vsi;
fail_mem:
	rte_free(vsi);
	pf->next_vsi_idx--;
	return NULL;
}
static int
ice_send_driver_ver(struct ice_hw *hw)
{
	struct ice_driver_ver dv;

	/* we don't have a driver version, so use 0 as a dummy */
	dv.major_ver = 0;
	dv.minor_ver = 0;
	dv.build_ver = 0;
	dv.subbuild_ver = 0;
	strncpy((char *)dv.driver_string, "dpdk", sizeof(dv.driver_string));

	return ice_aq_send_driver_ver(hw, &dv, NULL);
}
static int
ice_pf_setup(struct ice_pf *pf)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_vsi *vsi;
	uint16_t unused;

	/* Clear all stats counters */
	pf->offset_loaded = FALSE;
	memset(&pf->stats, 0, sizeof(struct ice_hw_port_stats));
	memset(&pf->stats_offset, 0, sizeof(struct ice_hw_port_stats));
	memset(&pf->internal_stats, 0, sizeof(struct ice_eth_stats));
	memset(&pf->internal_stats_offset, 0, sizeof(struct ice_eth_stats));

	/* force guaranteed filter pool for PF */
	ice_alloc_fd_guar_item(hw, &unused,
			       hw->func_caps.fd_fltr_guar);
	/* force shared filter pool for PF */
	ice_alloc_fd_shrd_item(hw, &unused,
			       hw->func_caps.fd_fltr_best_effort);

	vsi = ice_setup_vsi(pf, ICE_VSI_PF);
	if (!vsi) {
		PMD_INIT_LOG(ERR, "Failed to add vsi for PF");
		return -EINVAL;
	}

	pf->main_vsi = vsi;

	return 0;
}
/* PCIe configuration space setting */
#define PCI_CFG_SPACE_SIZE	256
#define PCI_CFG_SPACE_EXP_SIZE	4096
#define PCI_EXT_CAP_ID(header)	(int)((header) & 0x0000ffff)
#define PCI_EXT_CAP_NEXT(header) (((header) >> 20) & 0xffc)
#define PCI_EXT_CAP_ID_DSN	0x03
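
/*
 * Layout of the 32-bit PCIe extended capability header assumed by the
 * macros above: bits 15:0 hold the capability ID, bits 19:16 the
 * capability version, and bits 31:20 the offset of the next capability
 * (dword aligned, hence the 0xffc mask).
 */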
static int
ice_pci_find_next_ext_capability(struct rte_pci_device *dev, int cap)
{
	uint32_t header;
	int ttl;
	int pos = PCI_CFG_SPACE_SIZE;

	/* minimum 8 bytes per capability */
	ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;

	if (rte_pci_read_config(dev, &header, 4, pos) < 0) {
		PMD_INIT_LOG(ERR, "ice error reading extended capabilities\n");
		return -1;
	}

	/*
	 * If we have no capabilities, this is indicated by cap ID,
	 * cap version and next pointer all being 0.
	 */
	if (header == 0)
		return 0;

	while (ttl-- > 0) {
		if (PCI_EXT_CAP_ID(header) == cap)
			return pos;

		pos = PCI_EXT_CAP_NEXT(header);

		if (pos < PCI_CFG_SPACE_SIZE)
			break;

		if (rte_pci_read_config(dev, &header, 4, pos) < 0) {
			PMD_INIT_LOG(ERR,
				     "ice error reading extended capabilities\n");
			return -1;
		}
	}

	return 0;
}
/*
 * Extract device serial number from PCIe Configuration Space and
 * determine the pkg file path according to the DSN.
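 *
 * Search order, as implemented below (first existing file wins):
 *   1) ICE_PKG_FILE_SEARCH_PATH_UPDATES + "ice-<dsn>.pkg"
 *   2) ICE_PKG_FILE_SEARCH_PATH_DEFAULT + "ice-<dsn>.pkg"
 *   3) ICE_PKG_FILE_UPDATES
 *   4) ICE_PKG_FILE_DEFAULT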
 */
static int
ice_pkg_file_search_path(struct rte_pci_device *pci_dev, char *pkg_file)
{
	int pos;
	char opt_ddp_filename[ICE_MAX_PKG_FILENAME_SIZE];
	uint32_t dsn_low, dsn_high;
	memset(opt_ddp_filename, 0, ICE_MAX_PKG_FILENAME_SIZE);

	pos = ice_pci_find_next_ext_capability(pci_dev, PCI_EXT_CAP_ID_DSN);

	if (pos) {
		rte_pci_read_config(pci_dev, &dsn_low, 4, pos + 4);
		rte_pci_read_config(pci_dev, &dsn_high, 4, pos + 8);
		snprintf(opt_ddp_filename, ICE_MAX_PKG_FILENAME_SIZE,
			 "ice-%08x%08x.pkg", dsn_high, dsn_low);
	} else {
		PMD_INIT_LOG(ERR, "Failed to read device serial number\n");
		goto fail_dsn;
	}

	strncpy(pkg_file, ICE_PKG_FILE_SEARCH_PATH_UPDATES,
		ICE_MAX_PKG_FILENAME_SIZE);
	if (!access(strcat(pkg_file, opt_ddp_filename), 0))
		return 0;

	strncpy(pkg_file, ICE_PKG_FILE_SEARCH_PATH_DEFAULT,
		ICE_MAX_PKG_FILENAME_SIZE);
	if (!access(strcat(pkg_file, opt_ddp_filename), 0))
		return 0;

fail_dsn:
	strncpy(pkg_file, ICE_PKG_FILE_UPDATES, ICE_MAX_PKG_FILENAME_SIZE);
	if (!access(pkg_file, 0))
		return 0;
	strncpy(pkg_file, ICE_PKG_FILE_DEFAULT, ICE_MAX_PKG_FILENAME_SIZE);
	return 0;
}
static enum ice_pkg_type
ice_load_pkg_type(struct ice_hw *hw)
{
	enum ice_pkg_type package_type;

	/* store the activated package type (OS default or Comms) */
	if (!strncmp((char *)hw->active_pkg_name, ICE_OS_DEFAULT_PKG_NAME,
		     ICE_PKG_NAME_SIZE))
		package_type = ICE_PKG_TYPE_OS_DEFAULT;
	else if (!strncmp((char *)hw->active_pkg_name, ICE_COMMS_PKG_NAME,
			  ICE_PKG_NAME_SIZE))
		package_type = ICE_PKG_TYPE_COMMS;
	else
		package_type = ICE_PKG_TYPE_UNKNOWN;

	PMD_INIT_LOG(NOTICE, "Active package is: %d.%d.%d.%d, %s",
		     hw->active_pkg_ver.major, hw->active_pkg_ver.minor,
		     hw->active_pkg_ver.update, hw->active_pkg_ver.draft,
		     hw->active_pkg_name);

	return package_type;
}
static int ice_load_pkg(struct rte_eth_dev *dev)
{
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	char pkg_file[ICE_MAX_PKG_FILENAME_SIZE];
	int err;
	uint8_t *buf;
	int buf_len;
	FILE *file;
	struct stat fstat;
	struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
	struct ice_adapter *ad =
		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);

	ice_pkg_file_search_path(pci_dev, pkg_file);

	file = fopen(pkg_file, "rb");
	if (!file)  {
		PMD_INIT_LOG(ERR, "failed to open file: %s\n", pkg_file);
		return -1;
	}

	err = stat(pkg_file, &fstat);
	if (err) {
		PMD_INIT_LOG(ERR, "failed to get file stats\n");
		fclose(file);
		return err;
	}

	buf_len = fstat.st_size;
	buf = rte_malloc(NULL, buf_len, 0);

	if (!buf) {
		PMD_INIT_LOG(ERR, "failed to allocate buf of size %d for package\n",
			     buf_len);
		fclose(file);
		return -1;
	}

	err = fread(buf, buf_len, 1, file);
	if (err != 1) {
		PMD_INIT_LOG(ERR, "failed to read package data\n");
		fclose(file);
		err = -1;
		goto fail_exit;
	}

	fclose(file);

	err = ice_copy_and_init_pkg(hw, buf, buf_len);
	if (err) {
		PMD_INIT_LOG(ERR, "ice_copy_and_init_hw failed: %d\n", err);
		goto fail_exit;
	}

	/* store the loaded pkg type info */
	ad->active_pkg_type = ice_load_pkg_type(hw);

	err = ice_init_hw_tbls(hw);
	if (err) {
		PMD_INIT_LOG(ERR, "ice_init_hw_tbls failed: %d\n", err);
		goto fail_init_tbls;
	}

	return 0;

fail_init_tbls:
	rte_free(hw->pkg_copy);
fail_exit:
	rte_free(buf);
	return err;
}
static void
ice_base_queue_get(struct ice_pf *pf)
{
	uint32_t reg;
	struct ice_hw *hw = ICE_PF_TO_HW(pf);

	reg = ICE_READ_REG(hw, PFLAN_RX_QALLOC);
	if (reg & PFLAN_RX_QALLOC_VALID_M) {
		pf->base_queue = reg & PFLAN_RX_QALLOC_FIRSTQ_M;
	} else {
		PMD_INIT_LOG(WARNING, "Failed to get Rx base queue"
					" index");
	}
}
static int
parse_bool(const char *key, const char *value, void *args)
{
	int *i = (int *)args;
	char *end;
	int num;

	num = strtoul(value, &end, 10);

	if (num != 0 && num != 1) {
		PMD_DRV_LOG(WARNING, "invalid value:\"%s\" for key:\"%s\", "
			"value must be 0 or 1",
			value, key);
		return -1;
	}

	*i = num;
	return 0;
}
static int ice_parse_devargs(struct rte_eth_dev *dev)
{
	struct ice_adapter *ad =
		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct rte_devargs *devargs = dev->device->devargs;
	struct rte_kvargs *kvlist;
	int ret;

	if (devargs == NULL)
		return 0;

	kvlist = rte_kvargs_parse(devargs->args, ice_valid_args);
	if (kvlist == NULL) {
		PMD_INIT_LOG(ERR, "Invalid kvargs key\n");
		return -EINVAL;
	}

	ad->devargs.proto_xtr_dflt = PROTO_XTR_NONE;
	memset(ad->devargs.proto_xtr, PROTO_XTR_NONE,
	       sizeof(ad->devargs.proto_xtr));

	ret = rte_kvargs_process(kvlist, ICE_PROTO_XTR_ARG,
				 &handle_proto_xtr_arg, &ad->devargs);
	if (ret)
		goto bail;

	ret = rte_kvargs_process(kvlist, ICE_SAFE_MODE_SUPPORT_ARG,
				 &parse_bool, &ad->devargs.safe_mode_support);
	if (ret)
		goto bail;

	ret = rte_kvargs_process(kvlist, ICE_PIPELINE_MODE_SUPPORT_ARG,
				 &parse_bool, &ad->devargs.pipe_mode_support);
	if (ret)
		goto bail;

bail:
	rte_kvargs_free(kvlist);
	return ret;
}
/* Forward LLDP packets to the default VSI by setting switch rules */
static int
ice_vsi_config_sw_lldp(struct ice_vsi *vsi, bool on)
{
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	struct ice_fltr_list_entry *s_list_itr = NULL;
	struct LIST_HEAD_TYPE list_head;
	int ret = 0;

	INIT_LIST_HEAD(&list_head);

	s_list_itr = (struct ice_fltr_list_entry *)
			ice_malloc(hw, sizeof(*s_list_itr));
	if (!s_list_itr)
		return -ENOMEM;
	s_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_ETHERTYPE;
	s_list_itr->fltr_info.vsi_handle = vsi->idx;
	s_list_itr->fltr_info.l_data.ethertype_mac.ethertype =
			RTE_ETHER_TYPE_LLDP;
	s_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
	s_list_itr->fltr_info.flag = ICE_FLTR_RX;
	s_list_itr->fltr_info.src_id = ICE_SRC_ID_LPORT;
	LIST_ADD(&s_list_itr->list_entry, &list_head);
	if (on)
		ret = ice_add_eth_mac(hw, &list_head);
	else
		ret = ice_remove_eth_mac(hw, &list_head);

	rte_free(s_list_itr);
	return ret;
}
static enum ice_status
ice_get_hw_res(struct ice_hw *hw, uint16_t res_type,
		uint16_t num, uint16_t desc_id,
		uint16_t *prof_buf, uint16_t *num_prof)
{
	struct ice_aqc_get_allocd_res_desc_resp *resp_buf;
	int ret;
	uint16_t buf_len;
	bool res_shared = 1;
	struct ice_aq_desc aq_desc;
	struct ice_sq_cd *cd = NULL;
	struct ice_aqc_get_allocd_res_desc *cmd =
			&aq_desc.params.get_res_desc;

	buf_len = sizeof(resp_buf->elem) * num;
	resp_buf = ice_malloc(hw, buf_len);
	if (!resp_buf)
		return -ENOMEM;

	ice_fill_dflt_direct_cmd_desc(&aq_desc,
			ice_aqc_opc_get_allocd_res_desc);

	cmd->ops.cmd.res = CPU_TO_LE16(((res_type << ICE_AQC_RES_TYPE_S) &
				ICE_AQC_RES_TYPE_M) | (res_shared ?
				ICE_AQC_RES_TYPE_FLAG_SHARED : 0));
	cmd->ops.cmd.first_desc = CPU_TO_LE16(desc_id);

	ret = ice_aq_send_cmd(hw, &aq_desc, resp_buf, buf_len, cd);
	if (!ret)
		*num_prof = LE16_TO_CPU(cmd->ops.resp.num_desc);
	else
		goto exit;

	ice_memcpy(prof_buf, resp_buf->elem, sizeof(resp_buf->elem) *
			(*num_prof), ICE_NONDMA_TO_NONDMA);

exit:
	rte_free(resp_buf);
	return ret;
}
static int
ice_cleanup_resource(struct ice_hw *hw, uint16_t res_type)
{
	int ret;
	uint16_t prof_id;
	uint16_t prof_buf[ICE_MAX_RES_DESC_NUM];
	uint16_t first_desc = 1;
	uint16_t num_prof = 0;

	ret = ice_get_hw_res(hw, res_type, ICE_MAX_RES_DESC_NUM,
			first_desc, prof_buf, &num_prof);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to get fxp resource");
		return ret;
	}

	for (prof_id = 0; prof_id < num_prof; prof_id++) {
		ret = ice_free_hw_res(hw, res_type, 1, &prof_buf[prof_id]);
		if (ret) {
			PMD_INIT_LOG(ERR, "Failed to free fxp resource");
			return ret;
		}
	}
	return 0;
}
static int
ice_reset_fxp_resource(struct ice_hw *hw)
{
	int ret;

	ret = ice_cleanup_resource(hw, ICE_AQC_RES_TYPE_FD_PROF_BLDR_PROFID);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to clean up fdir resource");
		return ret;
	}

	ret = ice_cleanup_resource(hw, ICE_AQC_RES_TYPE_HASH_PROF_BLDR_PROFID);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to clean up rss resource");
		return ret;
	}

	return 0;
}
static int
ice_dev_init(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev;
	struct rte_intr_handle *intr_handle;
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_adapter *ad =
		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct ice_vsi *vsi;
	int ret;

	dev->dev_ops = &ice_eth_dev_ops;
	dev->rx_pkt_burst = ice_recv_pkts;
	dev->tx_pkt_burst = ice_xmit_pkts;
	dev->tx_pkt_prepare = ice_prep_pkts;

	/* for secondary processes, we don't initialise any further as primary
	 * has already done this work.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		ice_set_rx_function(dev);
		ice_set_tx_function(dev);
		return 0;
	}

	ice_set_default_ptype_table(dev);
	pci_dev = RTE_DEV_TO_PCI(dev->device);
	intr_handle = &pci_dev->intr_handle;

	pf->adapter = ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	pf->adapter->eth_dev = dev;
	pf->dev_data = dev->data;
	hw->back = pf->adapter;
	hw->hw_addr = (uint8_t *)pci_dev->mem_resource[0].addr;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->device_id = pci_dev->id.device_id;
	hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
	hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
	hw->bus.device = pci_dev->addr.devid;
	hw->bus.func = pci_dev->addr.function;

	ret = ice_parse_devargs(dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to parse devargs");
		return -EINVAL;
	}

	ice_init_controlq_parameter(hw);

	ret = ice_init_hw(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to initialize HW");
		return -EINVAL;
	}

	ret = ice_load_pkg(dev);
	if (ret) {
		if (ad->devargs.safe_mode_support == 0) {
			PMD_INIT_LOG(ERR, "Failed to load the DDP package; "
					"use safe-mode-support=1 to enter Safe Mode");
			return ret;
		}

		PMD_INIT_LOG(WARNING, "Failed to load the DDP package, "
					"entering Safe Mode");
		ad->is_safe_mode = 1;
	}

	PMD_INIT_LOG(INFO, "FW %d.%d.%05d API %d.%d",
		     hw->fw_maj_ver, hw->fw_min_ver, hw->fw_build,
		     hw->api_maj_ver, hw->api_min_ver);

	ice_pf_sw_init(dev);
	ret = ice_init_mac_address(dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to initialize mac address");
		goto err_init_mac;
	}

	/* Pass the information to the rte_eth_dev_close() that it should also
	 * release the private port resources.
	 */
	dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;

	ret = ice_res_pool_init(&pf->msix_pool, 1,
				hw->func_caps.common_cap.num_msix_vectors - 1);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to init MSIX pool");
		goto err_msix_pool_init;
	}

	ret = ice_pf_setup(pf);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to setup PF");
		goto err_pf_setup;
	}

	ret = ice_send_driver_ver(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to send driver version");
		goto err_pf_setup;
	}

	vsi = pf->main_vsi;

	/* Disable double vlan by default */
	ice_vsi_config_double_vlan(vsi, FALSE);

	ret = ice_aq_stop_lldp(hw, TRUE, FALSE, NULL);
	if (ret != ICE_SUCCESS)
		PMD_INIT_LOG(DEBUG, "lldp has already stopped\n");
	ret = ice_init_dcb(hw, TRUE);
	if (ret != ICE_SUCCESS)
		PMD_INIT_LOG(DEBUG, "Failed to init DCB\n");
	/* Forward LLDP packets to default VSI */
	ret = ice_vsi_config_sw_lldp(vsi, TRUE);
	if (ret != ICE_SUCCESS)
		PMD_INIT_LOG(DEBUG, "Failed to cfg lldp\n");

	/* register callback func to eal lib */
	rte_intr_callback_register(intr_handle,
				   ice_interrupt_handler, dev);

	ice_pf_enable_irq0(hw);

	/* enable uio intr after callback register */
	rte_intr_enable(intr_handle);

	/* get base queue pairs index in the device */
	ice_base_queue_get(pf);

	ret = ice_flow_init(ad);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to initialize flow");
		return ret;
	}

	ret = ice_reset_fxp_resource(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to reset fxp resource");
		return ret;
	}

	return 0;

err_pf_setup:
	ice_res_pool_destroy(&pf->msix_pool);
err_msix_pool_init:
	rte_free(dev->data->mac_addrs);
	dev->data->mac_addrs = NULL;
err_init_mac:
	ice_sched_cleanup_all(hw);
	rte_free(hw->port_info);
	ice_shutdown_all_ctrlq(hw);
	rte_free(pf->proto_xtr);

	return ret;
}
static int
ice_release_vsi(struct ice_vsi *vsi)
{
	struct ice_hw *hw;
	struct ice_vsi_ctx vsi_ctx;
	enum ice_status ret;

	if (!vsi)
		return 0;

	hw = ICE_VSI_TO_HW(vsi);

	ice_remove_all_mac_vlan_filters(vsi);

	memset(&vsi_ctx, 0, sizeof(vsi_ctx));

	vsi_ctx.vsi_num = vsi->vsi_id;
	vsi_ctx.info = vsi->info;
	ret = ice_free_vsi(hw, vsi->idx, &vsi_ctx, false, NULL);
	if (ret != ICE_SUCCESS) {
		PMD_INIT_LOG(ERR, "Failed to free vsi by aq, %u", vsi->vsi_id);
		rte_free(vsi);
		return -1;
	}

	rte_free(vsi);
	return 0;
}
static void
ice_vsi_disable_queues_intr(struct ice_vsi *vsi)
{
	struct rte_eth_dev *dev = vsi->adapter->eth_dev;
	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	uint16_t msix_intr, i;

	/* disable interrupt and also clear all the existing config */
	for (i = 0; i < vsi->nb_qps; i++) {
		ICE_WRITE_REG(hw, QINT_TQCTL(vsi->base_queue + i), 0);
		ICE_WRITE_REG(hw, QINT_RQCTL(vsi->base_queue + i), 0);
		rte_wmb();
	}

	if (rte_intr_allow_others(intr_handle))
		/* vfio-pci */
		for (i = 0; i < vsi->nb_msix; i++) {
			msix_intr = vsi->msix_intr + i;
			ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr),
				      GLINT_DYN_CTL_WB_ON_ITR_M);
		}
	else
		/* igb_uio */
		ICE_WRITE_REG(hw, GLINT_DYN_CTL(0), GLINT_DYN_CTL_WB_ON_ITR_M);
}
static void
ice_dev_stop(struct rte_eth_dev *dev)
{
	struct rte_eth_dev_data *data = dev->data;
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_vsi *main_vsi = pf->main_vsi;
	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	uint16_t i;

	/* avoid stopping again */
	if (pf->adapter_stopped)
		return;

	/* stop and clear all Rx queues */
	for (i = 0; i < data->nb_rx_queues; i++)
		ice_rx_queue_stop(dev, i);

	/* stop and clear all Tx queues */
	for (i = 0; i < data->nb_tx_queues; i++)
		ice_tx_queue_stop(dev, i);

	/* disable all queue interrupts */
	ice_vsi_disable_queues_intr(main_vsi);

	if (pf->fdir.fdir_vsi)
		ice_vsi_disable_queues_intr(pf->fdir.fdir_vsi);

	/* Clear all queues and release mbufs */
	ice_clear_queues(dev);

	ice_dev_set_link_down(dev);

	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
	if (intr_handle->intr_vec) {
		rte_free(intr_handle->intr_vec);
		intr_handle->intr_vec = NULL;
	}

	pf->adapter_stopped = true;
}
static void
ice_dev_close(struct rte_eth_dev *dev)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct ice_adapter *ad =
		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);

	/* Since stop will make the link go down, then the link event will be
	 * triggered, disable the irq first to avoid the port_info etc.
	 * resources deallocation causing the interrupt service thread
	 * to crash.
	 */
	ice_pf_disable_irq0(hw);

	ice_dev_stop(dev);

	ice_flow_uninit(ad);

	/* release all queue resource */
	ice_free_queues(dev);

	ice_res_pool_destroy(&pf->msix_pool);
	ice_release_vsi(pf->main_vsi);
	ice_sched_cleanup_all(hw);
	rte_free(hw->port_info);
	hw->port_info = NULL;
	ice_shutdown_all_ctrlq(hw);
	rte_free(pf->proto_xtr);
	pf->proto_xtr = NULL;

	dev->dev_ops = NULL;
	dev->rx_pkt_burst = NULL;
	dev->tx_pkt_burst = NULL;

	rte_free(dev->data->mac_addrs);
	dev->data->mac_addrs = NULL;

	/* disable uio intr before callback unregister */
	rte_intr_disable(intr_handle);

	/* unregister callback func from eal lib */
	rte_intr_callback_unregister(intr_handle,
				     ice_interrupt_handler, dev);
}
static int
ice_dev_uninit(struct rte_eth_dev *dev)
{
	ice_dev_close(dev);

	return 0;
}
static int
ice_dev_configure(struct rte_eth_dev *dev)
{
	struct ice_adapter *ad =
		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);

	/* Initialize to TRUE. If any Rx queue doesn't meet the
	 * bulk allocation or vector Rx preconditions we will reset it.
	 */
	ad->rx_bulk_alloc_allowed = true;
	ad->tx_simple_allowed = true;

	return 0;
}
2366 static int ice_init_rss(struct ice_pf *pf)
2368 struct ice_hw *hw = ICE_PF_TO_HW(pf);
2369 struct ice_vsi *vsi = pf->main_vsi;
2370 struct rte_eth_dev *dev = pf->adapter->eth_dev;
2371 struct rte_eth_rss_conf *rss_conf;
2372 struct ice_aqc_get_set_rss_keys key;
2375 bool is_safe_mode = pf->adapter->is_safe_mode;
2378 rss_conf = &dev->data->dev_conf.rx_adv_conf.rss_conf;
2379 nb_q = dev->data->nb_rx_queues;
2380 vsi->rss_key_size = ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE;
2381 vsi->rss_lut_size = pf->hash_lut_size;
2384 PMD_DRV_LOG(WARNING, "RSS is not supported in safe mode\n");
2389 vsi->rss_key = rte_zmalloc(NULL,
2390 vsi->rss_key_size, 0);
2392 vsi->rss_lut = rte_zmalloc(NULL,
2393 vsi->rss_lut_size, 0);
2395 /* configure RSS key */
2396 if (!rss_conf->rss_key) {
2397 /* Calculate the default hash key */
for (i = 0; i < vsi->rss_key_size; i++)
2399 vsi->rss_key[i] = (uint8_t)rte_rand();
2401 rte_memcpy(vsi->rss_key, rss_conf->rss_key,
2402 RTE_MIN(rss_conf->rss_key_len,
2403 vsi->rss_key_size));
2405 rte_memcpy(key.standard_rss_key, vsi->rss_key, vsi->rss_key_size);
2406 ret = ice_aq_set_rss_key(hw, vsi->idx, &key);
/* init the RSS LUT */
2411 for (i = 0; i < vsi->rss_lut_size; i++)
2412 vsi->rss_lut[i] = i % nb_q;
2414 ret = ice_aq_set_rss_lut(hw, vsi->idx,
2415 ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF,
2416 vsi->rss_lut, vsi->rss_lut_size);
/* Select the symmetric Toeplitz hash scheme in VSIQF_HASH_CTL. */
2421 reg = ICE_READ_REG(hw, VSIQF_HASH_CTL(vsi->vsi_id));
2422 reg = (reg & (~VSIQF_HASH_CTL_HASH_SCHEME_M)) |
2423 (1 << VSIQF_HASH_CTL_HASH_SCHEME_S);
2424 ICE_WRITE_REG(hw, VSIQF_HASH_CTL(vsi->vsi_id), reg);
2426 /* configure RSS for IPv4 with input set IPv4 src/dst */
2427 ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV4,
2428 ICE_FLOW_SEG_HDR_IPV4, 0);
2430 PMD_DRV_LOG(ERR, "%s IPV4 rss flow fail %d", __func__, ret);
2432 /* configure RSS for IPv6 with input set IPv6 src/dst */
2433 ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV6,
2434 ICE_FLOW_SEG_HDR_IPV6, 0);
2436 PMD_DRV_LOG(ERR, "%s IPV6 rss flow fail %d", __func__, ret);
2438 /* configure RSS for tcp6 with input set IPv6 src/dst, TCP src/dst */
2439 ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_TCP_IPV6,
2440 ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6, 0);
2442 PMD_DRV_LOG(ERR, "%s TCP_IPV6 rss flow fail %d", __func__, ret);
2444 /* configure RSS for udp6 with input set IPv6 src/dst, UDP src/dst */
2445 ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_UDP_IPV6,
2446 ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV6, 0);
2448 PMD_DRV_LOG(ERR, "%s UDP_IPV6 rss flow fail %d", __func__, ret);
2450 /* configure RSS for sctp6 with input set IPv6 src/dst */
2451 ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV6,
2452 ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV6, 0);
2454 PMD_DRV_LOG(ERR, "%s SCTP_IPV6 rss flow fail %d",
2457 /* configure RSS for tcp4 with input set IP src/dst, TCP src/dst */
2458 ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_TCP_IPV4,
2459 ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4, 0);
2461 PMD_DRV_LOG(ERR, "%s TCP_IPV4 rss flow fail %d", __func__, ret);
2463 /* configure RSS for udp4 with input set IP src/dst, UDP src/dst */
2464 ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_UDP_IPV4,
2465 ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4, 0);
2467 PMD_DRV_LOG(ERR, "%s UDP_IPV4 rss flow fail %d", __func__, ret);
2469 /* configure RSS for sctp4 with input set IP src/dst */
2470 ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV4,
2471 ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4, 0);
2473 PMD_DRV_LOG(ERR, "%s SCTP_IPV4 rss flow fail %d",
2476 /* configure RSS for gtpu with input set TEID */
2477 ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_GTP_U_IPV4_TEID,
2478 ICE_FLOW_SEG_HDR_GTPU_IP, 0);
2480 PMD_DRV_LOG(ERR, "%s GTPU_TEID rss flow fail %d",
2484 * configure RSS for pppoe/pppod with input set
2485 * Source MAC and Session ID
2487 ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_PPPOE_SESS_ID_ETH,
2488 ICE_FLOW_SEG_HDR_PPPOE, 0);
2490 PMD_DRV_LOG(ERR, "%s PPPoE/PPPoD_SessionID rss flow fail %d",
2497 __vsi_queues_bind_intr(struct ice_vsi *vsi, uint16_t msix_vect,
2498 int base_queue, int nb_queue)
2500 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
2501 uint32_t val, val_tx;
2504 for (i = 0; i < nb_queue; i++) {
2506 val = (msix_vect & QINT_RQCTL_MSIX_INDX_M) |
(0 << QINT_RQCTL_ITR_INDX_S) | QINT_RQCTL_CAUSE_ENA_M;
2508 val_tx = (msix_vect & QINT_TQCTL_MSIX_INDX_M) |
(0 << QINT_TQCTL_ITR_INDX_S) | QINT_TQCTL_CAUSE_ENA_M;
2511 PMD_DRV_LOG(INFO, "queue %d is binding to vect %d",
2512 base_queue + i, msix_vect);
2513 /* set ITR0 value */
2514 ICE_WRITE_REG(hw, GLINT_ITR(0, msix_vect), 0x10);
2515 ICE_WRITE_REG(hw, QINT_RQCTL(base_queue + i), val);
2516 ICE_WRITE_REG(hw, QINT_TQCTL(base_queue + i), val_tx);
2521 ice_vsi_queues_bind_intr(struct ice_vsi *vsi)
2523 struct rte_eth_dev *dev = vsi->adapter->eth_dev;
2524 struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
2525 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2526 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
2527 uint16_t msix_vect = vsi->msix_intr;
2528 uint16_t nb_msix = RTE_MIN(vsi->nb_msix, intr_handle->nb_efd);
2529 uint16_t queue_idx = 0;
2533 /* clear Rx/Tx queue interrupt */
2534 for (i = 0; i < vsi->nb_used_qps; i++) {
2535 ICE_WRITE_REG(hw, QINT_TQCTL(vsi->base_queue + i), 0);
2536 ICE_WRITE_REG(hw, QINT_RQCTL(vsi->base_queue + i), 0);
2539 /* PF bind interrupt */
2540 if (rte_intr_dp_is_en(intr_handle)) {
2545 for (i = 0; i < vsi->nb_used_qps; i++) {
2547 if (!rte_intr_allow_others(intr_handle))
2548 msix_vect = ICE_MISC_VEC_ID;
/* UIO: map all queues to one msix_vect */
2551 __vsi_queues_bind_intr(vsi, msix_vect,
2552 vsi->base_queue + i,
2553 vsi->nb_used_qps - i);
2555 for (; !!record && i < vsi->nb_used_qps; i++)
2556 intr_handle->intr_vec[queue_idx + i] =
2561 /* vfio 1:1 queue/msix_vect mapping */
2562 __vsi_queues_bind_intr(vsi, msix_vect,
2563 vsi->base_queue + i, 1);
2566 intr_handle->intr_vec[queue_idx + i] = msix_vect;
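/* Enable the queue interrupts: one GLINT_DYN_CTL write per vector when
* the interrupt handle allows dedicated vectors, otherwise a single
* write to the misc vector 0.
*/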
2574 ice_vsi_enable_queues_intr(struct ice_vsi *vsi)
2576 struct rte_eth_dev *dev = vsi->adapter->eth_dev;
2577 struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
2578 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2579 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
2580 uint16_t msix_intr, i;
2582 if (rte_intr_allow_others(intr_handle))
2583 for (i = 0; i < vsi->nb_used_qps; i++) {
2584 msix_intr = vsi->msix_intr + i;
2585 ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr),
2586 GLINT_DYN_CTL_INTENA_M |
2587 GLINT_DYN_CTL_CLEARPBA_M |
2588 GLINT_DYN_CTL_ITR_INDX_M |
2589 GLINT_DYN_CTL_WB_ON_ITR_M);
2592 ICE_WRITE_REG(hw, GLINT_DYN_CTL(0),
2593 GLINT_DYN_CTL_INTENA_M |
2594 GLINT_DYN_CTL_CLEARPBA_M |
2595 GLINT_DYN_CTL_ITR_INDX_M |
2596 GLINT_DYN_CTL_WB_ON_ITR_M);
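/* Set up Rx queue interrupts: allocate event fds and the
* queue-to-vector map, bind the main VSI (and FDIR VSI, if present)
* queues to MSI-X vectors and enable them.
*/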
2600 ice_rxq_intr_setup(struct rte_eth_dev *dev)
2602 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2603 struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
2604 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2605 struct ice_vsi *vsi = pf->main_vsi;
2606 uint32_t intr_vector = 0;
2608 rte_intr_disable(intr_handle);
2610 /* check and configure queue intr-vector mapping */
2611 if ((rte_intr_cap_multiple(intr_handle) ||
2612 !RTE_ETH_DEV_SRIOV(dev).active) &&
2613 dev->data->dev_conf.intr_conf.rxq != 0) {
2614 intr_vector = dev->data->nb_rx_queues;
2615 if (intr_vector > ICE_MAX_INTR_QUEUE_NUM) {
2616 PMD_DRV_LOG(ERR, "At most %d intr queues supported",
2617 ICE_MAX_INTR_QUEUE_NUM);
2620 if (rte_intr_efd_enable(intr_handle, intr_vector))
2624 if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
2625 intr_handle->intr_vec =
2626 rte_zmalloc(NULL, dev->data->nb_rx_queues * sizeof(int),
2628 if (!intr_handle->intr_vec) {
2630 "Failed to allocate %d rx_queues intr_vec",
2631 dev->data->nb_rx_queues);
2636 /* Map queues with MSIX interrupt */
2637 vsi->nb_used_qps = dev->data->nb_rx_queues;
2638 ice_vsi_queues_bind_intr(vsi);
2640 /* Enable interrupts for all the queues */
2641 ice_vsi_enable_queues_intr(vsi);
2643 /* Enable FDIR MSIX interrupt */
2644 if (pf->fdir.fdir_vsi) {
2645 ice_vsi_queues_bind_intr(pf->fdir.fdir_vsi);
2646 ice_vsi_enable_queues_intr(pf->fdir.fdir_vsi);
2649 rte_intr_enable(intr_handle);
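/* Start the device: program and start all Tx and Rx queues, initialize
* RSS, select the Rx/Tx burst functions, apply the VLAN offloads, set
* up Rx interrupts, enable broadcast, bring the link up and program
* the max frame size.
*/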
2655 ice_dev_start(struct rte_eth_dev *dev)
2657 struct rte_eth_dev_data *data = dev->data;
2658 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2659 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2660 struct ice_vsi *vsi = pf->main_vsi;
2661 uint16_t nb_rxq = 0;
2663 uint16_t max_frame_size;
2666 /* program Tx queues' context in hardware */
2667 for (nb_txq = 0; nb_txq < data->nb_tx_queues; nb_txq++) {
2668 ret = ice_tx_queue_start(dev, nb_txq);
2670 PMD_DRV_LOG(ERR, "fail to start Tx queue %u", nb_txq);
/* program Rx queues' context in hardware */
2676 for (nb_rxq = 0; nb_rxq < data->nb_rx_queues; nb_rxq++) {
2677 ret = ice_rx_queue_start(dev, nb_rxq);
2679 PMD_DRV_LOG(ERR, "fail to start Rx queue %u", nb_rxq);
2684 ret = ice_init_rss(pf);
2686 PMD_DRV_LOG(ERR, "Failed to enable rss for PF");
2690 ice_set_rx_function(dev);
2691 ice_set_tx_function(dev);
2693 mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
2694 ETH_VLAN_EXTEND_MASK;
2695 ret = ice_vlan_offload_set(dev, mask);
2697 PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
/* enable Rx interrupt and map Rx queues to interrupt vectors */
2702 if (ice_rxq_intr_setup(dev))
2705 /* Enable receiving broadcast packets and transmitting packets */
2706 ret = ice_set_vsi_promisc(hw, vsi->idx,
2707 ICE_PROMISC_BCAST_RX | ICE_PROMISC_BCAST_TX |
2708 ICE_PROMISC_UCAST_TX | ICE_PROMISC_MCAST_TX,
2710 if (ret != ICE_SUCCESS)
2711 PMD_DRV_LOG(INFO, "fail to set vsi broadcast");
2713 ret = ice_aq_set_event_mask(hw, hw->port_info->lport,
2714 ((u16)(ICE_AQ_LINK_EVENT_LINK_FAULT |
2715 ICE_AQ_LINK_EVENT_PHY_TEMP_ALARM |
2716 ICE_AQ_LINK_EVENT_EXCESSIVE_ERRORS |
2717 ICE_AQ_LINK_EVENT_SIGNAL_DETECT |
2718 ICE_AQ_LINK_EVENT_AN_COMPLETED |
2719 ICE_AQ_LINK_EVENT_PORT_TX_SUSPENDED)),
2721 if (ret != ICE_SUCCESS)
2722 PMD_DRV_LOG(WARNING, "Fail to set phy mask");
2724 ice_dev_set_link_up(dev);
/* Call the get_link_info AQ command to enable/disable LSE */
2727 ice_link_update(dev, 0);
2729 pf->adapter_stopped = false;
/* Set the max frame size to the default value */
2732 max_frame_size = pf->dev_data->dev_conf.rxmode.max_rx_pkt_len ?
2733 pf->dev_data->dev_conf.rxmode.max_rx_pkt_len :
/* Set the max frame size in HW */
2737 ice_aq_set_mac_cfg(hw, max_frame_size, NULL);
/* stop the started queues if we failed to start all of them */
2743 for (i = 0; i < nb_rxq; i++)
2744 ice_rx_queue_stop(dev, i);
2746 for (i = 0; i < nb_txq; i++)
2747 ice_tx_queue_stop(dev, i);
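/* Reset the device with a full uninit/init cycle; the reset is
* rejected while SR-IOV is active.
*/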
2753 ice_dev_reset(struct rte_eth_dev *dev)
2757 if (dev->data->sriov.active)
2760 ret = ice_dev_uninit(dev);
2762 PMD_INIT_LOG(ERR, "failed to uninit device, status = %d", ret);
2766 ret = ice_dev_init(dev);
2768 PMD_INIT_LOG(ERR, "failed to init device, status = %d", ret);
2776 ice_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
2778 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2779 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2780 struct ice_vsi *vsi = pf->main_vsi;
2781 struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
2782 bool is_safe_mode = pf->adapter->is_safe_mode;
2786 dev_info->min_rx_bufsize = ICE_BUF_SIZE_MIN;
2787 dev_info->max_rx_pktlen = ICE_FRAME_SIZE_MAX;
2788 dev_info->max_rx_queues = vsi->nb_qps;
2789 dev_info->max_tx_queues = vsi->nb_qps;
2790 dev_info->max_mac_addrs = vsi->max_macaddrs;
2791 dev_info->max_vfs = pci_dev->max_vfs;
2792 dev_info->max_mtu = dev_info->max_rx_pktlen - ICE_ETH_OVERHEAD;
2793 dev_info->min_mtu = RTE_ETHER_MIN_MTU;
2795 dev_info->rx_offload_capa =
2796 DEV_RX_OFFLOAD_VLAN_STRIP |
2797 DEV_RX_OFFLOAD_JUMBO_FRAME |
2798 DEV_RX_OFFLOAD_KEEP_CRC |
2799 DEV_RX_OFFLOAD_SCATTER |
2800 DEV_RX_OFFLOAD_VLAN_FILTER;
2801 dev_info->tx_offload_capa =
2802 DEV_TX_OFFLOAD_VLAN_INSERT |
2803 DEV_TX_OFFLOAD_TCP_TSO |
2804 DEV_TX_OFFLOAD_MULTI_SEGS |
2805 DEV_TX_OFFLOAD_MBUF_FAST_FREE;
2806 dev_info->flow_type_rss_offloads = 0;
2808 if (!is_safe_mode) {
2809 dev_info->rx_offload_capa |=
2810 DEV_RX_OFFLOAD_IPV4_CKSUM |
2811 DEV_RX_OFFLOAD_UDP_CKSUM |
2812 DEV_RX_OFFLOAD_TCP_CKSUM |
2813 DEV_RX_OFFLOAD_QINQ_STRIP |
2814 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
2815 DEV_RX_OFFLOAD_VLAN_EXTEND;
2816 dev_info->tx_offload_capa |=
2817 DEV_TX_OFFLOAD_QINQ_INSERT |
2818 DEV_TX_OFFLOAD_IPV4_CKSUM |
2819 DEV_TX_OFFLOAD_UDP_CKSUM |
2820 DEV_TX_OFFLOAD_TCP_CKSUM |
2821 DEV_TX_OFFLOAD_SCTP_CKSUM |
2822 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
2823 DEV_TX_OFFLOAD_OUTER_UDP_CKSUM;
2824 dev_info->flow_type_rss_offloads |= ICE_RSS_OFFLOAD_ALL;
2827 dev_info->rx_queue_offload_capa = 0;
2828 dev_info->tx_queue_offload_capa = 0;
2830 dev_info->reta_size = pf->hash_lut_size;
2831 dev_info->hash_key_size = (VSIQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
2833 dev_info->default_rxconf = (struct rte_eth_rxconf) {
2835 .pthresh = ICE_DEFAULT_RX_PTHRESH,
2836 .hthresh = ICE_DEFAULT_RX_HTHRESH,
2837 .wthresh = ICE_DEFAULT_RX_WTHRESH,
2839 .rx_free_thresh = ICE_DEFAULT_RX_FREE_THRESH,
2844 dev_info->default_txconf = (struct rte_eth_txconf) {
2846 .pthresh = ICE_DEFAULT_TX_PTHRESH,
2847 .hthresh = ICE_DEFAULT_TX_HTHRESH,
2848 .wthresh = ICE_DEFAULT_TX_WTHRESH,
2850 .tx_free_thresh = ICE_DEFAULT_TX_FREE_THRESH,
2851 .tx_rs_thresh = ICE_DEFAULT_TX_RSBIT_THRESH,
2855 dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
2856 .nb_max = ICE_MAX_RING_DESC,
2857 .nb_min = ICE_MIN_RING_DESC,
2858 .nb_align = ICE_ALIGN_RING_DESC,
2861 dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
2862 .nb_max = ICE_MAX_RING_DESC,
2863 .nb_min = ICE_MIN_RING_DESC,
2864 .nb_align = ICE_ALIGN_RING_DESC,
2867 dev_info->speed_capa = ETH_LINK_SPEED_10M |
2868 ETH_LINK_SPEED_100M |
2870 ETH_LINK_SPEED_2_5G |
2872 ETH_LINK_SPEED_10G |
2873 ETH_LINK_SPEED_20G |
2876 phy_type_low = hw->port_info->phy.phy_type_low;
2877 phy_type_high = hw->port_info->phy.phy_type_high;
2879 if (ICE_PHY_TYPE_SUPPORT_50G(phy_type_low))
2880 dev_info->speed_capa |= ETH_LINK_SPEED_50G;
2882 if (ICE_PHY_TYPE_SUPPORT_100G_LOW(phy_type_low) ||
2883 ICE_PHY_TYPE_SUPPORT_100G_HIGH(phy_type_high))
2884 dev_info->speed_capa |= ETH_LINK_SPEED_100G;
2886 dev_info->nb_rx_queues = dev->data->nb_rx_queues;
2887 dev_info->nb_tx_queues = dev->data->nb_tx_queues;
2889 dev_info->default_rxportconf.burst_size = ICE_RX_MAX_BURST;
2890 dev_info->default_txportconf.burst_size = ICE_TX_MAX_BURST;
2891 dev_info->default_rxportconf.nb_queues = 1;
2892 dev_info->default_txportconf.nb_queues = 1;
2893 dev_info->default_rxportconf.ring_size = ICE_BUF_SIZE_MIN;
2894 dev_info->default_txportconf.ring_size = ICE_BUF_SIZE_MIN;
2900 ice_atomic_read_link_status(struct rte_eth_dev *dev,
2901 struct rte_eth_link *link)
2903 struct rte_eth_link *dst = link;
2904 struct rte_eth_link *src = &dev->data->dev_link;
2906 if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
2907 *(uint64_t *)src) == 0)
2914 ice_atomic_write_link_status(struct rte_eth_dev *dev,
2915 struct rte_eth_link *link)
2917 struct rte_eth_link *dst = &dev->data->dev_link;
2918 struct rte_eth_link *src = link;
2920 if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
2921 *(uint64_t *)src) == 0)
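/* Update the link status. When wait_to_complete is set, poll the
* firmware up to MAX_REPEAT_TIME times at CHECK_INTERVAL ms intervals
* until the link reports up, then translate the AQ link speed into an
* rte_eth speed.
*/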
2928 ice_link_update(struct rte_eth_dev *dev, int wait_to_complete)
2930 #define CHECK_INTERVAL 100 /* 100ms */
2931 #define MAX_REPEAT_TIME 10 /* 1s (10 * 100ms) in total */
2932 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2933 struct ice_link_status link_status;
2934 struct rte_eth_link link, old;
2936 unsigned int rep_cnt = MAX_REPEAT_TIME;
2937 bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false;
2939 memset(&link, 0, sizeof(link));
2940 memset(&old, 0, sizeof(old));
2941 memset(&link_status, 0, sizeof(link_status));
2942 ice_atomic_read_link_status(dev, &old);
2945 /* Get link status information from hardware */
2946 status = ice_aq_get_link_info(hw->port_info, enable_lse,
2947 &link_status, NULL);
2948 if (status != ICE_SUCCESS) {
2949 link.link_speed = ETH_SPEED_NUM_100M;
2950 link.link_duplex = ETH_LINK_FULL_DUPLEX;
2951 PMD_DRV_LOG(ERR, "Failed to get link info");
2955 link.link_status = link_status.link_info & ICE_AQ_LINK_UP;
2956 if (!wait_to_complete || link.link_status)
2959 rte_delay_ms(CHECK_INTERVAL);
2960 } while (--rep_cnt);
2962 if (!link.link_status)
2965 /* Full-duplex operation at all supported speeds */
2966 link.link_duplex = ETH_LINK_FULL_DUPLEX;
2968 /* Parse the link status */
2969 switch (link_status.link_speed) {
2970 case ICE_AQ_LINK_SPEED_10MB:
2971 link.link_speed = ETH_SPEED_NUM_10M;
2973 case ICE_AQ_LINK_SPEED_100MB:
2974 link.link_speed = ETH_SPEED_NUM_100M;
2976 case ICE_AQ_LINK_SPEED_1000MB:
2977 link.link_speed = ETH_SPEED_NUM_1G;
2979 case ICE_AQ_LINK_SPEED_2500MB:
2980 link.link_speed = ETH_SPEED_NUM_2_5G;
2982 case ICE_AQ_LINK_SPEED_5GB:
2983 link.link_speed = ETH_SPEED_NUM_5G;
2985 case ICE_AQ_LINK_SPEED_10GB:
2986 link.link_speed = ETH_SPEED_NUM_10G;
2988 case ICE_AQ_LINK_SPEED_20GB:
2989 link.link_speed = ETH_SPEED_NUM_20G;
2991 case ICE_AQ_LINK_SPEED_25GB:
2992 link.link_speed = ETH_SPEED_NUM_25G;
2994 case ICE_AQ_LINK_SPEED_40GB:
2995 link.link_speed = ETH_SPEED_NUM_40G;
2997 case ICE_AQ_LINK_SPEED_50GB:
2998 link.link_speed = ETH_SPEED_NUM_50G;
3000 case ICE_AQ_LINK_SPEED_100GB:
3001 link.link_speed = ETH_SPEED_NUM_100G;
3003 case ICE_AQ_LINK_SPEED_UNKNOWN:
3005 PMD_DRV_LOG(ERR, "Unknown link speed");
3006 link.link_speed = ETH_SPEED_NUM_NONE;
3010 link.link_autoneg = !(dev->data->dev_conf.link_speeds &
3011 ETH_LINK_SPEED_FIXED);
3014 ice_atomic_write_link_status(dev, &link);
3015 if (link.link_status == old.link_status)
3021 /* Force the physical link state by getting the current PHY capabilities from
3022 * hardware and setting the PHY config based on the determined capabilities. If
3023 * link changes, link event will be triggered because both the Enable Automatic
3024 * Link Update and LESM Enable bits are set when setting the PHY capabilities.
3026 static enum ice_status
3027 ice_force_phys_link_state(struct ice_hw *hw, bool link_up)
3029 struct ice_aqc_set_phy_cfg_data cfg = { 0 };
3030 struct ice_aqc_get_phy_caps_data *pcaps;
3031 struct ice_port_info *pi;
3032 enum ice_status status;
3034 if (!hw || !hw->port_info)
3035 return ICE_ERR_PARAM;
3039 pcaps = (struct ice_aqc_get_phy_caps_data *)
3040 ice_malloc(hw, sizeof(*pcaps));
3042 return ICE_ERR_NO_MEMORY;
3044 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
3049 /* No change in link */
3050 if (link_up == !!(pcaps->caps & ICE_AQC_PHY_EN_LINK) &&
3051 link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP))
3054 cfg.phy_type_low = pcaps->phy_type_low;
3055 cfg.phy_type_high = pcaps->phy_type_high;
3056 cfg.caps = pcaps->caps | ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
3057 cfg.low_power_ctrl = pcaps->low_power_ctrl;
3058 cfg.eee_cap = pcaps->eee_cap;
3059 cfg.eeer_value = pcaps->eeer_value;
3060 cfg.link_fec_opt = pcaps->link_fec_options;
3062 cfg.caps |= ICE_AQ_PHY_ENA_LINK;
3064 cfg.caps &= ~ICE_AQ_PHY_ENA_LINK;
3066 status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL);
3069 ice_free(hw, pcaps);
3074 ice_dev_set_link_up(struct rte_eth_dev *dev)
3076 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3078 return ice_force_phys_link_state(hw, true);
3082 ice_dev_set_link_down(struct rte_eth_dev *dev)
3084 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3086 return ice_force_phys_link_state(hw, false);
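/* Set the MTU. The port must be stopped first; frame sizes above
* RTE_ETHER_MAX_LEN enable the jumbo frame offload, smaller ones
* disable it.
*/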
3090 ice_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
3092 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3093 struct rte_eth_dev_data *dev_data = pf->dev_data;
3094 uint32_t frame_size = mtu + ICE_ETH_OVERHEAD;
3096 /* check if mtu is within the allowed range */
3097 if (mtu < RTE_ETHER_MIN_MTU || frame_size > ICE_FRAME_SIZE_MAX)
/* MTU cannot be changed while the port is started */
3101 if (dev_data->dev_started) {
3103 "port %d must be stopped before configuration",
3108 if (frame_size > RTE_ETHER_MAX_LEN)
3109 dev_data->dev_conf.rxmode.offloads |=
3110 DEV_RX_OFFLOAD_JUMBO_FRAME;
3112 dev_data->dev_conf.rxmode.offloads &=
3113 ~DEV_RX_OFFLOAD_JUMBO_FRAME;
3115 dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
3120 static int ice_macaddr_set(struct rte_eth_dev *dev,
3121 struct rte_ether_addr *mac_addr)
3123 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3124 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3125 struct ice_vsi *vsi = pf->main_vsi;
3126 struct ice_mac_filter *f;
3130 if (!rte_is_valid_assigned_ether_addr(mac_addr)) {
3131 PMD_DRV_LOG(ERR, "Tried to set invalid MAC address.");
3135 TAILQ_FOREACH(f, &vsi->mac_list, next) {
3136 if (rte_is_same_ether_addr(&pf->dev_addr, &f->mac_info.mac_addr))
3141 PMD_DRV_LOG(ERR, "Failed to find filter for default mac");
3145 ret = ice_remove_mac_filter(vsi, &f->mac_info.mac_addr);
3146 if (ret != ICE_SUCCESS) {
3147 PMD_DRV_LOG(ERR, "Failed to delete mac filter");
3150 ret = ice_add_mac_filter(vsi, mac_addr);
3151 if (ret != ICE_SUCCESS) {
3152 PMD_DRV_LOG(ERR, "Failed to add mac filter");
3155 memcpy(&pf->dev_addr, mac_addr, ETH_ADDR_LEN);
3157 flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL;
3158 ret = ice_aq_manage_mac_write(hw, mac_addr->addr_bytes, flags, NULL);
3159 if (ret != ICE_SUCCESS)
3160 PMD_DRV_LOG(ERR, "Failed to set manage mac");
3165 /* Add a MAC address, and update filters */
3167 ice_macaddr_add(struct rte_eth_dev *dev,
3168 struct rte_ether_addr *mac_addr,
3169 __rte_unused uint32_t index,
3170 __rte_unused uint32_t pool)
3172 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3173 struct ice_vsi *vsi = pf->main_vsi;
3176 ret = ice_add_mac_filter(vsi, mac_addr);
3177 if (ret != ICE_SUCCESS) {
3178 PMD_DRV_LOG(ERR, "Failed to add MAC filter");
3185 /* Remove a MAC address, and update filters */
3187 ice_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
3189 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3190 struct ice_vsi *vsi = pf->main_vsi;
3191 struct rte_eth_dev_data *data = dev->data;
3192 struct rte_ether_addr *macaddr;
3195 macaddr = &data->mac_addrs[index];
3196 ret = ice_remove_mac_filter(vsi, macaddr);
3198 PMD_DRV_LOG(ERR, "Failed to remove MAC filter");
3204 ice_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
3206 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3207 struct ice_vsi *vsi = pf->main_vsi;
3210 PMD_INIT_FUNC_TRACE();
3213 ret = ice_add_vlan_filter(vsi, vlan_id);
3215 PMD_DRV_LOG(ERR, "Failed to add vlan filter");
3219 ret = ice_remove_vlan_filter(vsi, vlan_id);
3221 PMD_DRV_LOG(ERR, "Failed to remove vlan filter");
3229 /* Configure vlan filter on or off */
3231 ice_vsi_config_vlan_filter(struct ice_vsi *vsi, bool on)
3233 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
3234 struct ice_vsi_ctx ctxt;
3235 uint8_t sec_flags, sw_flags2;
3238 sec_flags = ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
3239 ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S;
3240 sw_flags2 = ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
3243 vsi->info.sec_flags |= sec_flags;
3244 vsi->info.sw_flags2 |= sw_flags2;
3246 vsi->info.sec_flags &= ~sec_flags;
3247 vsi->info.sw_flags2 &= ~sw_flags2;
3249 vsi->info.sw_id = hw->port_info->sw_id;
3250 (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
3251 ctxt.info.valid_sections =
3252 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_SW_VALID |
3253 ICE_AQ_VSI_PROP_SECURITY_VALID);
3254 ctxt.vsi_num = vsi->vsi_id;
3256 ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
3258 PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan rx pruning",
3259 on ? "enable" : "disable");
3262 vsi->info.valid_sections |=
3263 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_SW_VALID |
3264 ICE_AQ_VSI_PROP_SECURITY_VALID);
/* Consistent with other drivers: allow untagged packets when VLAN filtering is on */
3269 ret = ice_add_vlan_filter(vsi, 0);
3271 ret = ice_remove_vlan_filter(vsi, 0);
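/* Enable or disable VLAN stripping by switching the VSI VLAN EMOD
* field between STR_BOTH and NOTHING, then pushing the updated
* context to firmware via ice_update_vsi().
*/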
3277 ice_vsi_config_vlan_stripping(struct ice_vsi *vsi, bool on)
3279 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
3280 struct ice_vsi_ctx ctxt;
/* Check if stripping is already on or off */
3285 if (vsi->info.valid_sections &
3286 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID)) {
3288 if ((vsi->info.vlan_flags &
3289 ICE_AQ_VSI_VLAN_EMOD_M) ==
3290 ICE_AQ_VSI_VLAN_EMOD_STR_BOTH)
3291 return 0; /* already on */
3293 if ((vsi->info.vlan_flags &
3294 ICE_AQ_VSI_VLAN_EMOD_M) ==
3295 ICE_AQ_VSI_VLAN_EMOD_NOTHING)
3296 return 0; /* already off */
3301 vlan_flags = ICE_AQ_VSI_VLAN_EMOD_STR_BOTH;
3303 vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING;
3304 vsi->info.vlan_flags &= ~(ICE_AQ_VSI_VLAN_EMOD_M);
3305 vsi->info.vlan_flags |= vlan_flags;
3306 (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
3307 ctxt.info.valid_sections =
3308 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID);
3309 ctxt.vsi_num = vsi->vsi_id;
3310 ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
3312 PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan stripping",
3313 on ? "enable" : "disable");
3317 vsi->info.valid_sections |=
3318 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID);
3324 ice_vlan_offload_set(struct rte_eth_dev *dev, int mask)
3326 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3327 struct ice_vsi *vsi = pf->main_vsi;
3328 struct rte_eth_rxmode *rxmode;
3330 rxmode = &dev->data->dev_conf.rxmode;
3331 if (mask & ETH_VLAN_FILTER_MASK) {
3332 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
3333 ice_vsi_config_vlan_filter(vsi, TRUE);
3335 ice_vsi_config_vlan_filter(vsi, FALSE);
3338 if (mask & ETH_VLAN_STRIP_MASK) {
3339 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
3340 ice_vsi_config_vlan_stripping(vsi, TRUE);
3342 ice_vsi_config_vlan_stripping(vsi, FALSE);
3345 if (mask & ETH_VLAN_EXTEND_MASK) {
3346 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
3347 ice_vsi_config_double_vlan(vsi, TRUE);
3349 ice_vsi_config_double_vlan(vsi, FALSE);
3356 ice_vlan_tpid_set(struct rte_eth_dev *dev,
3357 enum rte_vlan_type vlan_type,
3360 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3361 uint64_t reg_r = 0, reg_w = 0;
3362 uint16_t reg_id = 0;
3364 int qinq = dev->data->dev_conf.rxmode.offloads &
3365 DEV_RX_OFFLOAD_VLAN_EXTEND;
3367 switch (vlan_type) {
3368 case ETH_VLAN_TYPE_OUTER:
3374 case ETH_VLAN_TYPE_INNER:
3379 "Unsupported vlan type in single vlan.");
3384 PMD_DRV_LOG(ERR, "Unsupported vlan type %d", vlan_type);
3387 reg_r = ICE_READ_REG(hw, GL_SWT_L2TAGCTRL(reg_id));
3388 PMD_DRV_LOG(DEBUG, "Debug read from ICE GL_SWT_L2TAGCTRL[%d]: "
3389 "0x%08"PRIx64"", reg_id, reg_r);
3391 reg_w = reg_r & (~(GL_SWT_L2TAGCTRL_ETHERTYPE_M));
3392 reg_w |= ((uint64_t)tpid << GL_SWT_L2TAGCTRL_ETHERTYPE_S);
3393 if (reg_r == reg_w) {
3394 PMD_DRV_LOG(DEBUG, "No need to write");
3398 ICE_WRITE_REG(hw, GL_SWT_L2TAGCTRL(reg_id), reg_w);
3399 PMD_DRV_LOG(DEBUG, "Debug write 0x%08"PRIx64" to "
3400 "ICE GL_SWT_L2TAGCTRL[%d]", reg_w, reg_id);
3406 ice_get_rss_lut(struct ice_vsi *vsi, uint8_t *lut, uint16_t lut_size)
3408 struct ice_pf *pf = ICE_VSI_TO_PF(vsi);
3409 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
3415 if (pf->flags & ICE_FLAG_RSS_AQ_CAPABLE) {
3416 ret = ice_aq_get_rss_lut(hw, vsi->idx,
3417 ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF, lut, lut_size);
3419 PMD_DRV_LOG(ERR, "Failed to get RSS lookup table");
uint32_t *lut_dw = (uint32_t *)lut;
3424 uint16_t i, lut_size_dw = lut_size / 4;
3426 for (i = 0; i < lut_size_dw; i++)
3427 lut_dw[i] = ICE_READ_REG(hw, PFQF_HLUT(i));
3434 ice_set_rss_lut(struct ice_vsi *vsi, uint8_t *lut, uint16_t lut_size)
3443 pf = ICE_VSI_TO_PF(vsi);
3444 hw = ICE_VSI_TO_HW(vsi);
3446 if (pf->flags & ICE_FLAG_RSS_AQ_CAPABLE) {
3447 ret = ice_aq_set_rss_lut(hw, vsi->idx,
3448 ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF, lut, lut_size);
3450 PMD_DRV_LOG(ERR, "Failed to set RSS lookup table");
uint32_t *lut_dw = (uint32_t *)lut;
3455 uint16_t i, lut_size_dw = lut_size / 4;
3457 for (i = 0; i < lut_size_dw; i++)
3458 ICE_WRITE_REG(hw, PFQF_HLUT(i), lut_dw[i]);
3467 ice_rss_reta_update(struct rte_eth_dev *dev,
3468 struct rte_eth_rss_reta_entry64 *reta_conf,
3471 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3472 uint16_t i, lut_size = pf->hash_lut_size;
3473 uint16_t idx, shift;
3477 if (reta_size != ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128 &&
3478 reta_size != ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512 &&
3479 reta_size != ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K) {
3481 "The size of hash lookup table configured (%d)"
3482 "doesn't match the number hardware can "
3483 "supported (128, 512, 2048)",
/* It MUST use the current LUT size to get the RSS lookup table,
* otherwise it will fail with a -100 error code.
3491 lut = rte_zmalloc(NULL, RTE_MAX(reta_size, lut_size), 0);
3493 PMD_DRV_LOG(ERR, "No memory can be allocated");
3496 ret = ice_get_rss_lut(pf->main_vsi, lut, lut_size);
3500 for (i = 0; i < reta_size; i++) {
3501 idx = i / RTE_RETA_GROUP_SIZE;
3502 shift = i % RTE_RETA_GROUP_SIZE;
3503 if (reta_conf[idx].mask & (1ULL << shift))
3504 lut[i] = reta_conf[idx].reta[shift];
3506 ret = ice_set_rss_lut(pf->main_vsi, lut, reta_size);
3507 if (ret == 0 && lut_size != reta_size) {
3509 "The size of hash lookup table is changed from (%d) to (%d)",
3510 lut_size, reta_size);
3511 pf->hash_lut_size = reta_size;
3521 ice_rss_reta_query(struct rte_eth_dev *dev,
3522 struct rte_eth_rss_reta_entry64 *reta_conf,
3525 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3526 uint16_t i, lut_size = pf->hash_lut_size;
3527 uint16_t idx, shift;
3531 if (reta_size != lut_size) {
3533 "The size of hash lookup table configured (%d)"
3534 "doesn't match the number hardware can "
3536 reta_size, lut_size);
3540 lut = rte_zmalloc(NULL, reta_size, 0);
3542 PMD_DRV_LOG(ERR, "No memory can be allocated");
3546 ret = ice_get_rss_lut(pf->main_vsi, lut, reta_size);
3550 for (i = 0; i < reta_size; i++) {
3551 idx = i / RTE_RETA_GROUP_SIZE;
3552 shift = i % RTE_RETA_GROUP_SIZE;
3553 if (reta_conf[idx].mask & (1ULL << shift))
3554 reta_conf[idx].reta[shift] = lut[i];
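/* Program the RSS hash key via the admin queue. A NULL or zero-length
* key is treated as "nothing to configure"; any other length must
* match the hardware key size.
*/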
3564 ice_set_rss_key(struct ice_vsi *vsi, uint8_t *key, uint8_t key_len)
3566 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
3569 if (!key || key_len == 0) {
3570 PMD_DRV_LOG(DEBUG, "No key to be configured");
3572 } else if (key_len != (VSIQF_HKEY_MAX_INDEX + 1) *
3574 PMD_DRV_LOG(ERR, "Invalid key length %u", key_len);
3578 struct ice_aqc_get_set_rss_keys *key_dw =
3579 (struct ice_aqc_get_set_rss_keys *)key;
3581 ret = ice_aq_set_rss_key(hw, vsi->idx, key_dw);
3583 PMD_DRV_LOG(ERR, "Failed to configure RSS key via AQ");
3591 ice_get_rss_key(struct ice_vsi *vsi, uint8_t *key, uint8_t *key_len)
3593 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
3596 if (!key || !key_len)
3599 ret = ice_aq_get_rss_key
3601 (struct ice_aqc_get_set_rss_keys *)key);
3603 PMD_DRV_LOG(ERR, "Failed to get RSS key via AQ");
3606 *key_len = (VSIQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
3612 ice_rss_hash_update(struct rte_eth_dev *dev,
3613 struct rte_eth_rss_conf *rss_conf)
3615 enum ice_status status = ICE_SUCCESS;
3616 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3617 struct ice_vsi *vsi = pf->main_vsi;
3620 status = ice_set_rss_key(vsi, rss_conf->rss_key, rss_conf->rss_key_len);
3624 /* TODO: hash enable config, ice_add_rss_cfg */
3629 ice_rss_hash_conf_get(struct rte_eth_dev *dev,
3630 struct rte_eth_rss_conf *rss_conf)
3632 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3633 struct ice_vsi *vsi = pf->main_vsi;
3635 ice_get_rss_key(vsi, rss_conf->rss_key,
3636 &rss_conf->rss_key_len);
/* TODO: default to 0 as rss_hf configuration is not supported yet */
3639 rss_conf->rss_hf = 0;
3644 ice_promisc_enable(struct rte_eth_dev *dev)
3646 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3647 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3648 struct ice_vsi *vsi = pf->main_vsi;
3649 enum ice_status status;
3653 pmask = ICE_PROMISC_UCAST_RX | ICE_PROMISC_UCAST_TX |
3654 ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX;
3656 status = ice_set_vsi_promisc(hw, vsi->idx, pmask, 0);
3658 case ICE_ERR_ALREADY_EXISTS:
3659 PMD_DRV_LOG(DEBUG, "Promisc mode has already been enabled");
3663 PMD_DRV_LOG(ERR, "Failed to enable promisc, err=%d", status);
3671 ice_promisc_disable(struct rte_eth_dev *dev)
3673 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3674 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3675 struct ice_vsi *vsi = pf->main_vsi;
3676 enum ice_status status;
3680 pmask = ICE_PROMISC_UCAST_RX | ICE_PROMISC_UCAST_TX |
3681 ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX;
3683 status = ice_clear_vsi_promisc(hw, vsi->idx, pmask, 0);
3684 if (status != ICE_SUCCESS) {
3685 PMD_DRV_LOG(ERR, "Failed to clear promisc, err=%d", status);
3693 ice_allmulti_enable(struct rte_eth_dev *dev)
3695 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3696 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3697 struct ice_vsi *vsi = pf->main_vsi;
3698 enum ice_status status;
3702 pmask = ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX;
3704 status = ice_set_vsi_promisc(hw, vsi->idx, pmask, 0);
3707 case ICE_ERR_ALREADY_EXISTS:
3708 PMD_DRV_LOG(DEBUG, "Allmulti has already been enabled");
3712 PMD_DRV_LOG(ERR, "Failed to enable allmulti, err=%d", status);
3720 ice_allmulti_disable(struct rte_eth_dev *dev)
3722 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3723 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3724 struct ice_vsi *vsi = pf->main_vsi;
3725 enum ice_status status;
3729 if (dev->data->promiscuous == 1)
3730 return 0; /* must remain in all_multicast mode */
3732 pmask = ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX;
3734 status = ice_clear_vsi_promisc(hw, vsi->idx, pmask, 0);
3735 if (status != ICE_SUCCESS) {
3736 PMD_DRV_LOG(ERR, "Failed to clear allmulti, err=%d", status);
3743 static int ice_rx_queue_intr_enable(struct rte_eth_dev *dev,
3746 struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
3747 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
3748 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3752 msix_intr = intr_handle->intr_vec[queue_id];
3754 val = GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
3755 GLINT_DYN_CTL_ITR_INDX_M;
3756 val &= ~GLINT_DYN_CTL_WB_ON_ITR_M;
3758 ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr), val);
3759 rte_intr_ack(&pci_dev->intr_handle);
3764 static int ice_rx_queue_intr_disable(struct rte_eth_dev *dev,
3767 struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
3768 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
3769 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3772 msix_intr = intr_handle->intr_vec[queue_id];
3774 ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr), GLINT_DYN_CTL_WB_ON_ITR_M);
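/* Report the firmware version string: NVM version, EETRACK id and the
* OEM version (major.build.patch) unpacked from hw->nvm.oem_ver.
*/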
3780 ice_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
3782 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3788 full_ver = hw->nvm.oem_ver;
3789 ver = (u8)(full_ver >> 24);
3790 build = (u16)((full_ver >> 8) & 0xffff);
3791 patch = (u8)(full_ver & 0xff);
3793 ret = snprintf(fw_version, fw_size,
3794 "%d.%d%d 0x%08x %d.%d.%d",
3795 ((hw->nvm.ver >> 12) & 0xf),
3796 ((hw->nvm.ver >> 4) & 0xff),
3797 (hw->nvm.ver & 0xf), hw->nvm.eetrack,
3800 /* add the size of '\0' */
3802 if (fw_size < (u32)ret)
3809 ice_vsi_vlan_pvid_set(struct ice_vsi *vsi, struct ice_vsi_vlan_pvid_info *info)
3812 struct ice_vsi_ctx ctxt;
3813 uint8_t vlan_flags = 0;
3816 if (!vsi || !info) {
3817 PMD_DRV_LOG(ERR, "invalid parameters");
3822 vsi->info.pvid = info->config.pvid;
3824 * If insert pvid is enabled, only tagged pkts are
3825 * allowed to be sent out.
3827 vlan_flags = ICE_AQ_VSI_PVLAN_INSERT_PVID |
3828 ICE_AQ_VSI_VLAN_MODE_UNTAGGED;
3831 if (info->config.reject.tagged == 0)
3832 vlan_flags |= ICE_AQ_VSI_VLAN_MODE_TAGGED;
3834 if (info->config.reject.untagged == 0)
3835 vlan_flags |= ICE_AQ_VSI_VLAN_MODE_UNTAGGED;
3837 vsi->info.vlan_flags &= ~(ICE_AQ_VSI_PVLAN_INSERT_PVID |
3838 ICE_AQ_VSI_VLAN_MODE_M);
3839 vsi->info.vlan_flags |= vlan_flags;
3840 memset(&ctxt, 0, sizeof(ctxt));
3841 rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
3842 ctxt.info.valid_sections =
3843 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID);
3844 ctxt.vsi_num = vsi->vsi_id;
3846 hw = ICE_VSI_TO_HW(vsi);
3847 ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
3848 if (ret != ICE_SUCCESS) {
3850 "update VSI for VLAN insert failed, err %d",
3855 vsi->info.valid_sections |=
3856 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID);
3862 ice_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
3864 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3865 struct ice_vsi *vsi = pf->main_vsi;
3866 struct rte_eth_dev_data *data = pf->dev_data;
3867 struct ice_vsi_vlan_pvid_info info;
3870 memset(&info, 0, sizeof(info));
3873 info.config.pvid = pvid;
3875 info.config.reject.tagged =
3876 data->dev_conf.txmode.hw_vlan_reject_tagged;
3877 info.config.reject.untagged =
3878 data->dev_conf.txmode.hw_vlan_reject_untagged;
3881 ret = ice_vsi_vlan_pvid_set(vsi, &info);
3883 PMD_DRV_LOG(ERR, "Failed to set pvid.");
3891 ice_get_eeprom_length(struct rte_eth_dev *dev)
3893 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3895 /* Convert word count to byte count */
3896 return hw->nvm.sr_words << 1;
3900 ice_get_eeprom(struct rte_eth_dev *dev,
3901 struct rte_dev_eeprom_info *eeprom)
3903 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3904 uint16_t *data = eeprom->data;
3905 uint16_t first_word, last_word, nwords;
3906 enum ice_status status = ICE_SUCCESS;
3908 first_word = eeprom->offset >> 1;
3909 last_word = (eeprom->offset + eeprom->length - 1) >> 1;
3910 nwords = last_word - first_word + 1;
3912 if (first_word >= hw->nvm.sr_words ||
3913 last_word >= hw->nvm.sr_words) {
3914 PMD_DRV_LOG(ERR, "Requested EEPROM bytes out of range.");
3918 eeprom->magic = hw->vendor_id | (hw->device_id << 16);
3920 status = ice_read_sr_buf(hw, first_word, &nwords, data);
3922 PMD_DRV_LOG(ERR, "EEPROM read failed.");
3923 eeprom->length = sizeof(uint16_t) * nwords;
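/* Read a 32-bit hardware counter and accumulate the delta since the
* last recorded offset, compensating for a single register
* wrap-around.
*/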
3931 ice_stat_update_32(struct ice_hw *hw,
3939 new_data = (uint64_t)ICE_READ_REG(hw, reg);
3943 if (new_data >= *offset)
3944 *stat = (uint64_t)(new_data - *offset);
3946 *stat = (uint64_t)((new_data +
3947 ((uint64_t)1 << ICE_32_BIT_WIDTH))
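/* Read a 40-bit hardware counter split across a low and a high
* register and accumulate the delta since the last offset. If the
* counter wrapped (new < offset), 2^40 is added back before
* subtracting; e.g. offset 0xFFFFFFFFF0 and new value 0x10 yield a
* delta of 0x20.
*/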
3952 ice_stat_update_40(struct ice_hw *hw,
3961 new_data = (uint64_t)ICE_READ_REG(hw, loreg);
3962 new_data |= (uint64_t)(ICE_READ_REG(hw, hireg) & ICE_8_BIT_MASK) <<
3968 if (new_data >= *offset)
3969 *stat = new_data - *offset;
3971 *stat = (uint64_t)((new_data +
3972 ((uint64_t)1 << ICE_40_BIT_WIDTH)) -
3975 *stat &= ICE_40_BIT_MASK;
3978 /* Get all the statistics of a VSI */
3980 ice_update_vsi_stats(struct ice_vsi *vsi)
3982 struct ice_eth_stats *oes = &vsi->eth_stats_offset;
3983 struct ice_eth_stats *nes = &vsi->eth_stats;
3984 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
3985 int idx = rte_le_to_cpu_16(vsi->vsi_id);
3987 ice_stat_update_40(hw, GLV_GORCH(idx), GLV_GORCL(idx),
3988 vsi->offset_loaded, &oes->rx_bytes,
3990 ice_stat_update_40(hw, GLV_UPRCH(idx), GLV_UPRCL(idx),
3991 vsi->offset_loaded, &oes->rx_unicast,
3993 ice_stat_update_40(hw, GLV_MPRCH(idx), GLV_MPRCL(idx),
3994 vsi->offset_loaded, &oes->rx_multicast,
3995 &nes->rx_multicast);
3996 ice_stat_update_40(hw, GLV_BPRCH(idx), GLV_BPRCL(idx),
3997 vsi->offset_loaded, &oes->rx_broadcast,
3998 &nes->rx_broadcast);
3999 /* exclude CRC bytes */
4000 nes->rx_bytes -= (nes->rx_unicast + nes->rx_multicast +
4001 nes->rx_broadcast) * RTE_ETHER_CRC_LEN;
4003 ice_stat_update_32(hw, GLV_RDPC(idx), vsi->offset_loaded,
4004 &oes->rx_discards, &nes->rx_discards);
4005 /* GLV_REPC not supported */
4006 /* GLV_RMPC not supported */
4007 ice_stat_update_32(hw, GLSWID_RUPP(idx), vsi->offset_loaded,
4008 &oes->rx_unknown_protocol,
4009 &nes->rx_unknown_protocol);
4010 ice_stat_update_40(hw, GLV_GOTCH(idx), GLV_GOTCL(idx),
4011 vsi->offset_loaded, &oes->tx_bytes,
4013 ice_stat_update_40(hw, GLV_UPTCH(idx), GLV_UPTCL(idx),
4014 vsi->offset_loaded, &oes->tx_unicast,
4016 ice_stat_update_40(hw, GLV_MPTCH(idx), GLV_MPTCL(idx),
4017 vsi->offset_loaded, &oes->tx_multicast,
4018 &nes->tx_multicast);
4019 ice_stat_update_40(hw, GLV_BPTCH(idx), GLV_BPTCL(idx),
4020 vsi->offset_loaded, &oes->tx_broadcast,
4021 &nes->tx_broadcast);
4022 /* GLV_TDPC not supported */
4023 ice_stat_update_32(hw, GLV_TEPC(idx), vsi->offset_loaded,
4024 &oes->tx_errors, &nes->tx_errors);
4025 vsi->offset_loaded = true;
4027 PMD_DRV_LOG(DEBUG, "************** VSI[%u] stats start **************",
4029 PMD_DRV_LOG(DEBUG, "rx_bytes: %"PRIu64"", nes->rx_bytes);
4030 PMD_DRV_LOG(DEBUG, "rx_unicast: %"PRIu64"", nes->rx_unicast);
4031 PMD_DRV_LOG(DEBUG, "rx_multicast: %"PRIu64"", nes->rx_multicast);
4032 PMD_DRV_LOG(DEBUG, "rx_broadcast: %"PRIu64"", nes->rx_broadcast);
4033 PMD_DRV_LOG(DEBUG, "rx_discards: %"PRIu64"", nes->rx_discards);
4034 PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
4035 nes->rx_unknown_protocol);
4036 PMD_DRV_LOG(DEBUG, "tx_bytes: %"PRIu64"", nes->tx_bytes);
4037 PMD_DRV_LOG(DEBUG, "tx_unicast: %"PRIu64"", nes->tx_unicast);
4038 PMD_DRV_LOG(DEBUG, "tx_multicast: %"PRIu64"", nes->tx_multicast);
4039 PMD_DRV_LOG(DEBUG, "tx_broadcast: %"PRIu64"", nes->tx_broadcast);
4040 PMD_DRV_LOG(DEBUG, "tx_discards: %"PRIu64"", nes->tx_discards);
4041 PMD_DRV_LOG(DEBUG, "tx_errors: %"PRIu64"", nes->tx_errors);
4042 PMD_DRV_LOG(DEBUG, "************** VSI[%u] stats end ****************",
4047 ice_read_stats_registers(struct ice_pf *pf, struct ice_hw *hw)
4049 struct ice_hw_port_stats *ns = &pf->stats; /* new stats */
4050 struct ice_hw_port_stats *os = &pf->stats_offset; /* old stats */
4052 /* Get statistics of struct ice_eth_stats */
4053 ice_stat_update_40(hw, GLPRT_GORCH(hw->port_info->lport),
4054 GLPRT_GORCL(hw->port_info->lport),
4055 pf->offset_loaded, &os->eth.rx_bytes,
4057 ice_stat_update_40(hw, GLPRT_UPRCH(hw->port_info->lport),
4058 GLPRT_UPRCL(hw->port_info->lport),
4059 pf->offset_loaded, &os->eth.rx_unicast,
4060 &ns->eth.rx_unicast);
4061 ice_stat_update_40(hw, GLPRT_MPRCH(hw->port_info->lport),
4062 GLPRT_MPRCL(hw->port_info->lport),
4063 pf->offset_loaded, &os->eth.rx_multicast,
4064 &ns->eth.rx_multicast);
4065 ice_stat_update_40(hw, GLPRT_BPRCH(hw->port_info->lport),
4066 GLPRT_BPRCL(hw->port_info->lport),
4067 pf->offset_loaded, &os->eth.rx_broadcast,
4068 &ns->eth.rx_broadcast);
4069 ice_stat_update_32(hw, PRTRPB_RDPC,
4070 pf->offset_loaded, &os->eth.rx_discards,
4071 &ns->eth.rx_discards);
4073 /* Workaround: CRC size should not be included in byte statistics,
* so subtract RTE_ETHER_CRC_LEN from the byte counter for each rx packet.
4077 ns->eth.rx_bytes -= (ns->eth.rx_unicast + ns->eth.rx_multicast +
4078 ns->eth.rx_broadcast) * RTE_ETHER_CRC_LEN;
4080 /* GLPRT_REPC not supported */
4081 /* GLPRT_RMPC not supported */
4082 ice_stat_update_32(hw, GLSWID_RUPP(hw->port_info->lport),
4084 &os->eth.rx_unknown_protocol,
4085 &ns->eth.rx_unknown_protocol);
4086 ice_stat_update_40(hw, GLPRT_GOTCH(hw->port_info->lport),
4087 GLPRT_GOTCL(hw->port_info->lport),
4088 pf->offset_loaded, &os->eth.tx_bytes,
4090 ice_stat_update_40(hw, GLPRT_UPTCH(hw->port_info->lport),
4091 GLPRT_UPTCL(hw->port_info->lport),
4092 pf->offset_loaded, &os->eth.tx_unicast,
4093 &ns->eth.tx_unicast);
4094 ice_stat_update_40(hw, GLPRT_MPTCH(hw->port_info->lport),
4095 GLPRT_MPTCL(hw->port_info->lport),
4096 pf->offset_loaded, &os->eth.tx_multicast,
4097 &ns->eth.tx_multicast);
4098 ice_stat_update_40(hw, GLPRT_BPTCH(hw->port_info->lport),
4099 GLPRT_BPTCL(hw->port_info->lport),
4100 pf->offset_loaded, &os->eth.tx_broadcast,
4101 &ns->eth.tx_broadcast);
4102 ns->eth.tx_bytes -= (ns->eth.tx_unicast + ns->eth.tx_multicast +
4103 ns->eth.tx_broadcast) * RTE_ETHER_CRC_LEN;
4105 /* GLPRT_TEPC not supported */
4107 /* additional port specific stats */
4108 ice_stat_update_32(hw, GLPRT_TDOLD(hw->port_info->lport),
4109 pf->offset_loaded, &os->tx_dropped_link_down,
4110 &ns->tx_dropped_link_down);
4111 ice_stat_update_32(hw, GLPRT_CRCERRS(hw->port_info->lport),
4112 pf->offset_loaded, &os->crc_errors,
4114 ice_stat_update_32(hw, GLPRT_ILLERRC(hw->port_info->lport),
4115 pf->offset_loaded, &os->illegal_bytes,
4116 &ns->illegal_bytes);
4117 /* GLPRT_ERRBC not supported */
4118 ice_stat_update_32(hw, GLPRT_MLFC(hw->port_info->lport),
4119 pf->offset_loaded, &os->mac_local_faults,
4120 &ns->mac_local_faults);
4121 ice_stat_update_32(hw, GLPRT_MRFC(hw->port_info->lport),
4122 pf->offset_loaded, &os->mac_remote_faults,
4123 &ns->mac_remote_faults);
4125 ice_stat_update_32(hw, GLPRT_RLEC(hw->port_info->lport),
4126 pf->offset_loaded, &os->rx_len_errors,
4127 &ns->rx_len_errors);
4129 ice_stat_update_32(hw, GLPRT_LXONRXC(hw->port_info->lport),
4130 pf->offset_loaded, &os->link_xon_rx,
4132 ice_stat_update_32(hw, GLPRT_LXOFFRXC(hw->port_info->lport),
4133 pf->offset_loaded, &os->link_xoff_rx,
4135 ice_stat_update_32(hw, GLPRT_LXONTXC(hw->port_info->lport),
4136 pf->offset_loaded, &os->link_xon_tx,
4138 ice_stat_update_32(hw, GLPRT_LXOFFTXC(hw->port_info->lport),
4139 pf->offset_loaded, &os->link_xoff_tx,
4141 ice_stat_update_40(hw, GLPRT_PRC64H(hw->port_info->lport),
4142 GLPRT_PRC64L(hw->port_info->lport),
4143 pf->offset_loaded, &os->rx_size_64,
4145 ice_stat_update_40(hw, GLPRT_PRC127H(hw->port_info->lport),
4146 GLPRT_PRC127L(hw->port_info->lport),
4147 pf->offset_loaded, &os->rx_size_127,
4149 ice_stat_update_40(hw, GLPRT_PRC255H(hw->port_info->lport),
4150 GLPRT_PRC255L(hw->port_info->lport),
4151 pf->offset_loaded, &os->rx_size_255,
4153 ice_stat_update_40(hw, GLPRT_PRC511H(hw->port_info->lport),
4154 GLPRT_PRC511L(hw->port_info->lport),
4155 pf->offset_loaded, &os->rx_size_511,
4157 ice_stat_update_40(hw, GLPRT_PRC1023H(hw->port_info->lport),
4158 GLPRT_PRC1023L(hw->port_info->lport),
4159 pf->offset_loaded, &os->rx_size_1023,
4161 ice_stat_update_40(hw, GLPRT_PRC1522H(hw->port_info->lport),
4162 GLPRT_PRC1522L(hw->port_info->lport),
4163 pf->offset_loaded, &os->rx_size_1522,
4165 ice_stat_update_40(hw, GLPRT_PRC9522H(hw->port_info->lport),
4166 GLPRT_PRC9522L(hw->port_info->lport),
4167 pf->offset_loaded, &os->rx_size_big,
4169 ice_stat_update_32(hw, GLPRT_RUC(hw->port_info->lport),
4170 pf->offset_loaded, &os->rx_undersize,
4172 ice_stat_update_32(hw, GLPRT_RFC(hw->port_info->lport),
4173 pf->offset_loaded, &os->rx_fragments,
4175 ice_stat_update_32(hw, GLPRT_ROC(hw->port_info->lport),
4176 pf->offset_loaded, &os->rx_oversize,
4178 ice_stat_update_32(hw, GLPRT_RJC(hw->port_info->lport),
4179 pf->offset_loaded, &os->rx_jabber,
4181 ice_stat_update_40(hw, GLPRT_PTC64H(hw->port_info->lport),
4182 GLPRT_PTC64L(hw->port_info->lport),
4183 pf->offset_loaded, &os->tx_size_64,
4185 ice_stat_update_40(hw, GLPRT_PTC127H(hw->port_info->lport),
4186 GLPRT_PTC127L(hw->port_info->lport),
4187 pf->offset_loaded, &os->tx_size_127,
4189 ice_stat_update_40(hw, GLPRT_PTC255H(hw->port_info->lport),
4190 GLPRT_PTC255L(hw->port_info->lport),
4191 pf->offset_loaded, &os->tx_size_255,
4193 ice_stat_update_40(hw, GLPRT_PTC511H(hw->port_info->lport),
4194 GLPRT_PTC511L(hw->port_info->lport),
4195 pf->offset_loaded, &os->tx_size_511,
4197 ice_stat_update_40(hw, GLPRT_PTC1023H(hw->port_info->lport),
4198 GLPRT_PTC1023L(hw->port_info->lport),
4199 pf->offset_loaded, &os->tx_size_1023,
4201 ice_stat_update_40(hw, GLPRT_PTC1522H(hw->port_info->lport),
4202 GLPRT_PTC1522L(hw->port_info->lport),
4203 pf->offset_loaded, &os->tx_size_1522,
4205 ice_stat_update_40(hw, GLPRT_PTC9522H(hw->port_info->lport),
4206 GLPRT_PTC9522L(hw->port_info->lport),
4207 pf->offset_loaded, &os->tx_size_big,
4210 /* GLPRT_MSPDC not supported */
4211 /* GLPRT_XEC not supported */
4213 pf->offset_loaded = true;
4216 ice_update_vsi_stats(pf->main_vsi);
4219 /* Get all statistics of a port */
4221 ice_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
4223 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4224 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4225 struct ice_hw_port_stats *ns = &pf->stats; /* new stats */
/* read the registers to refresh the values, then fill the stats struct */
4228 ice_read_stats_registers(pf, hw);
4230 stats->ipackets = pf->main_vsi->eth_stats.rx_unicast +
4231 pf->main_vsi->eth_stats.rx_multicast +
4232 pf->main_vsi->eth_stats.rx_broadcast -
4233 pf->main_vsi->eth_stats.rx_discards;
4234 stats->opackets = ns->eth.tx_unicast +
4235 ns->eth.tx_multicast +
4236 ns->eth.tx_broadcast;
4237 stats->ibytes = pf->main_vsi->eth_stats.rx_bytes;
4238 stats->obytes = ns->eth.tx_bytes;
4239 stats->oerrors = ns->eth.tx_errors +
4240 pf->main_vsi->eth_stats.tx_errors;
4243 stats->imissed = ns->eth.rx_discards +
4244 pf->main_vsi->eth_stats.rx_discards;
4245 stats->ierrors = ns->crc_errors +
4247 ns->rx_oversize + ns->rx_fragments + ns->rx_jabber;
4249 PMD_DRV_LOG(DEBUG, "*************** PF stats start *****************");
4250 PMD_DRV_LOG(DEBUG, "rx_bytes: %"PRIu64"", ns->eth.rx_bytes);
4251 PMD_DRV_LOG(DEBUG, "rx_unicast: %"PRIu64"", ns->eth.rx_unicast);
4252 PMD_DRV_LOG(DEBUG, "rx_multicast:%"PRIu64"", ns->eth.rx_multicast);
4253 PMD_DRV_LOG(DEBUG, "rx_broadcast:%"PRIu64"", ns->eth.rx_broadcast);
4254 PMD_DRV_LOG(DEBUG, "rx_discards:%"PRIu64"", ns->eth.rx_discards);
4255 PMD_DRV_LOG(DEBUG, "vsi rx_discards:%"PRIu64"",
4256 pf->main_vsi->eth_stats.rx_discards);
4257 PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
4258 ns->eth.rx_unknown_protocol);
4259 PMD_DRV_LOG(DEBUG, "tx_bytes: %"PRIu64"", ns->eth.tx_bytes);
4260 PMD_DRV_LOG(DEBUG, "tx_unicast: %"PRIu64"", ns->eth.tx_unicast);
4261 PMD_DRV_LOG(DEBUG, "tx_multicast:%"PRIu64"", ns->eth.tx_multicast);
4262 PMD_DRV_LOG(DEBUG, "tx_broadcast:%"PRIu64"", ns->eth.tx_broadcast);
4263 PMD_DRV_LOG(DEBUG, "tx_discards:%"PRIu64"", ns->eth.tx_discards);
4264 PMD_DRV_LOG(DEBUG, "vsi tx_discards:%"PRIu64"",
4265 pf->main_vsi->eth_stats.tx_discards);
4266 PMD_DRV_LOG(DEBUG, "tx_errors: %"PRIu64"", ns->eth.tx_errors);
4268 PMD_DRV_LOG(DEBUG, "tx_dropped_link_down: %"PRIu64"",
4269 ns->tx_dropped_link_down);
4270 PMD_DRV_LOG(DEBUG, "crc_errors: %"PRIu64"", ns->crc_errors);
4271 PMD_DRV_LOG(DEBUG, "illegal_bytes: %"PRIu64"",
4273 PMD_DRV_LOG(DEBUG, "error_bytes: %"PRIu64"", ns->error_bytes);
4274 PMD_DRV_LOG(DEBUG, "mac_local_faults: %"PRIu64"",
4275 ns->mac_local_faults);
4276 PMD_DRV_LOG(DEBUG, "mac_remote_faults: %"PRIu64"",
4277 ns->mac_remote_faults);
4278 PMD_DRV_LOG(DEBUG, "link_xon_rx: %"PRIu64"", ns->link_xon_rx);
4279 PMD_DRV_LOG(DEBUG, "link_xoff_rx: %"PRIu64"", ns->link_xoff_rx);
4280 PMD_DRV_LOG(DEBUG, "link_xon_tx: %"PRIu64"", ns->link_xon_tx);
4281 PMD_DRV_LOG(DEBUG, "link_xoff_tx: %"PRIu64"", ns->link_xoff_tx);
4282 PMD_DRV_LOG(DEBUG, "rx_size_64: %"PRIu64"", ns->rx_size_64);
4283 PMD_DRV_LOG(DEBUG, "rx_size_127: %"PRIu64"", ns->rx_size_127);
4284 PMD_DRV_LOG(DEBUG, "rx_size_255: %"PRIu64"", ns->rx_size_255);
4285 PMD_DRV_LOG(DEBUG, "rx_size_511: %"PRIu64"", ns->rx_size_511);
4286 PMD_DRV_LOG(DEBUG, "rx_size_1023: %"PRIu64"", ns->rx_size_1023);
4287 PMD_DRV_LOG(DEBUG, "rx_size_1522: %"PRIu64"", ns->rx_size_1522);
4288 PMD_DRV_LOG(DEBUG, "rx_size_big: %"PRIu64"", ns->rx_size_big);
4289 PMD_DRV_LOG(DEBUG, "rx_undersize: %"PRIu64"", ns->rx_undersize);
4290 PMD_DRV_LOG(DEBUG, "rx_fragments: %"PRIu64"", ns->rx_fragments);
4291 PMD_DRV_LOG(DEBUG, "rx_oversize: %"PRIu64"", ns->rx_oversize);
4292 PMD_DRV_LOG(DEBUG, "rx_jabber: %"PRIu64"", ns->rx_jabber);
4293 PMD_DRV_LOG(DEBUG, "tx_size_64: %"PRIu64"", ns->tx_size_64);
4294 PMD_DRV_LOG(DEBUG, "tx_size_127: %"PRIu64"", ns->tx_size_127);
4295 PMD_DRV_LOG(DEBUG, "tx_size_255: %"PRIu64"", ns->tx_size_255);
4296 PMD_DRV_LOG(DEBUG, "tx_size_511: %"PRIu64"", ns->tx_size_511);
4297 PMD_DRV_LOG(DEBUG, "tx_size_1023: %"PRIu64"", ns->tx_size_1023);
4298 PMD_DRV_LOG(DEBUG, "tx_size_1522: %"PRIu64"", ns->tx_size_1522);
4299 PMD_DRV_LOG(DEBUG, "tx_size_big: %"PRIu64"", ns->tx_size_big);
4300 PMD_DRV_LOG(DEBUG, "rx_len_errors: %"PRIu64"", ns->rx_len_errors);
4301 PMD_DRV_LOG(DEBUG, "************* PF stats end ****************");
4305 /* Reset the statistics */
4307 ice_stats_reset(struct rte_eth_dev *dev)
4309 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4310 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4312 /* Mark PF and VSI stats to update the offset, aka "reset" */
4313 pf->offset_loaded = false;
4315 pf->main_vsi->offset_loaded = false;
/* read the stats; this latches the current register values into the offsets */
4318 ice_read_stats_registers(pf, hw);
4324 ice_xstats_calc_num(void)
4328 num = ICE_NB_ETH_XSTATS + ICE_NB_HW_PORT_XSTATS;
4334 ice_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
4337 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4338 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4341 struct ice_hw_port_stats *hw_stats = &pf->stats;
4343 count = ice_xstats_calc_num();
4347 ice_read_stats_registers(pf, hw);
4354 /* Get stats from ice_eth_stats struct */
4355 for (i = 0; i < ICE_NB_ETH_XSTATS; i++) {
4356 xstats[count].value =
4357 *(uint64_t *)((char *)&hw_stats->eth +
4358 ice_stats_strings[i].offset);
4359 xstats[count].id = count;
/* Get individual stats from ice_hw_port struct */
4364 for (i = 0; i < ICE_NB_HW_PORT_XSTATS; i++) {
4365 xstats[count].value =
4366 *(uint64_t *)((char *)hw_stats +
4367 ice_hw_port_strings[i].offset);
4368 xstats[count].id = count;
4375 static int ice_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
4376 struct rte_eth_xstat_name *xstats_names,
4377 __rte_unused unsigned int limit)
4379 unsigned int count = 0;
4383 return ice_xstats_calc_num();
4385 /* Note: limit checked in rte_eth_xstats_names() */
4387 /* Get stats from ice_eth_stats struct */
4388 for (i = 0; i < ICE_NB_ETH_XSTATS; i++) {
4389 strlcpy(xstats_names[count].name, ice_stats_strings[i].name,
4390 sizeof(xstats_names[count].name));
/* Get individual stats from ice_hw_port struct */
4395 for (i = 0; i < ICE_NB_HW_PORT_XSTATS; i++) {
4396 strlcpy(xstats_names[count].name, ice_hw_port_strings[i].name,
4397 sizeof(xstats_names[count].name));
4405 ice_dev_filter_ctrl(struct rte_eth_dev *dev,
4406 enum rte_filter_type filter_type,
4407 enum rte_filter_op filter_op,
4415 switch (filter_type) {
4416 case RTE_ETH_FILTER_GENERIC:
4417 if (filter_op != RTE_ETH_FILTER_GET)
4419 *(const void **)arg = &ice_flow_ops;
4422 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
4431 /* Add UDP tunneling port */
4433 ice_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
4434 struct rte_eth_udp_tunnel *udp_tunnel)
4437 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4439 if (udp_tunnel == NULL)
4442 switch (udp_tunnel->prot_type) {
4443 case RTE_TUNNEL_TYPE_VXLAN:
4444 ret = ice_create_tunnel(hw, TNL_VXLAN, udp_tunnel->udp_port);
4447 PMD_DRV_LOG(ERR, "Invalid tunnel type");
4455 /* Delete UDP tunneling port */
4457 ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
4458 struct rte_eth_udp_tunnel *udp_tunnel)
4461 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4463 if (udp_tunnel == NULL)
4466 switch (udp_tunnel->prot_type) {
4467 case RTE_TUNNEL_TYPE_VXLAN:
4468 ret = ice_destroy_tunnel(hw, udp_tunnel->udp_port, 0);
4471 PMD_DRV_LOG(ERR, "Invalid tunnel type");
4480 ice_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
4481 struct rte_pci_device *pci_dev)
4483 return rte_eth_dev_pci_generic_probe(pci_dev,
4484 sizeof(struct ice_adapter),
4489 ice_pci_remove(struct rte_pci_device *pci_dev)
4491 return rte_eth_dev_pci_generic_remove(pci_dev, ice_dev_uninit);
4494 static struct rte_pci_driver rte_ice_pmd = {
4495 .id_table = pci_id_ice_map,
4496 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
4497 .probe = ice_pci_probe,
4498 .remove = ice_pci_remove,
4502 * Driver initialization routine.
4503 * Invoked once at EAL init time.
4504 * Register itself as the [Poll Mode] Driver of PCI devices.
4506 RTE_PMD_REGISTER_PCI(net_ice, rte_ice_pmd);
4507 RTE_PMD_REGISTER_PCI_TABLE(net_ice, pci_id_ice_map);
4508 RTE_PMD_REGISTER_KMOD_DEP(net_ice, "* igb_uio | uio_pci_generic | vfio-pci");
4509 RTE_PMD_REGISTER_PARAM_STRING(net_ice,
4510 ICE_PROTO_XTR_ARG "=[queue:]<vlan|ipv4|ipv6|ipv6_flow|tcp>"
4511 ICE_SAFE_MODE_SUPPORT_ARG "=<0|1>"
4512 ICE_PIPELINE_MODE_SUPPORT_ARG "=<0|1>");
4514 RTE_INIT(ice_init_log)
4516 ice_logtype_init = rte_log_register("pmd.net.ice.init");
4517 if (ice_logtype_init >= 0)
4518 rte_log_set_level(ice_logtype_init, RTE_LOG_NOTICE);
4519 ice_logtype_driver = rte_log_register("pmd.net.ice.driver");
4520 if (ice_logtype_driver >= 0)
4521 rte_log_set_level(ice_logtype_driver, RTE_LOG_NOTICE);
4523 #ifdef RTE_LIBRTE_ICE_DEBUG_RX
4524 ice_logtype_rx = rte_log_register("pmd.net.ice.rx");
4525 if (ice_logtype_rx >= 0)
4526 rte_log_set_level(ice_logtype_rx, RTE_LOG_DEBUG);
4529 #ifdef RTE_LIBRTE_ICE_DEBUG_TX
4530 ice_logtype_tx = rte_log_register("pmd.net.ice.tx");
4531 if (ice_logtype_tx >= 0)
4532 rte_log_set_level(ice_logtype_tx, RTE_LOG_DEBUG);
4535 #ifdef RTE_LIBRTE_ICE_DEBUG_TX_FREE
4536 ice_logtype_tx_free = rte_log_register("pmd.net.ice.tx_free");
4537 if (ice_logtype_tx_free >= 0)
4538 rte_log_set_level(ice_logtype_tx_free, RTE_LOG_DEBUG);