/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation
 */

#include <rte_string_fns.h>
#include <ethdev_pci.h>

#include <stdio.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>

#include <rte_tailq.h>

#include "eal_firmware.h"

#include "base/ice_sched.h"
#include "base/ice_flow.h"
#include "base/ice_dcb.h"
#include "base/ice_common.h"
#include "base/ice_ptp_hw.h"

#include "rte_pmd_ice.h"
#include "ice_ethdev.h"
#include "ice_rxtx.h"
#include "ice_generic_flow.h"
/* devargs */
#define ICE_SAFE_MODE_SUPPORT_ARG "safe-mode-support"
#define ICE_PIPELINE_MODE_SUPPORT_ARG "pipeline-mode-support"
#define ICE_PROTO_XTR_ARG "proto_xtr"
#define ICE_HW_DEBUG_MASK_ARG "hw_debug_mask"
#define ICE_ONE_PPS_OUT_ARG "pps_out"
#define ICE_RX_LOW_LATENCY_ARG "rx_low_latency"

#define ICE_CYCLECOUNTER_MASK 0xffffffffffffffffULL
uint64_t ice_timestamp_dynflag;
int ice_timestamp_dynfield_offset = -1;
static const char * const ice_valid_args[] = {
	ICE_SAFE_MODE_SUPPORT_ARG,
	ICE_PIPELINE_MODE_SUPPORT_ARG,
	ICE_PROTO_XTR_ARG,
	ICE_HW_DEBUG_MASK_ARG,
	ICE_ONE_PPS_OUT_ARG,
	ICE_RX_LOW_LATENCY_ARG,
	NULL
};

#define PPS_OUT_DELAY_NS 1
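
/*
 * The keys above are consumed by ice_parse_devargs() later in this file.
 * A sketch of what combined devargs could look like (illustrative values,
 * not taken from this file):
 *   -a 0000:18:00.0,safe-mode-support=1,rx_low_latency=1
 *   -a 0000:18:00.0,proto_xtr='[(1,2-3):tcp,4:vlan]',pps_out='[pin:0]'
 */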
static const struct rte_mbuf_dynfield ice_proto_xtr_metadata_param = {
	.name = "intel_pmd_dynfield_proto_xtr_metadata",
	.size = sizeof(uint32_t),
	.align = __alignof__(uint32_t),
	.flags = 0,
};

struct proto_xtr_ol_flag {
	const struct rte_mbuf_dynflag param;
	uint64_t *ol_flag;
	bool required;
};

static bool ice_proto_xtr_hw_support[PROTO_XTR_MAX];
static struct proto_xtr_ol_flag ice_proto_xtr_ol_flag_params[] = {
	[PROTO_XTR_VLAN] = {
		.param = { .name = "intel_pmd_dynflag_proto_xtr_vlan" },
		.ol_flag = &rte_net_ice_dynflag_proto_xtr_vlan_mask },
	[PROTO_XTR_IPV4] = {
		.param = { .name = "intel_pmd_dynflag_proto_xtr_ipv4" },
		.ol_flag = &rte_net_ice_dynflag_proto_xtr_ipv4_mask },
	[PROTO_XTR_IPV6] = {
		.param = { .name = "intel_pmd_dynflag_proto_xtr_ipv6" },
		.ol_flag = &rte_net_ice_dynflag_proto_xtr_ipv6_mask },
	[PROTO_XTR_IPV6_FLOW] = {
		.param = { .name = "intel_pmd_dynflag_proto_xtr_ipv6_flow" },
		.ol_flag = &rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask },
	[PROTO_XTR_TCP] = {
		.param = { .name = "intel_pmd_dynflag_proto_xtr_tcp" },
		.ol_flag = &rte_net_ice_dynflag_proto_xtr_tcp_mask },
	[PROTO_XTR_IP_OFFSET] = {
		.param = { .name = "intel_pmd_dynflag_proto_xtr_ip_offset" },
		.ol_flag = &rte_net_ice_dynflag_proto_xtr_ip_offset_mask },
};
#define ICE_OS_DEFAULT_PKG_NAME "ICE OS Default Package"
#define ICE_COMMS_PKG_NAME "ICE COMMS Package"
#define ICE_MAX_RES_DESC_NUM 1024
static int ice_dev_configure(struct rte_eth_dev *dev);
static int ice_dev_start(struct rte_eth_dev *dev);
static int ice_dev_stop(struct rte_eth_dev *dev);
static int ice_dev_close(struct rte_eth_dev *dev);
static int ice_dev_reset(struct rte_eth_dev *dev);
static int ice_dev_info_get(struct rte_eth_dev *dev,
			    struct rte_eth_dev_info *dev_info);
static int ice_link_update(struct rte_eth_dev *dev,
			   int wait_to_complete);
static int ice_dev_set_link_up(struct rte_eth_dev *dev);
static int ice_dev_set_link_down(struct rte_eth_dev *dev);

static int ice_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
static int ice_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static int ice_rss_reta_update(struct rte_eth_dev *dev,
			       struct rte_eth_rss_reta_entry64 *reta_conf,
			       uint16_t reta_size);
static int ice_rss_reta_query(struct rte_eth_dev *dev,
			      struct rte_eth_rss_reta_entry64 *reta_conf,
			      uint16_t reta_size);
static int ice_rss_hash_update(struct rte_eth_dev *dev,
			       struct rte_eth_rss_conf *rss_conf);
static int ice_rss_hash_conf_get(struct rte_eth_dev *dev,
				 struct rte_eth_rss_conf *rss_conf);
static int ice_promisc_enable(struct rte_eth_dev *dev);
static int ice_promisc_disable(struct rte_eth_dev *dev);
static int ice_allmulti_enable(struct rte_eth_dev *dev);
static int ice_allmulti_disable(struct rte_eth_dev *dev);
static int ice_vlan_filter_set(struct rte_eth_dev *dev,
			       uint16_t vlan_id,
			       int on);
static int ice_macaddr_set(struct rte_eth_dev *dev,
			   struct rte_ether_addr *mac_addr);
static int ice_macaddr_add(struct rte_eth_dev *dev,
			   struct rte_ether_addr *mac_addr,
			   __rte_unused uint32_t index,
			   uint32_t pool);
static void ice_macaddr_remove(struct rte_eth_dev *dev, uint32_t index);
static int ice_rx_queue_intr_enable(struct rte_eth_dev *dev,
				    uint16_t queue_id);
static int ice_rx_queue_intr_disable(struct rte_eth_dev *dev,
				     uint16_t queue_id);
static int ice_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
			      size_t fw_size);
static int ice_vlan_pvid_set(struct rte_eth_dev *dev,
			     uint16_t pvid, int on);
static int ice_get_eeprom_length(struct rte_eth_dev *dev);
static int ice_get_eeprom(struct rte_eth_dev *dev,
			  struct rte_dev_eeprom_info *eeprom);
static int ice_get_module_info(struct rte_eth_dev *dev,
			       struct rte_eth_dev_module_info *modinfo);
static int ice_get_module_eeprom(struct rte_eth_dev *dev,
				 struct rte_dev_eeprom_info *info);
static int ice_stats_get(struct rte_eth_dev *dev,
			 struct rte_eth_stats *stats);
static int ice_stats_reset(struct rte_eth_dev *dev);
static int ice_xstats_get(struct rte_eth_dev *dev,
			  struct rte_eth_xstat *xstats, unsigned int n);
static int ice_xstats_get_names(struct rte_eth_dev *dev,
				struct rte_eth_xstat_name *xstats_names,
				unsigned int limit);
static int ice_dev_flow_ops_get(struct rte_eth_dev *dev,
				const struct rte_flow_ops **ops);
static int ice_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
					struct rte_eth_udp_tunnel *udp_tunnel);
static int ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
					struct rte_eth_udp_tunnel *udp_tunnel);
static int ice_timesync_enable(struct rte_eth_dev *dev);
static int ice_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
					  struct timespec *timestamp,
					  uint32_t flags);
static int ice_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
					  struct timespec *timestamp);
static int ice_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);
static int ice_timesync_read_time(struct rte_eth_dev *dev,
				  struct timespec *timestamp);
static int ice_timesync_write_time(struct rte_eth_dev *dev,
				   const struct timespec *timestamp);
static int ice_timesync_disable(struct rte_eth_dev *dev);
static const struct rte_pci_id pci_id_ice_map[] = {
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_BACKPLANE) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_SFP) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_10G_BASE_T) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_1GBE) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_QSFP) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_BACKPLANE) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810_XXV_BACKPLANE) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810_XXV_QSFP) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810_XXV_SFP) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823C_BACKPLANE) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823C_QSFP) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823C_SFP) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823C_10G_BASE_T) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823C_SGMII) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_BACKPLANE) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_QSFP) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_SFP) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_10G_BASE_T) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_SGMII) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822L_BACKPLANE) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822L_SFP) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822L_10G_BASE_T) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822L_SGMII) },
	{ .vendor_id = 0, /* sentinel */ },
};
static const struct eth_dev_ops ice_eth_dev_ops = {
	.dev_configure = ice_dev_configure,
	.dev_start = ice_dev_start,
	.dev_stop = ice_dev_stop,
	.dev_close = ice_dev_close,
	.dev_reset = ice_dev_reset,
	.dev_set_link_up = ice_dev_set_link_up,
	.dev_set_link_down = ice_dev_set_link_down,
	.rx_queue_start = ice_rx_queue_start,
	.rx_queue_stop = ice_rx_queue_stop,
	.tx_queue_start = ice_tx_queue_start,
	.tx_queue_stop = ice_tx_queue_stop,
	.rx_queue_setup = ice_rx_queue_setup,
	.rx_queue_release = ice_dev_rx_queue_release,
	.tx_queue_setup = ice_tx_queue_setup,
	.tx_queue_release = ice_dev_tx_queue_release,
	.dev_infos_get = ice_dev_info_get,
	.dev_supported_ptypes_get = ice_dev_supported_ptypes_get,
	.link_update = ice_link_update,
	.mtu_set = ice_mtu_set,
	.mac_addr_set = ice_macaddr_set,
	.mac_addr_add = ice_macaddr_add,
	.mac_addr_remove = ice_macaddr_remove,
	.vlan_filter_set = ice_vlan_filter_set,
	.vlan_offload_set = ice_vlan_offload_set,
	.reta_update = ice_rss_reta_update,
	.reta_query = ice_rss_reta_query,
	.rss_hash_update = ice_rss_hash_update,
	.rss_hash_conf_get = ice_rss_hash_conf_get,
	.promiscuous_enable = ice_promisc_enable,
	.promiscuous_disable = ice_promisc_disable,
	.allmulticast_enable = ice_allmulti_enable,
	.allmulticast_disable = ice_allmulti_disable,
	.rx_queue_intr_enable = ice_rx_queue_intr_enable,
	.rx_queue_intr_disable = ice_rx_queue_intr_disable,
	.fw_version_get = ice_fw_version_get,
	.vlan_pvid_set = ice_vlan_pvid_set,
	.rxq_info_get = ice_rxq_info_get,
	.txq_info_get = ice_txq_info_get,
	.rx_burst_mode_get = ice_rx_burst_mode_get,
	.tx_burst_mode_get = ice_tx_burst_mode_get,
	.get_eeprom_length = ice_get_eeprom_length,
	.get_eeprom = ice_get_eeprom,
	.get_module_info = ice_get_module_info,
	.get_module_eeprom = ice_get_module_eeprom,
	.stats_get = ice_stats_get,
	.stats_reset = ice_stats_reset,
	.xstats_get = ice_xstats_get,
	.xstats_get_names = ice_xstats_get_names,
	.xstats_reset = ice_stats_reset,
	.flow_ops_get = ice_dev_flow_ops_get,
	.udp_tunnel_port_add = ice_dev_udp_tunnel_port_add,
	.udp_tunnel_port_del = ice_dev_udp_tunnel_port_del,
	.tx_done_cleanup = ice_tx_done_cleanup,
	.get_monitor_addr = ice_get_monitor_addr,
	.timesync_enable = ice_timesync_enable,
	.timesync_read_rx_timestamp = ice_timesync_read_rx_timestamp,
	.timesync_read_tx_timestamp = ice_timesync_read_tx_timestamp,
	.timesync_adjust_time = ice_timesync_adjust_time,
	.timesync_read_time = ice_timesync_read_time,
	.timesync_write_time = ice_timesync_write_time,
	.timesync_disable = ice_timesync_disable,
};
/* store statistics names and their offsets in the stats structure */
struct ice_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	uint64_t offset;
};
static const struct ice_xstats_name_off ice_stats_strings[] = {
	{"rx_unicast_packets", offsetof(struct ice_eth_stats, rx_unicast)},
	{"rx_multicast_packets", offsetof(struct ice_eth_stats, rx_multicast)},
	{"rx_broadcast_packets", offsetof(struct ice_eth_stats, rx_broadcast)},
	{"rx_dropped_packets", offsetof(struct ice_eth_stats, rx_discards)},
	{"rx_unknown_protocol_packets", offsetof(struct ice_eth_stats,
		rx_unknown_protocol)},
	{"tx_unicast_packets", offsetof(struct ice_eth_stats, tx_unicast)},
	{"tx_multicast_packets", offsetof(struct ice_eth_stats, tx_multicast)},
	{"tx_broadcast_packets", offsetof(struct ice_eth_stats, tx_broadcast)},
	{"tx_dropped_packets", offsetof(struct ice_eth_stats, tx_discards)},
};

#define ICE_NB_ETH_XSTATS (sizeof(ice_stats_strings) / \
		sizeof(ice_stats_strings[0]))
static const struct ice_xstats_name_off ice_hw_port_strings[] = {
	{"tx_link_down_dropped", offsetof(struct ice_hw_port_stats,
		tx_dropped_link_down)},
	{"rx_crc_errors", offsetof(struct ice_hw_port_stats, crc_errors)},
	{"rx_illegal_byte_errors", offsetof(struct ice_hw_port_stats,
		illegal_bytes)},
	{"rx_error_bytes", offsetof(struct ice_hw_port_stats, error_bytes)},
	{"mac_local_errors", offsetof(struct ice_hw_port_stats,
		mac_local_faults)},
	{"mac_remote_errors", offsetof(struct ice_hw_port_stats,
		mac_remote_faults)},
	{"rx_len_errors", offsetof(struct ice_hw_port_stats,
		rx_len_errors)},
	{"tx_xon_packets", offsetof(struct ice_hw_port_stats, link_xon_tx)},
	{"rx_xon_packets", offsetof(struct ice_hw_port_stats, link_xon_rx)},
	{"tx_xoff_packets", offsetof(struct ice_hw_port_stats, link_xoff_tx)},
	{"rx_xoff_packets", offsetof(struct ice_hw_port_stats, link_xoff_rx)},
	{"rx_size_64_packets", offsetof(struct ice_hw_port_stats, rx_size_64)},
	{"rx_size_65_to_127_packets", offsetof(struct ice_hw_port_stats,
		rx_size_127)},
	{"rx_size_128_to_255_packets", offsetof(struct ice_hw_port_stats,
		rx_size_255)},
	{"rx_size_256_to_511_packets", offsetof(struct ice_hw_port_stats,
		rx_size_511)},
	{"rx_size_512_to_1023_packets", offsetof(struct ice_hw_port_stats,
		rx_size_1023)},
	{"rx_size_1024_to_1522_packets", offsetof(struct ice_hw_port_stats,
		rx_size_1522)},
	{"rx_size_1523_to_max_packets", offsetof(struct ice_hw_port_stats,
		rx_size_big)},
	{"rx_undersized_errors", offsetof(struct ice_hw_port_stats,
		rx_undersize)},
	{"rx_oversize_errors", offsetof(struct ice_hw_port_stats,
		rx_oversize)},
	{"rx_mac_short_pkt_dropped", offsetof(struct ice_hw_port_stats,
		mac_short_pkt_dropped)},
	{"rx_fragmented_errors", offsetof(struct ice_hw_port_stats,
		rx_fragments)},
	{"rx_jabber_errors", offsetof(struct ice_hw_port_stats, rx_jabber)},
	{"tx_size_64_packets", offsetof(struct ice_hw_port_stats, tx_size_64)},
	{"tx_size_65_to_127_packets", offsetof(struct ice_hw_port_stats,
		tx_size_127)},
	{"tx_size_128_to_255_packets", offsetof(struct ice_hw_port_stats,
		tx_size_255)},
	{"tx_size_256_to_511_packets", offsetof(struct ice_hw_port_stats,
		tx_size_511)},
	{"tx_size_512_to_1023_packets", offsetof(struct ice_hw_port_stats,
		tx_size_1023)},
	{"tx_size_1024_to_1522_packets", offsetof(struct ice_hw_port_stats,
		tx_size_1522)},
	{"tx_size_1523_to_max_packets", offsetof(struct ice_hw_port_stats,
		tx_size_big)},
};

#define ICE_NB_HW_PORT_XSTATS (sizeof(ice_hw_port_strings) / \
		sizeof(ice_hw_port_strings[0]))
static void
ice_init_controlq_parameter(struct ice_hw *hw)
{
	/* fields for adminq */
	hw->adminq.num_rq_entries = ICE_ADMINQ_LEN;
	hw->adminq.num_sq_entries = ICE_ADMINQ_LEN;
	hw->adminq.rq_buf_size = ICE_ADMINQ_BUF_SZ;
	hw->adminq.sq_buf_size = ICE_ADMINQ_BUF_SZ;

	/* fields for mailboxq, DPDK used as PF host */
	hw->mailboxq.num_rq_entries = ICE_MAILBOXQ_LEN;
	hw->mailboxq.num_sq_entries = ICE_MAILBOXQ_LEN;
	hw->mailboxq.rq_buf_size = ICE_MAILBOXQ_BUF_SZ;
	hw->mailboxq.sq_buf_size = ICE_MAILBOXQ_BUF_SZ;

	/* fields for sideband queue */
	hw->sbq.num_rq_entries = ICE_SBQ_LEN;
	hw->sbq.num_sq_entries = ICE_SBQ_LEN;
	hw->sbq.rq_buf_size = ICE_SBQ_MAX_BUF_LEN;
	hw->sbq.sq_buf_size = ICE_SBQ_MAX_BUF_LEN;
}
static int
lookup_proto_xtr_type(const char *xtr_name)
{
	static struct {
		const char *name;
		enum proto_xtr_type type;
	} xtr_type_map[] = {
		{ "vlan",      PROTO_XTR_VLAN      },
		{ "ipv4",      PROTO_XTR_IPV4      },
		{ "ipv6",      PROTO_XTR_IPV6      },
		{ "ipv6_flow", PROTO_XTR_IPV6_FLOW },
		{ "tcp",       PROTO_XTR_TCP       },
		{ "ip_offset", PROTO_XTR_IP_OFFSET },
	};
	uint32_t i;

	for (i = 0; i < RTE_DIM(xtr_type_map); i++) {
		if (strcmp(xtr_name, xtr_type_map[i].name) == 0)
			return xtr_type_map[i].type;
	}

	return -1;
}
/*
 * Parse an element; an element can be a single number, a range, or a
 * '(' ')' group:
 * 1) A single number element is just a digit string, e.g. 9
 * 2) A single range element is two numbers joined by '-', e.g. 2-6
 * 3) A group element combines multiple 1) or 2) inside '( )', e.g. (0,2-4,6)
 *    Within a group element, '-' is used as the range separator and
 *    ',' separates single numbers.
 */
static int
parse_queue_set(const char *input, int xtr_type, struct ice_devargs *devargs)
{
	const char *str = input;
	char *end = NULL;
	uint32_t min, max;
	uint32_t idx;

	while (isblank(*str))
		str++;

	if (!isdigit(*str) && *str != '(')
		return -1;

	/* process single number or single range of number */
	if (*str != '(') {
		errno = 0;
		idx = strtoul(str, &end, 10);
		if (errno || end == NULL || idx >= ICE_MAX_QUEUE_NUM)
			return -1;

		while (isblank(*end))
			end++;

		min = idx;
		max = idx;

		/* process single <number>-<number> */
		if (*end == '-') {
			end++;
			while (isblank(*end))
				end++;
			if (!isdigit(*end))
				return -1;

			errno = 0;
			idx = strtoul(end, &end, 10);
			if (errno || end == NULL || idx >= ICE_MAX_QUEUE_NUM)
				return -1;

			max = idx;
			while (isblank(*end))
				end++;
		}

		if (*end != ':')
			return -1;

		for (idx = RTE_MIN(min, max);
		     idx <= RTE_MAX(min, max); idx++)
			devargs->proto_xtr[idx] = xtr_type;

		return 0;
	}

	/* process set within bracket */
	str++;
	while (isblank(*str))
		str++;
	if (*str == '\0')
		return -1;

	min = ICE_MAX_QUEUE_NUM;
	do {
		/* go ahead to the first digit */
		while (isblank(*str))
			str++;
		if (!isdigit(*str))
			return -1;

		/* get the digit value */
		errno = 0;
		idx = strtoul(str, &end, 10);
		if (errno || end == NULL || idx >= ICE_MAX_QUEUE_NUM)
			return -1;

		/* go ahead to separator '-',',' and ')' */
		while (isblank(*end))
			end++;
		if (*end == '-') {
			if (min == ICE_MAX_QUEUE_NUM)
				min = idx;
			else /* avoid continuous '-' */
				return -1;
		} else if (*end == ',' || *end == ')') {
			max = idx;
			if (min == ICE_MAX_QUEUE_NUM)
				min = idx;

			for (idx = RTE_MIN(min, max);
			     idx <= RTE_MAX(min, max); idx++)
				devargs->proto_xtr[idx] = xtr_type;

			min = ICE_MAX_QUEUE_NUM;
		} else {
			return -1;
		}

		str = end + 1;
	} while (*end != ')' && *end != '\0');

	return 0;
}
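
/*
 * Worked example for parse_queue_set() above (a sketch; queue indices are
 * illustrative): given the element "(0,2-4,6)" and xtr_type PROTO_XTR_TCP,
 * the loop bodies set devargs->proto_xtr[idx] = PROTO_XTR_TCP for
 * idx = 0, 2, 3, 4 and 6, leaving all other queues untouched.
 */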
static int
parse_queue_proto_xtr(const char *queues, struct ice_devargs *devargs)
{
	const char *queue_start;
	uint32_t idx;
	int xtr_type;
	char xtr_name[32];

	while (isblank(*queues))
		queues++;

	if (*queues != '[') {
		xtr_type = lookup_proto_xtr_type(queues);
		if (xtr_type < 0)
			return -1;

		devargs->proto_xtr_dflt = xtr_type;

		return 0;
	}

	queues++;
	do {
		while (isblank(*queues))
			queues++;
		if (*queues == '\0')
			return -1;

		queue_start = queues;

		/* go across a complete bracket */
		if (*queue_start == '(') {
			queues += strcspn(queues, ")");
			if (*queues != ')')
				return -1;
		}

		/* scan the separator ':' */
		queues += strcspn(queues, ":");
		if (*queues++ != ':')
			return -1;
		while (isblank(*queues))
			queues++;

		for (idx = 0; ; idx++) {
			if (isblank(queues[idx]) ||
			    queues[idx] == ',' ||
			    queues[idx] == ']' ||
			    queues[idx] == '\0')
				break;

			if (idx > sizeof(xtr_name) - 2)
				return -1;

			xtr_name[idx] = queues[idx];
		}
		xtr_name[idx] = '\0';
		xtr_type = lookup_proto_xtr_type(xtr_name);
		if (xtr_type < 0)
			return -1;

		queues += idx;

		while (isblank(*queues) || *queues == ',' || *queues == ']')
			queues++;

		if (parse_queue_set(queue_start, xtr_type, devargs) < 0)
			return -1;
	} while (*queues != '\0');

	return 0;
}
static int
handle_proto_xtr_arg(__rte_unused const char *key, const char *value,
		     void *extra_args)
{
	struct ice_devargs *devargs = extra_args;

	if (value == NULL || extra_args == NULL)
		return -EINVAL;

	if (parse_queue_proto_xtr(value, devargs) < 0) {
		PMD_DRV_LOG(ERR,
			    "Invalid protocol extraction parameter: '%s'",
			    value);
		return -1;
	}

	return 0;
}
static void
ice_check_proto_xtr_support(struct ice_hw *hw)
{
#define FLX_REG(val, fld, idx) \
	(((val) & GLFLXP_RXDID_FLX_WRD_##idx##_##fld##_M) >> \
	 GLFLXP_RXDID_FLX_WRD_##idx##_##fld##_S)
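/*
 * For example, FLX_REG(v, PROT_MDID, 4) expands to
 *   ((v & GLFLXP_RXDID_FLX_WRD_4_PROT_MDID_M) >>
 *    GLFLXP_RXDID_FLX_WRD_4_PROT_MDID_S)
 * i.e. it isolates the protocol-ID field of flex word 4 from value 'v'.
 */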
	static struct {
		uint32_t rxdid;
		uint8_t opcode;
		uint8_t protid_0;
		uint8_t protid_1;
	} xtr_sets[] = {
		[PROTO_XTR_VLAN] = { ICE_RXDID_COMMS_AUX_VLAN,
				     ICE_RX_OPC_EXTRACT,
				     ICE_PROT_EVLAN_O, ICE_PROT_VLAN_O},
		[PROTO_XTR_IPV4] = { ICE_RXDID_COMMS_AUX_IPV4,
				     ICE_RX_OPC_EXTRACT,
				     ICE_PROT_IPV4_OF_OR_S,
				     ICE_PROT_IPV4_OF_OR_S },
		[PROTO_XTR_IPV6] = { ICE_RXDID_COMMS_AUX_IPV6,
				     ICE_RX_OPC_EXTRACT,
				     ICE_PROT_IPV6_OF_OR_S,
				     ICE_PROT_IPV6_OF_OR_S },
		[PROTO_XTR_IPV6_FLOW] = { ICE_RXDID_COMMS_AUX_IPV6_FLOW,
					  ICE_RX_OPC_EXTRACT,
					  ICE_PROT_IPV6_OF_OR_S,
					  ICE_PROT_IPV6_OF_OR_S },
		[PROTO_XTR_TCP] = { ICE_RXDID_COMMS_AUX_TCP,
				    ICE_RX_OPC_EXTRACT,
				    ICE_PROT_TCP_IL, ICE_PROT_ID_INVAL },
		[PROTO_XTR_IP_OFFSET] = { ICE_RXDID_COMMS_AUX_IP_OFFSET,
					  ICE_RX_OPC_PROTID,
					  ICE_PROT_IPV4_OF_OR_S,
					  ICE_PROT_IPV6_OF_OR_S },
	};
	uint32_t i;

	for (i = 0; i < RTE_DIM(xtr_sets); i++) {
		uint32_t rxdid = xtr_sets[i].rxdid;
		uint32_t v;

		if (xtr_sets[i].protid_0 != ICE_PROT_ID_INVAL) {
			v = ICE_READ_REG(hw, GLFLXP_RXDID_FLX_WRD_4(rxdid));

			if (FLX_REG(v, PROT_MDID, 4) == xtr_sets[i].protid_0 &&
			    FLX_REG(v, RXDID_OPCODE, 4) == xtr_sets[i].opcode)
				ice_proto_xtr_hw_support[i] = true;
		}

		if (xtr_sets[i].protid_1 != ICE_PROT_ID_INVAL) {
			v = ICE_READ_REG(hw, GLFLXP_RXDID_FLX_WRD_5(rxdid));

			if (FLX_REG(v, PROT_MDID, 5) == xtr_sets[i].protid_1 &&
			    FLX_REG(v, RXDID_OPCODE, 5) == xtr_sets[i].opcode)
				ice_proto_xtr_hw_support[i] = true;
		}
	}
}
static int
ice_res_pool_init(struct ice_res_pool_info *pool, uint32_t base,
		  uint32_t num)
{
	struct pool_entry *entry;

	if (!pool || !num)
		return -EINVAL;

	entry = rte_zmalloc(NULL, sizeof(*entry), 0);
	if (!entry) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory for resource pool");
		return -ENOMEM;
	}

	/* queue heap initialize */
	pool->num_free = num;
	pool->num_alloc = 0;
	pool->base = base;
	LIST_INIT(&pool->alloc_list);
	LIST_INIT(&pool->free_list);

	/* Initialize element */
	entry->base = 0;
	entry->len = num;

	LIST_INSERT_HEAD(&pool->free_list, entry, next);
	return 0;
}
static int
ice_res_pool_alloc(struct ice_res_pool_info *pool,
		   uint16_t num)
{
	struct pool_entry *entry, *valid_entry;

	if (!pool || !num) {
		PMD_INIT_LOG(ERR, "Invalid parameter");
		return -EINVAL;
	}

	if (pool->num_free < num) {
		PMD_INIT_LOG(ERR, "No resource. ask:%u, available:%u",
			     num, pool->num_free);
		return -ENOMEM;
	}

	valid_entry = NULL;
	/* Look up the free list and find the best-fit entry */
	LIST_FOREACH(entry, &pool->free_list, next) {
		if (entry->len >= num) {
			/* Find the best one */
			if (entry->len == num) {
				valid_entry = entry;
				break;
			}
			if (!valid_entry ||
			    valid_entry->len > entry->len)
				valid_entry = entry;
		}
	}

	/* No entry found to satisfy the request, return */
	if (!valid_entry) {
		PMD_INIT_LOG(ERR, "No valid entry found");
		return -ENOMEM;
	}
	/**
	 * The entry has exactly the requested number of queues,
	 * remove it from the free list.
	 */
	if (valid_entry->len == num) {
		LIST_REMOVE(valid_entry, next);
	} else {
		/**
		 * The entry has more queues than requested, create a new
		 * entry for the alloc_list and subtract the allocated base
		 * and number from the free list entry.
		 */
		entry = rte_zmalloc(NULL, sizeof(*entry), 0);
		if (entry == NULL) {
			PMD_INIT_LOG(ERR,
				     "Failed to allocate memory for "
				     "resource pool");
			return -ENOMEM;
		}
		entry->base = valid_entry->base;
		entry->len = num;
		valid_entry->base += num;
		valid_entry->len -= num;
		valid_entry = entry;
	}

	/* Insert it into alloc list, not sorted */
	LIST_INSERT_HEAD(&pool->alloc_list, valid_entry, next);

	pool->num_free -= valid_entry->len;
	pool->num_alloc += valid_entry->len;

	return valid_entry->base + pool->base;
}
static void
ice_res_pool_destroy(struct ice_res_pool_info *pool)
{
	struct pool_entry *entry, *next_entry;

	if (!pool)
		return;

	for (entry = LIST_FIRST(&pool->alloc_list);
	     entry && (next_entry = LIST_NEXT(entry, next), 1);
	     entry = next_entry) {
		LIST_REMOVE(entry, next);
		rte_free(entry);
	}

	for (entry = LIST_FIRST(&pool->free_list);
	     entry && (next_entry = LIST_NEXT(entry, next), 1);
	     entry = next_entry) {
		LIST_REMOVE(entry, next);
		rte_free(entry);
	}

	pool->num_free = 0;
	pool->num_alloc = 0;
	pool->base = 0;
	LIST_INIT(&pool->alloc_list);
	LIST_INIT(&pool->free_list);
}
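
/*
 * Typical life cycle of the resource pool above, mirroring how
 * pf->msix_pool is used later in this file (a sketch; the 'nb' values
 * are illustrative):
 *
 *	ice_res_pool_init(&pf->msix_pool, 1, nb_msix - 1);
 *	int base = ice_res_pool_alloc(&pf->msix_pool, nb_qps);
 *	if (base < 0)
 *		... no contiguous block of nb_qps vectors is free ...
 *	ice_res_pool_destroy(&pf->msix_pool);
 */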
static void
ice_vsi_config_default_rss(struct ice_aqc_vsi_props *info)
{
	/* Set VSI LUT selection */
	info->q_opt_rss = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI &
			  ICE_AQ_VSI_Q_OPT_RSS_LUT_M;
	/* Set Hash scheme */
	info->q_opt_rss |= ICE_AQ_VSI_Q_OPT_RSS_TPLZ &
			   ICE_AQ_VSI_Q_OPT_RSS_HASH_M;
	/* enable TC */
	info->q_opt_tc = ICE_AQ_VSI_Q_OPT_TC_OVR_M;
}
static enum ice_status
ice_vsi_config_tc_queue_mapping(struct ice_vsi *vsi,
				struct ice_aqc_vsi_props *info,
				uint8_t enabled_tcmap)
{
	uint16_t bsf, qp_idx;

	/* Default is TC0 for now; multi-TC support needs to be added later.
	 * Configure TC and queue mapping parameters; for an enabled TC,
	 * allocate qpnum_per_tc queues to this traffic class.
	 */
	if (enabled_tcmap != 0x01) {
		PMD_INIT_LOG(ERR, "only TC0 is supported");
		return -ENOTSUP;
	}

	vsi->nb_qps = RTE_MIN(vsi->nb_qps, ICE_MAX_Q_PER_TC);
	bsf = rte_bsf32(vsi->nb_qps);
	/* Adjust the queue number to actual queues that can be applied */
	vsi->nb_qps = 0x1 << bsf;

	qp_idx = 0;
	/* Set tc and queue mapping with VSI */
	info->tc_mapping[0] = rte_cpu_to_le_16((qp_idx <<
						ICE_AQ_VSI_TC_Q_OFFSET_S) |
					       (bsf << ICE_AQ_VSI_TC_Q_NUM_S));

	/* Associate queue number with VSI */
	info->mapping_flags |= rte_cpu_to_le_16(ICE_AQ_VSI_Q_MAP_CONTIG);
	info->q_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
	info->q_mapping[1] = rte_cpu_to_le_16(vsi->nb_qps);
	info->valid_sections |=
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
	/* Set the info.ingress_table and info.egress_table
	 * for UP translate table. Now just set it to 1:1 map by default
	 * -- 0b 111 110 101 100 011 010 001 000 == 0xFAC688
	 */
#define ICE_TC_QUEUE_TABLE_DFLT 0x00FAC688
	info->ingress_table = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT);
	info->egress_table = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT);
	info->outer_up_table = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT);
	return 0;
}
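
/*
 * Note on the mapping above: the per-TC queue count is encoded as a power
 * of two, so for example nb_qps == 16 gives bsf == 4 and advertises
 * 1 << 4 = 16 queues. Since rte_bsf32() returns the index of the lowest
 * set bit, a non-power-of-two nb_qps is reduced to a smaller power of two
 * before being programmed.
 */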
static int
ice_init_mac_address(struct rte_eth_dev *dev)
{
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (!rte_is_unicast_ether_addr
		((struct rte_ether_addr *)hw->port_info[0].mac.lan_addr)) {
		PMD_INIT_LOG(ERR, "Invalid MAC address");
		return -EINVAL;
	}

	rte_ether_addr_copy(
		(struct rte_ether_addr *)hw->port_info[0].mac.lan_addr,
		(struct rte_ether_addr *)hw->port_info[0].mac.perm_addr);

	dev->data->mac_addrs =
		rte_zmalloc(NULL, sizeof(struct rte_ether_addr) * ICE_NUM_MACADDR_MAX, 0);
	if (!dev->data->mac_addrs) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory to store mac address");
		return -ENOMEM;
	}
	/* store it to dev data */
	rte_ether_addr_copy(
		(struct rte_ether_addr *)hw->port_info[0].mac.perm_addr,
		&dev->data->mac_addrs[0]);
	return 0;
}
/* Find out specific MAC filter */
static struct ice_mac_filter *
ice_find_mac_filter(struct ice_vsi *vsi, struct rte_ether_addr *macaddr)
{
	struct ice_mac_filter *f;

	TAILQ_FOREACH(f, &vsi->mac_list, next) {
		if (rte_is_same_ether_addr(macaddr, &f->mac_info.mac_addr))
			return f;
	}

	return NULL;
}
static int
ice_add_mac_filter(struct ice_vsi *vsi, struct rte_ether_addr *mac_addr)
{
	struct ice_fltr_list_entry *m_list_itr = NULL;
	struct ice_mac_filter *f;
	struct LIST_HEAD_TYPE list_head;
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	int ret = 0;

	/* If it's added and configured, return */
	f = ice_find_mac_filter(vsi, mac_addr);
	if (f) {
		PMD_DRV_LOG(INFO, "This MAC filter already exists.");
		return 0;
	}

	INIT_LIST_HEAD(&list_head);

	m_list_itr = (struct ice_fltr_list_entry *)
		ice_malloc(hw, sizeof(*m_list_itr));
	if (!m_list_itr) {
		ret = -ENOMEM;
		goto DONE;
	}
	ice_memcpy(m_list_itr->fltr_info.l_data.mac.mac_addr,
		   mac_addr, ETH_ALEN, ICE_NONDMA_TO_NONDMA);
	m_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
	m_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
	m_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_MAC;
	m_list_itr->fltr_info.flag = ICE_FLTR_TX;
	m_list_itr->fltr_info.vsi_handle = vsi->idx;

	LIST_ADD(&m_list_itr->list_entry, &list_head);

	/* Add the mac */
	ret = ice_add_mac(hw, &list_head);
	if (ret != ICE_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to add MAC filter");
		ret = -EINVAL;
		goto DONE;
	}
	/* Add the mac addr into mac list */
	f = rte_zmalloc(NULL, sizeof(*f), 0);
	if (!f) {
		PMD_DRV_LOG(ERR, "failed to allocate memory");
		ret = -ENOMEM;
		goto DONE;
	}
	rte_ether_addr_copy(mac_addr, &f->mac_info.mac_addr);
	TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
	vsi->mac_num++;

	ret = 0;

DONE:
	rte_free(m_list_itr);
	return ret;
}
static int
ice_remove_mac_filter(struct ice_vsi *vsi, struct rte_ether_addr *mac_addr)
{
	struct ice_fltr_list_entry *m_list_itr = NULL;
	struct ice_mac_filter *f;
	struct LIST_HEAD_TYPE list_head;
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	int ret = 0;

	/* Can't find it, return an error */
	f = ice_find_mac_filter(vsi, mac_addr);
	if (!f)
		return -EINVAL;

	INIT_LIST_HEAD(&list_head);

	m_list_itr = (struct ice_fltr_list_entry *)
		ice_malloc(hw, sizeof(*m_list_itr));
	if (!m_list_itr) {
		ret = -ENOMEM;
		goto DONE;
	}
	ice_memcpy(m_list_itr->fltr_info.l_data.mac.mac_addr,
		   mac_addr, ETH_ALEN, ICE_NONDMA_TO_NONDMA);
	m_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
	m_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
	m_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_MAC;
	m_list_itr->fltr_info.flag = ICE_FLTR_TX;
	m_list_itr->fltr_info.vsi_handle = vsi->idx;

	LIST_ADD(&m_list_itr->list_entry, &list_head);

	/* remove the mac filter */
	ret = ice_remove_mac(hw, &list_head);
	if (ret != ICE_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to remove MAC filter");
		ret = -EINVAL;
		goto DONE;
	}

	/* Remove the mac addr from mac list */
	TAILQ_REMOVE(&vsi->mac_list, f, next);
	rte_free(f);
	vsi->mac_num--;

	ret = 0;
DONE:
	rte_free(m_list_itr);
	return ret;
}
/* Find out specific VLAN filter */
static struct ice_vlan_filter *
ice_find_vlan_filter(struct ice_vsi *vsi, struct ice_vlan *vlan)
{
	struct ice_vlan_filter *f;

	TAILQ_FOREACH(f, &vsi->vlan_list, next) {
		if (vlan->tpid == f->vlan_info.vlan.tpid &&
		    vlan->vid == f->vlan_info.vlan.vid)
			return f;
	}

	return NULL;
}
static int
ice_add_vlan_filter(struct ice_vsi *vsi, struct ice_vlan *vlan)
{
	struct ice_fltr_list_entry *v_list_itr = NULL;
	struct ice_vlan_filter *f;
	struct LIST_HEAD_TYPE list_head;
	struct ice_hw *hw;
	int ret = 0;

	if (!vsi || vlan->vid > RTE_ETHER_MAX_VLAN_ID)
		return -EINVAL;

	hw = ICE_VSI_TO_HW(vsi);

	/* If it's added and configured, return. */
	f = ice_find_vlan_filter(vsi, vlan);
	if (f) {
		PMD_DRV_LOG(INFO, "This VLAN filter already exists.");
		return 0;
	}

	if (!vsi->vlan_anti_spoof_on && !vsi->vlan_filter_on)
		return 0;

	INIT_LIST_HEAD(&list_head);

	v_list_itr = (struct ice_fltr_list_entry *)
		      ice_malloc(hw, sizeof(*v_list_itr));
	if (!v_list_itr) {
		ret = -ENOMEM;
		goto DONE;
	}
	v_list_itr->fltr_info.l_data.vlan.vlan_id = vlan->vid;
	v_list_itr->fltr_info.l_data.vlan.tpid = vlan->tpid;
	v_list_itr->fltr_info.l_data.vlan.tpid_valid = true;
	v_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
	v_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
	v_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
	v_list_itr->fltr_info.flag = ICE_FLTR_TX;
	v_list_itr->fltr_info.vsi_handle = vsi->idx;

	LIST_ADD(&v_list_itr->list_entry, &list_head);

	/* Add the vlan */
	ret = ice_add_vlan(hw, &list_head);
	if (ret != ICE_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to add VLAN filter");
		ret = -EINVAL;
		goto DONE;
	}

	/* Add vlan into vlan list */
	f = rte_zmalloc(NULL, sizeof(*f), 0);
	if (!f) {
		PMD_DRV_LOG(ERR, "failed to allocate memory");
		ret = -ENOMEM;
		goto DONE;
	}
	f->vlan_info.vlan.tpid = vlan->tpid;
	f->vlan_info.vlan.vid = vlan->vid;
	TAILQ_INSERT_TAIL(&vsi->vlan_list, f, next);
	vsi->vlan_num++;

	ret = 0;

DONE:
	rte_free(v_list_itr);
	return ret;
}
static int
ice_remove_vlan_filter(struct ice_vsi *vsi, struct ice_vlan *vlan)
{
	struct ice_fltr_list_entry *v_list_itr = NULL;
	struct ice_vlan_filter *f;
	struct LIST_HEAD_TYPE list_head;
	struct ice_hw *hw;
	int ret = 0;

	if (!vsi || vlan->vid > RTE_ETHER_MAX_VLAN_ID)
		return -EINVAL;

	hw = ICE_VSI_TO_HW(vsi);

	/* Can't find it, return an error */
	f = ice_find_vlan_filter(vsi, vlan);
	if (!f)
		return -EINVAL;

	INIT_LIST_HEAD(&list_head);

	v_list_itr = (struct ice_fltr_list_entry *)
		      ice_malloc(hw, sizeof(*v_list_itr));
	if (!v_list_itr) {
		ret = -ENOMEM;
		goto DONE;
	}

	v_list_itr->fltr_info.l_data.vlan.vlan_id = vlan->vid;
	v_list_itr->fltr_info.l_data.vlan.tpid = vlan->tpid;
	v_list_itr->fltr_info.l_data.vlan.tpid_valid = true;
	v_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
	v_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
	v_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
	v_list_itr->fltr_info.flag = ICE_FLTR_TX;
	v_list_itr->fltr_info.vsi_handle = vsi->idx;

	LIST_ADD(&v_list_itr->list_entry, &list_head);

	/* remove the vlan filter */
	ret = ice_remove_vlan(hw, &list_head);
	if (ret != ICE_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to remove VLAN filter");
		ret = -EINVAL;
		goto DONE;
	}

	/* Remove the vlan id from vlan list */
	TAILQ_REMOVE(&vsi->vlan_list, f, next);
	rte_free(f);
	vsi->vlan_num--;

	ret = 0;
DONE:
	rte_free(v_list_itr);
	return ret;
}
static int
ice_remove_all_mac_vlan_filters(struct ice_vsi *vsi)
{
	struct ice_mac_filter *m_f;
	struct ice_vlan_filter *v_f;
	void *temp;
	int ret = 0;

	if (!vsi || !vsi->mac_num)
		return -EINVAL;

	RTE_TAILQ_FOREACH_SAFE(m_f, &vsi->mac_list, next, temp) {
		ret = ice_remove_mac_filter(vsi, &m_f->mac_info.mac_addr);
		if (ret != ICE_SUCCESS) {
			ret = -EINVAL;
			goto DONE;
		}
	}

	if (vsi->vlan_num == 0)
		return 0;

	RTE_TAILQ_FOREACH_SAFE(v_f, &vsi->vlan_list, next, temp) {
		ret = ice_remove_vlan_filter(vsi, &v_f->vlan_info.vlan);
		if (ret != ICE_SUCCESS) {
			ret = -EINVAL;
			goto DONE;
		}
	}

DONE:
	return ret;
}
static void
ice_pf_enable_irq0(struct ice_hw *hw)
{
	/* reset the registers */
	ICE_WRITE_REG(hw, PFINT_OICR_ENA, 0);
	ICE_READ_REG(hw, PFINT_OICR);

#ifdef ICE_LSE_SPT
	ICE_WRITE_REG(hw, PFINT_OICR_ENA,
		      (uint32_t)(PFINT_OICR_ENA_INT_ENA_M &
				 (~PFINT_OICR_LINK_STAT_CHANGE_M)));

	ICE_WRITE_REG(hw, PFINT_OICR_CTL,
		      (0 & PFINT_OICR_CTL_MSIX_INDX_M) |
		      ((0 << PFINT_OICR_CTL_ITR_INDX_S) &
		       PFINT_OICR_CTL_ITR_INDX_M) |
		      PFINT_OICR_CTL_CAUSE_ENA_M);

	ICE_WRITE_REG(hw, PFINT_FW_CTL,
		      (0 & PFINT_FW_CTL_MSIX_INDX_M) |
		      ((0 << PFINT_FW_CTL_ITR_INDX_S) &
		       PFINT_FW_CTL_ITR_INDX_M) |
		      PFINT_FW_CTL_CAUSE_ENA_M);
#else
	ICE_WRITE_REG(hw, PFINT_OICR_ENA, PFINT_OICR_ENA_INT_ENA_M);
#endif

	ICE_WRITE_REG(hw, GLINT_DYN_CTL(0),
		      GLINT_DYN_CTL_INTENA_M |
		      GLINT_DYN_CTL_CLEARPBA_M |
		      GLINT_DYN_CTL_ITR_INDX_M);

	ice_flush(hw);
}
static void
ice_pf_disable_irq0(struct ice_hw *hw)
{
	/* Disable all interrupt types */
	ICE_WRITE_REG(hw, GLINT_DYN_CTL(0), GLINT_DYN_CTL_WB_ON_ITR_M);
	ice_flush(hw);
}
static void
ice_handle_aq_msg(struct rte_eth_dev *dev)
{
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_ctl_q_info *cq = &hw->adminq;
	struct ice_rq_event_info event;
	uint16_t pending, opcode;
	int ret;

	event.buf_len = ICE_AQ_MAX_BUF_LEN;
	event.msg_buf = rte_zmalloc(NULL, event.buf_len, 0);
	if (!event.msg_buf) {
		PMD_DRV_LOG(ERR, "Failed to allocate mem");
		return;
	}

	pending = 1;
	while (pending) {
		ret = ice_clean_rq_elem(hw, cq, &event, &pending);

		if (ret != ICE_SUCCESS) {
			PMD_DRV_LOG(INFO,
				    "Failed to read msg from AdminQ, "
				    "adminq_err: %u",
				    hw->adminq.sq_last_status);
			break;
		}
		opcode = rte_le_to_cpu_16(event.desc.opcode);

		switch (opcode) {
		case ice_aqc_opc_get_link_status:
			ret = ice_link_update(dev, 0);
			if (!ret)
				rte_eth_dev_callback_process
					(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
			break;
		default:
			PMD_DRV_LOG(DEBUG, "Request %u is not supported yet",
				    opcode);
			break;
		}
	}
	rte_free(event.msg_buf);
}
/**
 * Interrupt handler triggered by NIC for handling
 * specific interrupt.
 *
 * @param handle
 *  Pointer to interrupt handle.
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
ice_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t oicr;
	uint32_t reg;
	uint8_t pf_num;
	uint8_t event;
	uint16_t queue;
	int ret;
#ifdef ICE_LSE_SPT
	uint32_t int_fw_ctl;
#endif

	/* Disable interrupt */
	ice_pf_disable_irq0(hw);

	/* read out interrupt causes */
	oicr = ICE_READ_REG(hw, PFINT_OICR);
#ifdef ICE_LSE_SPT
	int_fw_ctl = ICE_READ_REG(hw, PFINT_FW_CTL);
#endif

	/* No interrupt event indicated */
	if (!(oicr & PFINT_OICR_INTEVENT_M)) {
		PMD_DRV_LOG(INFO, "No interrupt event");
		goto done;
	}

#ifdef ICE_LSE_SPT
	if (int_fw_ctl & PFINT_FW_CTL_INTEVENT_M) {
		PMD_DRV_LOG(INFO, "FW_CTL: link state change event");
		ice_handle_aq_msg(dev);
	}
#else
	if (oicr & PFINT_OICR_LINK_STAT_CHANGE_M) {
		PMD_DRV_LOG(INFO, "OICR: link state change event");
		ret = ice_link_update(dev, 0);
		if (!ret)
			rte_eth_dev_callback_process
				(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
	}
#endif

	if (oicr & PFINT_OICR_MAL_DETECT_M) {
		PMD_DRV_LOG(WARNING, "OICR: MDD event");
		reg = ICE_READ_REG(hw, GL_MDET_TX_PQM);
		if (reg & GL_MDET_TX_PQM_VALID_M) {
			pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >>
				 GL_MDET_TX_PQM_PF_NUM_S;
			event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >>
				GL_MDET_TX_PQM_MAL_TYPE_S;
			queue = (reg & GL_MDET_TX_PQM_QNUM_M) >>
				GL_MDET_TX_PQM_QNUM_S;

			PMD_DRV_LOG(WARNING, "Malicious Driver Detection event "
				    "%d by PQM on TX queue %d PF# %d",
				    event, queue, pf_num);
		}

		reg = ICE_READ_REG(hw, GL_MDET_TX_TCLAN);
		if (reg & GL_MDET_TX_TCLAN_VALID_M) {
			pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >>
				 GL_MDET_TX_TCLAN_PF_NUM_S;
			event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >>
				GL_MDET_TX_TCLAN_MAL_TYPE_S;
			queue = (reg & GL_MDET_TX_TCLAN_QNUM_M) >>
				GL_MDET_TX_TCLAN_QNUM_S;

			PMD_DRV_LOG(WARNING, "Malicious Driver Detection event "
				    "%d by TCLAN on TX queue %d PF# %d",
				    event, queue, pf_num);
		}
	}
done:
	/* Enable interrupt */
	ice_pf_enable_irq0(hw);
	rte_intr_ack(dev->intr_handle);
}
static void
ice_init_proto_xtr(struct rte_eth_dev *dev)
{
	struct ice_adapter *ad =
			ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	const struct proto_xtr_ol_flag *ol_flag;
	bool proto_xtr_enable = false;
	int offset;
	uint16_t i;

	pf->proto_xtr = rte_zmalloc(NULL, pf->lan_nb_qps, 0);
	if (unlikely(pf->proto_xtr == NULL)) {
		PMD_DRV_LOG(ERR, "No memory for setting up protocol extraction table");
		return;
	}

	for (i = 0; i < pf->lan_nb_qps; i++) {
		pf->proto_xtr[i] = ad->devargs.proto_xtr[i] != PROTO_XTR_NONE ?
				   ad->devargs.proto_xtr[i] :
				   ad->devargs.proto_xtr_dflt;

		if (pf->proto_xtr[i] != PROTO_XTR_NONE) {
			uint8_t type = pf->proto_xtr[i];

			ice_proto_xtr_ol_flag_params[type].required = true;
			proto_xtr_enable = true;
		}
	}

	if (likely(!proto_xtr_enable))
		return;

	ice_check_proto_xtr_support(hw);

	offset = rte_mbuf_dynfield_register(&ice_proto_xtr_metadata_param);
	if (unlikely(offset == -1)) {
		PMD_DRV_LOG(ERR,
			    "Protocol extraction metadata is disabled in mbuf with error %d",
			    -rte_errno);
		return;
	}

	PMD_DRV_LOG(DEBUG,
		    "Protocol extraction metadata offset in mbuf is : %d",
		    offset);
	rte_net_ice_dynfield_proto_xtr_metadata_offs = offset;

	for (i = 0; i < RTE_DIM(ice_proto_xtr_ol_flag_params); i++) {
		ol_flag = &ice_proto_xtr_ol_flag_params[i];

		if (!ol_flag->required)
			continue;

		if (!ice_proto_xtr_hw_support[i]) {
			PMD_DRV_LOG(ERR,
				    "Protocol extraction type %u is not supported in hardware",
				    i);
			rte_net_ice_dynfield_proto_xtr_metadata_offs = -1;
			break;
		}

		offset = rte_mbuf_dynflag_register(&ol_flag->param);
		if (unlikely(offset == -1)) {
			PMD_DRV_LOG(ERR,
				    "Protocol extraction offload '%s' failed to register with error %d",
				    ol_flag->param.name, -rte_errno);

			rte_net_ice_dynfield_proto_xtr_metadata_offs = -1;
			break;
		}

		PMD_DRV_LOG(DEBUG,
			    "Protocol extraction offload '%s' offset in mbuf is : %d",
			    ol_flag->param.name, offset);
		*ol_flag->ol_flag = 1ULL << offset;
	}
}
/* Initialize SW parameters of PF */
static int
ice_pf_sw_init(struct rte_eth_dev *dev)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_hw *hw = ICE_PF_TO_HW(pf);

	pf->lan_nb_qp_max =
		(uint16_t)RTE_MIN(hw->func_caps.common_cap.num_txq,
				  hw->func_caps.common_cap.num_rxq);

	pf->lan_nb_qps = pf->lan_nb_qp_max;

	ice_init_proto_xtr(dev);

	if (hw->func_caps.fd_fltr_guar > 0 ||
	    hw->func_caps.fd_fltr_best_effort > 0) {
		pf->flags |= ICE_FLAG_FDIR;
		pf->fdir_nb_qps = ICE_DEFAULT_QP_NUM_FDIR;
		pf->lan_nb_qps = pf->lan_nb_qp_max - pf->fdir_nb_qps;
	} else {
		pf->fdir_nb_qps = 0;
	}
	pf->fdir_qp_offset = 0;

	return 0;
}
static struct ice_vsi *
ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_vsi *vsi = NULL;
	struct ice_vsi_ctx vsi_ctx;
	int ret;
	struct rte_ether_addr broadcast = {
		.addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff} };
	struct rte_ether_addr mac_addr;
	uint16_t max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
	uint8_t tc_bitmap = 0x1;
	uint16_t cfg;

	/* hw->num_lports = 1 in NIC mode */
	vsi = rte_zmalloc(NULL, sizeof(struct ice_vsi), 0);
	if (!vsi)
		return NULL;

	vsi->idx = pf->next_vsi_idx;
	pf->next_vsi_idx++;
	vsi->type = type;
	vsi->adapter = ICE_PF_TO_ADAPTER(pf);
	vsi->max_macaddrs = ICE_NUM_MACADDR_MAX;
	vsi->vlan_anti_spoof_on = 0;
	vsi->vlan_filter_on = 1;
	TAILQ_INIT(&vsi->mac_list);
	TAILQ_INIT(&vsi->vlan_list);

	/* Be sync with RTE_ETH_RSS_RETA_SIZE_x maximum value definition */
	pf->hash_lut_size = hw->func_caps.common_cap.rss_table_size >
			RTE_ETH_RSS_RETA_SIZE_512 ? RTE_ETH_RSS_RETA_SIZE_512 :
			hw->func_caps.common_cap.rss_table_size;
	pf->flags |= ICE_FLAG_RSS_AQ_CAPABLE;

	memset(&vsi_ctx, 0, sizeof(vsi_ctx));
	switch (type) {
	case ICE_VSI_PF:
		vsi->nb_qps = pf->lan_nb_qps;
		vsi->base_queue = 1;
		ice_vsi_config_default_rss(&vsi_ctx.info);
		vsi_ctx.alloc_from_pool = true;
		vsi_ctx.flags = ICE_AQ_VSI_TYPE_PF;
		/* switch_id is queried by get_switch_config aq, which is done
		 * by ice_init_hw
		 */
		vsi_ctx.info.sw_id = hw->port_info->sw_id;
		vsi_ctx.info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
		/* Allow all untagged or tagged packets */
		vsi_ctx.info.inner_vlan_flags = ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL;
		vsi_ctx.info.inner_vlan_flags |= ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING;
		vsi_ctx.info.q_opt_rss = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF |
					 ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
		if (ice_is_dvm_ena(hw)) {
			vsi_ctx.info.outer_vlan_flags =
				(ICE_AQ_VSI_OUTER_VLAN_TX_MODE_ALL <<
				 ICE_AQ_VSI_OUTER_VLAN_TX_MODE_S) &
				ICE_AQ_VSI_OUTER_VLAN_TX_MODE_M;
			vsi_ctx.info.outer_vlan_flags |=
				(ICE_AQ_VSI_OUTER_TAG_VLAN_8100 <<
				 ICE_AQ_VSI_OUTER_TAG_TYPE_S) &
				ICE_AQ_VSI_OUTER_TAG_TYPE_M;
		}

		/* FDIR */
		cfg = ICE_AQ_VSI_PROP_SECURITY_VALID |
			ICE_AQ_VSI_PROP_FLOW_DIR_VALID;
		vsi_ctx.info.valid_sections |= rte_cpu_to_le_16(cfg);
		cfg = ICE_AQ_VSI_FD_ENABLE;
		vsi_ctx.info.fd_options = rte_cpu_to_le_16(cfg);
		vsi_ctx.info.max_fd_fltr_dedicated =
			rte_cpu_to_le_16(hw->func_caps.fd_fltr_guar);
		vsi_ctx.info.max_fd_fltr_shared =
			rte_cpu_to_le_16(hw->func_caps.fd_fltr_best_effort);

		/* Enable VLAN/UP trip */
		ret = ice_vsi_config_tc_queue_mapping(vsi,
						      &vsi_ctx.info,
						      ICE_DEFAULT_TCMAP);
		if (ret) {
			PMD_INIT_LOG(ERR,
				     "tc queue mapping with vsi failed, "
				     "err = %d",
				     ret);
			goto fail_mem;
		}

		break;
	case ICE_VSI_CTRL:
		vsi->nb_qps = pf->fdir_nb_qps;
		vsi->base_queue = ICE_FDIR_QUEUE_ID;
		vsi_ctx.alloc_from_pool = true;
		vsi_ctx.flags = ICE_AQ_VSI_TYPE_PF;

		cfg = ICE_AQ_VSI_PROP_FLOW_DIR_VALID;
		vsi_ctx.info.valid_sections |= rte_cpu_to_le_16(cfg);
		cfg = ICE_AQ_VSI_FD_PROG_ENABLE;
		vsi_ctx.info.fd_options = rte_cpu_to_le_16(cfg);
		vsi_ctx.info.sw_id = hw->port_info->sw_id;
		vsi_ctx.info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
		ret = ice_vsi_config_tc_queue_mapping(vsi,
						      &vsi_ctx.info,
						      ICE_DEFAULT_TCMAP);
		if (ret) {
			PMD_INIT_LOG(ERR,
				     "tc queue mapping with vsi failed, "
				     "err = %d",
				     ret);
			goto fail_mem;
		}
		break;
	default:
		/* for other types of VSI */
		PMD_INIT_LOG(ERR, "other types of VSI not supported");
		goto fail_mem;
	}

	/* VF has MSIX interrupt in VF range, don't allocate here */
	if (type == ICE_VSI_PF) {
		ret = ice_res_pool_alloc(&pf->msix_pool,
					 RTE_MIN(vsi->nb_qps,
						 RTE_MAX_RXTX_INTR_VEC_ID));
		if (ret < 0) {
			PMD_INIT_LOG(ERR, "VSI MAIN %d get heap failed %d",
				     vsi->vsi_id, ret);
		}
		vsi->msix_intr = ret;
		vsi->nb_msix = RTE_MIN(vsi->nb_qps, RTE_MAX_RXTX_INTR_VEC_ID);
	} else if (type == ICE_VSI_CTRL) {
		ret = ice_res_pool_alloc(&pf->msix_pool, 1);
		if (ret < 0) {
			PMD_DRV_LOG(ERR, "VSI %d get heap failed %d",
				    vsi->vsi_id, ret);
		}
		vsi->msix_intr = ret;
		vsi->nb_msix = 1;
	} else {
		vsi->msix_intr = 0;
		vsi->nb_msix = 0;
	}
	ret = ice_add_vsi(hw, vsi->idx, &vsi_ctx, NULL);
	if (ret != ICE_SUCCESS) {
		PMD_INIT_LOG(ERR, "add vsi failed, err = %d", ret);
		goto fail_mem;
	}
	/* store vsi information in SW structure */
	vsi->vsi_id = vsi_ctx.vsi_num;
	vsi->info = vsi_ctx.info;
	pf->vsis_allocated = vsi_ctx.vsis_allocd;
	pf->vsis_unallocated = vsi_ctx.vsis_unallocated;

	if (type == ICE_VSI_PF) {
		/* MAC configuration */
		rte_ether_addr_copy((struct rte_ether_addr *)
					hw->port_info->mac.perm_addr,
				    &pf->dev_addr);

		rte_ether_addr_copy(&pf->dev_addr, &mac_addr);
		ret = ice_add_mac_filter(vsi, &mac_addr);
		if (ret != ICE_SUCCESS)
			PMD_INIT_LOG(ERR, "Failed to add dflt MAC filter");

		rte_ether_addr_copy(&broadcast, &mac_addr);
		ret = ice_add_mac_filter(vsi, &mac_addr);
		if (ret != ICE_SUCCESS)
			PMD_INIT_LOG(ERR, "Failed to add MAC filter");
	}

	/* At the beginning, only TC0. */
	/* What we need here is the maximum number of the TX queues.
	 * Currently vsi->nb_qps means it.
	 * Correct it if any change.
	 */
	max_txqs[0] = vsi->nb_qps;
	ret = ice_cfg_vsi_lan(hw->port_info, vsi->idx,
			      tc_bitmap, max_txqs);
	if (ret != ICE_SUCCESS)
		PMD_INIT_LOG(ERR, "Failed to config vsi sched");

	return vsi;
fail_mem:
	rte_free(vsi);
	pf->next_vsi_idx--;
	return NULL;
}
static int
ice_send_driver_ver(struct ice_hw *hw)
{
	struct ice_driver_ver dv;

	/* we don't have a driver version yet, use 0 as a dummy */
	dv.major_ver = 0;
	dv.minor_ver = 0;
	dv.build_ver = 0;
	dv.subbuild_ver = 0;
	strncpy((char *)dv.driver_string, "dpdk", sizeof(dv.driver_string));

	return ice_aq_send_driver_ver(hw, &dv, NULL);
}
static int
ice_pf_setup(struct ice_pf *pf)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_vsi *vsi;
	uint16_t unused;

	/* Clear all stats counters */
	pf->offset_loaded = false;
	memset(&pf->stats, 0, sizeof(struct ice_hw_port_stats));
	memset(&pf->stats_offset, 0, sizeof(struct ice_hw_port_stats));
	memset(&pf->internal_stats, 0, sizeof(struct ice_eth_stats));
	memset(&pf->internal_stats_offset, 0, sizeof(struct ice_eth_stats));

	/* force guaranteed filter pool for PF */
	ice_alloc_fd_guar_item(hw, &unused,
			       hw->func_caps.fd_fltr_guar);
	/* force shared filter pool for PF */
	ice_alloc_fd_shrd_item(hw, &unused,
			       hw->func_caps.fd_fltr_best_effort);

	vsi = ice_setup_vsi(pf, ICE_VSI_PF);
	if (!vsi) {
		PMD_INIT_LOG(ERR, "Failed to add vsi for PF");
		return -EINVAL;
	}

	pf->main_vsi = vsi;

	return 0;
}
static enum ice_pkg_type
ice_load_pkg_type(struct ice_hw *hw)
{
	enum ice_pkg_type package_type;

	/* store the activated package type (OS default or Comms) */
	if (!strncmp((char *)hw->active_pkg_name, ICE_OS_DEFAULT_PKG_NAME,
		     ICE_PKG_NAME_SIZE))
		package_type = ICE_PKG_TYPE_OS_DEFAULT;
	else if (!strncmp((char *)hw->active_pkg_name, ICE_COMMS_PKG_NAME,
			  ICE_PKG_NAME_SIZE))
		package_type = ICE_PKG_TYPE_COMMS;
	else
		package_type = ICE_PKG_TYPE_UNKNOWN;

	PMD_INIT_LOG(NOTICE, "Active package is: %d.%d.%d.%d, %s (%s VLAN mode)",
		     hw->active_pkg_ver.major, hw->active_pkg_ver.minor,
		     hw->active_pkg_ver.update, hw->active_pkg_ver.draft,
		     hw->active_pkg_name,
		     ice_is_dvm_ena(hw) ? "double" : "single");

	return package_type;
}
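
/*
 * Load the DDP package. The search order implemented below is:
 *   1) the updates directory with a DSN-specific name, ice-<DSN>.pkg
 *      (only when the PCI device serial number could be read)
 *   2) the default directory with the same DSN-specific name
 *   3) the generic update package ICE_PKG_FILE_UPDATES
 *   4) the generic default package ICE_PKG_FILE_DEFAULT
 */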
int ice_load_pkg(struct ice_adapter *adapter, bool use_dsn, uint64_t dsn)
{
	struct ice_hw *hw = &adapter->hw;
	char pkg_file[ICE_MAX_PKG_FILENAME_SIZE];
	char opt_ddp_filename[ICE_MAX_PKG_FILENAME_SIZE];
	void *buf;
	size_t bufsz;
	int err;

	if (!use_dsn)
		goto no_dsn;

	memset(opt_ddp_filename, 0, ICE_MAX_PKG_FILENAME_SIZE);
	snprintf(opt_ddp_filename, ICE_MAX_PKG_FILENAME_SIZE,
		 "ice-%016" PRIx64 ".pkg", dsn);
	strncpy(pkg_file, ICE_PKG_FILE_SEARCH_PATH_UPDATES,
		ICE_MAX_PKG_FILENAME_SIZE);
	strcat(pkg_file, opt_ddp_filename);
	if (rte_firmware_read(pkg_file, &buf, &bufsz) == 0)
		goto load_fw;

	strncpy(pkg_file, ICE_PKG_FILE_SEARCH_PATH_DEFAULT,
		ICE_MAX_PKG_FILENAME_SIZE);
	strcat(pkg_file, opt_ddp_filename);
	if (rte_firmware_read(pkg_file, &buf, &bufsz) == 0)
		goto load_fw;

no_dsn:
	strncpy(pkg_file, ICE_PKG_FILE_UPDATES, ICE_MAX_PKG_FILENAME_SIZE);
	if (rte_firmware_read(pkg_file, &buf, &bufsz) == 0)
		goto load_fw;

	strncpy(pkg_file, ICE_PKG_FILE_DEFAULT, ICE_MAX_PKG_FILENAME_SIZE);
	if (rte_firmware_read(pkg_file, &buf, &bufsz) < 0) {
		PMD_INIT_LOG(ERR, "failed to search file path\n");
		return -1;
	}

load_fw:
	PMD_INIT_LOG(DEBUG, "DDP package name: %s", pkg_file);

	err = ice_copy_and_init_pkg(hw, buf, bufsz);
	if (err) {
		PMD_INIT_LOG(ERR, "ice_copy_and_init_hw failed: %d\n", err);
		goto out;
	}

	/* store the loaded pkg type info */
	adapter->active_pkg_type = ice_load_pkg_type(hw);

out:
	free(buf);
	return err;
}
static void
ice_base_queue_get(struct ice_pf *pf)
{
	uint32_t reg;
	struct ice_hw *hw = ICE_PF_TO_HW(pf);

	reg = ICE_READ_REG(hw, PFLAN_RX_QALLOC);
	if (reg & PFLAN_RX_QALLOC_VALID_M) {
		pf->base_queue = reg & PFLAN_RX_QALLOC_FIRSTQ_M;
	} else {
		PMD_INIT_LOG(WARNING, "Failed to get Rx base queue"
					" index");
	}
}
static int
parse_bool(const char *key, const char *value, void *args)
{
	int *i = (int *)args;
	char *end;
	int num;

	num = strtoul(value, &end, 10);

	if (num != 0 && num != 1) {
		PMD_DRV_LOG(WARNING, "invalid value:\"%s\" for key:\"%s\", "
			"value must be 0 or 1",
			value, key);
		return -1;
	}

	*i = num;
	return 0;
}
static int
parse_u64(const char *key, const char *value, void *args)
{
	u64 *num = (u64 *)args;
	u64 tmp;

	errno = 0;
	tmp = strtoull(value, NULL, 16);
	if (errno) {
		PMD_DRV_LOG(WARNING, "%s: \"%s\" is not a valid u64",
			    key, value);
		return -1;
	}

	*num = tmp;

	return 0;
}
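
/*
 * Example: the devargs 'hw_debug_mask=0x80' reaches parse_u64() above with
 * value = "0x80", and strtoull(..., 16) stores 0x80 in the target mask.
 * Note the value is always interpreted as hexadecimal.
 */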
static int
lookup_pps_type(const char *pps_name)
{
	static struct {
		const char *name;
		enum pps_type type;
	} pps_type_map[] = {
		{ "pin",  PPS_PIN  },
	};

	int i;

	for (i = 0; i < RTE_DIM(pps_type_map); i++) {
		if (strcmp(pps_name, pps_type_map[i].name) == 0)
			return pps_type_map[i].type;
	}

	return -1;
}
static int
parse_pin_set(const char *input, int pps_type, struct ice_devargs *devargs)
{
	const char *str = input;
	char *end = NULL;
	uint32_t idx;

	while (isblank(*str))
		str++;

	if (!isdigit(*str))
		return -1;

	if (pps_type == PPS_PIN) {
		idx = strtoul(str, &end, 10);
		if (end == NULL || idx >= ICE_MAX_PIN_NUM)
			return -1;
		while (isblank(*end))
			end++;
		if (*end != ']')
			return -1;

		devargs->pin_idx = idx;
		devargs->pps_out_ena = 1;

		return 0;
	}

	return -1;
}
static int
parse_pps_out_parameter(const char *pins, struct ice_devargs *devargs)
{
	const char *pin_start;
	uint32_t idx;
	int pps_type;
	char pps_name[32];

	while (isblank(*pins))
		pins++;

	pins++;
	while (isblank(*pins))
		pins++;
	if (*pins == '\0')
		return -1;

	for (idx = 0; ; idx++) {
		if (isblank(pins[idx]) ||
		    pins[idx] == ':' ||
		    pins[idx] == '\0')
			break;

		pps_name[idx] = pins[idx];
	}
	pps_name[idx] = '\0';
	pps_type = lookup_pps_type(pps_name);
	if (pps_type < 0)
		return -1;

	pins += idx;

	pins += strcspn(pins, ":");
	if (*pins++ != ':')
		return -1;
	while (isblank(*pins))
		pins++;

	pin_start = pins;

	while (isblank(*pins))
		pins++;

	if (parse_pin_set(pin_start, pps_type, devargs) < 0)
		return -1;

	return 0;
}
static int
handle_pps_out_arg(__rte_unused const char *key, const char *value,
		   void *extra_args)
{
	struct ice_devargs *devargs = extra_args;

	if (value == NULL || extra_args == NULL)
		return -EINVAL;

	if (parse_pps_out_parameter(value, devargs) < 0) {
		PMD_DRV_LOG(ERR,
			    "Invalid GPIO pin parameter: '%s'",
			    value);
		return -1;
	}

	return 0;
}
static int ice_parse_devargs(struct rte_eth_dev *dev)
{
	struct ice_adapter *ad =
		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct rte_devargs *devargs = dev->device->devargs;
	struct rte_kvargs *kvlist;
	int ret;

	if (devargs == NULL)
		return 0;

	kvlist = rte_kvargs_parse(devargs->args, ice_valid_args);
	if (kvlist == NULL) {
		PMD_INIT_LOG(ERR, "Invalid kvargs key\n");
		return -EINVAL;
	}

	ad->devargs.proto_xtr_dflt = PROTO_XTR_NONE;
	memset(ad->devargs.proto_xtr, PROTO_XTR_NONE,
	       sizeof(ad->devargs.proto_xtr));

	ret = rte_kvargs_process(kvlist, ICE_PROTO_XTR_ARG,
				 &handle_proto_xtr_arg, &ad->devargs);
	if (ret)
		goto bail;

	ret = rte_kvargs_process(kvlist, ICE_SAFE_MODE_SUPPORT_ARG,
				 &parse_bool, &ad->devargs.safe_mode_support);
	if (ret)
		goto bail;

	ret = rte_kvargs_process(kvlist, ICE_PIPELINE_MODE_SUPPORT_ARG,
				 &parse_bool, &ad->devargs.pipe_mode_support);
	if (ret)
		goto bail;

	ret = rte_kvargs_process(kvlist, ICE_HW_DEBUG_MASK_ARG,
				 &parse_u64, &ad->hw.debug_mask);
	if (ret)
		goto bail;

	ret = rte_kvargs_process(kvlist, ICE_ONE_PPS_OUT_ARG,
				 &handle_pps_out_arg, &ad->devargs);
	if (ret)
		goto bail;

	ret = rte_kvargs_process(kvlist, ICE_RX_LOW_LATENCY_ARG,
				 &parse_bool, &ad->devargs.rx_low_latency);

bail:
	rte_kvargs_free(kvlist);
	return ret;
}
/* Forward LLDP packets to the default VSI by setting switch rules */
static int
ice_vsi_config_sw_lldp(struct ice_vsi *vsi, bool on)
{
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	struct ice_fltr_list_entry *s_list_itr = NULL;
	struct LIST_HEAD_TYPE list_head;
	int ret = 0;

	INIT_LIST_HEAD(&list_head);

	s_list_itr = (struct ice_fltr_list_entry *)
			ice_malloc(hw, sizeof(*s_list_itr));
	if (!s_list_itr)
		return -ENOMEM;
	s_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_ETHERTYPE;
	s_list_itr->fltr_info.vsi_handle = vsi->idx;
	s_list_itr->fltr_info.l_data.ethertype_mac.ethertype =
			RTE_ETHER_TYPE_LLDP;
	s_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
	s_list_itr->fltr_info.flag = ICE_FLTR_RX;
	s_list_itr->fltr_info.src_id = ICE_SRC_ID_LPORT;
	LIST_ADD(&s_list_itr->list_entry, &list_head);
	if (on)
		ret = ice_add_eth_mac(hw, &list_head);
	else
		ret = ice_remove_eth_mac(hw, &list_head);

	rte_free(s_list_itr);
	return ret;
}
static enum ice_status
ice_get_hw_res(struct ice_hw *hw, uint16_t res_type,
	       uint16_t num, uint16_t desc_id,
	       uint16_t *prof_buf, uint16_t *num_prof)
{
	struct ice_aqc_res_elem *resp_buf;
	int ret;
	uint16_t buf_len;
	bool res_shared = 1;
	struct ice_aq_desc aq_desc;
	struct ice_sq_cd *cd = NULL;
	struct ice_aqc_get_allocd_res_desc *cmd =
			&aq_desc.params.get_res_desc;

	buf_len = sizeof(*resp_buf) * num;
	resp_buf = ice_malloc(hw, buf_len);
	if (!resp_buf)
		return -ENOMEM;

	ice_fill_dflt_direct_cmd_desc(&aq_desc,
				      ice_aqc_opc_get_allocd_res_desc);

	cmd->ops.cmd.res = CPU_TO_LE16(((res_type << ICE_AQC_RES_TYPE_S) &
					ICE_AQC_RES_TYPE_M) | (res_shared ?
					ICE_AQC_RES_TYPE_FLAG_SHARED : 0));
	cmd->ops.cmd.first_desc = CPU_TO_LE16(desc_id);

	ret = ice_aq_send_cmd(hw, &aq_desc, resp_buf, buf_len, cd);
	if (!ret)
		*num_prof = LE16_TO_CPU(cmd->ops.resp.num_desc);
	else
		goto exit;

	ice_memcpy(prof_buf, resp_buf, sizeof(*resp_buf) *
			(*num_prof), ICE_NONDMA_TO_NONDMA);

exit:
	rte_free(resp_buf);
	return ret;
}
static int
ice_cleanup_resource(struct ice_hw *hw, uint16_t res_type)
{
	int ret;
	uint16_t prof_id;
	uint16_t prof_buf[ICE_MAX_RES_DESC_NUM];
	uint16_t first_desc = 1;
	uint16_t num_prof = 0;

	ret = ice_get_hw_res(hw, res_type, ICE_MAX_RES_DESC_NUM,
			     first_desc, prof_buf, &num_prof);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to get fxp resource");
		return ret;
	}

	for (prof_id = 0; prof_id < num_prof; prof_id++) {
		ret = ice_free_hw_res(hw, res_type, 1, &prof_buf[prof_id]);
		if (ret) {
			PMD_INIT_LOG(ERR, "Failed to free fxp resource");
			return ret;
		}
	}
	return 0;
}
static int
ice_reset_fxp_resource(struct ice_hw *hw)
{
	int ret;

	ret = ice_cleanup_resource(hw, ICE_AQC_RES_TYPE_FD_PROF_BLDR_PROFID);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to clean up fdir resource");
		return ret;
	}

	ret = ice_cleanup_resource(hw, ICE_AQC_RES_TYPE_HASH_PROF_BLDR_PROFID);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to clean up rss resource");
		return ret;
	}

	return 0;
}
static void
ice_rss_ctx_init(struct ice_pf *pf)
{
	memset(&pf->hash_ctx, 0, sizeof(pf->hash_ctx));
}
static uint64_t
ice_get_supported_rxdid(struct ice_hw *hw)
{
	uint64_t supported_rxdid = 0; /* bitmap for supported RXDID */
	uint32_t regval;
	int i;

	supported_rxdid |= BIT(ICE_RXDID_LEGACY_1);

	for (i = ICE_RXDID_FLEX_NIC; i < ICE_FLEX_DESC_RXDID_MAX_NUM; i++) {
		regval = ICE_READ_REG(hw, GLFLXP_RXDID_FLAGS(i, 0));
		if ((regval >> GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S)
			& GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M)
			supported_rxdid |= BIT(i);
	}
	return supported_rxdid;
}
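
/*
 * The value returned above is a bitmap: bit n set means Rx descriptor ID n
 * can be used on this device. Legacy RXDID 1 is always reported; the
 * flexible descriptor IDs are probed from GLFLXP_RXDID_FLAGS.
 */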
static int
ice_dev_init(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev;
	struct rte_intr_handle *intr_handle;
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_adapter *ad =
		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct ice_vsi *vsi;
	int ret;
#ifndef RTE_EXEC_ENV_WINDOWS
	off_t pos;
	uint32_t dsn_low, dsn_high;
	uint64_t dsn;
	bool use_dsn;
#endif

	dev->dev_ops = &ice_eth_dev_ops;
	dev->rx_queue_count = ice_rx_queue_count;
	dev->rx_descriptor_status = ice_rx_descriptor_status;
	dev->tx_descriptor_status = ice_tx_descriptor_status;
	dev->rx_pkt_burst = ice_recv_pkts;
	dev->tx_pkt_burst = ice_xmit_pkts;
	dev->tx_pkt_prepare = ice_prep_pkts;

	/* for secondary processes, we don't initialise any further as primary
	 * has already done this work.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		ice_set_rx_function(dev);
		ice_set_tx_function(dev);
		return 0;
	}

	dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

	ice_set_default_ptype_table(dev);
	pci_dev = RTE_DEV_TO_PCI(dev->device);
	intr_handle = pci_dev->intr_handle;

	pf->adapter = ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	pf->dev_data = dev->data;
	hw->back = pf->adapter;
	hw->hw_addr = (uint8_t *)pci_dev->mem_resource[0].addr;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->device_id = pci_dev->id.device_id;
	hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
	hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
	hw->bus.device = pci_dev->addr.devid;
	hw->bus.func = pci_dev->addr.function;

	ret = ice_parse_devargs(dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to parse devargs");
		return -EINVAL;
	}

	ice_init_controlq_parameter(hw);

	ret = ice_init_hw(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to initialize HW");
		return -EINVAL;
	}

#ifndef RTE_EXEC_ENV_WINDOWS
	use_dsn = false;
	dsn = 0;
	pos = rte_pci_find_ext_capability(pci_dev, RTE_PCI_EXT_CAP_ID_DSN);
	if (pos) {
		if (rte_pci_read_config(pci_dev, &dsn_low, 4, pos + 4) < 0 ||
		    rte_pci_read_config(pci_dev, &dsn_high, 4, pos + 8) < 0) {
			PMD_INIT_LOG(ERR, "Failed to read pci config space\n");
		} else {
			use_dsn = true;
			dsn = (uint64_t)dsn_high << 32 | dsn_low;
		}
	} else {
		PMD_INIT_LOG(ERR, "Failed to read device serial number\n");
	}

	ret = ice_load_pkg(pf->adapter, use_dsn, dsn);
	if (ret == 0) {
		ret = ice_init_hw_tbls(hw);
		if (ret) {
			PMD_INIT_LOG(ERR, "ice_init_hw_tbls failed: %d\n", ret);
			rte_free(hw->pkg_copy);
			goto err_init_fw;
		}
	}

	if (ret) {
		if (ad->devargs.safe_mode_support == 0) {
			PMD_INIT_LOG(ERR, "Failed to load the DDP package, "
					"Use safe-mode-support=1 to enter Safe Mode");
			goto err_init_fw;
		}

		PMD_INIT_LOG(WARNING, "Failed to load the DDP package, "
				"Entering Safe Mode");
		ad->is_safe_mode = 1;
	}
#endif

	PMD_INIT_LOG(INFO, "FW %d.%d.%05d API %d.%d",
		     hw->fw_maj_ver, hw->fw_min_ver, hw->fw_build,
		     hw->api_maj_ver, hw->api_min_ver);

	ice_pf_sw_init(dev);
	ret = ice_init_mac_address(dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to initialize mac address");
		goto err_init_mac;
	}

	ret = ice_res_pool_init(&pf->msix_pool, 1,
				hw->func_caps.common_cap.num_msix_vectors - 1);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to init MSIX pool");
		goto err_msix_pool_init;
	}

	ret = ice_pf_setup(pf);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to setup PF");
		goto err_pf_setup;
	}

	ret = ice_send_driver_ver(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to send driver version");
		goto err_pf_setup;
	}

	vsi = pf->main_vsi;

	ret = ice_aq_stop_lldp(hw, true, false, NULL);
	if (ret != ICE_SUCCESS)
		PMD_INIT_LOG(DEBUG, "lldp has already stopped\n");
	ret = ice_init_dcb(hw, true);
	if (ret != ICE_SUCCESS)
		PMD_INIT_LOG(DEBUG, "Failed to init DCB\n");
	/* Forward LLDP packets to default VSI */
	ret = ice_vsi_config_sw_lldp(vsi, true);
	if (ret != ICE_SUCCESS)
		PMD_INIT_LOG(DEBUG, "Failed to cfg lldp\n");

	/* register callback func to eal lib */
	rte_intr_callback_register(intr_handle,
				   ice_interrupt_handler, dev);

	ice_pf_enable_irq0(hw);

	/* enable uio intr after callback register */
	rte_intr_enable(intr_handle);

	/* get base queue pairs index in the device */
	ice_base_queue_get(pf);

	/* Initialize RSS context for gtpu_eh */
	ice_rss_ctx_init(pf);

	if (!ad->is_safe_mode) {
		ret = ice_flow_init(ad);
		if (ret) {
			PMD_INIT_LOG(ERR, "Failed to initialize flow");
			goto err_flow_init;
		}
	}

	ret = ice_reset_fxp_resource(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to reset fxp resource");
		goto err_flow_init;
	}

	pf->supported_rxdid = ice_get_supported_rxdid(hw);

	return 0;

err_flow_init:
	ice_flow_uninit(ad);
	rte_intr_disable(intr_handle);
	ice_pf_disable_irq0(hw);
	rte_intr_callback_unregister(intr_handle,
				     ice_interrupt_handler, dev);
err_pf_setup:
	ice_res_pool_destroy(&pf->msix_pool);
err_msix_pool_init:
	rte_free(dev->data->mac_addrs);
	dev->data->mac_addrs = NULL;
err_init_mac:
	rte_free(pf->proto_xtr);
#ifndef RTE_EXEC_ENV_WINDOWS
err_init_fw:
#endif
	ice_deinit_hw(hw);

	return ret;
}
static int
ice_release_vsi(struct ice_vsi *vsi)
{
	struct ice_hw *hw;
	struct ice_vsi_ctx vsi_ctx;
	enum ice_status ret;
	int error = 0;

	if (!vsi)
		return error;

	hw = ICE_VSI_TO_HW(vsi);

	ice_remove_all_mac_vlan_filters(vsi);

	memset(&vsi_ctx, 0, sizeof(vsi_ctx));

	vsi_ctx.vsi_num = vsi->vsi_id;
	vsi_ctx.info = vsi->info;
	ret = ice_free_vsi(hw, vsi->idx, &vsi_ctx, false, NULL);
	if (ret != ICE_SUCCESS) {
		PMD_INIT_LOG(ERR, "Failed to free vsi by aq, %u", vsi->vsi_id);
		error = -1;
	}

	rte_free(vsi->rss_lut);
	rte_free(vsi->rss_key);
	rte_free(vsi);
	return error;
}
2380 ice_vsi_disable_queues_intr(struct ice_vsi *vsi)
2382 struct rte_eth_dev *dev = &rte_eth_devices[vsi->adapter->pf.dev_data->port_id];
2383 struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
2384 struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
2385 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
2386 uint16_t msix_intr, i;
2388 /* disable interrupts and also clear all the existing config */
2389 for (i = 0; i < vsi->nb_qps; i++) {
2390 ICE_WRITE_REG(hw, QINT_TQCTL(vsi->base_queue + i), 0);
2391 ICE_WRITE_REG(hw, QINT_RQCTL(vsi->base_queue + i), 0);
2395 if (rte_intr_allow_others(intr_handle))
2397 for (i = 0; i < vsi->nb_msix; i++) {
2398 msix_intr = vsi->msix_intr + i;
2399 ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr),
2400 GLINT_DYN_CTL_WB_ON_ITR_M);
2404 ICE_WRITE_REG(hw, GLINT_DYN_CTL(0), GLINT_DYN_CTL_WB_ON_ITR_M);
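/*
 * Note, based on the flags used above: writing GLINT_DYN_CTL with only
 * WB_ON_ITR_M set and INTENA clear leaves the vector masked while
 * descriptor write-back keeps running on the ITR timer, so the queues
 * can still be polled with their interrupts disabled.
 */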
2408 ice_dev_stop(struct rte_eth_dev *dev)
2410 struct rte_eth_dev_data *data = dev->data;
2411 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2412 struct ice_vsi *main_vsi = pf->main_vsi;
2413 struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
2414 struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
2417 /* avoid stopping again */
2418 if (pf->adapter_stopped)
2421 /* stop and clear all Rx queues */
2422 for (i = 0; i < data->nb_rx_queues; i++)
2423 ice_rx_queue_stop(dev, i);
2425 /* stop and clear all Tx queues */
2426 for (i = 0; i < data->nb_tx_queues; i++)
2427 ice_tx_queue_stop(dev, i);
2429 /* disable all queue interrupts */
2430 ice_vsi_disable_queues_intr(main_vsi);
2432 if (pf->init_link_up)
2433 ice_dev_set_link_up(dev);
2435 ice_dev_set_link_down(dev);
2437 /* Clean datapath event and queue/vec mapping */
2438 rte_intr_efd_disable(intr_handle);
2439 rte_intr_vec_list_free(intr_handle);
2441 pf->adapter_stopped = true;
2442 dev->data->dev_started = 0;
2448 ice_dev_close(struct rte_eth_dev *dev)
2450 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2451 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2452 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2453 struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
2454 struct ice_adapter *ad =
2455 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2458 uint8_t timer = hw->func_caps.ts_func_info.tmr_index_owned;
2459 uint32_t pin_idx = ad->devargs.pin_idx;
2461 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2464 /* Since stop will bring the link down, a link event will be
2465 * triggered; disable the irq first so that freeing port_info and
2466 * other resources cannot crash the interrupt service thread
2469 ice_pf_disable_irq0(hw);
2471 ret = ice_dev_stop(dev);
2473 if (!ad->is_safe_mode)
2474 ice_flow_uninit(ad);
2476 /* release all queue resource */
2477 ice_free_queues(dev);
2479 ice_res_pool_destroy(&pf->msix_pool);
2480 ice_release_vsi(pf->main_vsi);
2481 ice_sched_cleanup_all(hw);
2482 ice_free_hw_tbls(hw);
2483 rte_free(hw->port_info);
2484 hw->port_info = NULL;
2485 ice_shutdown_all_ctrlq(hw);
2486 rte_free(pf->proto_xtr);
2487 pf->proto_xtr = NULL;
2489 if (ad->devargs.pps_out_ena) {
2490 ICE_WRITE_REG(hw, GLTSYN_AUX_OUT(pin_idx, timer), 0);
2491 ICE_WRITE_REG(hw, GLTSYN_CLKO(pin_idx, timer), 0);
2492 ICE_WRITE_REG(hw, GLTSYN_TGT_L(pin_idx, timer), 0);
2493 ICE_WRITE_REG(hw, GLTSYN_TGT_H(pin_idx, timer), 0);
2495 val = GLGEN_GPIO_CTL_PIN_DIR_M;
2496 ICE_WRITE_REG(hw, GLGEN_GPIO_CTL(pin_idx), val);
2499 /* disable uio intr before callback unregister */
2500 rte_intr_disable(intr_handle);
2502 /* unregister callback func from eal lib */
2503 rte_intr_callback_unregister(intr_handle,
2504 ice_interrupt_handler, dev);
2510 ice_dev_uninit(struct rte_eth_dev *dev)
2518 is_hash_cfg_valid(struct ice_rss_hash_cfg *cfg)
2520 return cfg->hash_flds != 0 && cfg->addl_hdrs != 0;
2524 hash_cfg_reset(struct ice_rss_hash_cfg *cfg)
2529 cfg->hdr_type = ICE_RSS_OUTER_HEADERS;
2533 ice_hash_moveout(struct ice_pf *pf, struct ice_rss_hash_cfg *cfg)
2535 enum ice_status status = ICE_SUCCESS;
2536 struct ice_hw *hw = ICE_PF_TO_HW(pf);
2537 struct ice_vsi *vsi = pf->main_vsi;
2539 if (!is_hash_cfg_valid(cfg))
2542 status = ice_rem_rss_cfg(hw, vsi->idx, cfg);
2543 if (status && status != ICE_ERR_DOES_NOT_EXIST) {
2545 "ice_rem_rss_cfg failed for VSI:%d, error:%d\n",
2554 ice_hash_moveback(struct ice_pf *pf, struct ice_rss_hash_cfg *cfg)
2556 enum ice_status status = ICE_SUCCESS;
2557 struct ice_hw *hw = ICE_PF_TO_HW(pf);
2558 struct ice_vsi *vsi = pf->main_vsi;
2560 if (!is_hash_cfg_valid(cfg))
2563 status = ice_add_rss_cfg(hw, vsi->idx, cfg);
2566 "ice_add_rss_cfg failed for VSI:%d, error:%d\n",
2575 ice_hash_remove(struct ice_pf *pf, struct ice_rss_hash_cfg *cfg)
2579 ret = ice_hash_moveout(pf, cfg);
2580 if (ret && (ret != -ENOENT))
2583 hash_cfg_reset(cfg);
2589 ice_add_rss_cfg_pre_gtpu(struct ice_pf *pf, struct ice_hash_gtpu_ctx *ctx,
2595 case ICE_HASH_GTPU_CTX_EH_IP:
2596 ret = ice_hash_remove(pf,
2597 &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_UDP]);
2598 if (ret && (ret != -ENOENT))
2601 ret = ice_hash_remove(pf,
2602 &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_TCP]);
2603 if (ret && (ret != -ENOENT))
2606 ret = ice_hash_remove(pf,
2607 &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP]);
2608 if (ret && (ret != -ENOENT))
2611 ret = ice_hash_remove(pf,
2612 &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_UDP]);
2613 if (ret && (ret != -ENOENT))
2616 ret = ice_hash_remove(pf,
2617 &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_TCP]);
2618 if (ret && (ret != -ENOENT))
2621 ret = ice_hash_remove(pf,
2622 &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP]);
2623 if (ret && (ret != -ENOENT))
2626 ret = ice_hash_remove(pf,
2627 &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_UDP]);
2628 if (ret && (ret != -ENOENT))
2631 ret = ice_hash_remove(pf,
2632 &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_TCP]);
2633 if (ret && (ret != -ENOENT))
2637 case ICE_HASH_GTPU_CTX_EH_IP_UDP:
2638 ret = ice_hash_remove(pf,
2639 &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_UDP]);
2640 if (ret && (ret != -ENOENT))
2643 ret = ice_hash_remove(pf,
2644 &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_UDP]);
2645 if (ret && (ret != -ENOENT))
2648 ret = ice_hash_moveout(pf,
2649 &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP]);
2650 if (ret && (ret != -ENOENT))
2653 ret = ice_hash_moveout(pf,
2654 &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_TCP]);
2655 if (ret && (ret != -ENOENT))
2658 ret = ice_hash_moveout(pf,
2659 &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP]);
2660 if (ret && (ret != -ENOENT))
2663 ret = ice_hash_moveout(pf,
2664 &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_TCP]);
2665 if (ret && (ret != -ENOENT))
2669 case ICE_HASH_GTPU_CTX_EH_IP_TCP:
2670 ret = ice_hash_remove(pf,
2671 &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_TCP]);
2672 if (ret && (ret != -ENOENT))
2675 ret = ice_hash_remove(pf,
2676 &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_TCP]);
2677 if (ret && (ret != -ENOENT))
2680 ret = ice_hash_moveout(pf,
2681 &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP]);
2682 if (ret && (ret != -ENOENT))
2685 ret = ice_hash_moveout(pf,
2686 &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_UDP]);
2687 if (ret && (ret != -ENOENT))
2690 ret = ice_hash_moveout(pf,
2691 &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP]);
2692 if (ret && (ret != -ENOENT))
2695 ret = ice_hash_moveout(pf,
2696 &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_UDP]);
2697 if (ret && (ret != -ENOENT))
2701 case ICE_HASH_GTPU_CTX_UP_IP:
2702 ret = ice_hash_remove(pf,
2703 &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_UDP]);
2704 if (ret && (ret != -ENOENT))
2707 ret = ice_hash_remove(pf,
2708 &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_TCP]);
2709 if (ret && (ret != -ENOENT))
2712 ret = ice_hash_moveout(pf,
2713 &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP]);
2714 if (ret && (ret != -ENOENT))
2717 ret = ice_hash_moveout(pf,
2718 &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_UDP]);
2719 if (ret && (ret != -ENOENT))
2722 ret = ice_hash_moveout(pf,
2723 &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_TCP]);
2724 if (ret && (ret != -ENOENT))
2728 case ICE_HASH_GTPU_CTX_UP_IP_UDP:
2729 case ICE_HASH_GTPU_CTX_UP_IP_TCP:
2730 ret = ice_hash_moveout(pf,
2731 &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP]);
2732 if (ret && (ret != -ENOENT))
2735 ret = ice_hash_moveout(pf,
2736 &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_UDP]);
2737 if (ret && (ret != -ENOENT))
2740 ret = ice_hash_moveout(pf,
2741 &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_TCP]);
2742 if (ret && (ret != -ENOENT))
2746 case ICE_HASH_GTPU_CTX_DW_IP:
2747 ret = ice_hash_remove(pf,
2748 &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_UDP]);
2749 if (ret && (ret != -ENOENT))
2752 ret = ice_hash_remove(pf,
2753 &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_TCP]);
2754 if (ret && (ret != -ENOENT))
2757 ret = ice_hash_moveout(pf,
2758 &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP]);
2759 if (ret && (ret != -ENOENT))
2762 ret = ice_hash_moveout(pf,
2763 &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_UDP]);
2764 if (ret && (ret != -ENOENT))
2767 ret = ice_hash_moveout(pf,
2768 &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_TCP]);
2769 if (ret && (ret != -ENOENT))
2773 case ICE_HASH_GTPU_CTX_DW_IP_UDP:
2774 case ICE_HASH_GTPU_CTX_DW_IP_TCP:
2775 ret = ice_hash_moveout(pf,
2776 &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP]);
2777 if (ret && (ret != -ENOENT))
2780 ret = ice_hash_moveout(pf,
2781 &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_UDP]);
2782 if (ret && (ret != -ENOENT))
2785 ret = ice_hash_moveout(pf,
2786 &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_TCP]);
2787 if (ret && (ret != -ENOENT))
2798 static u8 calc_gtpu_ctx_idx(uint32_t hdr)
2802 if (hdr & ICE_FLOW_SEG_HDR_GTPU_EH)
2804 else if (hdr & ICE_FLOW_SEG_HDR_GTPU_UP)
2806 else if (hdr & ICE_FLOW_SEG_HDR_GTPU_DWN)
2809 return ICE_HASH_GTPU_CTX_MAX;
2812 if (hdr & ICE_FLOW_SEG_HDR_UDP)
2814 else if (hdr & ICE_FLOW_SEG_HDR_TCP)
2817 if (hdr & (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6))
2818 return eh_idx * 3 + ip_idx;
2820 return ICE_HASH_GTPU_CTX_MAX;
2824 ice_add_rss_cfg_pre(struct ice_pf *pf, uint32_t hdr)
2826 u8 gtpu_ctx_idx = calc_gtpu_ctx_idx(hdr);
2828 if (hdr & ICE_FLOW_SEG_HDR_IPV4)
2829 return ice_add_rss_cfg_pre_gtpu(pf, &pf->hash_ctx.gtpu4,
2831 else if (hdr & ICE_FLOW_SEG_HDR_IPV6)
2832 return ice_add_rss_cfg_pre_gtpu(pf, &pf->hash_ctx.gtpu6,
2839 ice_add_rss_cfg_post_gtpu(struct ice_pf *pf, struct ice_hash_gtpu_ctx *ctx,
2840 u8 ctx_idx, struct ice_rss_hash_cfg *cfg)
2844 if (ctx_idx < ICE_HASH_GTPU_CTX_MAX)
2845 ctx->ctx[ctx_idx] = *cfg;
2848 case ICE_HASH_GTPU_CTX_EH_IP:
2850 case ICE_HASH_GTPU_CTX_EH_IP_UDP:
2851 ret = ice_hash_moveback(pf,
2852 &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP]);
2853 if (ret && (ret != -ENOENT))
2856 ret = ice_hash_moveback(pf,
2857 &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_TCP]);
2858 if (ret && (ret != -ENOENT))
2861 ret = ice_hash_moveback(pf,
2862 &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP]);
2863 if (ret && (ret != -ENOENT))
2866 ret = ice_hash_moveback(pf,
2867 &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_TCP]);
2868 if (ret && (ret != -ENOENT))
2872 case ICE_HASH_GTPU_CTX_EH_IP_TCP:
2873 ret = ice_hash_moveback(pf,
2874 &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP]);
2875 if (ret && (ret != -ENOENT))
2878 ret = ice_hash_moveback(pf,
2879 &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_UDP]);
2880 if (ret && (ret != -ENOENT))
2883 ret = ice_hash_moveback(pf,
2884 &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP]);
2885 if (ret && (ret != -ENOENT))
2888 ret = ice_hash_moveback(pf,
2889 &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_UDP]);
2890 if (ret && (ret != -ENOENT))
2894 case ICE_HASH_GTPU_CTX_UP_IP:
2895 case ICE_HASH_GTPU_CTX_UP_IP_UDP:
2896 case ICE_HASH_GTPU_CTX_UP_IP_TCP:
2897 case ICE_HASH_GTPU_CTX_DW_IP:
2898 case ICE_HASH_GTPU_CTX_DW_IP_UDP:
2899 case ICE_HASH_GTPU_CTX_DW_IP_TCP:
2900 ret = ice_hash_moveback(pf,
2901 &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP]);
2902 if (ret && (ret != -ENOENT))
2905 ret = ice_hash_moveback(pf,
2906 &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_UDP]);
2907 if (ret && (ret != -ENOENT))
2910 ret = ice_hash_moveback(pf,
2911 &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_TCP]);
2912 if (ret && (ret != -ENOENT))
2924 ice_add_rss_cfg_post(struct ice_pf *pf, struct ice_rss_hash_cfg *cfg)
2926 u8 gtpu_ctx_idx = calc_gtpu_ctx_idx(cfg->addl_hdrs);
2928 if (cfg->addl_hdrs & ICE_FLOW_SEG_HDR_IPV4)
2929 return ice_add_rss_cfg_post_gtpu(pf, &pf->hash_ctx.gtpu4,
2931 else if (cfg->addl_hdrs & ICE_FLOW_SEG_HDR_IPV6)
2932 return ice_add_rss_cfg_post_gtpu(pf, &pf->hash_ctx.gtpu6,
2939 ice_rem_rss_cfg_post(struct ice_pf *pf, uint32_t hdr)
2941 u8 gtpu_ctx_idx = calc_gtpu_ctx_idx(hdr);
2943 if (gtpu_ctx_idx >= ICE_HASH_GTPU_CTX_MAX)
2946 if (hdr & ICE_FLOW_SEG_HDR_IPV4)
2947 hash_cfg_reset(&pf->hash_ctx.gtpu4.ctx[gtpu_ctx_idx]);
2948 else if (hdr & ICE_FLOW_SEG_HDR_IPV6)
2949 hash_cfg_reset(&pf->hash_ctx.gtpu6.ctx[gtpu_ctx_idx]);
2953 ice_rem_rss_cfg_wrap(struct ice_pf *pf, uint16_t vsi_id,
2954 struct ice_rss_hash_cfg *cfg)
2956 struct ice_hw *hw = ICE_PF_TO_HW(pf);
2959 ret = ice_rem_rss_cfg(hw, vsi_id, cfg);
2960 if (ret && ret != ICE_ERR_DOES_NOT_EXIST)
2961 PMD_DRV_LOG(ERR, "remove rss cfg failed\n");
2963 ice_rem_rss_cfg_post(pf, cfg->addl_hdrs);
2969 ice_add_rss_cfg_wrap(struct ice_pf *pf, uint16_t vsi_id,
2970 struct ice_rss_hash_cfg *cfg)
2972 struct ice_hw *hw = ICE_PF_TO_HW(pf);
2975 ret = ice_add_rss_cfg_pre(pf, cfg->addl_hdrs);
2977 PMD_DRV_LOG(ERR, "add rss cfg pre failed\n");
2979 ret = ice_add_rss_cfg(hw, vsi_id, cfg);
2981 PMD_DRV_LOG(ERR, "add rss cfg failed\n");
2983 ret = ice_add_rss_cfg_post(pf, cfg);
2985 PMD_DRV_LOG(ERR, "add rss cfg post failed\n");
2991 ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
2993 struct ice_hw *hw = ICE_PF_TO_HW(pf);
2994 struct ice_vsi *vsi = pf->main_vsi;
2995 struct ice_rss_hash_cfg cfg;
2998 #define ICE_RSS_HF_ALL ( \
2999 RTE_ETH_RSS_IPV4 | \
3000 RTE_ETH_RSS_IPV6 | \
3001 RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
3002 RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
3003 RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
3004 RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
3005 RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
3006 RTE_ETH_RSS_NONFRAG_IPV6_SCTP)
3008 ret = ice_rem_vsi_rss_cfg(hw, vsi->idx);
3010 PMD_DRV_LOG(ERR, "%s Remove rss vsi fail %d",
3014 cfg.hdr_type = ICE_RSS_OUTER_HEADERS;
3015 /* Configure RSS for IPv4 with src/dst addr as input set */
3016 if (rss_hf & RTE_ETH_RSS_IPV4) {
3017 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER;
3018 cfg.hash_flds = ICE_FLOW_HASH_IPV4;
3019 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
3021 PMD_DRV_LOG(ERR, "%s IPV4 rss flow fail %d",
3025 /* Configure RSS for IPv6 with src/dst addr as input set */
3026 if (rss_hf & RTE_ETH_RSS_IPV6) {
3027 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER;
3028 cfg.hash_flds = ICE_FLOW_HASH_IPV6;
3029 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
3031 PMD_DRV_LOG(ERR, "%s IPV6 rss flow fail %d",
3035 /* Configure RSS for udp4 with src/dst addr and port as input set */
3036 if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP) {
3037 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4 |
3038 ICE_FLOW_SEG_HDR_IPV_OTHER;
3039 cfg.hash_flds = ICE_HASH_UDP_IPV4;
3040 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
3042 PMD_DRV_LOG(ERR, "%s UDP_IPV4 rss flow fail %d",
3046 /* Configure RSS for udp6 with src/dst addr and port as input set */
3047 if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP) {
3048 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV6 |
3049 ICE_FLOW_SEG_HDR_IPV_OTHER;
3050 cfg.hash_flds = ICE_HASH_UDP_IPV6;
3051 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
3053 PMD_DRV_LOG(ERR, "%s UDP_IPV6 rss flow fail %d",
3057 /* Configure RSS for tcp4 with src/dst addr and port as input set */
3058 if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP) {
3059 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4 |
3060 ICE_FLOW_SEG_HDR_IPV_OTHER;
3061 cfg.hash_flds = ICE_HASH_TCP_IPV4;
3062 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
3064 PMD_DRV_LOG(ERR, "%s TCP_IPV4 rss flow fail %d",
3068 /* Configure RSS for tcp6 with src/dst addr and port as input set */
3069 if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP) {
3070 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6 |
3071 ICE_FLOW_SEG_HDR_IPV_OTHER;
3072 cfg.hash_flds = ICE_HASH_TCP_IPV6;
3073 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
3075 PMD_DRV_LOG(ERR, "%s TCP_IPV6 rss flow fail %d",
3079 /* Configure RSS for sctp4 with src/dst addr and port as input set */
3080 if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_SCTP) {
3081 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4 |
3082 ICE_FLOW_SEG_HDR_IPV_OTHER;
3083 cfg.hash_flds = ICE_HASH_SCTP_IPV4;
3084 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
3086 PMD_DRV_LOG(ERR, "%s SCTP_IPV4 rss flow fail %d",
3090 /* Configure RSS for sctp6 with src/dst addr and port as input set */
3091 if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_SCTP) {
3092 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV6 |
3093 ICE_FLOW_SEG_HDR_IPV_OTHER;
3094 cfg.hash_flds = ICE_HASH_SCTP_IPV6;
3095 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
3097 PMD_DRV_LOG(ERR, "%s SCTP_IPV6 rss flow fail %d",
3101 if (rss_hf & RTE_ETH_RSS_IPV4) {
3102 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_IPV4 |
3103 ICE_FLOW_SEG_HDR_IPV_OTHER;
3104 cfg.hash_flds = ICE_FLOW_HASH_IPV4;
3105 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
3107 PMD_DRV_LOG(ERR, "%s PPPoE_IPV4 rss flow fail %d",
3111 if (rss_hf & RTE_ETH_RSS_IPV6) {
3112 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_IPV6 |
3113 ICE_FLOW_SEG_HDR_IPV_OTHER;
3114 cfg.hash_flds = ICE_FLOW_HASH_IPV6;
3115 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
3117 PMD_DRV_LOG(ERR, "%s PPPoE_IPV6 rss flow fail %d",
3121 if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP) {
3122 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_UDP |
3123 ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER;
3124 cfg.hash_flds = ICE_HASH_UDP_IPV4;
3125 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
3127 PMD_DRV_LOG(ERR, "%s PPPoE_IPV4_UDP rss flow fail %d",
3131 if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP) {
3132 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_UDP |
3133 ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER;
3134 cfg.hash_flds = ICE_HASH_UDP_IPV6;
3135 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
3137 PMD_DRV_LOG(ERR, "%s PPPoE_IPV6_UDP rss flow fail %d",
3141 if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP) {
3142 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_TCP |
3143 ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER;
3144 cfg.hash_flds = ICE_HASH_TCP_IPV4;
3145 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
3147 PMD_DRV_LOG(ERR, "%s PPPoE_IPV4_TCP rss flow fail %d",
3151 if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP) {
3152 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_TCP |
3153 ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER;
3154 cfg.hash_flds = ICE_HASH_TCP_IPV6;
3155 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
3157 PMD_DRV_LOG(ERR, "%s PPPoE_IPV6_TCP rss flow fail %d",
3161 pf->rss_hf = rss_hf & ICE_RSS_HF_ALL;
3165 ice_get_default_rss_key(uint8_t *rss_key, uint32_t rss_key_size)
3167 static struct ice_aqc_get_set_rss_keys default_key;
3168 static bool default_key_done;
3169 uint8_t *key = (uint8_t *)&default_key;
3172 if (rss_key_size > sizeof(default_key)) {
3173 PMD_DRV_LOG(WARNING,
3174 "requested size %u is larger than default %zu, "
3175 "only %zu bytes are gotten for key\n",
3176 rss_key_size, sizeof(default_key),
3177 sizeof(default_key));
3180 if (!default_key_done) {
3181 /* Calculate the default hash key */
3182 for (i = 0; i < sizeof(default_key); i++)
3183 key[i] = (uint8_t)rte_rand();
3184 default_key_done = true;
3186 rte_memcpy(rss_key, key, RTE_MIN(rss_key_size, sizeof(default_key)));
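/*
 * Since default_key is static and filled only once, every port that
 * starts without an explicit rss_key in this process shares the same
 * randomly generated key; restarting the process produces a new one.
 */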
3189 static int ice_init_rss(struct ice_pf *pf)
3191 struct ice_hw *hw = ICE_PF_TO_HW(pf);
3192 struct ice_vsi *vsi = pf->main_vsi;
3193 struct rte_eth_dev_data *dev_data = pf->dev_data;
3194 struct ice_aq_get_set_rss_lut_params lut_params;
3195 struct rte_eth_rss_conf *rss_conf;
3196 struct ice_aqc_get_set_rss_keys key;
3199 bool is_safe_mode = pf->adapter->is_safe_mode;
3202 rss_conf = &dev_data->dev_conf.rx_adv_conf.rss_conf;
3203 nb_q = dev_data->nb_rx_queues;
3204 vsi->rss_key_size = ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE;
3205 vsi->rss_lut_size = pf->hash_lut_size;
3208 PMD_DRV_LOG(WARNING,
3209 "RSS is not supported as rx queues number is zero\n");
3214 PMD_DRV_LOG(WARNING, "RSS is not supported in safe mode\n");
3218 if (!vsi->rss_key) {
3219 vsi->rss_key = rte_zmalloc(NULL,
3220 vsi->rss_key_size, 0);
3221 if (vsi->rss_key == NULL) {
3222 PMD_DRV_LOG(ERR, "Failed to allocate memory for rss_key");
3226 if (!vsi->rss_lut) {
3227 vsi->rss_lut = rte_zmalloc(NULL,
3228 vsi->rss_lut_size, 0);
3229 if (vsi->rss_lut == NULL) {
3230 PMD_DRV_LOG(ERR, "Failed to allocate memory for rss_lut");
3231 rte_free(vsi->rss_key);
3232 vsi->rss_key = NULL;
3236 /* configure RSS key */
3237 if (!rss_conf->rss_key)
3238 ice_get_default_rss_key(vsi->rss_key, vsi->rss_key_size);
3240 rte_memcpy(vsi->rss_key, rss_conf->rss_key,
3241 RTE_MIN(rss_conf->rss_key_len,
3242 vsi->rss_key_size));
3244 rte_memcpy(key.standard_rss_key, vsi->rss_key, vsi->rss_key_size);
3245 ret = ice_aq_set_rss_key(hw, vsi->idx, &key);
3249 /* init RSS LUT table */
3250 for (i = 0; i < vsi->rss_lut_size; i++)
3251 vsi->rss_lut[i] = i % nb_q;
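/*
 * Example of the round-robin fill above: with nb_q = 4 and a 512-entry
 * LUT the table becomes 0,1,2,3,0,1,2,3,... so hash buckets spread
 * evenly across the configured Rx queues.
 */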
3253 lut_params.vsi_handle = vsi->idx;
3254 lut_params.lut_size = vsi->rss_lut_size;
3255 lut_params.lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF;
3256 lut_params.lut = vsi->rss_lut;
3257 lut_params.global_lut_id = 0;
3258 ret = ice_aq_set_rss_lut(hw, &lut_params);
3262 /* Enable registers for symmetric_toeplitz function. */
3263 reg = ICE_READ_REG(hw, VSIQF_HASH_CTL(vsi->vsi_id));
3264 reg = (reg & (~VSIQF_HASH_CTL_HASH_SCHEME_M)) |
3265 (1 << VSIQF_HASH_CTL_HASH_SCHEME_S);
3266 ICE_WRITE_REG(hw, VSIQF_HASH_CTL(vsi->vsi_id), reg);
3268 /* RSS hash configuration */
3269 ice_rss_hash_set(pf, rss_conf->rss_hf);
3273 rte_free(vsi->rss_key);
3274 vsi->rss_key = NULL;
3275 rte_free(vsi->rss_lut);
3276 vsi->rss_lut = NULL;
3281 ice_dev_configure(struct rte_eth_dev *dev)
3283 struct ice_adapter *ad =
3284 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
3285 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3288 /* Initialize to TRUE. If any Rx queue fails to meet the
3289 * bulk allocation or vector Rx preconditions, it will be reset.
3291 ad->rx_bulk_alloc_allowed = true;
3292 ad->tx_simple_allowed = true;
3294 if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
3295 dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
3297 if (dev->data->nb_rx_queues) {
3298 ret = ice_init_rss(pf);
3300 PMD_DRV_LOG(ERR, "Failed to enable rss for PF");
3309 __vsi_queues_bind_intr(struct ice_vsi *vsi, uint16_t msix_vect,
3310 int base_queue, int nb_queue)
3312 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
3313 uint32_t val, val_tx;
3314 int rx_low_latency, i;
3316 rx_low_latency = vsi->adapter->devargs.rx_low_latency;
3317 for (i = 0; i < nb_queue; i++) {
3319 val = (msix_vect & QINT_RQCTL_MSIX_INDX_M) |
3320 (0 << QINT_RQCTL_ITR_INDX_S) | QINT_RQCTL_CAUSE_ENA_M;
3321 val_tx = (msix_vect & QINT_TQCTL_MSIX_INDX_M) |
3322 (0 << QINT_TQCTL_ITR_INDX_S) | QINT_TQCTL_CAUSE_ENA_M;
3324 PMD_DRV_LOG(INFO, "queue %d is binding to vect %d",
3325 base_queue + i, msix_vect);
3327 /* set ITR0 value */
3328 if (rx_low_latency) {
3330 * Empirical configuration for optimal real-time
3331 * latency: reduce interrupt throttling to 2 us
3333 ICE_WRITE_REG(hw, GLINT_ITR(0, msix_vect), 0x1);
3334 ICE_WRITE_REG(hw, QRX_ITR(base_queue + i),
3337 ICE_WRITE_REG(hw, GLINT_ITR(0, msix_vect), 0x2);
3338 ICE_WRITE_REG(hw, QRX_ITR(base_queue + i), 0);
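/*
 * Assumption based on the values used here: GLINT_ITR counts in 2 usec
 * units, so 0x1 requests ~2 usec throttling for the low-latency case
 * and 0x2 the ~4 usec default.
 */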
3341 ICE_WRITE_REG(hw, QINT_RQCTL(base_queue + i), val);
3342 ICE_WRITE_REG(hw, QINT_TQCTL(base_queue + i), val_tx);
3347 ice_vsi_queues_bind_intr(struct ice_vsi *vsi)
3349 struct rte_eth_dev *dev = &rte_eth_devices[vsi->adapter->pf.dev_data->port_id];
3350 struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
3351 struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
3352 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
3353 uint16_t msix_vect = vsi->msix_intr;
3354 uint16_t nb_msix = RTE_MIN(vsi->nb_msix,
3355 rte_intr_nb_efd_get(intr_handle));
3356 uint16_t queue_idx = 0;
3360 /* clear Rx/Tx queue interrupt */
3361 for (i = 0; i < vsi->nb_used_qps; i++) {
3362 ICE_WRITE_REG(hw, QINT_TQCTL(vsi->base_queue + i), 0);
3363 ICE_WRITE_REG(hw, QINT_RQCTL(vsi->base_queue + i), 0);
3366 /* PF bind interrupt */
3367 if (rte_intr_dp_is_en(intr_handle)) {
3372 for (i = 0; i < vsi->nb_used_qps; i++) {
3374 if (!rte_intr_allow_others(intr_handle))
3375 msix_vect = ICE_MISC_VEC_ID;
3377 /* uio: map all queues to one msix_vect */
3378 __vsi_queues_bind_intr(vsi, msix_vect,
3379 vsi->base_queue + i,
3380 vsi->nb_used_qps - i);
3382 for (; !!record && i < vsi->nb_used_qps; i++)
3383 rte_intr_vec_list_index_set(intr_handle,
3384 queue_idx + i, msix_vect);
3389 /* vfio 1:1 queue/msix_vect mapping */
3390 __vsi_queues_bind_intr(vsi, msix_vect,
3391 vsi->base_queue + i, 1);
3394 rte_intr_vec_list_index_set(intr_handle,
3404 ice_vsi_enable_queues_intr(struct ice_vsi *vsi)
3406 struct rte_eth_dev *dev = &rte_eth_devices[vsi->adapter->pf.dev_data->port_id];
3407 struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
3408 struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
3409 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
3410 uint16_t msix_intr, i;
3412 if (rte_intr_allow_others(intr_handle))
3413 for (i = 0; i < vsi->nb_used_qps; i++) {
3414 msix_intr = vsi->msix_intr + i;
3415 ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr),
3416 GLINT_DYN_CTL_INTENA_M |
3417 GLINT_DYN_CTL_CLEARPBA_M |
3418 GLINT_DYN_CTL_ITR_INDX_M |
3419 GLINT_DYN_CTL_WB_ON_ITR_M);
3422 ICE_WRITE_REG(hw, GLINT_DYN_CTL(0),
3423 GLINT_DYN_CTL_INTENA_M |
3424 GLINT_DYN_CTL_CLEARPBA_M |
3425 GLINT_DYN_CTL_ITR_INDX_M |
3426 GLINT_DYN_CTL_WB_ON_ITR_M);
3430 ice_rxq_intr_setup(struct rte_eth_dev *dev)
3432 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3433 struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
3434 struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
3435 struct ice_vsi *vsi = pf->main_vsi;
3436 uint32_t intr_vector = 0;
3438 rte_intr_disable(intr_handle);
3440 /* check and configure queue intr-vector mapping */
3441 if ((rte_intr_cap_multiple(intr_handle) ||
3442 !RTE_ETH_DEV_SRIOV(dev).active) &&
3443 dev->data->dev_conf.intr_conf.rxq != 0) {
3444 intr_vector = dev->data->nb_rx_queues;
3445 if (intr_vector > ICE_MAX_INTR_QUEUE_NUM) {
3446 PMD_DRV_LOG(ERR, "At most %d intr queues supported",
3447 ICE_MAX_INTR_QUEUE_NUM);
3450 if (rte_intr_efd_enable(intr_handle, intr_vector))
3454 if (rte_intr_dp_is_en(intr_handle)) {
3455 if (rte_intr_vec_list_alloc(intr_handle, NULL,
3456 dev->data->nb_rx_queues)) {
3458 "Failed to allocate %d rx_queues intr_vec",
3459 dev->data->nb_rx_queues);
3464 /* Map queues with MSIX interrupt */
3465 vsi->nb_used_qps = dev->data->nb_rx_queues;
3466 ice_vsi_queues_bind_intr(vsi);
3468 /* Enable interrupts for all the queues */
3469 ice_vsi_enable_queues_intr(vsi);
3471 rte_intr_enable(intr_handle);
3477 ice_get_init_link_status(struct rte_eth_dev *dev)
3479 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3480 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3481 bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false;
3482 struct ice_link_status link_status;
3485 ret = ice_aq_get_link_info(hw->port_info, enable_lse,
3486 &link_status, NULL);
3487 if (ret != ICE_SUCCESS) {
3488 PMD_DRV_LOG(ERR, "Failed to get link info");
3489 pf->init_link_up = false;
3493 if (link_status.link_info & ICE_AQ_LINK_UP)
3494 pf->init_link_up = true;
3498 ice_pps_out_cfg(struct ice_hw *hw, int idx, int timer)
3500 uint64_t current_time, start_time;
3501 uint32_t hi, lo, lo2, func, val;
3503 lo = ICE_READ_REG(hw, GLTSYN_TIME_L(timer));
3504 hi = ICE_READ_REG(hw, GLTSYN_TIME_H(timer));
3505 lo2 = ICE_READ_REG(hw, GLTSYN_TIME_L(timer));
3508 lo = ICE_READ_REG(hw, GLTSYN_TIME_L(timer));
3509 hi = ICE_READ_REG(hw, GLTSYN_TIME_H(timer));
3512 current_time = ((uint64_t)hi << 32) | lo;
3514 start_time = (current_time + NSEC_PER_SEC) /
3515 NSEC_PER_SEC * NSEC_PER_SEC;
3516 start_time = start_time - PPS_OUT_DELAY_NS;
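/*
 * Worked example of the arithmetic above: if current_time is 3.7e9 ns,
 * (3.7e9 + 1e9) / 1e9 * 1e9 truncates to 4e9 ns, i.e. the next whole
 * second boundary, and the first pulse target is that boundary minus
 * PPS_OUT_DELAY_NS.
 */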
3518 func = 8 + idx + timer * 4;
3519 val = GLGEN_GPIO_CTL_PIN_DIR_M |
3520 ((func << GLGEN_GPIO_CTL_PIN_FUNC_S) &
3521 GLGEN_GPIO_CTL_PIN_FUNC_M);
3523 /* Write clkout with half of period value */
3524 ICE_WRITE_REG(hw, GLTSYN_CLKO(idx, timer), NSEC_PER_SEC / 2);
3526 /* Write TARGET time register */
3527 ICE_WRITE_REG(hw, GLTSYN_TGT_L(idx, timer), start_time & 0xffffffff);
3528 ICE_WRITE_REG(hw, GLTSYN_TGT_H(idx, timer), start_time >> 32);
3530 /* Write AUX_OUT register */
3531 ICE_WRITE_REG(hw, GLTSYN_AUX_OUT(idx, timer),
3532 GLTSYN_AUX_OUT_0_OUT_ENA_M | GLTSYN_AUX_OUT_0_OUTMOD_M);
3534 /* Write GPIO CTL register */
3535 ICE_WRITE_REG(hw, GLGEN_GPIO_CTL(idx), val);
3541 ice_dev_start(struct rte_eth_dev *dev)
3543 struct rte_eth_dev_data *data = dev->data;
3544 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3545 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3546 struct ice_vsi *vsi = pf->main_vsi;
3547 struct ice_adapter *ad =
3548 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
3549 uint16_t nb_rxq = 0;
3551 uint16_t max_frame_size;
3553 uint8_t timer = hw->func_caps.ts_func_info.tmr_index_owned;
3554 uint32_t pin_idx = ad->devargs.pin_idx;
3556 /* program Tx queues' context in hardware */
3557 for (nb_txq = 0; nb_txq < data->nb_tx_queues; nb_txq++) {
3558 ret = ice_tx_queue_start(dev, nb_txq);
3560 PMD_DRV_LOG(ERR, "fail to start Tx queue %u", nb_txq);
3565 /* program Rx queues' context in hardware */
3566 for (nb_rxq = 0; nb_rxq < data->nb_rx_queues; nb_rxq++) {
3567 ret = ice_rx_queue_start(dev, nb_rxq);
3569 PMD_DRV_LOG(ERR, "fail to start Rx queue %u", nb_rxq);
3574 ice_set_rx_function(dev);
3575 ice_set_tx_function(dev);
3577 mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
3578 RTE_ETH_VLAN_EXTEND_MASK;
3579 ret = ice_vlan_offload_set(dev, mask);
3581 PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
3585 /* enable Rx interrupts and map Rx queues to interrupt vectors */
3586 if (ice_rxq_intr_setup(dev))
3589 /* Enable receiving broadcast packets and transmitting packets */
3590 ret = ice_set_vsi_promisc(hw, vsi->idx,
3591 ICE_PROMISC_BCAST_RX | ICE_PROMISC_BCAST_TX |
3592 ICE_PROMISC_UCAST_TX | ICE_PROMISC_MCAST_TX,
3594 if (ret != ICE_SUCCESS)
3595 PMD_DRV_LOG(INFO, "fail to set vsi broadcast");
3597 ret = ice_aq_set_event_mask(hw, hw->port_info->lport,
3598 ((u16)(ICE_AQ_LINK_EVENT_LINK_FAULT |
3599 ICE_AQ_LINK_EVENT_PHY_TEMP_ALARM |
3600 ICE_AQ_LINK_EVENT_EXCESSIVE_ERRORS |
3601 ICE_AQ_LINK_EVENT_SIGNAL_DETECT |
3602 ICE_AQ_LINK_EVENT_AN_COMPLETED |
3603 ICE_AQ_LINK_EVENT_PORT_TX_SUSPENDED)),
3605 if (ret != ICE_SUCCESS)
3606 PMD_DRV_LOG(WARNING, "Fail to set phy mask");
3608 ice_get_init_link_status(dev);
3610 ice_dev_set_link_up(dev);
3612 /* Call get_link_info aq command to enable/disable LSE */
3613 ice_link_update(dev, 1);
3615 pf->adapter_stopped = false;
3617 /* Set the max frame size to the default value */
3618 max_frame_size = pf->dev_data->mtu ?
3619 pf->dev_data->mtu + ICE_ETH_OVERHEAD :
3622 /* Program the max frame size into HW */
3623 ice_aq_set_mac_cfg(hw, max_frame_size, NULL);
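/*
 * Worked example, assuming ICE_ETH_OVERHEAD accounts for the Ethernet
 * header, CRC and two VLAN tags (26 bytes): an MTU of 1500 programs a
 * max frame size of 1526 into the MAC.
 */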
3625 if (ad->devargs.pps_out_ena) {
3626 ret = ice_pps_out_cfg(hw, pin_idx, timer);
3628 PMD_DRV_LOG(ERR, "Fail to configure 1pps out");
3635 /* stop the queues already started if starting all queues failed */
3637 for (i = 0; i < nb_rxq; i++)
3638 ice_rx_queue_stop(dev, i);
3640 for (i = 0; i < nb_txq; i++)
3641 ice_tx_queue_stop(dev, i);
3647 ice_dev_reset(struct rte_eth_dev *dev)
3651 if (dev->data->sriov.active)
3654 ret = ice_dev_uninit(dev);
3656 PMD_INIT_LOG(ERR, "failed to uninit device, status = %d", ret);
3660 ret = ice_dev_init(dev);
3662 PMD_INIT_LOG(ERR, "failed to init device, status = %d", ret);
3670 ice_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
3672 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3673 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3674 struct ice_vsi *vsi = pf->main_vsi;
3675 struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
3676 bool is_safe_mode = pf->adapter->is_safe_mode;
3680 dev_info->min_rx_bufsize = ICE_BUF_SIZE_MIN;
3681 dev_info->max_rx_pktlen = ICE_FRAME_SIZE_MAX;
3682 dev_info->max_rx_queues = vsi->nb_qps;
3683 dev_info->max_tx_queues = vsi->nb_qps;
3684 dev_info->max_mac_addrs = vsi->max_macaddrs;
3685 dev_info->max_vfs = pci_dev->max_vfs;
3686 dev_info->max_mtu = dev_info->max_rx_pktlen - ICE_ETH_OVERHEAD;
3687 dev_info->min_mtu = RTE_ETHER_MIN_MTU;
3689 dev_info->rx_offload_capa =
3690 RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
3691 RTE_ETH_RX_OFFLOAD_KEEP_CRC |
3692 RTE_ETH_RX_OFFLOAD_SCATTER |
3693 RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
3694 dev_info->tx_offload_capa =
3695 RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
3696 RTE_ETH_TX_OFFLOAD_TCP_TSO |
3697 RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
3698 RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
3699 dev_info->flow_type_rss_offloads = 0;
3701 if (!is_safe_mode) {
3702 dev_info->rx_offload_capa |=
3703 RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
3704 RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
3705 RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
3706 RTE_ETH_RX_OFFLOAD_QINQ_STRIP |
3707 RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
3708 RTE_ETH_RX_OFFLOAD_VLAN_EXTEND |
3709 RTE_ETH_RX_OFFLOAD_RSS_HASH |
3710 RTE_ETH_RX_OFFLOAD_TIMESTAMP;
3711 dev_info->tx_offload_capa |=
3712 RTE_ETH_TX_OFFLOAD_QINQ_INSERT |
3713 RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
3714 RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
3715 RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
3716 RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
3717 RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
3718 RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM;
3719 dev_info->flow_type_rss_offloads |= ICE_RSS_OFFLOAD_ALL;
3722 dev_info->rx_queue_offload_capa = 0;
3723 dev_info->tx_queue_offload_capa = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
3725 dev_info->reta_size = pf->hash_lut_size;
3726 dev_info->hash_key_size = (VSIQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
3728 dev_info->default_rxconf = (struct rte_eth_rxconf) {
3730 .pthresh = ICE_DEFAULT_RX_PTHRESH,
3731 .hthresh = ICE_DEFAULT_RX_HTHRESH,
3732 .wthresh = ICE_DEFAULT_RX_WTHRESH,
3734 .rx_free_thresh = ICE_DEFAULT_RX_FREE_THRESH,
3739 dev_info->default_txconf = (struct rte_eth_txconf) {
3741 .pthresh = ICE_DEFAULT_TX_PTHRESH,
3742 .hthresh = ICE_DEFAULT_TX_HTHRESH,
3743 .wthresh = ICE_DEFAULT_TX_WTHRESH,
3745 .tx_free_thresh = ICE_DEFAULT_TX_FREE_THRESH,
3746 .tx_rs_thresh = ICE_DEFAULT_TX_RSBIT_THRESH,
3750 dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
3751 .nb_max = ICE_MAX_RING_DESC,
3752 .nb_min = ICE_MIN_RING_DESC,
3753 .nb_align = ICE_ALIGN_RING_DESC,
3756 dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
3757 .nb_max = ICE_MAX_RING_DESC,
3758 .nb_min = ICE_MIN_RING_DESC,
3759 .nb_align = ICE_ALIGN_RING_DESC,
3762 dev_info->speed_capa = RTE_ETH_LINK_SPEED_10M |
3763 RTE_ETH_LINK_SPEED_100M |
3764 RTE_ETH_LINK_SPEED_1G |
3765 RTE_ETH_LINK_SPEED_2_5G |
3766 RTE_ETH_LINK_SPEED_5G |
3767 RTE_ETH_LINK_SPEED_10G |
3768 RTE_ETH_LINK_SPEED_20G |
3769 RTE_ETH_LINK_SPEED_25G;
3771 phy_type_low = hw->port_info->phy.phy_type_low;
3772 phy_type_high = hw->port_info->phy.phy_type_high;
3774 if (ICE_PHY_TYPE_SUPPORT_50G(phy_type_low))
3775 dev_info->speed_capa |= RTE_ETH_LINK_SPEED_50G;
3777 if (ICE_PHY_TYPE_SUPPORT_100G_LOW(phy_type_low) ||
3778 ICE_PHY_TYPE_SUPPORT_100G_HIGH(phy_type_high))
3779 dev_info->speed_capa |= RTE_ETH_LINK_SPEED_100G;
3781 dev_info->nb_rx_queues = dev->data->nb_rx_queues;
3782 dev_info->nb_tx_queues = dev->data->nb_tx_queues;
3784 dev_info->default_rxportconf.burst_size = ICE_RX_MAX_BURST;
3785 dev_info->default_txportconf.burst_size = ICE_TX_MAX_BURST;
3786 dev_info->default_rxportconf.nb_queues = 1;
3787 dev_info->default_txportconf.nb_queues = 1;
3788 dev_info->default_rxportconf.ring_size = ICE_BUF_SIZE_MIN;
3789 dev_info->default_txportconf.ring_size = ICE_BUF_SIZE_MIN;
3795 ice_atomic_read_link_status(struct rte_eth_dev *dev,
3796 struct rte_eth_link *link)
3798 struct rte_eth_link *dst = link;
3799 struct rte_eth_link *src = &dev->data->dev_link;
3801 if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
3802 *(uint64_t *)src) == 0)
3809 ice_atomic_write_link_status(struct rte_eth_dev *dev,
3810 struct rte_eth_link *link)
3812 struct rte_eth_link *dst = &dev->data->dev_link;
3813 struct rte_eth_link *src = link;
3815 if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
3816 *(uint64_t *)src) == 0)
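/*
 * Both helpers above rely on struct rte_eth_link packing into 64 bits,
 * so the whole link word can be swapped with a single compare-and-set;
 * a failed cmpset simply means another thread updated the link first.
 */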
3823 ice_link_update(struct rte_eth_dev *dev, int wait_to_complete)
3825 #define CHECK_INTERVAL 100 /* 100ms */
3826 #define MAX_REPEAT_TIME 10 /* 1s (10 * 100ms) in total */
3827 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3828 struct ice_link_status link_status;
3829 struct rte_eth_link link, old;
3831 unsigned int rep_cnt = MAX_REPEAT_TIME;
3832 bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false;
3834 memset(&link, 0, sizeof(link));
3835 memset(&old, 0, sizeof(old));
3836 memset(&link_status, 0, sizeof(link_status));
3837 ice_atomic_read_link_status(dev, &old);
3840 /* Get link status information from hardware */
3841 status = ice_aq_get_link_info(hw->port_info, enable_lse,
3842 &link_status, NULL);
3843 if (status != ICE_SUCCESS) {
3844 link.link_speed = RTE_ETH_SPEED_NUM_100M;
3845 link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
3846 PMD_DRV_LOG(ERR, "Failed to get link info");
3850 link.link_status = link_status.link_info & ICE_AQ_LINK_UP;
3851 if (!wait_to_complete || link.link_status)
3854 rte_delay_ms(CHECK_INTERVAL);
3855 } while (--rep_cnt);
3857 if (!link.link_status)
3860 /* Full-duplex operation at all supported speeds */
3861 link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
3863 /* Parse the link status */
3864 switch (link_status.link_speed) {
3865 case ICE_AQ_LINK_SPEED_10MB:
3866 link.link_speed = RTE_ETH_SPEED_NUM_10M;
3868 case ICE_AQ_LINK_SPEED_100MB:
3869 link.link_speed = RTE_ETH_SPEED_NUM_100M;
3871 case ICE_AQ_LINK_SPEED_1000MB:
3872 link.link_speed = RTE_ETH_SPEED_NUM_1G;
3874 case ICE_AQ_LINK_SPEED_2500MB:
3875 link.link_speed = RTE_ETH_SPEED_NUM_2_5G;
3877 case ICE_AQ_LINK_SPEED_5GB:
3878 link.link_speed = RTE_ETH_SPEED_NUM_5G;
3880 case ICE_AQ_LINK_SPEED_10GB:
3881 link.link_speed = RTE_ETH_SPEED_NUM_10G;
3883 case ICE_AQ_LINK_SPEED_20GB:
3884 link.link_speed = RTE_ETH_SPEED_NUM_20G;
3886 case ICE_AQ_LINK_SPEED_25GB:
3887 link.link_speed = RTE_ETH_SPEED_NUM_25G;
3889 case ICE_AQ_LINK_SPEED_40GB:
3890 link.link_speed = RTE_ETH_SPEED_NUM_40G;
3892 case ICE_AQ_LINK_SPEED_50GB:
3893 link.link_speed = RTE_ETH_SPEED_NUM_50G;
3895 case ICE_AQ_LINK_SPEED_100GB:
3896 link.link_speed = RTE_ETH_SPEED_NUM_100G;
3898 case ICE_AQ_LINK_SPEED_UNKNOWN:
3899 PMD_DRV_LOG(ERR, "Unknown link speed");
3900 link.link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
3903 PMD_DRV_LOG(ERR, "No link speed detected");
3904 link.link_speed = RTE_ETH_SPEED_NUM_NONE;
3908 link.link_autoneg = !(dev->data->dev_conf.link_speeds &
3909 RTE_ETH_LINK_SPEED_FIXED);
3912 ice_atomic_write_link_status(dev, &link);
3913 if (link.link_status == old.link_status)
3919 /* Force the physical link state by getting the current PHY capabilities from
3920 * hardware and setting the PHY config based on the determined capabilities. If
3921 * link changes, link event will be triggered because both the Enable Automatic
3922 * Link Update and LESM Enable bits are set when setting the PHY capabilities.
3924 static enum ice_status
3925 ice_force_phys_link_state(struct ice_hw *hw, bool link_up)
3927 struct ice_aqc_set_phy_cfg_data cfg = { 0 };
3928 struct ice_aqc_get_phy_caps_data *pcaps;
3929 struct ice_port_info *pi;
3930 enum ice_status status;
3932 if (!hw || !hw->port_info)
3933 return ICE_ERR_PARAM;
3937 pcaps = (struct ice_aqc_get_phy_caps_data *)
3938 ice_malloc(hw, sizeof(*pcaps));
3940 return ICE_ERR_NO_MEMORY;
3942 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG,
3947 /* No change in link */
3948 if (link_up == !!(pcaps->caps & ICE_AQC_PHY_EN_LINK) &&
3949 link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP))
3952 cfg.phy_type_low = pcaps->phy_type_low;
3953 cfg.phy_type_high = pcaps->phy_type_high;
3954 cfg.caps = pcaps->caps | ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
3955 cfg.low_power_ctrl_an = pcaps->low_power_ctrl_an;
3956 cfg.eee_cap = pcaps->eee_cap;
3957 cfg.eeer_value = pcaps->eeer_value;
3958 cfg.link_fec_opt = pcaps->link_fec_options;
3960 cfg.caps |= ICE_AQ_PHY_ENA_LINK;
3962 cfg.caps &= ~ICE_AQ_PHY_ENA_LINK;
3964 status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL);
3967 ice_free(hw, pcaps);
3972 ice_dev_set_link_up(struct rte_eth_dev *dev)
3974 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3976 return ice_force_phys_link_state(hw, true);
3980 ice_dev_set_link_down(struct rte_eth_dev *dev)
3982 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3984 return ice_force_phys_link_state(hw, false);
3988 ice_mtu_set(struct rte_eth_dev *dev, uint16_t mtu __rte_unused)
3990 /* MTU setting is forbidden while the port is started */
3991 if (dev->data->dev_started != 0) {
3993 "port %d must be stopped before configuration",
3994 dev->data->port_id);
4001 static int ice_macaddr_set(struct rte_eth_dev *dev,
4002 struct rte_ether_addr *mac_addr)
4004 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4005 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4006 struct ice_vsi *vsi = pf->main_vsi;
4007 struct ice_mac_filter *f;
4011 if (!rte_is_valid_assigned_ether_addr(mac_addr)) {
4012 PMD_DRV_LOG(ERR, "Tried to set invalid MAC address.");
4016 TAILQ_FOREACH(f, &vsi->mac_list, next) {
4017 if (rte_is_same_ether_addr(&pf->dev_addr, &f->mac_info.mac_addr))
4022 PMD_DRV_LOG(ERR, "Failed to find filter for default mac");
4026 ret = ice_remove_mac_filter(vsi, &f->mac_info.mac_addr);
4027 if (ret != ICE_SUCCESS) {
4028 PMD_DRV_LOG(ERR, "Failed to delete mac filter");
4031 ret = ice_add_mac_filter(vsi, mac_addr);
4032 if (ret != ICE_SUCCESS) {
4033 PMD_DRV_LOG(ERR, "Failed to add mac filter");
4036 rte_ether_addr_copy(mac_addr, &pf->dev_addr);
4038 flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL;
4039 ret = ice_aq_manage_mac_write(hw, mac_addr->addr_bytes, flags, NULL);
4040 if (ret != ICE_SUCCESS)
4041 PMD_DRV_LOG(ERR, "Failed to set manage mac");
4046 /* Add a MAC address, and update filters */
4048 ice_macaddr_add(struct rte_eth_dev *dev,
4049 struct rte_ether_addr *mac_addr,
4050 __rte_unused uint32_t index,
4051 __rte_unused uint32_t pool)
4053 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4054 struct ice_vsi *vsi = pf->main_vsi;
4057 ret = ice_add_mac_filter(vsi, mac_addr);
4058 if (ret != ICE_SUCCESS) {
4059 PMD_DRV_LOG(ERR, "Failed to add MAC filter");
4066 /* Remove a MAC address, and update filters */
4068 ice_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
4070 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4071 struct ice_vsi *vsi = pf->main_vsi;
4072 struct rte_eth_dev_data *data = dev->data;
4073 struct rte_ether_addr *macaddr;
4076 macaddr = &data->mac_addrs[index];
4077 ret = ice_remove_mac_filter(vsi, macaddr);
4079 PMD_DRV_LOG(ERR, "Failed to remove MAC filter");
4085 ice_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
4087 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4088 struct ice_vlan vlan = ICE_VLAN(RTE_ETHER_TYPE_VLAN, vlan_id);
4089 struct ice_vsi *vsi = pf->main_vsi;
4092 PMD_INIT_FUNC_TRACE();
4095 * VLAN 0 is the generic filter for untagged packets
4096 * and can't be removed or added by the user.
4102 ret = ice_add_vlan_filter(vsi, &vlan);
4104 PMD_DRV_LOG(ERR, "Failed to add vlan filter");
4108 ret = ice_remove_vlan_filter(vsi, &vlan);
4110 PMD_DRV_LOG(ERR, "Failed to remove vlan filter");
4118 /* In Single VLAN Mode (SVM), single VLAN filters via ICE_SW_LKUP_VLAN are
4119 * based on the inner VLAN ID, so the VLAN TPID (i.e. 0x8100 or 0x88a8)
4120 * doesn't matter. In Double VLAN Mode (DVM), outer/single VLAN filters via
4121 * ICE_SW_LKUP_VLAN are based on the outer/single VLAN ID + VLAN TPID.
4123 * For both modes add a VLAN 0 + no VLAN TPID filter to handle untagged traffic
4124 * when VLAN pruning is enabled. Also, this handles VLAN 0 priority tagged
4125 * traffic in SVM, since the VLAN TPID isn't part of filtering.
4127 * If DVM is enabled then an explicit VLAN 0 + VLAN TPID filter needs to be
4128 * added to allow VLAN 0 priority tagged traffic in DVM, since the VLAN TPID is
4129 * part of filtering.
4132 ice_vsi_add_vlan_zero(struct ice_vsi *vsi)
4134 struct ice_vlan vlan;
4137 vlan = ICE_VLAN(0, 0);
4138 err = ice_add_vlan_filter(vsi, &vlan);
4140 PMD_DRV_LOG(DEBUG, "Failed to add VLAN ID 0");
4144 /* in SVM both VLAN 0 filters are identical */
4145 if (!ice_is_dvm_ena(&vsi->adapter->hw))
4148 vlan = ICE_VLAN(RTE_ETHER_TYPE_VLAN, 0);
4149 err = ice_add_vlan_filter(vsi, &vlan);
4151 PMD_DRV_LOG(DEBUG, "Failed to add VLAN ID 0 in double VLAN mode");
4159 * Delete the VLAN 0 filters in the same manner that they were added in
4160 * ice_vsi_add_vlan_zero.
4163 ice_vsi_del_vlan_zero(struct ice_vsi *vsi)
4165 struct ice_vlan vlan;
4168 vlan = ICE_VLAN(0, 0);
4169 err = ice_remove_vlan_filter(vsi, &vlan);
4171 PMD_DRV_LOG(DEBUG, "Failed to remove VLAN ID 0");
4175 /* in SVM both VLAN 0 filters are identical */
4176 if (!ice_is_dvm_ena(&vsi->adapter->hw))
4179 vlan = ICE_VLAN(RTE_ETHER_TYPE_VLAN, 0);
4180 err = ice_remove_vlan_filter(vsi, &vlan);
4182 PMD_DRV_LOG(DEBUG, "Failed to remove VLAN ID 0 in double VLAN mode");
4189 /* Configure vlan filter on or off */
4191 ice_vsi_config_vlan_filter(struct ice_vsi *vsi, bool on)
4193 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
4194 struct ice_vsi_ctx ctxt;
4198 sw_flags2 = ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
4201 vsi->info.sw_flags2 |= sw_flags2;
4203 vsi->info.sw_flags2 &= ~sw_flags2;
4205 vsi->info.sw_id = hw->port_info->sw_id;
4206 (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
4207 ctxt.info.valid_sections =
4208 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_SW_VALID |
4209 ICE_AQ_VSI_PROP_SECURITY_VALID);
4210 ctxt.vsi_num = vsi->vsi_id;
4212 ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
4214 PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan rx pruning",
4215 on ? "enable" : "disable");
4218 vsi->info.valid_sections |=
4219 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_SW_VALID |
4220 ICE_AQ_VSI_PROP_SECURITY_VALID);
4223 /* consistent with other drivers: allow untagged packets when VLAN filtering is on */
4225 ret = ice_vsi_add_vlan_zero(vsi);
4227 ret = ice_vsi_del_vlan_zero(vsi);
4232 /* Manage VLAN stripping for the VSI for Rx */
4234 ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena)
4236 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
4237 struct ice_vsi_ctx ctxt;
4238 enum ice_status status;
4241 /* do not allow modifying VLAN stripping when a port VLAN is configured
4244 if (vsi->info.port_based_inner_vlan)
4247 memset(&ctxt, 0, sizeof(ctxt));
4250 /* Strip VLAN tag from Rx packet and put it in the desc */
4251 ctxt.info.inner_vlan_flags =
4252 ICE_AQ_VSI_INNER_VLAN_EMODE_STR_BOTH;
4254 /* Disable stripping. Leave tag in packet */
4255 ctxt.info.inner_vlan_flags =
4256 ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING;
4258 /* Allow all packets untagged/tagged */
4259 ctxt.info.inner_vlan_flags |= ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL;
4261 ctxt.info.valid_sections = rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID);
4263 status = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
4265 PMD_DRV_LOG(ERR, "Update VSI failed to %s vlan stripping",
4266 ena ? "enable" : "disable");
4269 vsi->info.inner_vlan_flags = ctxt.info.inner_vlan_flags;
4276 ice_vsi_ena_inner_stripping(struct ice_vsi *vsi)
4278 return ice_vsi_manage_vlan_stripping(vsi, true);
4282 ice_vsi_dis_inner_stripping(struct ice_vsi *vsi)
4284 return ice_vsi_manage_vlan_stripping(vsi, false);
4287 static int ice_vsi_ena_outer_stripping(struct ice_vsi *vsi)
4289 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
4290 struct ice_vsi_ctx ctxt;
4291 enum ice_status status;
4294 /* do not allow modifying VLAN stripping when a port VLAN is configured
4297 if (vsi->info.port_based_outer_vlan)
4300 memset(&ctxt, 0, sizeof(ctxt));
4302 ctxt.info.valid_sections =
4303 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);
4304 /* clear current outer VLAN strip settings */
4305 ctxt.info.outer_vlan_flags = vsi->info.outer_vlan_flags &
4306 ~(ICE_AQ_VSI_OUTER_VLAN_EMODE_M | ICE_AQ_VSI_OUTER_TAG_TYPE_M);
4307 ctxt.info.outer_vlan_flags |=
4308 (ICE_AQ_VSI_OUTER_VLAN_EMODE_SHOW_BOTH <<
4309 ICE_AQ_VSI_OUTER_VLAN_EMODE_S) |
4310 (ICE_AQ_VSI_OUTER_TAG_VLAN_8100 <<
4311 ICE_AQ_VSI_OUTER_TAG_TYPE_S);
4313 status = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
4315 PMD_DRV_LOG(ERR, "Update VSI failed to enable outer VLAN stripping");
4318 vsi->info.outer_vlan_flags = ctxt.info.outer_vlan_flags;
4325 ice_vsi_dis_outer_stripping(struct ice_vsi *vsi)
4327 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
4328 struct ice_vsi_ctx ctxt;
4329 enum ice_status status;
4332 if (vsi->info.port_based_outer_vlan)
4335 memset(&ctxt, 0, sizeof(ctxt));
4337 ctxt.info.valid_sections =
4338 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);
4339 /* clear current outer VLAN strip settings */
4340 ctxt.info.outer_vlan_flags = vsi->info.outer_vlan_flags &
4341 ~ICE_AQ_VSI_OUTER_VLAN_EMODE_M;
4342 ctxt.info.outer_vlan_flags |= ICE_AQ_VSI_OUTER_VLAN_EMODE_NOTHING <<
4343 ICE_AQ_VSI_OUTER_VLAN_EMODE_S;
4345 status = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
4347 PMD_DRV_LOG(ERR, "Update VSI failed to disable outer VLAN stripping");
4350 vsi->info.outer_vlan_flags = ctxt.info.outer_vlan_flags;
4357 ice_vsi_config_vlan_stripping(struct ice_vsi *vsi, bool ena)
4359 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
4362 if (ice_is_dvm_ena(hw)) {
4364 ret = ice_vsi_ena_outer_stripping(vsi);
4366 ret = ice_vsi_dis_outer_stripping(vsi);
4369 ret = ice_vsi_ena_inner_stripping(vsi);
4371 ret = ice_vsi_dis_inner_stripping(vsi);
4378 ice_vlan_offload_set(struct rte_eth_dev *dev, int mask)
4380 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4381 struct ice_vsi *vsi = pf->main_vsi;
4382 struct rte_eth_rxmode *rxmode;
4384 rxmode = &dev->data->dev_conf.rxmode;
4385 if (mask & RTE_ETH_VLAN_FILTER_MASK) {
4386 if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
4387 ice_vsi_config_vlan_filter(vsi, true);
4389 ice_vsi_config_vlan_filter(vsi, false);
4392 if (mask & RTE_ETH_VLAN_STRIP_MASK) {
4393 if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
4394 ice_vsi_config_vlan_stripping(vsi, true);
4396 ice_vsi_config_vlan_stripping(vsi, false);
4403 ice_get_rss_lut(struct ice_vsi *vsi, uint8_t *lut, uint16_t lut_size)
4405 struct ice_aq_get_set_rss_lut_params lut_params;
4406 struct ice_pf *pf = ICE_VSI_TO_PF(vsi);
4407 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
4413 if (pf->flags & ICE_FLAG_RSS_AQ_CAPABLE) {
4414 lut_params.vsi_handle = vsi->idx;
4415 lut_params.lut_size = lut_size;
4416 lut_params.lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF;
4417 lut_params.lut = lut;
4418 lut_params.global_lut_id = 0;
4419 ret = ice_aq_get_rss_lut(hw, &lut_params);
4421 PMD_DRV_LOG(ERR, "Failed to get RSS lookup table");
4425 uint32_t *lut_dw = (uint32_t *)lut; /* PFQF_HLUT regs are 32 bits wide */
4426 uint16_t i, lut_size_dw = lut_size / 4;
4428 for (i = 0; i < lut_size_dw; i++)
4429 lut_dw[i] = ICE_READ_REG(hw, PFQF_HLUT(i));
4436 ice_set_rss_lut(struct ice_vsi *vsi, uint8_t *lut, uint16_t lut_size)
4438 struct ice_aq_get_set_rss_lut_params lut_params;
4446 pf = ICE_VSI_TO_PF(vsi);
4447 hw = ICE_VSI_TO_HW(vsi);
4449 if (pf->flags & ICE_FLAG_RSS_AQ_CAPABLE) {
4450 lut_params.vsi_handle = vsi->idx;
4451 lut_params.lut_size = lut_size;
4452 lut_params.lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF;
4453 lut_params.lut = lut;
4454 lut_params.global_lut_id = 0;
4455 ret = ice_aq_set_rss_lut(hw, &lut_params);
4457 PMD_DRV_LOG(ERR, "Failed to set RSS lookup table");
4461 uint32_t *lut_dw = (uint32_t *)lut; /* PFQF_HLUT regs are 32 bits wide */
4462 uint16_t i, lut_size_dw = lut_size / 4;
4464 for (i = 0; i < lut_size_dw; i++)
4465 ICE_WRITE_REG(hw, PFQF_HLUT(i), lut_dw[i]);
4474 ice_rss_reta_update(struct rte_eth_dev *dev,
4475 struct rte_eth_rss_reta_entry64 *reta_conf,
4478 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4479 uint16_t i, lut_size = pf->hash_lut_size;
4480 uint16_t idx, shift;
4484 if (reta_size != ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128 &&
4485 reta_size != ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512 &&
4486 reta_size != ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K) {
4488 "The size of hash lookup table configured (%d)"
4489 "doesn't match the number hardware can "
4490 "supported (128, 512, 2048)",
4495 /* It MUST use the current LUT size to get the RSS lookup table,
4496 * otherwise it will fail with error code -100.
4498 lut = rte_zmalloc(NULL, RTE_MAX(reta_size, lut_size), 0);
4500 PMD_DRV_LOG(ERR, "Failed to allocate memory for the RSS LUT");
4503 ret = ice_get_rss_lut(pf->main_vsi, lut, lut_size);
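/*
 * reta_conf is organized in RTE_ETH_RETA_GROUP_SIZE (64-entry) groups:
 * for entry i, idx = i / 64 selects the group and shift = i % 64
 * selects the slot; e.g. entry 130 lives in group 2, slot 2, and is
 * applied only when bit 2 of that group's mask is set.
 */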
4507 for (i = 0; i < reta_size; i++) {
4508 idx = i / RTE_ETH_RETA_GROUP_SIZE;
4509 shift = i % RTE_ETH_RETA_GROUP_SIZE;
4510 if (reta_conf[idx].mask & (1ULL << shift))
4511 lut[i] = reta_conf[idx].reta[shift];
4513 ret = ice_set_rss_lut(pf->main_vsi, lut, reta_size);
4514 if (ret == 0 && lut_size != reta_size) {
4516 "The size of hash lookup table is changed from (%d) to (%d)",
4517 lut_size, reta_size);
4518 pf->hash_lut_size = reta_size;
4528 ice_rss_reta_query(struct rte_eth_dev *dev,
4529 struct rte_eth_rss_reta_entry64 *reta_conf,
4532 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4533 uint16_t i, lut_size = pf->hash_lut_size;
4534 uint16_t idx, shift;
4538 if (reta_size != lut_size) {
4540 "The size of hash lookup table configured (%d)"
4541 "doesn't match the number hardware can "
4543 reta_size, lut_size);
4547 lut = rte_zmalloc(NULL, reta_size, 0);
4549 PMD_DRV_LOG(ERR, "Failed to allocate memory for the RSS LUT");
4553 ret = ice_get_rss_lut(pf->main_vsi, lut, reta_size);
4557 for (i = 0; i < reta_size; i++) {
4558 idx = i / RTE_ETH_RETA_GROUP_SIZE;
4559 shift = i % RTE_ETH_RETA_GROUP_SIZE;
4560 if (reta_conf[idx].mask & (1ULL << shift))
4561 reta_conf[idx].reta[shift] = lut[i];
4571 ice_set_rss_key(struct ice_vsi *vsi, uint8_t *key, uint8_t key_len)
4573 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
4576 if (!key || key_len == 0) {
4577 PMD_DRV_LOG(DEBUG, "No key to be configured");
4579 } else if (key_len != (VSIQF_HKEY_MAX_INDEX + 1) *
4581 PMD_DRV_LOG(ERR, "Invalid key length %u", key_len);
4585 struct ice_aqc_get_set_rss_keys *key_dw =
4586 (struct ice_aqc_get_set_rss_keys *)key;
4588 ret = ice_aq_set_rss_key(hw, vsi->idx, key_dw);
4590 PMD_DRV_LOG(ERR, "Failed to configure RSS key via AQ");
4598 ice_get_rss_key(struct ice_vsi *vsi, uint8_t *key, uint8_t *key_len)
4600 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
4603 if (!key || !key_len)
4606 ret = ice_aq_get_rss_key
4608 (struct ice_aqc_get_set_rss_keys *)key);
4610 PMD_DRV_LOG(ERR, "Failed to get RSS key via AQ");
4613 *key_len = (VSIQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
4619 ice_rss_hash_update(struct rte_eth_dev *dev,
4620 struct rte_eth_rss_conf *rss_conf)
4622 enum ice_status status = ICE_SUCCESS;
4623 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4624 struct ice_vsi *vsi = pf->main_vsi;
4627 status = ice_set_rss_key(vsi, rss_conf->rss_key, rss_conf->rss_key_len);
4631 if (rss_conf->rss_hf == 0) {
4636 /* RSS hash configuration */
4637 ice_rss_hash_set(pf, rss_conf->rss_hf);
static int
ice_rss_hash_conf_get(struct rte_eth_dev *dev,
		      struct rte_eth_rss_conf *rss_conf)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_vsi *vsi = pf->main_vsi;

	ice_get_rss_key(vsi, rss_conf->rss_key,
			&rss_conf->rss_key_len);

	rss_conf->rss_hf = pf->rss_hf;
	return 0;
}

static int
ice_promisc_enable(struct rte_eth_dev *dev)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_vsi *vsi = pf->main_vsi;
	enum ice_status status;
	uint8_t pmask;
	int ret = 0;

	pmask = ICE_PROMISC_UCAST_RX | ICE_PROMISC_UCAST_TX |
		ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX;

	status = ice_set_vsi_promisc(hw, vsi->idx, pmask, 0);
	switch (status) {
	case ICE_ERR_ALREADY_EXISTS:
		PMD_DRV_LOG(DEBUG, "Promisc mode has already been enabled");
		/* fall through */
	case ICE_SUCCESS:
		break;
	default:
		PMD_DRV_LOG(ERR, "Failed to enable promisc, err=%d", status);
		ret = -EAGAIN;
	}

	return ret;
}

static int
ice_promisc_disable(struct rte_eth_dev *dev)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_vsi *vsi = pf->main_vsi;
	enum ice_status status;
	uint8_t pmask;
	int ret = 0;

	if (dev->data->all_multicast == 1)
		pmask = ICE_PROMISC_UCAST_RX | ICE_PROMISC_UCAST_TX;
	else
		pmask = ICE_PROMISC_UCAST_RX | ICE_PROMISC_UCAST_TX |
			ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX;

	status = ice_clear_vsi_promisc(hw, vsi->idx, pmask, 0);
	if (status != ICE_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to clear promisc, err=%d", status);
		ret = -EAGAIN;
	}

	return ret;
}

static int
ice_allmulti_enable(struct rte_eth_dev *dev)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_vsi *vsi = pf->main_vsi;
	enum ice_status status;
	uint8_t pmask;
	int ret = 0;

	pmask = ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX;

	status = ice_set_vsi_promisc(hw, vsi->idx, pmask, 0);
	switch (status) {
	case ICE_ERR_ALREADY_EXISTS:
		PMD_DRV_LOG(DEBUG, "Allmulti has already been enabled");
		/* fall through */
	case ICE_SUCCESS:
		break;
	default:
		PMD_DRV_LOG(ERR, "Failed to enable allmulti, err=%d", status);
		ret = -EAGAIN;
	}

	return ret;
}

static int
ice_allmulti_disable(struct rte_eth_dev *dev)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_vsi *vsi = pf->main_vsi;
	enum ice_status status;
	uint8_t pmask;
	int ret = 0;

	if (dev->data->promiscuous == 1)
		return 0; /* must remain in all_multicast mode */

	pmask = ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX;

	status = ice_clear_vsi_promisc(hw, vsi->idx, pmask, 0);
	if (status != ICE_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to clear allmulti, err=%d", status);
		ret = -EAGAIN;
	}

	return ret;
}
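/*
 * Note (added): the promiscuous and all-multicast toggles interact.
 * ice_promisc_disable() keeps multicast promiscuity when all_multicast
 * is still set, and ice_allmulti_disable() is a no-op while promiscuous
 * mode is on, so the two ethdev knobs can be flipped in any order
 * without silently dropping multicast traffic.
 */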
static int ice_rx_queue_intr_enable(struct rte_eth_dev *dev,
				    uint16_t queue_id)
{
	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t val;
	uint16_t msix_intr;

	msix_intr = rte_intr_vec_list_index_get(intr_handle, queue_id);

	val = GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
	      GLINT_DYN_CTL_ITR_INDX_M;
	val &= ~GLINT_DYN_CTL_WB_ON_ITR_M;

	ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr), val);
	rte_intr_ack(pci_dev->intr_handle);

	return 0;
}

static int ice_rx_queue_intr_disable(struct rte_eth_dev *dev,
				     uint16_t queue_id)
{
	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint16_t msix_intr;

	msix_intr = rte_intr_vec_list_index_get(intr_handle, queue_id);

	ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr), GLINT_DYN_CTL_WB_ON_ITR_M);

	return 0;
}
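/*
 * Illustrative usage (not part of the original source): these two ops
 * back rte_eth_dev_rx_intr_enable()/rte_eth_dev_rx_intr_disable(). A
 * power-aware poll loop typically arms the interrupt only when a burst
 * comes back empty:
 *
 *	if (rte_eth_rx_burst(port_id, q, pkts, MAX_BURST) == 0) {
 *		rte_eth_dev_rx_intr_enable(port_id, q);
 *		rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, timeout_ms);
 *		rte_eth_dev_rx_intr_disable(port_id, q);
 *	}
 */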
static int
ice_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
{
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	u8 ver, patch;
	u16 build;
	int ret;

	ver = hw->flash.orom.major;
	patch = hw->flash.orom.patch;
	build = hw->flash.orom.build;

	ret = snprintf(fw_version, fw_size,
		       "%x.%02x 0x%08x %d.%d.%d",
		       hw->flash.nvm.major,
		       hw->flash.nvm.minor,
		       hw->flash.nvm.eetrack,
		       ver, build, patch);
	if (ret < 0)
		return -EINVAL;

	/* add the size of '\0' */
	ret += 1;
	if (fw_size < (size_t)ret)
		return ret;
	else
		return 0;
}
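/*
 * Illustrative usage (not part of the original source): mirroring
 * rte_eth_dev_fw_version_get() semantics, a positive return value is
 * the buffer size the caller should retry with (including the '\0'):
 *
 *	char fw[64];
 *	int need = rte_eth_dev_fw_version_get(port_id, fw, sizeof(fw));
 *	if (need > 0)
 *		... retry with a buffer of at least "need" bytes ...
 *
 * The string packs the NVM version, eetrack and OROM version together.
 */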
static int
ice_vsi_vlan_pvid_set(struct ice_vsi *vsi, struct ice_vsi_vlan_pvid_info *info)
{
	struct ice_hw *hw;
	struct ice_vsi_ctx ctxt;
	uint8_t vlan_flags = 0;
	int ret;

	if (!vsi || !info) {
		PMD_DRV_LOG(ERR, "invalid parameters");
		return -EINVAL;
	}

	if (info->on) {
		vsi->info.port_based_inner_vlan = info->config.pvid;
		/**
		 * If insert pvid is enabled, only tagged pkts are
		 * allowed to be sent out.
		 */
		vlan_flags = ICE_AQ_VSI_INNER_VLAN_INSERT_PVID |
			     ICE_AQ_VSI_INNER_VLAN_TX_MODE_ACCEPTUNTAGGED;
	} else {
		vsi->info.port_based_inner_vlan = 0;
		if (info->config.reject.tagged == 0)
			vlan_flags |= ICE_AQ_VSI_INNER_VLAN_TX_MODE_ACCEPTTAGGED;

		if (info->config.reject.untagged == 0)
			vlan_flags |= ICE_AQ_VSI_INNER_VLAN_TX_MODE_ACCEPTUNTAGGED;
	}
	vsi->info.inner_vlan_flags &= ~(ICE_AQ_VSI_INNER_VLAN_INSERT_PVID |
					ICE_AQ_VSI_INNER_VLAN_EMODE_M);
	vsi->info.inner_vlan_flags |= vlan_flags;
	memset(&ctxt, 0, sizeof(ctxt));
	rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
	ctxt.info.valid_sections =
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID);
	ctxt.vsi_num = vsi->vsi_id;

	hw = ICE_VSI_TO_HW(vsi);
	ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
	if (ret != ICE_SUCCESS) {
		PMD_DRV_LOG(ERR,
			    "update VSI for VLAN insert failed, err %d",
			    ret);
		return -EINVAL;
	}

	vsi->info.valid_sections |=
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID);

	return ret;
}

static int
ice_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_vsi *vsi = pf->main_vsi;
	struct rte_eth_dev_data *data = pf->dev_data;
	struct ice_vsi_vlan_pvid_info info;
	int ret;

	memset(&info, 0, sizeof(info));
	info.on = on;
	if (info.on) {
		info.config.pvid = pvid;
	} else {
		info.config.reject.tagged =
			data->dev_conf.txmode.hw_vlan_reject_tagged;
		info.config.reject.untagged =
			data->dev_conf.txmode.hw_vlan_reject_untagged;
	}

	ret = ice_vsi_vlan_pvid_set(vsi, &info);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Failed to set pvid.");
		return -EINVAL;
	}

	return 0;
}
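/*
 * Illustrative usage (not part of the original source): forcing every
 * frame sent on the port to carry VLAN ID 100:
 *
 *	rte_eth_dev_set_vlan_pvid(port_id, 100, 1);
 *
 * and disabling the port VLAN again, with accept/reject behaviour taken
 * from dev_conf.txmode.hw_vlan_reject_tagged/_untagged:
 *
 *	rte_eth_dev_set_vlan_pvid(port_id, 0, 0);
 */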
static int
ice_get_eeprom_length(struct rte_eth_dev *dev)
{
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	return hw->flash.flash_size;
}

static int
ice_get_eeprom(struct rte_eth_dev *dev,
	       struct rte_dev_eeprom_info *eeprom)
{
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	enum ice_status status = ICE_SUCCESS;
	uint8_t *data = eeprom->data;

	eeprom->magic = hw->vendor_id | (hw->device_id << 16);

	status = ice_acquire_nvm(hw, ICE_RES_READ);
	if (status) {
		PMD_DRV_LOG(ERR, "acquire nvm failed.");
		return -EIO;
	}

	status = ice_read_flat_nvm(hw, eeprom->offset, &eeprom->length,
				   data, false);

	ice_release_nvm(hw);

	if (status) {
		PMD_DRV_LOG(ERR, "EEPROM read failed.");
		return -EIO;
	}

	return 0;
}

static int
ice_get_module_info(struct rte_eth_dev *dev,
		    struct rte_eth_dev_module_info *modinfo)
{
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	enum ice_status status;
	u8 sff8472_comp = 0;
	u8 sff8472_swap = 0;
	u8 sff8636_rev = 0;
	u8 value = 0;

	/* Identify the module type from byte 0 of its EEPROM */
	status = ice_aq_sff_eeprom(hw, 0, ICE_I2C_EEPROM_DEV_ADDR, 0x00, 0x00,
				   0, &value, 1, 0, NULL);
	if (status)
		return -EIO;

	switch (value) {
	case ICE_MODULE_TYPE_SFP:
		status = ice_aq_sff_eeprom(hw, 0, ICE_I2C_EEPROM_DEV_ADDR,
					   ICE_MODULE_SFF_8472_COMP, 0x00, 0,
					   &sff8472_comp, 1, 0, NULL);
		if (status)
			return -EIO;
		status = ice_aq_sff_eeprom(hw, 0, ICE_I2C_EEPROM_DEV_ADDR,
					   ICE_MODULE_SFF_8472_SWAP, 0x00, 0,
					   &sff8472_swap, 1, 0, NULL);
		if (status)
			return -EIO;

		if (sff8472_swap & ICE_MODULE_SFF_ADDR_MODE) {
			modinfo->type = ICE_MODULE_SFF_8079;
			modinfo->eeprom_len = ICE_MODULE_SFF_8079_LEN;
		} else if (sff8472_comp &&
			   (sff8472_swap & ICE_MODULE_SFF_DIAG_CAPAB)) {
			modinfo->type = ICE_MODULE_SFF_8472;
			modinfo->eeprom_len = ICE_MODULE_SFF_8472_LEN;
		} else {
			modinfo->type = ICE_MODULE_SFF_8079;
			modinfo->eeprom_len = ICE_MODULE_SFF_8079_LEN;
		}
		break;
	case ICE_MODULE_TYPE_QSFP_PLUS:
	case ICE_MODULE_TYPE_QSFP28:
		status = ice_aq_sff_eeprom(hw, 0, ICE_I2C_EEPROM_DEV_ADDR,
					   ICE_MODULE_REVISION_ADDR, 0x00, 0,
					   &sff8636_rev, 1, 0, NULL);
		if (status)
			return -EIO;
		/* Check revision compliance */
		if (sff8636_rev > 0x02) {
			/* Module is SFF-8636 compliant */
			modinfo->type = ICE_MODULE_SFF_8636;
			modinfo->eeprom_len = ICE_MODULE_QSFP_MAX_LEN;
		} else {
			modinfo->type = ICE_MODULE_SFF_8436;
			modinfo->eeprom_len = ICE_MODULE_QSFP_MAX_LEN;
		}
		break;
	default:
		PMD_DRV_LOG(WARNING, "SFF Module Type not recognized.");
		return -EINVAL;
	}
	return 0;
}

static int
ice_get_module_eeprom(struct rte_eth_dev *dev,
		      struct rte_dev_eeprom_info *info)
{
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
#define SFF_READ_BLOCK_SIZE 8
#define I2C_BUSY_TRY_TIMES 4
#define I2C_USLEEP_MIN_TIME 1500
#define I2C_USLEEP_MAX_TIME 2500
	uint8_t value[SFF_READ_BLOCK_SIZE] = {0};
	uint8_t addr = ICE_I2C_EEPROM_DEV_ADDR;
	uint8_t *data = NULL;
	enum ice_status status;
	bool is_sfp = false;
	uint32_t i, j;
	uint32_t offset = 0;
	uint8_t page = 0;

	if (!info || !info->length || !info->data)
		return -EINVAL;

	status = ice_aq_sff_eeprom(hw, 0, addr, offset, page, 0, value, 1, 0,
				   NULL);
	if (status)
		return -EIO;

	if (value[0] == ICE_MODULE_TYPE_SFP)
		is_sfp = true;

	data = info->data;
	memset(data, 0, info->length);
	for (i = 0; i < info->length; i += SFF_READ_BLOCK_SIZE) {
		offset = i + info->offset;
		page = 0;

		/* Check if we need to access the other memory page */
		if (is_sfp) {
			if (offset >= ICE_MODULE_SFF_8079_LEN) {
				offset -= ICE_MODULE_SFF_8079_LEN;
				addr = ICE_I2C_EEPROM_DEV_ADDR2;
			}
		} else {
			while (offset >= ICE_MODULE_SFF_8436_LEN) {
				/* Compute memory page number and offset. */
				offset -= ICE_MODULE_SFF_8436_LEN / 2;
				page++;
			}
		}

		/* Bit 2 of eeprom address 0x02 declares upper
		 * pages are disabled on QSFP modules.
		 * SFP modules only ever use page 0.
		 */
		if (page == 0 || !(data[0x2] & 0x4)) {
			/* If i2c bus is busy due to slow page change or
			 * link management access, call can fail.
			 * This is normal. So we retry this a few times.
			 */
			for (j = 0; j < I2C_BUSY_TRY_TIMES; j++) {
				status = ice_aq_sff_eeprom(hw, 0, addr, offset,
							   page, !is_sfp, value,
							   SFF_READ_BLOCK_SIZE,
							   0, NULL);
				PMD_DRV_LOG(DEBUG, "SFF %02X %02X %02X %X = "
					"%02X%02X%02X%02X.%02X%02X%02X%02X (%X)",
					addr, offset, page, is_sfp,
					value[0], value[1], value[2], value[3],
					value[4], value[5], value[6], value[7],
					status);
				if (status) {
					usleep_range(I2C_USLEEP_MIN_TIME,
						     I2C_USLEEP_MAX_TIME);
					memset(value, 0, SFF_READ_BLOCK_SIZE);
					continue;
				}
				break;
			}

			/* Copy the block, trimming it if it would run past
			 * the end of the caller's buffer (the original
			 * "<" guard silently dropped the final block).
			 */
			if ((i + SFF_READ_BLOCK_SIZE) <= info->length)
				memcpy(data + i, value, SFF_READ_BLOCK_SIZE);
			else
				memcpy(data + i, value, info->length - i);
		}
	}

	return 0;
}
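/*
 * Worked example (added; assumes ICE_MODULE_SFF_8436_LEN == 256, its
 * usual value): a flat QSFP offset of 384 is mapped by the loop above
 * as 384 -> 256 (page 1) -> 128 (page 2), so upper page 2 is read
 * through I2C offsets 128..255. SFP modules have no pages; once the
 * SFF-8079 space is exhausted the read simply switches from device
 * address ICE_I2C_EEPROM_DEV_ADDR to ICE_I2C_EEPROM_DEV_ADDR2.
 */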
/* Read a 32-bit HW counter and accumulate the delta since *offset */
static void
ice_stat_update_32(struct ice_hw *hw,
		   uint32_t reg,
		   bool offset_loaded,
		   uint64_t *offset,
		   uint64_t *stat)
{
	uint64_t new_data;

	new_data = (uint64_t)ICE_READ_REG(hw, reg);
	if (!offset_loaded)
		*offset = new_data;

	if (new_data >= *offset)
		*stat = (uint64_t)(new_data - *offset);
	else
		*stat = (uint64_t)((new_data +
				    ((uint64_t)1 << ICE_32_BIT_WIDTH))
				   - *offset);
}

/* Read a 40-bit HW counter split across a low and a high register */
static void
ice_stat_update_40(struct ice_hw *hw,
		   uint32_t hireg,
		   uint32_t loreg,
		   bool offset_loaded,
		   uint64_t *offset,
		   uint64_t *stat)
{
	uint64_t new_data;

	new_data = (uint64_t)ICE_READ_REG(hw, loreg);
	new_data |= (uint64_t)(ICE_READ_REG(hw, hireg) & ICE_8_BIT_MASK) <<
		    ICE_32_BIT_WIDTH;

	if (!offset_loaded)
		*offset = new_data;

	if (new_data >= *offset)
		*stat = new_data - *offset;
	else
		*stat = (uint64_t)((new_data +
				    ((uint64_t)1 << ICE_40_BIT_WIDTH)) -
				   *offset);

	*stat &= ICE_40_BIT_MASK;
}
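/*
 * Worked example (added): the hardware counters free-run and are never
 * cleared, so each read computes a delta against the stored snapshot
 * (*offset). If a 40-bit snapshot was 0xFFFFFFFF00 and the register now
 * reads 0x100, the raw difference would be negative; adding
 * 1 << ICE_40_BIT_WIDTH first recovers the true delta of 0x200 across
 * the wrap.
 */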
/* Get all the statistics of a VSI */
static void
ice_update_vsi_stats(struct ice_vsi *vsi)
{
	struct ice_eth_stats *oes = &vsi->eth_stats_offset;
	struct ice_eth_stats *nes = &vsi->eth_stats;
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	int idx = rte_le_to_cpu_16(vsi->vsi_id);

	ice_stat_update_40(hw, GLV_GORCH(idx), GLV_GORCL(idx),
			   vsi->offset_loaded, &oes->rx_bytes,
			   &nes->rx_bytes);
	ice_stat_update_40(hw, GLV_UPRCH(idx), GLV_UPRCL(idx),
			   vsi->offset_loaded, &oes->rx_unicast,
			   &nes->rx_unicast);
	ice_stat_update_40(hw, GLV_MPRCH(idx), GLV_MPRCL(idx),
			   vsi->offset_loaded, &oes->rx_multicast,
			   &nes->rx_multicast);
	ice_stat_update_40(hw, GLV_BPRCH(idx), GLV_BPRCL(idx),
			   vsi->offset_loaded, &oes->rx_broadcast,
			   &nes->rx_broadcast);
	/* enlarge the limitation when rx_bytes overflowed */
	if (vsi->offset_loaded) {
		if (ICE_RXTX_BYTES_LOW(vsi->old_rx_bytes) > nes->rx_bytes)
			nes->rx_bytes += (uint64_t)1 << ICE_40_BIT_WIDTH;
		nes->rx_bytes += ICE_RXTX_BYTES_HIGH(vsi->old_rx_bytes);
	}
	vsi->old_rx_bytes = nes->rx_bytes;
	/* exclude CRC bytes */
	nes->rx_bytes -= (nes->rx_unicast + nes->rx_multicast +
			  nes->rx_broadcast) * RTE_ETHER_CRC_LEN;

	ice_stat_update_32(hw, GLV_RDPC(idx), vsi->offset_loaded,
			   &oes->rx_discards, &nes->rx_discards);
	/* GLV_REPC not supported */
	/* GLV_RMPC not supported */
	ice_stat_update_32(hw, GLSWID_RUPP(idx), vsi->offset_loaded,
			   &oes->rx_unknown_protocol,
			   &nes->rx_unknown_protocol);
	ice_stat_update_40(hw, GLV_GOTCH(idx), GLV_GOTCL(idx),
			   vsi->offset_loaded, &oes->tx_bytes,
			   &nes->tx_bytes);
	ice_stat_update_40(hw, GLV_UPTCH(idx), GLV_UPTCL(idx),
			   vsi->offset_loaded, &oes->tx_unicast,
			   &nes->tx_unicast);
	ice_stat_update_40(hw, GLV_MPTCH(idx), GLV_MPTCL(idx),
			   vsi->offset_loaded, &oes->tx_multicast,
			   &nes->tx_multicast);
	ice_stat_update_40(hw, GLV_BPTCH(idx), GLV_BPTCL(idx),
			   vsi->offset_loaded, &oes->tx_broadcast,
			   &nes->tx_broadcast);
	/* GLV_TDPC not supported */
	ice_stat_update_32(hw, GLV_TEPC(idx), vsi->offset_loaded,
			   &oes->tx_errors, &nes->tx_errors);
	/* enlarge the limitation when tx_bytes overflowed */
	if (vsi->offset_loaded) {
		if (ICE_RXTX_BYTES_LOW(vsi->old_tx_bytes) > nes->tx_bytes)
			nes->tx_bytes += (uint64_t)1 << ICE_40_BIT_WIDTH;
		nes->tx_bytes += ICE_RXTX_BYTES_HIGH(vsi->old_tx_bytes);
	}
	vsi->old_tx_bytes = nes->tx_bytes;
	vsi->offset_loaded = true;

	PMD_DRV_LOG(DEBUG, "************** VSI[%u] stats start **************",
		    vsi->vsi_id);
	PMD_DRV_LOG(DEBUG, "rx_bytes: %"PRIu64"", nes->rx_bytes);
	PMD_DRV_LOG(DEBUG, "rx_unicast: %"PRIu64"", nes->rx_unicast);
	PMD_DRV_LOG(DEBUG, "rx_multicast: %"PRIu64"", nes->rx_multicast);
	PMD_DRV_LOG(DEBUG, "rx_broadcast: %"PRIu64"", nes->rx_broadcast);
	PMD_DRV_LOG(DEBUG, "rx_discards: %"PRIu64"", nes->rx_discards);
	PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
		    nes->rx_unknown_protocol);
	PMD_DRV_LOG(DEBUG, "tx_bytes: %"PRIu64"", nes->tx_bytes);
	PMD_DRV_LOG(DEBUG, "tx_unicast: %"PRIu64"", nes->tx_unicast);
	PMD_DRV_LOG(DEBUG, "tx_multicast: %"PRIu64"", nes->tx_multicast);
	PMD_DRV_LOG(DEBUG, "tx_broadcast: %"PRIu64"", nes->tx_broadcast);
	PMD_DRV_LOG(DEBUG, "tx_discards: %"PRIu64"", nes->tx_discards);
	PMD_DRV_LOG(DEBUG, "tx_errors: %"PRIu64"", nes->tx_errors);
	PMD_DRV_LOG(DEBUG, "************** VSI[%u] stats end ****************",
		    vsi->vsi_id);
}
static void
ice_read_stats_registers(struct ice_pf *pf, struct ice_hw *hw)
{
	struct ice_hw_port_stats *ns = &pf->stats; /* new stats */
	struct ice_hw_port_stats *os = &pf->stats_offset; /* old stats */

	/* Get statistics of struct ice_eth_stats */
	ice_stat_update_40(hw, GLPRT_GORCH(hw->port_info->lport),
			   GLPRT_GORCL(hw->port_info->lport),
			   pf->offset_loaded, &os->eth.rx_bytes,
			   &ns->eth.rx_bytes);
	ice_stat_update_40(hw, GLPRT_UPRCH(hw->port_info->lport),
			   GLPRT_UPRCL(hw->port_info->lport),
			   pf->offset_loaded, &os->eth.rx_unicast,
			   &ns->eth.rx_unicast);
	ice_stat_update_40(hw, GLPRT_MPRCH(hw->port_info->lport),
			   GLPRT_MPRCL(hw->port_info->lport),
			   pf->offset_loaded, &os->eth.rx_multicast,
			   &ns->eth.rx_multicast);
	ice_stat_update_40(hw, GLPRT_BPRCH(hw->port_info->lport),
			   GLPRT_BPRCL(hw->port_info->lport),
			   pf->offset_loaded, &os->eth.rx_broadcast,
			   &ns->eth.rx_broadcast);
	ice_stat_update_32(hw, PRTRPB_RDPC,
			   pf->offset_loaded, &os->eth.rx_discards,
			   &ns->eth.rx_discards);
	/* enlarge the limitation when rx_bytes overflowed */
	if (pf->offset_loaded) {
		if (ICE_RXTX_BYTES_LOW(pf->old_rx_bytes) > ns->eth.rx_bytes)
			ns->eth.rx_bytes += (uint64_t)1 << ICE_40_BIT_WIDTH;
		ns->eth.rx_bytes += ICE_RXTX_BYTES_HIGH(pf->old_rx_bytes);
	}
	pf->old_rx_bytes = ns->eth.rx_bytes;

	/* Workaround: CRC size should not be included in byte statistics,
	 * so subtract RTE_ETHER_CRC_LEN from the byte counter for each rx
	 * packet.
	 */
	ns->eth.rx_bytes -= (ns->eth.rx_unicast + ns->eth.rx_multicast +
			     ns->eth.rx_broadcast) * RTE_ETHER_CRC_LEN;

	/* GLPRT_REPC not supported */
	/* GLPRT_RMPC not supported */
	ice_stat_update_32(hw, GLSWID_RUPP(hw->port_info->lport),
			   pf->offset_loaded,
			   &os->eth.rx_unknown_protocol,
			   &ns->eth.rx_unknown_protocol);
	ice_stat_update_40(hw, GLPRT_GOTCH(hw->port_info->lport),
			   GLPRT_GOTCL(hw->port_info->lport),
			   pf->offset_loaded, &os->eth.tx_bytes,
			   &ns->eth.tx_bytes);
	ice_stat_update_40(hw, GLPRT_UPTCH(hw->port_info->lport),
			   GLPRT_UPTCL(hw->port_info->lport),
			   pf->offset_loaded, &os->eth.tx_unicast,
			   &ns->eth.tx_unicast);
	ice_stat_update_40(hw, GLPRT_MPTCH(hw->port_info->lport),
			   GLPRT_MPTCL(hw->port_info->lport),
			   pf->offset_loaded, &os->eth.tx_multicast,
			   &ns->eth.tx_multicast);
	ice_stat_update_40(hw, GLPRT_BPTCH(hw->port_info->lport),
			   GLPRT_BPTCL(hw->port_info->lport),
			   pf->offset_loaded, &os->eth.tx_broadcast,
			   &ns->eth.tx_broadcast);
	/* enlarge the limitation when tx_bytes overflowed */
	if (pf->offset_loaded) {
		if (ICE_RXTX_BYTES_LOW(pf->old_tx_bytes) > ns->eth.tx_bytes)
			ns->eth.tx_bytes += (uint64_t)1 << ICE_40_BIT_WIDTH;
		ns->eth.tx_bytes += ICE_RXTX_BYTES_HIGH(pf->old_tx_bytes);
	}
	pf->old_tx_bytes = ns->eth.tx_bytes;
	ns->eth.tx_bytes -= (ns->eth.tx_unicast + ns->eth.tx_multicast +
			     ns->eth.tx_broadcast) * RTE_ETHER_CRC_LEN;

	/* GLPRT_TEPC not supported */

	/* additional port specific stats */
	ice_stat_update_32(hw, GLPRT_TDOLD(hw->port_info->lport),
			   pf->offset_loaded, &os->tx_dropped_link_down,
			   &ns->tx_dropped_link_down);
	ice_stat_update_32(hw, GLPRT_CRCERRS(hw->port_info->lport),
			   pf->offset_loaded, &os->crc_errors,
			   &ns->crc_errors);
	ice_stat_update_32(hw, GLPRT_ILLERRC(hw->port_info->lport),
			   pf->offset_loaded, &os->illegal_bytes,
			   &ns->illegal_bytes);
	/* GLPRT_ERRBC not supported */
	ice_stat_update_32(hw, GLPRT_MLFC(hw->port_info->lport),
			   pf->offset_loaded, &os->mac_local_faults,
			   &ns->mac_local_faults);
	ice_stat_update_32(hw, GLPRT_MRFC(hw->port_info->lport),
			   pf->offset_loaded, &os->mac_remote_faults,
			   &ns->mac_remote_faults);

	ice_stat_update_32(hw, GLPRT_RLEC(hw->port_info->lport),
			   pf->offset_loaded, &os->rx_len_errors,
			   &ns->rx_len_errors);

	ice_stat_update_32(hw, GLPRT_LXONRXC(hw->port_info->lport),
			   pf->offset_loaded, &os->link_xon_rx,
			   &ns->link_xon_rx);
	ice_stat_update_32(hw, GLPRT_LXOFFRXC(hw->port_info->lport),
			   pf->offset_loaded, &os->link_xoff_rx,
			   &ns->link_xoff_rx);
	ice_stat_update_32(hw, GLPRT_LXONTXC(hw->port_info->lport),
			   pf->offset_loaded, &os->link_xon_tx,
			   &ns->link_xon_tx);
	ice_stat_update_32(hw, GLPRT_LXOFFTXC(hw->port_info->lport),
			   pf->offset_loaded, &os->link_xoff_tx,
			   &ns->link_xoff_tx);
	ice_stat_update_40(hw, GLPRT_PRC64H(hw->port_info->lport),
			   GLPRT_PRC64L(hw->port_info->lport),
			   pf->offset_loaded, &os->rx_size_64,
			   &ns->rx_size_64);
	ice_stat_update_40(hw, GLPRT_PRC127H(hw->port_info->lport),
			   GLPRT_PRC127L(hw->port_info->lport),
			   pf->offset_loaded, &os->rx_size_127,
			   &ns->rx_size_127);
	ice_stat_update_40(hw, GLPRT_PRC255H(hw->port_info->lport),
			   GLPRT_PRC255L(hw->port_info->lport),
			   pf->offset_loaded, &os->rx_size_255,
			   &ns->rx_size_255);
	ice_stat_update_40(hw, GLPRT_PRC511H(hw->port_info->lport),
			   GLPRT_PRC511L(hw->port_info->lport),
			   pf->offset_loaded, &os->rx_size_511,
			   &ns->rx_size_511);
	ice_stat_update_40(hw, GLPRT_PRC1023H(hw->port_info->lport),
			   GLPRT_PRC1023L(hw->port_info->lport),
			   pf->offset_loaded, &os->rx_size_1023,
			   &ns->rx_size_1023);
	ice_stat_update_40(hw, GLPRT_PRC1522H(hw->port_info->lport),
			   GLPRT_PRC1522L(hw->port_info->lport),
			   pf->offset_loaded, &os->rx_size_1522,
			   &ns->rx_size_1522);
	ice_stat_update_40(hw, GLPRT_PRC9522H(hw->port_info->lport),
			   GLPRT_PRC9522L(hw->port_info->lport),
			   pf->offset_loaded, &os->rx_size_big,
			   &ns->rx_size_big);
	ice_stat_update_32(hw, GLPRT_RUC(hw->port_info->lport),
			   pf->offset_loaded, &os->rx_undersize,
			   &ns->rx_undersize);
	ice_stat_update_32(hw, GLPRT_RFC(hw->port_info->lport),
			   pf->offset_loaded, &os->rx_fragments,
			   &ns->rx_fragments);
	ice_stat_update_32(hw, GLPRT_ROC(hw->port_info->lport),
			   pf->offset_loaded, &os->rx_oversize,
			   &ns->rx_oversize);
	ice_stat_update_32(hw, GLPRT_RJC(hw->port_info->lport),
			   pf->offset_loaded, &os->rx_jabber,
			   &ns->rx_jabber);
	ice_stat_update_40(hw, GLPRT_PTC64H(hw->port_info->lport),
			   GLPRT_PTC64L(hw->port_info->lport),
			   pf->offset_loaded, &os->tx_size_64,
			   &ns->tx_size_64);
	ice_stat_update_40(hw, GLPRT_PTC127H(hw->port_info->lport),
			   GLPRT_PTC127L(hw->port_info->lport),
			   pf->offset_loaded, &os->tx_size_127,
			   &ns->tx_size_127);
	ice_stat_update_40(hw, GLPRT_PTC255H(hw->port_info->lport),
			   GLPRT_PTC255L(hw->port_info->lport),
			   pf->offset_loaded, &os->tx_size_255,
			   &ns->tx_size_255);
	ice_stat_update_40(hw, GLPRT_PTC511H(hw->port_info->lport),
			   GLPRT_PTC511L(hw->port_info->lport),
			   pf->offset_loaded, &os->tx_size_511,
			   &ns->tx_size_511);
	ice_stat_update_40(hw, GLPRT_PTC1023H(hw->port_info->lport),
			   GLPRT_PTC1023L(hw->port_info->lport),
			   pf->offset_loaded, &os->tx_size_1023,
			   &ns->tx_size_1023);
	ice_stat_update_40(hw, GLPRT_PTC1522H(hw->port_info->lport),
			   GLPRT_PTC1522L(hw->port_info->lport),
			   pf->offset_loaded, &os->tx_size_1522,
			   &ns->tx_size_1522);
	ice_stat_update_40(hw, GLPRT_PTC9522H(hw->port_info->lport),
			   GLPRT_PTC9522L(hw->port_info->lport),
			   pf->offset_loaded, &os->tx_size_big,
			   &ns->tx_size_big);

	/* GLPRT_MSPDC not supported */
	/* GLPRT_XEC not supported */

	pf->offset_loaded = true;

	if (pf->main_vsi)
		ice_update_vsi_stats(pf->main_vsi);
}
/* Get all statistics of a port */
static int
ice_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_hw_port_stats *ns = &pf->stats; /* new stats */

	/* call read registers - updates values, now write them to struct */
	ice_read_stats_registers(pf, hw);

	stats->ipackets = pf->main_vsi->eth_stats.rx_unicast +
			  pf->main_vsi->eth_stats.rx_multicast +
			  pf->main_vsi->eth_stats.rx_broadcast -
			  pf->main_vsi->eth_stats.rx_discards;
	stats->opackets = ns->eth.tx_unicast +
			  ns->eth.tx_multicast +
			  ns->eth.tx_broadcast;
	stats->ibytes = pf->main_vsi->eth_stats.rx_bytes;
	stats->obytes = ns->eth.tx_bytes;
	stats->oerrors = ns->eth.tx_errors +
			 pf->main_vsi->eth_stats.tx_errors;

	/* Rx Errors */
	stats->imissed = ns->eth.rx_discards +
			 pf->main_vsi->eth_stats.rx_discards;
	stats->ierrors = ns->crc_errors +
			 ns->rx_undersize +
			 ns->rx_oversize + ns->rx_fragments + ns->rx_jabber;

	PMD_DRV_LOG(DEBUG, "*************** PF stats start *****************");
	PMD_DRV_LOG(DEBUG, "rx_bytes: %"PRIu64"", ns->eth.rx_bytes);
	PMD_DRV_LOG(DEBUG, "rx_unicast: %"PRIu64"", ns->eth.rx_unicast);
	PMD_DRV_LOG(DEBUG, "rx_multicast: %"PRIu64"", ns->eth.rx_multicast);
	PMD_DRV_LOG(DEBUG, "rx_broadcast: %"PRIu64"", ns->eth.rx_broadcast);
	PMD_DRV_LOG(DEBUG, "rx_discards: %"PRIu64"", ns->eth.rx_discards);
	PMD_DRV_LOG(DEBUG, "vsi rx_discards: %"PRIu64"",
		    pf->main_vsi->eth_stats.rx_discards);
	PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
		    ns->eth.rx_unknown_protocol);
	PMD_DRV_LOG(DEBUG, "tx_bytes: %"PRIu64"", ns->eth.tx_bytes);
	PMD_DRV_LOG(DEBUG, "tx_unicast: %"PRIu64"", ns->eth.tx_unicast);
	PMD_DRV_LOG(DEBUG, "tx_multicast: %"PRIu64"", ns->eth.tx_multicast);
	PMD_DRV_LOG(DEBUG, "tx_broadcast: %"PRIu64"", ns->eth.tx_broadcast);
	PMD_DRV_LOG(DEBUG, "tx_discards: %"PRIu64"", ns->eth.tx_discards);
	PMD_DRV_LOG(DEBUG, "vsi tx_discards: %"PRIu64"",
		    pf->main_vsi->eth_stats.tx_discards);
	PMD_DRV_LOG(DEBUG, "tx_errors: %"PRIu64"", ns->eth.tx_errors);

	PMD_DRV_LOG(DEBUG, "tx_dropped_link_down: %"PRIu64"",
		    ns->tx_dropped_link_down);
	PMD_DRV_LOG(DEBUG, "crc_errors: %"PRIu64"", ns->crc_errors);
	PMD_DRV_LOG(DEBUG, "illegal_bytes: %"PRIu64"",
		    ns->illegal_bytes);
	PMD_DRV_LOG(DEBUG, "error_bytes: %"PRIu64"", ns->error_bytes);
	PMD_DRV_LOG(DEBUG, "mac_local_faults: %"PRIu64"",
		    ns->mac_local_faults);
	PMD_DRV_LOG(DEBUG, "mac_remote_faults: %"PRIu64"",
		    ns->mac_remote_faults);
	PMD_DRV_LOG(DEBUG, "link_xon_rx: %"PRIu64"", ns->link_xon_rx);
	PMD_DRV_LOG(DEBUG, "link_xoff_rx: %"PRIu64"", ns->link_xoff_rx);
	PMD_DRV_LOG(DEBUG, "link_xon_tx: %"PRIu64"", ns->link_xon_tx);
	PMD_DRV_LOG(DEBUG, "link_xoff_tx: %"PRIu64"", ns->link_xoff_tx);
	PMD_DRV_LOG(DEBUG, "rx_size_64: %"PRIu64"", ns->rx_size_64);
	PMD_DRV_LOG(DEBUG, "rx_size_127: %"PRIu64"", ns->rx_size_127);
	PMD_DRV_LOG(DEBUG, "rx_size_255: %"PRIu64"", ns->rx_size_255);
	PMD_DRV_LOG(DEBUG, "rx_size_511: %"PRIu64"", ns->rx_size_511);
	PMD_DRV_LOG(DEBUG, "rx_size_1023: %"PRIu64"", ns->rx_size_1023);
	PMD_DRV_LOG(DEBUG, "rx_size_1522: %"PRIu64"", ns->rx_size_1522);
	PMD_DRV_LOG(DEBUG, "rx_size_big: %"PRIu64"", ns->rx_size_big);
	PMD_DRV_LOG(DEBUG, "rx_undersize: %"PRIu64"", ns->rx_undersize);
	PMD_DRV_LOG(DEBUG, "rx_fragments: %"PRIu64"", ns->rx_fragments);
	PMD_DRV_LOG(DEBUG, "rx_oversize: %"PRIu64"", ns->rx_oversize);
	PMD_DRV_LOG(DEBUG, "rx_jabber: %"PRIu64"", ns->rx_jabber);
	PMD_DRV_LOG(DEBUG, "tx_size_64: %"PRIu64"", ns->tx_size_64);
	PMD_DRV_LOG(DEBUG, "tx_size_127: %"PRIu64"", ns->tx_size_127);
	PMD_DRV_LOG(DEBUG, "tx_size_255: %"PRIu64"", ns->tx_size_255);
	PMD_DRV_LOG(DEBUG, "tx_size_511: %"PRIu64"", ns->tx_size_511);
	PMD_DRV_LOG(DEBUG, "tx_size_1023: %"PRIu64"", ns->tx_size_1023);
	PMD_DRV_LOG(DEBUG, "tx_size_1522: %"PRIu64"", ns->tx_size_1522);
	PMD_DRV_LOG(DEBUG, "tx_size_big: %"PRIu64"", ns->tx_size_big);
	PMD_DRV_LOG(DEBUG, "rx_len_errors: %"PRIu64"", ns->rx_len_errors);
	PMD_DRV_LOG(DEBUG, "************* PF stats end ****************");

	return 0;
}
/* Reset the statistics */
static int
ice_stats_reset(struct rte_eth_dev *dev)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* Mark PF and VSI stats to update the offset, aka "reset" */
	pf->offset_loaded = false;
	if (pf->main_vsi)
		pf->main_vsi->offset_loaded = false;

	/* read the stats, reading current register values into offset */
	ice_read_stats_registers(pf, hw);

	return 0;
}
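/*
 * Note (added): "reset" never touches the hardware counters. Clearing
 * offset_loaded makes the ice_read_stats_registers() call above capture
 * the current register values as the new baseline, so every delta
 * reported afterwards starts from zero.
 */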
static uint32_t
ice_xstats_calc_num(void)
{
	uint32_t num;

	num = ICE_NB_ETH_XSTATS + ICE_NB_HW_PORT_XSTATS;

	return num;
}

static int
ice_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
	       unsigned int n)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	unsigned int i;
	unsigned int count;
	struct ice_hw_port_stats *hw_stats = &pf->stats;

	count = ice_xstats_calc_num();
	if (n < count)
		return count;

	ice_read_stats_registers(pf, hw);

	if (!xstats)
		return 0;

	count = 0;

	/* Get stats from ice_eth_stats struct */
	for (i = 0; i < ICE_NB_ETH_XSTATS; i++) {
		xstats[count].value =
			*(uint64_t *)((char *)&hw_stats->eth +
				      ice_stats_strings[i].offset);
		xstats[count].id = count;
		count++;
	}

	/* Get individual stats from ice_hw_port struct */
	for (i = 0; i < ICE_NB_HW_PORT_XSTATS; i++) {
		xstats[count].value =
			*(uint64_t *)((char *)hw_stats +
				      ice_hw_port_strings[i].offset);
		xstats[count].id = count;
		count++;
	}

	return count;
}

static int ice_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
				struct rte_eth_xstat_name *xstats_names,
				__rte_unused unsigned int limit)
{
	unsigned int count = 0;
	unsigned int i;

	if (!xstats_names)
		return ice_xstats_calc_num();

	/* Note: limit checked in rte_eth_xstats_names() */

	/* Get stats from ice_eth_stats struct */
	for (i = 0; i < ICE_NB_ETH_XSTATS; i++) {
		strlcpy(xstats_names[count].name, ice_stats_strings[i].name,
			sizeof(xstats_names[count].name));
		count++;
	}

	/* Get individual stats from ice_hw_port struct */
	for (i = 0; i < ICE_NB_HW_PORT_XSTATS; i++) {
		strlcpy(xstats_names[count].name, ice_hw_port_strings[i].name,
			sizeof(xstats_names[count].name));
		count++;
	}

	return count;
}
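/*
 * Illustrative usage (not part of the original source): xstats follows
 * the usual two-call pattern - probe for the count, then fetch values
 * and names:
 *
 *	int n = rte_eth_xstats_get(port_id, NULL, 0);
 *	struct rte_eth_xstat *xs = malloc(n * sizeof(*xs));
 *	struct rte_eth_xstat_name *names = malloc(n * sizeof(*names));
 *	rte_eth_xstats_get(port_id, xs, n);
 *	rte_eth_xstats_get_names(port_id, names, n);
 *
 * xs[i].id indexes into the names array.
 */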
static int
ice_dev_flow_ops_get(struct rte_eth_dev *dev,
		     const struct rte_flow_ops **ops)
{
	if (!dev)
		return -EINVAL;

	*ops = &ice_flow_ops;
	return 0;
}

/* Add UDP tunneling port */
static int
ice_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
			    struct rte_eth_udp_tunnel *udp_tunnel)
{
	int ret = 0;
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (udp_tunnel == NULL)
		return -EINVAL;

	switch (udp_tunnel->prot_type) {
	case RTE_ETH_TUNNEL_TYPE_VXLAN:
		ret = ice_create_tunnel(hw, TNL_VXLAN, udp_tunnel->udp_port);
		break;
	default:
		PMD_DRV_LOG(ERR, "Invalid tunnel type");
		ret = -EINVAL;
		break;
	}

	return ret;
}

/* Delete UDP tunneling port */
static int
ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
			    struct rte_eth_udp_tunnel *udp_tunnel)
{
	int ret = 0;
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (udp_tunnel == NULL)
		return -EINVAL;

	switch (udp_tunnel->prot_type) {
	case RTE_ETH_TUNNEL_TYPE_VXLAN:
		ret = ice_destroy_tunnel(hw, udp_tunnel->udp_port, 0);
		break;
	default:
		PMD_DRV_LOG(ERR, "Invalid tunnel type");
		ret = -EINVAL;
		break;
	}

	return ret;
}
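/*
 * Illustrative usage (not part of the original source): registering the
 * IANA-assigned VXLAN port so the parser classifies the inner headers:
 *
 *	struct rte_eth_udp_tunnel tunnel = {
 *		.udp_port = 4789,
 *		.prot_type = RTE_ETH_TUNNEL_TYPE_VXLAN,
 *	};
 *	rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);
 *
 * Only VXLAN is accepted by these two ops; anything else is rejected
 * with -EINVAL.
 */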
static int
ice_timesync_enable(struct rte_eth_dev *dev)
{
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_adapter *ad =
			ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	int ret;

	if (dev->data->dev_started && !(dev->data->dev_conf.rxmode.offloads &
	    RTE_ETH_RX_OFFLOAD_TIMESTAMP)) {
		PMD_DRV_LOG(ERR, "Rx timestamp offload not configured");
		return -1;
	}

	if (hw->func_caps.ts_func_info.src_tmr_owned) {
		ret = ice_ptp_init_phc(hw);
		if (ret) {
			PMD_DRV_LOG(ERR, "Failed to initialize PHC");
			return -1;
		}

		ret = ice_ptp_write_incval(hw, ICE_PTP_NOMINAL_INCVAL_E810);
		if (ret) {
			PMD_DRV_LOG(ERR,
				    "Failed to write PHC increment time value");
			return -1;
		}
	}

	/* Initialize cycle counters for system time/RX/TX timestamp */
	memset(&ad->systime_tc, 0, sizeof(struct rte_timecounter));
	memset(&ad->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
	memset(&ad->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));

	ad->systime_tc.cc_mask = ICE_CYCLECOUNTER_MASK;
	ad->systime_tc.cc_shift = 0;
	ad->systime_tc.nsec_mask = 0;

	ad->rx_tstamp_tc.cc_mask = ICE_CYCLECOUNTER_MASK;
	ad->rx_tstamp_tc.cc_shift = 0;
	ad->rx_tstamp_tc.nsec_mask = 0;

	ad->tx_tstamp_tc.cc_mask = ICE_CYCLECOUNTER_MASK;
	ad->tx_tstamp_tc.cc_shift = 0;
	ad->tx_tstamp_tc.nsec_mask = 0;

	ad->ptp_ena = 1;

	return 0;
}

static int
ice_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
			       struct timespec *timestamp, uint32_t flags)
{
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_adapter *ad =
			ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct ice_rx_queue *rxq;
	uint32_t ts_high;
	uint64_t ts_ns, ns;

	/* The ethdev "flags" argument carries the Rx queue index here */
	rxq = dev->data->rx_queues[flags];

	ts_high = rxq->time_high;
	ts_ns = ice_tstamp_convert_32b_64b(hw, ad, 1, ts_high);
	ns = rte_timecounter_update(&ad->rx_tstamp_tc, ts_ns);
	*timestamp = rte_ns_to_timespec(ns);

	return 0;
}

static int
ice_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
			       struct timespec *timestamp)
{
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_adapter *ad =
			ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	uint8_t lport;
	uint64_t ts_ns, ns, tstamp;
	const uint64_t mask = 0xFFFFFFFF;
	int ret;

	lport = hw->port_info->lport;

	ret = ice_read_phy_tstamp(hw, lport, 0, &tstamp);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to read phy timestamp");
		return -1;
	}

	ts_ns = ice_tstamp_convert_32b_64b(hw, ad, 1, (tstamp >> 8) & mask);
	ns = rte_timecounter_update(&ad->tx_tstamp_tc, ts_ns);
	*timestamp = rte_ns_to_timespec(ns);

	return 0;
}

static int
ice_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
{
	struct ice_adapter *ad =
			ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);

	ad->systime_tc.nsec += delta;
	ad->rx_tstamp_tc.nsec += delta;
	ad->tx_tstamp_tc.nsec += delta;

	return 0;
}

static int
ice_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
{
	struct ice_adapter *ad =
			ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	uint64_t ns;

	ns = rte_timespec_to_ns(ts);

	ad->systime_tc.nsec = ns;
	ad->rx_tstamp_tc.nsec = ns;
	ad->tx_tstamp_tc.nsec = ns;

	return 0;
}

static int
ice_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
{
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_adapter *ad =
			ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	uint32_t hi, lo, lo2;
	uint64_t time, ns;

	lo = ICE_READ_REG(hw, GLTSYN_TIME_L(0));
	hi = ICE_READ_REG(hw, GLTSYN_TIME_H(0));
	lo2 = ICE_READ_REG(hw, GLTSYN_TIME_L(0));

	/* The low word wrapped between the two reads, so the high word
	 * may be stale; read the pair again.
	 */
	if (lo2 < lo) {
		lo = ICE_READ_REG(hw, GLTSYN_TIME_L(0));
		hi = ICE_READ_REG(hw, GLTSYN_TIME_H(0));
	}

	time = ((uint64_t)hi << 32) | lo;
	ns = rte_timecounter_update(&ad->systime_tc, time);
	*ts = rte_ns_to_timespec(ns);

	return 0;
}

static int
ice_timesync_disable(struct rte_eth_dev *dev)
{
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_adapter *ad =
			ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	uint64_t val;
	uint8_t lport;

	lport = hw->port_info->lport;

	ice_clear_phy_tstamp(hw, lport, 0);

	val = ICE_READ_REG(hw, GLTSYN_ENA(0));
	val &= ~GLTSYN_ENA_TSYN_ENA_M;
	ICE_WRITE_REG(hw, GLTSYN_ENA(0), val);

	ICE_WRITE_REG(hw, GLTSYN_INCVAL_L(0), 0);
	ICE_WRITE_REG(hw, GLTSYN_INCVAL_H(0), 0);

	ad->ptp_ena = 0;

	return 0;
}
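/*
 * Illustrative usage (not part of the original source): a PTP-aware
 * application (such as the ptpclient example in DPDK) enables the Rx
 * timestamp offload, starts the port, then:
 *
 *	struct timespec ts;
 *
 *	rte_eth_timesync_enable(port_id);
 *	... receive a PTP event frame on queue 0 ...
 *	rte_eth_timesync_read_rx_timestamp(port_id, &ts, 0);
 *
 * and steers the clock with rte_eth_timesync_adjust_time() using the
 * measured master/slave offset.
 */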
static int
ice_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	      struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
					     sizeof(struct ice_adapter),
					     ice_dev_init);
}

static int
ice_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, ice_dev_uninit);
}

static struct rte_pci_driver rte_ice_pmd = {
	.id_table = pci_id_ice_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = ice_pci_probe,
	.remove = ice_pci_remove,
};

/**
 * Driver initialization routine.
 * Invoked once at EAL init time.
 * Register itself as the [Poll Mode] Driver of PCI devices.
 */
RTE_PMD_REGISTER_PCI(net_ice, rte_ice_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_ice, pci_id_ice_map);
RTE_PMD_REGISTER_KMOD_DEP(net_ice, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(net_ice,
			      ICE_HW_DEBUG_MASK_ARG "=0xXXX"
			      ICE_PROTO_XTR_ARG "=[queue:]<vlan|ipv4|ipv6|ipv6_flow|tcp|ip_offset>"
			      ICE_SAFE_MODE_SUPPORT_ARG "=<0|1>"
			      ICE_PIPELINE_MODE_SUPPORT_ARG "=<0|1>"
			      ICE_RX_LOW_LATENCY_ARG "=<0|1>");

RTE_LOG_REGISTER_SUFFIX(ice_logtype_init, init, NOTICE);
RTE_LOG_REGISTER_SUFFIX(ice_logtype_driver, driver, NOTICE);
#ifdef RTE_ETHDEV_DEBUG_RX
RTE_LOG_REGISTER_SUFFIX(ice_logtype_rx, rx, DEBUG);
#endif
#ifdef RTE_ETHDEV_DEBUG_TX
RTE_LOG_REGISTER_SUFFIX(ice_logtype_tx, tx, DEBUG);
#endif