net/ice: clear QoS bandwidth on DCF close
[dpdk.git] drivers/net/ice/ice_ethdev.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation
 */

#include <rte_string_fns.h>
#include <ethdev_pci.h>

#include <stdio.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>

#include <rte_tailq.h>

#include "eal_firmware.h"

#include "base/ice_sched.h"
#include "base/ice_flow.h"
#include "base/ice_dcb.h"
#include "base/ice_common.h"

#include "rte_pmd_ice.h"
#include "ice_ethdev.h"
#include "ice_rxtx.h"
#include "ice_generic_flow.h"

/* devargs */
#define ICE_SAFE_MODE_SUPPORT_ARG "safe-mode-support"
#define ICE_PIPELINE_MODE_SUPPORT_ARG  "pipeline-mode-support"
#define ICE_PROTO_XTR_ARG         "proto_xtr"
#define ICE_HW_DEBUG_MASK_ARG     "hw_debug_mask"

static const char * const ice_valid_args[] = {
        ICE_SAFE_MODE_SUPPORT_ARG,
        ICE_PIPELINE_MODE_SUPPORT_ARG,
        ICE_PROTO_XTR_ARG,
        ICE_HW_DEBUG_MASK_ARG,
        NULL
};
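
/*
 * Illustrative devargs usage (the PCI address and queue IDs below are
 * examples only):
 *   -a 0000:af:00.0,safe-mode-support=1,proto_xtr='[(1,2-3):tcp,8:vlan]'
 * The keys above are matched by rte_kvargs when the device is probed.
 */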

static const struct rte_mbuf_dynfield ice_proto_xtr_metadata_param = {
        .name = "intel_pmd_dynfield_proto_xtr_metadata",
        .size = sizeof(uint32_t),
        .align = __alignof__(uint32_t),
        .flags = 0,
};

struct proto_xtr_ol_flag {
        const struct rte_mbuf_dynflag param;
        uint64_t *ol_flag;
        bool required;
};

static bool ice_proto_xtr_hw_support[PROTO_XTR_MAX];

static struct proto_xtr_ol_flag ice_proto_xtr_ol_flag_params[] = {
        [PROTO_XTR_VLAN] = {
                .param = { .name = "intel_pmd_dynflag_proto_xtr_vlan" },
                .ol_flag = &rte_net_ice_dynflag_proto_xtr_vlan_mask },
        [PROTO_XTR_IPV4] = {
                .param = { .name = "intel_pmd_dynflag_proto_xtr_ipv4" },
                .ol_flag = &rte_net_ice_dynflag_proto_xtr_ipv4_mask },
        [PROTO_XTR_IPV6] = {
                .param = { .name = "intel_pmd_dynflag_proto_xtr_ipv6" },
                .ol_flag = &rte_net_ice_dynflag_proto_xtr_ipv6_mask },
        [PROTO_XTR_IPV6_FLOW] = {
                .param = { .name = "intel_pmd_dynflag_proto_xtr_ipv6_flow" },
                .ol_flag = &rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask },
        [PROTO_XTR_TCP] = {
                .param = { .name = "intel_pmd_dynflag_proto_xtr_tcp" },
                .ol_flag = &rte_net_ice_dynflag_proto_xtr_tcp_mask },
        [PROTO_XTR_IP_OFFSET] = {
                .param = { .name = "intel_pmd_dynflag_proto_xtr_ip_offset" },
                .ol_flag = &rte_net_ice_dynflag_proto_xtr_ip_offset_mask },
};

#define ICE_OS_DEFAULT_PKG_NAME         "ICE OS Default Package"
#define ICE_COMMS_PKG_NAME                      "ICE COMMS Package"
#define ICE_MAX_RES_DESC_NUM        1024

static int ice_dev_configure(struct rte_eth_dev *dev);
static int ice_dev_start(struct rte_eth_dev *dev);
static int ice_dev_stop(struct rte_eth_dev *dev);
static int ice_dev_close(struct rte_eth_dev *dev);
static int ice_dev_reset(struct rte_eth_dev *dev);
static int ice_dev_info_get(struct rte_eth_dev *dev,
                            struct rte_eth_dev_info *dev_info);
static int ice_link_update(struct rte_eth_dev *dev,
                           int wait_to_complete);
static int ice_dev_set_link_up(struct rte_eth_dev *dev);
static int ice_dev_set_link_down(struct rte_eth_dev *dev);

static int ice_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
static int ice_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static int ice_rss_reta_update(struct rte_eth_dev *dev,
                               struct rte_eth_rss_reta_entry64 *reta_conf,
                               uint16_t reta_size);
static int ice_rss_reta_query(struct rte_eth_dev *dev,
                              struct rte_eth_rss_reta_entry64 *reta_conf,
                              uint16_t reta_size);
static int ice_rss_hash_update(struct rte_eth_dev *dev,
                               struct rte_eth_rss_conf *rss_conf);
static int ice_rss_hash_conf_get(struct rte_eth_dev *dev,
                                 struct rte_eth_rss_conf *rss_conf);
static int ice_promisc_enable(struct rte_eth_dev *dev);
static int ice_promisc_disable(struct rte_eth_dev *dev);
static int ice_allmulti_enable(struct rte_eth_dev *dev);
static int ice_allmulti_disable(struct rte_eth_dev *dev);
static int ice_vlan_filter_set(struct rte_eth_dev *dev,
                               uint16_t vlan_id,
                               int on);
static int ice_macaddr_set(struct rte_eth_dev *dev,
                           struct rte_ether_addr *mac_addr);
static int ice_macaddr_add(struct rte_eth_dev *dev,
                           struct rte_ether_addr *mac_addr,
                           __rte_unused uint32_t index,
                           uint32_t pool);
static void ice_macaddr_remove(struct rte_eth_dev *dev, uint32_t index);
static int ice_rx_queue_intr_enable(struct rte_eth_dev *dev,
                                    uint16_t queue_id);
static int ice_rx_queue_intr_disable(struct rte_eth_dev *dev,
                                     uint16_t queue_id);
static int ice_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
                              size_t fw_size);
static int ice_vlan_pvid_set(struct rte_eth_dev *dev,
                             uint16_t pvid, int on);
static int ice_get_eeprom_length(struct rte_eth_dev *dev);
static int ice_get_eeprom(struct rte_eth_dev *dev,
                          struct rte_dev_eeprom_info *eeprom);
static int ice_stats_get(struct rte_eth_dev *dev,
                         struct rte_eth_stats *stats);
static int ice_stats_reset(struct rte_eth_dev *dev);
static int ice_xstats_get(struct rte_eth_dev *dev,
                          struct rte_eth_xstat *xstats, unsigned int n);
static int ice_xstats_get_names(struct rte_eth_dev *dev,
                                struct rte_eth_xstat_name *xstats_names,
                                unsigned int limit);
static int ice_dev_flow_ops_get(struct rte_eth_dev *dev,
                                const struct rte_flow_ops **ops);
static int ice_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
                        struct rte_eth_udp_tunnel *udp_tunnel);
static int ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
                        struct rte_eth_udp_tunnel *udp_tunnel);

static const struct rte_pci_id pci_id_ice_map[] = {
        { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_BACKPLANE) },
        { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_SFP) },
        { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_10G_BASE_T) },
        { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_1GBE) },
        { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_QSFP) },
        { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_BACKPLANE) },
        { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP) },
        { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP) },
        { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810_XXV_BACKPLANE) },
        { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810_XXV_QSFP) },
        { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810_XXV_SFP) },
        { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823C_BACKPLANE) },
        { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823C_QSFP) },
        { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823C_SFP) },
        { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823C_10G_BASE_T) },
        { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823C_SGMII) },
        { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_BACKPLANE) },
        { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_QSFP) },
        { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_SFP) },
        { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_10G_BASE_T) },
        { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_SGMII) },
        { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822L_BACKPLANE) },
        { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822L_SFP) },
        { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822L_10G_BASE_T) },
        { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822L_SGMII) },
        { .vendor_id = 0, /* sentinel */ },
};

static const struct eth_dev_ops ice_eth_dev_ops = {
        .dev_configure                = ice_dev_configure,
        .dev_start                    = ice_dev_start,
        .dev_stop                     = ice_dev_stop,
        .dev_close                    = ice_dev_close,
        .dev_reset                    = ice_dev_reset,
        .dev_set_link_up              = ice_dev_set_link_up,
        .dev_set_link_down            = ice_dev_set_link_down,
        .rx_queue_start               = ice_rx_queue_start,
        .rx_queue_stop                = ice_rx_queue_stop,
        .tx_queue_start               = ice_tx_queue_start,
        .tx_queue_stop                = ice_tx_queue_stop,
        .rx_queue_setup               = ice_rx_queue_setup,
        .rx_queue_release             = ice_rx_queue_release,
        .tx_queue_setup               = ice_tx_queue_setup,
        .tx_queue_release             = ice_tx_queue_release,
        .dev_infos_get                = ice_dev_info_get,
        .dev_supported_ptypes_get     = ice_dev_supported_ptypes_get,
        .link_update                  = ice_link_update,
        .mtu_set                      = ice_mtu_set,
        .mac_addr_set                 = ice_macaddr_set,
        .mac_addr_add                 = ice_macaddr_add,
        .mac_addr_remove              = ice_macaddr_remove,
        .vlan_filter_set              = ice_vlan_filter_set,
        .vlan_offload_set             = ice_vlan_offload_set,
        .reta_update                  = ice_rss_reta_update,
        .reta_query                   = ice_rss_reta_query,
        .rss_hash_update              = ice_rss_hash_update,
        .rss_hash_conf_get            = ice_rss_hash_conf_get,
        .promiscuous_enable           = ice_promisc_enable,
        .promiscuous_disable          = ice_promisc_disable,
        .allmulticast_enable          = ice_allmulti_enable,
        .allmulticast_disable         = ice_allmulti_disable,
        .rx_queue_intr_enable         = ice_rx_queue_intr_enable,
        .rx_queue_intr_disable        = ice_rx_queue_intr_disable,
        .fw_version_get               = ice_fw_version_get,
        .vlan_pvid_set                = ice_vlan_pvid_set,
        .rxq_info_get                 = ice_rxq_info_get,
        .txq_info_get                 = ice_txq_info_get,
        .rx_burst_mode_get            = ice_rx_burst_mode_get,
        .tx_burst_mode_get            = ice_tx_burst_mode_get,
        .get_eeprom_length            = ice_get_eeprom_length,
        .get_eeprom                   = ice_get_eeprom,
        .stats_get                    = ice_stats_get,
        .stats_reset                  = ice_stats_reset,
        .xstats_get                   = ice_xstats_get,
        .xstats_get_names             = ice_xstats_get_names,
        .xstats_reset                 = ice_stats_reset,
        .flow_ops_get                 = ice_dev_flow_ops_get,
        .udp_tunnel_port_add          = ice_dev_udp_tunnel_port_add,
        .udp_tunnel_port_del          = ice_dev_udp_tunnel_port_del,
        .tx_done_cleanup              = ice_tx_done_cleanup,
        .get_monitor_addr             = ice_get_monitor_addr,
};

/* Store statistics names and their offsets in the stats structure */
struct ice_xstats_name_off {
        char name[RTE_ETH_XSTATS_NAME_SIZE];
        unsigned int offset;
};

static const struct ice_xstats_name_off ice_stats_strings[] = {
        {"rx_unicast_packets", offsetof(struct ice_eth_stats, rx_unicast)},
        {"rx_multicast_packets", offsetof(struct ice_eth_stats, rx_multicast)},
        {"rx_broadcast_packets", offsetof(struct ice_eth_stats, rx_broadcast)},
        {"rx_dropped_packets", offsetof(struct ice_eth_stats, rx_discards)},
        {"rx_unknown_protocol_packets", offsetof(struct ice_eth_stats,
                rx_unknown_protocol)},
        {"tx_unicast_packets", offsetof(struct ice_eth_stats, tx_unicast)},
        {"tx_multicast_packets", offsetof(struct ice_eth_stats, tx_multicast)},
        {"tx_broadcast_packets", offsetof(struct ice_eth_stats, tx_broadcast)},
        {"tx_dropped_packets", offsetof(struct ice_eth_stats, tx_discards)},
};

#define ICE_NB_ETH_XSTATS (sizeof(ice_stats_strings) / \
                sizeof(ice_stats_strings[0]))

static const struct ice_xstats_name_off ice_hw_port_strings[] = {
        {"tx_link_down_dropped", offsetof(struct ice_hw_port_stats,
                tx_dropped_link_down)},
        {"rx_crc_errors", offsetof(struct ice_hw_port_stats, crc_errors)},
        {"rx_illegal_byte_errors", offsetof(struct ice_hw_port_stats,
                illegal_bytes)},
        {"rx_error_bytes", offsetof(struct ice_hw_port_stats, error_bytes)},
        {"mac_local_errors", offsetof(struct ice_hw_port_stats,
                mac_local_faults)},
        {"mac_remote_errors", offsetof(struct ice_hw_port_stats,
                mac_remote_faults)},
        {"rx_len_errors", offsetof(struct ice_hw_port_stats,
                rx_len_errors)},
        {"tx_xon_packets", offsetof(struct ice_hw_port_stats, link_xon_tx)},
        {"rx_xon_packets", offsetof(struct ice_hw_port_stats, link_xon_rx)},
        {"tx_xoff_packets", offsetof(struct ice_hw_port_stats, link_xoff_tx)},
        {"rx_xoff_packets", offsetof(struct ice_hw_port_stats, link_xoff_rx)},
        {"rx_size_64_packets", offsetof(struct ice_hw_port_stats, rx_size_64)},
        {"rx_size_65_to_127_packets", offsetof(struct ice_hw_port_stats,
                rx_size_127)},
        {"rx_size_128_to_255_packets", offsetof(struct ice_hw_port_stats,
                rx_size_255)},
        {"rx_size_256_to_511_packets", offsetof(struct ice_hw_port_stats,
                rx_size_511)},
        {"rx_size_512_to_1023_packets", offsetof(struct ice_hw_port_stats,
                rx_size_1023)},
        {"rx_size_1024_to_1522_packets", offsetof(struct ice_hw_port_stats,
                rx_size_1522)},
        {"rx_size_1523_to_max_packets", offsetof(struct ice_hw_port_stats,
                rx_size_big)},
        {"rx_undersized_errors", offsetof(struct ice_hw_port_stats,
                rx_undersize)},
        {"rx_oversize_errors", offsetof(struct ice_hw_port_stats,
                rx_oversize)},
        {"rx_mac_short_pkt_dropped", offsetof(struct ice_hw_port_stats,
                mac_short_pkt_dropped)},
        {"rx_fragmented_errors", offsetof(struct ice_hw_port_stats,
                rx_fragments)},
        {"rx_jabber_errors", offsetof(struct ice_hw_port_stats, rx_jabber)},
        {"tx_size_64_packets", offsetof(struct ice_hw_port_stats, tx_size_64)},
        {"tx_size_65_to_127_packets", offsetof(struct ice_hw_port_stats,
                tx_size_127)},
        {"tx_size_128_to_255_packets", offsetof(struct ice_hw_port_stats,
                tx_size_255)},
        {"tx_size_256_to_511_packets", offsetof(struct ice_hw_port_stats,
                tx_size_511)},
        {"tx_size_512_to_1023_packets", offsetof(struct ice_hw_port_stats,
                tx_size_1023)},
        {"tx_size_1024_to_1522_packets", offsetof(struct ice_hw_port_stats,
                tx_size_1522)},
        {"tx_size_1523_to_max_packets", offsetof(struct ice_hw_port_stats,
                tx_size_big)},
};

#define ICE_NB_HW_PORT_XSTATS (sizeof(ice_hw_port_strings) / \
                sizeof(ice_hw_port_strings[0]))

static void
ice_init_controlq_parameter(struct ice_hw *hw)
{
        /* fields for adminq */
        hw->adminq.num_rq_entries = ICE_ADMINQ_LEN;
        hw->adminq.num_sq_entries = ICE_ADMINQ_LEN;
        hw->adminq.rq_buf_size = ICE_ADMINQ_BUF_SZ;
        hw->adminq.sq_buf_size = ICE_ADMINQ_BUF_SZ;

        /* fields for mailboxq; used when DPDK acts as the PF host */
        hw->mailboxq.num_rq_entries = ICE_MAILBOXQ_LEN;
        hw->mailboxq.num_sq_entries = ICE_MAILBOXQ_LEN;
        hw->mailboxq.rq_buf_size = ICE_MAILBOXQ_BUF_SZ;
        hw->mailboxq.sq_buf_size = ICE_MAILBOXQ_BUF_SZ;
}

static int
lookup_proto_xtr_type(const char *xtr_name)
{
        static struct {
                const char *name;
                enum proto_xtr_type type;
        } xtr_type_map[] = {
                { "vlan",      PROTO_XTR_VLAN      },
                { "ipv4",      PROTO_XTR_IPV4      },
                { "ipv6",      PROTO_XTR_IPV6      },
                { "ipv6_flow", PROTO_XTR_IPV6_FLOW },
                { "tcp",       PROTO_XTR_TCP       },
                { "ip_offset", PROTO_XTR_IP_OFFSET },
        };
        uint32_t i;

        for (i = 0; i < RTE_DIM(xtr_type_map); i++) {
                if (strcmp(xtr_name, xtr_type_map[i].name) == 0)
                        return xtr_type_map[i].type;
        }

        return -1;
}

/*
 * Parse an elem; an elem can be a single number/range or a '(' ')' group:
 * 1) A single number elem is just a digit, e.g. 9
 * 2) A single range elem is two digits separated by '-', e.g. 2-6
 * 3) A group elem combines multiple 1) or 2) with '( )', e.g. (0,2-4,6)
 *    Within a group elem, '-' is used as the range separator and
 *                         ',' separates single numbers.
 */
static int
parse_queue_set(const char *input, int xtr_type, struct ice_devargs *devargs)
{
        const char *str = input;
        char *end = NULL;
        uint32_t min, max;
        uint32_t idx;

        while (isblank(*str))
                str++;

        if (!isdigit(*str) && *str != '(')
                return -1;

        /* process a single number or a single range of numbers */
        if (*str != '(') {
                errno = 0;
                idx = strtoul(str, &end, 10);
                if (errno || end == NULL || idx >= ICE_MAX_QUEUE_NUM)
                        return -1;

                while (isblank(*end))
                        end++;

                min = idx;
                max = idx;

                /* process a single <number>-<number> range */
                if (*end == '-') {
                        end++;
                        while (isblank(*end))
                                end++;
                        if (!isdigit(*end))
                                return -1;

                        errno = 0;
                        idx = strtoul(end, &end, 10);
                        if (errno || end == NULL || idx >= ICE_MAX_QUEUE_NUM)
                                return -1;

                        max = idx;
                        while (isblank(*end))
                                end++;
                }

                if (*end != ':')
                        return -1;

                for (idx = RTE_MIN(min, max);
                     idx <= RTE_MAX(min, max); idx++)
                        devargs->proto_xtr[idx] = xtr_type;

                return 0;
        }

        /* process a set within brackets */
        str++;
        while (isblank(*str))
                str++;
        if (*str == '\0')
                return -1;

        min = ICE_MAX_QUEUE_NUM;
        do {
                /* advance to the first digit */
                while (isblank(*str))
                        str++;
                if (!isdigit(*str))
                        return -1;

                /* get the digit value */
                errno = 0;
                idx = strtoul(str, &end, 10);
                if (errno || end == NULL || idx >= ICE_MAX_QUEUE_NUM)
                        return -1;

                /* advance to a separator: '-', ',' or ')' */
                while (isblank(*end))
                        end++;
                if (*end == '-') {
                        if (min == ICE_MAX_QUEUE_NUM)
                                min = idx;
                        else /* avoid consecutive '-' */
                                return -1;
                } else if (*end == ',' || *end == ')') {
                        max = idx;
                        if (min == ICE_MAX_QUEUE_NUM)
                                min = idx;

                        for (idx = RTE_MIN(min, max);
                             idx <= RTE_MAX(min, max); idx++)
                                devargs->proto_xtr[idx] = xtr_type;

                        min = ICE_MAX_QUEUE_NUM;
                } else {
                        return -1;
                }

                str = end + 1;
        } while (*end != ')' && *end != '\0');

        return 0;
}
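
/*
 * Worked example (illustrative): with input "(0,2-4,6):" and xtr_type set
 * to PROTO_XTR_TCP, the bracket loop above assigns PROTO_XTR_TCP to
 * devargs->proto_xtr[0], [2..4] and [6], leaving all other queues as-is.
 */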

static int
parse_queue_proto_xtr(const char *queues, struct ice_devargs *devargs)
{
        const char *queue_start;
        uint32_t idx;
        int xtr_type;
        char xtr_name[32];

        while (isblank(*queues))
                queues++;

        if (*queues != '[') {
                xtr_type = lookup_proto_xtr_type(queues);
                if (xtr_type < 0)
                        return -1;

                devargs->proto_xtr_dflt = xtr_type;

                return 0;
        }

        queues++;
        do {
                while (isblank(*queues))
                        queues++;
                if (*queues == '\0')
                        return -1;

                queue_start = queues;

                /* skip across a complete bracket */
                if (*queue_start == '(') {
                        queues += strcspn(queues, ")");
                        if (*queues != ')')
                                return -1;
                }

                /* scan to the separator ':' */
                queues += strcspn(queues, ":");
                if (*queues++ != ':')
                        return -1;
                while (isblank(*queues))
                        queues++;

                for (idx = 0; ; idx++) {
                        if (isblank(queues[idx]) ||
                            queues[idx] == ',' ||
                            queues[idx] == ']' ||
                            queues[idx] == '\0')
                                break;

                        if (idx > sizeof(xtr_name) - 2)
                                return -1;

                        xtr_name[idx] = queues[idx];
                }
                xtr_name[idx] = '\0';
                xtr_type = lookup_proto_xtr_type(xtr_name);
                if (xtr_type < 0)
                        return -1;

                queues += idx;

                while (isblank(*queues) || *queues == ',' || *queues == ']')
                        queues++;

                if (parse_queue_set(queue_start, xtr_type, devargs) < 0)
                        return -1;
        } while (*queues != '\0');

        return 0;
}
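
/*
 * Illustrative end-to-end parse: a devargs value of "[2:ipv4,4-6:tcp]"
 * sets proto_xtr[2] = PROTO_XTR_IPV4 and proto_xtr[4..6] = PROTO_XTR_TCP,
 * while a bare value such as "vlan" only sets proto_xtr_dflt, which
 * ice_init_proto_xtr() later applies to queues without a per-queue entry.
 */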

static int
handle_proto_xtr_arg(__rte_unused const char *key, const char *value,
                     void *extra_args)
{
        struct ice_devargs *devargs = extra_args;

        if (value == NULL || extra_args == NULL)
                return -EINVAL;

        if (parse_queue_proto_xtr(value, devargs) < 0) {
                PMD_DRV_LOG(ERR,
                            "The protocol extraction parameter is wrong: '%s'",
                            value);
                return -1;
        }

        return 0;
}

static void
ice_check_proto_xtr_support(struct ice_hw *hw)
{
#define FLX_REG(val, fld, idx) \
        (((val) & GLFLXP_RXDID_FLX_WRD_##idx##_##fld##_M) >> \
         GLFLXP_RXDID_FLX_WRD_##idx##_##fld##_S)
        static struct {
                uint32_t rxdid;
                uint8_t opcode;
                uint8_t protid_0;
                uint8_t protid_1;
        } xtr_sets[] = {
                [PROTO_XTR_VLAN] = { ICE_RXDID_COMMS_AUX_VLAN,
                                     ICE_RX_OPC_EXTRACT,
                                     ICE_PROT_EVLAN_O, ICE_PROT_VLAN_O},
                [PROTO_XTR_IPV4] = { ICE_RXDID_COMMS_AUX_IPV4,
                                     ICE_RX_OPC_EXTRACT,
                                     ICE_PROT_IPV4_OF_OR_S,
                                     ICE_PROT_IPV4_OF_OR_S },
                [PROTO_XTR_IPV6] = { ICE_RXDID_COMMS_AUX_IPV6,
                                     ICE_RX_OPC_EXTRACT,
                                     ICE_PROT_IPV6_OF_OR_S,
                                     ICE_PROT_IPV6_OF_OR_S },
                [PROTO_XTR_IPV6_FLOW] = { ICE_RXDID_COMMS_AUX_IPV6_FLOW,
                                          ICE_RX_OPC_EXTRACT,
                                          ICE_PROT_IPV6_OF_OR_S,
                                          ICE_PROT_IPV6_OF_OR_S },
                [PROTO_XTR_TCP] = { ICE_RXDID_COMMS_AUX_TCP,
                                    ICE_RX_OPC_EXTRACT,
                                    ICE_PROT_TCP_IL, ICE_PROT_ID_INVAL },
                [PROTO_XTR_IP_OFFSET] = { ICE_RXDID_COMMS_AUX_IP_OFFSET,
                                          ICE_RX_OPC_PROTID,
                                          ICE_PROT_IPV4_OF_OR_S,
                                          ICE_PROT_IPV6_OF_OR_S },
        };
        uint32_t i;

        for (i = 0; i < RTE_DIM(xtr_sets); i++) {
                uint32_t rxdid = xtr_sets[i].rxdid;
                uint32_t v;

                if (xtr_sets[i].protid_0 != ICE_PROT_ID_INVAL) {
                        v = ICE_READ_REG(hw, GLFLXP_RXDID_FLX_WRD_4(rxdid));

                        if (FLX_REG(v, PROT_MDID, 4) == xtr_sets[i].protid_0 &&
                            FLX_REG(v, RXDID_OPCODE, 4) == xtr_sets[i].opcode)
                                ice_proto_xtr_hw_support[i] = true;
                }

                if (xtr_sets[i].protid_1 != ICE_PROT_ID_INVAL) {
                        v = ICE_READ_REG(hw, GLFLXP_RXDID_FLX_WRD_5(rxdid));

                        if (FLX_REG(v, PROT_MDID, 5) == xtr_sets[i].protid_1 &&
                            FLX_REG(v, RXDID_OPCODE, 5) == xtr_sets[i].opcode)
                                ice_proto_xtr_hw_support[i] = true;
                }
        }
}
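
/*
 * The loop above only reads back GLFLXP_RXDID_FLX_WRD_4/5 for each RXDID
 * and compares the programmed protocol ID and opcode against the expected
 * extraction set; the per-type result is cached in
 * ice_proto_xtr_hw_support[] and consumed later by ice_init_proto_xtr().
 */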

static int
ice_res_pool_init(struct ice_res_pool_info *pool, uint32_t base,
                  uint32_t num)
{
        struct pool_entry *entry;

        if (!pool || !num)
                return -EINVAL;

        entry = rte_zmalloc(NULL, sizeof(*entry), 0);
        if (!entry) {
                PMD_INIT_LOG(ERR,
                             "Failed to allocate memory for resource pool");
                return -ENOMEM;
        }

        /* initialize the queue heap */
        pool->num_free = num;
        pool->num_alloc = 0;
        pool->base = base;
        LIST_INIT(&pool->alloc_list);
        LIST_INIT(&pool->free_list);

        /* Initialize the element */
        entry->base = 0;
        entry->len = num;

        LIST_INSERT_HEAD(&pool->free_list, entry, next);
        return 0;
}

static int
ice_res_pool_alloc(struct ice_res_pool_info *pool,
                   uint16_t num)
{
        struct pool_entry *entry, *valid_entry;

        if (!pool || !num) {
                PMD_INIT_LOG(ERR, "Invalid parameter");
                return -EINVAL;
        }

        if (pool->num_free < num) {
                PMD_INIT_LOG(ERR, "No resource. ask:%u, available:%u",
                             num, pool->num_free);
                return -ENOMEM;
        }

        valid_entry = NULL;
        /* Look up the free list and find the best-fit entry */
        LIST_FOREACH(entry, &pool->free_list, next) {
                if (entry->len >= num) {
                        /* Exact fit found */
                        if (entry->len == num) {
                                valid_entry = entry;
                                break;
                        }
                        if (!valid_entry ||
                            valid_entry->len > entry->len)
                                valid_entry = entry;
                }
        }

        /* No entry found that satisfies the request, return */
        if (!valid_entry) {
                PMD_INIT_LOG(ERR, "No valid entry found");
                return -ENOMEM;
        }
        /**
         * The entry has exactly the requested number of queues;
         * remove it from the free list.
         */
        if (valid_entry->len == num) {
                LIST_REMOVE(valid_entry, next);
        } else {
                /**
                 * The entry has more queues than requested; create a new
                 * entry for the alloc list and subtract the allocated base
                 * and count from the entry left in the free list.
                 */
                entry = rte_zmalloc(NULL, sizeof(*entry), 0);
                if (!entry) {
                        PMD_INIT_LOG(ERR,
                                     "Failed to allocate memory for "
                                     "resource pool");
                        return -ENOMEM;
                }
                entry->base = valid_entry->base;
                entry->len = num;
                valid_entry->base += num;
                valid_entry->len -= num;
                valid_entry = entry;
        }

        /* Insert it into the alloc list, not sorted */
        LIST_INSERT_HEAD(&pool->alloc_list, valid_entry, next);

        pool->num_free -= valid_entry->len;
        pool->num_alloc += valid_entry->len;

        return valid_entry->base + pool->base;
}
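
/*
 * Worked example (illustrative): for a pool with base 10 and a single free
 * entry {base 0, len 8}, ice_res_pool_alloc(pool, 3) splits it into an
 * allocated {0, 3} and a remaining free {3, 5}, then returns 0 + 10 = 10
 * as the absolute base of the allocated block.
 */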

static void
ice_res_pool_destroy(struct ice_res_pool_info *pool)
{
        struct pool_entry *entry, *next_entry;

        if (!pool)
                return;

        for (entry = LIST_FIRST(&pool->alloc_list);
             entry && (next_entry = LIST_NEXT(entry, next), 1);
             entry = next_entry) {
                LIST_REMOVE(entry, next);
                rte_free(entry);
        }

        for (entry = LIST_FIRST(&pool->free_list);
             entry && (next_entry = LIST_NEXT(entry, next), 1);
             entry = next_entry) {
                LIST_REMOVE(entry, next);
                rte_free(entry);
        }

        pool->num_free = 0;
        pool->num_alloc = 0;
        pool->base = 0;
        LIST_INIT(&pool->alloc_list);
        LIST_INIT(&pool->free_list);
}

static void
ice_vsi_config_default_rss(struct ice_aqc_vsi_props *info)
{
        /* Set VSI LUT selection */
        info->q_opt_rss = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI &
                          ICE_AQ_VSI_Q_OPT_RSS_LUT_M;
        /* Set Hash scheme */
        info->q_opt_rss |= ICE_AQ_VSI_Q_OPT_RSS_TPLZ &
                           ICE_AQ_VSI_Q_OPT_RSS_HASH_M;
        /* Enable TC override */
        info->q_opt_tc = ICE_AQ_VSI_Q_OPT_TC_OVR_M;
}

static enum ice_status
ice_vsi_config_tc_queue_mapping(struct ice_vsi *vsi,
                                struct ice_aqc_vsi_props *info,
                                uint8_t enabled_tcmap)
{
        uint16_t bsf, qp_idx;

        /* Default to TC0 for now; multi-TC support needs to be added later.
         * Configure TC and queue mapping parameters: for each enabled TC,
         * allocate qpnum_per_tc queues to that traffic class.
         */
        if (enabled_tcmap != 0x01) {
                PMD_INIT_LOG(ERR, "only TC0 is supported");
                return -ENOTSUP;
        }

        vsi->nb_qps = RTE_MIN(vsi->nb_qps, ICE_MAX_Q_PER_TC);
        bsf = rte_bsf32(vsi->nb_qps);
        /* Adjust the queue number to actual queues that can be applied */
        vsi->nb_qps = 0x1 << bsf;

        qp_idx = 0;
        /* Set TC and queue mapping for the VSI */
        info->tc_mapping[0] = rte_cpu_to_le_16((qp_idx <<
                                                ICE_AQ_VSI_TC_Q_OFFSET_S) |
                                               (bsf << ICE_AQ_VSI_TC_Q_NUM_S));

        /* Associate queue number with VSI */
        info->mapping_flags |= rte_cpu_to_le_16(ICE_AQ_VSI_Q_MAP_CONTIG);
        info->q_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
        info->q_mapping[1] = rte_cpu_to_le_16(vsi->nb_qps);
        info->valid_sections |=
                rte_cpu_to_le_16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
        /* Set info.ingress_table and info.egress_table for the UP
         * translate table. For now just set it to a 1:1 map by default
         * -- 0b 111 110 101 100 011 010 001 000 == 0xFAC688
         */
#define ICE_TC_QUEUE_TABLE_DFLT 0x00FAC688
        info->ingress_table  = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT);
        info->egress_table   = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT);
        info->outer_up_table = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT);
        return 0;
}
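
/*
 * Note: rte_bsf32() returns the position of the least significant set bit,
 * so vsi->nb_qps is truncated above to its lowest power-of-two component
 * (e.g. 8 stays 8, but 12 becomes 4); that power is what gets encoded into
 * the TC mapping via ICE_AQ_VSI_TC_Q_NUM_S.
 */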

static int
ice_init_mac_address(struct rte_eth_dev *dev)
{
        struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        if (!rte_is_unicast_ether_addr
                ((struct rte_ether_addr *)hw->port_info[0].mac.lan_addr)) {
                PMD_INIT_LOG(ERR, "Invalid MAC address");
                return -EINVAL;
        }

        rte_ether_addr_copy(
                (struct rte_ether_addr *)hw->port_info[0].mac.lan_addr,
                (struct rte_ether_addr *)hw->port_info[0].mac.perm_addr);

        dev->data->mac_addrs =
                rte_zmalloc(NULL, sizeof(struct rte_ether_addr) * ICE_NUM_MACADDR_MAX, 0);
        if (!dev->data->mac_addrs) {
                PMD_INIT_LOG(ERR,
                             "Failed to allocate memory to store mac address");
                return -ENOMEM;
        }
        /* store it in dev data */
        rte_ether_addr_copy(
                (struct rte_ether_addr *)hw->port_info[0].mac.perm_addr,
                &dev->data->mac_addrs[0]);
        return 0;
}

/* Find a specific MAC filter */
static struct ice_mac_filter *
ice_find_mac_filter(struct ice_vsi *vsi, struct rte_ether_addr *macaddr)
{
        struct ice_mac_filter *f;

        TAILQ_FOREACH(f, &vsi->mac_list, next) {
                if (rte_is_same_ether_addr(macaddr, &f->mac_info.mac_addr))
                        return f;
        }

        return NULL;
}

static int
ice_add_mac_filter(struct ice_vsi *vsi, struct rte_ether_addr *mac_addr)
{
        struct ice_fltr_list_entry *m_list_itr = NULL;
        struct ice_mac_filter *f;
        struct LIST_HEAD_TYPE list_head;
        struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
        int ret = 0;

        /* If it has already been added and configured, return */
        f = ice_find_mac_filter(vsi, mac_addr);
        if (f) {
                PMD_DRV_LOG(INFO, "This MAC filter already exists.");
                return 0;
        }

        INIT_LIST_HEAD(&list_head);

        m_list_itr = (struct ice_fltr_list_entry *)
                ice_malloc(hw, sizeof(*m_list_itr));
        if (!m_list_itr) {
                ret = -ENOMEM;
                goto DONE;
        }
        ice_memcpy(m_list_itr->fltr_info.l_data.mac.mac_addr,
                   mac_addr, ETH_ALEN, ICE_NONDMA_TO_NONDMA);
        m_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
        m_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
        m_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_MAC;
        m_list_itr->fltr_info.flag = ICE_FLTR_TX;
        m_list_itr->fltr_info.vsi_handle = vsi->idx;

        LIST_ADD(&m_list_itr->list_entry, &list_head);

        /* Add the MAC */
        ret = ice_add_mac(hw, &list_head);
        if (ret != ICE_SUCCESS) {
                PMD_DRV_LOG(ERR, "Failed to add MAC filter");
                ret = -EINVAL;
                goto DONE;
        }
        /* Add the MAC address to the mac list */
        f = rte_zmalloc(NULL, sizeof(*f), 0);
        if (!f) {
                PMD_DRV_LOG(ERR, "failed to allocate memory");
                ret = -ENOMEM;
                goto DONE;
        }
        rte_ether_addr_copy(mac_addr, &f->mac_info.mac_addr);
        TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
        vsi->mac_num++;

        ret = 0;

DONE:
        rte_free(m_list_itr);
        return ret;
}

static int
ice_remove_mac_filter(struct ice_vsi *vsi, struct rte_ether_addr *mac_addr)
{
        struct ice_fltr_list_entry *m_list_itr = NULL;
        struct ice_mac_filter *f;
        struct LIST_HEAD_TYPE list_head;
        struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
        int ret = 0;

        /* If it can't be found, return an error */
        f = ice_find_mac_filter(vsi, mac_addr);
        if (!f)
                return -EINVAL;

        INIT_LIST_HEAD(&list_head);

        m_list_itr = (struct ice_fltr_list_entry *)
                ice_malloc(hw, sizeof(*m_list_itr));
        if (!m_list_itr) {
                ret = -ENOMEM;
                goto DONE;
        }
        ice_memcpy(m_list_itr->fltr_info.l_data.mac.mac_addr,
                   mac_addr, ETH_ALEN, ICE_NONDMA_TO_NONDMA);
        m_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
        m_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
        m_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_MAC;
        m_list_itr->fltr_info.flag = ICE_FLTR_TX;
        m_list_itr->fltr_info.vsi_handle = vsi->idx;

        LIST_ADD(&m_list_itr->list_entry, &list_head);

        /* Remove the MAC filter */
        ret = ice_remove_mac(hw, &list_head);
        if (ret != ICE_SUCCESS) {
                PMD_DRV_LOG(ERR, "Failed to remove MAC filter");
                ret = -EINVAL;
                goto DONE;
        }

        /* Remove the MAC address from the mac list */
        TAILQ_REMOVE(&vsi->mac_list, f, next);
        rte_free(f);
        vsi->mac_num--;

        ret = 0;
DONE:
        rte_free(m_list_itr);
        return ret;
}

/* Find a specific VLAN filter */
static struct ice_vlan_filter *
ice_find_vlan_filter(struct ice_vsi *vsi, struct ice_vlan *vlan)
{
        struct ice_vlan_filter *f;

        TAILQ_FOREACH(f, &vsi->vlan_list, next) {
                if (vlan->tpid == f->vlan_info.vlan.tpid &&
                    vlan->vid == f->vlan_info.vlan.vid)
                        return f;
        }

        return NULL;
}

static int
ice_add_vlan_filter(struct ice_vsi *vsi, struct ice_vlan *vlan)
{
        struct ice_fltr_list_entry *v_list_itr = NULL;
        struct ice_vlan_filter *f;
        struct LIST_HEAD_TYPE list_head;
        struct ice_hw *hw;
        int ret = 0;

        if (!vsi || vlan->vid > RTE_ETHER_MAX_VLAN_ID)
                return -EINVAL;

        hw = ICE_VSI_TO_HW(vsi);

        /* If it has already been added and configured, return. */
        f = ice_find_vlan_filter(vsi, vlan);
        if (f) {
                PMD_DRV_LOG(INFO, "This VLAN filter already exists.");
                return 0;
        }

        if (!vsi->vlan_anti_spoof_on && !vsi->vlan_filter_on)
                return 0;

        INIT_LIST_HEAD(&list_head);

        v_list_itr = (struct ice_fltr_list_entry *)
                      ice_malloc(hw, sizeof(*v_list_itr));
        if (!v_list_itr) {
                ret = -ENOMEM;
                goto DONE;
        }
        v_list_itr->fltr_info.l_data.vlan.vlan_id = vlan->vid;
        v_list_itr->fltr_info.l_data.vlan.tpid = vlan->tpid;
        v_list_itr->fltr_info.l_data.vlan.tpid_valid = true;
        v_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
        v_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
        v_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
        v_list_itr->fltr_info.flag = ICE_FLTR_TX;
        v_list_itr->fltr_info.vsi_handle = vsi->idx;

        LIST_ADD(&v_list_itr->list_entry, &list_head);

        /* Add the VLAN */
        ret = ice_add_vlan(hw, &list_head);
        if (ret != ICE_SUCCESS) {
                PMD_DRV_LOG(ERR, "Failed to add VLAN filter");
                ret = -EINVAL;
                goto DONE;
        }

        /* Add the VLAN to the vlan list */
        f = rte_zmalloc(NULL, sizeof(*f), 0);
        if (!f) {
                PMD_DRV_LOG(ERR, "failed to allocate memory");
                ret = -ENOMEM;
                goto DONE;
        }
        f->vlan_info.vlan.tpid = vlan->tpid;
        f->vlan_info.vlan.vid = vlan->vid;
        TAILQ_INSERT_TAIL(&vsi->vlan_list, f, next);
        vsi->vlan_num++;

        ret = 0;

DONE:
        rte_free(v_list_itr);
        return ret;
}

static int
ice_remove_vlan_filter(struct ice_vsi *vsi, struct ice_vlan *vlan)
{
        struct ice_fltr_list_entry *v_list_itr = NULL;
        struct ice_vlan_filter *f;
        struct LIST_HEAD_TYPE list_head;
        struct ice_hw *hw;
        int ret = 0;

        if (!vsi || vlan->vid > RTE_ETHER_MAX_VLAN_ID)
                return -EINVAL;

        hw = ICE_VSI_TO_HW(vsi);

        /* If it can't be found, return an error */
        f = ice_find_vlan_filter(vsi, vlan);
        if (!f)
                return -EINVAL;

        INIT_LIST_HEAD(&list_head);

        v_list_itr = (struct ice_fltr_list_entry *)
                      ice_malloc(hw, sizeof(*v_list_itr));
        if (!v_list_itr) {
                ret = -ENOMEM;
                goto DONE;
        }

        v_list_itr->fltr_info.l_data.vlan.vlan_id = vlan->vid;
        v_list_itr->fltr_info.l_data.vlan.tpid = vlan->tpid;
        v_list_itr->fltr_info.l_data.vlan.tpid_valid = true;
        v_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
        v_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
        v_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
        v_list_itr->fltr_info.flag = ICE_FLTR_TX;
        v_list_itr->fltr_info.vsi_handle = vsi->idx;

        LIST_ADD(&v_list_itr->list_entry, &list_head);

        /* Remove the VLAN filter */
        ret = ice_remove_vlan(hw, &list_head);
        if (ret != ICE_SUCCESS) {
                PMD_DRV_LOG(ERR, "Failed to remove VLAN filter");
                ret = -EINVAL;
                goto DONE;
        }

        /* Remove the VLAN ID from the vlan list */
        TAILQ_REMOVE(&vsi->vlan_list, f, next);
        rte_free(f);
        vsi->vlan_num--;

        ret = 0;
DONE:
        rte_free(v_list_itr);
        return ret;
}

static int
ice_remove_all_mac_vlan_filters(struct ice_vsi *vsi)
{
        struct ice_mac_filter *m_f;
        struct ice_vlan_filter *v_f;
        void *temp;
        int ret = 0;

        if (!vsi || !vsi->mac_num)
                return -EINVAL;

        TAILQ_FOREACH_SAFE(m_f, &vsi->mac_list, next, temp) {
                ret = ice_remove_mac_filter(vsi, &m_f->mac_info.mac_addr);
                if (ret != ICE_SUCCESS) {
                        ret = -EINVAL;
                        goto DONE;
                }
        }

        if (vsi->vlan_num == 0)
                return 0;

        TAILQ_FOREACH_SAFE(v_f, &vsi->vlan_list, next, temp) {
                ret = ice_remove_vlan_filter(vsi, &v_f->vlan_info.vlan);
                if (ret != ICE_SUCCESS) {
                        ret = -EINVAL;
                        goto DONE;
                }
        }

DONE:
        return ret;
}

/* Enable IRQ0 */
static void
ice_pf_enable_irq0(struct ice_hw *hw)
{
        /* reset the registers */
        ICE_WRITE_REG(hw, PFINT_OICR_ENA, 0);
        ICE_READ_REG(hw, PFINT_OICR);

#ifdef ICE_LSE_SPT
        ICE_WRITE_REG(hw, PFINT_OICR_ENA,
                      (uint32_t)(PFINT_OICR_ENA_INT_ENA_M &
                                 (~PFINT_OICR_LINK_STAT_CHANGE_M)));

        ICE_WRITE_REG(hw, PFINT_OICR_CTL,
                      (0 & PFINT_OICR_CTL_MSIX_INDX_M) |
                      ((0 << PFINT_OICR_CTL_ITR_INDX_S) &
                       PFINT_OICR_CTL_ITR_INDX_M) |
                      PFINT_OICR_CTL_CAUSE_ENA_M);

        ICE_WRITE_REG(hw, PFINT_FW_CTL,
                      (0 & PFINT_FW_CTL_MSIX_INDX_M) |
                      ((0 << PFINT_FW_CTL_ITR_INDX_S) &
                       PFINT_FW_CTL_ITR_INDX_M) |
                      PFINT_FW_CTL_CAUSE_ENA_M);
#else
        ICE_WRITE_REG(hw, PFINT_OICR_ENA, PFINT_OICR_ENA_INT_ENA_M);
#endif

        ICE_WRITE_REG(hw, GLINT_DYN_CTL(0),
                      GLINT_DYN_CTL_INTENA_M |
                      GLINT_DYN_CTL_CLEARPBA_M |
                      GLINT_DYN_CTL_ITR_INDX_M);

        ice_flush(hw);
}

/* Disable IRQ0 */
static void
ice_pf_disable_irq0(struct ice_hw *hw)
{
        /* Disable all interrupt types */
        ICE_WRITE_REG(hw, GLINT_DYN_CTL(0), GLINT_DYN_CTL_WB_ON_ITR_M);
        ice_flush(hw);
}
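
/*
 * Masking note (an interpretation based on the flag name): clearing INTENA
 * while leaving GLINT_DYN_CTL_WB_ON_ITR_M set suppresses the interrupt
 * itself but is meant to keep descriptor write-back running on ITR expiry;
 * ice_interrupt_handler() below re-arms IRQ0 via ice_pf_enable_irq0().
 */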
1174
1175 #ifdef ICE_LSE_SPT
1176 static void
1177 ice_handle_aq_msg(struct rte_eth_dev *dev)
1178 {
1179         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1180         struct ice_ctl_q_info *cq = &hw->adminq;
1181         struct ice_rq_event_info event;
1182         uint16_t pending, opcode;
1183         int ret;
1184
1185         event.buf_len = ICE_AQ_MAX_BUF_LEN;
1186         event.msg_buf = rte_zmalloc(NULL, event.buf_len, 0);
1187         if (!event.msg_buf) {
1188                 PMD_DRV_LOG(ERR, "Failed to allocate mem");
1189                 return;
1190         }
1191
1192         pending = 1;
1193         while (pending) {
1194                 ret = ice_clean_rq_elem(hw, cq, &event, &pending);
1195
1196                 if (ret != ICE_SUCCESS) {
1197                         PMD_DRV_LOG(INFO,
1198                                     "Failed to read msg from AdminQ, "
1199                                     "adminq_err: %u",
1200                                     hw->adminq.sq_last_status);
1201                         break;
1202                 }
1203                 opcode = rte_le_to_cpu_16(event.desc.opcode);
1204
1205                 switch (opcode) {
1206                 case ice_aqc_opc_get_link_status:
1207                         ret = ice_link_update(dev, 0);
1208                         if (!ret)
1209                                 rte_eth_dev_callback_process
1210                                         (dev, RTE_ETH_EVENT_INTR_LSC, NULL);
1211                         break;
1212                 default:
1213                         PMD_DRV_LOG(DEBUG, "Request %u is not supported yet",
1214                                     opcode);
1215                         break;
1216                 }
1217         }
1218         rte_free(event.msg_buf);
1219 }
1220 #endif
1221
1222 /**
1223  * Interrupt handler triggered by NIC for handling
1224  * specific interrupt.
1225  *
1226  * @param handle
1227  *  Pointer to interrupt handle.
1228  * @param param
1229  *  The address of parameter (struct rte_eth_dev *) regsitered before.
1230  *
1231  * @return
1232  *  void
1233  */
1234 static void
1235 ice_interrupt_handler(void *param)
1236 {
1237         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
1238         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1239         uint32_t oicr;
1240         uint32_t reg;
1241         uint8_t pf_num;
1242         uint8_t event;
1243         uint16_t queue;
1244         int ret;
1245 #ifdef ICE_LSE_SPT
1246         uint32_t int_fw_ctl;
1247 #endif
1248
1249         /* Disable interrupt */
1250         ice_pf_disable_irq0(hw);
1251
1252         /* read out interrupt causes */
1253         oicr = ICE_READ_REG(hw, PFINT_OICR);
1254 #ifdef ICE_LSE_SPT
1255         int_fw_ctl = ICE_READ_REG(hw, PFINT_FW_CTL);
1256 #endif
1257
1258         /* No interrupt event indicated */
1259         if (!(oicr & PFINT_OICR_INTEVENT_M)) {
1260                 PMD_DRV_LOG(INFO, "No interrupt event");
1261                 goto done;
1262         }
1263
1264 #ifdef ICE_LSE_SPT
1265         if (int_fw_ctl & PFINT_FW_CTL_INTEVENT_M) {
1266                 PMD_DRV_LOG(INFO, "FW_CTL: link state change event");
1267                 ice_handle_aq_msg(dev);
1268         }
1269 #else
1270         if (oicr & PFINT_OICR_LINK_STAT_CHANGE_M) {
1271                 PMD_DRV_LOG(INFO, "OICR: link state change event");
1272                 ret = ice_link_update(dev, 0);
1273                 if (!ret)
1274                         rte_eth_dev_callback_process
1275                                 (dev, RTE_ETH_EVENT_INTR_LSC, NULL);
1276         }
1277 #endif
1278
1279         if (oicr & PFINT_OICR_MAL_DETECT_M) {
1280                 PMD_DRV_LOG(WARNING, "OICR: MDD event");
1281                 reg = ICE_READ_REG(hw, GL_MDET_TX_PQM);
1282                 if (reg & GL_MDET_TX_PQM_VALID_M) {
1283                         pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >>
1284                                  GL_MDET_TX_PQM_PF_NUM_S;
1285                         event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >>
1286                                 GL_MDET_TX_PQM_MAL_TYPE_S;
1287                         queue = (reg & GL_MDET_TX_PQM_QNUM_M) >>
1288                                 GL_MDET_TX_PQM_QNUM_S;
1289
1290                         PMD_DRV_LOG(WARNING, "Malicious Driver Detection event "
1291                                     "%d by PQM on TX queue %d PF# %d",
1292                                     event, queue, pf_num);
1293                 }
1294
1295                 reg = ICE_READ_REG(hw, GL_MDET_TX_TCLAN);
1296                 if (reg & GL_MDET_TX_TCLAN_VALID_M) {
1297                         pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >>
1298                                  GL_MDET_TX_TCLAN_PF_NUM_S;
1299                         event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >>
1300                                 GL_MDET_TX_TCLAN_MAL_TYPE_S;
1301                         queue = (reg & GL_MDET_TX_TCLAN_QNUM_M) >>
1302                                 GL_MDET_TX_TCLAN_QNUM_S;
1303
1304                         PMD_DRV_LOG(WARNING, "Malicious Driver Detection event "
1305                                     "%d by TCLAN on TX queue %d PF# %d",
1306                                     event, queue, pf_num);
1307                 }
1308         }
1309 done:
1310         /* Enable interrupt */
1311         ice_pf_enable_irq0(hw);
1312         rte_intr_ack(dev->intr_handle);
1313 }
1314
1315 static void
1316 ice_init_proto_xtr(struct rte_eth_dev *dev)
1317 {
1318         struct ice_adapter *ad =
1319                         ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1320         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1321         struct ice_hw *hw = ICE_PF_TO_HW(pf);
1322         const struct proto_xtr_ol_flag *ol_flag;
1323         bool proto_xtr_enable = false;
1324         int offset;
1325         uint16_t i;
1326
1327         pf->proto_xtr = rte_zmalloc(NULL, pf->lan_nb_qps, 0);
1328         if (unlikely(pf->proto_xtr == NULL)) {
1329                 PMD_DRV_LOG(ERR, "No memory for setting up protocol extraction table");
1330                 return;
1331         }
1332
1333         for (i = 0; i < pf->lan_nb_qps; i++) {
1334                 pf->proto_xtr[i] = ad->devargs.proto_xtr[i] != PROTO_XTR_NONE ?
1335                                    ad->devargs.proto_xtr[i] :
1336                                    ad->devargs.proto_xtr_dflt;
1337
1338                 if (pf->proto_xtr[i] != PROTO_XTR_NONE) {
1339                         uint8_t type = pf->proto_xtr[i];
1340
1341                         ice_proto_xtr_ol_flag_params[type].required = true;
1342                         proto_xtr_enable = true;
1343                 }
1344         }
1345
1346         if (likely(!proto_xtr_enable))
1347                 return;
1348
1349         ice_check_proto_xtr_support(hw);
1350
1351         offset = rte_mbuf_dynfield_register(&ice_proto_xtr_metadata_param);
1352         if (unlikely(offset == -1)) {
1353                 PMD_DRV_LOG(ERR,
1354                             "Protocol extraction metadata is disabled in mbuf with error %d",
1355                             -rte_errno);
1356                 return;
1357         }
1358
1359         PMD_DRV_LOG(DEBUG,
1360                     "Protocol extraction metadata offset in mbuf is : %d",
1361                     offset);
1362         rte_net_ice_dynfield_proto_xtr_metadata_offs = offset;
1363
1364         for (i = 0; i < RTE_DIM(ice_proto_xtr_ol_flag_params); i++) {
1365                 ol_flag = &ice_proto_xtr_ol_flag_params[i];
1366
1367                 if (!ol_flag->required)
1368                         continue;
1369
1370                 if (!ice_proto_xtr_hw_support[i]) {
1371                         PMD_DRV_LOG(ERR,
1372                                     "Protocol extraction type %u is not supported in hardware",
1373                                     i);
1374                         rte_net_ice_dynfield_proto_xtr_metadata_offs = -1;
1375                         break;
1376                 }
1377
1378                 offset = rte_mbuf_dynflag_register(&ol_flag->param);
1379                 if (unlikely(offset == -1)) {
1380                         PMD_DRV_LOG(ERR,
1381                                     "Protocol extraction offload '%s' failed to register with error %d",
1382                                     ol_flag->param.name, -rte_errno);
1383
1384                         rte_net_ice_dynfield_proto_xtr_metadata_offs = -1;
1385                         break;
1386                 }
1387
1388                 PMD_DRV_LOG(DEBUG,
1389                     "Protocol extraction offload '%s' offset in mbuf: %d",
1390                             ol_flag->param.name, offset);
1391                 *ol_flag->ol_flag = 1ULL << offset;
1392         }
1393 }
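/*
 * Illustrative application-side sketch (not part of the driver): once the
 * dynfield/dynflag pair has been registered above, an application can read
 * the extracted metadata from a received mbuf roughly as follows, using the
 * offset and mask variables exported by this file together with the generic
 * RTE_MBUF_DYNFIELD() accessor (error handling omitted):
 *
 *	if (mb->ol_flags & rte_net_ice_dynflag_proto_xtr_vlan_mask) {
 *		uint32_t xtr_data = *RTE_MBUF_DYNFIELD(mb,
 *			rte_net_ice_dynfield_proto_xtr_metadata_offs,
 *			uint32_t *);
 *		... parse the extracted VLAN words out of xtr_data ...
 *	}
 */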
1394
1395 /* Initialize SW parameters of PF */
1396 static int
1397 ice_pf_sw_init(struct rte_eth_dev *dev)
1398 {
1399         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1400         struct ice_hw *hw = ICE_PF_TO_HW(pf);
1401
1402         pf->lan_nb_qp_max =
1403                 (uint16_t)RTE_MIN(hw->func_caps.common_cap.num_txq,
1404                                   hw->func_caps.common_cap.num_rxq);
1405
1406         pf->lan_nb_qps = pf->lan_nb_qp_max;
1407
1408         ice_init_proto_xtr(dev);
1409
1410         if (hw->func_caps.fd_fltr_guar > 0 ||
1411             hw->func_caps.fd_fltr_best_effort > 0) {
1412                 pf->flags |= ICE_FLAG_FDIR;
1413                 pf->fdir_nb_qps = ICE_DEFAULT_QP_NUM_FDIR;
1414                 pf->lan_nb_qps = pf->lan_nb_qp_max - pf->fdir_nb_qps;
1415         } else {
1416                 pf->fdir_nb_qps = 0;
1417         }
1418         pf->fdir_qp_offset = 0;
1419
1420         return 0;
1421 }
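/*
 * Worked example for the queue split above (illustrative numbers): with
 * num_txq = num_rxq = 64 and flow director filters available,
 * lan_nb_qp_max = 64, fdir_nb_qps = ICE_DEFAULT_QP_NUM_FDIR and
 * lan_nb_qps = 64 - ICE_DEFAULT_QP_NUM_FDIR, i.e. the FDIR queue pairs are
 * carved out of the LAN queue pair budget.
 */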
1422
1423 struct ice_vsi *
1424 ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type)
1425 {
1426         struct ice_hw *hw = ICE_PF_TO_HW(pf);
1427         struct ice_vsi *vsi = NULL;
1428         struct ice_vsi_ctx vsi_ctx;
1429         int ret;
1430         struct rte_ether_addr broadcast = {
1431                 .addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff} };
1432         struct rte_ether_addr mac_addr;
1433         uint16_t max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
1434         uint8_t tc_bitmap = 0x1;
1435         uint16_t cfg;
1436
1437         /* hw->num_lports = 1 in NIC mode */
1438         vsi = rte_zmalloc(NULL, sizeof(struct ice_vsi), 0);
1439         if (!vsi)
1440                 return NULL;
1441
1442         vsi->idx = pf->next_vsi_idx;
1443         pf->next_vsi_idx++;
1444         vsi->type = type;
1445         vsi->adapter = ICE_PF_TO_ADAPTER(pf);
1446         vsi->max_macaddrs = ICE_NUM_MACADDR_MAX;
1447         vsi->vlan_anti_spoof_on = 0;
1448         vsi->vlan_filter_on = 1;
1449         TAILQ_INIT(&vsi->mac_list);
1450         TAILQ_INIT(&vsi->vlan_list);
1451
1452         /* Keep in sync with the ETH_RSS_RETA_SIZE_x maximum value definition */
1453         pf->hash_lut_size = hw->func_caps.common_cap.rss_table_size >
1454                         ETH_RSS_RETA_SIZE_512 ? ETH_RSS_RETA_SIZE_512 :
1455                         hw->func_caps.common_cap.rss_table_size;
1456         pf->flags |= ICE_FLAG_RSS_AQ_CAPABLE;
1457
1458         memset(&vsi_ctx, 0, sizeof(vsi_ctx));
1459         switch (type) {
1460         case ICE_VSI_PF:
1461                 vsi->nb_qps = pf->lan_nb_qps;
1462                 vsi->base_queue = 1;
1463                 ice_vsi_config_default_rss(&vsi_ctx.info);
1464                 vsi_ctx.alloc_from_pool = true;
1465                 vsi_ctx.flags = ICE_AQ_VSI_TYPE_PF;
1466                 /* switch_id is queried by get_switch_config aq, which is done
1467                  * by ice_init_hw
1468                  */
1469                 vsi_ctx.info.sw_id = hw->port_info->sw_id;
1470                 vsi_ctx.info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
1471                 /* Allow all untagged or tagged packets */
1472                 vsi_ctx.info.inner_vlan_flags = ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL;
1473                 vsi_ctx.info.inner_vlan_flags |= ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING;
1474                 vsi_ctx.info.q_opt_rss = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF |
1475                                          ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
1476                 if (ice_is_dvm_ena(hw)) {
1477                         vsi_ctx.info.outer_vlan_flags =
1478                                 (ICE_AQ_VSI_OUTER_VLAN_TX_MODE_ALL <<
1479                                  ICE_AQ_VSI_OUTER_VLAN_TX_MODE_S) &
1480                                 ICE_AQ_VSI_OUTER_VLAN_TX_MODE_M;
1481                         vsi_ctx.info.outer_vlan_flags |=
1482                                 (ICE_AQ_VSI_OUTER_TAG_VLAN_8100 <<
1483                                  ICE_AQ_VSI_OUTER_TAG_TYPE_S) &
1484                                 ICE_AQ_VSI_OUTER_TAG_TYPE_M;
1485                 }
1486
1487                 /* FDIR */
1488                 cfg = ICE_AQ_VSI_PROP_SECURITY_VALID |
1489                         ICE_AQ_VSI_PROP_FLOW_DIR_VALID;
1490                 vsi_ctx.info.valid_sections |= rte_cpu_to_le_16(cfg);
1491                 cfg = ICE_AQ_VSI_FD_ENABLE;
1492                 vsi_ctx.info.fd_options = rte_cpu_to_le_16(cfg);
1493                 vsi_ctx.info.max_fd_fltr_dedicated =
1494                         rte_cpu_to_le_16(hw->func_caps.fd_fltr_guar);
1495                 vsi_ctx.info.max_fd_fltr_shared =
1496                         rte_cpu_to_le_16(hw->func_caps.fd_fltr_best_effort);
1497
1498                 /* Enable VLAN/UP trip */
1499                 ret = ice_vsi_config_tc_queue_mapping(vsi,
1500                                                       &vsi_ctx.info,
1501                                                       ICE_DEFAULT_TCMAP);
1502                 if (ret) {
1503                         PMD_INIT_LOG(ERR,
1504                                      "tc queue mapping with vsi failed, "
1505                                      "err = %d",
1506                                      ret);
1507                         goto fail_mem;
1508                 }
1509
1510                 break;
1511         case ICE_VSI_CTRL:
1512                 vsi->nb_qps = pf->fdir_nb_qps;
1513                 vsi->base_queue = ICE_FDIR_QUEUE_ID;
1514                 vsi_ctx.alloc_from_pool = true;
1515                 vsi_ctx.flags = ICE_AQ_VSI_TYPE_PF;
1516
1517                 cfg = ICE_AQ_VSI_PROP_FLOW_DIR_VALID;
1518                 vsi_ctx.info.valid_sections |= rte_cpu_to_le_16(cfg);
1519                 cfg = ICE_AQ_VSI_FD_PROG_ENABLE;
1520                 vsi_ctx.info.fd_options = rte_cpu_to_le_16(cfg);
1521                 vsi_ctx.info.sw_id = hw->port_info->sw_id;
1522                 vsi_ctx.info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
1523                 ret = ice_vsi_config_tc_queue_mapping(vsi,
1524                                                       &vsi_ctx.info,
1525                                                       ICE_DEFAULT_TCMAP);
1526                 if (ret) {
1527                         PMD_INIT_LOG(ERR,
1528                                      "tc queue mapping with vsi failed, "
1529                                      "err = %d",
1530                                      ret);
1531                         goto fail_mem;
1532                 }
1533                 break;
1534         default:
1535                 /* for other types of VSI */
1536                 PMD_INIT_LOG(ERR, "other types of VSI not supported");
1537                 goto fail_mem;
1538         }
1539
1540         /* VF has MSIX interrupt in VF range, don't allocate here */
1541         if (type == ICE_VSI_PF) {
1542                 ret = ice_res_pool_alloc(&pf->msix_pool,
1543                                          RTE_MIN(vsi->nb_qps,
1544                                                  RTE_MAX_RXTX_INTR_VEC_ID));
1545                 if (ret < 0) {
1546                         PMD_INIT_LOG(ERR, "VSI MAIN %d get heap failed %d",
1547                                      vsi->vsi_id, ret);
1548                 }
1549                 vsi->msix_intr = ret;
1550                 vsi->nb_msix = RTE_MIN(vsi->nb_qps, RTE_MAX_RXTX_INTR_VEC_ID);
1551         } else if (type == ICE_VSI_CTRL) {
1552                 ret = ice_res_pool_alloc(&pf->msix_pool, 1);
1553                 if (ret < 0) {
1554                         PMD_DRV_LOG(ERR, "VSI %d get heap failed %d",
1555                                     vsi->vsi_id, ret);
1556                 }
1557                 vsi->msix_intr = ret;
1558                 vsi->nb_msix = 1;
1559         } else {
1560                 vsi->msix_intr = 0;
1561                 vsi->nb_msix = 0;
1562         }
1563         ret = ice_add_vsi(hw, vsi->idx, &vsi_ctx, NULL);
1564         if (ret != ICE_SUCCESS) {
1565                 PMD_INIT_LOG(ERR, "add vsi failed, err = %d", ret);
1566                 goto fail_mem;
1567         }
1568         /* store VSI information in the SW structure */
1569         vsi->vsi_id = vsi_ctx.vsi_num;
1570         vsi->info = vsi_ctx.info;
1571         pf->vsis_allocated = vsi_ctx.vsis_allocd;
1572         pf->vsis_unallocated = vsi_ctx.vsis_unallocated;
1573
1574         if (type == ICE_VSI_PF) {
1575                 /* MAC configuration */
1576                 rte_ether_addr_copy((struct rte_ether_addr *)
1577                                         hw->port_info->mac.perm_addr,
1578                                     &pf->dev_addr);
1579
1580                 rte_ether_addr_copy(&pf->dev_addr, &mac_addr);
1581                 ret = ice_add_mac_filter(vsi, &mac_addr);
1582                 if (ret != ICE_SUCCESS)
1583                         PMD_INIT_LOG(ERR, "Failed to add dflt MAC filter");
1584
1585                 rte_ether_addr_copy(&broadcast, &mac_addr);
1586                 ret = ice_add_mac_filter(vsi, &mac_addr);
1587                 if (ret != ICE_SUCCESS)
1588                         PMD_INIT_LOG(ERR, "Failed to add MAC filter");
1589         }
1590
1591         /* At the beginning, only TC0. */
1592         /* What we need here is the maximum number of Tx queues.
1593          * Currently vsi->nb_qps holds that value.
1594          * Correct this if that ever changes.
1595          */
1596         max_txqs[0] = vsi->nb_qps;
1597         ret = ice_cfg_vsi_lan(hw->port_info, vsi->idx,
1598                               tc_bitmap, max_txqs);
1599         if (ret != ICE_SUCCESS)
1600                 PMD_INIT_LOG(ERR, "Failed to config vsi sched");
1601
1602         return vsi;
1603 fail_mem:
1604         rte_free(vsi);
1605         pf->next_vsi_idx--;
1606         return NULL;
1607 }
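/*
 * Usage note (informational): within this file, ice_setup_vsi() is called
 * with ICE_VSI_PF from ice_pf_setup() below. Judging from the switch case
 * above, the ICE_VSI_CTRL flavor backs the flow director control path,
 * using fdir_nb_qps queue pairs starting at ICE_FDIR_QUEUE_ID and a single
 * MSI-X vector.
 */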
1608
1609 static int
1610 ice_send_driver_ver(struct ice_hw *hw)
1611 {
1612         struct ice_driver_ver dv;
1613
1614         /* we don't have a real driver version; use 0 as a dummy */
1615         dv.major_ver = 0;
1616         dv.minor_ver = 0;
1617         dv.build_ver = 0;
1618         dv.subbuild_ver = 0;
1619         strncpy((char *)dv.driver_string, "dpdk", sizeof(dv.driver_string));
1620
1621         return ice_aq_send_driver_ver(hw, &dv, NULL);
1622 }
1623
1624 static int
1625 ice_pf_setup(struct ice_pf *pf)
1626 {
1627         struct ice_hw *hw = ICE_PF_TO_HW(pf);
1628         struct ice_vsi *vsi;
1629         uint16_t unused;
1630
1631         /* Clear all stats counters */
1632         pf->offset_loaded = false;
1633         memset(&pf->stats, 0, sizeof(struct ice_hw_port_stats));
1634         memset(&pf->stats_offset, 0, sizeof(struct ice_hw_port_stats));
1635         memset(&pf->internal_stats, 0, sizeof(struct ice_eth_stats));
1636         memset(&pf->internal_stats_offset, 0, sizeof(struct ice_eth_stats));
1637
1638         /* force guaranteed filter pool for PF */
1639         ice_alloc_fd_guar_item(hw, &unused,
1640                                hw->func_caps.fd_fltr_guar);
1641         /* force shared filter pool for PF */
1642         ice_alloc_fd_shrd_item(hw, &unused,
1643                                hw->func_caps.fd_fltr_best_effort);
1644
1645         vsi = ice_setup_vsi(pf, ICE_VSI_PF);
1646         if (!vsi) {
1647                 PMD_INIT_LOG(ERR, "Failed to add vsi for PF");
1648                 return -EINVAL;
1649         }
1650
1651         pf->main_vsi = vsi;
1652
1653         return 0;
1654 }
1655
1656 static enum ice_pkg_type
1657 ice_load_pkg_type(struct ice_hw *hw)
1658 {
1659         enum ice_pkg_type package_type;
1660
1661         /* store the activated package type (OS default or Comms) */
1662         if (!strncmp((char *)hw->active_pkg_name, ICE_OS_DEFAULT_PKG_NAME,
1663                 ICE_PKG_NAME_SIZE))
1664                 package_type = ICE_PKG_TYPE_OS_DEFAULT;
1665         else if (!strncmp((char *)hw->active_pkg_name, ICE_COMMS_PKG_NAME,
1666                 ICE_PKG_NAME_SIZE))
1667                 package_type = ICE_PKG_TYPE_COMMS;
1668         else
1669                 package_type = ICE_PKG_TYPE_UNKNOWN;
1670
1671         PMD_INIT_LOG(NOTICE, "Active package is: %d.%d.%d.%d, %s (%s VLAN mode)",
1672                 hw->active_pkg_ver.major, hw->active_pkg_ver.minor,
1673                 hw->active_pkg_ver.update, hw->active_pkg_ver.draft,
1674                 hw->active_pkg_name,
1675                 ice_is_dvm_ena(hw) ? "double" : "single");
1676
1677         return package_type;
1678 }
1679
1680 int ice_load_pkg(struct ice_adapter *adapter, bool use_dsn, uint64_t dsn)
1681 {
1682         struct ice_hw *hw = &adapter->hw;
1683         char pkg_file[ICE_MAX_PKG_FILENAME_SIZE];
1684         char opt_ddp_filename[ICE_MAX_PKG_FILENAME_SIZE];
1685         void *buf;
1686         size_t bufsz;
1687         int err;
1688
1689         if (!use_dsn)
1690                 goto no_dsn;
1691
1692         memset(opt_ddp_filename, 0, ICE_MAX_PKG_FILENAME_SIZE);
1693         snprintf(opt_ddp_filename, ICE_MAX_PKG_FILENAME_SIZE,
1694                 "ice-%016" PRIx64 ".pkg", dsn);
1695         strncpy(pkg_file, ICE_PKG_FILE_SEARCH_PATH_UPDATES,
1696                 ICE_MAX_PKG_FILENAME_SIZE);
1697         strcat(pkg_file, opt_ddp_filename);
1698         if (rte_firmware_read(pkg_file, &buf, &bufsz) == 0)
1699                 goto load_fw;
1700
1701         strncpy(pkg_file, ICE_PKG_FILE_SEARCH_PATH_DEFAULT,
1702                 ICE_MAX_PKG_FILENAME_SIZE);
1703         strcat(pkg_file, opt_ddp_filename);
1704         if (rte_firmware_read(pkg_file, &buf, &bufsz) == 0)
1705                 goto load_fw;
1706
1707 no_dsn:
1708         strncpy(pkg_file, ICE_PKG_FILE_UPDATES, ICE_MAX_PKG_FILENAME_SIZE);
1709         if (rte_firmware_read(pkg_file, &buf, &bufsz) == 0)
1710                 goto load_fw;
1711
1712         strncpy(pkg_file, ICE_PKG_FILE_DEFAULT, ICE_MAX_PKG_FILENAME_SIZE);
1713         if (rte_firmware_read(pkg_file, &buf, &bufsz) < 0) {
1714                 PMD_INIT_LOG(ERR, "failed to find the DDP package file in any search path");
1715                 return -1;
1716         }
1717
1718 load_fw:
1719         PMD_INIT_LOG(DEBUG, "DDP package name: %s", pkg_file);
1720
1721         err = ice_copy_and_init_pkg(hw, buf, bufsz);
1722         if (err) {
1723                 PMD_INIT_LOG(ERR, "ice_copy_and_init_pkg failed: %d", err);
1724                 goto out;
1725         }
1726
1727         /* store the loaded pkg type info */
1728         adapter->active_pkg_type = ice_load_pkg_type(hw);
1729
1730 out:
1731         free(buf);
1732         return err;
1733 }
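/*
 * DDP package lookup order implemented above (illustrative, using a made-up
 * DSN of 0x0123456789abcdef; steps 1-2 are skipped when no DSN is
 * available):
 *   1. ICE_PKG_FILE_SEARCH_PATH_UPDATES/ice-0123456789abcdef.pkg
 *   2. ICE_PKG_FILE_SEARCH_PATH_DEFAULT/ice-0123456789abcdef.pkg
 *   3. ICE_PKG_FILE_UPDATES
 *   4. ICE_PKG_FILE_DEFAULT
 * The first file rte_firmware_read() can load is handed to
 * ice_copy_and_init_pkg(); the function only fails if all reads fail.
 */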
1734
1735 static void
1736 ice_base_queue_get(struct ice_pf *pf)
1737 {
1738         uint32_t reg;
1739         struct ice_hw *hw = ICE_PF_TO_HW(pf);
1740
1741         reg = ICE_READ_REG(hw, PFLAN_RX_QALLOC);
1742         if (reg & PFLAN_RX_QALLOC_VALID_M) {
1743                 pf->base_queue = reg & PFLAN_RX_QALLOC_FIRSTQ_M;
1744         } else {
1745                 PMD_INIT_LOG(WARNING, "Failed to get Rx base queue"
1746                                         " index");
1747         }
1748 }
1749
1750 static int
1751 parse_bool(const char *key, const char *value, void *args)
1752 {
1753         int *i = (int *)args;
1754         char *end;
1755         int num;
1756
1757         num = strtoul(value, &end, 10);
1758
1759         if (num != 0 && num != 1) {
1760                 PMD_DRV_LOG(WARNING, "invalid value:\"%s\" for key:\"%s\", "
1761                         "value must be 0 or 1",
1762                         value, key);
1763                 return -1;
1764         }
1765
1766         *i = num;
1767         return 0;
1768 }
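/*
 * Example behavior (illustrative): parse_bool(key, "1", &v) stores 1 and
 * returns 0, while any value whose numeric conversion is neither 0 nor 1
 * (e.g. "2") is rejected with -1 and a warning.
 */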
1769
1770 static int
1771 parse_u64(const char *key, const char *value, void *args)
1772 {
1773         u64 *num = (u64 *)args;
1774         u64 tmp;
1775
1776         errno = 0;
1777         tmp = strtoull(value, NULL, 16);
1778         if (errno) {
1779                 PMD_DRV_LOG(WARNING, "%s: \"%s\" is not a valid u64",
1780                             key, value);
1781                 return -1;
1782         }
1783
1784         *num = tmp;
1785
1786         return 0;
1787 }
1788
1789 static int ice_parse_devargs(struct rte_eth_dev *dev)
1790 {
1791         struct ice_adapter *ad =
1792                 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1793         struct rte_devargs *devargs = dev->device->devargs;
1794         struct rte_kvargs *kvlist;
1795         int ret;
1796
1797         if (devargs == NULL)
1798                 return 0;
1799
1800         kvlist = rte_kvargs_parse(devargs->args, ice_valid_args);
1801         if (kvlist == NULL) {
1802                 PMD_INIT_LOG(ERR, "Invalid kvargs key");
1803                 return -EINVAL;
1804         }
1805
1806         ad->devargs.proto_xtr_dflt = PROTO_XTR_NONE;
1807         memset(ad->devargs.proto_xtr, PROTO_XTR_NONE,
1808                sizeof(ad->devargs.proto_xtr));
1809
1810         ret = rte_kvargs_process(kvlist, ICE_PROTO_XTR_ARG,
1811                                  &handle_proto_xtr_arg, &ad->devargs);
1812         if (ret)
1813                 goto bail;
1814
1815         ret = rte_kvargs_process(kvlist, ICE_SAFE_MODE_SUPPORT_ARG,
1816                                  &parse_bool, &ad->devargs.safe_mode_support);
1817         if (ret)
1818                 goto bail;
1819
1820         ret = rte_kvargs_process(kvlist, ICE_PIPELINE_MODE_SUPPORT_ARG,
1821                                  &parse_bool, &ad->devargs.pipe_mode_support);
1822         if (ret)
1823                 goto bail;
1824
1825         ret = rte_kvargs_process(kvlist, ICE_HW_DEBUG_MASK_ARG,
1826                                  &parse_u64, &ad->hw.debug_mask);
1827         if (ret)
1828                 goto bail;
1829
1830 bail:
1831         rte_kvargs_free(kvlist);
1832         return ret;
1833 }
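/*
 * Usage sketch (illustrative; assumes a testpmd-style command line, with
 * the key strings as listed in ice_valid_args):
 *
 *   dpdk-testpmd -a 0000:18:00.0,safe-mode-support=1,hw_debug_mask=0xff -- -i
 *
 * safe-mode-support and pipeline-mode-support go through parse_bool()
 * (only 0 or 1 is accepted), while hw_debug_mask goes through parse_u64()
 * and is interpreted as a hexadecimal value.
 */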
1834
1835 /* Forward LLDP packets to the default VSI by setting switch rules */
1836 static int
1837 ice_vsi_config_sw_lldp(struct ice_vsi *vsi, bool on)
1838 {
1839         struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
1840         struct ice_fltr_list_entry *s_list_itr = NULL;
1841         struct LIST_HEAD_TYPE list_head;
1842         int ret = 0;
1843
1844         INIT_LIST_HEAD(&list_head);
1845
1846         s_list_itr = (struct ice_fltr_list_entry *)
1847                         ice_malloc(hw, sizeof(*s_list_itr));
1848         if (!s_list_itr)
1849                 return -ENOMEM;
1850         s_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_ETHERTYPE;
1851         s_list_itr->fltr_info.vsi_handle = vsi->idx;
1852         s_list_itr->fltr_info.l_data.ethertype_mac.ethertype =
1853                         RTE_ETHER_TYPE_LLDP;
1854         s_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
1855         s_list_itr->fltr_info.flag = ICE_FLTR_RX;
1856         s_list_itr->fltr_info.src_id = ICE_SRC_ID_LPORT;
1857         LIST_ADD(&s_list_itr->list_entry, &list_head);
1858         if (on)
1859                 ret = ice_add_eth_mac(hw, &list_head);
1860         else
1861                 ret = ice_remove_eth_mac(hw, &list_head);
1862
1863         rte_free(s_list_itr);
1864         return ret;
1865 }
1866
1867 static enum ice_status
1868 ice_get_hw_res(struct ice_hw *hw, uint16_t res_type,
1869                 uint16_t num, uint16_t desc_id,
1870                 uint16_t *prof_buf, uint16_t *num_prof)
1871 {
1872         struct ice_aqc_res_elem *resp_buf;
1873         int ret;
1874         uint16_t buf_len;
1875         bool res_shared = true;
1876         struct ice_aq_desc aq_desc;
1877         struct ice_sq_cd *cd = NULL;
1878         struct ice_aqc_get_allocd_res_desc *cmd =
1879                         &aq_desc.params.get_res_desc;
1880
1881         buf_len = sizeof(*resp_buf) * num;
1882         resp_buf = ice_malloc(hw, buf_len);
1883         if (!resp_buf)
1884                 return ICE_ERR_NO_MEMORY;
1885
1886         ice_fill_dflt_direct_cmd_desc(&aq_desc,
1887                         ice_aqc_opc_get_allocd_res_desc);
1888
1889         cmd->ops.cmd.res = CPU_TO_LE16(((res_type << ICE_AQC_RES_TYPE_S) &
1890                                 ICE_AQC_RES_TYPE_M) | (res_shared ?
1891                                 ICE_AQC_RES_TYPE_FLAG_SHARED : 0));
1892         cmd->ops.cmd.first_desc = CPU_TO_LE16(desc_id);
1893
1894         ret = ice_aq_send_cmd(hw, &aq_desc, resp_buf, buf_len, cd);
1895         if (!ret)
1896                 *num_prof = LE16_TO_CPU(cmd->ops.resp.num_desc);
1897         else
1898                 goto exit;
1899
1900         ice_memcpy(prof_buf, resp_buf, sizeof(*resp_buf) *
1901                         (*num_prof), ICE_NONDMA_TO_NONDMA);
1902
1903 exit:
1904         rte_free(resp_buf);
1905         return ret;
1906 }
1907 static int
1908 ice_cleanup_resource(struct ice_hw *hw, uint16_t res_type)
1909 {
1910         int ret;
1911         uint16_t prof_id;
1912         uint16_t prof_buf[ICE_MAX_RES_DESC_NUM];
1913         uint16_t first_desc = 1;
1914         uint16_t num_prof = 0;
1915
1916         ret = ice_get_hw_res(hw, res_type, ICE_MAX_RES_DESC_NUM,
1917                         first_desc, prof_buf, &num_prof);
1918         if (ret) {
1919                 PMD_INIT_LOG(ERR, "Failed to get fxp resource");
1920                 return ret;
1921         }
1922
1923         for (prof_id = 0; prof_id < num_prof; prof_id++) {
1924                 ret = ice_free_hw_res(hw, res_type, 1, &prof_buf[prof_id]);
1925                 if (ret) {
1926                         PMD_INIT_LOG(ERR, "Failed to free fxp resource");
1927                         return ret;
1928                 }
1929         }
1930         return 0;
1931 }
1932
1933 static int
1934 ice_reset_fxp_resource(struct ice_hw *hw)
1935 {
1936         int ret;
1937
1938         ret = ice_cleanup_resource(hw, ICE_AQC_RES_TYPE_FD_PROF_BLDR_PROFID);
1939         if (ret) {
1940                 PMD_INIT_LOG(ERR, "Failed to clean up FDIR resource");
1941                 return ret;
1942         }
1943
1944         ret = ice_cleanup_resource(hw, ICE_AQC_RES_TYPE_HASH_PROF_BLDR_PROFID);
1945         if (ret) {
1946                 PMD_INIT_LOG(ERR, "Failed to clean up RSS resource");
1947                 return ret;
1948         }
1949
1950         return 0;
1951 }
1952
1953 static void
1954 ice_rss_ctx_init(struct ice_pf *pf)
1955 {
1956         memset(&pf->hash_ctx, 0, sizeof(pf->hash_ctx));
1957 }
1958
1959 static uint64_t
1960 ice_get_supported_rxdid(struct ice_hw *hw)
1961 {
1962         uint64_t supported_rxdid = 0; /* bitmap for supported RXDID */
1963         uint32_t regval;
1964         int i;
1965
1966         supported_rxdid |= BIT(ICE_RXDID_LEGACY_1);
1967
1968         for (i = ICE_RXDID_FLEX_NIC; i < ICE_FLEX_DESC_RXDID_MAX_NUM; i++) {
1969                 regval = ICE_READ_REG(hw, GLFLXP_RXDID_FLAGS(i, 0));
1970                 if ((regval >> GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S)
1971                         & GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M)
1972                         supported_rxdid |= BIT(i);
1973         }
1974         return supported_rxdid;
1975 }
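/*
 * Consumption sketch (illustrative): the bitmap built above is stored in
 * pf->supported_rxdid at init time, after which support for a given
 * flexible descriptor ID can be tested with a plain bit check:
 *
 *	if (pf->supported_rxdid & BIT(rxdid))
 *		... rxdid is usable on this port ...
 *
 * ICE_RXDID_LEGACY_1 is always reported as supported.
 */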
1976
1977 static int
1978 ice_dev_init(struct rte_eth_dev *dev)
1979 {
1980         struct rte_pci_device *pci_dev;
1981         struct rte_intr_handle *intr_handle;
1982         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1983         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1984         struct ice_adapter *ad =
1985                 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1986         struct ice_vsi *vsi;
1987         int ret;
1988 #ifndef RTE_EXEC_ENV_WINDOWS
1989         off_t pos;
1990         uint32_t dsn_low, dsn_high;
1991         uint64_t dsn;
1992         bool use_dsn;
1993 #endif
1994
1995         dev->dev_ops = &ice_eth_dev_ops;
1996         dev->rx_queue_count = ice_rx_queue_count;
1997         dev->rx_descriptor_status = ice_rx_descriptor_status;
1998         dev->tx_descriptor_status = ice_tx_descriptor_status;
1999         dev->rx_pkt_burst = ice_recv_pkts;
2000         dev->tx_pkt_burst = ice_xmit_pkts;
2001         dev->tx_pkt_prepare = ice_prep_pkts;
2002
2003         /* for secondary processes, we don't initialise any further as primary
2004          * has already done this work.
2005          */
2006         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2007                 ice_set_rx_function(dev);
2008                 ice_set_tx_function(dev);
2009                 return 0;
2010         }
2011
2012         dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
2013
2014         ice_set_default_ptype_table(dev);
2015         pci_dev = RTE_DEV_TO_PCI(dev->device);
2016         intr_handle = &pci_dev->intr_handle;
2017
2018         pf->adapter = ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2019         pf->dev_data = dev->data;
2020         hw->back = pf->adapter;
2021         hw->hw_addr = (uint8_t *)pci_dev->mem_resource[0].addr;
2022         hw->vendor_id = pci_dev->id.vendor_id;
2023         hw->device_id = pci_dev->id.device_id;
2024         hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
2025         hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
2026         hw->bus.device = pci_dev->addr.devid;
2027         hw->bus.func = pci_dev->addr.function;
2028
2029         ret = ice_parse_devargs(dev);
2030         if (ret) {
2031                 PMD_INIT_LOG(ERR, "Failed to parse devargs");
2032                 return -EINVAL;
2033         }
2034
2035         ice_init_controlq_parameter(hw);
2036
2037         ret = ice_init_hw(hw);
2038         if (ret) {
2039                 PMD_INIT_LOG(ERR, "Failed to initialize HW");
2040                 return -EINVAL;
2041         }
2042
2043 #ifndef RTE_EXEC_ENV_WINDOWS
2044         use_dsn = false;
2045         dsn = 0;
2046         pos = rte_pci_find_ext_capability(pci_dev, RTE_PCI_EXT_CAP_ID_DSN);
2047         if (pos) {
2048                 if (rte_pci_read_config(pci_dev, &dsn_low, 4, pos + 4) < 0 ||
2049                                 rte_pci_read_config(pci_dev, &dsn_high, 4, pos + 8) < 0) {
2050                         PMD_INIT_LOG(ERR, "Failed to read PCI config space");
2051                 } else {
2052                         use_dsn = true;
2053                         dsn = (uint64_t)dsn_high << 32 | dsn_low;
2054                 }
2055         } else {
2056                 PMD_INIT_LOG(ERR, "Failed to read device serial number");
2057         }
2058
2059         ret = ice_load_pkg(pf->adapter, use_dsn, dsn);
2060         if (ret == 0) {
2061                 ret = ice_init_hw_tbls(hw);
2062                 if (ret) {
2063                         PMD_INIT_LOG(ERR, "ice_init_hw_tbls failed: %d", ret);
2064                         rte_free(hw->pkg_copy);
2065                 }
2066         }
2067
2068         if (ret) {
2069                 if (ad->devargs.safe_mode_support == 0) {
2070                         PMD_INIT_LOG(ERR, "Failed to load the DDP package, "
2071                                         "use safe-mode-support=1 to enter Safe Mode");
2072                         goto err_init_fw;
2073                 }
2074
2075                 PMD_INIT_LOG(WARNING, "Failed to load the DDP package, "
2076                                         "entering Safe Mode");
2077                 ad->is_safe_mode = 1;
2078         }
2079 #endif
2080
2081         PMD_INIT_LOG(INFO, "FW %d.%d.%05d API %d.%d",
2082                      hw->fw_maj_ver, hw->fw_min_ver, hw->fw_build,
2083                      hw->api_maj_ver, hw->api_min_ver);
2084
2085         ice_pf_sw_init(dev);
2086         ret = ice_init_mac_address(dev);
2087         if (ret) {
2088                 PMD_INIT_LOG(ERR, "Failed to initialize mac address");
2089                 goto err_init_mac;
2090         }
2091
2092         ret = ice_res_pool_init(&pf->msix_pool, 1,
2093                                 hw->func_caps.common_cap.num_msix_vectors - 1);
2094         if (ret) {
2095                 PMD_INIT_LOG(ERR, "Failed to init MSIX pool");
2096                 goto err_msix_pool_init;
2097         }
2098
2099         ret = ice_pf_setup(pf);
2100         if (ret) {
2101                 PMD_INIT_LOG(ERR, "Failed to setup PF");
2102                 goto err_pf_setup;
2103         }
2104
2105         ret = ice_send_driver_ver(hw);
2106         if (ret) {
2107                 PMD_INIT_LOG(ERR, "Failed to send driver version");
2108                 goto err_pf_setup;
2109         }
2110
2111         vsi = pf->main_vsi;
2112
2113         ret = ice_aq_stop_lldp(hw, true, false, NULL);
2114         if (ret != ICE_SUCCESS)
2115                 PMD_INIT_LOG(DEBUG, "LLDP has already been stopped");
2116         ret = ice_init_dcb(hw, true);
2117         if (ret != ICE_SUCCESS)
2118                 PMD_INIT_LOG(DEBUG, "Failed to init DCB");
2119         /* Forward LLDP packets to default VSI */
2120         ret = ice_vsi_config_sw_lldp(vsi, true);
2121         if (ret != ICE_SUCCESS)
2122                 PMD_INIT_LOG(DEBUG, "Failed to configure SW LLDP forwarding");
2123         /* register callback func to eal lib */
2124         rte_intr_callback_register(intr_handle,
2125                                    ice_interrupt_handler, dev);
2126
2127         ice_pf_enable_irq0(hw);
2128
2129         /* enable uio intr after callback register */
2130         rte_intr_enable(intr_handle);
2131
2132         /* get the base queue pair index in the device */
2133         ice_base_queue_get(pf);
2134
2135         /* Initialize RSS context for gtpu_eh */
2136         ice_rss_ctx_init(pf);
2137
2138         if (!ad->is_safe_mode) {
2139                 ret = ice_flow_init(ad);
2140                 if (ret) {
2141                         PMD_INIT_LOG(ERR, "Failed to initialize flow");
2142                         return ret;
2143                 }
2144         }
2145
2146         ret = ice_reset_fxp_resource(hw);
2147         if (ret) {
2148                 PMD_INIT_LOG(ERR, "Failed to reset fxp resource");
2149                 return ret;
2150         }
2151
2152         pf->supported_rxdid = ice_get_supported_rxdid(hw);
2153
2154         return 0;
2155
2156 err_pf_setup:
2157         ice_res_pool_destroy(&pf->msix_pool);
2158 err_msix_pool_init:
2159         rte_free(dev->data->mac_addrs);
2160         dev->data->mac_addrs = NULL;
2161 err_init_mac:
2162         rte_free(pf->proto_xtr);
2163 #ifndef RTE_EXEC_ENV_WINDOWS
2164 err_init_fw:
2165 #endif
2166         ice_deinit_hw(hw);
2167
2168         return ret;
2169 }
2170
2171 int
2172 ice_release_vsi(struct ice_vsi *vsi)
2173 {
2174         struct ice_hw *hw;
2175         struct ice_vsi_ctx vsi_ctx;
2176         enum ice_status ret;
2177         int error = 0;
2178
2179         if (!vsi)
2180                 return error;
2181
2182         hw = ICE_VSI_TO_HW(vsi);
2183
2184         ice_remove_all_mac_vlan_filters(vsi);
2185
2186         memset(&vsi_ctx, 0, sizeof(vsi_ctx));
2187
2188         vsi_ctx.vsi_num = vsi->vsi_id;
2189         vsi_ctx.info = vsi->info;
2190         ret = ice_free_vsi(hw, vsi->idx, &vsi_ctx, false, NULL);
2191         if (ret != ICE_SUCCESS) {
2192                 PMD_INIT_LOG(ERR, "Failed to free vsi by aq, %u", vsi->vsi_id);
2193                 error = -1;
2194         }
2195
2196         rte_free(vsi->rss_lut);
2197         rte_free(vsi->rss_key);
2198         rte_free(vsi);
2199         return error;
2200 }
2201
2202 void
2203 ice_vsi_disable_queues_intr(struct ice_vsi *vsi)
2204 {
2205         struct rte_eth_dev *dev = &rte_eth_devices[vsi->adapter->pf.dev_data->port_id];
2206         struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
2207         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2208         struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
2209         uint16_t msix_intr, i;
2210
2211         /* disable interrupts and also clear all the existing config */
2212         for (i = 0; i < vsi->nb_qps; i++) {
2213                 ICE_WRITE_REG(hw, QINT_TQCTL(vsi->base_queue + i), 0);
2214                 ICE_WRITE_REG(hw, QINT_RQCTL(vsi->base_queue + i), 0);
2215                 rte_wmb();
2216         }
2217
2218         if (rte_intr_allow_others(intr_handle))
2219                 /* vfio-pci */
2220                 for (i = 0; i < vsi->nb_msix; i++) {
2221                         msix_intr = vsi->msix_intr + i;
2222                         ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr),
2223                                       GLINT_DYN_CTL_WB_ON_ITR_M);
2224                 }
2225         else
2226                 /* igb_uio */
2227                 ICE_WRITE_REG(hw, GLINT_DYN_CTL(0), GLINT_DYN_CTL_WB_ON_ITR_M);
2228 }
2229
2230 static int
2231 ice_dev_stop(struct rte_eth_dev *dev)
2232 {
2233         struct rte_eth_dev_data *data = dev->data;
2234         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2235         struct ice_vsi *main_vsi = pf->main_vsi;
2236         struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
2237         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2238         uint16_t i;
2239
2240         /* avoid stopping again */
2241         if (pf->adapter_stopped)
2242                 return 0;
2243
2244         /* stop and clear all Rx queues */
2245         for (i = 0; i < data->nb_rx_queues; i++)
2246                 ice_rx_queue_stop(dev, i);
2247
2248         /* stop and clear all Tx queues */
2249         for (i = 0; i < data->nb_tx_queues; i++)
2250                 ice_tx_queue_stop(dev, i);
2251
2252         /* disable all queue interrupts */
2253         ice_vsi_disable_queues_intr(main_vsi);
2254
2255         if (pf->init_link_up)
2256                 ice_dev_set_link_up(dev);
2257         else
2258                 ice_dev_set_link_down(dev);
2259
2260         /* Clean datapath event and queue/vec mapping */
2261         rte_intr_efd_disable(intr_handle);
2262         if (intr_handle->intr_vec) {
2263                 rte_free(intr_handle->intr_vec);
2264                 intr_handle->intr_vec = NULL;
2265         }
2266
2267         pf->adapter_stopped = true;
2268         dev->data->dev_started = 0;
2269
2270         return 0;
2271 }
2272
2273 static int
2274 ice_dev_close(struct rte_eth_dev *dev)
2275 {
2276         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2277         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2278         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2279         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2280         struct ice_adapter *ad =
2281                 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2282         int ret;
2283
2284         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2285                 return 0;
2286
2287         /* Stopping the port brings the link down, and the link event will
2288          * then be triggered. Disable irq0 first so that the deallocation
2289          * of port_info and other resources cannot crash the interrupt
2290          * service thread.
2291          */
2292         ice_pf_disable_irq0(hw);
2293
2294         ret = ice_dev_stop(dev);
2295
2296         if (!ad->is_safe_mode)
2297                 ice_flow_uninit(ad);
2298
2299         /* release all queue resource */
2300         ice_free_queues(dev);
2301
2302         ice_res_pool_destroy(&pf->msix_pool);
2303         ice_release_vsi(pf->main_vsi);
2304         ice_sched_cleanup_all(hw);
2305         ice_free_hw_tbls(hw);
2306         rte_free(hw->port_info);
2307         hw->port_info = NULL;
2308         ice_shutdown_all_ctrlq(hw);
2309         rte_free(pf->proto_xtr);
2310         pf->proto_xtr = NULL;
2311
2312         /* disable uio intr before callback unregister */
2313         rte_intr_disable(intr_handle);
2314
2315         /* unregister callback func from eal lib */
2316         rte_intr_callback_unregister(intr_handle,
2317                                      ice_interrupt_handler, dev);
2318
2319         return ret;
2320 }
2321
2322 static int
2323 ice_dev_uninit(struct rte_eth_dev *dev)
2324 {
2325         ice_dev_close(dev);
2326
2327         return 0;
2328 }
2329
2330 static bool
2331 is_hash_cfg_valid(struct ice_rss_hash_cfg *cfg)
2332 {
2333         return cfg->hash_flds != 0 && cfg->addl_hdrs != 0;
2334 }
2335
2336 static void
2337 hash_cfg_reset(struct ice_rss_hash_cfg *cfg)
2338 {
2339         cfg->hash_flds = 0;
2340         cfg->addl_hdrs = 0;
2341         cfg->symm = 0;
2342         cfg->hdr_type = ICE_RSS_OUTER_HEADERS;
2343 }
2344
2345 static int
2346 ice_hash_moveout(struct ice_pf *pf, struct ice_rss_hash_cfg *cfg)
2347 {
2348         enum ice_status status = ICE_SUCCESS;
2349         struct ice_hw *hw = ICE_PF_TO_HW(pf);
2350         struct ice_vsi *vsi = pf->main_vsi;
2351
2352         if (!is_hash_cfg_valid(cfg))
2353                 return -ENOENT;
2354
2355         status = ice_rem_rss_cfg(hw, vsi->idx, cfg);
2356         if (status && status != ICE_ERR_DOES_NOT_EXIST) {
2357                 PMD_DRV_LOG(ERR,
2358                             "ice_rem_rss_cfg failed for VSI:%d, error:%d",
2359                             vsi->idx, status);
2360                 return -EBUSY;
2361         }
2362
2363         return 0;
2364 }
2365
2366 static int
2367 ice_hash_moveback(struct ice_pf *pf, struct ice_rss_hash_cfg *cfg)
2368 {
2369         enum ice_status status = ICE_SUCCESS;
2370         struct ice_hw *hw = ICE_PF_TO_HW(pf);
2371         struct ice_vsi *vsi = pf->main_vsi;
2372
2373         if (!is_hash_cfg_valid(cfg))
2374                 return -ENOENT;
2375
2376         status = ice_add_rss_cfg(hw, vsi->idx, cfg);
2377         if (status) {
2378                 PMD_DRV_LOG(ERR,
2379                             "ice_add_rss_cfg failed for VSI:%d, error:%d",
2380                             vsi->idx, status);
2381                 return -EBUSY;
2382         }
2383
2384         return 0;
2385 }
2386
2387 static int
2388 ice_hash_remove(struct ice_pf *pf, struct ice_rss_hash_cfg *cfg)
2389 {
2390         int ret;
2391
2392         ret = ice_hash_moveout(pf, cfg);
2393         if (ret && (ret != -ENOENT))
2394                 return ret;
2395
2396         hash_cfg_reset(cfg);
2397
2398         return 0;
2399 }
2400
2401 static int
2402 ice_add_rss_cfg_pre_gtpu(struct ice_pf *pf, struct ice_hash_gtpu_ctx *ctx,
2403                          u8 ctx_idx)
2404 {
2405         int ret;
2406
2407         switch (ctx_idx) {
2408         case ICE_HASH_GTPU_CTX_EH_IP:
2409                 ret = ice_hash_remove(pf,
2410                                       &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_UDP]);
2411                 if (ret && (ret != -ENOENT))
2412                         return ret;
2413
2414                 ret = ice_hash_remove(pf,
2415                                       &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_TCP]);
2416                 if (ret && (ret != -ENOENT))
2417                         return ret;
2418
2419                 ret = ice_hash_remove(pf,
2420                                       &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP]);
2421                 if (ret && (ret != -ENOENT))
2422                         return ret;
2423
2424                 ret = ice_hash_remove(pf,
2425                                       &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_UDP]);
2426                 if (ret && (ret != -ENOENT))
2427                         return ret;
2428
2429                 ret = ice_hash_remove(pf,
2430                                       &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_TCP]);
2431                 if (ret && (ret != -ENOENT))
2432                         return ret;
2433
2434                 ret = ice_hash_remove(pf,
2435                                       &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP]);
2436                 if (ret && (ret != -ENOENT))
2437                         return ret;
2438
2439                 ret = ice_hash_remove(pf,
2440                                       &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_UDP]);
2441                 if (ret && (ret != -ENOENT))
2442                         return ret;
2443
2444                 ret = ice_hash_remove(pf,
2445                                       &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_TCP]);
2446                 if (ret && (ret != -ENOENT))
2447                         return ret;
2448
2449                 break;
2450         case ICE_HASH_GTPU_CTX_EH_IP_UDP:
2451                 ret = ice_hash_remove(pf,
2452                                       &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_UDP]);
2453                 if (ret && (ret != -ENOENT))
2454                         return ret;
2455
2456                 ret = ice_hash_remove(pf,
2457                                       &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_UDP]);
2458                 if (ret && (ret != -ENOENT))
2459                         return ret;
2460
2461                 ret = ice_hash_moveout(pf,
2462                                        &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP]);
2463                 if (ret && (ret != -ENOENT))
2464                         return ret;
2465
2466                 ret = ice_hash_moveout(pf,
2467                                        &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_TCP]);
2468                 if (ret && (ret != -ENOENT))
2469                         return ret;
2470
2471                 ret = ice_hash_moveout(pf,
2472                                        &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP]);
2473                 if (ret && (ret != -ENOENT))
2474                         return ret;
2475
2476                 ret = ice_hash_moveout(pf,
2477                                        &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_TCP]);
2478                 if (ret && (ret != -ENOENT))
2479                         return ret;
2480
2481                 break;
2482         case ICE_HASH_GTPU_CTX_EH_IP_TCP:
2483                 ret = ice_hash_remove(pf,
2484                                       &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_TCP]);
2485                 if (ret && (ret != -ENOENT))
2486                         return ret;
2487
2488                 ret = ice_hash_remove(pf,
2489                                       &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_TCP]);
2490                 if (ret && (ret != -ENOENT))
2491                         return ret;
2492
2493                 ret = ice_hash_moveout(pf,
2494                                        &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP]);
2495                 if (ret && (ret != -ENOENT))
2496                         return ret;
2497
2498                 ret = ice_hash_moveout(pf,
2499                                        &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_UDP]);
2500                 if (ret && (ret != -ENOENT))
2501                         return ret;
2502
2503                 ret = ice_hash_moveout(pf,
2504                                        &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP]);
2505                 if (ret && (ret != -ENOENT))
2506                         return ret;
2507
2508                 ret = ice_hash_moveout(pf,
2509                                        &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_UDP]);
2510                 if (ret && (ret != -ENOENT))
2511                         return ret;
2512
2513                 break;
2514         case ICE_HASH_GTPU_CTX_UP_IP:
2515                 ret = ice_hash_remove(pf,
2516                                       &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_UDP]);
2517                 if (ret && (ret != -ENOENT))
2518                         return ret;
2519
2520                 ret = ice_hash_remove(pf,
2521                                       &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_TCP]);
2522                 if (ret && (ret != -ENOENT))
2523                         return ret;
2524
2525                 ret = ice_hash_moveout(pf,
2526                                        &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP]);
2527                 if (ret && (ret != -ENOENT))
2528                         return ret;
2529
2530                 ret = ice_hash_moveout(pf,
2531                                        &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_UDP]);
2532                 if (ret && (ret != -ENOENT))
2533                         return ret;
2534
2535                 ret = ice_hash_moveout(pf,
2536                                        &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_TCP]);
2537                 if (ret && (ret != -ENOENT))
2538                         return ret;
2539
2540                 break;
2541         case ICE_HASH_GTPU_CTX_UP_IP_UDP:
2542         case ICE_HASH_GTPU_CTX_UP_IP_TCP:
2543                 ret = ice_hash_moveout(pf,
2544                                        &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP]);
2545                 if (ret && (ret != -ENOENT))
2546                         return ret;
2547
2548                 ret = ice_hash_moveout(pf,
2549                                        &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_UDP]);
2550                 if (ret && (ret != -ENOENT))
2551                         return ret;
2552
2553                 ret = ice_hash_moveout(pf,
2554                                        &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_TCP]);
2555                 if (ret && (ret != -ENOENT))
2556                         return ret;
2557
2558                 break;
2559         case ICE_HASH_GTPU_CTX_DW_IP:
2560                 ret = ice_hash_remove(pf,
2561                                       &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_UDP]);
2562                 if (ret && (ret != -ENOENT))
2563                         return ret;
2564
2565                 ret = ice_hash_remove(pf,
2566                                       &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_TCP]);
2567                 if (ret && (ret != -ENOENT))
2568                         return ret;
2569
2570                 ret = ice_hash_moveout(pf,
2571                                        &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP]);
2572                 if (ret && (ret != -ENOENT))
2573                         return ret;
2574
2575                 ret = ice_hash_moveout(pf,
2576                                        &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_UDP]);
2577                 if (ret && (ret != -ENOENT))
2578                         return ret;
2579
2580                 ret = ice_hash_moveout(pf,
2581                                        &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_TCP]);
2582                 if (ret && (ret != -ENOENT))
2583                         return ret;
2584
2585                 break;
2586         case ICE_HASH_GTPU_CTX_DW_IP_UDP:
2587         case ICE_HASH_GTPU_CTX_DW_IP_TCP:
2588                 ret = ice_hash_moveout(pf,
2589                                        &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP]);
2590                 if (ret && (ret != -ENOENT))
2591                         return ret;
2592
2593                 ret = ice_hash_moveout(pf,
2594                                        &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_UDP]);
2595                 if (ret && (ret != -ENOENT))
2596                         return ret;
2597
2598                 ret = ice_hash_moveout(pf,
2599                                        &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_TCP]);
2600                 if (ret && (ret != -ENOENT))
2601                         return ret;
2602
2603                 break;
2604         default:
2605                 break;
2606         }
2607
2608         return 0;
2609 }
2610
2611 static u8 calc_gtpu_ctx_idx(uint32_t hdr)
2612 {
2613         u8 eh_idx, ip_idx;
2614
2615         if (hdr & ICE_FLOW_SEG_HDR_GTPU_EH)
2616                 eh_idx = 0;
2617         else if (hdr & ICE_FLOW_SEG_HDR_GTPU_UP)
2618                 eh_idx = 1;
2619         else if (hdr & ICE_FLOW_SEG_HDR_GTPU_DWN)
2620                 eh_idx = 2;
2621         else
2622                 return ICE_HASH_GTPU_CTX_MAX;
2623
2624         ip_idx = 0;
2625         if (hdr & ICE_FLOW_SEG_HDR_UDP)
2626                 ip_idx = 1;
2627         else if (hdr & ICE_FLOW_SEG_HDR_TCP)
2628                 ip_idx = 2;
2629
2630         if (hdr & (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6))
2631                 return eh_idx * 3 + ip_idx;
2632         else
2633                 return ICE_HASH_GTPU_CTX_MAX;
2634 }
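/*
 * Worked example (derived from the mapping above, assuming the
 * ICE_HASH_GTPU_CTX_* enumeration follows the eh_idx * 3 + ip_idx order
 * used here): for hdr = ICE_FLOW_SEG_HDR_GTPU_EH | ICE_FLOW_SEG_HDR_IPV4 |
 * ICE_FLOW_SEG_HDR_UDP, eh_idx = 0 and ip_idx = 1, giving context index 1,
 * i.e. ICE_HASH_GTPU_CTX_EH_IP_UDP. A header set without a GTPU_EH/UP/DWN
 * bit, or without an IPv4/IPv6 bit, yields ICE_HASH_GTPU_CTX_MAX and is
 * ignored by the GTPU context bookkeeping.
 */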
2635
2636 static int
2637 ice_add_rss_cfg_pre(struct ice_pf *pf, uint32_t hdr)
2638 {
2639         u8 gtpu_ctx_idx = calc_gtpu_ctx_idx(hdr);
2640
2641         if (hdr & ICE_FLOW_SEG_HDR_IPV4)
2642                 return ice_add_rss_cfg_pre_gtpu(pf, &pf->hash_ctx.gtpu4,
2643                                                 gtpu_ctx_idx);
2644         else if (hdr & ICE_FLOW_SEG_HDR_IPV6)
2645                 return ice_add_rss_cfg_pre_gtpu(pf, &pf->hash_ctx.gtpu6,
2646                                                 gtpu_ctx_idx);
2647
2648         return 0;
2649 }
2650
2651 static int
2652 ice_add_rss_cfg_post_gtpu(struct ice_pf *pf, struct ice_hash_gtpu_ctx *ctx,
2653                           u8 ctx_idx, struct ice_rss_hash_cfg *cfg)
2654 {
2655         int ret;
2656
2657         if (ctx_idx < ICE_HASH_GTPU_CTX_MAX)
2658                 ctx->ctx[ctx_idx] = *cfg;
2659
2660         switch (ctx_idx) {
2661         case ICE_HASH_GTPU_CTX_EH_IP:
2662                 break;
2663         case ICE_HASH_GTPU_CTX_EH_IP_UDP:
2664                 ret = ice_hash_moveback(pf,
2665                                         &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP]);
2666                 if (ret && (ret != -ENOENT))
2667                         return ret;
2668
2669                 ret = ice_hash_moveback(pf,
2670                                         &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_TCP]);
2671                 if (ret && (ret != -ENOENT))
2672                         return ret;
2673
2674                 ret = ice_hash_moveback(pf,
2675                                         &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP]);
2676                 if (ret && (ret != -ENOENT))
2677                         return ret;
2678
2679                 ret = ice_hash_moveback(pf,
2680                                         &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_TCP]);
2681                 if (ret && (ret != -ENOENT))
2682                         return ret;
2683
2684                 break;
2685         case ICE_HASH_GTPU_CTX_EH_IP_TCP:
2686                 ret = ice_hash_moveback(pf,
2687                                         &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP]);
2688                 if (ret && (ret != -ENOENT))
2689                         return ret;
2690
2691                 ret = ice_hash_moveback(pf,
2692                                         &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_UDP]);
2693                 if (ret && (ret != -ENOENT))
2694                         return ret;
2695
2696                 ret = ice_hash_moveback(pf,
2697                                         &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP]);
2698                 if (ret && (ret != -ENOENT))
2699                         return ret;
2700
2701                 ret = ice_hash_moveback(pf,
2702                                         &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_UDP]);
2703                 if (ret && (ret != -ENOENT))
2704                         return ret;
2705
2706                 break;
2707         case ICE_HASH_GTPU_CTX_UP_IP:
2708         case ICE_HASH_GTPU_CTX_UP_IP_UDP:
2709         case ICE_HASH_GTPU_CTX_UP_IP_TCP:
2710         case ICE_HASH_GTPU_CTX_DW_IP:
2711         case ICE_HASH_GTPU_CTX_DW_IP_UDP:
2712         case ICE_HASH_GTPU_CTX_DW_IP_TCP:
2713                 ret = ice_hash_moveback(pf,
2714                                         &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP]);
2715                 if (ret && (ret != -ENOENT))
2716                         return ret;
2717
2718                 ret = ice_hash_moveback(pf,
2719                                         &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_UDP]);
2720                 if (ret && (ret != -ENOENT))
2721                         return ret;
2722
2723                 ret = ice_hash_moveback(pf,
2724                                         &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_TCP]);
2725                 if (ret && (ret != -ENOENT))
2726                         return ret;
2727
2728                 break;
2729         default:
2730                 break;
2731         }
2732
2733         return 0;
2734 }
2735
2736 static int
2737 ice_add_rss_cfg_post(struct ice_pf *pf, struct ice_rss_hash_cfg *cfg)
2738 {
2739         u8 gtpu_ctx_idx = calc_gtpu_ctx_idx(cfg->addl_hdrs);
2740
2741         if (cfg->addl_hdrs & ICE_FLOW_SEG_HDR_IPV4)
2742                 return ice_add_rss_cfg_post_gtpu(pf, &pf->hash_ctx.gtpu4,
2743                                                  gtpu_ctx_idx, cfg);
2744         else if (cfg->addl_hdrs & ICE_FLOW_SEG_HDR_IPV6)
2745                 return ice_add_rss_cfg_post_gtpu(pf, &pf->hash_ctx.gtpu6,
2746                                                  gtpu_ctx_idx, cfg);
2747
2748         return 0;
2749 }
2750
2751 static void
2752 ice_rem_rss_cfg_post(struct ice_pf *pf, uint32_t hdr)
2753 {
2754         u8 gtpu_ctx_idx = calc_gtpu_ctx_idx(hdr);
2755
2756         if (gtpu_ctx_idx >= ICE_HASH_GTPU_CTX_MAX)
2757                 return;
2758
2759         if (hdr & ICE_FLOW_SEG_HDR_IPV4)
2760                 hash_cfg_reset(&pf->hash_ctx.gtpu4.ctx[gtpu_ctx_idx]);
2761         else if (hdr & ICE_FLOW_SEG_HDR_IPV6)
2762                 hash_cfg_reset(&pf->hash_ctx.gtpu6.ctx[gtpu_ctx_idx]);
2763 }
2764
2765 int
2766 ice_rem_rss_cfg_wrap(struct ice_pf *pf, uint16_t vsi_id,
2767                      struct ice_rss_hash_cfg *cfg)
2768 {
2769         struct ice_hw *hw = ICE_PF_TO_HW(pf);
2770         int ret;
2771
2772         ret = ice_rem_rss_cfg(hw, vsi_id, cfg);
2773         if (ret && ret != ICE_ERR_DOES_NOT_EXIST)
2774                 PMD_DRV_LOG(ERR, "remove rss cfg failed");
2775
2776         ice_rem_rss_cfg_post(pf, cfg->addl_hdrs);
2777
2778         return 0;
2779 }
2780
2781 int
2782 ice_add_rss_cfg_wrap(struct ice_pf *pf, uint16_t vsi_id,
2783                      struct ice_rss_hash_cfg *cfg)
2784 {
2785         struct ice_hw *hw = ICE_PF_TO_HW(pf);
2786         int ret;
2787
2788         ret = ice_add_rss_cfg_pre(pf, cfg->addl_hdrs);
2789         if (ret)
2790                 PMD_DRV_LOG(ERR, "add rss cfg pre failed");
2791
2792         ret = ice_add_rss_cfg(hw, vsi_id, cfg);
2793         if (ret)
2794                 PMD_DRV_LOG(ERR, "add rss cfg failed");
2795
2796         ret = ice_add_rss_cfg_post(pf, cfg);
2797         if (ret)
2798                 PMD_DRV_LOG(ERR, "add rss cfg post failed");
2799
2800         return 0;
2801 }
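/*
 * Call-order sketch for the wrappers above (informational): for a GTPU
 * related configuration, ice_add_rss_cfg_wrap() first runs the "pre" step
 * (removing or temporarily moving out conflicting cached contexts), then
 * programs the new rule via ice_add_rss_cfg(), and finally runs the "post"
 * step (recording the rule in pf->hash_ctx and moving compatible contexts
 * back in). ice_rem_rss_cfg_wrap() is the inverse: it removes the rule
 * from hardware and clears the corresponding cached context.
 */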
2802
2803 static void
2804 ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
2805 {
2806         struct ice_hw *hw = ICE_PF_TO_HW(pf);
2807         struct ice_vsi *vsi = pf->main_vsi;
2808         struct ice_rss_hash_cfg cfg;
2809         int ret;
2810
2811 #define ICE_RSS_HF_ALL ( \
2812         ETH_RSS_IPV4 | \
2813         ETH_RSS_IPV6 | \
2814         ETH_RSS_NONFRAG_IPV4_UDP | \
2815         ETH_RSS_NONFRAG_IPV6_UDP | \
2816         ETH_RSS_NONFRAG_IPV4_TCP | \
2817         ETH_RSS_NONFRAG_IPV6_TCP | \
2818         ETH_RSS_NONFRAG_IPV4_SCTP | \
2819         ETH_RSS_NONFRAG_IPV6_SCTP | \
2820         ETH_RSS_FRAG_IPV4 | \
2821         ETH_RSS_FRAG_IPV6)
2822
2823         ret = ice_rem_vsi_rss_cfg(hw, vsi->idx);
2824         if (ret)
2825                 PMD_DRV_LOG(ERR, "%s Remove rss vsi fail %d",
2826                             __func__, ret);
2827
2828         cfg.symm = 0;
2829         cfg.hdr_type = ICE_RSS_OUTER_HEADERS;
2830         /* Configure RSS for IPv4 with src/dst addr as input set */
2831         if (rss_hf & ETH_RSS_IPV4) {
2832                 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER;
2833                 cfg.hash_flds = ICE_FLOW_HASH_IPV4;
2834                 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
2835                 if (ret)
2836                         PMD_DRV_LOG(ERR, "%s IPV4 rss flow fail %d",
2837                                     __func__, ret);
2838         }
2839
2840         /* Configure RSS for IPv6 with src/dst addr as input set */
2841         if (rss_hf & ETH_RSS_IPV6) {
2842                 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER;
2843                 cfg.hash_flds = ICE_FLOW_HASH_IPV6;
2844                 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
2845                 if (ret)
2846                         PMD_DRV_LOG(ERR, "%s IPV6 rss flow fail %d",
2847                                     __func__, ret);
2848         }
2849
2850         /* Configure RSS for udp4 with src/dst addr and port as input set */
2851         if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) {
2852                 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4 |
2853                                 ICE_FLOW_SEG_HDR_IPV_OTHER;
2854                 cfg.hash_flds = ICE_HASH_UDP_IPV4;
2855                 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
2856                 if (ret)
2857                         PMD_DRV_LOG(ERR, "%s UDP_IPV4 rss flow fail %d",
2858                                     __func__, ret);
2859         }
2860
2861         /* Configure RSS for udp6 with src/dst addr and port as input set */
2862         if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) {
2863                 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV6 |
2864                                 ICE_FLOW_SEG_HDR_IPV_OTHER;
2865                 cfg.hash_flds = ICE_HASH_UDP_IPV6;
2866                 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
2867                 if (ret)
2868                         PMD_DRV_LOG(ERR, "%s UDP_IPV6 rss flow fail %d",
2869                                     __func__, ret);
2870         }
2871
2872         /* Configure RSS for tcp4 with src/dst addr and port as input set */
2873         if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) {
2874                 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4 |
2875                                 ICE_FLOW_SEG_HDR_IPV_OTHER;
2876                 cfg.hash_flds = ICE_HASH_TCP_IPV4;
2877                 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
2878                 if (ret)
2879                         PMD_DRV_LOG(ERR, "%s TCP_IPV4 rss flow fail %d",
2880                                     __func__, ret);
2881         }
2882
2883         /* Configure RSS for tcp6 with src/dst addr and port as input set */
2884         if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) {
2885                 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6 |
2886                                 ICE_FLOW_SEG_HDR_IPV_OTHER;
2887                 cfg.hash_flds = ICE_HASH_TCP_IPV6;
2888                 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
2889                 if (ret)
2890                         PMD_DRV_LOG(ERR, "%s TCP_IPV6 rss flow fail %d",
2891                                     __func__, ret);
2892         }
2893
2894         /* Configure RSS for sctp4 with src/dst addr and port as input set */
2895         if (rss_hf & ETH_RSS_NONFRAG_IPV4_SCTP) {
2896                 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4 |
2897                                 ICE_FLOW_SEG_HDR_IPV_OTHER;
2898                 cfg.hash_flds = ICE_HASH_SCTP_IPV4;
2899                 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
2900                 if (ret)
2901                         PMD_DRV_LOG(ERR, "%s SCTP_IPV4 rss flow fail %d",
2902                                     __func__, ret);
2903         }
2904
2905         /* Configure RSS for sctp6 with src/dst addr and port as input set */
2906         if (rss_hf & ETH_RSS_NONFRAG_IPV6_SCTP) {
2907                 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV6 |
2908                                 ICE_FLOW_SEG_HDR_IPV_OTHER;
2909                 cfg.hash_flds = ICE_HASH_SCTP_IPV6;
2910                 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
2911                 if (ret)
2912                         PMD_DRV_LOG(ERR, "%s SCTP_IPV6 rss flow fail %d",
2913                                     __func__, ret);
2914         }
2915
2916         if (rss_hf & ETH_RSS_IPV4) {
2917                 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_IPV4 |
2918                                 ICE_FLOW_SEG_HDR_IPV_OTHER;
2919                 cfg.hash_flds = ICE_FLOW_HASH_IPV4;
2920                 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
2921                 if (ret)
2922                         PMD_DRV_LOG(ERR, "%s PPPoE_IPV4 rss flow fail %d",
2923                                     __func__, ret);
2924         }
2925
2926         if (rss_hf & ETH_RSS_IPV6) {
2927                 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_IPV6 |
2928                                 ICE_FLOW_SEG_HDR_IPV_OTHER;
2929                 cfg.hash_flds = ICE_FLOW_HASH_IPV6;
2930                 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
2931                 if (ret)
2932                         PMD_DRV_LOG(ERR, "%s PPPoE_IPV6 rss flow fail %d",
2933                                     __func__, ret);
2934         }
2935
2936         if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) {
2937                 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_UDP |
2938                                 ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER;
2939                 cfg.hash_flds = ICE_HASH_UDP_IPV4;
2940                 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
2941                 if (ret)
2942                         PMD_DRV_LOG(ERR, "%s PPPoE_IPV4_UDP rss flow fail %d",
2943                                     __func__, ret);
2944         }
2945
2946         if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) {
2947                 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_UDP |
2948                                 ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER;
2949                 cfg.hash_flds = ICE_HASH_UDP_IPV6;
2950                 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
2951                 if (ret)
2952                         PMD_DRV_LOG(ERR, "%s PPPoE_IPV6_UDP rss flow fail %d",
2953                                     __func__, ret);
2954         }
2955
2956         if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) {
2957                 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_TCP |
2958                                 ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER;
2959                 cfg.hash_flds = ICE_HASH_TCP_IPV4;
2960                 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
2961                 if (ret)
2962                         PMD_DRV_LOG(ERR, "%s PPPoE_IPV4_TCP rss flow fail %d",
2963                                     __func__, ret);
2964         }
2965
2966         if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) {
2967                 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_TCP |
2968                                 ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER;
2969                 cfg.hash_flds = ICE_HASH_TCP_IPV6;
2970                 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
2971                 if (ret)
2972                         PMD_DRV_LOG(ERR, "%s PPPoE_IPV6_TCP rss flow fail %d",
2973                                     __func__, ret);
2974         }
2975
2976         if (rss_hf & ETH_RSS_FRAG_IPV4) {
2977                 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_FRAG;
2978                 cfg.hash_flds = ICE_FLOW_HASH_IPV4 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_ID);
2979                 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
2980                 if (ret)
2981                         PMD_DRV_LOG(ERR, "%s IPV4_FRAG rss flow fail %d",
2982                                     __func__, ret);
2983         }
2984
2985         if (rss_hf & ETH_RSS_FRAG_IPV6) {
2986                 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_FRAG;
2987                 cfg.hash_flds = ICE_FLOW_HASH_IPV6 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_ID);
2988                 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
2989                 if (ret)
2990                         PMD_DRV_LOG(ERR, "%s IPV6_FRAG rss flow fail %d",
2991                                     __func__, ret);
2992         }
2993
2994         pf->rss_hf = rss_hf & ICE_RSS_HF_ALL;
2995 }
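
/*
 * Illustrative example (documentation only, not driver code): an application
 * selects which of the hash types handled above are enabled through the
 * generic ethdev RSS configuration; port_id, nb_rxq and nb_txq are
 * placeholders.
 *
 *     struct rte_eth_conf conf = { 0 };
 *
 *     conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
 *     conf.rx_adv_conf.rss_conf.rss_hf = ETH_RSS_IPV4 |
 *                                        ETH_RSS_NONFRAG_IPV4_UDP |
 *                                        ETH_RSS_NONFRAG_IPV4_TCP;
 *     ret = rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
 *
 * Bits outside ICE_RSS_HF_ALL are not programmed; pf->rss_hf records what
 * was actually configured.
 */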
2996
2997 static void
2998 ice_get_default_rss_key(uint8_t *rss_key, uint32_t rss_key_size)
2999 {
3000         static struct ice_aqc_get_set_rss_keys default_key;
3001         static bool default_key_done;
3002         uint8_t *key = (uint8_t *)&default_key;
3003         size_t i;
3004
3005         if (rss_key_size > sizeof(default_key)) {
3006                 PMD_DRV_LOG(WARNING,
3007                             "requested size %u is larger than default %zu, "
3008                             "only %zu bytes will be used for the key",
3009                             rss_key_size, sizeof(default_key),
3010                             sizeof(default_key));
3011         }
3012
3013         if (!default_key_done) {
3014                 /* Calculate the default hash key */
3015                 for (i = 0; i < sizeof(default_key); i++)
3016                         key[i] = (uint8_t)rte_rand();
3017                 default_key_done = true;
3018         }
3019         rte_memcpy(rss_key, key, RTE_MIN(rss_key_size, sizeof(default_key)));
3020 }
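
/*
 * Illustrative example (documentation only): the default key above is
 * generated randomly once per process, so applications that need a
 * reproducible hash across restarts can supply their own key instead;
 * "hash_key" is a placeholder name.
 *
 *     static uint8_t hash_key[ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE];
 *
 *     conf.rx_adv_conf.rss_conf.rss_key = hash_key;
 *     conf.rx_adv_conf.rss_conf.rss_key_len = sizeof(hash_key);
 */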
3021
3022 static int ice_init_rss(struct ice_pf *pf)
3023 {
3024         struct ice_hw *hw = ICE_PF_TO_HW(pf);
3025         struct ice_vsi *vsi = pf->main_vsi;
3026         struct rte_eth_dev_data *dev_data = pf->dev_data;
3027         struct ice_aq_get_set_rss_lut_params lut_params;
3028         struct rte_eth_rss_conf *rss_conf;
3029         struct ice_aqc_get_set_rss_keys key;
3030         uint16_t i, nb_q;
3031         int ret = 0;
3032         bool is_safe_mode = pf->adapter->is_safe_mode;
3033         uint32_t reg;
3034
3035         rss_conf = &dev_data->dev_conf.rx_adv_conf.rss_conf;
3036         nb_q = dev_data->nb_rx_queues;
3037         vsi->rss_key_size = ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE;
3038         vsi->rss_lut_size = pf->hash_lut_size;
3039
3040         if (nb_q == 0) {
3041                 PMD_DRV_LOG(WARNING,
3042                         "RSS is not supported because the number of Rx queues is zero");
3043                 return 0;
3044         }
3045
3046         if (is_safe_mode) {
3047                 PMD_DRV_LOG(WARNING, "RSS is not supported in safe mode");
3048                 return 0;
3049         }
3050
3051         if (!vsi->rss_key) {
3052                 vsi->rss_key = rte_zmalloc(NULL,
3053                                            vsi->rss_key_size, 0);
3054                 if (vsi->rss_key == NULL) {
3055                         PMD_DRV_LOG(ERR, "Failed to allocate memory for rss_key");
3056                         return -ENOMEM;
3057                 }
3058         }
3059         if (!vsi->rss_lut) {
3060                 vsi->rss_lut = rte_zmalloc(NULL,
3061                                            vsi->rss_lut_size, 0);
3062                 if (vsi->rss_lut == NULL) {
3063                         PMD_DRV_LOG(ERR, "Failed to allocate memory for rss_lut");
3064                         rte_free(vsi->rss_key);
3065                         vsi->rss_key = NULL;
3066                         return -ENOMEM;
3067                 }
3068         }
3069         /* configure RSS key */
3070         if (!rss_conf->rss_key)
3071                 ice_get_default_rss_key(vsi->rss_key, vsi->rss_key_size);
3072         else
3073                 rte_memcpy(vsi->rss_key, rss_conf->rss_key,
3074                            RTE_MIN(rss_conf->rss_key_len,
3075                                    vsi->rss_key_size));
3076
3077         rte_memcpy(key.standard_rss_key, vsi->rss_key, vsi->rss_key_size);
3078         ret = ice_aq_set_rss_key(hw, vsi->idx, &key);
3079         if (ret)
3080                 goto out;
3081
3082         /* init RSS LUT table */
3083         for (i = 0; i < vsi->rss_lut_size; i++)
3084                 vsi->rss_lut[i] = i % nb_q;
3085
3086         lut_params.vsi_handle = vsi->idx;
3087         lut_params.lut_size = vsi->rss_lut_size;
3088         lut_params.lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF;
3089         lut_params.lut = vsi->rss_lut;
3090         lut_params.global_lut_id = 0;
3091         ret = ice_aq_set_rss_lut(hw, &lut_params);
3092         if (ret)
3093                 goto out;
3094
3095         /* Enable registers for symmetric_toeplitz function. */
3096         reg = ICE_READ_REG(hw, VSIQF_HASH_CTL(vsi->vsi_id));
3097         reg = (reg & (~VSIQF_HASH_CTL_HASH_SCHEME_M)) |
3098                 (1 << VSIQF_HASH_CTL_HASH_SCHEME_S);
3099         ICE_WRITE_REG(hw, VSIQF_HASH_CTL(vsi->vsi_id), reg);
3100
3101         /* RSS hash configuration */
3102         ice_rss_hash_set(pf, rss_conf->rss_hf);
3103
3104         return 0;
3105 out:
3106         rte_free(vsi->rss_key);
3107         vsi->rss_key = NULL;
3108         rte_free(vsi->rss_lut);
3109         vsi->rss_lut = NULL;
3110         return -EINVAL;
3111 }
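
/*
 * Worked example of the round-robin LUT initialization above (illustration
 * only): with a 512-entry LUT and nb_q == 4, the table becomes
 *
 *     lut[0..511] = 0, 1, 2, 3, 0, 1, 2, 3, ...
 *
 * so the low bits of the packet hash pick a LUT slot and traffic is spread
 * evenly across the four Rx queues until the application overrides the
 * table via rte_eth_dev_rss_reta_update().
 */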
3112
3113 static int
3114 ice_dev_configure(struct rte_eth_dev *dev)
3115 {
3116         struct ice_adapter *ad =
3117                 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
3118         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3119         int ret;
3120
3121         /* Initialize to TRUE. If any Rx queue doesn't meet the bulk
3122          * allocation or vector Rx preconditions, it will be reset.
3123          */
3124         ad->rx_bulk_alloc_allowed = true;
3125         ad->tx_simple_allowed = true;
3126
3127         if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
3128                 dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
3129
3130         if (dev->data->nb_rx_queues) {
3131                 ret = ice_init_rss(pf);
3132                 if (ret) {
3133                         PMD_DRV_LOG(ERR, "Failed to enable rss for PF");
3134                         return ret;
3135                 }
3136         }
3137
3138         return 0;
3139 }
3140
3141 static void
3142 __vsi_queues_bind_intr(struct ice_vsi *vsi, uint16_t msix_vect,
3143                        int base_queue, int nb_queue)
3144 {
3145         struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
3146         uint32_t val, val_tx;
3147         int i;
3148
3149         for (i = 0; i < nb_queue; i++) {
3150                 /* do actual bind */
3151                 val = (msix_vect & QINT_RQCTL_MSIX_INDX_M) |
3152                       (0 << QINT_RQCTL_ITR_INDX_S) | QINT_RQCTL_CAUSE_ENA_M;
3153                 val_tx = (msix_vect & QINT_TQCTL_MSIX_INDX_M) |
3154                          (0 << QINT_TQCTL_ITR_INDX_S) | QINT_TQCTL_CAUSE_ENA_M;
3155
3156                 PMD_DRV_LOG(INFO, "queue %d is binding to vect %d",
3157                             base_queue + i, msix_vect);
3158                 /* set ITR0 value */
3159                 ICE_WRITE_REG(hw, GLINT_ITR(0, msix_vect), 0x2);
3160                 ICE_WRITE_REG(hw, QINT_RQCTL(base_queue + i), val);
3161                 ICE_WRITE_REG(hw, QINT_TQCTL(base_queue + i), val_tx);
3162         }
3163 }
3164
3165 void
3166 ice_vsi_queues_bind_intr(struct ice_vsi *vsi)
3167 {
3168         struct rte_eth_dev *dev = &rte_eth_devices[vsi->adapter->pf.dev_data->port_id];
3169         struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
3170         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
3171         struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
3172         uint16_t msix_vect = vsi->msix_intr;
3173         uint16_t nb_msix = RTE_MIN(vsi->nb_msix, intr_handle->nb_efd);
3174         uint16_t queue_idx = 0;
3175         int record = 0;
3176         int i;
3177
3178         /* clear Rx/Tx queue interrupt */
3179         for (i = 0; i < vsi->nb_used_qps; i++) {
3180                 ICE_WRITE_REG(hw, QINT_TQCTL(vsi->base_queue + i), 0);
3181                 ICE_WRITE_REG(hw, QINT_RQCTL(vsi->base_queue + i), 0);
3182         }
3183
3184         /* PF bind interrupt */
3185         if (rte_intr_dp_is_en(intr_handle)) {
3186                 queue_idx = 0;
3187                 record = 1;
3188         }
3189
3190         for (i = 0; i < vsi->nb_used_qps; i++) {
3191                 if (nb_msix <= 1) {
3192                         if (!rte_intr_allow_others(intr_handle))
3193                                 msix_vect = ICE_MISC_VEC_ID;
3194
3195                         /* uio: map all queues to one msix_vect */
3196                         __vsi_queues_bind_intr(vsi, msix_vect,
3197                                                vsi->base_queue + i,
3198                                                vsi->nb_used_qps - i);
3199
3200                         for (; !!record && i < vsi->nb_used_qps; i++)
3201                                 intr_handle->intr_vec[queue_idx + i] =
3202                                         msix_vect;
3203                         break;
3204                 }
3205
3206                 /* vfio 1:1 queue/msix_vect mapping */
3207                 __vsi_queues_bind_intr(vsi, msix_vect,
3208                                        vsi->base_queue + i, 1);
3209
3210                 if (!!record)
3211                         intr_handle->intr_vec[queue_idx + i] = msix_vect;
3212
3213                 msix_vect++;
3214                 nb_msix--;
3215         }
3216 }
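
/*
 * Worked example of the two mappings above (illustration only), for a VSI
 * with 4 used queue pairs and base_queue 0:
 *
 *   - vfio, nb_msix >= 4: queue 0 -> vector N, queue 1 -> vector N+1, ...
 *     (1:1 mapping, one interrupt per queue);
 *   - uio or nb_msix == 1: queues 0..3 all bound to a single vector
 *     (ICE_MISC_VEC_ID when sharing with the misc interrupt is required).
 *
 * intr_vec[] records the chosen vector per queue so the ethdev layer can
 * route per-queue Rx interrupt enable/disable requests correctly.
 */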
3217
3218 void
3219 ice_vsi_enable_queues_intr(struct ice_vsi *vsi)
3220 {
3221         struct rte_eth_dev *dev = &rte_eth_devices[vsi->adapter->pf.dev_data->port_id];
3222         struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
3223         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
3224         struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
3225         uint16_t msix_intr, i;
3226
3227         if (rte_intr_allow_others(intr_handle))
3228                 for (i = 0; i < vsi->nb_used_qps; i++) {
3229                         msix_intr = vsi->msix_intr + i;
3230                         ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr),
3231                                       GLINT_DYN_CTL_INTENA_M |
3232                                       GLINT_DYN_CTL_CLEARPBA_M |
3233                                       GLINT_DYN_CTL_ITR_INDX_M |
3234                                       GLINT_DYN_CTL_WB_ON_ITR_M);
3235                 }
3236         else
3237                 ICE_WRITE_REG(hw, GLINT_DYN_CTL(0),
3238                               GLINT_DYN_CTL_INTENA_M |
3239                               GLINT_DYN_CTL_CLEARPBA_M |
3240                               GLINT_DYN_CTL_ITR_INDX_M |
3241                               GLINT_DYN_CTL_WB_ON_ITR_M);
3242 }
3243
3244 static int
3245 ice_rxq_intr_setup(struct rte_eth_dev *dev)
3246 {
3247         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3248         struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
3249         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
3250         struct ice_vsi *vsi = pf->main_vsi;
3251         uint32_t intr_vector = 0;
3252
3253         rte_intr_disable(intr_handle);
3254
3255         /* check and configure queue intr-vector mapping */
3256         if ((rte_intr_cap_multiple(intr_handle) ||
3257              !RTE_ETH_DEV_SRIOV(dev).active) &&
3258             dev->data->dev_conf.intr_conf.rxq != 0) {
3259                 intr_vector = dev->data->nb_rx_queues;
3260                 if (intr_vector > ICE_MAX_INTR_QUEUE_NUM) {
3261                         PMD_DRV_LOG(ERR, "At most %d intr queues supported",
3262                                     ICE_MAX_INTR_QUEUE_NUM);
3263                         return -ENOTSUP;
3264                 }
3265                 if (rte_intr_efd_enable(intr_handle, intr_vector))
3266                         return -1;
3267         }
3268
3269         if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
3270                 intr_handle->intr_vec =
3271                         rte_zmalloc(NULL,
3272                                     dev->data->nb_rx_queues * sizeof(int), 0);
3273                 if (!intr_handle->intr_vec) {
3274                         PMD_DRV_LOG(ERR,
3275                                     "Failed to allocate %d rx_queues intr_vec",
3276                                     dev->data->nb_rx_queues);
3277                         return -ENOMEM;
3278                 }
3279         }
3280
3281         /* Map queues with MSIX interrupt */
3282         vsi->nb_used_qps = dev->data->nb_rx_queues;
3283         ice_vsi_queues_bind_intr(vsi);
3284
3285         /* Enable interrupts for all the queues */
3286         ice_vsi_enable_queues_intr(vsi);
3287
3288         rte_intr_enable(intr_handle);
3289
3290         return 0;
3291 }
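
/*
 * Illustrative application-side sketch (not driver code): Rx interrupt mode
 * is requested at configure time and armed per queue afterwards; port_id and
 * queue_id are placeholders.
 *
 *     conf.intr_conf.rxq = 1;
 *     rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
 *     ...
 *     rte_eth_dev_rx_intr_enable(port_id, queue_id);
 *     ...              sleep until rte_epoll_wait() reports the event
 *     rte_eth_dev_rx_intr_disable(port_id, queue_id);
 */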
3292
3293 static void
3294 ice_get_init_link_status(struct rte_eth_dev *dev)
3295 {
3296         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3297         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3298         bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false;
3299         struct ice_link_status link_status;
3300         int ret;
3301
3302         ret = ice_aq_get_link_info(hw->port_info, enable_lse,
3303                                    &link_status, NULL);
3304         if (ret != ICE_SUCCESS) {
3305                 PMD_DRV_LOG(ERR, "Failed to get link info");
3306                 pf->init_link_up = false;
3307                 return;
3308         }
3309
3310         if (link_status.link_info & ICE_AQ_LINK_UP)
3311                 pf->init_link_up = true;
3312 }
3313
3314 static int
3315 ice_dev_start(struct rte_eth_dev *dev)
3316 {
3317         struct rte_eth_dev_data *data = dev->data;
3318         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3319         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3320         struct ice_vsi *vsi = pf->main_vsi;
3321         uint16_t nb_rxq = 0;
3322         uint16_t nb_txq, i;
3323         uint16_t max_frame_size;
3324         int mask, ret;
3325
3326         /* program Tx queues' context in hardware */
3327         for (nb_txq = 0; nb_txq < data->nb_tx_queues; nb_txq++) {
3328                 ret = ice_tx_queue_start(dev, nb_txq);
3329                 if (ret) {
3330                         PMD_DRV_LOG(ERR, "fail to start Tx queue %u", nb_txq);
3331                         goto tx_err;
3332                 }
3333         }
3334
3335         /* program Rx queues' context in hardware */
3336         for (nb_rxq = 0; nb_rxq < data->nb_rx_queues; nb_rxq++) {
3337                 ret = ice_rx_queue_start(dev, nb_rxq);
3338                 if (ret) {
3339                         PMD_DRV_LOG(ERR, "fail to start Rx queue %u", nb_rxq);
3340                         goto rx_err;
3341                 }
3342         }
3343
3344         ice_set_rx_function(dev);
3345         ice_set_tx_function(dev);
3346
3347         mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
3348                         ETH_VLAN_EXTEND_MASK;
3349         ret = ice_vlan_offload_set(dev, mask);
3350         if (ret) {
3351                 PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
3352                 goto rx_err;
3353         }
3354
3355         /* enable Rx interrupt and map Rx queues to interrupt vectors */
3356         if (ice_rxq_intr_setup(dev))
3357                 return -EIO;
3358
3359         /* Enable receiving broadcast packets and transmitting packets */
3360         ret = ice_set_vsi_promisc(hw, vsi->idx,
3361                                   ICE_PROMISC_BCAST_RX | ICE_PROMISC_BCAST_TX |
3362                                   ICE_PROMISC_UCAST_TX | ICE_PROMISC_MCAST_TX,
3363                                   0);
3364         if (ret != ICE_SUCCESS)
3365                 PMD_DRV_LOG(INFO, "failed to set VSI broadcast mode");
3366
3367         ret = ice_aq_set_event_mask(hw, hw->port_info->lport,
3368                                     ((u16)(ICE_AQ_LINK_EVENT_LINK_FAULT |
3369                                      ICE_AQ_LINK_EVENT_PHY_TEMP_ALARM |
3370                                      ICE_AQ_LINK_EVENT_EXCESSIVE_ERRORS |
3371                                      ICE_AQ_LINK_EVENT_SIGNAL_DETECT |
3372                                      ICE_AQ_LINK_EVENT_AN_COMPLETED |
3373                                      ICE_AQ_LINK_EVENT_PORT_TX_SUSPENDED)),
3374                                      NULL);
3375         if (ret != ICE_SUCCESS)
3376                 PMD_DRV_LOG(WARNING, "Failed to set PHY event mask");
3377
3378         ice_get_init_link_status(dev);
3379
3380         ice_dev_set_link_up(dev);
3381
3382         /* Call the get_link_info AQ command to enable/disable LSE */
3383         ice_link_update(dev, 0);
3384
3385         pf->adapter_stopped = false;
3386
3387         /* Set the max frame size to the default value */
3388         max_frame_size = pf->dev_data->dev_conf.rxmode.max_rx_pkt_len ?
3389                 pf->dev_data->dev_conf.rxmode.max_rx_pkt_len :
3390                 ICE_FRAME_SIZE_MAX;
3391
3392         /* Write the max frame size to HW */
3393         ice_aq_set_mac_cfg(hw, max_frame_size, NULL);
3394
3395         return 0;
3396
3397         /* stop the already-started queues if we failed to start all queues */
3398 rx_err:
3399         for (i = 0; i < nb_rxq; i++)
3400                 ice_rx_queue_stop(dev, i);
3401 tx_err:
3402         for (i = 0; i < nb_txq; i++)
3403                 ice_tx_queue_stop(dev, i);
3404
3405         return -EIO;
3406 }
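
/*
 * Illustrative application-side sketch (not driver code) of the usual call
 * sequence that ends in ice_dev_start(); error handling omitted and all
 * names are placeholders.
 *
 *     rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
 *     for (q = 0; q < nb_rxq; q++)
 *             rte_eth_rx_queue_setup(port_id, q, nb_desc, socket, NULL, mp);
 *     for (q = 0; q < nb_txq; q++)
 *             rte_eth_tx_queue_setup(port_id, q, nb_desc, socket, NULL);
 *     rte_eth_dev_start(port_id);
 */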
3407
3408 static int
3409 ice_dev_reset(struct rte_eth_dev *dev)
3410 {
3411         int ret;
3412
3413         if (dev->data->sriov.active)
3414                 return -ENOTSUP;
3415
3416         ret = ice_dev_uninit(dev);
3417         if (ret) {
3418                 PMD_INIT_LOG(ERR, "failed to uninit device, status = %d", ret);
3419                 return -ENXIO;
3420         }
3421
3422         ret = ice_dev_init(dev);
3423         if (ret) {
3424                 PMD_INIT_LOG(ERR, "failed to init device, status = %d", ret);
3425                 return -ENXIO;
3426         }
3427
3428         return 0;
3429 }
3430
3431 static int
3432 ice_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
3433 {
3434         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3435         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3436         struct ice_vsi *vsi = pf->main_vsi;
3437         struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
3438         bool is_safe_mode = pf->adapter->is_safe_mode;
3439         u64 phy_type_low;
3440         u64 phy_type_high;
3441
3442         dev_info->min_rx_bufsize = ICE_BUF_SIZE_MIN;
3443         dev_info->max_rx_pktlen = ICE_FRAME_SIZE_MAX;
3444         dev_info->max_rx_queues = vsi->nb_qps;
3445         dev_info->max_tx_queues = vsi->nb_qps;
3446         dev_info->max_mac_addrs = vsi->max_macaddrs;
3447         dev_info->max_vfs = pci_dev->max_vfs;
3448         dev_info->max_mtu = dev_info->max_rx_pktlen - ICE_ETH_OVERHEAD;
3449         dev_info->min_mtu = RTE_ETHER_MIN_MTU;
3450
3451         dev_info->rx_offload_capa =
3452                 DEV_RX_OFFLOAD_VLAN_STRIP |
3453                 DEV_RX_OFFLOAD_JUMBO_FRAME |
3454                 DEV_RX_OFFLOAD_KEEP_CRC |
3455                 DEV_RX_OFFLOAD_SCATTER |
3456                 DEV_RX_OFFLOAD_VLAN_FILTER;
3457         dev_info->tx_offload_capa =
3458                 DEV_TX_OFFLOAD_VLAN_INSERT |
3459                 DEV_TX_OFFLOAD_TCP_TSO |
3460                 DEV_TX_OFFLOAD_MULTI_SEGS |
3461                 DEV_TX_OFFLOAD_MBUF_FAST_FREE;
3462         dev_info->flow_type_rss_offloads = 0;
3463
3464         if (!is_safe_mode) {
3465                 dev_info->rx_offload_capa |=
3466                         DEV_RX_OFFLOAD_IPV4_CKSUM |
3467                         DEV_RX_OFFLOAD_UDP_CKSUM |
3468                         DEV_RX_OFFLOAD_TCP_CKSUM |
3469                         DEV_RX_OFFLOAD_QINQ_STRIP |
3470                         DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
3471                         DEV_RX_OFFLOAD_VLAN_EXTEND |
3472                         DEV_RX_OFFLOAD_RSS_HASH;
3473                 dev_info->tx_offload_capa |=
3474                         DEV_TX_OFFLOAD_QINQ_INSERT |
3475                         DEV_TX_OFFLOAD_IPV4_CKSUM |
3476                         DEV_TX_OFFLOAD_UDP_CKSUM |
3477                         DEV_TX_OFFLOAD_TCP_CKSUM |
3478                         DEV_TX_OFFLOAD_SCTP_CKSUM |
3479                         DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
3480                         DEV_TX_OFFLOAD_OUTER_UDP_CKSUM;
3481                 dev_info->flow_type_rss_offloads |= ICE_RSS_OFFLOAD_ALL;
3482         }
3483
3484         dev_info->rx_queue_offload_capa = 0;
3485         dev_info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
3486
3487         dev_info->reta_size = pf->hash_lut_size;
3488         dev_info->hash_key_size = (VSIQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
3489
3490         dev_info->default_rxconf = (struct rte_eth_rxconf) {
3491                 .rx_thresh = {
3492                         .pthresh = ICE_DEFAULT_RX_PTHRESH,
3493                         .hthresh = ICE_DEFAULT_RX_HTHRESH,
3494                         .wthresh = ICE_DEFAULT_RX_WTHRESH,
3495                 },
3496                 .rx_free_thresh = ICE_DEFAULT_RX_FREE_THRESH,
3497                 .rx_drop_en = 0,
3498                 .offloads = 0,
3499         };
3500
3501         dev_info->default_txconf = (struct rte_eth_txconf) {
3502                 .tx_thresh = {
3503                         .pthresh = ICE_DEFAULT_TX_PTHRESH,
3504                         .hthresh = ICE_DEFAULT_TX_HTHRESH,
3505                         .wthresh = ICE_DEFAULT_TX_WTHRESH,
3506                 },
3507                 .tx_free_thresh = ICE_DEFAULT_TX_FREE_THRESH,
3508                 .tx_rs_thresh = ICE_DEFAULT_TX_RSBIT_THRESH,
3509                 .offloads = 0,
3510         };
3511
3512         dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
3513                 .nb_max = ICE_MAX_RING_DESC,
3514                 .nb_min = ICE_MIN_RING_DESC,
3515                 .nb_align = ICE_ALIGN_RING_DESC,
3516         };
3517
3518         dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
3519                 .nb_max = ICE_MAX_RING_DESC,
3520                 .nb_min = ICE_MIN_RING_DESC,
3521                 .nb_align = ICE_ALIGN_RING_DESC,
3522         };
3523
3524         dev_info->speed_capa = ETH_LINK_SPEED_10M |
3525                                ETH_LINK_SPEED_100M |
3526                                ETH_LINK_SPEED_1G |
3527                                ETH_LINK_SPEED_2_5G |
3528                                ETH_LINK_SPEED_5G |
3529                                ETH_LINK_SPEED_10G |
3530                                ETH_LINK_SPEED_20G |
3531                                ETH_LINK_SPEED_25G;
3532
3533         phy_type_low = hw->port_info->phy.phy_type_low;
3534         phy_type_high = hw->port_info->phy.phy_type_high;
3535
3536         if (ICE_PHY_TYPE_SUPPORT_50G(phy_type_low))
3537                 dev_info->speed_capa |= ETH_LINK_SPEED_50G;
3538
3539         if (ICE_PHY_TYPE_SUPPORT_100G_LOW(phy_type_low) ||
3540                         ICE_PHY_TYPE_SUPPORT_100G_HIGH(phy_type_high))
3541                 dev_info->speed_capa |= ETH_LINK_SPEED_100G;
3542
3543         dev_info->nb_rx_queues = dev->data->nb_rx_queues;
3544         dev_info->nb_tx_queues = dev->data->nb_tx_queues;
3545
3546         dev_info->default_rxportconf.burst_size = ICE_RX_MAX_BURST;
3547         dev_info->default_txportconf.burst_size = ICE_TX_MAX_BURST;
3548         dev_info->default_rxportconf.nb_queues = 1;
3549         dev_info->default_txportconf.nb_queues = 1;
3550         dev_info->default_rxportconf.ring_size = ICE_BUF_SIZE_MIN;
3551         dev_info->default_txportconf.ring_size = ICE_BUF_SIZE_MIN;
3552
3553         return 0;
3554 }
3555
3556 static inline int
3557 ice_atomic_read_link_status(struct rte_eth_dev *dev,
3558                             struct rte_eth_link *link)
3559 {
3560         struct rte_eth_link *dst = link;
3561         struct rte_eth_link *src = &dev->data->dev_link;
3562
3563         if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
3564                                 *(uint64_t *)src) == 0)
3565                 return -1;
3566
3567         return 0;
3568 }
3569
3570 static inline int
3571 ice_atomic_write_link_status(struct rte_eth_dev *dev,
3572                              struct rte_eth_link *link)
3573 {
3574         struct rte_eth_link *dst = &dev->data->dev_link;
3575         struct rte_eth_link *src = link;
3576
3577         if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
3578                                 *(uint64_t *)src) == 0)
3579                 return -1;
3580
3581         return 0;
3582 }
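
/*
 * Note on the helpers above: struct rte_eth_link is deliberately sized so
 * the whole link status fits in 64 bits, which lets a single
 * rte_atomic64_cmpset() copy it atomically without a lock. The compare
 * value is read from the destination itself, so the cmpset only fails if a
 * concurrent writer races in between, in which case -1 is returned. A
 * minimal sketch of the same idiom:
 *
 *     uint64_t old = *(volatile uint64_t *)dst;
 *
 *     if (rte_atomic64_cmpset((uint64_t *)dst, old, *(uint64_t *)src) == 0)
 *             return -1;
 */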
3583
3584 static int
3585 ice_link_update(struct rte_eth_dev *dev, int wait_to_complete)
3586 {
3587 #define CHECK_INTERVAL 100  /* 100ms */
3588 #define MAX_REPEAT_TIME 10  /* 1s (10 * 100ms) in total */
3589         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3590         struct ice_link_status link_status;
3591         struct rte_eth_link link, old;
3592         int status;
3593         unsigned int rep_cnt = MAX_REPEAT_TIME;
3594         bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false;
3595
3596         memset(&link, 0, sizeof(link));
3597         memset(&old, 0, sizeof(old));
3598         memset(&link_status, 0, sizeof(link_status));
3599         ice_atomic_read_link_status(dev, &old);
3600
3601         do {
3602                 /* Get link status information from hardware */
3603                 status = ice_aq_get_link_info(hw->port_info, enable_lse,
3604                                               &link_status, NULL);
3605                 if (status != ICE_SUCCESS) {
3606                         link.link_speed = ETH_SPEED_NUM_100M;
3607                         link.link_duplex = ETH_LINK_FULL_DUPLEX;
3608                         PMD_DRV_LOG(ERR, "Failed to get link info");
3609                         goto out;
3610                 }
3611
3612                 link.link_status = link_status.link_info & ICE_AQ_LINK_UP;
3613                 if (!wait_to_complete || link.link_status)
3614                         break;
3615
3616                 rte_delay_ms(CHECK_INTERVAL);
3617         } while (--rep_cnt);
3618
3619         if (!link.link_status)
3620                 goto out;
3621
3622         /* Full-duplex operation at all supported speeds */
3623         link.link_duplex = ETH_LINK_FULL_DUPLEX;
3624
3625         /* Parse the link status */
3626         switch (link_status.link_speed) {
3627         case ICE_AQ_LINK_SPEED_10MB:
3628                 link.link_speed = ETH_SPEED_NUM_10M;
3629                 break;
3630         case ICE_AQ_LINK_SPEED_100MB:
3631                 link.link_speed = ETH_SPEED_NUM_100M;
3632                 break;
3633         case ICE_AQ_LINK_SPEED_1000MB:
3634                 link.link_speed = ETH_SPEED_NUM_1G;
3635                 break;
3636         case ICE_AQ_LINK_SPEED_2500MB:
3637                 link.link_speed = ETH_SPEED_NUM_2_5G;
3638                 break;
3639         case ICE_AQ_LINK_SPEED_5GB:
3640                 link.link_speed = ETH_SPEED_NUM_5G;
3641                 break;
3642         case ICE_AQ_LINK_SPEED_10GB:
3643                 link.link_speed = ETH_SPEED_NUM_10G;
3644                 break;
3645         case ICE_AQ_LINK_SPEED_20GB:
3646                 link.link_speed = ETH_SPEED_NUM_20G;
3647                 break;
3648         case ICE_AQ_LINK_SPEED_25GB:
3649                 link.link_speed = ETH_SPEED_NUM_25G;
3650                 break;
3651         case ICE_AQ_LINK_SPEED_40GB:
3652                 link.link_speed = ETH_SPEED_NUM_40G;
3653                 break;
3654         case ICE_AQ_LINK_SPEED_50GB:
3655                 link.link_speed = ETH_SPEED_NUM_50G;
3656                 break;
3657         case ICE_AQ_LINK_SPEED_100GB:
3658                 link.link_speed = ETH_SPEED_NUM_100G;
3659                 break;
3660         case ICE_AQ_LINK_SPEED_UNKNOWN:
3661                 PMD_DRV_LOG(ERR, "Unknown link speed");
3662                 link.link_speed = ETH_SPEED_NUM_UNKNOWN;
3663                 break;
3664         default:
3665                 PMD_DRV_LOG(ERR, "No link speed reported");
3666                 link.link_speed = ETH_SPEED_NUM_NONE;
3667                 break;
3668         }
3669
3670         link.link_autoneg = !(dev->data->dev_conf.link_speeds &
3671                               ETH_LINK_SPEED_FIXED);
3672
3673 out:
3674         ice_atomic_write_link_status(dev, &link);
3675         if (link.link_status == old.link_status)
3676                 return -1;
3677
3678         return 0;
3679 }
3680
3681 /* Force the physical link state by getting the current PHY capabilities from
3682  * hardware and setting the PHY config based on the determined capabilities. If
3683  * the link changes, a link event will be triggered because both the Enable Automatic
3684  * Link Update and LESM Enable bits are set when setting the PHY capabilities.
3685  */
3686 static enum ice_status
3687 ice_force_phys_link_state(struct ice_hw *hw, bool link_up)
3688 {
3689         struct ice_aqc_set_phy_cfg_data cfg = { 0 };
3690         struct ice_aqc_get_phy_caps_data *pcaps;
3691         struct ice_port_info *pi;
3692         enum ice_status status;
3693
3694         if (!hw || !hw->port_info)
3695                 return ICE_ERR_PARAM;
3696
3697         pi = hw->port_info;
3698
3699         pcaps = (struct ice_aqc_get_phy_caps_data *)
3700                 ice_malloc(hw, sizeof(*pcaps));
3701         if (!pcaps)
3702                 return ICE_ERR_NO_MEMORY;
3703
3704         status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG,
3705                                      pcaps, NULL);
3706         if (status)
3707                 goto out;
3708
3709         /* No change in link */
3710         if (link_up == !!(pcaps->caps & ICE_AQC_PHY_EN_LINK) &&
3711             link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP))
3712                 goto out;
3713
3714         cfg.phy_type_low = pcaps->phy_type_low;
3715         cfg.phy_type_high = pcaps->phy_type_high;
3716         cfg.caps = pcaps->caps | ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
3717         cfg.low_power_ctrl_an = pcaps->low_power_ctrl_an;
3718         cfg.eee_cap = pcaps->eee_cap;
3719         cfg.eeer_value = pcaps->eeer_value;
3720         cfg.link_fec_opt = pcaps->link_fec_options;
3721         if (link_up)
3722                 cfg.caps |= ICE_AQ_PHY_ENA_LINK;
3723         else
3724                 cfg.caps &= ~ICE_AQ_PHY_ENA_LINK;
3725
3726         status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL);
3727
3728 out:
3729         ice_free(hw, pcaps);
3730         return status;
3731 }
3732
3733 static int
3734 ice_dev_set_link_up(struct rte_eth_dev *dev)
3735 {
3736         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3737
3738         return ice_force_phys_link_state(hw, true);
3739 }
3740
3741 static int
3742 ice_dev_set_link_down(struct rte_eth_dev *dev)
3743 {
3744         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3745
3746         return ice_force_phys_link_state(hw, false);
3747 }
3748
3749 static int
3750 ice_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
3751 {
3752         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3753         struct rte_eth_dev_data *dev_data = pf->dev_data;
3754         uint32_t frame_size = mtu + ICE_ETH_OVERHEAD;
3755
3756         /* check if mtu is within the allowed range */
3757         if (mtu < RTE_ETHER_MIN_MTU || frame_size > ICE_FRAME_SIZE_MAX)
3758                 return -EINVAL;
3759
3760         /* MTU cannot be changed while the port is started */
3761         if (dev_data->dev_started) {
3762                 PMD_DRV_LOG(ERR,
3763                             "port %d must be stopped before configuration",
3764                             dev_data->port_id);
3765                 return -EBUSY;
3766         }
3767
3768         if (frame_size > ICE_ETH_MAX_LEN)
3769                 dev_data->dev_conf.rxmode.offloads |=
3770                         DEV_RX_OFFLOAD_JUMBO_FRAME;
3771         else
3772                 dev_data->dev_conf.rxmode.offloads &=
3773                         ~DEV_RX_OFFLOAD_JUMBO_FRAME;
3774
3775         dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
3776
3777         return 0;
3778 }
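
/*
 * Worked example for the check above (illustration only): ICE_ETH_OVERHEAD
 * accounts for the L2 bytes counted on the wire (typically Ethernet header
 * + CRC + two VLAN tags = 14 + 4 + 8 = 26 bytes), so an MTU of 1500 maps to
 * a max frame size of 1526 and a 9000-byte jumbo MTU maps to 9026, which
 * must stay within ICE_FRAME_SIZE_MAX. Application side:
 *
 *     rte_eth_dev_set_mtu(port_id, 9000);
 */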
3779
3780 static int ice_macaddr_set(struct rte_eth_dev *dev,
3781                            struct rte_ether_addr *mac_addr)
3782 {
3783         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3784         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3785         struct ice_vsi *vsi = pf->main_vsi;
3786         struct ice_mac_filter *f;
3787         uint8_t flags = 0;
3788         int ret;
3789
3790         if (!rte_is_valid_assigned_ether_addr(mac_addr)) {
3791                 PMD_DRV_LOG(ERR, "Tried to set invalid MAC address.");
3792                 return -EINVAL;
3793         }
3794
3795         TAILQ_FOREACH(f, &vsi->mac_list, next) {
3796                 if (rte_is_same_ether_addr(&pf->dev_addr, &f->mac_info.mac_addr))
3797                         break;
3798         }
3799
3800         if (!f) {
3801                 PMD_DRV_LOG(ERR, "Failed to find filter for default mac");
3802                 return -EIO;
3803         }
3804
3805         ret = ice_remove_mac_filter(vsi, &f->mac_info.mac_addr);
3806         if (ret != ICE_SUCCESS) {
3807                 PMD_DRV_LOG(ERR, "Failed to delete mac filter");
3808                 return -EIO;
3809         }
3810         ret = ice_add_mac_filter(vsi, mac_addr);
3811         if (ret != ICE_SUCCESS) {
3812                 PMD_DRV_LOG(ERR, "Failed to add mac filter");
3813                 return -EIO;
3814         }
3815         rte_ether_addr_copy(mac_addr, &pf->dev_addr);
3816
3817         flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL;
3818         ret = ice_aq_manage_mac_write(hw, mac_addr->addr_bytes, flags, NULL);
3819         if (ret != ICE_SUCCESS)
3820                 PMD_DRV_LOG(ERR, "Failed to set manage mac");
3821
3822         return 0;
3823 }
3824
3825 /* Add a MAC address, and update filters */
3826 static int
3827 ice_macaddr_add(struct rte_eth_dev *dev,
3828                 struct rte_ether_addr *mac_addr,
3829                 __rte_unused uint32_t index,
3830                 __rte_unused uint32_t pool)
3831 {
3832         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3833         struct ice_vsi *vsi = pf->main_vsi;
3834         int ret;
3835
3836         ret = ice_add_mac_filter(vsi, mac_addr);
3837         if (ret != ICE_SUCCESS) {
3838                 PMD_DRV_LOG(ERR, "Failed to add MAC filter");
3839                 return -EINVAL;
3840         }
3841
3842         return ICE_SUCCESS;
3843 }
3844
3845 /* Remove a MAC address, and update filters */
3846 static void
3847 ice_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
3848 {
3849         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3850         struct ice_vsi *vsi = pf->main_vsi;
3851         struct rte_eth_dev_data *data = dev->data;
3852         struct rte_ether_addr *macaddr;
3853         int ret;
3854
3855         macaddr = &data->mac_addrs[index];
3856         ret = ice_remove_mac_filter(vsi, macaddr);
3857         if (ret) {
3858                 PMD_DRV_LOG(ERR, "Failed to remove MAC filter");
3859                 return;
3860         }
3861 }
3862
3863 static int
3864 ice_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
3865 {
3866         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3867         struct ice_vlan vlan = ICE_VLAN(RTE_ETHER_TYPE_VLAN, vlan_id);
3868         struct ice_vsi *vsi = pf->main_vsi;
3869         int ret;
3870
3871         PMD_INIT_FUNC_TRACE();
3872
3873         /**
3874          * VLAN 0 is the generic filter for untagged packets
3875          * and can't be added or removed by the user.
3876          */
3877         if (vlan_id == 0)
3878                 return 0;
3879
3880         if (on) {
3881                 ret = ice_add_vlan_filter(vsi, &vlan);
3882                 if (ret < 0) {
3883                         PMD_DRV_LOG(ERR, "Failed to add vlan filter");
3884                         return -EINVAL;
3885                 }
3886         } else {
3887                 ret = ice_remove_vlan_filter(vsi, &vlan);
3888                 if (ret < 0) {
3889                         PMD_DRV_LOG(ERR, "Failed to remove vlan filter");
3890                         return -EINVAL;
3891                 }
3892         }
3893
3894         return 0;
3895 }
3896
3897 /* In Single VLAN Mode (SVM), single VLAN filters via ICE_SW_LKUP_VLAN are
3898  * based on the inner VLAN ID, so the VLAN TPID (i.e. 0x8100 or 0x88a8)
3899  * doesn't matter. In Double VLAN Mode (DVM), outer/single VLAN filters via
3900  * ICE_SW_LKUP_VLAN are based on the outer/single VLAN ID + VLAN TPID.
3901  *
3902  * For both modes add a VLAN 0 + no VLAN TPID filter to handle untagged traffic
3903  * when VLAN pruning is enabled. Also, this handles VLAN 0 priority tagged
3904  * traffic in SVM, since the VLAN TPID isn't part of filtering.
3905  *
3906  * If DVM is enabled then an explicit VLAN 0 + VLAN TPID filter needs to be
3907  * added to allow VLAN 0 priority tagged traffic in DVM, since the VLAN TPID is
3908  * part of filtering.
3909  */
3910 static int
3911 ice_vsi_add_vlan_zero(struct ice_vsi *vsi)
3912 {
3913         struct ice_vlan vlan;
3914         int err;
3915
3916         vlan = ICE_VLAN(0, 0);
3917         err = ice_add_vlan_filter(vsi, &vlan);
3918         if (err) {
3919                 PMD_DRV_LOG(DEBUG, "Failed to add VLAN ID 0");
3920                 return err;
3921         }
3922
3923         /* in SVM both VLAN 0 filters are identical */
3924         if (!ice_is_dvm_ena(&vsi->adapter->hw))
3925                 return 0;
3926
3927         vlan = ICE_VLAN(RTE_ETHER_TYPE_VLAN, 0);
3928         err = ice_add_vlan_filter(vsi, &vlan);
3929         if (err) {
3930                 PMD_DRV_LOG(DEBUG, "Failed to add VLAN ID 0 in double VLAN mode");
3931                 return err;
3932         }
3933
3934         return 0;
3935 }
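
/*
 * Concrete effect of the function above (illustration only):
 *
 *   - SVM: one filter, VLAN (TPID 0, VID 0), matching both untagged and
 *     priority-tagged traffic since the TPID is not part of filtering;
 *   - DVM: two filters, VLAN (TPID 0, VID 0) for untagged traffic plus
 *     VLAN (TPID 0x8100, VID 0) for priority-tagged traffic, because the
 *     outer TPID is part of the filter key in double VLAN mode.
 */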
3936
3937 /*
3938  * Delete the VLAN 0 filters in the same manner that they were added in
3939  * ice_vsi_add_vlan_zero.
3940  */
3941 static int
3942 ice_vsi_del_vlan_zero(struct ice_vsi *vsi)
3943 {
3944         struct ice_vlan vlan;
3945         int err;
3946
3947         vlan = ICE_VLAN(0, 0);
3948         err = ice_remove_vlan_filter(vsi, &vlan);
3949         if (err) {
3950                 PMD_DRV_LOG(DEBUG, "Failed to remove VLAN ID 0");
3951                 return err;
3952         }
3953
3954         /* in SVM both VLAN 0 filters are identical */
3955         if (!ice_is_dvm_ena(&vsi->adapter->hw))
3956                 return 0;
3957
3958         vlan = ICE_VLAN(RTE_ETHER_TYPE_VLAN, 0);
3959         err = ice_remove_vlan_filter(vsi, &vlan);
3960         if (err) {
3961                 PMD_DRV_LOG(DEBUG, "Failed to remove VLAN ID 0 in double VLAN mode");
3962                 return err;
3963         }
3964
3965         return 0;
3966 }
3967
3968 /* Configure vlan filter on or off */
3969 static int
3970 ice_vsi_config_vlan_filter(struct ice_vsi *vsi, bool on)
3971 {
3972         struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
3973         struct ice_vsi_ctx ctxt;
3974         uint8_t sw_flags2;
3975         int ret = 0;
3976
3977         sw_flags2 = ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
3978
3979         if (on)
3980                 vsi->info.sw_flags2 |= sw_flags2;
3981         else
3982                 vsi->info.sw_flags2 &= ~sw_flags2;
3983
3984         vsi->info.sw_id = hw->port_info->sw_id;
3985         (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
3986         ctxt.info.valid_sections =
3987                 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_SW_VALID |
3988                                  ICE_AQ_VSI_PROP_SECURITY_VALID);
3989         ctxt.vsi_num = vsi->vsi_id;
3990
3991         ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
3992         if (ret) {
3993                 PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan rx pruning",
3994                             on ? "enable" : "disable");
3995                 return -EINVAL;
3996         } else {
3997                 vsi->info.valid_sections |=
3998                         rte_cpu_to_le_16(ICE_AQ_VSI_PROP_SW_VALID |
3999                                          ICE_AQ_VSI_PROP_SECURITY_VALID);
4000         }
4001
4002         /* consistent with other drivers: allow untagged packets when the VLAN filter is on */
4003         if (on)
4004                 ret = ice_vsi_add_vlan_zero(vsi);
4005         else
4006                 ret = ice_vsi_del_vlan_zero(vsi);
4007
4008         return ret;
4009 }
4010
4011 /* Manage VLAN stripping for the VSI for Rx */
4012 static int
4013 ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena)
4014 {
4015         struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
4016         struct ice_vsi_ctx ctxt;
4017         enum ice_status status;
4018         int err = 0;
4019
4020         /* do not allow modifying VLAN stripping when a port VLAN is configured
4021          * on this VSI
4022          */
4023         if (vsi->info.port_based_inner_vlan)
4024                 return 0;
4025
4026         memset(&ctxt, 0, sizeof(ctxt));
4027
4028         if (ena)
4029                 /* Strip VLAN tag from Rx packet and put it in the desc */
4030                 ctxt.info.inner_vlan_flags =
4031                                         ICE_AQ_VSI_INNER_VLAN_EMODE_STR_BOTH;
4032         else
4033                 /* Disable stripping. Leave tag in packet */
4034                 ctxt.info.inner_vlan_flags =
4035                                         ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING;
4036
4037         /* Allow all packets untagged/tagged */
4038         ctxt.info.inner_vlan_flags |= ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL;
4039
4040         ctxt.info.valid_sections = rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID);
4041
4042         status = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
4043         if (status) {
4044                 PMD_DRV_LOG(ERR, "Update VSI failed to %s vlan stripping",
4045                             ena ? "enable" : "disable");
4046                 err = -EIO;
4047         } else {
4048                 vsi->info.inner_vlan_flags = ctxt.info.inner_vlan_flags;
4049         }
4050
4051         return err;
4052 }
4053
4054 static int
4055 ice_vsi_ena_inner_stripping(struct ice_vsi *vsi)
4056 {
4057         return ice_vsi_manage_vlan_stripping(vsi, true);
4058 }
4059
4060 static int
4061 ice_vsi_dis_inner_stripping(struct ice_vsi *vsi)
4062 {
4063         return ice_vsi_manage_vlan_stripping(vsi, false);
4064 }
4065
4066 static int ice_vsi_ena_outer_stripping(struct ice_vsi *vsi)
4067 {
4068         struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
4069         struct ice_vsi_ctx ctxt;
4070         enum ice_status status;
4071         int err = 0;
4072
4073         /* do not allow modifying VLAN stripping when a port VLAN is configured
4074          * on this VSI
4075          */
4076         if (vsi->info.port_based_outer_vlan)
4077                 return 0;
4078
4079         memset(&ctxt, 0, sizeof(ctxt));
4080
4081         ctxt.info.valid_sections =
4082                 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);
4083         /* clear current outer VLAN strip settings */
4084         ctxt.info.outer_vlan_flags = vsi->info.outer_vlan_flags &
4085                 ~(ICE_AQ_VSI_OUTER_VLAN_EMODE_M | ICE_AQ_VSI_OUTER_TAG_TYPE_M);
4086         ctxt.info.outer_vlan_flags |=
4087                 (ICE_AQ_VSI_OUTER_VLAN_EMODE_SHOW_BOTH <<
4088                  ICE_AQ_VSI_OUTER_VLAN_EMODE_S) |
4089                 (ICE_AQ_VSI_OUTER_TAG_VLAN_8100 <<
4090                  ICE_AQ_VSI_OUTER_TAG_TYPE_S);
4091
4092         status = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
4093         if (status) {
4094                 PMD_DRV_LOG(ERR, "Update VSI failed to enable outer VLAN stripping");
4095                 err = -EIO;
4096         } else {
4097                 vsi->info.outer_vlan_flags = ctxt.info.outer_vlan_flags;
4098         }
4099
4100         return err;
4101 }
4102
4103 static int
4104 ice_vsi_dis_outer_stripping(struct ice_vsi *vsi)
4105 {
4106         struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
4107         struct ice_vsi_ctx ctxt;
4108         enum ice_status status;
4109         int err = 0;
4110
4111         if (vsi->info.port_based_outer_vlan)
4112                 return 0;
4113
4114         memset(&ctxt, 0, sizeof(ctxt));
4115
4116         ctxt.info.valid_sections =
4117                 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);
4118         /* clear current outer VLAN strip settings */
4119         ctxt.info.outer_vlan_flags = vsi->info.outer_vlan_flags &
4120                 ~ICE_AQ_VSI_OUTER_VLAN_EMODE_M;
4121         ctxt.info.outer_vlan_flags |= ICE_AQ_VSI_OUTER_VLAN_EMODE_NOTHING <<
4122                 ICE_AQ_VSI_OUTER_VLAN_EMODE_S;
4123
4124         status = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
4125         if (status) {
4126                 PMD_DRV_LOG(ERR, "Update VSI failed to disable outer VLAN stripping");
4127                 err = -EIO;
4128         } else {
4129                 vsi->info.outer_vlan_flags = ctxt.info.outer_vlan_flags;
4130         }
4131
4132         return err;
4133 }
4134
4135 static int
4136 ice_vsi_config_vlan_stripping(struct ice_vsi *vsi, bool ena)
4137 {
4138         struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
4139         int ret;
4140
4141         if (ice_is_dvm_ena(hw)) {
4142                 if (ena)
4143                         ret = ice_vsi_ena_outer_stripping(vsi);
4144                 else
4145                         ret = ice_vsi_dis_outer_stripping(vsi);
4146         } else {
4147                 if (ena)
4148                         ret = ice_vsi_ena_inner_stripping(vsi);
4149                 else
4150                         ret = ice_vsi_dis_inner_stripping(vsi);
4151         }
4152
4153         return ret;
4154 }
4155
4156 static int
4157 ice_vlan_offload_set(struct rte_eth_dev *dev, int mask)
4158 {
4159         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4160         struct ice_vsi *vsi = pf->main_vsi;
4161         struct rte_eth_rxmode *rxmode;
4162
4163         rxmode = &dev->data->dev_conf.rxmode;
4164         if (mask & ETH_VLAN_FILTER_MASK) {
4165                 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
4166                         ice_vsi_config_vlan_filter(vsi, true);
4167                 else
4168                         ice_vsi_config_vlan_filter(vsi, false);
4169         }
4170
4171         if (mask & ETH_VLAN_STRIP_MASK) {
4172                 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
4173                         ice_vsi_config_vlan_stripping(vsi, true);
4174                 else
4175                         ice_vsi_config_vlan_stripping(vsi, false);
4176         }
4177
4178         return 0;
4179 }
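
/*
 * Illustrative application-side sketch (not driver code): the generic
 * ethdev call below updates dev_conf.rxmode.offloads and invokes this
 * handler with the mask of changed items; port_id is a placeholder.
 *
 *     int mask = ETH_VLAN_STRIP_OFFLOAD | ETH_VLAN_FILTER_OFFLOAD;
 *
 *     rte_eth_dev_set_vlan_offload(port_id, mask);
 */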
4180
4181 static int
4182 ice_get_rss_lut(struct ice_vsi *vsi, uint8_t *lut, uint16_t lut_size)
4183 {
4184         struct ice_aq_get_set_rss_lut_params lut_params;
4185         struct ice_pf *pf = ICE_VSI_TO_PF(vsi);
4186         struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
4187         int ret;
4188
4189         if (!lut)
4190                 return -EINVAL;
4191
4192         if (pf->flags & ICE_FLAG_RSS_AQ_CAPABLE) {
4193                 lut_params.vsi_handle = vsi->idx;
4194                 lut_params.lut_size = lut_size;
4195                 lut_params.lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF;
4196                 lut_params.lut = lut;
4197                 lut_params.global_lut_id = 0;
4198                 ret = ice_aq_get_rss_lut(hw, &lut_params);
4199                 if (ret) {
4200                         PMD_DRV_LOG(ERR, "Failed to get RSS lookup table");
4201                         return -EINVAL;
4202                 }
4203         } else {
4204                 uint32_t *lut_dw = (uint32_t *)lut;
4205                 uint16_t i, lut_size_dw = lut_size / 4;
4206
4207                 for (i = 0; i < lut_size_dw; i++)
4208                         lut_dw[i] = ICE_READ_REG(hw, PFQF_HLUT(i));
4209         }
4210
4211         return 0;
4212 }
4213
4214 static int
4215 ice_set_rss_lut(struct ice_vsi *vsi, uint8_t *lut, uint16_t lut_size)
4216 {
4217         struct ice_aq_get_set_rss_lut_params lut_params;
4218         struct ice_pf *pf;
4219         struct ice_hw *hw;
4220         int ret;
4221
4222         if (!vsi || !lut)
4223                 return -EINVAL;
4224
4225         pf = ICE_VSI_TO_PF(vsi);
4226         hw = ICE_VSI_TO_HW(vsi);
4227
4228         if (pf->flags & ICE_FLAG_RSS_AQ_CAPABLE) {
4229                 lut_params.vsi_handle = vsi->idx;
4230                 lut_params.lut_size = lut_size;
4231                 lut_params.lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF;
4232                 lut_params.lut = lut;
4233                 lut_params.global_lut_id = 0;
4234                 ret = ice_aq_set_rss_lut(hw, &lut_params);
4235                 if (ret) {
4236                         PMD_DRV_LOG(ERR, "Failed to set RSS lookup table");
4237                         return -EINVAL;
4238                 }
4239         } else {
4240                 uint64_t *lut_dw = (uint64_t *)lut;
4241                 uint16_t i, lut_size_dw = lut_size / 4;
4242
4243                 for (i = 0; i < lut_size_dw; i++)
4244                         ICE_WRITE_REG(hw, PFQF_HLUT(i), lut_dw[i]);
4245
4246                 ice_flush(hw);
4247         }
4248
4249         return 0;
4250 }
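
/*
 * Note on the register fallback above: the LUT is a byte array with one
 * queue index per entry, and each 32-bit PFQF_HLUT register packs four of
 * those entries, hence the lut_size / 4 register accesses. For example, a
 * 512-byte LUT maps onto PFQF_HLUT(0)..PFQF_HLUT(127).
 */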

static int
ice_rss_reta_update(struct rte_eth_dev *dev,
                    struct rte_eth_rss_reta_entry64 *reta_conf,
                    uint16_t reta_size)
{
        struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        uint16_t i, lut_size = pf->hash_lut_size;
        uint16_t idx, shift;
        uint8_t *lut;
        int ret;

        if (reta_size != ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128 &&
            reta_size != ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512 &&
            reta_size != ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K) {
                PMD_DRV_LOG(ERR,
                            "The size of the hash lookup table configured (%d) "
                            "doesn't match what the hardware supports "
                            "(128, 512, 2048)",
                            reta_size);
                return -EINVAL;
        }

        /* The current LUT size MUST be used when reading back the RSS
         * lookup table; otherwise the request fails with error code -100.
         */
        lut = rte_zmalloc(NULL, RTE_MAX(reta_size, lut_size), 0);
        if (!lut) {
                PMD_DRV_LOG(ERR, "No memory can be allocated");
                return -ENOMEM;
        }
        ret = ice_get_rss_lut(pf->main_vsi, lut, lut_size);
        if (ret)
                goto out;

        for (i = 0; i < reta_size; i++) {
                idx = i / RTE_RETA_GROUP_SIZE;
                shift = i % RTE_RETA_GROUP_SIZE;
                if (reta_conf[idx].mask & (1ULL << shift))
                        lut[i] = reta_conf[idx].reta[shift];
        }
        ret = ice_set_rss_lut(pf->main_vsi, lut, reta_size);
        if (ret == 0 && lut_size != reta_size) {
                PMD_DRV_LOG(INFO,
                            "The size of the hash lookup table is changed from (%d) to (%d)",
                            lut_size, reta_size);
                pf->hash_lut_size = reta_size;
        }

out:
        rte_free(lut);

        return ret;
}
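
/*
 * Usage sketch (hypothetical application code; port_id and nb_rxq are
 * assumptions): spreading a 512-entry redirection table round-robin over
 * the RX queues through the generic ethdev API:
 *
 *      struct rte_eth_rss_reta_entry64 reta_conf[512 / RTE_RETA_GROUP_SIZE];
 *      uint16_t i;
 *
 *      memset(reta_conf, 0, sizeof(reta_conf));
 *      for (i = 0; i < 512; i++) {
 *              reta_conf[i / RTE_RETA_GROUP_SIZE].mask |=
 *                      1ULL << (i % RTE_RETA_GROUP_SIZE);
 *              reta_conf[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] =
 *                      i % nb_rxq;
 *      }
 *      rte_eth_dev_rss_reta_update(port_id, reta_conf, 512);
 */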

static int
ice_rss_reta_query(struct rte_eth_dev *dev,
                   struct rte_eth_rss_reta_entry64 *reta_conf,
                   uint16_t reta_size)
{
        struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        uint16_t i, lut_size = pf->hash_lut_size;
        uint16_t idx, shift;
        uint8_t *lut;
        int ret;

        if (reta_size != lut_size) {
                PMD_DRV_LOG(ERR,
                            "The size of the hash lookup table configured (%d) "
                            "doesn't match what the hardware supports (%d)",
                            reta_size, lut_size);
                return -EINVAL;
        }

        lut = rte_zmalloc(NULL, reta_size, 0);
        if (!lut) {
                PMD_DRV_LOG(ERR, "No memory can be allocated");
                return -ENOMEM;
        }

        ret = ice_get_rss_lut(pf->main_vsi, lut, reta_size);
        if (ret)
                goto out;

        for (i = 0; i < reta_size; i++) {
                idx = i / RTE_RETA_GROUP_SIZE;
                shift = i % RTE_RETA_GROUP_SIZE;
                if (reta_conf[idx].mask & (1ULL << shift))
                        reta_conf[idx].reta[shift] = lut[i];
        }

out:
        rte_free(lut);

        return ret;
}

static int
ice_set_rss_key(struct ice_vsi *vsi, uint8_t *key, uint8_t key_len)
{
        struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
        int ret = 0;

        if (!key || key_len == 0) {
                PMD_DRV_LOG(DEBUG, "No key to be configured");
                return 0;
        } else if (key_len != (VSIQF_HKEY_MAX_INDEX + 1) *
                   sizeof(uint32_t)) {
                PMD_DRV_LOG(ERR, "Invalid key length %u", key_len);
                return -EINVAL;
        }

        struct ice_aqc_get_set_rss_keys *key_dw =
                (struct ice_aqc_get_set_rss_keys *)key;

        ret = ice_aq_set_rss_key(hw, vsi->idx, key_dw);
        if (ret) {
                PMD_DRV_LOG(ERR, "Failed to configure RSS key via AQ");
                ret = -EINVAL;
        }

        return ret;
}
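
/*
 * Note on the length check above: the admin queue expects a key of exactly
 * (VSIQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t) bytes (52 bytes, assuming
 * VSIQF_HKEY_MAX_INDEX == 12). A minimal sketch through the generic ethdev
 * API (hypothetical application code; port_id is assumed configured):
 *
 *      uint8_t key[52];
 *      struct rte_eth_rss_conf rss_conf = {
 *              .rss_key = key,
 *              .rss_key_len = sizeof(key),
 *              .rss_hf = ETH_RSS_IP | ETH_RSS_TCP,
 *      };
 *
 *      fill key[] with random bytes, then:
 *      rte_eth_dev_rss_hash_update(port_id, &rss_conf);
 */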

static int
ice_get_rss_key(struct ice_vsi *vsi, uint8_t *key, uint8_t *key_len)
{
        struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
        int ret;

        if (!key || !key_len)
                return -EINVAL;

        ret = ice_aq_get_rss_key(hw, vsi->idx,
                                 (struct ice_aqc_get_set_rss_keys *)key);
        if (ret) {
                PMD_DRV_LOG(ERR, "Failed to get RSS key via AQ");
                return -EINVAL;
        }
        *key_len = (VSIQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);

        return 0;
}

static int
ice_rss_hash_update(struct rte_eth_dev *dev,
                    struct rte_eth_rss_conf *rss_conf)
{
        enum ice_status status = ICE_SUCCESS;
        struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct ice_vsi *vsi = pf->main_vsi;

        /* set hash key */
        status = ice_set_rss_key(vsi, rss_conf->rss_key, rss_conf->rss_key_len);
        if (status)
                return status;

        if (rss_conf->rss_hf == 0) {
                pf->rss_hf = 0;
                return 0;
        }

        /* RSS hash configuration */
        ice_rss_hash_set(pf, rss_conf->rss_hf);

        return 0;
}

static int
ice_rss_hash_conf_get(struct rte_eth_dev *dev,
                      struct rte_eth_rss_conf *rss_conf)
{
        struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct ice_vsi *vsi = pf->main_vsi;

        ice_get_rss_key(vsi, rss_conf->rss_key,
                        &rss_conf->rss_key_len);

        rss_conf->rss_hf = pf->rss_hf;
        return 0;
}

static int
ice_promisc_enable(struct rte_eth_dev *dev)
{
        struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct ice_vsi *vsi = pf->main_vsi;
        enum ice_status status;
        uint8_t pmask;
        int ret = 0;

        pmask = ICE_PROMISC_UCAST_RX | ICE_PROMISC_UCAST_TX |
                ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX;

        status = ice_set_vsi_promisc(hw, vsi->idx, pmask, 0);
        switch (status) {
        case ICE_ERR_ALREADY_EXISTS:
                PMD_DRV_LOG(DEBUG, "Promisc mode has already been enabled");
                /* fall-through: an existing rule is not an error */
        case ICE_SUCCESS:
                break;
        default:
                PMD_DRV_LOG(ERR, "Failed to enable promisc, err=%d", status);
                ret = -EAGAIN;
        }

        return ret;
}

static int
ice_promisc_disable(struct rte_eth_dev *dev)
{
        struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct ice_vsi *vsi = pf->main_vsi;
        enum ice_status status;
        uint8_t pmask;
        int ret = 0;

        if (dev->data->all_multicast == 1)
                pmask = ICE_PROMISC_UCAST_RX | ICE_PROMISC_UCAST_TX;
        else
                pmask = ICE_PROMISC_UCAST_RX | ICE_PROMISC_UCAST_TX |
                        ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX;

        status = ice_clear_vsi_promisc(hw, vsi->idx, pmask, 0);
        if (status != ICE_SUCCESS) {
                PMD_DRV_LOG(ERR, "Failed to clear promisc, err=%d", status);
                ret = -EAGAIN;
        }

        return ret;
}

static int
ice_allmulti_enable(struct rte_eth_dev *dev)
{
        struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct ice_vsi *vsi = pf->main_vsi;
        enum ice_status status;
        uint8_t pmask;
        int ret = 0;

        pmask = ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX;

        status = ice_set_vsi_promisc(hw, vsi->idx, pmask, 0);

        switch (status) {
        case ICE_ERR_ALREADY_EXISTS:
                PMD_DRV_LOG(DEBUG, "Allmulti has already been enabled");
                /* fall-through: an existing rule is not an error */
        case ICE_SUCCESS:
                break;
        default:
                PMD_DRV_LOG(ERR, "Failed to enable allmulti, err=%d", status);
                ret = -EAGAIN;
        }

        return ret;
}

static int
ice_allmulti_disable(struct rte_eth_dev *dev)
{
        struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct ice_vsi *vsi = pf->main_vsi;
        enum ice_status status;
        uint8_t pmask;
        int ret = 0;

        if (dev->data->promiscuous == 1)
                return 0; /* must remain in all_multicast mode */

        pmask = ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX;

        status = ice_clear_vsi_promisc(hw, vsi->idx, pmask, 0);
        if (status != ICE_SUCCESS) {
                PMD_DRV_LOG(ERR, "Failed to clear allmulti, err=%d", status);
                ret = -EAGAIN;
        }

        return ret;
}
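
/*
 * Note on the interplay above: disabling one mode must not silently cancel
 * the other. ice_promisc_disable() keeps multicast promiscuity when
 * all_multicast is still on, and ice_allmulti_disable() is a no-op while
 * promiscuous mode is active. A typical caller (hypothetical application
 * code) toggles both through the ethdev API:
 *
 *      rte_eth_promiscuous_enable(port_id);
 *      rte_eth_allmulticast_enable(port_id);
 *      ...
 *      rte_eth_promiscuous_disable(port_id);  multicast traffic still flows
 *      rte_eth_allmulticast_disable(port_id);
 */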

static int ice_rx_queue_intr_enable(struct rte_eth_dev *dev,
                                    uint16_t queue_id)
{
        struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
        struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
        struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t val;
        uint16_t msix_intr;

        msix_intr = intr_handle->intr_vec[queue_id];

        val = GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
              GLINT_DYN_CTL_ITR_INDX_M;
        val &= ~GLINT_DYN_CTL_WB_ON_ITR_M;

        ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr), val);
        rte_intr_ack(intr_handle);

        return 0;
}

static int ice_rx_queue_intr_disable(struct rte_eth_dev *dev,
                                     uint16_t queue_id)
{
        struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
        struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
        struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint16_t msix_intr;

        msix_intr = intr_handle->intr_vec[queue_id];

        ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr), GLINT_DYN_CTL_WB_ON_ITR_M);

        return 0;
}
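
/*
 * Usage sketch (hypothetical application code): these callbacks back
 * rte_eth_dev_rx_intr_enable()/_disable() for interrupt-driven polling.
 * Assuming the queue's event fd was registered at init time with
 * rte_eth_dev_rx_intr_ctl_q(), a wait loop re-arms the vector, sleeps,
 * and drains the queue; pkts, BURST_SIZE and process_burst() are
 * placeholders:
 *
 *      struct rte_epoll_event event;
 *
 *      rte_eth_dev_rx_intr_enable(port_id, queue_id);
 *      if (rte_epoll_wait(RTE_EPOLL_PER_THREAD, &event, 1, timeout_ms) > 0) {
 *              rte_eth_dev_rx_intr_disable(port_id, queue_id);
 *              while (rte_eth_rx_burst(port_id, queue_id, pkts, BURST_SIZE))
 *                      process_burst(pkts);
 *      }
 */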

static int
ice_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
{
        struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        u8 ver, patch;
        u16 build;
        int ret;

        ver = hw->flash.orom.major;
        patch = hw->flash.orom.patch;
        build = hw->flash.orom.build;

        ret = snprintf(fw_version, fw_size,
                       "%x.%02x 0x%08x %d.%d.%d",
                       hw->flash.nvm.major,
                       hw->flash.nvm.minor,
                       hw->flash.nvm.eetrack,
                       ver, build, patch);
        if (ret < 0)
                return -EINVAL;

        /* add the size of '\0' */
        ret += 1;
        if (fw_size < (size_t)ret)
                return ret;
        else
                return 0;
}
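
/*
 * Usage sketch (hypothetical application code): the reported string packs
 * "NVM-version eetrack OROM-version". Per the callback above, the required
 * buffer size (including the terminating NUL) is returned when the supplied
 * buffer is too small, so a caller can size it in two passes or just use a
 * generous buffer:
 *
 *      char fw_version[64];
 *
 *      if (rte_eth_dev_fw_version_get(port_id, fw_version,
 *                                     sizeof(fw_version)) == 0)
 *              printf("FW: %s\n", fw_version);
 */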

static int
ice_vsi_vlan_pvid_set(struct ice_vsi *vsi, struct ice_vsi_vlan_pvid_info *info)
{
        struct ice_hw *hw;
        struct ice_vsi_ctx ctxt;
        uint8_t vlan_flags = 0;
        int ret;

        if (!vsi || !info) {
                PMD_DRV_LOG(ERR, "invalid parameters");
                return -EINVAL;
        }

        if (info->on) {
                vsi->info.port_based_inner_vlan = info->config.pvid;
                /* If PVID insertion is enabled, only tagged packets go out
                 * on the wire: untagged TX packets are accepted and have
                 * the PVID inserted by hardware.
                 */
                vlan_flags = ICE_AQ_VSI_INNER_VLAN_INSERT_PVID |
                             ICE_AQ_VSI_INNER_VLAN_TX_MODE_ACCEPTUNTAGGED;
        } else {
                vsi->info.port_based_inner_vlan = 0;
                if (info->config.reject.tagged == 0)
                        vlan_flags |= ICE_AQ_VSI_INNER_VLAN_TX_MODE_ACCEPTTAGGED;

                if (info->config.reject.untagged == 0)
                        vlan_flags |= ICE_AQ_VSI_INNER_VLAN_TX_MODE_ACCEPTUNTAGGED;
        }
        vsi->info.inner_vlan_flags &= ~(ICE_AQ_VSI_INNER_VLAN_INSERT_PVID |
                                        ICE_AQ_VSI_INNER_VLAN_EMODE_M);
        vsi->info.inner_vlan_flags |= vlan_flags;
        memset(&ctxt, 0, sizeof(ctxt));
        rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
        ctxt.info.valid_sections =
                rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID);
        ctxt.vsi_num = vsi->vsi_id;

        hw = ICE_VSI_TO_HW(vsi);
        ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
        if (ret != ICE_SUCCESS) {
                PMD_DRV_LOG(ERR,
                            "update VSI for VLAN insert failed, err %d",
                            ret);
                return -EINVAL;
        }

        vsi->info.valid_sections |=
                rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID);

        return ret;
}

static int
ice_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
{
        struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct ice_vsi *vsi = pf->main_vsi;
        struct rte_eth_dev_data *data = pf->dev_data;
        struct ice_vsi_vlan_pvid_info info;
        int ret;

        memset(&info, 0, sizeof(info));
        info.on = on;
        if (info.on) {
                info.config.pvid = pvid;
        } else {
                info.config.reject.tagged =
                        data->dev_conf.txmode.hw_vlan_reject_tagged;
                info.config.reject.untagged =
                        data->dev_conf.txmode.hw_vlan_reject_untagged;
        }

        ret = ice_vsi_vlan_pvid_set(vsi, &info);
        if (ret < 0) {
                PMD_DRV_LOG(ERR, "Failed to set pvid.");
                return -EINVAL;
        }

        return 0;
}
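
/*
 * Usage sketch (hypothetical application code): PVID insertion is driven
 * through the generic ethdev call; when turning it off, the reject policy
 * is taken from dev_conf.txmode as above:
 *
 *      rte_eth_dev_set_vlan_pvid(port_id, 100, 1);    enable PVID 100
 *      ...
 *      rte_eth_dev_set_vlan_pvid(port_id, 0, 0);      disable again
 */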

static int
ice_get_eeprom_length(struct rte_eth_dev *dev)
{
        struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        return hw->flash.flash_size;
}

static int
ice_get_eeprom(struct rte_eth_dev *dev,
               struct rte_dev_eeprom_info *eeprom)
{
        struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        enum ice_status status = ICE_SUCCESS;
        uint8_t *data = eeprom->data;

        eeprom->magic = hw->vendor_id | (hw->device_id << 16);

        status = ice_acquire_nvm(hw, ICE_RES_READ);
        if (status) {
                PMD_DRV_LOG(ERR, "acquire nvm failed.");
                return -EIO;
        }

        status = ice_read_flat_nvm(hw, eeprom->offset, &eeprom->length,
                                   data, false);

        ice_release_nvm(hw);

        if (status) {
                PMD_DRV_LOG(ERR, "EEPROM read failed.");
                return -EIO;
        }

        return 0;
}
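
/*
 * Usage sketch (hypothetical application code): a flat read of the first
 * 256 bytes of the NVM contents through the generic ethdev EEPROM API;
 * on success the bytes land in buf:
 *
 *      uint8_t buf[256];
 *      struct rte_dev_eeprom_info info = {
 *              .data = buf,
 *              .offset = 0,
 *              .length = sizeof(buf),
 *      };
 *
 *      rte_eth_dev_get_eeprom(port_id, &info);
 */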

static void
ice_stat_update_32(struct ice_hw *hw,
                   uint32_t reg,
                   bool offset_loaded,
                   uint64_t *offset,
                   uint64_t *stat)
{
        uint64_t new_data;

        new_data = (uint64_t)ICE_READ_REG(hw, reg);
        if (!offset_loaded)
                *offset = new_data;

        if (new_data >= *offset)
                *stat = (uint64_t)(new_data - *offset);
        else
                *stat = (uint64_t)((new_data +
                                    ((uint64_t)1 << ICE_32_BIT_WIDTH))
                                   - *offset);
}
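
/*
 * Worked example of the rollover handling above: with a 32-bit counter,
 * if the snapshot *offset is 0xFFFFFFF0 and a later read returns 0x10,
 * new_data < *offset, so the count since the snapshot is
 * 0x10 + 2^32 - 0xFFFFFFF0 = 0x20 (32 events), not a huge bogus value.
 * ice_stat_update_40() below applies the same scheme at 2^40 and masks
 * the result back to 40 bits.
 */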

static void
ice_stat_update_40(struct ice_hw *hw,
                   uint32_t hireg,
                   uint32_t loreg,
                   bool offset_loaded,
                   uint64_t *offset,
                   uint64_t *stat)
{
        uint64_t new_data;

        new_data = (uint64_t)ICE_READ_REG(hw, loreg);
        new_data |= (uint64_t)(ICE_READ_REG(hw, hireg) & ICE_8_BIT_MASK) <<
                    ICE_32_BIT_WIDTH;

        if (!offset_loaded)
                *offset = new_data;

        if (new_data >= *offset)
                *stat = new_data - *offset;
        else
                *stat = (uint64_t)((new_data +
                                    ((uint64_t)1 << ICE_40_BIT_WIDTH)) -
                                   *offset);

        *stat &= ICE_40_BIT_MASK;
}

/* Get all the statistics of a VSI */
static void
ice_update_vsi_stats(struct ice_vsi *vsi)
{
        struct ice_eth_stats *oes = &vsi->eth_stats_offset;
        struct ice_eth_stats *nes = &vsi->eth_stats;
        struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
        int idx = rte_le_to_cpu_16(vsi->vsi_id);

        ice_stat_update_40(hw, GLV_GORCH(idx), GLV_GORCL(idx),
                           vsi->offset_loaded, &oes->rx_bytes,
                           &nes->rx_bytes);
        ice_stat_update_40(hw, GLV_UPRCH(idx), GLV_UPRCL(idx),
                           vsi->offset_loaded, &oes->rx_unicast,
                           &nes->rx_unicast);
        ice_stat_update_40(hw, GLV_MPRCH(idx), GLV_MPRCL(idx),
                           vsi->offset_loaded, &oes->rx_multicast,
                           &nes->rx_multicast);
        ice_stat_update_40(hw, GLV_BPRCH(idx), GLV_BPRCL(idx),
                           vsi->offset_loaded, &oes->rx_broadcast,
                           &nes->rx_broadcast);
        /* extend rx_bytes past 40 bits: detect hardware rollover and
         * restore the software-kept high bits
         */
        if (vsi->offset_loaded) {
                if (ICE_RXTX_BYTES_LOW(vsi->old_rx_bytes) > nes->rx_bytes)
                        nes->rx_bytes += (uint64_t)1 << ICE_40_BIT_WIDTH;
                nes->rx_bytes += ICE_RXTX_BYTES_HIGH(vsi->old_rx_bytes);
        }
        vsi->old_rx_bytes = nes->rx_bytes;
        /* exclude CRC bytes */
        nes->rx_bytes -= (nes->rx_unicast + nes->rx_multicast +
                          nes->rx_broadcast) * RTE_ETHER_CRC_LEN;

        ice_stat_update_32(hw, GLV_RDPC(idx), vsi->offset_loaded,
                           &oes->rx_discards, &nes->rx_discards);
        /* GLV_REPC not supported */
        /* GLV_RMPC not supported */
        ice_stat_update_32(hw, GLSWID_RUPP(idx), vsi->offset_loaded,
                           &oes->rx_unknown_protocol,
                           &nes->rx_unknown_protocol);
        ice_stat_update_40(hw, GLV_GOTCH(idx), GLV_GOTCL(idx),
                           vsi->offset_loaded, &oes->tx_bytes,
                           &nes->tx_bytes);
        ice_stat_update_40(hw, GLV_UPTCH(idx), GLV_UPTCL(idx),
                           vsi->offset_loaded, &oes->tx_unicast,
                           &nes->tx_unicast);
        ice_stat_update_40(hw, GLV_MPTCH(idx), GLV_MPTCL(idx),
                           vsi->offset_loaded, &oes->tx_multicast,
                           &nes->tx_multicast);
        ice_stat_update_40(hw, GLV_BPTCH(idx), GLV_BPTCL(idx),
                           vsi->offset_loaded, &oes->tx_broadcast,
                           &nes->tx_broadcast);
        /* GLV_TDPC not supported */
        ice_stat_update_32(hw, GLV_TEPC(idx), vsi->offset_loaded,
                           &oes->tx_errors, &nes->tx_errors);
        /* extend tx_bytes past 40 bits, as for rx_bytes above */
        if (vsi->offset_loaded) {
                if (ICE_RXTX_BYTES_LOW(vsi->old_tx_bytes) > nes->tx_bytes)
                        nes->tx_bytes += (uint64_t)1 << ICE_40_BIT_WIDTH;
                nes->tx_bytes += ICE_RXTX_BYTES_HIGH(vsi->old_tx_bytes);
        }
        vsi->old_tx_bytes = nes->tx_bytes;
        vsi->offset_loaded = true;

        PMD_DRV_LOG(DEBUG, "************** VSI[%u] stats start **************",
                    vsi->vsi_id);
        PMD_DRV_LOG(DEBUG, "rx_bytes:            %"PRIu64"", nes->rx_bytes);
        PMD_DRV_LOG(DEBUG, "rx_unicast:          %"PRIu64"", nes->rx_unicast);
        PMD_DRV_LOG(DEBUG, "rx_multicast:        %"PRIu64"", nes->rx_multicast);
        PMD_DRV_LOG(DEBUG, "rx_broadcast:        %"PRIu64"", nes->rx_broadcast);
        PMD_DRV_LOG(DEBUG, "rx_discards:         %"PRIu64"", nes->rx_discards);
        PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
                    nes->rx_unknown_protocol);
        PMD_DRV_LOG(DEBUG, "tx_bytes:            %"PRIu64"", nes->tx_bytes);
        PMD_DRV_LOG(DEBUG, "tx_unicast:          %"PRIu64"", nes->tx_unicast);
        PMD_DRV_LOG(DEBUG, "tx_multicast:        %"PRIu64"", nes->tx_multicast);
        PMD_DRV_LOG(DEBUG, "tx_broadcast:        %"PRIu64"", nes->tx_broadcast);
        PMD_DRV_LOG(DEBUG, "tx_discards:         %"PRIu64"", nes->tx_discards);
        PMD_DRV_LOG(DEBUG, "tx_errors:           %"PRIu64"", nes->tx_errors);
        PMD_DRV_LOG(DEBUG, "************** VSI[%u] stats end ****************",
                    vsi->vsi_id);
}

static void
ice_read_stats_registers(struct ice_pf *pf, struct ice_hw *hw)
{
        struct ice_hw_port_stats *ns = &pf->stats; /* new stats */
        struct ice_hw_port_stats *os = &pf->stats_offset; /* old stats */

        /* Get statistics of struct ice_eth_stats */
        ice_stat_update_40(hw, GLPRT_GORCH(hw->port_info->lport),
                           GLPRT_GORCL(hw->port_info->lport),
                           pf->offset_loaded, &os->eth.rx_bytes,
                           &ns->eth.rx_bytes);
        ice_stat_update_40(hw, GLPRT_UPRCH(hw->port_info->lport),
                           GLPRT_UPRCL(hw->port_info->lport),
                           pf->offset_loaded, &os->eth.rx_unicast,
                           &ns->eth.rx_unicast);
        ice_stat_update_40(hw, GLPRT_MPRCH(hw->port_info->lport),
                           GLPRT_MPRCL(hw->port_info->lport),
                           pf->offset_loaded, &os->eth.rx_multicast,
                           &ns->eth.rx_multicast);
        ice_stat_update_40(hw, GLPRT_BPRCH(hw->port_info->lport),
                           GLPRT_BPRCL(hw->port_info->lport),
                           pf->offset_loaded, &os->eth.rx_broadcast,
                           &ns->eth.rx_broadcast);
        ice_stat_update_32(hw, PRTRPB_RDPC,
                           pf->offset_loaded, &os->eth.rx_discards,
                           &ns->eth.rx_discards);
        /* extend rx_bytes past 40 bits: detect hardware rollover and
         * restore the software-kept high bits
         */
        if (pf->offset_loaded) {
                if (ICE_RXTX_BYTES_LOW(pf->old_rx_bytes) > ns->eth.rx_bytes)
                        ns->eth.rx_bytes += (uint64_t)1 << ICE_40_BIT_WIDTH;
                ns->eth.rx_bytes += ICE_RXTX_BYTES_HIGH(pf->old_rx_bytes);
        }
        pf->old_rx_bytes = ns->eth.rx_bytes;

        /* Workaround: CRC size should not be included in byte statistics,
         * so subtract RTE_ETHER_CRC_LEN from the byte counter for each rx
         * packet.
         */
        ns->eth.rx_bytes -= (ns->eth.rx_unicast + ns->eth.rx_multicast +
                             ns->eth.rx_broadcast) * RTE_ETHER_CRC_LEN;

        /* GLPRT_REPC not supported */
        /* GLPRT_RMPC not supported */
        ice_stat_update_32(hw, GLSWID_RUPP(hw->port_info->lport),
                           pf->offset_loaded,
                           &os->eth.rx_unknown_protocol,
                           &ns->eth.rx_unknown_protocol);
        ice_stat_update_40(hw, GLPRT_GOTCH(hw->port_info->lport),
                           GLPRT_GOTCL(hw->port_info->lport),
                           pf->offset_loaded, &os->eth.tx_bytes,
                           &ns->eth.tx_bytes);
        ice_stat_update_40(hw, GLPRT_UPTCH(hw->port_info->lport),
                           GLPRT_UPTCL(hw->port_info->lport),
                           pf->offset_loaded, &os->eth.tx_unicast,
                           &ns->eth.tx_unicast);
        ice_stat_update_40(hw, GLPRT_MPTCH(hw->port_info->lport),
                           GLPRT_MPTCL(hw->port_info->lport),
                           pf->offset_loaded, &os->eth.tx_multicast,
                           &ns->eth.tx_multicast);
        ice_stat_update_40(hw, GLPRT_BPTCH(hw->port_info->lport),
                           GLPRT_BPTCL(hw->port_info->lport),
                           pf->offset_loaded, &os->eth.tx_broadcast,
                           &ns->eth.tx_broadcast);
        /* extend tx_bytes past 40 bits, as for rx_bytes above */
        if (pf->offset_loaded) {
                if (ICE_RXTX_BYTES_LOW(pf->old_tx_bytes) > ns->eth.tx_bytes)
                        ns->eth.tx_bytes += (uint64_t)1 << ICE_40_BIT_WIDTH;
                ns->eth.tx_bytes += ICE_RXTX_BYTES_HIGH(pf->old_tx_bytes);
        }
        pf->old_tx_bytes = ns->eth.tx_bytes;
        ns->eth.tx_bytes -= (ns->eth.tx_unicast + ns->eth.tx_multicast +
                             ns->eth.tx_broadcast) * RTE_ETHER_CRC_LEN;

        /* GLPRT_TEPC not supported */

        /* additional port specific stats */
        ice_stat_update_32(hw, GLPRT_TDOLD(hw->port_info->lport),
                           pf->offset_loaded, &os->tx_dropped_link_down,
                           &ns->tx_dropped_link_down);
        ice_stat_update_32(hw, GLPRT_CRCERRS(hw->port_info->lport),
                           pf->offset_loaded, &os->crc_errors,
                           &ns->crc_errors);
        ice_stat_update_32(hw, GLPRT_ILLERRC(hw->port_info->lport),
                           pf->offset_loaded, &os->illegal_bytes,
                           &ns->illegal_bytes);
        /* GLPRT_ERRBC not supported */
        ice_stat_update_32(hw, GLPRT_MLFC(hw->port_info->lport),
                           pf->offset_loaded, &os->mac_local_faults,
                           &ns->mac_local_faults);
        ice_stat_update_32(hw, GLPRT_MRFC(hw->port_info->lport),
                           pf->offset_loaded, &os->mac_remote_faults,
                           &ns->mac_remote_faults);

        ice_stat_update_32(hw, GLPRT_RLEC(hw->port_info->lport),
                           pf->offset_loaded, &os->rx_len_errors,
                           &ns->rx_len_errors);

        ice_stat_update_32(hw, GLPRT_LXONRXC(hw->port_info->lport),
                           pf->offset_loaded, &os->link_xon_rx,
                           &ns->link_xon_rx);
        ice_stat_update_32(hw, GLPRT_LXOFFRXC(hw->port_info->lport),
                           pf->offset_loaded, &os->link_xoff_rx,
                           &ns->link_xoff_rx);
        ice_stat_update_32(hw, GLPRT_LXONTXC(hw->port_info->lport),
                           pf->offset_loaded, &os->link_xon_tx,
                           &ns->link_xon_tx);
        ice_stat_update_32(hw, GLPRT_LXOFFTXC(hw->port_info->lport),
                           pf->offset_loaded, &os->link_xoff_tx,
                           &ns->link_xoff_tx);
        ice_stat_update_40(hw, GLPRT_PRC64H(hw->port_info->lport),
                           GLPRT_PRC64L(hw->port_info->lport),
                           pf->offset_loaded, &os->rx_size_64,
                           &ns->rx_size_64);
        ice_stat_update_40(hw, GLPRT_PRC127H(hw->port_info->lport),
                           GLPRT_PRC127L(hw->port_info->lport),
                           pf->offset_loaded, &os->rx_size_127,
                           &ns->rx_size_127);
        ice_stat_update_40(hw, GLPRT_PRC255H(hw->port_info->lport),
                           GLPRT_PRC255L(hw->port_info->lport),
                           pf->offset_loaded, &os->rx_size_255,
                           &ns->rx_size_255);
        ice_stat_update_40(hw, GLPRT_PRC511H(hw->port_info->lport),
                           GLPRT_PRC511L(hw->port_info->lport),
                           pf->offset_loaded, &os->rx_size_511,
                           &ns->rx_size_511);
        ice_stat_update_40(hw, GLPRT_PRC1023H(hw->port_info->lport),
                           GLPRT_PRC1023L(hw->port_info->lport),
                           pf->offset_loaded, &os->rx_size_1023,
                           &ns->rx_size_1023);
        ice_stat_update_40(hw, GLPRT_PRC1522H(hw->port_info->lport),
                           GLPRT_PRC1522L(hw->port_info->lport),
                           pf->offset_loaded, &os->rx_size_1522,
                           &ns->rx_size_1522);
        ice_stat_update_40(hw, GLPRT_PRC9522H(hw->port_info->lport),
                           GLPRT_PRC9522L(hw->port_info->lport),
                           pf->offset_loaded, &os->rx_size_big,
                           &ns->rx_size_big);
        ice_stat_update_32(hw, GLPRT_RUC(hw->port_info->lport),
                           pf->offset_loaded, &os->rx_undersize,
                           &ns->rx_undersize);
        ice_stat_update_32(hw, GLPRT_RFC(hw->port_info->lport),
                           pf->offset_loaded, &os->rx_fragments,
                           &ns->rx_fragments);
        ice_stat_update_32(hw, GLPRT_ROC(hw->port_info->lport),
                           pf->offset_loaded, &os->rx_oversize,
                           &ns->rx_oversize);
        ice_stat_update_32(hw, GLPRT_RJC(hw->port_info->lport),
                           pf->offset_loaded, &os->rx_jabber,
                           &ns->rx_jabber);
        ice_stat_update_40(hw, GLPRT_PTC64H(hw->port_info->lport),
                           GLPRT_PTC64L(hw->port_info->lport),
                           pf->offset_loaded, &os->tx_size_64,
                           &ns->tx_size_64);
        ice_stat_update_40(hw, GLPRT_PTC127H(hw->port_info->lport),
                           GLPRT_PTC127L(hw->port_info->lport),
                           pf->offset_loaded, &os->tx_size_127,
                           &ns->tx_size_127);
        ice_stat_update_40(hw, GLPRT_PTC255H(hw->port_info->lport),
                           GLPRT_PTC255L(hw->port_info->lport),
                           pf->offset_loaded, &os->tx_size_255,
                           &ns->tx_size_255);
        ice_stat_update_40(hw, GLPRT_PTC511H(hw->port_info->lport),
                           GLPRT_PTC511L(hw->port_info->lport),
                           pf->offset_loaded, &os->tx_size_511,
                           &ns->tx_size_511);
        ice_stat_update_40(hw, GLPRT_PTC1023H(hw->port_info->lport),
                           GLPRT_PTC1023L(hw->port_info->lport),
                           pf->offset_loaded, &os->tx_size_1023,
                           &ns->tx_size_1023);
        ice_stat_update_40(hw, GLPRT_PTC1522H(hw->port_info->lport),
                           GLPRT_PTC1522L(hw->port_info->lport),
                           pf->offset_loaded, &os->tx_size_1522,
                           &ns->tx_size_1522);
        ice_stat_update_40(hw, GLPRT_PTC9522H(hw->port_info->lport),
                           GLPRT_PTC9522L(hw->port_info->lport),
                           pf->offset_loaded, &os->tx_size_big,
                           &ns->tx_size_big);

        /* GLPRT_MSPDC not supported */
        /* GLPRT_XEC not supported */

        pf->offset_loaded = true;

        if (pf->main_vsi)
                ice_update_vsi_stats(pf->main_vsi);
}

/* Get all statistics of a port */
static int
ice_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
        struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct ice_hw_port_stats *ns = &pf->stats; /* new stats */

        /* refresh the counters from hardware, then fill the ethdev struct */
        ice_read_stats_registers(pf, hw);

        stats->ipackets = pf->main_vsi->eth_stats.rx_unicast +
                          pf->main_vsi->eth_stats.rx_multicast +
                          pf->main_vsi->eth_stats.rx_broadcast -
                          pf->main_vsi->eth_stats.rx_discards;
        stats->opackets = ns->eth.tx_unicast +
                          ns->eth.tx_multicast +
                          ns->eth.tx_broadcast;
        stats->ibytes   = pf->main_vsi->eth_stats.rx_bytes;
        stats->obytes   = ns->eth.tx_bytes;
        stats->oerrors  = ns->eth.tx_errors +
                          pf->main_vsi->eth_stats.tx_errors;

        /* Rx Errors */
        stats->imissed  = ns->eth.rx_discards +
                          pf->main_vsi->eth_stats.rx_discards;
        stats->ierrors  = ns->crc_errors +
                          ns->rx_undersize +
                          ns->rx_oversize + ns->rx_fragments + ns->rx_jabber;

        PMD_DRV_LOG(DEBUG, "*************** PF stats start *****************");
        PMD_DRV_LOG(DEBUG, "rx_bytes:   %"PRIu64"", ns->eth.rx_bytes);
        PMD_DRV_LOG(DEBUG, "rx_unicast: %"PRIu64"", ns->eth.rx_unicast);
        PMD_DRV_LOG(DEBUG, "rx_multicast:%"PRIu64"", ns->eth.rx_multicast);
        PMD_DRV_LOG(DEBUG, "rx_broadcast:%"PRIu64"", ns->eth.rx_broadcast);
        PMD_DRV_LOG(DEBUG, "rx_discards:%"PRIu64"", ns->eth.rx_discards);
        PMD_DRV_LOG(DEBUG, "vsi rx_discards:%"PRIu64"",
                    pf->main_vsi->eth_stats.rx_discards);
        PMD_DRV_LOG(DEBUG, "rx_unknown_protocol:  %"PRIu64"",
                    ns->eth.rx_unknown_protocol);
        PMD_DRV_LOG(DEBUG, "tx_bytes:   %"PRIu64"", ns->eth.tx_bytes);
        PMD_DRV_LOG(DEBUG, "tx_unicast: %"PRIu64"", ns->eth.tx_unicast);
        PMD_DRV_LOG(DEBUG, "tx_multicast:%"PRIu64"", ns->eth.tx_multicast);
        PMD_DRV_LOG(DEBUG, "tx_broadcast:%"PRIu64"", ns->eth.tx_broadcast);
        PMD_DRV_LOG(DEBUG, "tx_discards:%"PRIu64"", ns->eth.tx_discards);
        PMD_DRV_LOG(DEBUG, "vsi tx_discards:%"PRIu64"",
                    pf->main_vsi->eth_stats.tx_discards);
        PMD_DRV_LOG(DEBUG, "tx_errors:          %"PRIu64"", ns->eth.tx_errors);

        PMD_DRV_LOG(DEBUG, "tx_dropped_link_down:       %"PRIu64"",
                    ns->tx_dropped_link_down);
        PMD_DRV_LOG(DEBUG, "crc_errors: %"PRIu64"", ns->crc_errors);
        PMD_DRV_LOG(DEBUG, "illegal_bytes:      %"PRIu64"",
                    ns->illegal_bytes);
        PMD_DRV_LOG(DEBUG, "error_bytes:        %"PRIu64"", ns->error_bytes);
        PMD_DRV_LOG(DEBUG, "mac_local_faults:   %"PRIu64"",
                    ns->mac_local_faults);
        PMD_DRV_LOG(DEBUG, "mac_remote_faults:  %"PRIu64"",
                    ns->mac_remote_faults);
        PMD_DRV_LOG(DEBUG, "link_xon_rx:        %"PRIu64"", ns->link_xon_rx);
        PMD_DRV_LOG(DEBUG, "link_xoff_rx:       %"PRIu64"", ns->link_xoff_rx);
        PMD_DRV_LOG(DEBUG, "link_xon_tx:        %"PRIu64"", ns->link_xon_tx);
        PMD_DRV_LOG(DEBUG, "link_xoff_tx:       %"PRIu64"", ns->link_xoff_tx);
        PMD_DRV_LOG(DEBUG, "rx_size_64:         %"PRIu64"", ns->rx_size_64);
        PMD_DRV_LOG(DEBUG, "rx_size_127:        %"PRIu64"", ns->rx_size_127);
        PMD_DRV_LOG(DEBUG, "rx_size_255:        %"PRIu64"", ns->rx_size_255);
        PMD_DRV_LOG(DEBUG, "rx_size_511:        %"PRIu64"", ns->rx_size_511);
        PMD_DRV_LOG(DEBUG, "rx_size_1023:       %"PRIu64"", ns->rx_size_1023);
        PMD_DRV_LOG(DEBUG, "rx_size_1522:       %"PRIu64"", ns->rx_size_1522);
        PMD_DRV_LOG(DEBUG, "rx_size_big:        %"PRIu64"", ns->rx_size_big);
        PMD_DRV_LOG(DEBUG, "rx_undersize:       %"PRIu64"", ns->rx_undersize);
        PMD_DRV_LOG(DEBUG, "rx_fragments:       %"PRIu64"", ns->rx_fragments);
        PMD_DRV_LOG(DEBUG, "rx_oversize:        %"PRIu64"", ns->rx_oversize);
        PMD_DRV_LOG(DEBUG, "rx_jabber:          %"PRIu64"", ns->rx_jabber);
        PMD_DRV_LOG(DEBUG, "tx_size_64:         %"PRIu64"", ns->tx_size_64);
        PMD_DRV_LOG(DEBUG, "tx_size_127:        %"PRIu64"", ns->tx_size_127);
        PMD_DRV_LOG(DEBUG, "tx_size_255:        %"PRIu64"", ns->tx_size_255);
        PMD_DRV_LOG(DEBUG, "tx_size_511:        %"PRIu64"", ns->tx_size_511);
        PMD_DRV_LOG(DEBUG, "tx_size_1023:       %"PRIu64"", ns->tx_size_1023);
        PMD_DRV_LOG(DEBUG, "tx_size_1522:       %"PRIu64"", ns->tx_size_1522);
        PMD_DRV_LOG(DEBUG, "tx_size_big:        %"PRIu64"", ns->tx_size_big);
        PMD_DRV_LOG(DEBUG, "rx_len_errors:      %"PRIu64"", ns->rx_len_errors);
        PMD_DRV_LOG(DEBUG, "************* PF stats end ****************");
        return 0;
}
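
/*
 * Usage sketch (hypothetical application code):
 *
 *      struct rte_eth_stats stats;
 *
 *      if (rte_eth_stats_get(port_id, &stats) == 0)
 *              printf("rx %"PRIu64" pkts, %"PRIu64" bytes, %"PRIu64" missed\n",
 *                     stats.ipackets, stats.ibytes, stats.imissed);
 */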

/* Reset the statistics */
static int
ice_stats_reset(struct rte_eth_dev *dev)
{
        struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        /* Mark PF and VSI stats to update the offset, aka "reset" */
        pf->offset_loaded = false;
        if (pf->main_vsi)
                pf->main_vsi->offset_loaded = false;

        /* read the stats, reading current register values into offset */
        ice_read_stats_registers(pf, hw);

        return 0;
}
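
/*
 * Design note: the "reset" above never writes the hardware counters (they
 * are largely read-only); it re-snapshots them into the offset fields so
 * that the next ice_read_stats_registers() reports deltas from this point.
 */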

static uint32_t
ice_xstats_calc_num(void)
{
        uint32_t num;

        num = ICE_NB_ETH_XSTATS + ICE_NB_HW_PORT_XSTATS;

        return num;
}

static int
ice_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
               unsigned int n)
{
        struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        unsigned int i;
        unsigned int count;
        struct ice_hw_port_stats *hw_stats = &pf->stats;

        count = ice_xstats_calc_num();
        if (n < count)
                return count;

        ice_read_stats_registers(pf, hw);

        if (!xstats)
                return 0;

        count = 0;

        /* Get stats from the ice_eth_stats struct */
        for (i = 0; i < ICE_NB_ETH_XSTATS; i++) {
                xstats[count].value =
                        *(uint64_t *)((char *)&hw_stats->eth +
                                      ice_stats_strings[i].offset);
                xstats[count].id = count;
                count++;
        }

        /* Get individual stats from the ice_hw_port_stats struct */
        for (i = 0; i < ICE_NB_HW_PORT_XSTATS; i++) {
                xstats[count].value =
                        *(uint64_t *)((char *)hw_stats +
                                      ice_hw_port_strings[i].offset);
                xstats[count].id = count;
                count++;
        }

        return count;
}

static int ice_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
                                struct rte_eth_xstat_name *xstats_names,
                                __rte_unused unsigned int limit)
{
        unsigned int count = 0;
        unsigned int i;

        if (!xstats_names)
                return ice_xstats_calc_num();

        /* Note: limit checked in rte_eth_xstats_names() */

        /* Get stats from the ice_eth_stats struct */
        for (i = 0; i < ICE_NB_ETH_XSTATS; i++) {
                strlcpy(xstats_names[count].name, ice_stats_strings[i].name,
                        sizeof(xstats_names[count].name));
                count++;
        }

        /* Get individual stats from the ice_hw_port_stats struct */
        for (i = 0; i < ICE_NB_HW_PORT_XSTATS; i++) {
                strlcpy(xstats_names[count].name, ice_hw_port_strings[i].name,
                        sizeof(xstats_names[count].name));
                count++;
        }

        return count;
}
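
/*
 * Usage sketch (hypothetical application code): both callbacks support the
 * standard two-call pattern, where a NULL or short buffer yields the
 * required count (see the n < count check above):
 *
 *      int i, n = rte_eth_xstats_get(port_id, NULL, 0);
 *      struct rte_eth_xstat *xs = malloc(n * sizeof(*xs));
 *      struct rte_eth_xstat_name *names = malloc(n * sizeof(*names));
 *
 *      rte_eth_xstats_get_names(port_id, names, n);
 *      rte_eth_xstats_get(port_id, xs, n);
 *      for (i = 0; i < n; i++)
 *              printf("%s: %"PRIu64"\n", names[xs[i].id].name, xs[i].value);
 */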

static int
ice_dev_flow_ops_get(struct rte_eth_dev *dev,
                     const struct rte_flow_ops **ops)
{
        if (!dev)
                return -EINVAL;

        *ops = &ice_flow_ops;
        return 0;
}

/* Add UDP tunneling port */
static int
ice_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
                            struct rte_eth_udp_tunnel *udp_tunnel)
{
        int ret = 0;
        struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        if (udp_tunnel == NULL)
                return -EINVAL;

        switch (udp_tunnel->prot_type) {
        case RTE_TUNNEL_TYPE_VXLAN:
                ret = ice_create_tunnel(hw, TNL_VXLAN, udp_tunnel->udp_port);
                break;
        default:
                PMD_DRV_LOG(ERR, "Invalid tunnel type");
                ret = -EINVAL;
                break;
        }

        return ret;
}
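
/*
 * Usage sketch (hypothetical application code): registering a non-standard
 * VXLAN UDP port so the hardware parser recognizes the tunnel:
 *
 *      struct rte_eth_udp_tunnel tunnel = {
 *              .udp_port = 4790,
 *              .prot_type = RTE_TUNNEL_TYPE_VXLAN,
 *      };
 *
 *      rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);
 */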

/* Delete UDP tunneling port */
static int
ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
                            struct rte_eth_udp_tunnel *udp_tunnel)
{
        int ret = 0;
        struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        if (udp_tunnel == NULL)
                return -EINVAL;

        switch (udp_tunnel->prot_type) {
        case RTE_TUNNEL_TYPE_VXLAN:
                ret = ice_destroy_tunnel(hw, udp_tunnel->udp_port, 0);
                break;
        default:
                PMD_DRV_LOG(ERR, "Invalid tunnel type");
                ret = -EINVAL;
                break;
        }

        return ret;
}

static int
ice_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
              struct rte_pci_device *pci_dev)
{
        return rte_eth_dev_pci_generic_probe(pci_dev,
                                             sizeof(struct ice_adapter),
                                             ice_dev_init);
}

static int
ice_pci_remove(struct rte_pci_device *pci_dev)
{
        return rte_eth_dev_pci_generic_remove(pci_dev, ice_dev_uninit);
}

static struct rte_pci_driver rte_ice_pmd = {
        .id_table = pci_id_ice_map,
        .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
        .probe = ice_pci_probe,
        .remove = ice_pci_remove,
};

/**
 * Driver initialization routine.
 * Invoked once at EAL init time.
 * Registers itself as the Poll Mode Driver for ice PCI devices.
 */
RTE_PMD_REGISTER_PCI(net_ice, rte_ice_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_ice, pci_id_ice_map);
RTE_PMD_REGISTER_KMOD_DEP(net_ice, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(net_ice,
                              ICE_HW_DEBUG_MASK_ARG "=0xXXX "
                              ICE_PROTO_XTR_ARG "=[queue:]<vlan|ipv4|ipv6|ipv6_flow|tcp|ip_offset> "
                              ICE_SAFE_MODE_SUPPORT_ARG "=<0|1> "
                              ICE_PIPELINE_MODE_SUPPORT_ARG "=<0|1>");
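
/*
 * Example invocation (hypothetical PCI address): the devargs registered
 * above are passed per device on the EAL command line, e.g.
 *
 *      dpdk-testpmd -a 0000:18:00.0,safe-mode-support=1 -- -i
 */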

RTE_LOG_REGISTER_SUFFIX(ice_logtype_init, init, NOTICE);
RTE_LOG_REGISTER_SUFFIX(ice_logtype_driver, driver, NOTICE);
#ifdef RTE_ETHDEV_DEBUG_RX
RTE_LOG_REGISTER_SUFFIX(ice_logtype_rx, rx, DEBUG);
#endif
#ifdef RTE_ETHDEV_DEBUG_TX
RTE_LOG_REGISTER_SUFFIX(ice_logtype_tx, tx, DEBUG);
#endif