net/ice: fix RSS hash update
drivers/net/ice/ice_ethdev.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation
 */

#include <rte_string_fns.h>
#include <ethdev_pci.h>

#include <stdio.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>

#include "base/ice_sched.h"
#include "base/ice_flow.h"
#include "base/ice_dcb.h"
#include "base/ice_common.h"

#include "rte_pmd_ice.h"
#include "ice_ethdev.h"
#include "ice_rxtx.h"
#include "ice_generic_flow.h"

/* devargs */
#define ICE_SAFE_MODE_SUPPORT_ARG     "safe-mode-support"
#define ICE_PIPELINE_MODE_SUPPORT_ARG "pipeline-mode-support"
#define ICE_PROTO_XTR_ARG             "proto_xtr"

static const char * const ice_valid_args[] = {
        ICE_SAFE_MODE_SUPPORT_ARG,
        ICE_PIPELINE_MODE_SUPPORT_ARG,
        ICE_PROTO_XTR_ARG,
        NULL
};

static const struct rte_mbuf_dynfield ice_proto_xtr_metadata_param = {
        .name = "intel_pmd_dynfield_proto_xtr_metadata",
        .size = sizeof(uint32_t),
        .align = __alignof__(uint32_t),
        .flags = 0,
};

struct proto_xtr_ol_flag {
        const struct rte_mbuf_dynflag param;
        uint64_t *ol_flag;
        bool required;
};

static bool ice_proto_xtr_hw_support[PROTO_XTR_MAX];

static struct proto_xtr_ol_flag ice_proto_xtr_ol_flag_params[] = {
        [PROTO_XTR_VLAN] = {
                .param = { .name = "intel_pmd_dynflag_proto_xtr_vlan" },
                .ol_flag = &rte_net_ice_dynflag_proto_xtr_vlan_mask },
        [PROTO_XTR_IPV4] = {
                .param = { .name = "intel_pmd_dynflag_proto_xtr_ipv4" },
                .ol_flag = &rte_net_ice_dynflag_proto_xtr_ipv4_mask },
        [PROTO_XTR_IPV6] = {
                .param = { .name = "intel_pmd_dynflag_proto_xtr_ipv6" },
                .ol_flag = &rte_net_ice_dynflag_proto_xtr_ipv6_mask },
        [PROTO_XTR_IPV6_FLOW] = {
                .param = { .name = "intel_pmd_dynflag_proto_xtr_ipv6_flow" },
                .ol_flag = &rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask },
        [PROTO_XTR_TCP] = {
                .param = { .name = "intel_pmd_dynflag_proto_xtr_tcp" },
                .ol_flag = &rte_net_ice_dynflag_proto_xtr_tcp_mask },
        [PROTO_XTR_IP_OFFSET] = {
                .param = { .name = "intel_pmd_dynflag_proto_xtr_ip_offset" },
                .ol_flag = &rte_net_ice_dynflag_proto_xtr_ip_offset_mask },
};

#define ICE_OS_DEFAULT_PKG_NAME "ICE OS Default Package"
#define ICE_COMMS_PKG_NAME      "ICE COMMS Package"
#define ICE_MAX_RES_DESC_NUM    1024

static int ice_dev_configure(struct rte_eth_dev *dev);
static int ice_dev_start(struct rte_eth_dev *dev);
static int ice_dev_stop(struct rte_eth_dev *dev);
static int ice_dev_close(struct rte_eth_dev *dev);
static int ice_dev_reset(struct rte_eth_dev *dev);
static int ice_dev_info_get(struct rte_eth_dev *dev,
                            struct rte_eth_dev_info *dev_info);
static int ice_link_update(struct rte_eth_dev *dev,
                           int wait_to_complete);
static int ice_dev_set_link_up(struct rte_eth_dev *dev);
static int ice_dev_set_link_down(struct rte_eth_dev *dev);

static int ice_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
static int ice_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static int ice_rss_reta_update(struct rte_eth_dev *dev,
                               struct rte_eth_rss_reta_entry64 *reta_conf,
                               uint16_t reta_size);
static int ice_rss_reta_query(struct rte_eth_dev *dev,
                              struct rte_eth_rss_reta_entry64 *reta_conf,
                              uint16_t reta_size);
static int ice_rss_hash_update(struct rte_eth_dev *dev,
                               struct rte_eth_rss_conf *rss_conf);
static int ice_rss_hash_conf_get(struct rte_eth_dev *dev,
                                 struct rte_eth_rss_conf *rss_conf);
static int ice_promisc_enable(struct rte_eth_dev *dev);
static int ice_promisc_disable(struct rte_eth_dev *dev);
static int ice_allmulti_enable(struct rte_eth_dev *dev);
static int ice_allmulti_disable(struct rte_eth_dev *dev);
static int ice_vlan_filter_set(struct rte_eth_dev *dev,
                               uint16_t vlan_id,
                               int on);
static int ice_macaddr_set(struct rte_eth_dev *dev,
                           struct rte_ether_addr *mac_addr);
static int ice_macaddr_add(struct rte_eth_dev *dev,
                           struct rte_ether_addr *mac_addr,
                           __rte_unused uint32_t index,
                           uint32_t pool);
static void ice_macaddr_remove(struct rte_eth_dev *dev, uint32_t index);
static int ice_rx_queue_intr_enable(struct rte_eth_dev *dev,
                                    uint16_t queue_id);
static int ice_rx_queue_intr_disable(struct rte_eth_dev *dev,
                                     uint16_t queue_id);
static int ice_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
                              size_t fw_size);
static int ice_vlan_pvid_set(struct rte_eth_dev *dev,
                             uint16_t pvid, int on);
static int ice_get_eeprom_length(struct rte_eth_dev *dev);
static int ice_get_eeprom(struct rte_eth_dev *dev,
                          struct rte_dev_eeprom_info *eeprom);
static int ice_stats_get(struct rte_eth_dev *dev,
                         struct rte_eth_stats *stats);
static int ice_stats_reset(struct rte_eth_dev *dev);
static int ice_xstats_get(struct rte_eth_dev *dev,
                          struct rte_eth_xstat *xstats, unsigned int n);
static int ice_xstats_get_names(struct rte_eth_dev *dev,
                                struct rte_eth_xstat_name *xstats_names,
                                unsigned int limit);
static int ice_dev_filter_ctrl(struct rte_eth_dev *dev,
                        enum rte_filter_type filter_type,
                        enum rte_filter_op filter_op,
                        void *arg);
static int ice_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
                        struct rte_eth_udp_tunnel *udp_tunnel);
static int ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
                        struct rte_eth_udp_tunnel *udp_tunnel);

static const struct rte_pci_id pci_id_ice_map[] = {
        { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_BACKPLANE) },
        { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_SFP) },
        { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_10G_BASE_T) },
        { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_1GBE) },
        { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_QSFP) },
        { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_BACKPLANE) },
        { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP) },
        { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP) },
        { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810_XXV_BACKPLANE) },
        { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810_XXV_QSFP) },
        { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810_XXV_SFP) },
        { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823C_BACKPLANE) },
        { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823C_QSFP) },
        { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823C_SFP) },
        { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823C_10G_BASE_T) },
        { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823C_SGMII) },
        { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_BACKPLANE) },
        { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_QSFP) },
        { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_SFP) },
        { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_10G_BASE_T) },
        { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_SGMII) },
        { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822L_BACKPLANE) },
        { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822L_SFP) },
        { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822L_10G_BASE_T) },
        { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822L_SGMII) },
        { .vendor_id = 0, /* sentinel */ },
};

static const struct eth_dev_ops ice_eth_dev_ops = {
        .dev_configure                = ice_dev_configure,
        .dev_start                    = ice_dev_start,
        .dev_stop                     = ice_dev_stop,
        .dev_close                    = ice_dev_close,
        .dev_reset                    = ice_dev_reset,
        .dev_set_link_up              = ice_dev_set_link_up,
        .dev_set_link_down            = ice_dev_set_link_down,
        .rx_queue_start               = ice_rx_queue_start,
        .rx_queue_stop                = ice_rx_queue_stop,
        .tx_queue_start               = ice_tx_queue_start,
        .tx_queue_stop                = ice_tx_queue_stop,
        .rx_queue_setup               = ice_rx_queue_setup,
        .rx_queue_release             = ice_rx_queue_release,
        .tx_queue_setup               = ice_tx_queue_setup,
        .tx_queue_release             = ice_tx_queue_release,
        .dev_infos_get                = ice_dev_info_get,
        .dev_supported_ptypes_get     = ice_dev_supported_ptypes_get,
        .link_update                  = ice_link_update,
        .mtu_set                      = ice_mtu_set,
        .mac_addr_set                 = ice_macaddr_set,
        .mac_addr_add                 = ice_macaddr_add,
        .mac_addr_remove              = ice_macaddr_remove,
        .vlan_filter_set              = ice_vlan_filter_set,
        .vlan_offload_set             = ice_vlan_offload_set,
        .reta_update                  = ice_rss_reta_update,
        .reta_query                   = ice_rss_reta_query,
        .rss_hash_update              = ice_rss_hash_update,
        .rss_hash_conf_get            = ice_rss_hash_conf_get,
        .promiscuous_enable           = ice_promisc_enable,
        .promiscuous_disable          = ice_promisc_disable,
        .allmulticast_enable          = ice_allmulti_enable,
        .allmulticast_disable         = ice_allmulti_disable,
        .rx_queue_intr_enable         = ice_rx_queue_intr_enable,
        .rx_queue_intr_disable        = ice_rx_queue_intr_disable,
        .fw_version_get               = ice_fw_version_get,
        .vlan_pvid_set                = ice_vlan_pvid_set,
        .rxq_info_get                 = ice_rxq_info_get,
        .txq_info_get                 = ice_txq_info_get,
        .rx_burst_mode_get            = ice_rx_burst_mode_get,
        .tx_burst_mode_get            = ice_tx_burst_mode_get,
        .get_eeprom_length            = ice_get_eeprom_length,
        .get_eeprom                   = ice_get_eeprom,
        .stats_get                    = ice_stats_get,
        .stats_reset                  = ice_stats_reset,
        .xstats_get                   = ice_xstats_get,
        .xstats_get_names             = ice_xstats_get_names,
        .xstats_reset                 = ice_stats_reset,
        .filter_ctrl                  = ice_dev_filter_ctrl,
        .udp_tunnel_port_add          = ice_dev_udp_tunnel_port_add,
        .udp_tunnel_port_del          = ice_dev_udp_tunnel_port_del,
        .tx_done_cleanup              = ice_tx_done_cleanup,
        .get_monitor_addr             = ice_get_monitor_addr,
};

/* Store statistics names and their offsets in the stats structure */
struct ice_xstats_name_off {
        char name[RTE_ETH_XSTATS_NAME_SIZE];
        unsigned int offset;
};

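/*
 * Each entry below maps an xstat name to the byte offset of a uint64_t
 * counter inside the corresponding stats structure. A getter resolves the
 * value with pointer arithmetic; roughly (a sketch of the pattern, not
 * verbatim driver code):
 *
 *   const struct ice_xstats_name_off *e = &ice_stats_strings[i];
 *   uint64_t val = *(uint64_t *)((char *)&eth_stats + e->offset);
 *
 * where eth_stats is a struct ice_eth_stats instance.
 */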
static const struct ice_xstats_name_off ice_stats_strings[] = {
        {"rx_unicast_packets", offsetof(struct ice_eth_stats, rx_unicast)},
        {"rx_multicast_packets", offsetof(struct ice_eth_stats, rx_multicast)},
        {"rx_broadcast_packets", offsetof(struct ice_eth_stats, rx_broadcast)},
        {"rx_dropped_packets", offsetof(struct ice_eth_stats, rx_discards)},
        {"rx_unknown_protocol_packets", offsetof(struct ice_eth_stats,
                rx_unknown_protocol)},
        {"tx_unicast_packets", offsetof(struct ice_eth_stats, tx_unicast)},
        {"tx_multicast_packets", offsetof(struct ice_eth_stats, tx_multicast)},
        {"tx_broadcast_packets", offsetof(struct ice_eth_stats, tx_broadcast)},
        {"tx_dropped_packets", offsetof(struct ice_eth_stats, tx_discards)},
};

#define ICE_NB_ETH_XSTATS (sizeof(ice_stats_strings) / \
                sizeof(ice_stats_strings[0]))

static const struct ice_xstats_name_off ice_hw_port_strings[] = {
        {"tx_link_down_dropped", offsetof(struct ice_hw_port_stats,
                tx_dropped_link_down)},
        {"rx_crc_errors", offsetof(struct ice_hw_port_stats, crc_errors)},
        {"rx_illegal_byte_errors", offsetof(struct ice_hw_port_stats,
                illegal_bytes)},
        {"rx_error_bytes", offsetof(struct ice_hw_port_stats, error_bytes)},
        {"mac_local_errors", offsetof(struct ice_hw_port_stats,
                mac_local_faults)},
        {"mac_remote_errors", offsetof(struct ice_hw_port_stats,
                mac_remote_faults)},
        {"rx_len_errors", offsetof(struct ice_hw_port_stats,
                rx_len_errors)},
        {"tx_xon_packets", offsetof(struct ice_hw_port_stats, link_xon_tx)},
        {"rx_xon_packets", offsetof(struct ice_hw_port_stats, link_xon_rx)},
        {"tx_xoff_packets", offsetof(struct ice_hw_port_stats, link_xoff_tx)},
        {"rx_xoff_packets", offsetof(struct ice_hw_port_stats, link_xoff_rx)},
        {"rx_size_64_packets", offsetof(struct ice_hw_port_stats, rx_size_64)},
        {"rx_size_65_to_127_packets", offsetof(struct ice_hw_port_stats,
                rx_size_127)},
        {"rx_size_128_to_255_packets", offsetof(struct ice_hw_port_stats,
                rx_size_255)},
        {"rx_size_256_to_511_packets", offsetof(struct ice_hw_port_stats,
                rx_size_511)},
        {"rx_size_512_to_1023_packets", offsetof(struct ice_hw_port_stats,
                rx_size_1023)},
        {"rx_size_1024_to_1522_packets", offsetof(struct ice_hw_port_stats,
                rx_size_1522)},
        {"rx_size_1523_to_max_packets", offsetof(struct ice_hw_port_stats,
                rx_size_big)},
        {"rx_undersized_errors", offsetof(struct ice_hw_port_stats,
                rx_undersize)},
        {"rx_oversize_errors", offsetof(struct ice_hw_port_stats,
                rx_oversize)},
        {"rx_mac_short_pkt_dropped", offsetof(struct ice_hw_port_stats,
                mac_short_pkt_dropped)},
        {"rx_fragmented_errors", offsetof(struct ice_hw_port_stats,
                rx_fragments)},
        {"rx_jabber_errors", offsetof(struct ice_hw_port_stats, rx_jabber)},
        {"tx_size_64_packets", offsetof(struct ice_hw_port_stats, tx_size_64)},
        {"tx_size_65_to_127_packets", offsetof(struct ice_hw_port_stats,
                tx_size_127)},
        {"tx_size_128_to_255_packets", offsetof(struct ice_hw_port_stats,
                tx_size_255)},
        {"tx_size_256_to_511_packets", offsetof(struct ice_hw_port_stats,
                tx_size_511)},
        {"tx_size_512_to_1023_packets", offsetof(struct ice_hw_port_stats,
                tx_size_1023)},
        {"tx_size_1024_to_1522_packets", offsetof(struct ice_hw_port_stats,
                tx_size_1522)},
        {"tx_size_1523_to_max_packets", offsetof(struct ice_hw_port_stats,
                tx_size_big)},
};

#define ICE_NB_HW_PORT_XSTATS (sizeof(ice_hw_port_strings) / \
                sizeof(ice_hw_port_strings[0]))

static void
ice_init_controlq_parameter(struct ice_hw *hw)
{
        /* fields for the admin queue */
        hw->adminq.num_rq_entries = ICE_ADMINQ_LEN;
        hw->adminq.num_sq_entries = ICE_ADMINQ_LEN;
        hw->adminq.rq_buf_size = ICE_ADMINQ_BUF_SZ;
        hw->adminq.sq_buf_size = ICE_ADMINQ_BUF_SZ;

        /* fields for the mailbox queue; DPDK acts as the PF host */
        hw->mailboxq.num_rq_entries = ICE_MAILBOXQ_LEN;
        hw->mailboxq.num_sq_entries = ICE_MAILBOXQ_LEN;
        hw->mailboxq.rq_buf_size = ICE_MAILBOXQ_BUF_SZ;
        hw->mailboxq.sq_buf_size = ICE_MAILBOXQ_BUF_SZ;
}

static int
lookup_proto_xtr_type(const char *xtr_name)
{
        static struct {
                const char *name;
                enum proto_xtr_type type;
        } xtr_type_map[] = {
                { "vlan",      PROTO_XTR_VLAN      },
                { "ipv4",      PROTO_XTR_IPV4      },
                { "ipv6",      PROTO_XTR_IPV6      },
                { "ipv6_flow", PROTO_XTR_IPV6_FLOW },
                { "tcp",       PROTO_XTR_TCP       },
                { "ip_offset", PROTO_XTR_IP_OFFSET },
        };
        uint32_t i;

        for (i = 0; i < RTE_DIM(xtr_type_map); i++) {
                if (strcmp(xtr_name, xtr_type_map[i].name) == 0)
                        return xtr_type_map[i].type;
        }

        return -1;
}

/*
 * Parse an element of a queue set. An element is a single number, a
 * range, or a '(' ')' group:
 * 1) A single number element is just a digit, e.g. 9
 * 2) A single range element is two digits with a '-' between, e.g. 2-6
 * 3) A group element combines multiple 1) or 2) with '( )', e.g. (0,2-4,6)
 *    Within a group element, '-' is the range separator and
 *                            ',' separates single numbers.
 */
static int
parse_queue_set(const char *input, int xtr_type, struct ice_devargs *devargs)
{
        const char *str = input;
        char *end = NULL;
        uint32_t min, max;
        uint32_t idx;

        while (isblank(*str))
                str++;

        if (!isdigit(*str) && *str != '(')
                return -1;

        /* process a single number or a single range of numbers */
        if (*str != '(') {
                errno = 0;
                idx = strtoul(str, &end, 10);
                if (errno || end == NULL || idx >= ICE_MAX_QUEUE_NUM)
                        return -1;

                while (isblank(*end))
                        end++;

                min = idx;
                max = idx;

                /* process a single <number>-<number> */
                if (*end == '-') {
                        end++;
                        while (isblank(*end))
                                end++;
                        if (!isdigit(*end))
                                return -1;

                        errno = 0;
                        idx = strtoul(end, &end, 10);
                        if (errno || end == NULL || idx >= ICE_MAX_QUEUE_NUM)
                                return -1;

                        max = idx;
                        while (isblank(*end))
                                end++;
                }

                if (*end != ':')
                        return -1;

                for (idx = RTE_MIN(min, max);
                     idx <= RTE_MAX(min, max); idx++)
                        devargs->proto_xtr[idx] = xtr_type;

                return 0;
        }

        /* process a set within brackets */
        str++;
        while (isblank(*str))
                str++;
        if (*str == '\0')
                return -1;

        min = ICE_MAX_QUEUE_NUM;
        do {
                /* go ahead to the first digit */
                while (isblank(*str))
                        str++;
                if (!isdigit(*str))
                        return -1;

                /* get the digit value */
                errno = 0;
                idx = strtoul(str, &end, 10);
                if (errno || end == NULL || idx >= ICE_MAX_QUEUE_NUM)
                        return -1;

                /* go ahead to the separator '-', ',' or ')' */
                while (isblank(*end))
                        end++;
                if (*end == '-') {
                        if (min == ICE_MAX_QUEUE_NUM)
                                min = idx;
                        else /* avoid continuous '-' */
                                return -1;
                } else if (*end == ',' || *end == ')') {
                        max = idx;
                        if (min == ICE_MAX_QUEUE_NUM)
                                min = idx;

                        for (idx = RTE_MIN(min, max);
                             idx <= RTE_MAX(min, max); idx++)
                                devargs->proto_xtr[idx] = xtr_type;

                        min = ICE_MAX_QUEUE_NUM;
                } else {
                        return -1;
                }

                str = end + 1;
        } while (*end != ')' && *end != '\0');

        return 0;
}

static int
parse_queue_proto_xtr(const char *queues, struct ice_devargs *devargs)
{
        const char *queue_start;
        uint32_t idx;
        int xtr_type;
        char xtr_name[32];

        while (isblank(*queues))
                queues++;

        if (*queues != '[') {
                xtr_type = lookup_proto_xtr_type(queues);
                if (xtr_type < 0)
                        return -1;

                devargs->proto_xtr_dflt = xtr_type;

                return 0;
        }

        queues++;
        do {
                while (isblank(*queues))
                        queues++;
                if (*queues == '\0')
                        return -1;

                queue_start = queues;

                /* go across a complete bracket */
                if (*queue_start == '(') {
                        queues += strcspn(queues, ")");
                        if (*queues != ')')
                                return -1;
                }

                /* scan the separator ':' */
                queues += strcspn(queues, ":");
                if (*queues++ != ':')
                        return -1;
                while (isblank(*queues))
                        queues++;

                for (idx = 0; ; idx++) {
                        if (isblank(queues[idx]) ||
                            queues[idx] == ',' ||
                            queues[idx] == ']' ||
                            queues[idx] == '\0')
                                break;

                        if (idx > sizeof(xtr_name) - 2)
                                return -1;

                        xtr_name[idx] = queues[idx];
                }
                xtr_name[idx] = '\0';
                xtr_type = lookup_proto_xtr_type(xtr_name);
                if (xtr_type < 0)
                        return -1;

                queues += idx;

                while (isblank(*queues) || *queues == ',' || *queues == ']')
                        queues++;

                if (parse_queue_set(queue_start, xtr_type, devargs) < 0)
                        return -1;
        } while (*queues != '\0');

        return 0;
}

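/*
 * Illustrative devargs strings accepted by the two parsers above (the
 * queue numbers here are examples chosen for this comment, not values
 * copied from the documentation):
 *
 *   proto_xtr=vlan              -- default extraction type for all queues
 *   proto_xtr='[2:ipv4]'        -- queue 2 only
 *   proto_xtr='[2-6:ipv6]'      -- queue range 2..6
 *   proto_xtr='[(0,2-4,6):tcp]' -- a group mixing numbers and ranges
 */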
static int
handle_proto_xtr_arg(__rte_unused const char *key, const char *value,
                     void *extra_args)
{
        struct ice_devargs *devargs = extra_args;

        if (value == NULL || extra_args == NULL)
                return -EINVAL;

        if (parse_queue_proto_xtr(value, devargs) < 0) {
                PMD_DRV_LOG(ERR,
                            "Invalid protocol extraction parameter: '%s'",
                            value);
                return -1;
        }

        return 0;
}

static void
ice_check_proto_xtr_support(struct ice_hw *hw)
{
#define FLX_REG(val, fld, idx) \
        (((val) & GLFLXP_RXDID_FLX_WRD_##idx##_##fld##_M) >> \
         GLFLXP_RXDID_FLX_WRD_##idx##_##fld##_S)
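/* For example, FLX_REG(v, PROT_MDID, 4) expands to
 * ((v & GLFLXP_RXDID_FLX_WRD_4_PROT_MDID_M) >>
 *  GLFLXP_RXDID_FLX_WRD_4_PROT_MDID_S),
 * i.e. it extracts one field of flexible descriptor word 4 (or 5).
 */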
        static struct {
                uint32_t rxdid;
                uint8_t opcode;
                uint8_t protid_0;
                uint8_t protid_1;
        } xtr_sets[] = {
                [PROTO_XTR_VLAN] = { ICE_RXDID_COMMS_AUX_VLAN,
                                     ICE_RX_OPC_EXTRACT,
                                     ICE_PROT_EVLAN_O, ICE_PROT_VLAN_O },
                [PROTO_XTR_IPV4] = { ICE_RXDID_COMMS_AUX_IPV4,
                                     ICE_RX_OPC_EXTRACT,
                                     ICE_PROT_IPV4_OF_OR_S,
                                     ICE_PROT_IPV4_OF_OR_S },
                [PROTO_XTR_IPV6] = { ICE_RXDID_COMMS_AUX_IPV6,
                                     ICE_RX_OPC_EXTRACT,
                                     ICE_PROT_IPV6_OF_OR_S,
                                     ICE_PROT_IPV6_OF_OR_S },
                [PROTO_XTR_IPV6_FLOW] = { ICE_RXDID_COMMS_AUX_IPV6_FLOW,
                                          ICE_RX_OPC_EXTRACT,
                                          ICE_PROT_IPV6_OF_OR_S,
                                          ICE_PROT_IPV6_OF_OR_S },
                [PROTO_XTR_TCP] = { ICE_RXDID_COMMS_AUX_TCP,
                                    ICE_RX_OPC_EXTRACT,
                                    ICE_PROT_TCP_IL, ICE_PROT_ID_INVAL },
                [PROTO_XTR_IP_OFFSET] = { ICE_RXDID_COMMS_AUX_IP_OFFSET,
                                          ICE_RX_OPC_PROTID,
                                          ICE_PROT_IPV4_OF_OR_S,
                                          ICE_PROT_IPV6_OF_OR_S },
        };
        uint32_t i;

        for (i = 0; i < RTE_DIM(xtr_sets); i++) {
                uint32_t rxdid = xtr_sets[i].rxdid;
                uint32_t v;

                if (xtr_sets[i].protid_0 != ICE_PROT_ID_INVAL) {
                        v = ICE_READ_REG(hw, GLFLXP_RXDID_FLX_WRD_4(rxdid));

                        if (FLX_REG(v, PROT_MDID, 4) == xtr_sets[i].protid_0 &&
                            FLX_REG(v, RXDID_OPCODE, 4) == xtr_sets[i].opcode)
                                ice_proto_xtr_hw_support[i] = true;
                }

                if (xtr_sets[i].protid_1 != ICE_PROT_ID_INVAL) {
                        v = ICE_READ_REG(hw, GLFLXP_RXDID_FLX_WRD_5(rxdid));

                        if (FLX_REG(v, PROT_MDID, 5) == xtr_sets[i].protid_1 &&
                            FLX_REG(v, RXDID_OPCODE, 5) == xtr_sets[i].opcode)
                                ice_proto_xtr_hw_support[i] = true;
                }
        }
}

static int
ice_res_pool_init(struct ice_res_pool_info *pool, uint32_t base,
                  uint32_t num)
{
        struct pool_entry *entry;

        if (!pool || !num)
                return -EINVAL;

        entry = rte_zmalloc(NULL, sizeof(*entry), 0);
        if (!entry) {
                PMD_INIT_LOG(ERR,
                             "Failed to allocate memory for resource pool");
                return -ENOMEM;
        }

        /* Initialize the queue heap */
        pool->num_free = num;
        pool->num_alloc = 0;
        pool->base = base;
        LIST_INIT(&pool->alloc_list);
        LIST_INIT(&pool->free_list);

        /* Initialize the first free element to cover the whole pool */
        entry->base = 0;
        entry->len = num;

        LIST_INSERT_HEAD(&pool->free_list, entry, next);
        return 0;
}

static int
ice_res_pool_alloc(struct ice_res_pool_info *pool,
                   uint16_t num)
{
        struct pool_entry *entry, *valid_entry;

        if (!pool || !num) {
                PMD_INIT_LOG(ERR, "Invalid parameter");
                return -EINVAL;
        }

        if (pool->num_free < num) {
                PMD_INIT_LOG(ERR, "No resource. ask:%u, available:%u",
                             num, pool->num_free);
                return -ENOMEM;
        }

        valid_entry = NULL;
        /* Look up the free list and find the best-fit entry */
        LIST_FOREACH(entry, &pool->free_list, next) {
                if (entry->len >= num) {
                        /* An exact fit is the best one */
                        if (entry->len == num) {
                                valid_entry = entry;
                                break;
                        }
                        if (!valid_entry ||
                            valid_entry->len > entry->len)
                                valid_entry = entry;
                }
        }

        /* No entry satisfies the request, return */
        if (!valid_entry) {
                PMD_INIT_LOG(ERR, "No valid entry found");
                return -ENOMEM;
        }
        /**
         * The entry has exactly the requested number of queues;
         * remove it from the free_list.
         */
        if (valid_entry->len == num) {
                LIST_REMOVE(valid_entry, next);
        } else {
                /**
                 * The entry has more queues than requested; create a new
                 * entry for the alloc_list and shrink the base and length
                 * of the free_list entry accordingly.
                 */
                entry = rte_zmalloc(NULL, sizeof(*entry), 0);
                if (!entry) {
                        PMD_INIT_LOG(ERR,
                                     "Failed to allocate memory for "
                                     "resource pool");
                        return -ENOMEM;
                }
                entry->base = valid_entry->base;
                entry->len = num;
                valid_entry->base += num;
                valid_entry->len -= num;
                valid_entry = entry;
        }

        /* Insert it into the alloc list (not sorted) */
        LIST_INSERT_HEAD(&pool->alloc_list, valid_entry, next);

        pool->num_free -= valid_entry->len;
        pool->num_alloc += valid_entry->len;

        return valid_entry->base + pool->base;
}

static void
ice_res_pool_destroy(struct ice_res_pool_info *pool)
{
        struct pool_entry *entry, *next_entry;

        if (!pool)
                return;

        for (entry = LIST_FIRST(&pool->alloc_list);
             entry && (next_entry = LIST_NEXT(entry, next), 1);
             entry = next_entry) {
                LIST_REMOVE(entry, next);
                rte_free(entry);
        }

        for (entry = LIST_FIRST(&pool->free_list);
             entry && (next_entry = LIST_NEXT(entry, next), 1);
             entry = next_entry) {
                LIST_REMOVE(entry, next);
                rte_free(entry);
        }

        pool->num_free = 0;
        pool->num_alloc = 0;
        pool->base = 0;
        LIST_INIT(&pool->alloc_list);
        LIST_INIT(&pool->free_list);
}

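/*
 * Minimal usage sketch for the resource pool helpers above (the numbers
 * are illustrative assumptions, not values used by the driver):
 *
 *   struct ice_res_pool_info pool;
 *
 *   ice_res_pool_init(&pool, base, 64);    // one free entry of length 64
 *   int q = ice_res_pool_alloc(&pool, 8);  // best fit; returns base + 0
 *   ...
 *   ice_res_pool_destroy(&pool);           // frees both lists
 */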
static void
ice_vsi_config_default_rss(struct ice_aqc_vsi_props *info)
{
        /* Set VSI LUT selection */
        info->q_opt_rss = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI &
                          ICE_AQ_VSI_Q_OPT_RSS_LUT_M;
        /* Set the hash scheme */
        info->q_opt_rss |= ICE_AQ_VSI_Q_OPT_RSS_TPLZ &
                           ICE_AQ_VSI_Q_OPT_RSS_HASH_M;
        /* Enable TC */
        info->q_opt_tc = ICE_AQ_VSI_Q_OPT_TC_OVR_M;
}

static enum ice_status
ice_vsi_config_tc_queue_mapping(struct ice_vsi *vsi,
                                struct ice_aqc_vsi_props *info,
                                uint8_t enabled_tcmap)
{
        uint16_t bsf, qp_idx;

        /* Default to TC0 for now; multi-TC support needs to be added later.
         * Configure the TC and queue mapping parameters: for each enabled TC,
         * allocate qpnum_per_tc queues to that traffic class.
         */
        if (enabled_tcmap != 0x01) {
                PMD_INIT_LOG(ERR, "only TC0 is supported");
                return -ENOTSUP;
        }

        vsi->nb_qps = RTE_MIN(vsi->nb_qps, ICE_MAX_Q_PER_TC);
        bsf = rte_bsf32(vsi->nb_qps);
        /* Adjust the queue number to the actual queues that can be applied */
        vsi->nb_qps = 0x1 << bsf;

        qp_idx = 0;
        /* Set the TC and queue mapping for the VSI */
        info->tc_mapping[0] = rte_cpu_to_le_16((qp_idx <<
                                                ICE_AQ_VSI_TC_Q_OFFSET_S) |
                                               (bsf << ICE_AQ_VSI_TC_Q_NUM_S));

        /* Associate the queue number with the VSI */
        info->mapping_flags |= rte_cpu_to_le_16(ICE_AQ_VSI_Q_MAP_CONTIG);
        info->q_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
        info->q_mapping[1] = rte_cpu_to_le_16(vsi->nb_qps);
        info->valid_sections |=
                rte_cpu_to_le_16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
        /* Set info.ingress_table and info.egress_table for the
         * UP translate table. For now just set a 1:1 map by default
         * -- 0b 111 110 101 100 011 010 001 000 == 0xFAC688
         */
#define ICE_TC_QUEUE_TABLE_DFLT 0x00FAC688
        info->ingress_table  = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT);
        info->egress_table   = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT);
        info->outer_up_table = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT);
        return 0;
}

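/*
 * Worked example of the tc_mapping encoding above (assumed values):
 * with qp_idx = 0 and vsi->nb_qps = 16, bsf = rte_bsf32(16) = 4, so
 *
 *   tc_mapping[0] = (0 << ICE_AQ_VSI_TC_Q_OFFSET_S) |
 *                   (4 << ICE_AQ_VSI_TC_Q_NUM_S);
 *
 * i.e. the queue count is stored as a power-of-two exponent rather than
 * a plain count, which is why nb_qps is rounded to 1 << bsf first.
 */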
static int
ice_init_mac_address(struct rte_eth_dev *dev)
{
        struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        if (!rte_is_unicast_ether_addr
                ((struct rte_ether_addr *)hw->port_info[0].mac.lan_addr)) {
                PMD_INIT_LOG(ERR, "Invalid MAC address");
                return -EINVAL;
        }

        rte_ether_addr_copy(
                (struct rte_ether_addr *)hw->port_info[0].mac.lan_addr,
                (struct rte_ether_addr *)hw->port_info[0].mac.perm_addr);

        dev->data->mac_addrs =
                rte_zmalloc(NULL, sizeof(struct rte_ether_addr), 0);
        if (!dev->data->mac_addrs) {
                PMD_INIT_LOG(ERR,
                             "Failed to allocate memory to store mac address");
                return -ENOMEM;
        }
        /* store it in the device data */
        rte_ether_addr_copy(
                (struct rte_ether_addr *)hw->port_info[0].mac.perm_addr,
                &dev->data->mac_addrs[0]);
        return 0;
}

/* Find a specific MAC filter */
static struct ice_mac_filter *
ice_find_mac_filter(struct ice_vsi *vsi, struct rte_ether_addr *macaddr)
{
        struct ice_mac_filter *f;

        TAILQ_FOREACH(f, &vsi->mac_list, next) {
                if (rte_is_same_ether_addr(macaddr, &f->mac_info.mac_addr))
                        return f;
        }

        return NULL;
}

static int
ice_add_mac_filter(struct ice_vsi *vsi, struct rte_ether_addr *mac_addr)
{
        struct ice_fltr_list_entry *m_list_itr = NULL;
        struct ice_mac_filter *f;
        struct LIST_HEAD_TYPE list_head;
        struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
        int ret = 0;

        /* If it's already added and configured, return */
        f = ice_find_mac_filter(vsi, mac_addr);
        if (f) {
                PMD_DRV_LOG(INFO, "This MAC filter already exists.");
                return 0;
        }

        INIT_LIST_HEAD(&list_head);

        m_list_itr = (struct ice_fltr_list_entry *)
                ice_malloc(hw, sizeof(*m_list_itr));
        if (!m_list_itr) {
                ret = -ENOMEM;
                goto DONE;
        }
        ice_memcpy(m_list_itr->fltr_info.l_data.mac.mac_addr,
                   mac_addr, ETH_ALEN, ICE_NONDMA_TO_NONDMA);
        m_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
        m_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
        m_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_MAC;
        m_list_itr->fltr_info.flag = ICE_FLTR_TX;
        m_list_itr->fltr_info.vsi_handle = vsi->idx;

        LIST_ADD(&m_list_itr->list_entry, &list_head);

        /* Add the MAC filter */
        ret = ice_add_mac(hw, &list_head);
        if (ret != ICE_SUCCESS) {
                PMD_DRV_LOG(ERR, "Failed to add MAC filter");
                ret = -EINVAL;
                goto DONE;
        }
        /* Add the MAC address to the SW mac list */
        f = rte_zmalloc(NULL, sizeof(*f), 0);
        if (!f) {
                PMD_DRV_LOG(ERR, "failed to allocate memory");
                ret = -ENOMEM;
                goto DONE;
        }
        rte_ether_addr_copy(mac_addr, &f->mac_info.mac_addr);
        TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
        vsi->mac_num++;

        ret = 0;

DONE:
        rte_free(m_list_itr);
        return ret;
}

static int
ice_remove_mac_filter(struct ice_vsi *vsi, struct rte_ether_addr *mac_addr)
{
        struct ice_fltr_list_entry *m_list_itr = NULL;
        struct ice_mac_filter *f;
        struct LIST_HEAD_TYPE list_head;
        struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
        int ret = 0;

        /* If it can't be found, return an error */
        f = ice_find_mac_filter(vsi, mac_addr);
        if (!f)
                return -EINVAL;

        INIT_LIST_HEAD(&list_head);

        m_list_itr = (struct ice_fltr_list_entry *)
                ice_malloc(hw, sizeof(*m_list_itr));
        if (!m_list_itr) {
                ret = -ENOMEM;
                goto DONE;
        }
        ice_memcpy(m_list_itr->fltr_info.l_data.mac.mac_addr,
                   mac_addr, ETH_ALEN, ICE_NONDMA_TO_NONDMA);
        m_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
        m_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
        m_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_MAC;
        m_list_itr->fltr_info.flag = ICE_FLTR_TX;
        m_list_itr->fltr_info.vsi_handle = vsi->idx;

        LIST_ADD(&m_list_itr->list_entry, &list_head);

        /* Remove the MAC filter */
        ret = ice_remove_mac(hw, &list_head);
        if (ret != ICE_SUCCESS) {
                PMD_DRV_LOG(ERR, "Failed to remove MAC filter");
                ret = -EINVAL;
                goto DONE;
        }

        /* Remove the MAC address from the SW mac list */
        TAILQ_REMOVE(&vsi->mac_list, f, next);
        rte_free(f);
        vsi->mac_num--;

        ret = 0;
DONE:
        rte_free(m_list_itr);
        return ret;
}

/* Find a specific VLAN filter */
static struct ice_vlan_filter *
ice_find_vlan_filter(struct ice_vsi *vsi, struct ice_vlan *vlan)
{
        struct ice_vlan_filter *f;

        TAILQ_FOREACH(f, &vsi->vlan_list, next) {
                if (vlan->tpid == f->vlan_info.vlan.tpid &&
                    vlan->vid == f->vlan_info.vlan.vid)
                        return f;
        }

        return NULL;
}

static int
ice_add_vlan_filter(struct ice_vsi *vsi, struct ice_vlan *vlan)
{
        struct ice_fltr_list_entry *v_list_itr = NULL;
        struct ice_vlan_filter *f;
        struct LIST_HEAD_TYPE list_head;
        struct ice_hw *hw;
        int ret = 0;

        if (!vsi || vlan->vid > RTE_ETHER_MAX_VLAN_ID)
                return -EINVAL;

        hw = ICE_VSI_TO_HW(vsi);

        /* If it's already added and configured, return. */
        f = ice_find_vlan_filter(vsi, vlan);
        if (f) {
                PMD_DRV_LOG(INFO, "This VLAN filter already exists.");
                return 0;
        }

        if (!vsi->vlan_anti_spoof_on && !vsi->vlan_filter_on)
                return 0;

        INIT_LIST_HEAD(&list_head);

        v_list_itr = (struct ice_fltr_list_entry *)
                      ice_malloc(hw, sizeof(*v_list_itr));
        if (!v_list_itr) {
                ret = -ENOMEM;
                goto DONE;
        }
        v_list_itr->fltr_info.l_data.vlan.vlan_id = vlan->vid;
        v_list_itr->fltr_info.l_data.vlan.tpid = vlan->tpid;
        v_list_itr->fltr_info.l_data.vlan.tpid_valid = true;
        v_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
        v_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
        v_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
        v_list_itr->fltr_info.flag = ICE_FLTR_TX;
        v_list_itr->fltr_info.vsi_handle = vsi->idx;

        LIST_ADD(&v_list_itr->list_entry, &list_head);

        /* Add the VLAN filter */
        ret = ice_add_vlan(hw, &list_head);
        if (ret != ICE_SUCCESS) {
                PMD_DRV_LOG(ERR, "Failed to add VLAN filter");
                ret = -EINVAL;
                goto DONE;
        }

        /* Add the VLAN to the SW vlan list */
        f = rte_zmalloc(NULL, sizeof(*f), 0);
        if (!f) {
                PMD_DRV_LOG(ERR, "failed to allocate memory");
                ret = -ENOMEM;
                goto DONE;
        }
        f->vlan_info.vlan.tpid = vlan->tpid;
        f->vlan_info.vlan.vid = vlan->vid;
        TAILQ_INSERT_TAIL(&vsi->vlan_list, f, next);
        vsi->vlan_num++;

        ret = 0;

DONE:
        rte_free(v_list_itr);
        return ret;
}

static int
ice_remove_vlan_filter(struct ice_vsi *vsi, struct ice_vlan *vlan)
{
        struct ice_fltr_list_entry *v_list_itr = NULL;
        struct ice_vlan_filter *f;
        struct LIST_HEAD_TYPE list_head;
        struct ice_hw *hw;
        int ret = 0;

        if (!vsi || vlan->vid > RTE_ETHER_MAX_VLAN_ID)
                return -EINVAL;

        hw = ICE_VSI_TO_HW(vsi);

        /* If it can't be found, return an error */
        f = ice_find_vlan_filter(vsi, vlan);
        if (!f)
                return -EINVAL;

        INIT_LIST_HEAD(&list_head);

        v_list_itr = (struct ice_fltr_list_entry *)
                      ice_malloc(hw, sizeof(*v_list_itr));
        if (!v_list_itr) {
                ret = -ENOMEM;
                goto DONE;
        }

        v_list_itr->fltr_info.l_data.vlan.vlan_id = vlan->vid;
        v_list_itr->fltr_info.l_data.vlan.tpid = vlan->tpid;
        v_list_itr->fltr_info.l_data.vlan.tpid_valid = true;
        v_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
        v_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
        v_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
        v_list_itr->fltr_info.flag = ICE_FLTR_TX;
        v_list_itr->fltr_info.vsi_handle = vsi->idx;

        LIST_ADD(&v_list_itr->list_entry, &list_head);

        /* Remove the VLAN filter */
        ret = ice_remove_vlan(hw, &list_head);
        if (ret != ICE_SUCCESS) {
                PMD_DRV_LOG(ERR, "Failed to remove VLAN filter");
                ret = -EINVAL;
                goto DONE;
        }

        /* Remove the VLAN ID from the SW vlan list */
        TAILQ_REMOVE(&vsi->vlan_list, f, next);
        rte_free(f);
        vsi->vlan_num--;

        ret = 0;
DONE:
        rte_free(v_list_itr);
        return ret;
}

static int
ice_remove_all_mac_vlan_filters(struct ice_vsi *vsi)
{
        struct ice_mac_filter *m_f;
        struct ice_vlan_filter *v_f;
        int ret = 0;

        if (!vsi || !vsi->mac_num)
                return -EINVAL;

        /* ice_remove_mac_filter() unlinks and frees the entry, so iterate
         * with TAILQ_FIRST instead of TAILQ_FOREACH to avoid touching a
         * freed node.
         */
        while ((m_f = TAILQ_FIRST(&vsi->mac_list)) != NULL) {
                ret = ice_remove_mac_filter(vsi, &m_f->mac_info.mac_addr);
                if (ret != ICE_SUCCESS) {
                        ret = -EINVAL;
                        goto DONE;
                }
        }

        if (vsi->vlan_num == 0)
                return 0;

        while ((v_f = TAILQ_FIRST(&vsi->vlan_list)) != NULL) {
                ret = ice_remove_vlan_filter(vsi, &v_f->vlan_info.vlan);
                if (ret != ICE_SUCCESS) {
                        ret = -EINVAL;
                        goto DONE;
                }
        }

DONE:
        return ret;
}

/* Enable IRQ0 */
static void
ice_pf_enable_irq0(struct ice_hw *hw)
{
        /* reset the registers */
        ICE_WRITE_REG(hw, PFINT_OICR_ENA, 0);
        ICE_READ_REG(hw, PFINT_OICR);

#ifdef ICE_LSE_SPT
        ICE_WRITE_REG(hw, PFINT_OICR_ENA,
                      (uint32_t)(PFINT_OICR_ENA_INT_ENA_M &
                                 (~PFINT_OICR_LINK_STAT_CHANGE_M)));

        ICE_WRITE_REG(hw, PFINT_OICR_CTL,
                      (0 & PFINT_OICR_CTL_MSIX_INDX_M) |
                      ((0 << PFINT_OICR_CTL_ITR_INDX_S) &
                       PFINT_OICR_CTL_ITR_INDX_M) |
                      PFINT_OICR_CTL_CAUSE_ENA_M);

        ICE_WRITE_REG(hw, PFINT_FW_CTL,
                      (0 & PFINT_FW_CTL_MSIX_INDX_M) |
                      ((0 << PFINT_FW_CTL_ITR_INDX_S) &
                       PFINT_FW_CTL_ITR_INDX_M) |
                      PFINT_FW_CTL_CAUSE_ENA_M);
#else
        ICE_WRITE_REG(hw, PFINT_OICR_ENA, PFINT_OICR_ENA_INT_ENA_M);
#endif

        ICE_WRITE_REG(hw, GLINT_DYN_CTL(0),
                      GLINT_DYN_CTL_INTENA_M |
                      GLINT_DYN_CTL_CLEARPBA_M |
                      GLINT_DYN_CTL_ITR_INDX_M);

        ice_flush(hw);
}

/* Disable IRQ0 */
static void
ice_pf_disable_irq0(struct ice_hw *hw)
{
        /* Disable all interrupt types */
        ICE_WRITE_REG(hw, GLINT_DYN_CTL(0), GLINT_DYN_CTL_WB_ON_ITR_M);
        ice_flush(hw);
}

#ifdef ICE_LSE_SPT
static void
ice_handle_aq_msg(struct rte_eth_dev *dev)
{
        struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct ice_ctl_q_info *cq = &hw->adminq;
        struct ice_rq_event_info event;
        uint16_t pending, opcode;
        int ret;

        event.buf_len = ICE_AQ_MAX_BUF_LEN;
        event.msg_buf = rte_zmalloc(NULL, event.buf_len, 0);
        if (!event.msg_buf) {
                PMD_DRV_LOG(ERR, "Failed to allocate mem");
                return;
        }

        pending = 1;
        while (pending) {
                ret = ice_clean_rq_elem(hw, cq, &event, &pending);

                if (ret != ICE_SUCCESS) {
                        PMD_DRV_LOG(INFO,
                                    "Failed to read msg from AdminQ, "
                                    "adminq_err: %u",
                                    hw->adminq.sq_last_status);
                        break;
                }
                opcode = rte_le_to_cpu_16(event.desc.opcode);

                switch (opcode) {
                case ice_aqc_opc_get_link_status:
                        ret = ice_link_update(dev, 0);
                        if (!ret)
                                rte_eth_dev_callback_process
                                        (dev, RTE_ETH_EVENT_INTR_LSC, NULL);
                        break;
                default:
                        PMD_DRV_LOG(DEBUG, "Request %u is not supported yet",
                                    opcode);
                        break;
                }
        }
        rte_free(event.msg_buf);
}
#endif

/**
 * Interrupt handler triggered by the NIC for handling
 * specific interrupts.
 *
 * @param param
 *  The address of the parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
ice_interrupt_handler(void *param)
{
        struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
        struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t oicr;
        uint32_t reg;
        uint8_t pf_num;
        uint8_t event;
        uint16_t queue;
        int ret;
#ifdef ICE_LSE_SPT
        uint32_t int_fw_ctl;
#endif

        /* Disable interrupt */
        ice_pf_disable_irq0(hw);

        /* read out interrupt causes */
        oicr = ICE_READ_REG(hw, PFINT_OICR);
#ifdef ICE_LSE_SPT
        int_fw_ctl = ICE_READ_REG(hw, PFINT_FW_CTL);
#endif

        /* No interrupt event indicated */
        if (!(oicr & PFINT_OICR_INTEVENT_M)) {
                PMD_DRV_LOG(INFO, "No interrupt event");
                goto done;
        }

#ifdef ICE_LSE_SPT
        if (int_fw_ctl & PFINT_FW_CTL_INTEVENT_M) {
                PMD_DRV_LOG(INFO, "FW_CTL: link state change event");
                ice_handle_aq_msg(dev);
        }
#else
        if (oicr & PFINT_OICR_LINK_STAT_CHANGE_M) {
                PMD_DRV_LOG(INFO, "OICR: link state change event");
                ret = ice_link_update(dev, 0);
                if (!ret)
                        rte_eth_dev_callback_process
                                (dev, RTE_ETH_EVENT_INTR_LSC, NULL);
        }
#endif

        if (oicr & PFINT_OICR_MAL_DETECT_M) {
                PMD_DRV_LOG(WARNING, "OICR: MDD event");
                reg = ICE_READ_REG(hw, GL_MDET_TX_PQM);
                if (reg & GL_MDET_TX_PQM_VALID_M) {
                        pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >>
                                 GL_MDET_TX_PQM_PF_NUM_S;
                        event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >>
                                GL_MDET_TX_PQM_MAL_TYPE_S;
                        queue = (reg & GL_MDET_TX_PQM_QNUM_M) >>
                                GL_MDET_TX_PQM_QNUM_S;

                        PMD_DRV_LOG(WARNING, "Malicious Driver Detection event "
                                    "%d by PQM on TX queue %d PF# %d",
                                    event, queue, pf_num);
                }

                reg = ICE_READ_REG(hw, GL_MDET_TX_TCLAN);
                if (reg & GL_MDET_TX_TCLAN_VALID_M) {
                        pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >>
                                 GL_MDET_TX_TCLAN_PF_NUM_S;
                        event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >>
                                GL_MDET_TX_TCLAN_MAL_TYPE_S;
                        queue = (reg & GL_MDET_TX_TCLAN_QNUM_M) >>
                                GL_MDET_TX_TCLAN_QNUM_S;

                        PMD_DRV_LOG(WARNING, "Malicious Driver Detection event "
                                    "%d by TCLAN on TX queue %d PF# %d",
                                    event, queue, pf_num);
                }
        }
done:
        /* Enable interrupt */
        ice_pf_enable_irq0(hw);
        rte_intr_ack(dev->intr_handle);
}

1310 static void
1311 ice_init_proto_xtr(struct rte_eth_dev *dev)
1312 {
1313         struct ice_adapter *ad =
1314                         ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1315         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1316         struct ice_hw *hw = ICE_PF_TO_HW(pf);
1317         const struct proto_xtr_ol_flag *ol_flag;
1318         bool proto_xtr_enable = false;
1319         int offset;
1320         uint16_t i;
1321
1322         pf->proto_xtr = rte_zmalloc(NULL, pf->lan_nb_qps, 0);
1323         if (unlikely(pf->proto_xtr == NULL)) {
1324                 PMD_DRV_LOG(ERR, "No memory for setting up protocol extraction table");
1325                 return;
1326         }
1327
1328         for (i = 0; i < pf->lan_nb_qps; i++) {
1329                 pf->proto_xtr[i] = ad->devargs.proto_xtr[i] != PROTO_XTR_NONE ?
1330                                    ad->devargs.proto_xtr[i] :
1331                                    ad->devargs.proto_xtr_dflt;
1332
1333                 if (pf->proto_xtr[i] != PROTO_XTR_NONE) {
1334                         uint8_t type = pf->proto_xtr[i];
1335
1336                         ice_proto_xtr_ol_flag_params[type].required = true;
1337                         proto_xtr_enable = true;
1338                 }
1339         }
1340
1341         if (likely(!proto_xtr_enable))
1342                 return;
1343
1344         ice_check_proto_xtr_support(hw);
1345
1346         offset = rte_mbuf_dynfield_register(&ice_proto_xtr_metadata_param);
1347         if (unlikely(offset == -1)) {
1348                 PMD_DRV_LOG(ERR,
1349                             "Protocol extraction metadata is disabled in mbuf with error %d",
1350                             -rte_errno);
1351                 return;
1352         }
1353
1354         PMD_DRV_LOG(DEBUG,
1355                     "Protocol extraction metadata offset in mbuf is : %d",
1356                     offset);
1357         rte_net_ice_dynfield_proto_xtr_metadata_offs = offset;
1358
1359         for (i = 0; i < RTE_DIM(ice_proto_xtr_ol_flag_params); i++) {
1360                 ol_flag = &ice_proto_xtr_ol_flag_params[i];
1361
1362                 if (!ol_flag->required)
1363                         continue;
1364
1365                 if (!ice_proto_xtr_hw_support[i]) {
1366                         PMD_DRV_LOG(ERR,
1367                                     "Protocol extraction type %u is not supported in hardware",
1368                                     i);
1369                         rte_net_ice_dynfield_proto_xtr_metadata_offs = -1;
1370                         break;
1371                 }
1372
1373                 offset = rte_mbuf_dynflag_register(&ol_flag->param);
1374                 if (unlikely(offset == -1)) {
1375                         PMD_DRV_LOG(ERR,
1376                                     "Protocol extraction offload '%s' failed to register with error %d",
1377                                     ol_flag->param.name, -rte_errno);
1378
1379                         rte_net_ice_dynfield_proto_xtr_metadata_offs = -1;
1380                         break;
1381                 }
1382
1383                 PMD_DRV_LOG(DEBUG,
1384                             "Protocol extraction offload '%s' offset in mbuf is: %d",
1385                             ol_flag->param.name, offset);
1386                 *ol_flag->ol_flag = 1ULL << offset;
1387         }
1388 }
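
/*
 * Editor's sketch, not part of the driver: once the dynfield and dynflags
 * above are registered, an application can read the extracted metadata from
 * a received mbuf through the public rte_pmd_ice.h helpers (that header is
 * already included by this file). The exact helper names used below are an
 * assumption of this sketch.
 */
static __rte_unused void
ice_proto_xtr_dump_example(struct rte_mbuf *m)
{
	/* The dynfield offset stays -1 until ice_init_proto_xtr() registers it */
	if (!rte_net_ice_dynf_proto_xtr_metadata_avail())
		return;

	printf("proto_xtr metadata: 0x%08x\n",
	       rte_net_ice_dynf_proto_xtr_metadata_get(m));
}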
1389
1390 /* Initialize SW parameters of PF */
1391 static int
1392 ice_pf_sw_init(struct rte_eth_dev *dev)
1393 {
1394         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1395         struct ice_hw *hw = ICE_PF_TO_HW(pf);
1396
1397         pf->lan_nb_qp_max =
1398                 (uint16_t)RTE_MIN(hw->func_caps.common_cap.num_txq,
1399                                   hw->func_caps.common_cap.num_rxq);
1400
1401         pf->lan_nb_qps = pf->lan_nb_qp_max;
1402
1403         ice_init_proto_xtr(dev);
1404
1405         if (hw->func_caps.fd_fltr_guar > 0 ||
1406             hw->func_caps.fd_fltr_best_effort > 0) {
1407                 pf->flags |= ICE_FLAG_FDIR;
1408                 pf->fdir_nb_qps = ICE_DEFAULT_QP_NUM_FDIR;
1409                 pf->lan_nb_qps = pf->lan_nb_qp_max - pf->fdir_nb_qps;
1410         } else {
1411                 pf->fdir_nb_qps = 0;
1412         }
1413         pf->fdir_qp_offset = 0;
1414
1415         return 0;
1416 }
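
/*
 * Worked example for the queue split above (numbers made up): with
 * num_txq = num_rxq = 64, lan_nb_qp_max is 64; if the flow director
 * capability is present, ICE_DEFAULT_QP_NUM_FDIR queue pairs are carved
 * out of that budget and lan_nb_qps holds the remainder.
 */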
1417
1418 struct ice_vsi *
1419 ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type)
1420 {
1421         struct ice_hw *hw = ICE_PF_TO_HW(pf);
1422         struct ice_vsi *vsi = NULL;
1423         struct ice_vsi_ctx vsi_ctx;
1424         int ret;
1425         struct rte_ether_addr broadcast = {
1426                 .addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff} };
1427         struct rte_ether_addr mac_addr;
1428         uint16_t max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
1429         uint8_t tc_bitmap = 0x1;
1430         uint16_t cfg;
1431
1432         /* hw->num_lports = 1 in NIC mode */
1433         vsi = rte_zmalloc(NULL, sizeof(struct ice_vsi), 0);
1434         if (!vsi)
1435                 return NULL;
1436
1437         vsi->idx = pf->next_vsi_idx;
1438         pf->next_vsi_idx++;
1439         vsi->type = type;
1440         vsi->adapter = ICE_PF_TO_ADAPTER(pf);
1441         vsi->max_macaddrs = ICE_NUM_MACADDR_MAX;
1442         vsi->vlan_anti_spoof_on = 0;
1443         vsi->vlan_filter_on = 1;
1444         TAILQ_INIT(&vsi->mac_list);
1445         TAILQ_INIT(&vsi->vlan_list);
1446
1447         /* Keep in sync with the ETH_RSS_RETA_SIZE_x maximum value definitions */
1448         pf->hash_lut_size = hw->func_caps.common_cap.rss_table_size >
1449                         ETH_RSS_RETA_SIZE_512 ? ETH_RSS_RETA_SIZE_512 :
1450                         hw->func_caps.common_cap.rss_table_size;
1451         pf->flags |= ICE_FLAG_RSS_AQ_CAPABLE;
1452
1453         memset(&vsi_ctx, 0, sizeof(vsi_ctx));
1454         switch (type) {
1455         case ICE_VSI_PF:
1456                 vsi->nb_qps = pf->lan_nb_qps;
1457                 vsi->base_queue = 1;
1458                 ice_vsi_config_default_rss(&vsi_ctx.info);
1459                 vsi_ctx.alloc_from_pool = true;
1460                 vsi_ctx.flags = ICE_AQ_VSI_TYPE_PF;
1461                 /* switch_id is queried by get_switch_config aq, which is done
1462                  * by ice_init_hw
1463                  */
1464                 vsi_ctx.info.sw_id = hw->port_info->sw_id;
1465                 vsi_ctx.info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
1466                 /* Allow all untagged or tagged packets */
1467                 vsi_ctx.info.inner_vlan_flags = ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL;
1468                 vsi_ctx.info.inner_vlan_flags |= ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING;
1469                 vsi_ctx.info.q_opt_rss = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF |
1470                                          ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
1471                 if (ice_is_dvm_ena(hw)) {
1472                         vsi_ctx.info.outer_vlan_flags =
1473                                 (ICE_AQ_VSI_OUTER_VLAN_TX_MODE_ALL <<
1474                                  ICE_AQ_VSI_OUTER_VLAN_TX_MODE_S) &
1475                                 ICE_AQ_VSI_OUTER_VLAN_TX_MODE_M;
1476                         vsi_ctx.info.outer_vlan_flags |=
1477                                 (ICE_AQ_VSI_OUTER_TAG_VLAN_8100 <<
1478                                  ICE_AQ_VSI_OUTER_TAG_TYPE_S) &
1479                                 ICE_AQ_VSI_OUTER_TAG_TYPE_M;
1480                 }
1481
1482                 /* FDIR */
1483                 cfg = ICE_AQ_VSI_PROP_SECURITY_VALID |
1484                         ICE_AQ_VSI_PROP_FLOW_DIR_VALID;
1485                 vsi_ctx.info.valid_sections |= rte_cpu_to_le_16(cfg);
1486                 cfg = ICE_AQ_VSI_FD_ENABLE;
1487                 vsi_ctx.info.fd_options = rte_cpu_to_le_16(cfg);
1488                 vsi_ctx.info.max_fd_fltr_dedicated =
1489                         rte_cpu_to_le_16(hw->func_caps.fd_fltr_guar);
1490                 vsi_ctx.info.max_fd_fltr_shared =
1491                         rte_cpu_to_le_16(hw->func_caps.fd_fltr_best_effort);
1492
1493                 /* Enable VLAN/UP trip */
1494                 ret = ice_vsi_config_tc_queue_mapping(vsi,
1495                                                       &vsi_ctx.info,
1496                                                       ICE_DEFAULT_TCMAP);
1497                 if (ret) {
1498                         PMD_INIT_LOG(ERR,
1499                                      "tc queue mapping with vsi failed, "
1500                                      "err = %d",
1501                                      ret);
1502                         goto fail_mem;
1503                 }
1504
1505                 break;
1506         case ICE_VSI_CTRL:
1507                 vsi->nb_qps = pf->fdir_nb_qps;
1508                 vsi->base_queue = ICE_FDIR_QUEUE_ID;
1509                 vsi_ctx.alloc_from_pool = true;
1510                 vsi_ctx.flags = ICE_AQ_VSI_TYPE_PF;
1511
1512                 cfg = ICE_AQ_VSI_PROP_FLOW_DIR_VALID;
1513                 vsi_ctx.info.valid_sections |= rte_cpu_to_le_16(cfg);
1514                 cfg = ICE_AQ_VSI_FD_PROG_ENABLE;
1515                 vsi_ctx.info.fd_options = rte_cpu_to_le_16(cfg);
1516                 vsi_ctx.info.sw_id = hw->port_info->sw_id;
1517                 vsi_ctx.info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
1518                 ret = ice_vsi_config_tc_queue_mapping(vsi,
1519                                                       &vsi_ctx.info,
1520                                                       ICE_DEFAULT_TCMAP);
1521                 if (ret) {
1522                         PMD_INIT_LOG(ERR,
1523                                      "tc queue mapping with vsi failed, "
1524                                      "err = %d",
1525                                      ret);
1526                         goto fail_mem;
1527                 }
1528                 break;
1529         default:
1530                 /* for other types of VSI */
1531                 PMD_INIT_LOG(ERR, "other types of VSI not supported");
1532                 goto fail_mem;
1533         }
1534
1535         /* VF has MSIX interrupt in VF range, don't allocate here */
1536         if (type == ICE_VSI_PF) {
1537                 ret = ice_res_pool_alloc(&pf->msix_pool,
1538                                          RTE_MIN(vsi->nb_qps,
1539                                                  RTE_MAX_RXTX_INTR_VEC_ID));
1540                 if (ret < 0) {
1541                         PMD_INIT_LOG(ERR, "VSI MAIN %d get heap failed %d",
1542                                      vsi->vsi_id, ret);
1543                 }
1544                 vsi->msix_intr = ret;
1545                 vsi->nb_msix = RTE_MIN(vsi->nb_qps, RTE_MAX_RXTX_INTR_VEC_ID);
1546         } else if (type == ICE_VSI_CTRL) {
1547                 ret = ice_res_pool_alloc(&pf->msix_pool, 1);
1548                 if (ret < 0) {
1549                         PMD_DRV_LOG(ERR, "VSI %d get heap failed %d",
1550                                     vsi->vsi_id, ret);
1551                 }
1552                 vsi->msix_intr = ret;
1553                 vsi->nb_msix = 1;
1554         } else {
1555                 vsi->msix_intr = 0;
1556                 vsi->nb_msix = 0;
1557         }
1558         ret = ice_add_vsi(hw, vsi->idx, &vsi_ctx, NULL);
1559         if (ret != ICE_SUCCESS) {
1560                 PMD_INIT_LOG(ERR, "add vsi failed, err = %d", ret);
1561                 goto fail_mem;
1562         }
1563         /* store VSI information in the SW structure */
1564         vsi->vsi_id = vsi_ctx.vsi_num;
1565         vsi->info = vsi_ctx.info;
1566         pf->vsis_allocated = vsi_ctx.vsis_allocd;
1567         pf->vsis_unallocated = vsi_ctx.vsis_unallocated;
1568
1569         if (type == ICE_VSI_PF) {
1570                 /* MAC configuration */
1571                 rte_ether_addr_copy((struct rte_ether_addr *)
1572                                         hw->port_info->mac.perm_addr,
1573                                     &pf->dev_addr);
1574
1575                 rte_ether_addr_copy(&pf->dev_addr, &mac_addr);
1576                 ret = ice_add_mac_filter(vsi, &mac_addr);
1577                 if (ret != ICE_SUCCESS)
1578                         PMD_INIT_LOG(ERR, "Failed to add dflt MAC filter");
1579
1580                 rte_ether_addr_copy(&broadcast, &mac_addr);
1581                 ret = ice_add_mac_filter(vsi, &mac_addr);
1582                 if (ret != ICE_SUCCESS)
1583                         PMD_INIT_LOG(ERR, "Failed to add MAC filter");
1584         }
1585
1586         /* At the beginning, only TC0. */
1587         /* What we need here is the maximum number of the Tx queues.
1588          * Currently vsi->nb_qps holds it.
1589          * Correct this if that ever changes.
1590          */
1591         max_txqs[0] = vsi->nb_qps;
1592         ret = ice_cfg_vsi_lan(hw->port_info, vsi->idx,
1593                               tc_bitmap, max_txqs);
1594         if (ret != ICE_SUCCESS)
1595                 PMD_INIT_LOG(ERR, "Failed to config vsi sched");
1596
1597         return vsi;
1598 fail_mem:
1599         rte_free(vsi);
1600         pf->next_vsi_idx--;
1601         return NULL;
1602 }
1603
1604 static int
1605 ice_send_driver_ver(struct ice_hw *hw)
1606 {
1607         struct ice_driver_ver dv;
1608
1609         /* we don't have a driver version, so use 0 as a dummy */
1610         dv.major_ver = 0;
1611         dv.minor_ver = 0;
1612         dv.build_ver = 0;
1613         dv.subbuild_ver = 0;
1614         strncpy((char *)dv.driver_string, "dpdk", sizeof(dv.driver_string));
1615
1616         return ice_aq_send_driver_ver(hw, &dv, NULL);
1617 }
1618
1619 static int
1620 ice_pf_setup(struct ice_pf *pf)
1621 {
1622         struct ice_hw *hw = ICE_PF_TO_HW(pf);
1623         struct ice_vsi *vsi;
1624         uint16_t unused;
1625
1626         /* Clear all stats counters */
1627         pf->offset_loaded = false;
1628         memset(&pf->stats, 0, sizeof(struct ice_hw_port_stats));
1629         memset(&pf->stats_offset, 0, sizeof(struct ice_hw_port_stats));
1630         memset(&pf->internal_stats, 0, sizeof(struct ice_eth_stats));
1631         memset(&pf->internal_stats_offset, 0, sizeof(struct ice_eth_stats));
1632
1633         /* force guaranteed filter pool for PF */
1634         ice_alloc_fd_guar_item(hw, &unused,
1635                                hw->func_caps.fd_fltr_guar);
1636         /* force shared filter pool for PF */
1637         ice_alloc_fd_shrd_item(hw, &unused,
1638                                hw->func_caps.fd_fltr_best_effort);
1639
1640         vsi = ice_setup_vsi(pf, ICE_VSI_PF);
1641         if (!vsi) {
1642                 PMD_INIT_LOG(ERR, "Failed to add vsi for PF");
1643                 return -EINVAL;
1644         }
1645
1646         pf->main_vsi = vsi;
1647
1648         return 0;
1649 }
1650
1651 /*
1652  * Extract device serial number from PCIe Configuration Space and
1653  * determine the pkg file path according to the DSN.
1654  */
1655 static int
1656 ice_pkg_file_search_path(struct rte_pci_device *pci_dev, char *pkg_file)
1657 {
1658         off_t pos;
1659         char opt_ddp_filename[ICE_MAX_PKG_FILENAME_SIZE];
1660         uint32_t dsn_low, dsn_high;
1661         memset(opt_ddp_filename, 0, ICE_MAX_PKG_FILENAME_SIZE);
1662
1663         pos = rte_pci_find_ext_capability(pci_dev, RTE_PCI_EXT_CAP_ID_DSN);
1664
1665         if (pos) {
1666                 if (rte_pci_read_config(pci_dev, &dsn_low, 4, pos + 4) < 0) {
1667                         PMD_INIT_LOG(ERR, "Failed to read pci config space\n");
1668                         return -1;
1669                 }
1670                 if (rte_pci_read_config(pci_dev, &dsn_high, 4, pos + 8) < 0) {
1671                         PMD_INIT_LOG(ERR, "Failed to read pci config space\n");
1672                         return -1;
1673                 }
1674                 snprintf(opt_ddp_filename, ICE_MAX_PKG_FILENAME_SIZE,
1675                          "ice-%08x%08x.pkg", dsn_high, dsn_low);
1676         } else {
1677                 PMD_INIT_LOG(ERR, "Failed to read device serial number\n");
1678                 goto fail_dsn;
1679         }
1680
1681         strncpy(pkg_file, ICE_PKG_FILE_SEARCH_PATH_UPDATES,
1682                 ICE_MAX_PKG_FILENAME_SIZE);
1683         if (!access(strcat(pkg_file, opt_ddp_filename), 0))
1684                 return 0;
1685
1686         strncpy(pkg_file, ICE_PKG_FILE_SEARCH_PATH_DEFAULT,
1687                 ICE_MAX_PKG_FILENAME_SIZE);
1688         if (!access(strcat(pkg_file, opt_ddp_filename), 0))
1689                 return 0;
1690
1691 fail_dsn:
1692         strncpy(pkg_file, ICE_PKG_FILE_UPDATES, ICE_MAX_PKG_FILENAME_SIZE);
1693         if (!access(pkg_file, 0))
1694                 return 0;
1695         strncpy(pkg_file, ICE_PKG_FILE_DEFAULT, ICE_MAX_PKG_FILENAME_SIZE);
1696         return 0;
1697 }
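
/*
 * Worked example (DSN values made up): for a device whose DSN reads
 * dsn_high = 0x12345678 and dsn_low = 0x9abcdef0, the function above tries,
 * in order:
 *
 *	ICE_PKG_FILE_SEARCH_PATH_UPDATES + "ice-123456789abcdef0.pkg"
 *	ICE_PKG_FILE_SEARCH_PATH_DEFAULT + "ice-123456789abcdef0.pkg"
 *	ICE_PKG_FILE_UPDATES
 *	ICE_PKG_FILE_DEFAULT
 *
 * and returns the first path that exists. Note it falls back to
 * ICE_PKG_FILE_DEFAULT even when access() fails, leaving the open error
 * to be reported by the caller, ice_load_pkg().
 */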
1698
1699 enum ice_pkg_type
1700 ice_load_pkg_type(struct ice_hw *hw)
1701 {
1702         enum ice_pkg_type package_type;
1703
1704         /* store the activated package type (OS default or Comms) */
1705         if (!strncmp((char *)hw->active_pkg_name, ICE_OS_DEFAULT_PKG_NAME,
1706                 ICE_PKG_NAME_SIZE))
1707                 package_type = ICE_PKG_TYPE_OS_DEFAULT;
1708         else if (!strncmp((char *)hw->active_pkg_name, ICE_COMMS_PKG_NAME,
1709                 ICE_PKG_NAME_SIZE))
1710                 package_type = ICE_PKG_TYPE_COMMS;
1711         else
1712                 package_type = ICE_PKG_TYPE_UNKNOWN;
1713
1714         PMD_INIT_LOG(NOTICE, "Active package is: %d.%d.%d.%d, %s (%s VLAN mode)",
1715                 hw->active_pkg_ver.major, hw->active_pkg_ver.minor,
1716                 hw->active_pkg_ver.update, hw->active_pkg_ver.draft,
1717                 hw->active_pkg_name,
1718                 ice_is_dvm_ena(hw) ? "double" : "single");
1719
1720         return package_type;
1721 }
1722
1723 static int ice_load_pkg(struct rte_eth_dev *dev)
1724 {
1725         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1726         char pkg_file[ICE_MAX_PKG_FILENAME_SIZE];
1727         int err;
1728         uint8_t *buf;
1729         int buf_len;
1730         FILE *file;
1731         struct stat fstat;
1732         struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
1733         struct ice_adapter *ad =
1734                 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1735
1736         err = ice_pkg_file_search_path(pci_dev, pkg_file);
1737         if (err) {
1738                 PMD_INIT_LOG(ERR, "failed to find the DDP package file path\n");
1739                 return err;
1740         }
1741
1742         file = fopen(pkg_file, "rb");
1743         if (!file)  {
1744                 PMD_INIT_LOG(ERR, "failed to open file: %s\n", pkg_file);
1745                 return -1;
1746         }
1747
1748         err = stat(pkg_file, &fstat);
1749         if (err) {
1750                 PMD_INIT_LOG(ERR, "failed to get file stats\n");
1751                 fclose(file);
1752                 return err;
1753         }
1754
1755         buf_len = fstat.st_size;
1756         buf = rte_malloc(NULL, buf_len, 0);
1757
1758         if (!buf) {
1759                 PMD_INIT_LOG(ERR, "failed to allocate buf of size %d for package\n",
1760                                 buf_len);
1761                 fclose(file);
1762                 return -1;
1763         }
1764
1765         err = fread(buf, buf_len, 1, file);
1766         if (err != 1) {
1767                 PMD_INIT_LOG(ERR, "failed to read package data\n");
1768                 fclose(file);
1769                 err = -1;
1770                 goto fail_exit;
1771         }
1772
1773         fclose(file);
1774
1775         err = ice_copy_and_init_pkg(hw, buf, buf_len);
1776         if (err) {
1777                 PMD_INIT_LOG(ERR, "ice_copy_and_init_pkg failed: %d\n", err);
1778                 goto fail_exit;
1779         }
1780
1781         /* store the loaded pkg type info */
1782         ad->active_pkg_type = ice_load_pkg_type(hw);
1783
1784         err = ice_init_hw_tbls(hw);
1785         if (err) {
1786                 PMD_INIT_LOG(ERR, "ice_init_hw_tbls failed: %d\n", err);
1787                 goto fail_init_tbls;
1788         }
1789
1790         return 0;
1791
1792 fail_init_tbls:
1793         rte_free(hw->pkg_copy);
1794 fail_exit:
1795         rte_free(buf);
1796         return err;
1797 }
1798
1799 static void
1800 ice_base_queue_get(struct ice_pf *pf)
1801 {
1802         uint32_t reg;
1803         struct ice_hw *hw = ICE_PF_TO_HW(pf);
1804
1805         reg = ICE_READ_REG(hw, PFLAN_RX_QALLOC);
1806         if (reg & PFLAN_RX_QALLOC_VALID_M) {
1807                 pf->base_queue = reg & PFLAN_RX_QALLOC_FIRSTQ_M;
1808         } else {
1809                 PMD_INIT_LOG(WARNING, "Failed to get Rx base queue"
1810                                         " index");
1811         }
1812 }
1813
1814 static int
1815 parse_bool(const char *key, const char *value, void *args)
1816 {
1817         int *i = (int *)args;
1818         char *end;
1819         int num;
1820
1821         num = strtoul(value, &end, 10);
1822
1823         if (num != 0 && num != 1) {
1824                 PMD_DRV_LOG(WARNING, "invalid value:\"%s\" for key:\"%s\", "
1825                         "value must be 0 or 1",
1826                         value, key);
1827                 return -1;
1828         }
1829
1830         *i = num;
1831         return 0;
1832 }
1833
1834 static int ice_parse_devargs(struct rte_eth_dev *dev)
1835 {
1836         struct ice_adapter *ad =
1837                 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1838         struct rte_devargs *devargs = dev->device->devargs;
1839         struct rte_kvargs *kvlist;
1840         int ret;
1841
1842         if (devargs == NULL)
1843                 return 0;
1844
1845         kvlist = rte_kvargs_parse(devargs->args, ice_valid_args);
1846         if (kvlist == NULL) {
1847                 PMD_INIT_LOG(ERR, "Invalid kvargs key\n");
1848                 return -EINVAL;
1849         }
1850
1851         ad->devargs.proto_xtr_dflt = PROTO_XTR_NONE;
1852         memset(ad->devargs.proto_xtr, PROTO_XTR_NONE,
1853                sizeof(ad->devargs.proto_xtr));
1854
1855         ret = rte_kvargs_process(kvlist, ICE_PROTO_XTR_ARG,
1856                                  &handle_proto_xtr_arg, &ad->devargs);
1857         if (ret)
1858                 goto bail;
1859
1860         ret = rte_kvargs_process(kvlist, ICE_SAFE_MODE_SUPPORT_ARG,
1861                                  &parse_bool, &ad->devargs.safe_mode_support);
1862         if (ret)
1863                 goto bail;
1864
1865         ret = rte_kvargs_process(kvlist, ICE_PIPELINE_MODE_SUPPORT_ARG,
1866                                  &parse_bool, &ad->devargs.pipe_mode_support);
1867         if (ret)
1868                 goto bail;
1869
1870 bail:
1871         rte_kvargs_free(kvlist);
1872         return ret;
1873 }
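
/*
 * Editor's example, not part of the driver: these devargs are supplied on
 * the EAL device allow-list, e.g. (PCI address and queue indices made up):
 *
 *	dpdk-testpmd -a 18:00.0,safe-mode-support=1,proto_xtr='[(0,2-3):tcp,4:vlan]'
 *
 * safe-mode-support and pipeline-mode-support accept only 0 or 1, as
 * enforced by parse_bool() above; proto_xtr maps queues to an extraction
 * type and is parsed by handle_proto_xtr_arg().
 */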
1874
1875 /* Forward LLDP packets to the default VSI by setting switch rules */
1876 static int
1877 ice_vsi_config_sw_lldp(struct ice_vsi *vsi, bool on)
1878 {
1879         struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
1880         struct ice_fltr_list_entry *s_list_itr = NULL;
1881         struct LIST_HEAD_TYPE list_head;
1882         int ret = 0;
1883
1884         INIT_LIST_HEAD(&list_head);
1885
1886         s_list_itr = (struct ice_fltr_list_entry *)
1887                         ice_malloc(hw, sizeof(*s_list_itr));
1888         if (!s_list_itr)
1889                 return -ENOMEM;
1890         s_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_ETHERTYPE;
1891         s_list_itr->fltr_info.vsi_handle = vsi->idx;
1892         s_list_itr->fltr_info.l_data.ethertype_mac.ethertype =
1893                         RTE_ETHER_TYPE_LLDP;
1894         s_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
1895         s_list_itr->fltr_info.flag = ICE_FLTR_RX;
1896         s_list_itr->fltr_info.src_id = ICE_SRC_ID_LPORT;
1897         LIST_ADD(&s_list_itr->list_entry, &list_head);
1898         if (on)
1899                 ret = ice_add_eth_mac(hw, &list_head);
1900         else
1901                 ret = ice_remove_eth_mac(hw, &list_head);
1902
1903         rte_free(s_list_itr);
1904         return ret;
1905 }
1906
1907 static enum ice_status
1908 ice_get_hw_res(struct ice_hw *hw, uint16_t res_type,
1909                 uint16_t num, uint16_t desc_id,
1910                 uint16_t *prof_buf, uint16_t *num_prof)
1911 {
1912         struct ice_aqc_res_elem *resp_buf;
1913         int ret;
1914         uint16_t buf_len;
1915         bool res_shared = true;
1916         struct ice_aq_desc aq_desc;
1917         struct ice_sq_cd *cd = NULL;
1918         struct ice_aqc_get_allocd_res_desc *cmd =
1919                         &aq_desc.params.get_res_desc;
1920
1921         buf_len = sizeof(*resp_buf) * num;
1922         resp_buf = ice_malloc(hw, buf_len);
1923         if (!resp_buf)
1924                 return -ENOMEM;
1925
1926         ice_fill_dflt_direct_cmd_desc(&aq_desc,
1927                         ice_aqc_opc_get_allocd_res_desc);
1928
1929         cmd->ops.cmd.res = CPU_TO_LE16(((res_type << ICE_AQC_RES_TYPE_S) &
1930                                 ICE_AQC_RES_TYPE_M) | (res_shared ?
1931                                 ICE_AQC_RES_TYPE_FLAG_SHARED : 0));
1932         cmd->ops.cmd.first_desc = CPU_TO_LE16(desc_id);
1933
1934         ret = ice_aq_send_cmd(hw, &aq_desc, resp_buf, buf_len, cd);
1935         if (!ret)
1936                 *num_prof = LE16_TO_CPU(cmd->ops.resp.num_desc);
1937         else
1938                 goto exit;
1939
1940         ice_memcpy(prof_buf, resp_buf, sizeof(*resp_buf) *
1941                         (*num_prof), ICE_NONDMA_TO_NONDMA);
1942
1943 exit:
1944         rte_free(resp_buf);
1945         return ret;
1946 }
1947 static int
1948 ice_cleanup_resource(struct ice_hw *hw, uint16_t res_type)
1949 {
1950         int ret;
1951         uint16_t prof_id;
1952         uint16_t prof_buf[ICE_MAX_RES_DESC_NUM];
1953         uint16_t first_desc = 1;
1954         uint16_t num_prof = 0;
1955
1956         ret = ice_get_hw_res(hw, res_type, ICE_MAX_RES_DESC_NUM,
1957                         first_desc, prof_buf, &num_prof);
1958         if (ret) {
1959                 PMD_INIT_LOG(ERR, "Failed to get fxp resource");
1960                 return ret;
1961         }
1962
1963         for (prof_id = 0; prof_id < num_prof; prof_id++) {
1964                 ret = ice_free_hw_res(hw, res_type, 1, &prof_buf[prof_id]);
1965                 if (ret) {
1966                         PMD_INIT_LOG(ERR, "Failed to free fxp resource");
1967                         return ret;
1968                 }
1969         }
1970         return 0;
1971 }
1972
1973 static int
1974 ice_reset_fxp_resource(struct ice_hw *hw)
1975 {
1976         int ret;
1977
1978         ret = ice_cleanup_resource(hw, ICE_AQC_RES_TYPE_FD_PROF_BLDR_PROFID);
1979         if (ret) {
1980                 PMD_INIT_LOG(ERR, "Failed to clean up fdir resource");
1981                 return ret;
1982         }
1983
1984         ret = ice_cleanup_resource(hw, ICE_AQC_RES_TYPE_HASH_PROF_BLDR_PROFID);
1985         if (ret) {
1986                 PMD_INIT_LOG(ERR, "Failed to clean up rss resource");
1987                 return ret;
1988         }
1989
1990         return 0;
1991 }
1992
1993 static void
1994 ice_rss_ctx_init(struct ice_pf *pf)
1995 {
1996         memset(&pf->hash_ctx, 0, sizeof(pf->hash_ctx));
1997 }
1998
1999 static uint64_t
2000 ice_get_supported_rxdid(struct ice_hw *hw)
2001 {
2002         uint64_t supported_rxdid = 0; /* bitmap for supported RXDID */
2003         uint32_t regval;
2004         int i;
2005
2006         supported_rxdid |= BIT(ICE_RXDID_LEGACY_1);
2007
2008         for (i = ICE_RXDID_FLEX_NIC; i < ICE_FLEX_DESC_RXDID_MAX_NUM; i++) {
2009                 regval = ICE_READ_REG(hw, GLFLXP_RXDID_FLAGS(i, 0));
2010                 if ((regval >> GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S)
2011                         & GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M)
2012                         supported_rxdid |= BIT(i);
2013         }
2014         return supported_rxdid;
2015 }
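
/*
 * Editor's sketch, not part of the driver: pf->supported_rxdid is a plain
 * bitmap, so a later queue-setup path can validate a requested flexible
 * descriptor ID with a test along these lines:
 *
 *	if (!(pf->supported_rxdid & BIT(rxdid))) {
 *		PMD_DRV_LOG(ERR, "RXDID %u is not supported", rxdid);
 *		return -EINVAL;
 *	}
 */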
2016
2017 static int
2018 ice_dev_init(struct rte_eth_dev *dev)
2019 {
2020         struct rte_pci_device *pci_dev;
2021         struct rte_intr_handle *intr_handle;
2022         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2023         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2024         struct ice_adapter *ad =
2025                 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2026         struct ice_vsi *vsi;
2027         int ret;
2028
2029         dev->dev_ops = &ice_eth_dev_ops;
2030         dev->rx_queue_count = ice_rx_queue_count;
2031         dev->rx_descriptor_status = ice_rx_descriptor_status;
2032         dev->tx_descriptor_status = ice_tx_descriptor_status;
2033         dev->rx_pkt_burst = ice_recv_pkts;
2034         dev->tx_pkt_burst = ice_xmit_pkts;
2035         dev->tx_pkt_prepare = ice_prep_pkts;
2036
2037         /* for secondary processes, we don't initialise any further as primary
2038          * has already done this work.
2039          */
2040         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2041                 ice_set_rx_function(dev);
2042                 ice_set_tx_function(dev);
2043                 return 0;
2044         }
2045
2046         dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
2047
2048         ice_set_default_ptype_table(dev);
2049         pci_dev = RTE_DEV_TO_PCI(dev->device);
2050         intr_handle = &pci_dev->intr_handle;
2051
2052         pf->adapter = ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2053         pf->adapter->eth_dev = dev;
2054         pf->dev_data = dev->data;
2055         hw->back = pf->adapter;
2056         hw->hw_addr = (uint8_t *)pci_dev->mem_resource[0].addr;
2057         hw->vendor_id = pci_dev->id.vendor_id;
2058         hw->device_id = pci_dev->id.device_id;
2059         hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
2060         hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
2061         hw->bus.device = pci_dev->addr.devid;
2062         hw->bus.func = pci_dev->addr.function;
2063
2064         ret = ice_parse_devargs(dev);
2065         if (ret) {
2066                 PMD_INIT_LOG(ERR, "Failed to parse devargs");
2067                 return -EINVAL;
2068         }
2069
2070         ice_init_controlq_parameter(hw);
2071
2072         ret = ice_init_hw(hw);
2073         if (ret) {
2074                 PMD_INIT_LOG(ERR, "Failed to initialize HW");
2075                 return -EINVAL;
2076         }
2077
2078         ret = ice_load_pkg(dev);
2079         if (ret) {
2080                 if (ad->devargs.safe_mode_support == 0) {
2081                         PMD_INIT_LOG(ERR, "Failed to load the DDP package, "
2082                                         "use safe-mode-support=1 to enter Safe Mode");
2083                         return ret;
2084                 }
2085
2086                 PMD_INIT_LOG(WARNING, "Failed to load the DDP package, "
2087                                         "entering Safe Mode");
2088                 ad->is_safe_mode = 1;
2089         }
2090
2091         PMD_INIT_LOG(INFO, "FW %d.%d.%05d API %d.%d",
2092                      hw->fw_maj_ver, hw->fw_min_ver, hw->fw_build,
2093                      hw->api_maj_ver, hw->api_min_ver);
2094
2095         ice_pf_sw_init(dev);
2096         ret = ice_init_mac_address(dev);
2097         if (ret) {
2098                 PMD_INIT_LOG(ERR, "Failed to initialize mac address");
2099                 goto err_init_mac;
2100         }
2101
2102         ret = ice_res_pool_init(&pf->msix_pool, 1,
2103                                 hw->func_caps.common_cap.num_msix_vectors - 1);
2104         if (ret) {
2105                 PMD_INIT_LOG(ERR, "Failed to init MSIX pool");
2106                 goto err_msix_pool_init;
2107         }
2108
2109         ret = ice_pf_setup(pf);
2110         if (ret) {
2111                 PMD_INIT_LOG(ERR, "Failed to setup PF");
2112                 goto err_pf_setup;
2113         }
2114
2115         ret = ice_send_driver_ver(hw);
2116         if (ret) {
2117                 PMD_INIT_LOG(ERR, "Failed to send driver version");
2118                 goto err_pf_setup;
2119         }
2120
2121         vsi = pf->main_vsi;
2122
2123         ret = ice_aq_stop_lldp(hw, true, false, NULL);
2124         if (ret != ICE_SUCCESS)
2125                 PMD_INIT_LOG(DEBUG, "lldp has already stopped\n");
2126         ret = ice_init_dcb(hw, true);
2127         if (ret != ICE_SUCCESS)
2128                 PMD_INIT_LOG(DEBUG, "Failed to init DCB\n");
2129         /* Forward LLDP packets to default VSI */
2130         ret = ice_vsi_config_sw_lldp(vsi, true);
2131         if (ret != ICE_SUCCESS)
2132                 PMD_INIT_LOG(DEBUG, "Failed to cfg lldp\n");
2133         /* register callback func to eal lib */
2134         rte_intr_callback_register(intr_handle,
2135                                    ice_interrupt_handler, dev);
2136
2137         ice_pf_enable_irq0(hw);
2138
2139         /* enable uio intr after callback register */
2140         rte_intr_enable(intr_handle);
2141
2142         /* get the base queue pair index in the device */
2143         ice_base_queue_get(pf);
2144
2145         /* Initialize RSS context for gtpu_eh */
2146         ice_rss_ctx_init(pf);
2147
2148         if (!ad->is_safe_mode) {
2149                 ret = ice_flow_init(ad);
2150                 if (ret) {
2151                         PMD_INIT_LOG(ERR, "Failed to initialize flow");
2152                         return ret;
2153                 }
2154         }
2155
2156         ret = ice_reset_fxp_resource(hw);
2157         if (ret) {
2158                 PMD_INIT_LOG(ERR, "Failed to reset fxp resource");
2159                 return ret;
2160         }
2161
2162         pf->supported_rxdid = ice_get_supported_rxdid(hw);
2163
2164         return 0;
2165
2166 err_pf_setup:
2167         ice_res_pool_destroy(&pf->msix_pool);
2168 err_msix_pool_init:
2169         rte_free(dev->data->mac_addrs);
2170         dev->data->mac_addrs = NULL;
2171 err_init_mac:
2172         ice_sched_cleanup_all(hw);
2173         rte_free(hw->port_info);
2174         ice_shutdown_all_ctrlq(hw);
2175         rte_free(pf->proto_xtr);
2176
2177         return ret;
2178 }
2179
2180 int
2181 ice_release_vsi(struct ice_vsi *vsi)
2182 {
2183         struct ice_hw *hw;
2184         struct ice_vsi_ctx vsi_ctx;
2185         enum ice_status ret;
2186         int error = 0;
2187
2188         if (!vsi)
2189                 return error;
2190
2191         hw = ICE_VSI_TO_HW(vsi);
2192
2193         ice_remove_all_mac_vlan_filters(vsi);
2194
2195         memset(&vsi_ctx, 0, sizeof(vsi_ctx));
2196
2197         vsi_ctx.vsi_num = vsi->vsi_id;
2198         vsi_ctx.info = vsi->info;
2199         ret = ice_free_vsi(hw, vsi->idx, &vsi_ctx, false, NULL);
2200         if (ret != ICE_SUCCESS) {
2201                 PMD_INIT_LOG(ERR, "Failed to free vsi by aq, %u", vsi->vsi_id);
2202                 error = -1;
2203         }
2204
2205         rte_free(vsi->rss_lut);
2206         rte_free(vsi->rss_key);
2207         rte_free(vsi);
2208         return error;
2209 }
2210
2211 void
2212 ice_vsi_disable_queues_intr(struct ice_vsi *vsi)
2213 {
2214         struct rte_eth_dev *dev = vsi->adapter->eth_dev;
2215         struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
2216         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2217         struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
2218         uint16_t msix_intr, i;
2219
2220         /* disable interrupts and also clear all the existing config */
2221         for (i = 0; i < vsi->nb_qps; i++) {
2222                 ICE_WRITE_REG(hw, QINT_TQCTL(vsi->base_queue + i), 0);
2223                 ICE_WRITE_REG(hw, QINT_RQCTL(vsi->base_queue + i), 0);
2224                 rte_wmb();
2225         }
2226
2227         if (rte_intr_allow_others(intr_handle))
2228                 /* vfio-pci */
2229                 for (i = 0; i < vsi->nb_msix; i++) {
2230                         msix_intr = vsi->msix_intr + i;
2231                         ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr),
2232                                       GLINT_DYN_CTL_WB_ON_ITR_M);
2233                 }
2234         else
2235                 /* igb_uio */
2236                 ICE_WRITE_REG(hw, GLINT_DYN_CTL(0), GLINT_DYN_CTL_WB_ON_ITR_M);
2237 }
2238
2239 static int
2240 ice_dev_stop(struct rte_eth_dev *dev)
2241 {
2242         struct rte_eth_dev_data *data = dev->data;
2243         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2244         struct ice_vsi *main_vsi = pf->main_vsi;
2245         struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
2246         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2247         uint16_t i;
2248
2249         /* avoid stopping again */
2250         if (pf->adapter_stopped)
2251                 return 0;
2252
2253         /* stop and clear all Rx queues */
2254         for (i = 0; i < data->nb_rx_queues; i++)
2255                 ice_rx_queue_stop(dev, i);
2256
2257         /* stop and clear all Tx queues */
2258         for (i = 0; i < data->nb_tx_queues; i++)
2259                 ice_tx_queue_stop(dev, i);
2260
2261         /* disable all queue interrupts */
2262         ice_vsi_disable_queues_intr(main_vsi);
2263
2264         if (pf->init_link_up)
2265                 ice_dev_set_link_up(dev);
2266         else
2267                 ice_dev_set_link_down(dev);
2268
2269         /* Clean datapath event and queue/vec mapping */
2270         rte_intr_efd_disable(intr_handle);
2271         if (intr_handle->intr_vec) {
2272                 rte_free(intr_handle->intr_vec);
2273                 intr_handle->intr_vec = NULL;
2274         }
2275
2276         pf->adapter_stopped = true;
2277         dev->data->dev_started = 0;
2278
2279         return 0;
2280 }
2281
2282 static int
2283 ice_dev_close(struct rte_eth_dev *dev)
2284 {
2285         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2286         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2287         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2288         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2289         struct ice_adapter *ad =
2290                 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2291         int ret;
2292
2293         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2294                 return 0;
2295
2296         /* Since stop will bring the link down, a link event will be
2297          * triggered; disable the irq first so that deallocating the
2298          * port_info and other resources cannot crash the interrupt
2299          * service thread.
2300          */
2301         ice_pf_disable_irq0(hw);
2302
2303         ret = ice_dev_stop(dev);
2304
2305         if (!ad->is_safe_mode)
2306                 ice_flow_uninit(ad);
2307
2308         /* release all queue resource */
2309         ice_free_queues(dev);
2310
2311         ice_res_pool_destroy(&pf->msix_pool);
2312         ice_release_vsi(pf->main_vsi);
2313         ice_sched_cleanup_all(hw);
2314         ice_free_hw_tbls(hw);
2315         rte_free(hw->port_info);
2316         hw->port_info = NULL;
2317         ice_shutdown_all_ctrlq(hw);
2318         rte_free(pf->proto_xtr);
2319         pf->proto_xtr = NULL;
2320
2321         /* disable uio intr before callback unregister */
2322         rte_intr_disable(intr_handle);
2323
2324         /* unregister callback func from eal lib */
2325         rte_intr_callback_unregister(intr_handle,
2326                                      ice_interrupt_handler, dev);
2327
2328         return ret;
2329 }
2330
2331 static int
2332 ice_dev_uninit(struct rte_eth_dev *dev)
2333 {
2334         ice_dev_close(dev);
2335
2336         return 0;
2337 }
2338
2339 static bool
2340 is_hash_cfg_valid(struct ice_rss_hash_cfg *cfg)
2341 {
2342         return cfg->hash_flds != 0 && cfg->addl_hdrs != 0;
2343 }
2344
2345 static void
2346 hash_cfg_reset(struct ice_rss_hash_cfg *cfg)
2347 {
2348         cfg->hash_flds = 0;
2349         cfg->addl_hdrs = 0;
2350         cfg->symm = 0;
2351         cfg->hdr_type = ICE_RSS_ANY_HEADERS;
2352 }
2353
2354 static int
2355 ice_hash_moveout(struct ice_pf *pf, struct ice_rss_hash_cfg *cfg)
2356 {
2357         enum ice_status status = ICE_SUCCESS;
2358         struct ice_hw *hw = ICE_PF_TO_HW(pf);
2359         struct ice_vsi *vsi = pf->main_vsi;
2360
2361         if (!is_hash_cfg_valid(cfg))
2362                 return -ENOENT;
2363
2364         status = ice_rem_rss_cfg(hw, vsi->idx, cfg);
2365         if (status && status != ICE_ERR_DOES_NOT_EXIST) {
2366                 PMD_DRV_LOG(ERR,
2367                             "ice_rem_rss_cfg failed for VSI:%d, error:%d\n",
2368                             vsi->idx, status);
2369                 return -EBUSY;
2370         }
2371
2372         return 0;
2373 }
2374
2375 static int
2376 ice_hash_moveback(struct ice_pf *pf, struct ice_rss_hash_cfg *cfg)
2377 {
2378         enum ice_status status = ICE_SUCCESS;
2379         struct ice_hw *hw = ICE_PF_TO_HW(pf);
2380         struct ice_vsi *vsi = pf->main_vsi;
2381
2382         if (!is_hash_cfg_valid(cfg))
2383                 return -ENOENT;
2384
2385         status = ice_add_rss_cfg(hw, vsi->idx, cfg);
2386         if (status) {
2387                 PMD_DRV_LOG(ERR,
2388                             "ice_add_rss_cfg failed for VSI:%d, error:%d\n",
2389                             vsi->idx, status);
2390                 return -EBUSY;
2391         }
2392
2393         return 0;
2394 }
2395
2396 static int
2397 ice_hash_remove(struct ice_pf *pf, struct ice_rss_hash_cfg *cfg)
2398 {
2399         int ret;
2400
2401         ret = ice_hash_moveout(pf, cfg);
2402         if (ret && (ret != -ENOENT))
2403                 return ret;
2404
2405         hash_cfg_reset(cfg);
2406
2407         return 0;
2408 }
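
/*
 * The three helpers above form a small protocol used by the GTPU hooks
 * below: ice_hash_remove() deletes a conflicting rule and forgets its
 * cached config; ice_hash_moveout() deletes the rule from hardware but
 * keeps the cached config, so that ice_hash_moveback() can re-program it
 * once the new, conflicting rule has been installed.
 */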
2409
2410 static int
2411 ice_add_rss_cfg_pre_gtpu(struct ice_pf *pf, struct ice_hash_gtpu_ctx *ctx,
2412                          u8 ctx_idx)
2413 {
2414         int ret;
2415
2416         switch (ctx_idx) {
2417         case ICE_HASH_GTPU_CTX_EH_IP:
2418                 ret = ice_hash_remove(pf,
2419                                       &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_UDP]);
2420                 if (ret && (ret != -ENOENT))
2421                         return ret;
2422
2423                 ret = ice_hash_remove(pf,
2424                                       &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_TCP]);
2425                 if (ret && (ret != -ENOENT))
2426                         return ret;
2427
2428                 ret = ice_hash_remove(pf,
2429                                       &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP]);
2430                 if (ret && (ret != -ENOENT))
2431                         return ret;
2432
2433                 ret = ice_hash_remove(pf,
2434                                       &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_UDP]);
2435                 if (ret && (ret != -ENOENT))
2436                         return ret;
2437
2438                 ret = ice_hash_remove(pf,
2439                                       &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_TCP]);
2440                 if (ret && (ret != -ENOENT))
2441                         return ret;
2442
2443                 ret = ice_hash_remove(pf,
2444                                       &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP]);
2445                 if (ret && (ret != -ENOENT))
2446                         return ret;
2447
2448                 ret = ice_hash_remove(pf,
2449                                       &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_UDP]);
2450                 if (ret && (ret != -ENOENT))
2451                         return ret;
2452
2453                 ret = ice_hash_remove(pf,
2454                                       &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_TCP]);
2455                 if (ret && (ret != -ENOENT))
2456                         return ret;
2457
2458                 break;
2459         case ICE_HASH_GTPU_CTX_EH_IP_UDP:
2460                 ret = ice_hash_remove(pf,
2461                                       &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_UDP]);
2462                 if (ret && (ret != -ENOENT))
2463                         return ret;
2464
2465                 ret = ice_hash_remove(pf,
2466                                       &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_UDP]);
2467                 if (ret && (ret != -ENOENT))
2468                         return ret;
2469
2470                 ret = ice_hash_moveout(pf,
2471                                        &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP]);
2472                 if (ret && (ret != -ENOENT))
2473                         return ret;
2474
2475                 ret = ice_hash_moveout(pf,
2476                                        &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_TCP]);
2477                 if (ret && (ret != -ENOENT))
2478                         return ret;
2479
2480                 ret = ice_hash_moveout(pf,
2481                                        &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP]);
2482                 if (ret && (ret != -ENOENT))
2483                         return ret;
2484
2485                 ret = ice_hash_moveout(pf,
2486                                        &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_TCP]);
2487                 if (ret && (ret != -ENOENT))
2488                         return ret;
2489
2490                 break;
2491         case ICE_HASH_GTPU_CTX_EH_IP_TCP:
2492                 ret = ice_hash_remove(pf,
2493                                       &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_TCP]);
2494                 if (ret && (ret != -ENOENT))
2495                         return ret;
2496
2497                 ret = ice_hash_remove(pf,
2498                                       &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_TCP]);
2499                 if (ret && (ret != -ENOENT))
2500                         return ret;
2501
2502                 ret = ice_hash_moveout(pf,
2503                                        &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP]);
2504                 if (ret && (ret != -ENOENT))
2505                         return ret;
2506
2507                 ret = ice_hash_moveout(pf,
2508                                        &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_UDP]);
2509                 if (ret && (ret != -ENOENT))
2510                         return ret;
2511
2512                 ret = ice_hash_moveout(pf,
2513                                        &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP]);
2514                 if (ret && (ret != -ENOENT))
2515                         return ret;
2516
2517                 ret = ice_hash_moveout(pf,
2518                                        &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_UDP]);
2519                 if (ret && (ret != -ENOENT))
2520                         return ret;
2521
2522                 break;
2523         case ICE_HASH_GTPU_CTX_UP_IP:
2524                 ret = ice_hash_remove(pf,
2525                                       &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_UDP]);
2526                 if (ret && (ret != -ENOENT))
2527                         return ret;
2528
2529                 ret = ice_hash_remove(pf,
2530                                       &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_TCP]);
2531                 if (ret && (ret != -ENOENT))
2532                         return ret;
2533
2534                 ret = ice_hash_moveout(pf,
2535                                        &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP]);
2536                 if (ret && (ret != -ENOENT))
2537                         return ret;
2538
2539                 ret = ice_hash_moveout(pf,
2540                                        &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_UDP]);
2541                 if (ret && (ret != -ENOENT))
2542                         return ret;
2543
2544                 ret = ice_hash_moveout(pf,
2545                                        &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_TCP]);
2546                 if (ret && (ret != -ENOENT))
2547                         return ret;
2548
2549                 break;
2550         case ICE_HASH_GTPU_CTX_UP_IP_UDP:
2551         case ICE_HASH_GTPU_CTX_UP_IP_TCP:
2552                 ret = ice_hash_moveout(pf,
2553                                        &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP]);
2554                 if (ret && (ret != -ENOENT))
2555                         return ret;
2556
2557                 ret = ice_hash_moveout(pf,
2558                                        &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_UDP]);
2559                 if (ret && (ret != -ENOENT))
2560                         return ret;
2561
2562                 ret = ice_hash_moveout(pf,
2563                                        &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_TCP]);
2564                 if (ret && (ret != -ENOENT))
2565                         return ret;
2566
2567                 break;
2568         case ICE_HASH_GTPU_CTX_DW_IP:
2569                 ret = ice_hash_remove(pf,
2570                                       &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_UDP]);
2571                 if (ret && (ret != -ENOENT))
2572                         return ret;
2573
2574                 ret = ice_hash_remove(pf,
2575                                       &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_TCP]);
2576                 if (ret && (ret != -ENOENT))
2577                         return ret;
2578
2579                 ret = ice_hash_moveout(pf,
2580                                        &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP]);
2581                 if (ret && (ret != -ENOENT))
2582                         return ret;
2583
2584                 ret = ice_hash_moveout(pf,
2585                                        &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_UDP]);
2586                 if (ret && (ret != -ENOENT))
2587                         return ret;
2588
2589                 ret = ice_hash_moveout(pf,
2590                                        &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_TCP]);
2591                 if (ret && (ret != -ENOENT))
2592                         return ret;
2593
2594                 break;
2595         case ICE_HASH_GTPU_CTX_DW_IP_UDP:
2596         case ICE_HASH_GTPU_CTX_DW_IP_TCP:
2597                 ret = ice_hash_moveout(pf,
2598                                        &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP]);
2599                 if (ret && (ret != -ENOENT))
2600                         return ret;
2601
2602                 ret = ice_hash_moveout(pf,
2603                                        &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_UDP]);
2604                 if (ret && (ret != -ENOENT))
2605                         return ret;
2606
2607                 ret = ice_hash_moveout(pf,
2608                                        &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_TCP]);
2609                 if (ret && (ret != -ENOENT))
2610                         return ret;
2611
2612                 break;
2613         default:
2614                 break;
2615         }
2616
2617         return 0;
2618 }
2619
2620 static u8 calc_gtpu_ctx_idx(uint32_t hdr)
2621 {
2622         u8 eh_idx, ip_idx;
2623
2624         if (hdr & ICE_FLOW_SEG_HDR_GTPU_EH)
2625                 eh_idx = 0;
2626         else if (hdr & ICE_FLOW_SEG_HDR_GTPU_UP)
2627                 eh_idx = 1;
2628         else if (hdr & ICE_FLOW_SEG_HDR_GTPU_DWN)
2629                 eh_idx = 2;
2630         else
2631                 return ICE_HASH_GTPU_CTX_MAX;
2632
2633         ip_idx = 0;
2634         if (hdr & ICE_FLOW_SEG_HDR_UDP)
2635                 ip_idx = 1;
2636         else if (hdr & ICE_FLOW_SEG_HDR_TCP)
2637                 ip_idx = 2;
2638
2639         if (hdr & (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6))
2640                 return eh_idx * 3 + ip_idx;
2641         else
2642                 return ICE_HASH_GTPU_CTX_MAX;
2643 }
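
/*
 * Worked example for the index math above, assuming the ICE_HASH_GTPU_CTX_*
 * enum follows the eh-major, ip-minor order used by the callers here:
 * GTPU_EH | IPV4 | UDP gives eh_idx = 0 and ip_idx = 1, i.e.
 * 0 * 3 + 1 = ICE_HASH_GTPU_CTX_EH_IP_UDP; GTPU_DWN | IPV6 | TCP gives
 * 2 * 3 + 2 = ICE_HASH_GTPU_CTX_DW_IP_TCP. A header set without any GTPU
 * flag, or without an IPv4/IPv6 flag, maps to ICE_HASH_GTPU_CTX_MAX, which
 * the callers treat as "no GTPU context".
 */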
2644
2645 static int
2646 ice_add_rss_cfg_pre(struct ice_pf *pf, uint32_t hdr)
2647 {
2648         u8 gtpu_ctx_idx = calc_gtpu_ctx_idx(hdr);
2649
2650         if (hdr & ICE_FLOW_SEG_HDR_IPV4)
2651                 return ice_add_rss_cfg_pre_gtpu(pf, &pf->hash_ctx.gtpu4,
2652                                                 gtpu_ctx_idx);
2653         else if (hdr & ICE_FLOW_SEG_HDR_IPV6)
2654                 return ice_add_rss_cfg_pre_gtpu(pf, &pf->hash_ctx.gtpu6,
2655                                                 gtpu_ctx_idx);
2656
2657         return 0;
2658 }
2659
2660 static int
2661 ice_add_rss_cfg_post_gtpu(struct ice_pf *pf, struct ice_hash_gtpu_ctx *ctx,
2662                           u8 ctx_idx, struct ice_rss_hash_cfg *cfg)
2663 {
2664         int ret;
2665
2666         if (ctx_idx < ICE_HASH_GTPU_CTX_MAX)
2667                 ctx->ctx[ctx_idx] = *cfg;
2668
2669         switch (ctx_idx) {
2670         case ICE_HASH_GTPU_CTX_EH_IP:
2671                 break;
2672         case ICE_HASH_GTPU_CTX_EH_IP_UDP:
2673                 ret = ice_hash_moveback(pf,
2674                                         &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP]);
2675                 if (ret && (ret != -ENOENT))
2676                         return ret;
2677
2678                 ret = ice_hash_moveback(pf,
2679                                         &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_TCP]);
2680                 if (ret && (ret != -ENOENT))
2681                         return ret;
2682
2683                 ret = ice_hash_moveback(pf,
2684                                         &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP]);
2685                 if (ret && (ret != -ENOENT))
2686                         return ret;
2687
2688                 ret = ice_hash_moveback(pf,
2689                                         &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_TCP]);
2690                 if (ret && (ret != -ENOENT))
2691                         return ret;
2692
2693                 break;
2694         case ICE_HASH_GTPU_CTX_EH_IP_TCP:
2695                 ret = ice_hash_moveback(pf,
2696                                         &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP]);
2697                 if (ret && (ret != -ENOENT))
2698                         return ret;
2699
2700                 ret = ice_hash_moveback(pf,
2701                                         &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_UDP]);
2702                 if (ret && (ret != -ENOENT))
2703                         return ret;
2704
2705                 ret = ice_hash_moveback(pf,
2706                                         &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP]);
2707                 if (ret && (ret != -ENOENT))
2708                         return ret;
2709
2710                 ret = ice_hash_moveback(pf,
2711                                         &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_UDP]);
2712                 if (ret && (ret != -ENOENT))
2713                         return ret;
2714
2715                 break;
2716         case ICE_HASH_GTPU_CTX_UP_IP:
2717         case ICE_HASH_GTPU_CTX_UP_IP_UDP:
2718         case ICE_HASH_GTPU_CTX_UP_IP_TCP:
2719         case ICE_HASH_GTPU_CTX_DW_IP:
2720         case ICE_HASH_GTPU_CTX_DW_IP_UDP:
2721         case ICE_HASH_GTPU_CTX_DW_IP_TCP:
2722                 ret = ice_hash_moveback(pf,
2723                                         &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP]);
2724                 if (ret && (ret != -ENOENT))
2725                         return ret;
2726
2727                 ret = ice_hash_moveback(pf,
2728                                         &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_UDP]);
2729                 if (ret && (ret != -ENOENT))
2730                         return ret;
2731
2732                 ret = ice_hash_moveback(pf,
2733                                         &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_TCP]);
2734                 if (ret && (ret != -ENOENT))
2735                         return ret;
2736
2737                 break;
2738         default:
2739                 break;
2740         }
2741
2742         return 0;
2743 }
2744
2745 static int
2746 ice_add_rss_cfg_post(struct ice_pf *pf, struct ice_rss_hash_cfg *cfg)
2747 {
2748         u8 gtpu_ctx_idx = calc_gtpu_ctx_idx(cfg->addl_hdrs);
2749
2750         if (cfg->addl_hdrs & ICE_FLOW_SEG_HDR_IPV4)
2751                 return ice_add_rss_cfg_post_gtpu(pf, &pf->hash_ctx.gtpu4,
2752                                                  gtpu_ctx_idx, cfg);
2753         else if (cfg->addl_hdrs & ICE_FLOW_SEG_HDR_IPV6)
2754                 return ice_add_rss_cfg_post_gtpu(pf, &pf->hash_ctx.gtpu6,
2755                                                  gtpu_ctx_idx, cfg);
2756
2757         return 0;
2758 }
2759
2760 static void
2761 ice_rem_rss_cfg_post(struct ice_pf *pf, uint32_t hdr)
2762 {
2763         u8 gtpu_ctx_idx = calc_gtpu_ctx_idx(hdr);
2764
2765         if (gtpu_ctx_idx >= ICE_HASH_GTPU_CTX_MAX)
2766                 return;
2767
2768         if (hdr & ICE_FLOW_SEG_HDR_IPV4)
2769                 hash_cfg_reset(&pf->hash_ctx.gtpu4.ctx[gtpu_ctx_idx]);
2770         else if (hdr & ICE_FLOW_SEG_HDR_IPV6)
2771                 hash_cfg_reset(&pf->hash_ctx.gtpu6.ctx[gtpu_ctx_idx]);
2772 }
2773
2774 int
2775 ice_rem_rss_cfg_wrap(struct ice_pf *pf, uint16_t vsi_id,
2776                      struct ice_rss_hash_cfg *cfg)
2777 {
2778         struct ice_hw *hw = ICE_PF_TO_HW(pf);
2779         int ret;
2780
2781         ret = ice_rem_rss_cfg(hw, vsi_id, cfg);
2782         if (ret && ret != ICE_ERR_DOES_NOT_EXIST)
2783                 PMD_DRV_LOG(ERR, "remove rss cfg failed\n");
2784
2785         ice_rem_rss_cfg_post(pf, cfg->addl_hdrs);
2786
2787         return 0;
2788 }
2789
2790 int
2791 ice_add_rss_cfg_wrap(struct ice_pf *pf, uint16_t vsi_id,
2792                      struct ice_rss_hash_cfg *cfg)
2793 {
2794         struct ice_hw *hw = ICE_PF_TO_HW(pf);
2795         int ret;
2796
2797         ret = ice_add_rss_cfg_pre(pf, cfg->addl_hdrs);
2798         if (ret)
2799                 PMD_DRV_LOG(ERR, "add rss cfg pre failed\n");
2800
2801         ret = ice_add_rss_cfg(hw, vsi_id, cfg);
2802         if (ret)
2803                 PMD_DRV_LOG(ERR, "add rss cfg failed\n");
2804
2805         ret = ice_add_rss_cfg_post(pf, cfg);
2806         if (ret)
2807                 PMD_DRV_LOG(ERR, "add rss cfg post failed\n");
2808
2809         return 0;
2810 }
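
/*
 * Note that both wrappers above return 0 even when an individual step
 * fails: RSS (re)programming is treated as best effort, with failures
 * reported through the log rather than propagated to the caller.
 */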
2811
2812 static void
2813 ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
2814 {
2815         struct ice_hw *hw = ICE_PF_TO_HW(pf);
2816         struct ice_vsi *vsi = pf->main_vsi;
2817         struct ice_rss_hash_cfg cfg;
2818         int ret;
2819
2820 #define ICE_RSS_HF_ALL ( \
2821         ETH_RSS_IPV4 | \
2822         ETH_RSS_IPV6 | \
2823         ETH_RSS_NONFRAG_IPV4_UDP | \
2824         ETH_RSS_NONFRAG_IPV6_UDP | \
2825         ETH_RSS_NONFRAG_IPV4_TCP | \
2826         ETH_RSS_NONFRAG_IPV6_TCP | \
2827         ETH_RSS_NONFRAG_IPV4_SCTP | \
2828         ETH_RSS_NONFRAG_IPV6_SCTP)
2829
2830         ret = ice_rem_vsi_rss_cfg(hw, vsi->idx);
2831         if (ret)
2832                 PMD_DRV_LOG(ERR, "%s Remove rss vsi fail %d",
2833                             __func__, ret);
2834
2835         cfg.symm = 0;
2836         cfg.hdr_type = ICE_RSS_ANY_HEADERS;
2837         /* Configure RSS for IPv4 with src/dst addr as input set */
2838         if (rss_hf & ETH_RSS_IPV4) {
2839                 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER;
2840                 cfg.hash_flds = ICE_FLOW_HASH_IPV4;
2841                 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
2842                 if (ret)
2843                         PMD_DRV_LOG(ERR, "%s IPV4 rss flow fail %d",
2844                                     __func__, ret);
2845         }
2846
2847         /* Configure RSS for IPv6 with src/dst addr as input set */
2848         if (rss_hf & ETH_RSS_IPV6) {
2849                 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER;
2850                 cfg.hash_flds = ICE_FLOW_HASH_IPV6;
2851                 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
2852                 if (ret)
2853                         PMD_DRV_LOG(ERR, "%s IPV6 rss flow fail %d",
2854                                     __func__, ret);
2855         }
2856
2857         /* Configure RSS for udp4 with src/dst addr and port as input set */
2858         if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) {
2859                 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4 |
2860                                 ICE_FLOW_SEG_HDR_IPV_OTHER;
2861                 cfg.hash_flds = ICE_HASH_UDP_IPV4;
2862                 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
2863                 if (ret)
2864                         PMD_DRV_LOG(ERR, "%s UDP_IPV4 rss flow fail %d",
2865                                     __func__, ret);
2866         }
2867
2868         /* Configure RSS for udp6 with src/dst addr and port as input set */
2869         if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) {
2870                 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV6 |
2871                                 ICE_FLOW_SEG_HDR_IPV_OTHER;
2872                 cfg.hash_flds = ICE_HASH_UDP_IPV6;
2873                 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
2874                 if (ret)
2875                         PMD_DRV_LOG(ERR, "%s UDP_IPV6 rss flow fail %d",
2876                                     __func__, ret);
2877         }
2878
2879         /* Configure RSS for tcp4 with src/dst addr and port as input set */
2880         if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) {
2881                 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4 |
2882                                 ICE_FLOW_SEG_HDR_IPV_OTHER;
2883                 cfg.hash_flds = ICE_HASH_TCP_IPV4;
2884                 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
2885                 if (ret)
2886                         PMD_DRV_LOG(ERR, "%s TCP_IPV4 rss flow fail %d",
2887                                     __func__, ret);
2888         }
2889
2890         /* Configure RSS for tcp6 with src/dst addr and port as input set */
2891         if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) {
2892                 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6 |
2893                                 ICE_FLOW_SEG_HDR_IPV_OTHER;
2894                 cfg.hash_flds = ICE_HASH_TCP_IPV6;
2895                 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
2896                 if (ret)
2897                         PMD_DRV_LOG(ERR, "%s TCP_IPV6 rss flow fail %d",
2898                                     __func__, ret);
2899         }
2900
2901         /* Configure RSS for sctp4 with src/dst addr and port as input set */
2902         if (rss_hf & ETH_RSS_NONFRAG_IPV4_SCTP) {
2903                 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4 |
2904                                 ICE_FLOW_SEG_HDR_IPV_OTHER;
2905                 cfg.hash_flds = ICE_HASH_SCTP_IPV4;
2906                 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
2907                 if (ret)
2908                         PMD_DRV_LOG(ERR, "%s SCTP_IPV4 rss flow fail %d",
2909                                     __func__, ret);
2910         }
2911
2912         /* Configure RSS for sctp6 with src/dst addr and port as input set */
2913         if (rss_hf & ETH_RSS_NONFRAG_IPV6_SCTP) {
2914                 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV6 |
2915                                 ICE_FLOW_SEG_HDR_IPV_OTHER;
2916                 cfg.hash_flds = ICE_HASH_SCTP_IPV6;
2917                 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
2918                 if (ret)
2919                         PMD_DRV_LOG(ERR, "%s SCTP_IPV6 rss flow fail %d",
2920                                     __func__, ret);
2921         }
2922
2923         if (rss_hf & ETH_RSS_IPV4) {
2924                 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_GTPU_IP | ICE_FLOW_SEG_HDR_IPV4 |
2925                                 ICE_FLOW_SEG_HDR_IPV_OTHER;
2926                 cfg.hash_flds = ICE_FLOW_HASH_IPV4;
2927                 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
2928                 if (ret)
2929                         PMD_DRV_LOG(ERR, "%s GTPU_IPV4 rss flow fail %d",
2930                                     __func__, ret);
2931
2932                 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_GTPU_EH | ICE_FLOW_SEG_HDR_IPV4 |
2933                                 ICE_FLOW_SEG_HDR_IPV_OTHER;
2934                 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
2935                 if (ret)
2936                         PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV4 rss flow fail %d",
2937                                     __func__, ret);
2938
2939                 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_IPV4 |
2940                                 ICE_FLOW_SEG_HDR_IPV_OTHER;
2941                 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
2942                 if (ret)
2943                         PMD_DRV_LOG(ERR, "%s PPPoE_IPV4 rss flow fail %d",
2944                                     __func__, ret);
2945         }
2946
2947         if (rss_hf & ETH_RSS_IPV6) {
2948                 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_GTPU_IP | ICE_FLOW_SEG_HDR_IPV6 |
2949                                 ICE_FLOW_SEG_HDR_IPV_OTHER;
2950                 cfg.hash_flds = ICE_FLOW_HASH_IPV6;
2951                 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
2952                 if (ret)
2953                         PMD_DRV_LOG(ERR, "%s GTPU_IPV6 rss flow fail %d",
2954                                     __func__, ret);
2955
2956                 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_GTPU_EH | ICE_FLOW_SEG_HDR_IPV6 |
2957                                 ICE_FLOW_SEG_HDR_IPV_OTHER;
2958                 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
2959                 if (ret)
2960                         PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV6 rss flow fail %d",
2961                                     __func__, ret);
2962
2963                 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_IPV6 |
2964                                 ICE_FLOW_SEG_HDR_IPV_OTHER;
2965                 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
2966                 if (ret)
2967                         PMD_DRV_LOG(ERR, "%s PPPoE_IPV6 rss flow fail %d",
2968                                     __func__, ret);
2969         }
2970
2971         if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) {
2972                 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_GTPU_IP | ICE_FLOW_SEG_HDR_UDP |
2973                                 ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER;
2974                 cfg.hash_flds = ICE_HASH_UDP_IPV4;
2975                 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
2976                 if (ret)
2977                         PMD_DRV_LOG(ERR, "%s GTPU_IPV4_UDP rss flow fail %d",
2978                                     __func__, ret);
2979
2980                 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_GTPU_EH | ICE_FLOW_SEG_HDR_UDP |
2981                                 ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER;
2982                 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
2983                 if (ret)
2984                         PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV4_UDP rss flow fail %d",
2985                                     __func__, ret);
2986
2987                 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_UDP |
2988                                 ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER;
2989                 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
2990                 if (ret)
2991                         PMD_DRV_LOG(ERR, "%s PPPoE_IPV4_UDP rss flow fail %d",
2992                                     __func__, ret);
2993         }
2994
2995         if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) {
2996                 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_GTPU_IP | ICE_FLOW_SEG_HDR_UDP |
2997                                 ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER;
2998                 cfg.hash_flds = ICE_HASH_UDP_IPV6;
2999                 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
3000                 if (ret)
3001                         PMD_DRV_LOG(ERR, "%s GTPU_IPV6_UDP rss flow fail %d",
3002                                     __func__, ret);
3003
3004                 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_GTPU_EH | ICE_FLOW_SEG_HDR_UDP |
3005                                 ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER;
3006                 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
3007                 if (ret)
3008                         PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV6_UDP rss flow fail %d",
3009                                     __func__, ret);
3010
3011                 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_UDP |
3012                                 ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER;
3013                 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
3014                 if (ret)
3015                         PMD_DRV_LOG(ERR, "%s PPPoE_IPV6_UDP rss flow fail %d",
3016                                     __func__, ret);
3017         }
3018
3019         if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) {
3020                 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_GTPU_IP | ICE_FLOW_SEG_HDR_TCP |
3021                                 ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER;
3022                 cfg.hash_flds = ICE_HASH_TCP_IPV4;
3023                 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
3024                 if (ret)
3025                         PMD_DRV_LOG(ERR, "%s GTPU_IPV4_TCP rss flow fail %d",
3026                                     __func__, ret);
3027
3028                 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_GTPU_EH | ICE_FLOW_SEG_HDR_TCP |
3029                                 ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER;
3030                 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
3031                 if (ret)
3032                         PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV4_TCP rss flow fail %d",
3033                                     __func__, ret);
3034
3035                 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_TCP |
3036                                 ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER;
3037                 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
3038                 if (ret)
3039                         PMD_DRV_LOG(ERR, "%s PPPoE_IPV4_TCP rss flow fail %d",
3040                                     __func__, ret);
3041         }
3042
3043         if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) {
3044                 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_GTPU_IP | ICE_FLOW_SEG_HDR_TCP |
3045                                 ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER;
3046                 cfg.hash_flds = ICE_HASH_TCP_IPV6;
3047                 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
3048                 if (ret)
3049                         PMD_DRV_LOG(ERR, "%s GTPU_IPV6_TCP rss flow fail %d",
3050                                     __func__, ret);
3051
3052                 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_GTPU_EH | ICE_FLOW_SEG_HDR_TCP |
3053                                 ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER;
3054                 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
3055                 if (ret)
3056                         PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV6_TCP rss flow fail %d",
3057                                     __func__, ret);
3058
3059                 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_TCP |
3060                                 ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER;
3061                 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
3062                 if (ret)
3063                         PMD_DRV_LOG(ERR, "%s PPPoE_IPV6_TCP rss flow fail %d",
3064                                     __func__, ret);
3065         }
3066
3067         pf->rss_hf = rss_hf & ICE_RSS_HF_ALL;
3068 }
3069
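/* One-time RSS initialization for the main VSI: allocate and program the
 * hash key (user-supplied or random), spread the lookup table evenly
 * across the Rx queues (e.g. with 4 queues the LUT is filled with
 * 0,1,2,3,0,1,...), select the symmetric Toeplitz scheme in
 * VSIQF_HASH_CTL, and finally apply the configured rss_hf flow types.
 */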
3070 static int ice_init_rss(struct ice_pf *pf)
3071 {
3072         struct ice_hw *hw = ICE_PF_TO_HW(pf);
3073         struct ice_vsi *vsi = pf->main_vsi;
3074         struct rte_eth_dev *dev = pf->adapter->eth_dev;
3075         struct ice_aq_get_set_rss_lut_params lut_params;
3076         struct rte_eth_rss_conf *rss_conf;
3077         struct ice_aqc_get_set_rss_keys key;
3078         uint16_t i, nb_q;
3079         int ret = 0;
3080         bool is_safe_mode = pf->adapter->is_safe_mode;
3081         uint32_t reg;
3082
3083         rss_conf = &dev->data->dev_conf.rx_adv_conf.rss_conf;
3084         nb_q = dev->data->nb_rx_queues;
3085         vsi->rss_key_size = ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE;
3086         vsi->rss_lut_size = pf->hash_lut_size;
3087
3088         if (nb_q == 0) {
3089                 PMD_DRV_LOG(WARNING,
3090                         "RSS is not supported as the number of Rx queues is zero");
3091                 return 0;
3092         }
3093
3094         if (is_safe_mode) {
3095                 PMD_DRV_LOG(WARNING, "RSS is not supported in safe mode");
3096                 return 0;
3097         }
3098
3099         if (!vsi->rss_key) {
3100                 vsi->rss_key = rte_zmalloc(NULL,
3101                                            vsi->rss_key_size, 0);
3102                 if (vsi->rss_key == NULL) {
3103                         PMD_DRV_LOG(ERR, "Failed to allocate memory for rss_key");
3104                         return -ENOMEM;
3105                 }
3106         }
3107         if (!vsi->rss_lut) {
3108                 vsi->rss_lut = rte_zmalloc(NULL,
3109                                            vsi->rss_lut_size, 0);
3110                 if (vsi->rss_lut == NULL) {
3111                         PMD_DRV_LOG(ERR, "Failed to allocate memory for rss_lut");
3112                         rte_free(vsi->rss_key);
3113                         vsi->rss_key = NULL;
3114                         return -ENOMEM;
3115                 }
3116         }
3117         /* configure RSS key */
3118         if (!rss_conf->rss_key) {
3119                 /* Generate a random default hash key */
3120                 for (i = 0; i < vsi->rss_key_size; i++)
3121                         vsi->rss_key[i] = (uint8_t)rte_rand();
3122         } else {
3123                 rte_memcpy(vsi->rss_key, rss_conf->rss_key,
3124                            RTE_MIN(rss_conf->rss_key_len,
3125                                    vsi->rss_key_size));
3126         }
3127         rte_memcpy(key.standard_rss_key, vsi->rss_key, vsi->rss_key_size);
3128         ret = ice_aq_set_rss_key(hw, vsi->idx, &key);
3129         if (ret)
3130                 goto out;
3131
3132         /* init the RSS lookup table */
3133         for (i = 0; i < vsi->rss_lut_size; i++)
3134                 vsi->rss_lut[i] = i % nb_q;
3135
3136         lut_params.vsi_handle = vsi->idx;
3137         lut_params.lut_size = vsi->rss_lut_size;
3138         lut_params.lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF;
3139         lut_params.lut = vsi->rss_lut;
3140         lut_params.global_lut_id = 0;
3141         ret = ice_aq_set_rss_lut(hw, &lut_params);
3142         if (ret)
3143                 goto out;
3144
3145         /* Enable registers for symmetric_toeplitz function. */
3146         reg = ICE_READ_REG(hw, VSIQF_HASH_CTL(vsi->vsi_id));
3147         reg = (reg & (~VSIQF_HASH_CTL_HASH_SCHEME_M)) |
3148                 (1 << VSIQF_HASH_CTL_HASH_SCHEME_S);
3149         ICE_WRITE_REG(hw, VSIQF_HASH_CTL(vsi->vsi_id), reg);
3150
3151         /* RSS hash configuration */
3152         ice_rss_hash_set(pf, rss_conf->rss_hf);
3153
3154         return 0;
3155 out:
3156         rte_free(vsi->rss_key);
3157         vsi->rss_key = NULL;
3158         rte_free(vsi->rss_lut);
3159         vsi->rss_lut = NULL;
3160         return -EINVAL;
3161 }
3162
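/* dev_configure is intentionally small: per-queue setup happens in the
 * queue ops. Here we only reset the fast-path heuristics, force the
 * RSS_HASH offload on when RSS multi-queue mode is requested, and
 * initialize RSS if any Rx queues were configured.
 */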
3163 static int
3164 ice_dev_configure(struct rte_eth_dev *dev)
3165 {
3166         struct ice_adapter *ad =
3167                 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
3168         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3169         int ret;
3170
3171         /* Initialize to TRUE. If any Rx queue doesn't meet the bulk
3172          * allocation or vector Rx preconditions, we will reset it.
3173          */
3174         ad->rx_bulk_alloc_allowed = true;
3175         ad->tx_simple_allowed = true;
3176
3177         if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
3178                 dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
3179
3180         if (dev->data->nb_rx_queues) {
3181                 ret = ice_init_rss(pf);
3182                 if (ret) {
3183                         PMD_DRV_LOG(ERR, "Failed to enable rss for PF");
3184                         return ret;
3185                 }
3186         }
3187
3188         return 0;
3189 }
3190
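/* Program QINT_RQCTL/QINT_TQCTL for 'nb_queue' consecutive queue pairs
 * starting at 'base_queue' so both the Rx and Tx interrupt causes fire on
 * MSI-X vector 'msix_vect' (ITR index 0, which is seeded with a small
 * initial interval via GLINT_ITR).
 */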
3191 static void
3192 __vsi_queues_bind_intr(struct ice_vsi *vsi, uint16_t msix_vect,
3193                        int base_queue, int nb_queue)
3194 {
3195         struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
3196         uint32_t val, val_tx;
3197         int i;
3198
3199         for (i = 0; i < nb_queue; i++) {
3200                 /* do the actual binding */
3201                 val = (msix_vect & QINT_RQCTL_MSIX_INDX_M) |
3202                       (0 << QINT_RQCTL_ITR_INDX_S) | QINT_RQCTL_CAUSE_ENA_M;
3203                 val_tx = (msix_vect & QINT_TQCTL_MSIX_INDX_M) |
3204                          (0 << QINT_TQCTL_ITR_INDX_S) | QINT_TQCTL_CAUSE_ENA_M;
3205
3206                 PMD_DRV_LOG(INFO, "queue %d is binding to vect %d",
3207                             base_queue + i, msix_vect);
3208                 /* set ITR0 value */
3209                 ICE_WRITE_REG(hw, GLINT_ITR(0, msix_vect), 0x2);
3210                 ICE_WRITE_REG(hw, QINT_RQCTL(base_queue + i), val);
3211                 ICE_WRITE_REG(hw, QINT_TQCTL(base_queue + i), val_tx);
3212         }
3213 }
3214
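/* Bind the VSI's used queue pairs to MSI-X vectors. With only one usable
 * vector (the uio case) all queues are funneled into a single vector;
 * otherwise (vfio) queues and vectors map 1:1. When Rx interrupt mode is
 * enabled, the chosen vector for each queue is recorded in
 * intr_handle->intr_vec.
 */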
3215 void
3216 ice_vsi_queues_bind_intr(struct ice_vsi *vsi)
3217 {
3218         struct rte_eth_dev *dev = vsi->adapter->eth_dev;
3219         struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
3220         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
3221         struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
3222         uint16_t msix_vect = vsi->msix_intr;
3223         uint16_t nb_msix = RTE_MIN(vsi->nb_msix, intr_handle->nb_efd);
3224         uint16_t queue_idx = 0;
3225         int record = 0;
3226         int i;
3227
3228         /* clear Rx/Tx queue interrupt */
3229         for (i = 0; i < vsi->nb_used_qps; i++) {
3230                 ICE_WRITE_REG(hw, QINT_TQCTL(vsi->base_queue + i), 0);
3231                 ICE_WRITE_REG(hw, QINT_RQCTL(vsi->base_queue + i), 0);
3232         }
3233
3234         /* PF bind interrupt */
3235         if (rte_intr_dp_is_en(intr_handle)) {
3236                 queue_idx = 0;
3237                 record = 1;
3238         }
3239
3240         for (i = 0; i < vsi->nb_used_qps; i++) {
3241                 if (nb_msix <= 1) {
3242                         if (!rte_intr_allow_others(intr_handle))
3243                                 msix_vect = ICE_MISC_VEC_ID;
3244
3245                         /* uio: map all queues to one msix_vect */
3246                         __vsi_queues_bind_intr(vsi, msix_vect,
3247                                                vsi->base_queue + i,
3248                                                vsi->nb_used_qps - i);
3249
3250                         for (; !!record && i < vsi->nb_used_qps; i++)
3251                                 intr_handle->intr_vec[queue_idx + i] =
3252                                         msix_vect;
3253                         break;
3254                 }
3255
3256                 /* vfio 1:1 queue/msix_vect mapping */
3257                 __vsi_queues_bind_intr(vsi, msix_vect,
3258                                        vsi->base_queue + i, 1);
3259
3260                 if (!!record)
3261                         intr_handle->intr_vec[queue_idx + i] = msix_vect;
3262
3263                 msix_vect++;
3264                 nb_msix--;
3265         }
3266 }
3267
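/* Turn on the interrupts bound above by writing GLINT_DYN_CTL: INTENA
 * enables the vector, CLEARPBA acks any pending event, and the ITR_INDX /
 * WB_ON_ITR bits select descriptor write-back without extra throttling.
 * Falls back to vector 0 (the misc vector) when per-queue vectors are not
 * available.
 */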
3268 void
3269 ice_vsi_enable_queues_intr(struct ice_vsi *vsi)
3270 {
3271         struct rte_eth_dev *dev = vsi->adapter->eth_dev;
3272         struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
3273         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
3274         struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
3275         uint16_t msix_intr, i;
3276
3277         if (rte_intr_allow_others(intr_handle))
3278                 for (i = 0; i < vsi->nb_used_qps; i++) {
3279                         msix_intr = vsi->msix_intr + i;
3280                         ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr),
3281                                       GLINT_DYN_CTL_INTENA_M |
3282                                       GLINT_DYN_CTL_CLEARPBA_M |
3283                                       GLINT_DYN_CTL_ITR_INDX_M |
3284                                       GLINT_DYN_CTL_WB_ON_ITR_M);
3285                 }
3286         else
3287                 ICE_WRITE_REG(hw, GLINT_DYN_CTL(0),
3288                               GLINT_DYN_CTL_INTENA_M |
3289                               GLINT_DYN_CTL_CLEARPBA_M |
3290                               GLINT_DYN_CTL_ITR_INDX_M |
3291                               GLINT_DYN_CTL_WB_ON_ITR_M);
3292 }
3293
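/* Set up Rx interrupt mode for the port: size the event fds to the number
 * of Rx queues (bounded by ICE_MAX_INTR_QUEUE_NUM), allocate the
 * queue-to-vector table if needed, then bind and enable the queue
 * interrupts on the main VSI.
 */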
3294 static int
3295 ice_rxq_intr_setup(struct rte_eth_dev *dev)
3296 {
3297         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3298         struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
3299         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
3300         struct ice_vsi *vsi = pf->main_vsi;
3301         uint32_t intr_vector = 0;
3302
3303         rte_intr_disable(intr_handle);
3304
3305         /* check and configure queue intr-vector mapping */
3306         if ((rte_intr_cap_multiple(intr_handle) ||
3307              !RTE_ETH_DEV_SRIOV(dev).active) &&
3308             dev->data->dev_conf.intr_conf.rxq != 0) {
3309                 intr_vector = dev->data->nb_rx_queues;
3310                 if (intr_vector > ICE_MAX_INTR_QUEUE_NUM) {
3311                         PMD_DRV_LOG(ERR, "At most %d intr queues supported",
3312                                     ICE_MAX_INTR_QUEUE_NUM);
3313                         return -ENOTSUP;
3314                 }
3315                 if (rte_intr_efd_enable(intr_handle, intr_vector))
3316                         return -1;
3317         }
3318
3319         if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
3320                 intr_handle->intr_vec =
3321                 rte_zmalloc(NULL, dev->data->nb_rx_queues * sizeof(int),
3322                             0);
3323                 if (!intr_handle->intr_vec) {
3324                         PMD_DRV_LOG(ERR,
3325                                     "Failed to allocate %d rx_queues intr_vec",
3326                                     dev->data->nb_rx_queues);
3327                         return -ENOMEM;
3328                 }
3329         }
3330
3331         /* Map queues with MSIX interrupt */
3332         vsi->nb_used_qps = dev->data->nb_rx_queues;
3333         ice_vsi_queues_bind_intr(vsi);
3334
3335         /* Enable interrupts for all the queues */
3336         ice_vsi_enable_queues_intr(vsi);
3337
3338         rte_intr_enable(intr_handle);
3339
3340         return 0;
3341 }
3342
3343 static void
3344 ice_get_init_link_status(struct rte_eth_dev *dev)
3345 {
3346         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3347         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3348         bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false;
3349         struct ice_link_status link_status;
3350         int ret;
3351
3352         ret = ice_aq_get_link_info(hw->port_info, enable_lse,
3353                                    &link_status, NULL);
3354         if (ret != ICE_SUCCESS) {
3355                 PMD_DRV_LOG(ERR, "Failed to get link info");
3356                 pf->init_link_up = false;
3357                 return;
3358         }
3359
3360         if (link_status.link_info & ICE_AQ_LINK_UP)
3361                 pf->init_link_up = true;
3362 }
3363
3364 static int
3365 ice_dev_start(struct rte_eth_dev *dev)
3366 {
3367         struct rte_eth_dev_data *data = dev->data;
3368         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3369         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3370         struct ice_vsi *vsi = pf->main_vsi;
3371         uint16_t nb_rxq = 0;
3372         uint16_t nb_txq, i;
3373         uint16_t max_frame_size;
3374         int mask, ret;
3375
3376         /* program Tx queues' context in hardware */
3377         for (nb_txq = 0; nb_txq < data->nb_tx_queues; nb_txq++) {
3378                 ret = ice_tx_queue_start(dev, nb_txq);
3379                 if (ret) {
3380                         PMD_DRV_LOG(ERR, "fail to start Tx queue %u", nb_txq);
3381                         goto tx_err;
3382                 }
3383         }
3384
3385         /* program Rx queues' context in hardware */
3386         for (nb_rxq = 0; nb_rxq < data->nb_rx_queues; nb_rxq++) {
3387                 ret = ice_rx_queue_start(dev, nb_rxq);
3388                 if (ret) {
3389                         PMD_DRV_LOG(ERR, "fail to start Rx queue %u", nb_rxq);
3390                         goto rx_err;
3391                 }
3392         }
3393
3394         ice_set_rx_function(dev);
3395         ice_set_tx_function(dev);
3396
3397         mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
3398                         ETH_VLAN_EXTEND_MASK;
3399         ret = ice_vlan_offload_set(dev, mask);
3400         if (ret) {
3401                 PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
3402                 goto rx_err;
3403         }
3404
3405         /* enable Rx interrupt and map Rx queues to interrupt vectors */
3406         if (ice_rxq_intr_setup(dev))
3407                 return -EIO;
3408
3409         /* Enable receiving broadcast packets and transmitting packets */
3410         ret = ice_set_vsi_promisc(hw, vsi->idx,
3411                                   ICE_PROMISC_BCAST_RX | ICE_PROMISC_BCAST_TX |
3412                                   ICE_PROMISC_UCAST_TX | ICE_PROMISC_MCAST_TX,
3413                                   0);
3414         if (ret != ICE_SUCCESS)
3415                 PMD_DRV_LOG(INFO, "failed to set VSI broadcast");
3416
3417         ret = ice_aq_set_event_mask(hw, hw->port_info->lport,
3418                                     ((u16)(ICE_AQ_LINK_EVENT_LINK_FAULT |
3419                                      ICE_AQ_LINK_EVENT_PHY_TEMP_ALARM |
3420                                      ICE_AQ_LINK_EVENT_EXCESSIVE_ERRORS |
3421                                      ICE_AQ_LINK_EVENT_SIGNAL_DETECT |
3422                                      ICE_AQ_LINK_EVENT_AN_COMPLETED |
3423                                      ICE_AQ_LINK_EVENT_PORT_TX_SUSPENDED)),
3424                                      NULL);
3425         if (ret != ICE_SUCCESS)
3426                 PMD_DRV_LOG(WARNING, "Failed to set PHY event mask");
3427
3428         ice_get_init_link_status(dev);
3429
3430         ice_dev_set_link_up(dev);
3431
3432         /* Call the get_link_info AQ command to enable/disable LSE */
3433         ice_link_update(dev, 0);
3434
3435         pf->adapter_stopped = false;
3436
3437         /* Set the max frame size to the default value */
3438         max_frame_size = pf->dev_data->dev_conf.rxmode.max_rx_pkt_len ?
3439                 pf->dev_data->dev_conf.rxmode.max_rx_pkt_len :
3440                 ICE_FRAME_SIZE_MAX;
3441
3442         /* Program the max frame size into HW */
3443         ice_aq_set_mac_cfg(hw, max_frame_size, NULL);
3444
3445         return 0;
3446
3447         /* stop the started queues if we failed to start all queues */
3448 rx_err:
3449         for (i = 0; i < nb_rxq; i++)
3450                 ice_rx_queue_stop(dev, i);
3451 tx_err:
3452         for (i = 0; i < nb_txq; i++)
3453                 ice_tx_queue_stop(dev, i);
3454
3455         return -EIO;
3456 }
3457
3458 static int
3459 ice_dev_reset(struct rte_eth_dev *dev)
3460 {
3461         int ret;
3462
3463         if (dev->data->sriov.active)
3464                 return -ENOTSUP;
3465
3466         ret = ice_dev_uninit(dev);
3467         if (ret) {
3468                 PMD_INIT_LOG(ERR, "failed to uninit device, status = %d", ret);
3469                 return -ENXIO;
3470         }
3471
3472         ret = ice_dev_init(dev);
3473         if (ret) {
3474                 PMD_INIT_LOG(ERR, "failed to init device, status = %d", ret);
3475                 return -ENXIO;
3476         }
3477
3478         return 0;
3479 }
3480
3481 static int
3482 ice_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
3483 {
3484         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3485         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3486         struct ice_vsi *vsi = pf->main_vsi;
3487         struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
3488         bool is_safe_mode = pf->adapter->is_safe_mode;
3489         u64 phy_type_low;
3490         u64 phy_type_high;
3491
3492         dev_info->min_rx_bufsize = ICE_BUF_SIZE_MIN;
3493         dev_info->max_rx_pktlen = ICE_FRAME_SIZE_MAX;
3494         dev_info->max_rx_queues = vsi->nb_qps;
3495         dev_info->max_tx_queues = vsi->nb_qps;
3496         dev_info->max_mac_addrs = vsi->max_macaddrs;
3497         dev_info->max_vfs = pci_dev->max_vfs;
3498         dev_info->max_mtu = dev_info->max_rx_pktlen - ICE_ETH_OVERHEAD;
3499         dev_info->min_mtu = RTE_ETHER_MIN_MTU;
3500
3501         dev_info->rx_offload_capa =
3502                 DEV_RX_OFFLOAD_VLAN_STRIP |
3503                 DEV_RX_OFFLOAD_JUMBO_FRAME |
3504                 DEV_RX_OFFLOAD_KEEP_CRC |
3505                 DEV_RX_OFFLOAD_SCATTER |
3506                 DEV_RX_OFFLOAD_VLAN_FILTER;
3507         dev_info->tx_offload_capa =
3508                 DEV_TX_OFFLOAD_VLAN_INSERT |
3509                 DEV_TX_OFFLOAD_TCP_TSO |
3510                 DEV_TX_OFFLOAD_MULTI_SEGS |
3511                 DEV_TX_OFFLOAD_MBUF_FAST_FREE;
3512         dev_info->flow_type_rss_offloads = 0;
3513
3514         if (!is_safe_mode) {
3515                 dev_info->rx_offload_capa |=
3516                         DEV_RX_OFFLOAD_IPV4_CKSUM |
3517                         DEV_RX_OFFLOAD_UDP_CKSUM |
3518                         DEV_RX_OFFLOAD_TCP_CKSUM |
3519                         DEV_RX_OFFLOAD_QINQ_STRIP |
3520                         DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
3521                         DEV_RX_OFFLOAD_VLAN_EXTEND |
3522                         DEV_RX_OFFLOAD_RSS_HASH;
3523                 dev_info->tx_offload_capa |=
3524                         DEV_TX_OFFLOAD_QINQ_INSERT |
3525                         DEV_TX_OFFLOAD_IPV4_CKSUM |
3526                         DEV_TX_OFFLOAD_UDP_CKSUM |
3527                         DEV_TX_OFFLOAD_TCP_CKSUM |
3528                         DEV_TX_OFFLOAD_SCTP_CKSUM |
3529                         DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
3530                         DEV_TX_OFFLOAD_OUTER_UDP_CKSUM;
3531                 dev_info->flow_type_rss_offloads |= ICE_RSS_OFFLOAD_ALL;
3532         }
3533
3534         dev_info->rx_queue_offload_capa = 0;
3535         dev_info->tx_queue_offload_capa = 0;
3536
3537         dev_info->reta_size = pf->hash_lut_size;
3538         dev_info->hash_key_size = (VSIQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
3539
3540         dev_info->default_rxconf = (struct rte_eth_rxconf) {
3541                 .rx_thresh = {
3542                         .pthresh = ICE_DEFAULT_RX_PTHRESH,
3543                         .hthresh = ICE_DEFAULT_RX_HTHRESH,
3544                         .wthresh = ICE_DEFAULT_RX_WTHRESH,
3545                 },
3546                 .rx_free_thresh = ICE_DEFAULT_RX_FREE_THRESH,
3547                 .rx_drop_en = 0,
3548                 .offloads = 0,
3549         };
3550
3551         dev_info->default_txconf = (struct rte_eth_txconf) {
3552                 .tx_thresh = {
3553                         .pthresh = ICE_DEFAULT_TX_PTHRESH,
3554                         .hthresh = ICE_DEFAULT_TX_HTHRESH,
3555                         .wthresh = ICE_DEFAULT_TX_WTHRESH,
3556                 },
3557                 .tx_free_thresh = ICE_DEFAULT_TX_FREE_THRESH,
3558                 .tx_rs_thresh = ICE_DEFAULT_TX_RSBIT_THRESH,
3559                 .offloads = 0,
3560         };
3561
3562         dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
3563                 .nb_max = ICE_MAX_RING_DESC,
3564                 .nb_min = ICE_MIN_RING_DESC,
3565                 .nb_align = ICE_ALIGN_RING_DESC,
3566         };
3567
3568         dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
3569                 .nb_max = ICE_MAX_RING_DESC,
3570                 .nb_min = ICE_MIN_RING_DESC,
3571                 .nb_align = ICE_ALIGN_RING_DESC,
3572         };
3573
3574         dev_info->speed_capa = ETH_LINK_SPEED_10M |
3575                                ETH_LINK_SPEED_100M |
3576                                ETH_LINK_SPEED_1G |
3577                                ETH_LINK_SPEED_2_5G |
3578                                ETH_LINK_SPEED_5G |
3579                                ETH_LINK_SPEED_10G |
3580                                ETH_LINK_SPEED_20G |
3581                                ETH_LINK_SPEED_25G;
3582
3583         phy_type_low = hw->port_info->phy.phy_type_low;
3584         phy_type_high = hw->port_info->phy.phy_type_high;
3585
3586         if (ICE_PHY_TYPE_SUPPORT_50G(phy_type_low))
3587                 dev_info->speed_capa |= ETH_LINK_SPEED_50G;
3588
3589         if (ICE_PHY_TYPE_SUPPORT_100G_LOW(phy_type_low) ||
3590                         ICE_PHY_TYPE_SUPPORT_100G_HIGH(phy_type_high))
3591                 dev_info->speed_capa |= ETH_LINK_SPEED_100G;
3592
3593         dev_info->nb_rx_queues = dev->data->nb_rx_queues;
3594         dev_info->nb_tx_queues = dev->data->nb_tx_queues;
3595
3596         dev_info->default_rxportconf.burst_size = ICE_RX_MAX_BURST;
3597         dev_info->default_txportconf.burst_size = ICE_TX_MAX_BURST;
3598         dev_info->default_rxportconf.nb_queues = 1;
3599         dev_info->default_txportconf.nb_queues = 1;
3600         dev_info->default_rxportconf.ring_size = ICE_BUF_SIZE_MIN;
3601         dev_info->default_txportconf.ring_size = ICE_BUF_SIZE_MIN;
3602
3603         return 0;
3604 }
3605
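/* struct rte_eth_link is laid out to fit in a single 64-bit word, so the
 * two helpers below read/write the whole link status atomically with one
 * rte_atomic64_cmpset() of the structure.
 */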
3606 static inline int
3607 ice_atomic_read_link_status(struct rte_eth_dev *dev,
3608                             struct rte_eth_link *link)
3609 {
3610         struct rte_eth_link *dst = link;
3611         struct rte_eth_link *src = &dev->data->dev_link;
3612
3613         if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
3614                                 *(uint64_t *)src) == 0)
3615                 return -1;
3616
3617         return 0;
3618 }
3619
3620 static inline int
3621 ice_atomic_write_link_status(struct rte_eth_dev *dev,
3622                              struct rte_eth_link *link)
3623 {
3624         struct rte_eth_link *dst = &dev->data->dev_link;
3625         struct rte_eth_link *src = link;
3626
3627         if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
3628                                 *(uint64_t *)src) == 0)
3629                 return -1;
3630
3631         return 0;
3632 }
3633
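/* Query the link status from firmware, optionally polling for up to
 * MAX_REPEAT_TIME * CHECK_INTERVAL (1s total) until the link comes up,
 * then translate the AQ speed encoding into ETH_SPEED_NUM_* values.
 * Returns 0 when the status changed and -1 when it is unchanged, which
 * lets the common ethdev layer decide whether a link event is needed.
 */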
3634 static int
3635 ice_link_update(struct rte_eth_dev *dev, int wait_to_complete)
3636 {
3637 #define CHECK_INTERVAL 100  /* 100ms */
3638 #define MAX_REPEAT_TIME 10  /* 1s (10 * 100ms) in total */
3639         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3640         struct ice_link_status link_status;
3641         struct rte_eth_link link, old;
3642         int status;
3643         unsigned int rep_cnt = MAX_REPEAT_TIME;
3644         bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false;
3645
3646         memset(&link, 0, sizeof(link));
3647         memset(&old, 0, sizeof(old));
3648         memset(&link_status, 0, sizeof(link_status));
3649         ice_atomic_read_link_status(dev, &old);
3650
3651         do {
3652                 /* Get link status information from hardware */
3653                 status = ice_aq_get_link_info(hw->port_info, enable_lse,
3654                                               &link_status, NULL);
3655                 if (status != ICE_SUCCESS) {
3656                         link.link_speed = ETH_SPEED_NUM_100M;
3657                         link.link_duplex = ETH_LINK_FULL_DUPLEX;
3658                         PMD_DRV_LOG(ERR, "Failed to get link info");
3659                         goto out;
3660                 }
3661
3662                 link.link_status = link_status.link_info & ICE_AQ_LINK_UP;
3663                 if (!wait_to_complete || link.link_status)
3664                         break;
3665
3666                 rte_delay_ms(CHECK_INTERVAL);
3667         } while (--rep_cnt);
3668
3669         if (!link.link_status)
3670                 goto out;
3671
3672         /* Full-duplex operation at all supported speeds */
3673         link.link_duplex = ETH_LINK_FULL_DUPLEX;
3674
3675         /* Parse the link status */
3676         switch (link_status.link_speed) {
3677         case ICE_AQ_LINK_SPEED_10MB:
3678                 link.link_speed = ETH_SPEED_NUM_10M;
3679                 break;
3680         case ICE_AQ_LINK_SPEED_100MB:
3681                 link.link_speed = ETH_SPEED_NUM_100M;
3682                 break;
3683         case ICE_AQ_LINK_SPEED_1000MB:
3684                 link.link_speed = ETH_SPEED_NUM_1G;
3685                 break;
3686         case ICE_AQ_LINK_SPEED_2500MB:
3687                 link.link_speed = ETH_SPEED_NUM_2_5G;
3688                 break;
3689         case ICE_AQ_LINK_SPEED_5GB:
3690                 link.link_speed = ETH_SPEED_NUM_5G;
3691                 break;
3692         case ICE_AQ_LINK_SPEED_10GB:
3693                 link.link_speed = ETH_SPEED_NUM_10G;
3694                 break;
3695         case ICE_AQ_LINK_SPEED_20GB:
3696                 link.link_speed = ETH_SPEED_NUM_20G;
3697                 break;
3698         case ICE_AQ_LINK_SPEED_25GB:
3699                 link.link_speed = ETH_SPEED_NUM_25G;
3700                 break;
3701         case ICE_AQ_LINK_SPEED_40GB:
3702                 link.link_speed = ETH_SPEED_NUM_40G;
3703                 break;
3704         case ICE_AQ_LINK_SPEED_50GB:
3705                 link.link_speed = ETH_SPEED_NUM_50G;
3706                 break;
3707         case ICE_AQ_LINK_SPEED_100GB:
3708                 link.link_speed = ETH_SPEED_NUM_100G;
3709                 break;
3710         case ICE_AQ_LINK_SPEED_UNKNOWN:
3711                 PMD_DRV_LOG(ERR, "Unknown link speed");
3712                 link.link_speed = ETH_SPEED_NUM_UNKNOWN;
3713                 break;
3714         default:
3715                 PMD_DRV_LOG(ERR, "Unsupported link speed");
3716                 link.link_speed = ETH_SPEED_NUM_NONE;
3717                 break;
3718         }
3719
3720         link.link_autoneg = !(dev->data->dev_conf.link_speeds &
3721                               ETH_LINK_SPEED_FIXED);
3722
3723 out:
3724         ice_atomic_write_link_status(dev, &link);
3725         if (link.link_status == old.link_status)
3726                 return -1;
3727
3728         return 0;
3729 }
3730
3731 /* Force the physical link state by getting the current PHY capabilities from
3732  * hardware and setting the PHY config based on the determined capabilities. If
3733  * link changes, link event will be triggered because both the Enable Automatic
3734  * Link Update and LESM Enable bits are set when setting the PHY capabilities.
3735  */
3736 static enum ice_status
3737 ice_force_phys_link_state(struct ice_hw *hw, bool link_up)
3738 {
3739         struct ice_aqc_set_phy_cfg_data cfg = { 0 };
3740         struct ice_aqc_get_phy_caps_data *pcaps;
3741         struct ice_port_info *pi;
3742         enum ice_status status;
3743
3744         if (!hw || !hw->port_info)
3745                 return ICE_ERR_PARAM;
3746
3747         pi = hw->port_info;
3748
3749         pcaps = (struct ice_aqc_get_phy_caps_data *)
3750                 ice_malloc(hw, sizeof(*pcaps));
3751         if (!pcaps)
3752                 return ICE_ERR_NO_MEMORY;
3753
3754         status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG,
3755                                      pcaps, NULL);
3756         if (status)
3757                 goto out;
3758
3759         /* No change in link */
3760         if (link_up == !!(pcaps->caps & ICE_AQC_PHY_EN_LINK) &&
3761             link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP))
3762                 goto out;
3763
3764         cfg.phy_type_low = pcaps->phy_type_low;
3765         cfg.phy_type_high = pcaps->phy_type_high;
3766         cfg.caps = pcaps->caps | ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
3767         cfg.low_power_ctrl_an = pcaps->low_power_ctrl_an;
3768         cfg.eee_cap = pcaps->eee_cap;
3769         cfg.eeer_value = pcaps->eeer_value;
3770         cfg.link_fec_opt = pcaps->link_fec_options;
3771         if (link_up)
3772                 cfg.caps |= ICE_AQ_PHY_ENA_LINK;
3773         else
3774                 cfg.caps &= ~ICE_AQ_PHY_ENA_LINK;
3775
3776         status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL);
3777
3778 out:
3779         ice_free(hw, pcaps);
3780         return status;
3781 }
3782
3783 static int
3784 ice_dev_set_link_up(struct rte_eth_dev *dev)
3785 {
3786         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3787
3788         return ice_force_phys_link_state(hw, true);
3789 }
3790
3791 static int
3792 ice_dev_set_link_down(struct rte_eth_dev *dev)
3793 {
3794         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3795
3796         return ice_force_phys_link_state(hw, false);
3797 }
3798
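/* The hardware frame size includes the L2 overhead, so the MTU check adds
 * ICE_ETH_OVERHEAD before comparing against ICE_FRAME_SIZE_MAX, and the
 * jumbo frame offload is toggled when the resulting frame size crosses
 * ICE_ETH_MAX_LEN.
 */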
3799 static int
3800 ice_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
3801 {
3802         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3803         struct rte_eth_dev_data *dev_data = pf->dev_data;
3804         uint32_t frame_size = mtu + ICE_ETH_OVERHEAD;
3805
3806         /* check if mtu is within the allowed range */
3807         if (mtu < RTE_ETHER_MIN_MTU || frame_size > ICE_FRAME_SIZE_MAX)
3808                 return -EINVAL;
3809
3810         /* MTU setting is forbidden while the port is started */
3811         if (dev_data->dev_started) {
3812                 PMD_DRV_LOG(ERR,
3813                             "port %d must be stopped before configuration",
3814                             dev_data->port_id);
3815                 return -EBUSY;
3816         }
3817
3818         if (frame_size > ICE_ETH_MAX_LEN)
3819                 dev_data->dev_conf.rxmode.offloads |=
3820                         DEV_RX_OFFLOAD_JUMBO_FRAME;
3821         else
3822                 dev_data->dev_conf.rxmode.offloads &=
3823                         ~DEV_RX_OFFLOAD_JUMBO_FRAME;
3824
3825         dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
3826
3827         return 0;
3828 }
3829
3830 static int ice_macaddr_set(struct rte_eth_dev *dev,
3831                            struct rte_ether_addr *mac_addr)
3832 {
3833         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3834         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3835         struct ice_vsi *vsi = pf->main_vsi;
3836         struct ice_mac_filter *f;
3837         uint8_t flags = 0;
3838         int ret;
3839
3840         if (!rte_is_valid_assigned_ether_addr(mac_addr)) {
3841                 PMD_DRV_LOG(ERR, "Tried to set invalid MAC address.");
3842                 return -EINVAL;
3843         }
3844
3845         TAILQ_FOREACH(f, &vsi->mac_list, next) {
3846                 if (rte_is_same_ether_addr(&pf->dev_addr, &f->mac_info.mac_addr))
3847                         break;
3848         }
3849
3850         if (!f) {
3851                 PMD_DRV_LOG(ERR, "Failed to find filter for default mac");
3852                 return -EIO;
3853         }
3854
3855         ret = ice_remove_mac_filter(vsi, &f->mac_info.mac_addr);
3856         if (ret != ICE_SUCCESS) {
3857                 PMD_DRV_LOG(ERR, "Failed to delete mac filter");
3858                 return -EIO;
3859         }
3860         ret = ice_add_mac_filter(vsi, mac_addr);
3861         if (ret != ICE_SUCCESS) {
3862                 PMD_DRV_LOG(ERR, "Failed to add mac filter");
3863                 return -EIO;
3864         }
3865         rte_ether_addr_copy(mac_addr, &pf->dev_addr);
3866
3867         flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL;
3868         ret = ice_aq_manage_mac_write(hw, mac_addr->addr_bytes, flags, NULL);
3869         if (ret != ICE_SUCCESS)
3870                 PMD_DRV_LOG(ERR, "Failed to set manage mac");
3871
3872         return 0;
3873 }
3874
3875 /* Add a MAC address, and update filters */
3876 static int
3877 ice_macaddr_add(struct rte_eth_dev *dev,
3878                 struct rte_ether_addr *mac_addr,
3879                 __rte_unused uint32_t index,
3880                 __rte_unused uint32_t pool)
3881 {
3882         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3883         struct ice_vsi *vsi = pf->main_vsi;
3884         int ret;
3885
3886         ret = ice_add_mac_filter(vsi, mac_addr);
3887         if (ret != ICE_SUCCESS) {
3888                 PMD_DRV_LOG(ERR, "Failed to add MAC filter");
3889                 return -EINVAL;
3890         }
3891
3892         return ICE_SUCCESS;
3893 }
3894
3895 /* Remove a MAC address, and update filters */
3896 static void
3897 ice_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
3898 {
3899         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3900         struct ice_vsi *vsi = pf->main_vsi;
3901         struct rte_eth_dev_data *data = dev->data;
3902         struct rte_ether_addr *macaddr;
3903         int ret;
3904
3905         macaddr = &data->mac_addrs[index];
3906         ret = ice_remove_mac_filter(vsi, macaddr);
3907         if (ret) {
3908                 PMD_DRV_LOG(ERR, "Failed to remove MAC filter");
3909                 return;
3910         }
3911 }
3912
3913 static int
3914 ice_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
3915 {
3916         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3917         struct ice_vlan vlan = ICE_VLAN(RTE_ETHER_TYPE_VLAN, vlan_id);
3918         struct ice_vsi *vsi = pf->main_vsi;
3919         int ret;
3920
3921         PMD_INIT_FUNC_TRACE();
3922
3923         /**
3924          * VLAN 0 is the generic filter for untagged packets
3925          * and can't be added or removed by the user.
3926          */
3927         if (vlan_id == 0)
3928                 return 0;
3929
3930         if (on) {
3931                 ret = ice_add_vlan_filter(vsi, &vlan);
3932                 if (ret < 0) {
3933                         PMD_DRV_LOG(ERR, "Failed to add vlan filter");
3934                         return -EINVAL;
3935                 }
3936         } else {
3937                 ret = ice_remove_vlan_filter(vsi, &vlan);
3938                 if (ret < 0) {
3939                         PMD_DRV_LOG(ERR, "Failed to remove vlan filter");
3940                         return -EINVAL;
3941                 }
3942         }
3943
3944         return 0;
3945 }
3946
3947 /* In Single VLAN Mode (SVM), single VLAN filters via ICE_SW_LKUP_VLAN are
3948  * based on the inner VLAN ID, so the VLAN TPID (i.e. 0x8100 or 0x88a8)
3949  * doesn't matter. In Double VLAN Mode (DVM), outer/single VLAN filters via
3950  * ICE_SW_LKUP_VLAN are based on the outer/single VLAN ID + VLAN TPID.
3951  *
3952  * For both modes add a VLAN 0 + no VLAN TPID filter to handle untagged traffic
3953  * when VLAN pruning is enabled. Also, this handles VLAN 0 priority tagged
3954  * traffic in SVM, since the VLAN TPID isn't part of filtering.
3955  *
3956  * If DVM is enabled then an explicit VLAN 0 + VLAN TPID filter needs to be
3957  * added to allow VLAN 0 priority tagged traffic in DVM, since the VLAN TPID is
3958  * part of filtering.
3959  */
3960 static int
3961 ice_vsi_add_vlan_zero(struct ice_vsi *vsi)
3962 {
3963         struct ice_vlan vlan;
3964         int err;
3965
3966         vlan = ICE_VLAN(0, 0);
3967         err = ice_add_vlan_filter(vsi, &vlan);
3968         if (err) {
3969                 PMD_DRV_LOG(DEBUG, "Failed to add VLAN ID 0");
3970                 return err;
3971         }
3972
3973         /* in SVM both VLAN 0 filters are identical */
3974         if (!ice_is_dvm_ena(&vsi->adapter->hw))
3975                 return 0;
3976
3977         vlan = ICE_VLAN(RTE_ETHER_TYPE_VLAN, 0);
3978         err = ice_add_vlan_filter(vsi, &vlan);
3979         if (err) {
3980                 PMD_DRV_LOG(DEBUG, "Failed to add VLAN ID 0 in double VLAN mode");
3981                 return err;
3982         }
3983
3984         return 0;
3985 }
3986
3987 /*
3988  * Delete the VLAN 0 filters in the same manner that they were added in
3989  * ice_vsi_add_vlan_zero.
3990  */
3991 static int
3992 ice_vsi_del_vlan_zero(struct ice_vsi *vsi)
3993 {
3994         struct ice_vlan vlan;
3995         int err;
3996
3997         vlan = ICE_VLAN(0, 0);
3998         err = ice_remove_vlan_filter(vsi, &vlan);
3999         if (err) {
4000                 PMD_DRV_LOG(DEBUG, "Failed to remove VLAN ID 0");
4001                 return err;
4002         }
4003
4004         /* in SVM both VLAN 0 filters are identical */
4005         if (!ice_is_dvm_ena(&vsi->adapter->hw))
4006                 return 0;
4007
4008         vlan = ICE_VLAN(RTE_ETHER_TYPE_VLAN, 0);
4009         err = ice_remove_vlan_filter(vsi, &vlan);
4010         if (err) {
4011                 PMD_DRV_LOG(DEBUG, "Failed to remove VLAN ID 0 in double VLAN mode");
4012                 return err;
4013         }
4014
4015         return 0;
4016 }
4017
4018 /* Configure vlan filter on or off */
4019 static int
4020 ice_vsi_config_vlan_filter(struct ice_vsi *vsi, bool on)
4021 {
4022         struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
4023         struct ice_vsi_ctx ctxt;
4024         uint8_t sw_flags2;
4025         int ret = 0;
4026
4027         sw_flags2 = ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
4028
4029         if (on)
4030                 vsi->info.sw_flags2 |= sw_flags2;
4031         else
4032                 vsi->info.sw_flags2 &= ~sw_flags2;
4033
4034         vsi->info.sw_id = hw->port_info->sw_id;
4035         (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
4036         ctxt.info.valid_sections =
4037                 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_SW_VALID |
4038                                  ICE_AQ_VSI_PROP_SECURITY_VALID);
4039         ctxt.vsi_num = vsi->vsi_id;
4040
4041         ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
4042         if (ret) {
4043                 PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan rx pruning",
4044                             on ? "enable" : "disable");
4045                 return -EINVAL;
4046         } else {
4047                 vsi->info.valid_sections |=
4048                         rte_cpu_to_le_16(ICE_AQ_VSI_PROP_SW_VALID |
4049                                          ICE_AQ_VSI_PROP_SECURITY_VALID);
4050         }
4051
4052         /* to be consistent with other drivers, allow untagged packets when the VLAN filter is on */
4053         if (on)
4054                 ret = ice_vsi_add_vlan_zero(vsi);
4055         else
4056                 ret = ice_vsi_del_vlan_zero(vsi);
4057
4058         return ret;
4059 }
4060
4061 /* Manage VLAN stripping for the VSI for Rx */
4062 static int
4063 ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena)
4064 {
4065         struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
4066         struct ice_vsi_ctx ctxt;
4067         enum ice_status status;
4068         int err = 0;
4069
4070         /* do not allow modifying VLAN stripping when a port VLAN is configured
4071          * on this VSI
4072          */
4073         if (vsi->info.port_based_inner_vlan)
4074                 return 0;
4075
4076         memset(&ctxt, 0, sizeof(ctxt));
4077
4078         if (ena)
4079                 /* Strip VLAN tag from Rx packet and put it in the desc */
4080                 ctxt.info.inner_vlan_flags =
4081                                         ICE_AQ_VSI_INNER_VLAN_EMODE_STR_BOTH;
4082         else
4083                 /* Disable stripping. Leave tag in packet */
4084                 ctxt.info.inner_vlan_flags =
4085                                         ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING;
4086
4087         /* Allow all packets untagged/tagged */
4088         ctxt.info.inner_vlan_flags |= ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL;
4089
4090         ctxt.info.valid_sections = rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID);
4091
4092         status = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
4093         if (status) {
4094                 PMD_DRV_LOG(ERR, "Update VSI failed to %s vlan stripping",
4095                             ena ? "enable" : "disable");
4096                 err = -EIO;
4097         } else {
4098                 vsi->info.inner_vlan_flags = ctxt.info.inner_vlan_flags;
4099         }
4100
4101         return err;
4102 }
4103
4104 static int
4105 ice_vsi_ena_inner_stripping(struct ice_vsi *vsi)
4106 {
4107         return ice_vsi_manage_vlan_stripping(vsi, true);
4108 }
4109
4110 static int
4111 ice_vsi_dis_inner_stripping(struct ice_vsi *vsi)
4112 {
4113         return ice_vsi_manage_vlan_stripping(vsi, false);
4114 }
4115
4116 static int ice_vsi_ena_outer_stripping(struct ice_vsi *vsi)
4117 {
4118         struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
4119         struct ice_vsi_ctx ctxt;
4120         enum ice_status status;
4121         int err = 0;
4122
4123         /* do not allow modifying VLAN stripping when a port VLAN is configured
4124          * on this VSI
4125          */
4126         if (vsi->info.port_based_outer_vlan)
4127                 return 0;
4128
4129         memset(&ctxt, 0, sizeof(ctxt));
4130
4131         ctxt.info.valid_sections =
4132                 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);
4133         /* clear current outer VLAN strip settings */
4134         ctxt.info.outer_vlan_flags = vsi->info.outer_vlan_flags &
4135                 ~(ICE_AQ_VSI_OUTER_VLAN_EMODE_M | ICE_AQ_VSI_OUTER_TAG_TYPE_M);
4136         ctxt.info.outer_vlan_flags |=
4137                 (ICE_AQ_VSI_OUTER_VLAN_EMODE_SHOW_BOTH <<
4138                  ICE_AQ_VSI_OUTER_VLAN_EMODE_S) |
4139                 (ICE_AQ_VSI_OUTER_TAG_VLAN_8100 <<
4140                  ICE_AQ_VSI_OUTER_TAG_TYPE_S);
4141
4142         status = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
4143         if (status) {
4144                 PMD_DRV_LOG(ERR, "Update VSI failed to enable outer VLAN stripping");
4145                 err = -EIO;
4146         } else {
4147                 vsi->info.outer_vlan_flags = ctxt.info.outer_vlan_flags;
4148         }
4149
4150         return err;
4151 }
4152
4153 static int
4154 ice_vsi_dis_outer_stripping(struct ice_vsi *vsi)
4155 {
4156         struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
4157         struct ice_vsi_ctx ctxt;
4158         enum ice_status status;
4159         int err = 0;
4160
4161         if (vsi->info.port_based_outer_vlan)
4162                 return 0;
4163
4164         memset(&ctxt, 0, sizeof(ctxt));
4165
4166         ctxt.info.valid_sections =
4167                 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);
4168         /* clear current outer VLAN strip settings */
4169         ctxt.info.outer_vlan_flags = vsi->info.outer_vlan_flags &
4170                 ~ICE_AQ_VSI_OUTER_VLAN_EMODE_M;
4171         ctxt.info.outer_vlan_flags |= ICE_AQ_VSI_OUTER_VLAN_EMODE_NOTHING <<
4172                 ICE_AQ_VSI_OUTER_VLAN_EMODE_S;
4173
4174         status = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
4175         if (status) {
4176                 PMD_DRV_LOG(ERR, "Update VSI failed to disable outer VLAN stripping");
4177                 err = -EIO;
4178         } else {
4179                 vsi->info.outer_vlan_flags = ctxt.info.outer_vlan_flags;
4180         }
4181
4182         return err;
4183 }
4184
4185 static int
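/* Dispatch VLAN strip configuration to the right tag layer: in Double
 * VLAN Mode (DVM) the ethdev VLAN_STRIP offload controls the outer tag,
 * otherwise the inner tag is stripped as on single-VLAN setups.
 */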
4186 ice_vsi_config_vlan_stripping(struct ice_vsi *vsi, bool ena)
4187 {
4188         struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
4189         int ret;
4190
4191         if (ice_is_dvm_ena(hw)) {
4192                 if (ena)
4193                         ret = ice_vsi_ena_outer_stripping(vsi);
4194                 else
4195                         ret = ice_vsi_dis_outer_stripping(vsi);
4196         } else {
4197                 if (ena)
4198                         ret = ice_vsi_ena_inner_stripping(vsi);
4199                 else
4200                         ret = ice_vsi_dis_inner_stripping(vsi);
4201         }
4202
4203         return ret;
4204 }
4205
4206 static int
4207 ice_vlan_offload_set(struct rte_eth_dev *dev, int mask)
4208 {
4209         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4210         struct ice_vsi *vsi = pf->main_vsi;
4211         struct rte_eth_rxmode *rxmode;
4212
4213         rxmode = &dev->data->dev_conf.rxmode;
4214         if (mask & ETH_VLAN_FILTER_MASK) {
4215                 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
4216                         ice_vsi_config_vlan_filter(vsi, true);
4217                 else
4218                         ice_vsi_config_vlan_filter(vsi, false);
4219         }
4220
4221         if (mask & ETH_VLAN_STRIP_MASK) {
4222                 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
4223                         ice_vsi_config_vlan_stripping(vsi, true);
4224                 else
4225                         ice_vsi_config_vlan_stripping(vsi, false);
4226         }
4227
4228         return 0;
4229 }
4230
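/* Read the RSS lookup table, through the admin queue when the PF
 * supports it, otherwise directly from the PFQF_HLUT registers.
 */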
4231 static int
4232 ice_get_rss_lut(struct ice_vsi *vsi, uint8_t *lut, uint16_t lut_size)
4233 {
4234         struct ice_aq_get_set_rss_lut_params lut_params;
4235         struct ice_pf *pf = ICE_VSI_TO_PF(vsi);
4236         struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
4237         int ret;
4238
4239         if (!lut)
4240                 return -EINVAL;
4241
4242         if (pf->flags & ICE_FLAG_RSS_AQ_CAPABLE) {
4243                 lut_params.vsi_handle = vsi->idx;
4244                 lut_params.lut_size = lut_size;
4245                 lut_params.lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF;
4246                 lut_params.lut = lut;
4247                 lut_params.global_lut_id = 0;
4248                 ret = ice_aq_get_rss_lut(hw, &lut_params);
4249                 if (ret) {
4250                         PMD_DRV_LOG(ERR, "Failed to get RSS lookup table");
4251                         return -EINVAL;
4252                 }
4253         } else {
4254                 uint64_t *lut_dw = (uint64_t *)lut;
4255                 uint16_t i, lut_size_dw = lut_size / 4;
4256
4257                 for (i = 0; i < lut_size_dw; i++)
4258                         lut_dw[i] = ICE_READ_REG(hw, PFQF_HLUT(i));
4259         }
4260
4261         return 0;
4262 }
4263
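/* Program the RSS lookup table, through the admin queue when the PF
 * supports it, otherwise by writing the PFQF_HLUT registers directly.
 */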
4264 static int
4265 ice_set_rss_lut(struct ice_vsi *vsi, uint8_t *lut, uint16_t lut_size)
4266 {
4267         struct ice_aq_get_set_rss_lut_params lut_params;
4268         struct ice_pf *pf;
4269         struct ice_hw *hw;
4270         int ret;
4271
4272         if (!vsi || !lut)
4273                 return -EINVAL;
4274
4275         pf = ICE_VSI_TO_PF(vsi);
4276         hw = ICE_VSI_TO_HW(vsi);
4277
4278         if (pf->flags & ICE_FLAG_RSS_AQ_CAPABLE) {
4279                 lut_params.vsi_handle = vsi->idx;
4280                 lut_params.lut_size = lut_size;
4281                 lut_params.lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF;
4282                 lut_params.lut = lut;
4283                 lut_params.global_lut_id = 0;
4284                 ret = ice_aq_set_rss_lut(hw, &lut_params);
4285                 if (ret) {
4286                         PMD_DRV_LOG(ERR, "Failed to set RSS lookup table");
4287                         return -EINVAL;
4288                 }
4289         } else {
4290                 uint64_t *lut_dw = (uint64_t *)lut;
4291                 uint16_t i, lut_size_dw = lut_size / 4;
4292
4293                 for (i = 0; i < lut_size_dw; i++)
4294                         ICE_WRITE_REG(hw, PFQF_HLUT(i), lut_dw[i]);
4295
4296                 ice_flush(hw);
4297         }
4298
4299         return 0;
4300 }
4301
4302 static int
4303 ice_rss_reta_update(struct rte_eth_dev *dev,
4304                     struct rte_eth_rss_reta_entry64 *reta_conf,
4305                     uint16_t reta_size)
4306 {
4307         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4308         uint16_t i, lut_size = pf->hash_lut_size;
4309         uint16_t idx, shift;
4310         uint8_t *lut;
4311         int ret;
4312
4313         if (reta_size != ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128 &&
4314             reta_size != ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512 &&
4315             reta_size != ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K) {
4316                 PMD_DRV_LOG(ERR,
4317                             "The size of hash lookup table configured (%d) "
4318                             "doesn't match the sizes hardware can "
4319                             "support (128, 512, 2048)",
4320                             reta_size);
4321                 return -EINVAL;
4322         }
4323
4324         /* We MUST use the current LUT size to get the RSS lookup table;
4325          * otherwise it will fail with error code -100.
4326          */
4327         lut = rte_zmalloc(NULL, RTE_MAX(reta_size, lut_size), 0);
4328         if (!lut) {
4329                 PMD_DRV_LOG(ERR, "Failed to allocate memory for RSS LUT");
4330                 return -ENOMEM;
4331         }
4332         ret = ice_get_rss_lut(pf->main_vsi, lut, lut_size);
4333         if (ret)
4334                 goto out;
4335
4336         for (i = 0; i < reta_size; i++) {
4337                 idx = i / RTE_RETA_GROUP_SIZE;
4338                 shift = i % RTE_RETA_GROUP_SIZE;
4339                 if (reta_conf[idx].mask & (1ULL << shift))
4340                         lut[i] = reta_conf[idx].reta[shift];
4341         }
4342         ret = ice_set_rss_lut(pf->main_vsi, lut, reta_size);
4343         if (ret == 0 && lut_size != reta_size) {
4344                 PMD_DRV_LOG(INFO,
4345                             "The size of hash lookup table is changed from (%d) to (%d)",
4346                             lut_size, reta_size);
4347                 pf->hash_lut_size = reta_size;
4348         }
4349
4350 out:
4351         rte_free(lut);
4352
4353         return ret;
4354 }
4355
4356 static int
4357 ice_rss_reta_query(struct rte_eth_dev *dev,
4358                    struct rte_eth_rss_reta_entry64 *reta_conf,
4359                    uint16_t reta_size)
4360 {
4361         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4362         uint16_t i, lut_size = pf->hash_lut_size;
4363         uint16_t idx, shift;
4364         uint8_t *lut;
4365         int ret;
4366
4367         if (reta_size != lut_size) {
4368                 PMD_DRV_LOG(ERR,
4369                             "The size of hash lookup table configured (%d) "
4370                             "doesn't match the size hardware can "
4371                             "support (%d)",
4372                             reta_size, lut_size);
4373                 return -EINVAL;
4374         }
4375
4376         lut = rte_zmalloc(NULL, reta_size, 0);
4377         if (!lut) {
4378                 PMD_DRV_LOG(ERR, "Failed to allocate memory for RSS LUT");
4379                 return -ENOMEM;
4380         }
4381
4382         ret = ice_get_rss_lut(pf->main_vsi, lut, reta_size);
4383         if (ret)
4384                 goto out;
4385
4386         for (i = 0; i < reta_size; i++) {
4387                 idx = i / RTE_RETA_GROUP_SIZE;
4388                 shift = i % RTE_RETA_GROUP_SIZE;
4389                 if (reta_conf[idx].mask & (1ULL << shift))
4390                         reta_conf[idx].reta[shift] = lut[i];
4391         }
4392
4393 out:
4394         rte_free(lut);
4395
4396         return ret;
4397 }
4398
4399 static int
4400 ice_set_rss_key(struct ice_vsi *vsi, uint8_t *key, uint8_t key_len)
4401 {
4402         struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
4403         int ret = 0;
4404
4405         if (!key || key_len == 0) {
4406                 PMD_DRV_LOG(DEBUG, "No key to be configured");
4407                 return 0;
4408         } else if (key_len != (VSIQF_HKEY_MAX_INDEX + 1) *
4409                    sizeof(uint32_t)) {
4410                 PMD_DRV_LOG(ERR, "Invalid key length %u", key_len);
4411                 return -EINVAL;
4412         }
4413
4414         struct ice_aqc_get_set_rss_keys *key_dw =
4415                 (struct ice_aqc_get_set_rss_keys *)key;
4416
4417         ret = ice_aq_set_rss_key(hw, vsi->idx, key_dw);
4418         if (ret) {
4419                 PMD_DRV_LOG(ERR, "Failed to configure RSS key via AQ");
4420                 ret = -EINVAL;
4421         }
4422
4423         return ret;
4424 }
4425
4426 static int
4427 ice_get_rss_key(struct ice_vsi *vsi, uint8_t *key, uint8_t *key_len)
4428 {
4429         struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
4430         int ret;
4431
4432         if (!key || !key_len)
4433                 return -EINVAL;
4434
4435         ret = ice_aq_get_rss_key
4436                 (hw, vsi->idx,
4437                  (struct ice_aqc_get_set_rss_keys *)key);
4438         if (ret) {
4439                 PMD_DRV_LOG(ERR, "Failed to get RSS key via AQ");
4440                 return -EINVAL;
4441         }
4442         *key_len = (VSIQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
4443
4444         return 0;
4445 }
4446
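/* Update the RSS key and the set of enabled hash types; rss_hf == 0
 * only clears the recorded hash configuration.
 */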
4447 static int
4448 ice_rss_hash_update(struct rte_eth_dev *dev,
4449                     struct rte_eth_rss_conf *rss_conf)
4450 {
4451         int ret;
4452         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4453         struct ice_vsi *vsi = pf->main_vsi;
4454
4455         /* set hash key */
4456         ret = ice_set_rss_key(vsi, rss_conf->rss_key, rss_conf->rss_key_len);
4457         if (ret)
4458                 return ret;
4459
4460         if (rss_conf->rss_hf == 0) {
4461                 pf->rss_hf = 0;
4462                 return 0;
4463         }
4464
4465         /* RSS hash configuration */
4466         ice_rss_hash_set(pf, rss_conf->rss_hf);
4467
4468         return 0;
4469 }
4470
4471 static int
4472 ice_rss_hash_conf_get(struct rte_eth_dev *dev,
4473                       struct rte_eth_rss_conf *rss_conf)
4474 {
4475         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4476         struct ice_vsi *vsi = pf->main_vsi;
4477
4478         ice_get_rss_key(vsi, rss_conf->rss_key,
4479                         &rss_conf->rss_key_len);
4480
4481         rss_conf->rss_hf = pf->rss_hf;
4482         return 0;
4483 }
4484
4485 static int
4486 ice_promisc_enable(struct rte_eth_dev *dev)
4487 {
4488         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4489         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4490         struct ice_vsi *vsi = pf->main_vsi;
4491         enum ice_status status;
4492         uint8_t pmask;
4493         int ret = 0;
4494
4495         pmask = ICE_PROMISC_UCAST_RX | ICE_PROMISC_UCAST_TX |
4496                 ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX;
4497
4498         status = ice_set_vsi_promisc(hw, vsi->idx, pmask, 0);
4499         switch (status) {
4500         case ICE_ERR_ALREADY_EXISTS:
4501                 PMD_DRV_LOG(DEBUG, "Promisc mode has already been enabled");
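                /* fall-through: already enabled, treat as success */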
4502         case ICE_SUCCESS:
4503                 break;
4504         default:
4505                 PMD_DRV_LOG(ERR, "Failed to enable promisc, err=%d", status);
4506                 ret = -EAGAIN;
4507         }
4508
4509         return ret;
4510 }
4511
4512 static int
4513 ice_promisc_disable(struct rte_eth_dev *dev)
4514 {
4515         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4516         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4517         struct ice_vsi *vsi = pf->main_vsi;
4518         enum ice_status status;
4519         uint8_t pmask;
4520         int ret = 0;
4521
4522         pmask = ICE_PROMISC_UCAST_RX | ICE_PROMISC_UCAST_TX |
4523                 ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX;
4524
4525         status = ice_clear_vsi_promisc(hw, vsi->idx, pmask, 0);
4526         if (status != ICE_SUCCESS) {
4527                 PMD_DRV_LOG(ERR, "Failed to clear promisc, err=%d", status);
4528                 ret = -EAGAIN;
4529         }
4530
4531         return ret;
4532 }
4533
4534 static int
4535 ice_allmulti_enable(struct rte_eth_dev *dev)
4536 {
4537         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4538         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4539         struct ice_vsi *vsi = pf->main_vsi;
4540         enum ice_status status;
4541         uint8_t pmask;
4542         int ret = 0;
4543
4544         pmask = ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX;
4545
4546         status = ice_set_vsi_promisc(hw, vsi->idx, pmask, 0);
4547
4548         switch (status) {
4549         case ICE_ERR_ALREADY_EXISTS:
4550                 PMD_DRV_LOG(DEBUG, "Allmulti has already been enabled");
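                /* fall-through: already enabled, treat as success */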
4551         case ICE_SUCCESS:
4552                 break;
4553         default:
4554                 PMD_DRV_LOG(ERR, "Failed to enable allmulti, err=%d", status);
4555                 ret = -EAGAIN;
4556         }
4557
4558         return ret;
4559 }
4560
4561 static int
4562 ice_allmulti_disable(struct rte_eth_dev *dev)
4563 {
4564         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4565         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4566         struct ice_vsi *vsi = pf->main_vsi;
4567         enum ice_status status;
4568         uint8_t pmask;
4569         int ret = 0;
4570
4571         if (dev->data->promiscuous == 1)
4572                 return 0; /* must remain in all_multicast mode */
4573
4574         pmask = ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX;
4575
4576         status = ice_clear_vsi_promisc(hw, vsi->idx, pmask, 0);
4577         if (status != ICE_SUCCESS) {
4578                 PMD_DRV_LOG(ERR, "Failed to clear allmulti, err=%d", status);
4579                 ret = -EAGAIN;
4580         }
4581
4582         return ret;
4583 }
4584
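/* Arm the queue's MSI-X vector via GLINT_DYN_CTL and ack any pending
 * interrupt so the queue can raise interrupts again.
 */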
4585 static int ice_rx_queue_intr_enable(struct rte_eth_dev *dev,
4586                                     uint16_t queue_id)
4587 {
4588         struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
4589         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
4590         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4591         uint32_t val;
4592         uint16_t msix_intr;
4593
4594         msix_intr = intr_handle->intr_vec[queue_id];
4595
4596         val = GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
4597               GLINT_DYN_CTL_ITR_INDX_M;
4598         val &= ~GLINT_DYN_CTL_WB_ON_ITR_M;
4599
4600         ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr), val);
4601         rte_intr_ack(&pci_dev->intr_handle);
4602
4603         return 0;
4604 }
4605
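/* Mask the queue's MSI-X vector, keeping only write-back on ITR so
 * descriptor write-back continues while interrupts stay disabled.
 */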
4606 static int ice_rx_queue_intr_disable(struct rte_eth_dev *dev,
4607                                      uint16_t queue_id)
4608 {
4609         struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
4610         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
4611         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4612         uint16_t msix_intr;
4613
4614         msix_intr = intr_handle->intr_vec[queue_id];
4615
4616         ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr), GLINT_DYN_CTL_WB_ON_ITR_M);
4617
4618         return 0;
4619 }
4620
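/* Report the NVM version, EETRACK id and OROM version; returns the
 * required buffer size (including '\0') when fw_size is too small.
 */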
4621 static int
4622 ice_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
4623 {
4624         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4625         u8 ver, patch;
4626         u16 build;
4627         int ret;
4628
4629         ver = hw->flash.orom.major;
4630         patch = hw->flash.orom.patch;
4631         build = hw->flash.orom.build;
4632
4633         ret = snprintf(fw_version, fw_size,
4634                         "%x.%02x 0x%08x %d.%d.%d",
4635                         hw->flash.nvm.major,
4636                         hw->flash.nvm.minor,
4637                         hw->flash.nvm.eetrack,
4638                         ver, build, patch);
4639
4640         /* add one byte for the terminating '\0' */
4641         ret += 1;
4642         if (fw_size < (u32)ret)
4643                 return ret;
4644         else
4645                 return 0;
4646 }
4647
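/* Build a VSI context with the requested PVID/insertion flags and push
 * it to firmware through ice_update_vsi().
 */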
4648 static int
4649 ice_vsi_vlan_pvid_set(struct ice_vsi *vsi, struct ice_vsi_vlan_pvid_info *info)
4650 {
4651         struct ice_hw *hw;
4652         struct ice_vsi_ctx ctxt;
4653         uint8_t vlan_flags = 0;
4654         int ret;
4655
4656         if (!vsi || !info) {
4657                 PMD_DRV_LOG(ERR, "Invalid parameters");
4658                 return -EINVAL;
4659         }
4660
4661         if (info->on) {
4662                 vsi->info.port_based_inner_vlan = info->config.pvid;
4663                 /*
4664                  * If PVID insertion is enabled, only tagged packets
4665                  * are allowed to be sent out.
4666                  */
4667                 vlan_flags = ICE_AQ_VSI_INNER_VLAN_INSERT_PVID |
4668                              ICE_AQ_VSI_INNER_VLAN_TX_MODE_ACCEPTUNTAGGED;
4669         } else {
4670                 vsi->info.port_based_inner_vlan = 0;
4671                 if (info->config.reject.tagged == 0)
4672                         vlan_flags |= ICE_AQ_VSI_INNER_VLAN_TX_MODE_ACCEPTTAGGED;
4673
4674                 if (info->config.reject.untagged == 0)
4675                         vlan_flags |= ICE_AQ_VSI_INNER_VLAN_TX_MODE_ACCEPTUNTAGGED;
4676         }
4677         vsi->info.inner_vlan_flags &= ~(ICE_AQ_VSI_INNER_VLAN_INSERT_PVID |
4678                                   ICE_AQ_VSI_INNER_VLAN_EMODE_M);
4679         vsi->info.inner_vlan_flags |= vlan_flags;
4680         memset(&ctxt, 0, sizeof(ctxt));
4681         rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
4682         ctxt.info.valid_sections =
4683                 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID);
4684         ctxt.vsi_num = vsi->vsi_id;
4685
4686         hw = ICE_VSI_TO_HW(vsi);
4687         ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
4688         if (ret != ICE_SUCCESS) {
4689                 PMD_DRV_LOG(ERR,
4690                             "update VSI for VLAN insert failed, err %d",
4691                             ret);
4692                 return -EINVAL;
4693         }
4694
4695         vsi->info.valid_sections |=
4696                 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID);
4697
4698         return ret;
4699 }
4700
4701 static int
4702 ice_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
4703 {
4704         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4705         struct ice_vsi *vsi = pf->main_vsi;
4706         struct rte_eth_dev_data *data = pf->dev_data;
4707         struct ice_vsi_vlan_pvid_info info;
4708         int ret;
4709
4710         memset(&info, 0, sizeof(info));
4711         info.on = on;
4712         if (info.on) {
4713                 info.config.pvid = pvid;
4714         } else {
4715                 info.config.reject.tagged =
4716                         data->dev_conf.txmode.hw_vlan_reject_tagged;
4717                 info.config.reject.untagged =
4718                         data->dev_conf.txmode.hw_vlan_reject_untagged;
4719         }
4720
4721         ret = ice_vsi_vlan_pvid_set(vsi, &info);
4722         if (ret < 0) {
4723                 PMD_DRV_LOG(ERR, "Failed to set pvid.");
4724                 return -EINVAL;
4725         }
4726
4727         return 0;
4728 }
4729
4730 static int
4731 ice_get_eeprom_length(struct rte_eth_dev *dev)
4732 {
4733         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4734
4735         return hw->flash.flash_size;
4736 }
4737
4738 static int
4739 ice_get_eeprom(struct rte_eth_dev *dev,
4740                struct rte_dev_eeprom_info *eeprom)
4741 {
4742         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4743         enum ice_status status = ICE_SUCCESS;
4744         uint8_t *data = eeprom->data;
4745
4746         eeprom->magic = hw->vendor_id | (hw->device_id << 16);
4747
4748         status = ice_acquire_nvm(hw, ICE_RES_READ);
4749         if (status) {
4750                 PMD_DRV_LOG(ERR, "Failed to acquire NVM.");
4751                 return -EIO;
4752         }
4753
4754         status = ice_read_flat_nvm(hw, eeprom->offset, &eeprom->length,
4755                                    data, false);
4756
4757         ice_release_nvm(hw);
4758
4759         if (status) {
4760                 PMD_DRV_LOG(ERR, "EEPROM read failed.");
4761                 return -EIO;
4762         }
4763
4764         return 0;
4765 }
4766
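/* Read a 32-bit HW counter and store its delta against the saved
 * offset, compensating for a single counter wrap-around.
 */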
4767 static void
4768 ice_stat_update_32(struct ice_hw *hw,
4769                    uint32_t reg,
4770                    bool offset_loaded,
4771                    uint64_t *offset,
4772                    uint64_t *stat)
4773 {
4774         uint64_t new_data;
4775
4776         new_data = (uint64_t)ICE_READ_REG(hw, reg);
4777         if (!offset_loaded)
4778                 *offset = new_data;
4779
4780         if (new_data >= *offset)
4781                 *stat = (uint64_t)(new_data - *offset);
4782         else
4783                 *stat = (uint64_t)((new_data +
4784                                     ((uint64_t)1 << ICE_32_BIT_WIDTH))
4785                                    - *offset);
4786 }
4787
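/* Combine the high/low registers of a 40-bit HW counter and store its
 * delta against the saved offset, compensating for wrap-around.
 */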
4788 static void
4789 ice_stat_update_40(struct ice_hw *hw,
4790                    uint32_t hireg,
4791                    uint32_t loreg,
4792                    bool offset_loaded,
4793                    uint64_t *offset,
4794                    uint64_t *stat)
4795 {
4796         uint64_t new_data;
4797
4798         new_data = (uint64_t)ICE_READ_REG(hw, loreg);
4799         new_data |= (uint64_t)(ICE_READ_REG(hw, hireg) & ICE_8_BIT_MASK) <<
4800                     ICE_32_BIT_WIDTH;
4801
4802         if (!offset_loaded)
4803                 *offset = new_data;
4804
4805         if (new_data >= *offset)
4806                 *stat = new_data - *offset;
4807         else
4808                 *stat = (uint64_t)((new_data +
4809                                     ((uint64_t)1 << ICE_40_BIT_WIDTH)) -
4810                                    *offset);
4811
4812         *stat &= ICE_40_BIT_MASK;
4813 }
4814
4815 /* Get all the statistics of a VSI */
4816 static void
4817 ice_update_vsi_stats(struct ice_vsi *vsi)
4818 {
4819         struct ice_eth_stats *oes = &vsi->eth_stats_offset;
4820         struct ice_eth_stats *nes = &vsi->eth_stats;
4821         struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
4822         int idx = rte_le_to_cpu_16(vsi->vsi_id);
4823
4824         ice_stat_update_40(hw, GLV_GORCH(idx), GLV_GORCL(idx),
4825                            vsi->offset_loaded, &oes->rx_bytes,
4826                            &nes->rx_bytes);
4827         ice_stat_update_40(hw, GLV_UPRCH(idx), GLV_UPRCL(idx),
4828                            vsi->offset_loaded, &oes->rx_unicast,
4829                            &nes->rx_unicast);
4830         ice_stat_update_40(hw, GLV_MPRCH(idx), GLV_MPRCL(idx),
4831                            vsi->offset_loaded, &oes->rx_multicast,
4832                            &nes->rx_multicast);
4833         ice_stat_update_40(hw, GLV_BPRCH(idx), GLV_BPRCL(idx),
4834                            vsi->offset_loaded, &oes->rx_broadcast,
4835                            &nes->rx_broadcast);
4836         /* extend the 40-bit counter in software when rx_bytes wraps around */
4837         if (vsi->offset_loaded) {
4838                 if (ICE_RXTX_BYTES_LOW(vsi->old_rx_bytes) > nes->rx_bytes)
4839                         nes->rx_bytes += (uint64_t)1 << ICE_40_BIT_WIDTH;
4840                 nes->rx_bytes += ICE_RXTX_BYTES_HIGH(vsi->old_rx_bytes);
4841         }
4842         vsi->old_rx_bytes = nes->rx_bytes;
4843         /* exclude CRC bytes */
4844         nes->rx_bytes -= (nes->rx_unicast + nes->rx_multicast +
4845                           nes->rx_broadcast) * RTE_ETHER_CRC_LEN;
4846
4847         ice_stat_update_32(hw, GLV_RDPC(idx), vsi->offset_loaded,
4848                            &oes->rx_discards, &nes->rx_discards);
4849         /* GLV_REPC not supported */
4850         /* GLV_RMPC not supported */
4851         ice_stat_update_32(hw, GLSWID_RUPP(idx), vsi->offset_loaded,
4852                            &oes->rx_unknown_protocol,
4853                            &nes->rx_unknown_protocol);
4854         ice_stat_update_40(hw, GLV_GOTCH(idx), GLV_GOTCL(idx),
4855                            vsi->offset_loaded, &oes->tx_bytes,
4856                            &nes->tx_bytes);
4857         ice_stat_update_40(hw, GLV_UPTCH(idx), GLV_UPTCL(idx),
4858                            vsi->offset_loaded, &oes->tx_unicast,
4859                            &nes->tx_unicast);
4860         ice_stat_update_40(hw, GLV_MPTCH(idx), GLV_MPTCL(idx),
4861                            vsi->offset_loaded, &oes->tx_multicast,
4862                            &nes->tx_multicast);
4863         ice_stat_update_40(hw, GLV_BPTCH(idx), GLV_BPTCL(idx),
4864                            vsi->offset_loaded,  &oes->tx_broadcast,
4865                            &nes->tx_broadcast);
4866         /* GLV_TDPC not supported */
4867         ice_stat_update_32(hw, GLV_TEPC(idx), vsi->offset_loaded,
4868                            &oes->tx_errors, &nes->tx_errors);
4869         /* extend the 40-bit counter in software when tx_bytes wraps around */
4870         if (vsi->offset_loaded) {
4871                 if (ICE_RXTX_BYTES_LOW(vsi->old_tx_bytes) > nes->tx_bytes)
4872                         nes->tx_bytes += (uint64_t)1 << ICE_40_BIT_WIDTH;
4873                 nes->tx_bytes += ICE_RXTX_BYTES_HIGH(vsi->old_tx_bytes);
4874         }
4875         vsi->old_tx_bytes = nes->tx_bytes;
4876         vsi->offset_loaded = true;
4877
4878         PMD_DRV_LOG(DEBUG, "************** VSI[%u] stats start **************",
4879                     vsi->vsi_id);
4880         PMD_DRV_LOG(DEBUG, "rx_bytes:            %"PRIu64"", nes->rx_bytes);
4881         PMD_DRV_LOG(DEBUG, "rx_unicast:          %"PRIu64"", nes->rx_unicast);
4882         PMD_DRV_LOG(DEBUG, "rx_multicast:        %"PRIu64"", nes->rx_multicast);
4883         PMD_DRV_LOG(DEBUG, "rx_broadcast:        %"PRIu64"", nes->rx_broadcast);
4884         PMD_DRV_LOG(DEBUG, "rx_discards:         %"PRIu64"", nes->rx_discards);
4885         PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
4886                     nes->rx_unknown_protocol);
4887         PMD_DRV_LOG(DEBUG, "tx_bytes:            %"PRIu64"", nes->tx_bytes);
4888         PMD_DRV_LOG(DEBUG, "tx_unicast:          %"PRIu64"", nes->tx_unicast);
4889         PMD_DRV_LOG(DEBUG, "tx_multicast:        %"PRIu64"", nes->tx_multicast);
4890         PMD_DRV_LOG(DEBUG, "tx_broadcast:        %"PRIu64"", nes->tx_broadcast);
4891         PMD_DRV_LOG(DEBUG, "tx_discards:         %"PRIu64"", nes->tx_discards);
4892         PMD_DRV_LOG(DEBUG, "tx_errors:           %"PRIu64"", nes->tx_errors);
4893         PMD_DRV_LOG(DEBUG, "************** VSI[%u] stats end ****************",
4894                     vsi->vsi_id);
4895 }
4896
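/* Read all port-level counters relative to the saved offsets into
 * pf->stats, then refresh the main VSI statistics as well.
 */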
4897 static void
4898 ice_read_stats_registers(struct ice_pf *pf, struct ice_hw *hw)
4899 {
4900         struct ice_hw_port_stats *ns = &pf->stats; /* new stats */
4901         struct ice_hw_port_stats *os = &pf->stats_offset; /* old stats */
4902
4903         /* Get statistics of struct ice_eth_stats */
4904         ice_stat_update_40(hw, GLPRT_GORCH(hw->port_info->lport),
4905                            GLPRT_GORCL(hw->port_info->lport),
4906                            pf->offset_loaded, &os->eth.rx_bytes,
4907                            &ns->eth.rx_bytes);
4908         ice_stat_update_40(hw, GLPRT_UPRCH(hw->port_info->lport),
4909                            GLPRT_UPRCL(hw->port_info->lport),
4910                            pf->offset_loaded, &os->eth.rx_unicast,
4911                            &ns->eth.rx_unicast);
4912         ice_stat_update_40(hw, GLPRT_MPRCH(hw->port_info->lport),
4913                            GLPRT_MPRCL(hw->port_info->lport),
4914                            pf->offset_loaded, &os->eth.rx_multicast,
4915                            &ns->eth.rx_multicast);
4916         ice_stat_update_40(hw, GLPRT_BPRCH(hw->port_info->lport),
4917                            GLPRT_BPRCL(hw->port_info->lport),
4918                            pf->offset_loaded, &os->eth.rx_broadcast,
4919                            &ns->eth.rx_broadcast);
4920         ice_stat_update_32(hw, PRTRPB_RDPC,
4921                            pf->offset_loaded, &os->eth.rx_discards,
4922                            &ns->eth.rx_discards);
4923         /* extend the 40-bit counter in software when rx_bytes wraps around */
4924         if (pf->offset_loaded) {
4925                 if (ICE_RXTX_BYTES_LOW(pf->old_rx_bytes) > ns->eth.rx_bytes)
4926                         ns->eth.rx_bytes += (uint64_t)1 << ICE_40_BIT_WIDTH;
4927                 ns->eth.rx_bytes += ICE_RXTX_BYTES_HIGH(pf->old_rx_bytes);
4928         }
4929         pf->old_rx_bytes = ns->eth.rx_bytes;
4930
4931         /* Workaround: CRC size should not be included in byte statistics,
4932          * so subtract RTE_ETHER_CRC_LEN from the byte counter for each rx
4933          * packet.
4934          */
4935         ns->eth.rx_bytes -= (ns->eth.rx_unicast + ns->eth.rx_multicast +
4936                              ns->eth.rx_broadcast) * RTE_ETHER_CRC_LEN;
4937
4938         /* GLPRT_REPC not supported */
4939         /* GLPRT_RMPC not supported */
4940         ice_stat_update_32(hw, GLSWID_RUPP(hw->port_info->lport),
4941                            pf->offset_loaded,
4942                            &os->eth.rx_unknown_protocol,
4943                            &ns->eth.rx_unknown_protocol);
4944         ice_stat_update_40(hw, GLPRT_GOTCH(hw->port_info->lport),
4945                            GLPRT_GOTCL(hw->port_info->lport),
4946                            pf->offset_loaded, &os->eth.tx_bytes,
4947                            &ns->eth.tx_bytes);
4948         ice_stat_update_40(hw, GLPRT_UPTCH(hw->port_info->lport),
4949                            GLPRT_UPTCL(hw->port_info->lport),
4950                            pf->offset_loaded, &os->eth.tx_unicast,
4951                            &ns->eth.tx_unicast);
4952         ice_stat_update_40(hw, GLPRT_MPTCH(hw->port_info->lport),
4953                            GLPRT_MPTCL(hw->port_info->lport),
4954                            pf->offset_loaded, &os->eth.tx_multicast,
4955                            &ns->eth.tx_multicast);
4956         ice_stat_update_40(hw, GLPRT_BPTCH(hw->port_info->lport),
4957                            GLPRT_BPTCL(hw->port_info->lport),
4958                            pf->offset_loaded, &os->eth.tx_broadcast,
4959                            &ns->eth.tx_broadcast);
4960         /* extend the 40-bit counter in software when tx_bytes wraps around */
4961         if (pf->offset_loaded) {
4962                 if (ICE_RXTX_BYTES_LOW(pf->old_tx_bytes) > ns->eth.tx_bytes)
4963                         ns->eth.tx_bytes += (uint64_t)1 << ICE_40_BIT_WIDTH;
4964                 ns->eth.tx_bytes += ICE_RXTX_BYTES_HIGH(pf->old_tx_bytes);
4965         }
4966         pf->old_tx_bytes = ns->eth.tx_bytes;
4967         ns->eth.tx_bytes -= (ns->eth.tx_unicast + ns->eth.tx_multicast +
4968                              ns->eth.tx_broadcast) * RTE_ETHER_CRC_LEN;
4969
4970         /* GLPRT_TEPC not supported */
4971
4972         /* additional port specific stats */
4973         ice_stat_update_32(hw, GLPRT_TDOLD(hw->port_info->lport),
4974                            pf->offset_loaded, &os->tx_dropped_link_down,
4975                            &ns->tx_dropped_link_down);
4976         ice_stat_update_32(hw, GLPRT_CRCERRS(hw->port_info->lport),
4977                            pf->offset_loaded, &os->crc_errors,
4978                            &ns->crc_errors);
4979         ice_stat_update_32(hw, GLPRT_ILLERRC(hw->port_info->lport),
4980                            pf->offset_loaded, &os->illegal_bytes,
4981                            &ns->illegal_bytes);
4982         /* GLPRT_ERRBC not supported */
4983         ice_stat_update_32(hw, GLPRT_MLFC(hw->port_info->lport),
4984                            pf->offset_loaded, &os->mac_local_faults,
4985                            &ns->mac_local_faults);
4986         ice_stat_update_32(hw, GLPRT_MRFC(hw->port_info->lport),
4987                            pf->offset_loaded, &os->mac_remote_faults,
4988                            &ns->mac_remote_faults);
4989
4990         ice_stat_update_32(hw, GLPRT_RLEC(hw->port_info->lport),
4991                            pf->offset_loaded, &os->rx_len_errors,
4992                            &ns->rx_len_errors);
4993
4994         ice_stat_update_32(hw, GLPRT_LXONRXC(hw->port_info->lport),
4995                            pf->offset_loaded, &os->link_xon_rx,
4996                            &ns->link_xon_rx);
4997         ice_stat_update_32(hw, GLPRT_LXOFFRXC(hw->port_info->lport),
4998                            pf->offset_loaded, &os->link_xoff_rx,
4999                            &ns->link_xoff_rx);
5000         ice_stat_update_32(hw, GLPRT_LXONTXC(hw->port_info->lport),
5001                            pf->offset_loaded, &os->link_xon_tx,
5002                            &ns->link_xon_tx);
5003         ice_stat_update_32(hw, GLPRT_LXOFFTXC(hw->port_info->lport),
5004                            pf->offset_loaded, &os->link_xoff_tx,
5005                            &ns->link_xoff_tx);
5006         ice_stat_update_40(hw, GLPRT_PRC64H(hw->port_info->lport),
5007                            GLPRT_PRC64L(hw->port_info->lport),
5008                            pf->offset_loaded, &os->rx_size_64,
5009                            &ns->rx_size_64);
5010         ice_stat_update_40(hw, GLPRT_PRC127H(hw->port_info->lport),
5011                            GLPRT_PRC127L(hw->port_info->lport),
5012                            pf->offset_loaded, &os->rx_size_127,
5013                            &ns->rx_size_127);
5014         ice_stat_update_40(hw, GLPRT_PRC255H(hw->port_info->lport),
5015                            GLPRT_PRC255L(hw->port_info->lport),
5016                            pf->offset_loaded, &os->rx_size_255,
5017                            &ns->rx_size_255);
5018         ice_stat_update_40(hw, GLPRT_PRC511H(hw->port_info->lport),
5019                            GLPRT_PRC511L(hw->port_info->lport),
5020                            pf->offset_loaded, &os->rx_size_511,
5021                            &ns->rx_size_511);
5022         ice_stat_update_40(hw, GLPRT_PRC1023H(hw->port_info->lport),
5023                            GLPRT_PRC1023L(hw->port_info->lport),
5024                            pf->offset_loaded, &os->rx_size_1023,
5025                            &ns->rx_size_1023);
5026         ice_stat_update_40(hw, GLPRT_PRC1522H(hw->port_info->lport),
5027                            GLPRT_PRC1522L(hw->port_info->lport),
5028                            pf->offset_loaded, &os->rx_size_1522,
5029                            &ns->rx_size_1522);
5030         ice_stat_update_40(hw, GLPRT_PRC9522H(hw->port_info->lport),
5031                            GLPRT_PRC9522L(hw->port_info->lport),
5032                            pf->offset_loaded, &os->rx_size_big,
5033                            &ns->rx_size_big);
5034         ice_stat_update_32(hw, GLPRT_RUC(hw->port_info->lport),
5035                            pf->offset_loaded, &os->rx_undersize,
5036                            &ns->rx_undersize);
5037         ice_stat_update_32(hw, GLPRT_RFC(hw->port_info->lport),
5038                            pf->offset_loaded, &os->rx_fragments,
5039                            &ns->rx_fragments);
5040         ice_stat_update_32(hw, GLPRT_ROC(hw->port_info->lport),
5041                            pf->offset_loaded, &os->rx_oversize,
5042                            &ns->rx_oversize);
5043         ice_stat_update_32(hw, GLPRT_RJC(hw->port_info->lport),
5044                            pf->offset_loaded, &os->rx_jabber,
5045                            &ns->rx_jabber);
5046         ice_stat_update_40(hw, GLPRT_PTC64H(hw->port_info->lport),
5047                            GLPRT_PTC64L(hw->port_info->lport),
5048                            pf->offset_loaded, &os->tx_size_64,
5049                            &ns->tx_size_64);
5050         ice_stat_update_40(hw, GLPRT_PTC127H(hw->port_info->lport),
5051                            GLPRT_PTC127L(hw->port_info->lport),
5052                            pf->offset_loaded, &os->tx_size_127,
5053                            &ns->tx_size_127);
5054         ice_stat_update_40(hw, GLPRT_PTC255H(hw->port_info->lport),
5055                            GLPRT_PTC255L(hw->port_info->lport),
5056                            pf->offset_loaded, &os->tx_size_255,
5057                            &ns->tx_size_255);
5058         ice_stat_update_40(hw, GLPRT_PTC511H(hw->port_info->lport),
5059                            GLPRT_PTC511L(hw->port_info->lport),
5060                            pf->offset_loaded, &os->tx_size_511,
5061                            &ns->tx_size_511);
5062         ice_stat_update_40(hw, GLPRT_PTC1023H(hw->port_info->lport),
5063                            GLPRT_PTC1023L(hw->port_info->lport),
5064                            pf->offset_loaded, &os->tx_size_1023,
5065                            &ns->tx_size_1023);
5066         ice_stat_update_40(hw, GLPRT_PTC1522H(hw->port_info->lport),
5067                            GLPRT_PTC1522L(hw->port_info->lport),
5068                            pf->offset_loaded, &os->tx_size_1522,
5069                            &ns->tx_size_1522);
5070         ice_stat_update_40(hw, GLPRT_PTC9522H(hw->port_info->lport),
5071                            GLPRT_PTC9522L(hw->port_info->lport),
5072                            pf->offset_loaded, &os->tx_size_big,
5073                            &ns->tx_size_big);
5074
5075         /* GLPRT_MSPDC not supported */
5076         /* GLPRT_XEC not supported */
5077
5078         pf->offset_loaded = true;
5079
5080         if (pf->main_vsi)
5081                 ice_update_vsi_stats(pf->main_vsi);
5082 }
5083
5084 /* Get all statistics of a port */
5085 static int
5086 ice_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
5087 {
5088         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5089         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5090         struct ice_hw_port_stats *ns = &pf->stats; /* new stats */
5091
5092         /* read the hardware registers and refresh the stats structs */
5093         ice_read_stats_registers(pf, hw);
5094
5095         stats->ipackets = pf->main_vsi->eth_stats.rx_unicast +
5096                           pf->main_vsi->eth_stats.rx_multicast +
5097                           pf->main_vsi->eth_stats.rx_broadcast -
5098                           pf->main_vsi->eth_stats.rx_discards;
5099         stats->opackets = ns->eth.tx_unicast +
5100                           ns->eth.tx_multicast +
5101                           ns->eth.tx_broadcast;
5102         stats->ibytes   = pf->main_vsi->eth_stats.rx_bytes;
5103         stats->obytes   = ns->eth.tx_bytes;
5104         stats->oerrors  = ns->eth.tx_errors +
5105                           pf->main_vsi->eth_stats.tx_errors;
5106
5107         /* Rx Errors */
5108         stats->imissed  = ns->eth.rx_discards +
5109                           pf->main_vsi->eth_stats.rx_discards;
5110         stats->ierrors  = ns->crc_errors +
5111                           ns->rx_undersize +
5112                           ns->rx_oversize + ns->rx_fragments + ns->rx_jabber;
5113
5114         PMD_DRV_LOG(DEBUG, "*************** PF stats start *****************");
5115         PMD_DRV_LOG(DEBUG, "rx_bytes:   %"PRIu64"", ns->eth.rx_bytes);
5116         PMD_DRV_LOG(DEBUG, "rx_unicast: %"PRIu64"", ns->eth.rx_unicast);
5117         PMD_DRV_LOG(DEBUG, "rx_multicast:%"PRIu64"", ns->eth.rx_multicast);
5118         PMD_DRV_LOG(DEBUG, "rx_broadcast:%"PRIu64"", ns->eth.rx_broadcast);
5119         PMD_DRV_LOG(DEBUG, "rx_discards:%"PRIu64"", ns->eth.rx_discards);
5120         PMD_DRV_LOG(DEBUG, "vsi rx_discards:%"PRIu64"",
5121                     pf->main_vsi->eth_stats.rx_discards);
5122         PMD_DRV_LOG(DEBUG, "rx_unknown_protocol:  %"PRIu64"",
5123                     ns->eth.rx_unknown_protocol);
5124         PMD_DRV_LOG(DEBUG, "tx_bytes:   %"PRIu64"", ns->eth.tx_bytes);
5125         PMD_DRV_LOG(DEBUG, "tx_unicast: %"PRIu64"", ns->eth.tx_unicast);
5126         PMD_DRV_LOG(DEBUG, "tx_multicast:%"PRIu64"", ns->eth.tx_multicast);
5127         PMD_DRV_LOG(DEBUG, "tx_broadcast:%"PRIu64"", ns->eth.tx_broadcast);
5128         PMD_DRV_LOG(DEBUG, "tx_discards:%"PRIu64"", ns->eth.tx_discards);
5129         PMD_DRV_LOG(DEBUG, "vsi tx_discards:%"PRIu64"",
5130                     pf->main_vsi->eth_stats.tx_discards);
5131         PMD_DRV_LOG(DEBUG, "tx_errors:          %"PRIu64"", ns->eth.tx_errors);
5132
5133         PMD_DRV_LOG(DEBUG, "tx_dropped_link_down:       %"PRIu64"",
5134                     ns->tx_dropped_link_down);
5135         PMD_DRV_LOG(DEBUG, "crc_errors: %"PRIu64"", ns->crc_errors);
5136         PMD_DRV_LOG(DEBUG, "illegal_bytes:      %"PRIu64"",
5137                     ns->illegal_bytes);
5138         PMD_DRV_LOG(DEBUG, "error_bytes:        %"PRIu64"", ns->error_bytes);
5139         PMD_DRV_LOG(DEBUG, "mac_local_faults:   %"PRIu64"",
5140                     ns->mac_local_faults);
5141         PMD_DRV_LOG(DEBUG, "mac_remote_faults:  %"PRIu64"",
5142                     ns->mac_remote_faults);
5143         PMD_DRV_LOG(DEBUG, "link_xon_rx:        %"PRIu64"", ns->link_xon_rx);
5144         PMD_DRV_LOG(DEBUG, "link_xoff_rx:       %"PRIu64"", ns->link_xoff_rx);
5145         PMD_DRV_LOG(DEBUG, "link_xon_tx:        %"PRIu64"", ns->link_xon_tx);
5146         PMD_DRV_LOG(DEBUG, "link_xoff_tx:       %"PRIu64"", ns->link_xoff_tx);
5147         PMD_DRV_LOG(DEBUG, "rx_size_64:         %"PRIu64"", ns->rx_size_64);
5148         PMD_DRV_LOG(DEBUG, "rx_size_127:        %"PRIu64"", ns->rx_size_127);
5149         PMD_DRV_LOG(DEBUG, "rx_size_255:        %"PRIu64"", ns->rx_size_255);
5150         PMD_DRV_LOG(DEBUG, "rx_size_511:        %"PRIu64"", ns->rx_size_511);
5151         PMD_DRV_LOG(DEBUG, "rx_size_1023:       %"PRIu64"", ns->rx_size_1023);
5152         PMD_DRV_LOG(DEBUG, "rx_size_1522:       %"PRIu64"", ns->rx_size_1522);
5153         PMD_DRV_LOG(DEBUG, "rx_size_big:        %"PRIu64"", ns->rx_size_big);
5154         PMD_DRV_LOG(DEBUG, "rx_undersize:       %"PRIu64"", ns->rx_undersize);
5155         PMD_DRV_LOG(DEBUG, "rx_fragments:       %"PRIu64"", ns->rx_fragments);
5156         PMD_DRV_LOG(DEBUG, "rx_oversize:        %"PRIu64"", ns->rx_oversize);
5157         PMD_DRV_LOG(DEBUG, "rx_jabber:          %"PRIu64"", ns->rx_jabber);
5158         PMD_DRV_LOG(DEBUG, "tx_size_64:         %"PRIu64"", ns->tx_size_64);
5159         PMD_DRV_LOG(DEBUG, "tx_size_127:        %"PRIu64"", ns->tx_size_127);
5160         PMD_DRV_LOG(DEBUG, "tx_size_255:        %"PRIu64"", ns->tx_size_255);
5161         PMD_DRV_LOG(DEBUG, "tx_size_511:        %"PRIu64"", ns->tx_size_511);
5162         PMD_DRV_LOG(DEBUG, "tx_size_1023:       %"PRIu64"", ns->tx_size_1023);
5163         PMD_DRV_LOG(DEBUG, "tx_size_1522:       %"PRIu64"", ns->tx_size_1522);
5164         PMD_DRV_LOG(DEBUG, "tx_size_big:        %"PRIu64"", ns->tx_size_big);
5165         PMD_DRV_LOG(DEBUG, "rx_len_errors:      %"PRIu64"", ns->rx_len_errors);
5166         PMD_DRV_LOG(DEBUG, "************* PF stats end ****************");
5167         return 0;
5168 }
5169
5170 /* Reset the statistics */
5171 static int
5172 ice_stats_reset(struct rte_eth_dev *dev)
5173 {
5174         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5175         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5176
5177         /* Mark PF and VSI stats to update the offset, aka "reset" */
5178         pf->offset_loaded = false;
5179         if (pf->main_vsi)
5180                 pf->main_vsi->offset_loaded = false;
5181
5182         /* read the stats, reading current register values into offset */
5183         ice_read_stats_registers(pf, hw);
5184
5185         return 0;
5186 }
5187
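/* Total number of xstats: basic ethernet stats plus HW port stats. */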
5188 static uint32_t
5189 ice_xstats_calc_num(void)
5190 {
5191         uint32_t num;
5192
5193         num = ICE_NB_ETH_XSTATS + ICE_NB_HW_PORT_XSTATS;
5194
5195         return num;
5196 }
5197
5198 static int
5199 ice_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
5200                unsigned int n)
5201 {
5202         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5203         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5204         unsigned int i;
5205         unsigned int count;
5206         struct ice_hw_port_stats *hw_stats = &pf->stats;
5207
5208         count = ice_xstats_calc_num();
5209         if (n < count)
5210                 return count;
5211
5212         ice_read_stats_registers(pf, hw);
5213
5214         if (!xstats)
5215                 return 0;
5216
5217         count = 0;
5218
5219         /* Get stats from ice_eth_stats struct */
5220         for (i = 0; i < ICE_NB_ETH_XSTATS; i++) {
5221                 xstats[count].value =
5222                         *(uint64_t *)((char *)&hw_stats->eth +
5223                                       ice_stats_strings[i].offset);
5224                 xstats[count].id = count;
5225                 count++;
5226         }
5227
5228         /* Get individual stats from ice_hw_port struct */
5229         for (i = 0; i < ICE_NB_HW_PORT_XSTATS; i++) {
5230                 xstats[count].value =
5231                         *(uint64_t *)((char *)hw_stats +
5232                                       ice_hw_port_strings[i].offset);
5233                 xstats[count].id = count;
5234                 count++;
5235         }
5236
5237         return count;
5238 }
5239
5240 static int ice_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
5241                                 struct rte_eth_xstat_name *xstats_names,
5242                                 __rte_unused unsigned int limit)
5243 {
5244         unsigned int count = 0;
5245         unsigned int i;
5246
5247         if (!xstats_names)
5248                 return ice_xstats_calc_num();
5249
5250         /* Note: limit checked in rte_eth_xstats_get_names() */
5251
5252         /* Get stats from ice_eth_stats struct */
5253         for (i = 0; i < ICE_NB_ETH_XSTATS; i++) {
5254                 strlcpy(xstats_names[count].name, ice_stats_strings[i].name,
5255                         sizeof(xstats_names[count].name));
5256                 count++;
5257         }
5258
5259         /* Get individual stats from ice_hw_port struct */
5260         for (i = 0; i < ICE_NB_HW_PORT_XSTATS; i++) {
5261                 strlcpy(xstats_names[count].name, ice_hw_port_strings[i].name,
5262                         sizeof(xstats_names[count].name));
5263                 count++;
5264         }
5265
5266         return count;
5267 }
5268
5269 static int
5270 ice_dev_filter_ctrl(struct rte_eth_dev *dev,
5271                      enum rte_filter_type filter_type,
5272                      enum rte_filter_op filter_op,
5273                      void *arg)
5274 {
5275         int ret = 0;
5276
5277         if (!dev)
5278                 return -EINVAL;
5279
5280         switch (filter_type) {
5281         case RTE_ETH_FILTER_GENERIC:
5282                 if (filter_op != RTE_ETH_FILTER_GET)
5283                         return -EINVAL;
5284                 *(const void **)arg = &ice_flow_ops;
5285                 break;
5286         default:
5287                 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
5288                                         filter_type);
5289                 ret = -EINVAL;
5290                 break;
5291         }
5292
5293         return ret;
5294 }
5295
5296 /* Add UDP tunneling port */
5297 static int
5298 ice_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
5299                              struct rte_eth_udp_tunnel *udp_tunnel)
5300 {
5301         int ret = 0;
5302         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5303
5304         if (udp_tunnel == NULL)
5305                 return -EINVAL;
5306
5307         switch (udp_tunnel->prot_type) {
5308         case RTE_TUNNEL_TYPE_VXLAN:
5309                 ret = ice_create_tunnel(hw, TNL_VXLAN, udp_tunnel->udp_port);
5310                 break;
5311         default:
5312                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
5313                 ret = -EINVAL;
5314                 break;
5315         }
5316
5317         return ret;
5318 }
5319
5320 /* Delete UDP tunneling port */
5321 static int
5322 ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
5323                              struct rte_eth_udp_tunnel *udp_tunnel)
5324 {
5325         int ret = 0;
5326         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5327
5328         if (udp_tunnel == NULL)
5329                 return -EINVAL;
5330
5331         switch (udp_tunnel->prot_type) {
5332         case RTE_TUNNEL_TYPE_VXLAN:
5333                 ret = ice_destroy_tunnel(hw, udp_tunnel->udp_port, 0);
5334                 break;
5335         default:
5336                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
5337                 ret = -EINVAL;
5338                 break;
5339         }
5340
5341         return ret;
5342 }
5343
5344 static int
5345 ice_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
5346               struct rte_pci_device *pci_dev)
5347 {
5348         return rte_eth_dev_pci_generic_probe(pci_dev,
5349                                              sizeof(struct ice_adapter),
5350                                              ice_dev_init);
5351 }
5352
5353 static int
5354 ice_pci_remove(struct rte_pci_device *pci_dev)
5355 {
5356         return rte_eth_dev_pci_generic_remove(pci_dev, ice_dev_uninit);
5357 }
5358
5359 static struct rte_pci_driver rte_ice_pmd = {
5360         .id_table = pci_id_ice_map,
5361         .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
5362         .probe = ice_pci_probe,
5363         .remove = ice_pci_remove,
5364 };
5365
5366 /**
5367  * Driver initialization routine.
5368  * Invoked once at EAL init time.
5369  * Register itself as the [Poll Mode] Driver of PCI devices.
5370  */
5371 RTE_PMD_REGISTER_PCI(net_ice, rte_ice_pmd);
5372 RTE_PMD_REGISTER_PCI_TABLE(net_ice, pci_id_ice_map);
5373 RTE_PMD_REGISTER_KMOD_DEP(net_ice, "* igb_uio | uio_pci_generic | vfio-pci");
5374 RTE_PMD_REGISTER_PARAM_STRING(net_ice,
5375                               ICE_PROTO_XTR_ARG "=[queue:]<vlan|ipv4|ipv6|ipv6_flow|tcp|ip_offset>"
5376                               ICE_SAFE_MODE_SUPPORT_ARG "=<0|1>"
5377                               ICE_PIPELINE_MODE_SUPPORT_ARG "=<0|1>");
5378
5379 RTE_LOG_REGISTER(ice_logtype_init, pmd.net.ice.init, NOTICE);
5380 RTE_LOG_REGISTER(ice_logtype_driver, pmd.net.ice.driver, NOTICE);
5381 #ifdef RTE_LIBRTE_ICE_DEBUG_RX
5382 RTE_LOG_REGISTER(ice_logtype_rx, pmd.net.ice.rx, DEBUG);
5383 #endif
5384 #ifdef RTE_LIBRTE_ICE_DEBUG_TX
5385 RTE_LOG_REGISTER(ice_logtype_tx, pmd.net.ice.tx, DEBUG);
5386 #endif
5387 #ifdef RTE_LIBRTE_ICE_DEBUG_TX_FREE
5388 RTE_LOG_REGISTER(ice_logtype_tx_free, pmd.net.ice.tx_free, DEBUG);
5389 #endif