net/ice: fix race condition in Rx timestamp
[dpdk.git] drivers/net/ice/ice_ethdev.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018 Intel Corporation
3  */
4
5 #include <rte_string_fns.h>
6 #include <ethdev_pci.h>
7
8 #include <stdio.h>
9 #include <sys/types.h>
10 #include <sys/stat.h>
11 #include <unistd.h>
12
13 #include <rte_tailq.h>
14
15 #include "eal_firmware.h"
16
17 #include "base/ice_sched.h"
18 #include "base/ice_flow.h"
19 #include "base/ice_dcb.h"
20 #include "base/ice_common.h"
21 #include "base/ice_ptp_hw.h"
22
23 #include "rte_pmd_ice.h"
24 #include "ice_ethdev.h"
25 #include "ice_rxtx.h"
26 #include "ice_generic_flow.h"
27
28 /* devargs */
29 #define ICE_SAFE_MODE_SUPPORT_ARG "safe-mode-support"
30 #define ICE_PIPELINE_MODE_SUPPORT_ARG  "pipeline-mode-support"
31 #define ICE_PROTO_XTR_ARG         "proto_xtr"
32 #define ICE_HW_DEBUG_MASK_ARG     "hw_debug_mask"
33 #define ICE_ONE_PPS_OUT_ARG       "pps_out"
34 #define ICE_RX_LOW_LATENCY_ARG    "rx_low_latency"
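
/*
 * Illustrative devargs usage (a sketch; the exact options and values are
 * documented in doc/guides/nics/ice.rst), e.g.:
 *
 *   -a 0000:18:00.0,safe-mode-support=1,rx_low_latency=1
 */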
35
36 #define ICE_CYCLECOUNTER_MASK  0xffffffffffffffffULL
37
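/* mbuf dynamic flag/field used to carry Rx timestamps; the offset stays -1
 * until the field is registered elsewhere in the driver.
 */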
38 uint64_t ice_timestamp_dynflag;
39 int ice_timestamp_dynfield_offset = -1;
40
41 static const char * const ice_valid_args[] = {
42         ICE_SAFE_MODE_SUPPORT_ARG,
43         ICE_PIPELINE_MODE_SUPPORT_ARG,
44         ICE_PROTO_XTR_ARG,
45         ICE_HW_DEBUG_MASK_ARG,
46         ICE_ONE_PPS_OUT_ARG,
47         ICE_RX_LOW_LATENCY_ARG,
48         NULL
49 };
50
51 #define PPS_OUT_DELAY_NS  1
52
53 static const struct rte_mbuf_dynfield ice_proto_xtr_metadata_param = {
54         .name = "intel_pmd_dynfield_proto_xtr_metadata",
55         .size = sizeof(uint32_t),
56         .align = __alignof__(uint32_t),
57         .flags = 0,
58 };
59
60 struct proto_xtr_ol_flag {
61         const struct rte_mbuf_dynflag param;
62         uint64_t *ol_flag;
63         bool required;
64 };
65
66 static bool ice_proto_xtr_hw_support[PROTO_XTR_MAX];
67
68 static struct proto_xtr_ol_flag ice_proto_xtr_ol_flag_params[] = {
69         [PROTO_XTR_VLAN] = {
70                 .param = { .name = "intel_pmd_dynflag_proto_xtr_vlan" },
71                 .ol_flag = &rte_net_ice_dynflag_proto_xtr_vlan_mask },
72         [PROTO_XTR_IPV4] = {
73                 .param = { .name = "intel_pmd_dynflag_proto_xtr_ipv4" },
74                 .ol_flag = &rte_net_ice_dynflag_proto_xtr_ipv4_mask },
75         [PROTO_XTR_IPV6] = {
76                 .param = { .name = "intel_pmd_dynflag_proto_xtr_ipv6" },
77                 .ol_flag = &rte_net_ice_dynflag_proto_xtr_ipv6_mask },
78         [PROTO_XTR_IPV6_FLOW] = {
79                 .param = { .name = "intel_pmd_dynflag_proto_xtr_ipv6_flow" },
80                 .ol_flag = &rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask },
81         [PROTO_XTR_TCP] = {
82                 .param = { .name = "intel_pmd_dynflag_proto_xtr_tcp" },
83                 .ol_flag = &rte_net_ice_dynflag_proto_xtr_tcp_mask },
84         [PROTO_XTR_IP_OFFSET] = {
85                 .param = { .name = "intel_pmd_dynflag_proto_xtr_ip_offset" },
86                 .ol_flag = &rte_net_ice_dynflag_proto_xtr_ip_offset_mask },
87 };
88
89 #define ICE_OS_DEFAULT_PKG_NAME  "ICE OS Default Package"
90 #define ICE_COMMS_PKG_NAME       "ICE COMMS Package"
91 #define ICE_MAX_RES_DESC_NUM     1024
92
93 static int ice_dev_configure(struct rte_eth_dev *dev);
94 static int ice_dev_start(struct rte_eth_dev *dev);
95 static int ice_dev_stop(struct rte_eth_dev *dev);
96 static int ice_dev_close(struct rte_eth_dev *dev);
97 static int ice_dev_reset(struct rte_eth_dev *dev);
98 static int ice_dev_info_get(struct rte_eth_dev *dev,
99                             struct rte_eth_dev_info *dev_info);
100 static int ice_link_update(struct rte_eth_dev *dev,
101                            int wait_to_complete);
102 static int ice_dev_set_link_up(struct rte_eth_dev *dev);
103 static int ice_dev_set_link_down(struct rte_eth_dev *dev);
104
105 static int ice_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
106 static int ice_vlan_offload_set(struct rte_eth_dev *dev, int mask);
107 static int ice_rss_reta_update(struct rte_eth_dev *dev,
108                                struct rte_eth_rss_reta_entry64 *reta_conf,
109                                uint16_t reta_size);
110 static int ice_rss_reta_query(struct rte_eth_dev *dev,
111                               struct rte_eth_rss_reta_entry64 *reta_conf,
112                               uint16_t reta_size);
113 static int ice_rss_hash_update(struct rte_eth_dev *dev,
114                                struct rte_eth_rss_conf *rss_conf);
115 static int ice_rss_hash_conf_get(struct rte_eth_dev *dev,
116                                  struct rte_eth_rss_conf *rss_conf);
117 static int ice_promisc_enable(struct rte_eth_dev *dev);
118 static int ice_promisc_disable(struct rte_eth_dev *dev);
119 static int ice_allmulti_enable(struct rte_eth_dev *dev);
120 static int ice_allmulti_disable(struct rte_eth_dev *dev);
121 static int ice_vlan_filter_set(struct rte_eth_dev *dev,
122                                uint16_t vlan_id,
123                                int on);
124 static int ice_macaddr_set(struct rte_eth_dev *dev,
125                            struct rte_ether_addr *mac_addr);
126 static int ice_macaddr_add(struct rte_eth_dev *dev,
127                            struct rte_ether_addr *mac_addr,
128                            __rte_unused uint32_t index,
129                            uint32_t pool);
130 static void ice_macaddr_remove(struct rte_eth_dev *dev, uint32_t index);
131 static int ice_rx_queue_intr_enable(struct rte_eth_dev *dev,
132                                     uint16_t queue_id);
133 static int ice_rx_queue_intr_disable(struct rte_eth_dev *dev,
134                                      uint16_t queue_id);
135 static int ice_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
136                               size_t fw_size);
137 static int ice_vlan_pvid_set(struct rte_eth_dev *dev,
138                              uint16_t pvid, int on);
139 static int ice_get_eeprom_length(struct rte_eth_dev *dev);
140 static int ice_get_eeprom(struct rte_eth_dev *dev,
141                           struct rte_dev_eeprom_info *eeprom);
142 static int ice_get_module_info(struct rte_eth_dev *dev,
143                                struct rte_eth_dev_module_info *modinfo);
144 static int ice_get_module_eeprom(struct rte_eth_dev *dev,
145                                  struct rte_dev_eeprom_info *info);
146 static int ice_stats_get(struct rte_eth_dev *dev,
147                          struct rte_eth_stats *stats);
148 static int ice_stats_reset(struct rte_eth_dev *dev);
149 static int ice_xstats_get(struct rte_eth_dev *dev,
150                           struct rte_eth_xstat *xstats, unsigned int n);
151 static int ice_xstats_get_names(struct rte_eth_dev *dev,
152                                 struct rte_eth_xstat_name *xstats_names,
153                                 unsigned int limit);
154 static int ice_dev_flow_ops_get(struct rte_eth_dev *dev,
155                                 const struct rte_flow_ops **ops);
156 static int ice_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
157                         struct rte_eth_udp_tunnel *udp_tunnel);
158 static int ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
159                         struct rte_eth_udp_tunnel *udp_tunnel);
160 static int ice_timesync_enable(struct rte_eth_dev *dev);
161 static int ice_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
162                                           struct timespec *timestamp,
163                                           uint32_t flags);
164 static int ice_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
165                                           struct timespec *timestamp);
166 static int ice_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);
167 static int ice_timesync_read_time(struct rte_eth_dev *dev,
168                                   struct timespec *timestamp);
169 static int ice_timesync_write_time(struct rte_eth_dev *dev,
170                                    const struct timespec *timestamp);
171 static int ice_timesync_disable(struct rte_eth_dev *dev);
172
173 static const struct rte_pci_id pci_id_ice_map[] = {
174         { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_BACKPLANE) },
175         { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_SFP) },
176         { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_10G_BASE_T) },
177         { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_1GBE) },
178         { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_QSFP) },
179         { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_BACKPLANE) },
180         { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP) },
181         { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP) },
182         { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810_XXV_BACKPLANE) },
183         { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810_XXV_QSFP) },
184         { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810_XXV_SFP) },
185         { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823C_BACKPLANE) },
186         { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823C_QSFP) },
187         { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823C_SFP) },
188         { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823C_10G_BASE_T) },
189         { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823C_SGMII) },
190         { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_BACKPLANE) },
191         { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_QSFP) },
192         { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_SFP) },
193         { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_10G_BASE_T) },
194         { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_SGMII) },
195         { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822L_BACKPLANE) },
196         { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822L_SFP) },
197         { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822L_10G_BASE_T) },
198         { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822L_SGMII) },
199         { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E824S) },
200         { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E825C_BACKPLANE) },
201         { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E825C_QSFP) },
202         { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E825C_SFP) },
203         { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E825C_1GBE) },
204         { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E825X) },
205         { .vendor_id = 0, /* sentinel */ },
206 };
207
208 static int
209 ice_tm_ops_get(struct rte_eth_dev *dev __rte_unused,
210                 void *arg)
211 {
212         if (!arg)
213                 return -EINVAL;
214
215         *(const void **)arg = &ice_tm_ops;
216
217         return 0;
218 }
219
220 static const struct eth_dev_ops ice_eth_dev_ops = {
221         .dev_configure                = ice_dev_configure,
222         .dev_start                    = ice_dev_start,
223         .dev_stop                     = ice_dev_stop,
224         .dev_close                    = ice_dev_close,
225         .dev_reset                    = ice_dev_reset,
226         .dev_set_link_up              = ice_dev_set_link_up,
227         .dev_set_link_down            = ice_dev_set_link_down,
228         .rx_queue_start               = ice_rx_queue_start,
229         .rx_queue_stop                = ice_rx_queue_stop,
230         .tx_queue_start               = ice_tx_queue_start,
231         .tx_queue_stop                = ice_tx_queue_stop,
232         .rx_queue_setup               = ice_rx_queue_setup,
233         .rx_queue_release             = ice_dev_rx_queue_release,
234         .tx_queue_setup               = ice_tx_queue_setup,
235         .tx_queue_release             = ice_dev_tx_queue_release,
236         .dev_infos_get                = ice_dev_info_get,
237         .dev_supported_ptypes_get     = ice_dev_supported_ptypes_get,
238         .link_update                  = ice_link_update,
239         .mtu_set                      = ice_mtu_set,
240         .mac_addr_set                 = ice_macaddr_set,
241         .mac_addr_add                 = ice_macaddr_add,
242         .mac_addr_remove              = ice_macaddr_remove,
243         .vlan_filter_set              = ice_vlan_filter_set,
244         .vlan_offload_set             = ice_vlan_offload_set,
245         .reta_update                  = ice_rss_reta_update,
246         .reta_query                   = ice_rss_reta_query,
247         .rss_hash_update              = ice_rss_hash_update,
248         .rss_hash_conf_get            = ice_rss_hash_conf_get,
249         .promiscuous_enable           = ice_promisc_enable,
250         .promiscuous_disable          = ice_promisc_disable,
251         .allmulticast_enable          = ice_allmulti_enable,
252         .allmulticast_disable         = ice_allmulti_disable,
253         .rx_queue_intr_enable         = ice_rx_queue_intr_enable,
254         .rx_queue_intr_disable        = ice_rx_queue_intr_disable,
255         .fw_version_get               = ice_fw_version_get,
256         .vlan_pvid_set                = ice_vlan_pvid_set,
257         .rxq_info_get                 = ice_rxq_info_get,
258         .txq_info_get                 = ice_txq_info_get,
259         .rx_burst_mode_get            = ice_rx_burst_mode_get,
260         .tx_burst_mode_get            = ice_tx_burst_mode_get,
261         .get_eeprom_length            = ice_get_eeprom_length,
262         .get_eeprom                   = ice_get_eeprom,
263         .get_module_info              = ice_get_module_info,
264         .get_module_eeprom            = ice_get_module_eeprom,
265         .stats_get                    = ice_stats_get,
266         .stats_reset                  = ice_stats_reset,
267         .xstats_get                   = ice_xstats_get,
268         .xstats_get_names             = ice_xstats_get_names,
269         .xstats_reset                 = ice_stats_reset,
270         .flow_ops_get                 = ice_dev_flow_ops_get,
271         .udp_tunnel_port_add          = ice_dev_udp_tunnel_port_add,
272         .udp_tunnel_port_del          = ice_dev_udp_tunnel_port_del,
273         .tx_done_cleanup              = ice_tx_done_cleanup,
274         .get_monitor_addr             = ice_get_monitor_addr,
275         .timesync_enable              = ice_timesync_enable,
276         .timesync_read_rx_timestamp   = ice_timesync_read_rx_timestamp,
277         .timesync_read_tx_timestamp   = ice_timesync_read_tx_timestamp,
278         .timesync_adjust_time         = ice_timesync_adjust_time,
279         .timesync_read_time           = ice_timesync_read_time,
280         .timesync_write_time          = ice_timesync_write_time,
281         .timesync_disable             = ice_timesync_disable,
282         .tm_ops_get                   = ice_tm_ops_get,
283 };
284
285 /* store statistics names and their offsets in the stats structure */
286 struct ice_xstats_name_off {
287         char name[RTE_ETH_XSTATS_NAME_SIZE];
288         unsigned int offset;
289 };
290
291 static const struct ice_xstats_name_off ice_stats_strings[] = {
292         {"rx_unicast_packets", offsetof(struct ice_eth_stats, rx_unicast)},
293         {"rx_multicast_packets", offsetof(struct ice_eth_stats, rx_multicast)},
294         {"rx_broadcast_packets", offsetof(struct ice_eth_stats, rx_broadcast)},
295         {"rx_dropped_packets", offsetof(struct ice_eth_stats, rx_discards)},
296         {"rx_unknown_protocol_packets", offsetof(struct ice_eth_stats,
297                 rx_unknown_protocol)},
298         {"tx_unicast_packets", offsetof(struct ice_eth_stats, tx_unicast)},
299         {"tx_multicast_packets", offsetof(struct ice_eth_stats, tx_multicast)},
300         {"tx_broadcast_packets", offsetof(struct ice_eth_stats, tx_broadcast)},
301         {"tx_dropped_packets", offsetof(struct ice_eth_stats, tx_discards)},
302 };
303
304 #define ICE_NB_ETH_XSTATS (sizeof(ice_stats_strings) / \
305                 sizeof(ice_stats_strings[0]))
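/*
 * Illustrative sketch of how these name/offset tables are used by the xstats
 * callbacks (assuming a populated struct ice_eth_stats 'es'):
 *
 *   uint64_t v = *(uint64_t *)((char *)&es + ice_stats_strings[i].offset);
 *
 * Each value is fetched by offset instead of naming every field explicitly.
 */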
306
307 static const struct ice_xstats_name_off ice_hw_port_strings[] = {
308         {"tx_link_down_dropped", offsetof(struct ice_hw_port_stats,
309                 tx_dropped_link_down)},
310         {"rx_crc_errors", offsetof(struct ice_hw_port_stats, crc_errors)},
311         {"rx_illegal_byte_errors", offsetof(struct ice_hw_port_stats,
312                 illegal_bytes)},
313         {"rx_error_bytes", offsetof(struct ice_hw_port_stats, error_bytes)},
314         {"mac_local_errors", offsetof(struct ice_hw_port_stats,
315                 mac_local_faults)},
316         {"mac_remote_errors", offsetof(struct ice_hw_port_stats,
317                 mac_remote_faults)},
318         {"rx_len_errors", offsetof(struct ice_hw_port_stats,
319                 rx_len_errors)},
320         {"tx_xon_packets", offsetof(struct ice_hw_port_stats, link_xon_tx)},
321         {"rx_xon_packets", offsetof(struct ice_hw_port_stats, link_xon_rx)},
322         {"tx_xoff_packets", offsetof(struct ice_hw_port_stats, link_xoff_tx)},
323         {"rx_xoff_packets", offsetof(struct ice_hw_port_stats, link_xoff_rx)},
324         {"rx_size_64_packets", offsetof(struct ice_hw_port_stats, rx_size_64)},
325         {"rx_size_65_to_127_packets", offsetof(struct ice_hw_port_stats,
326                 rx_size_127)},
327         {"rx_size_128_to_255_packets", offsetof(struct ice_hw_port_stats,
328                 rx_size_255)},
329         {"rx_size_256_to_511_packets", offsetof(struct ice_hw_port_stats,
330                 rx_size_511)},
331         {"rx_size_512_to_1023_packets", offsetof(struct ice_hw_port_stats,
332                 rx_size_1023)},
333         {"rx_size_1024_to_1522_packets", offsetof(struct ice_hw_port_stats,
334                 rx_size_1522)},
335         {"rx_size_1523_to_max_packets", offsetof(struct ice_hw_port_stats,
336                 rx_size_big)},
337         {"rx_undersized_errors", offsetof(struct ice_hw_port_stats,
338                 rx_undersize)},
339         {"rx_oversize_errors", offsetof(struct ice_hw_port_stats,
340                 rx_oversize)},
341         {"rx_mac_short_pkt_dropped", offsetof(struct ice_hw_port_stats,
342                 mac_short_pkt_dropped)},
343         {"rx_fragmented_errors", offsetof(struct ice_hw_port_stats,
344                 rx_fragments)},
345         {"rx_jabber_errors", offsetof(struct ice_hw_port_stats, rx_jabber)},
346         {"tx_size_64_packets", offsetof(struct ice_hw_port_stats, tx_size_64)},
347         {"tx_size_65_to_127_packets", offsetof(struct ice_hw_port_stats,
348                 tx_size_127)},
349         {"tx_size_128_to_255_packets", offsetof(struct ice_hw_port_stats,
350                 tx_size_255)},
351         {"tx_size_256_to_511_packets", offsetof(struct ice_hw_port_stats,
352                 tx_size_511)},
353         {"tx_size_512_to_1023_packets", offsetof(struct ice_hw_port_stats,
354                 tx_size_1023)},
355         {"tx_size_1024_to_1522_packets", offsetof(struct ice_hw_port_stats,
356                 tx_size_1522)},
357         {"tx_size_1523_to_max_packets", offsetof(struct ice_hw_port_stats,
358                 tx_size_big)},
359 };
360
361 #define ICE_NB_HW_PORT_XSTATS (sizeof(ice_hw_port_strings) / \
362                 sizeof(ice_hw_port_strings[0]))
363
364 static void
365 ice_init_controlq_parameter(struct ice_hw *hw)
366 {
367         /* fields for adminq */
368         hw->adminq.num_rq_entries = ICE_ADMINQ_LEN;
369         hw->adminq.num_sq_entries = ICE_ADMINQ_LEN;
370         hw->adminq.rq_buf_size = ICE_ADMINQ_BUF_SZ;
371         hw->adminq.sq_buf_size = ICE_ADMINQ_BUF_SZ;
372
373         /* fields for mailboxq; DPDK acts as the PF host */
374         hw->mailboxq.num_rq_entries = ICE_MAILBOXQ_LEN;
375         hw->mailboxq.num_sq_entries = ICE_MAILBOXQ_LEN;
376         hw->mailboxq.rq_buf_size = ICE_MAILBOXQ_BUF_SZ;
377         hw->mailboxq.sq_buf_size = ICE_MAILBOXQ_BUF_SZ;
378
379         /* fields for sideband queue */
380         hw->sbq.num_rq_entries = ICE_SBQ_LEN;
381         hw->sbq.num_sq_entries = ICE_SBQ_LEN;
382         hw->sbq.rq_buf_size = ICE_SBQ_MAX_BUF_LEN;
383         hw->sbq.sq_buf_size = ICE_SBQ_MAX_BUF_LEN;
384
385 }
386
387 static int
388 lookup_proto_xtr_type(const char *xtr_name)
389 {
390         static struct {
391                 const char *name;
392                 enum proto_xtr_type type;
393         } xtr_type_map[] = {
394                 { "vlan",      PROTO_XTR_VLAN      },
395                 { "ipv4",      PROTO_XTR_IPV4      },
396                 { "ipv6",      PROTO_XTR_IPV6      },
397                 { "ipv6_flow", PROTO_XTR_IPV6_FLOW },
398                 { "tcp",       PROTO_XTR_TCP       },
399                 { "ip_offset", PROTO_XTR_IP_OFFSET },
400         };
401         uint32_t i;
402
403         for (i = 0; i < RTE_DIM(xtr_type_map); i++) {
404                 if (strcmp(xtr_name, xtr_type_map[i].name) == 0)
405                         return xtr_type_map[i].type;
406         }
407
408         return -1;
409 }
410
411 /*
412  * Parse an element; it can be a single number, a range, or a '(' ')' group:
413  * 1) A single number, e.g. 9
414  * 2) A range: two numbers joined by '-', e.g. 2-6
415  * 3) A group combining multiple 1) or 2) within '( )', e.g. (0,2-4,6)
416  *    Within a group, '-' is the range separator and
417  *    ',' separates single numbers.
418  */
419 static int
420 parse_queue_set(const char *input, int xtr_type, struct ice_devargs *devargs)
421 {
422         const char *str = input;
423         char *end = NULL;
424         uint32_t min, max;
425         uint32_t idx;
426
427         while (isblank(*str))
428                 str++;
429
430         if (!isdigit(*str) && *str != '(')
431                 return -1;
432
433         /* process a single number or a single range of numbers */
434         if (*str != '(') {
435                 errno = 0;
436                 idx = strtoul(str, &end, 10);
437                 if (errno || end == NULL || idx >= ICE_MAX_QUEUE_NUM)
438                         return -1;
439
440                 while (isblank(*end))
441                         end++;
442
443                 min = idx;
444                 max = idx;
445
446                 /* process a single <number>-<number> range */
447                 if (*end == '-') {
448                         end++;
449                         while (isblank(*end))
450                                 end++;
451                         if (!isdigit(*end))
452                                 return -1;
453
454                         errno = 0;
455                         idx = strtoul(end, &end, 10);
456                         if (errno || end == NULL || idx >= ICE_MAX_QUEUE_NUM)
457                                 return -1;
458
459                         max = idx;
460                         while (isblank(*end))
461                                 end++;
462                 }
463
464                 if (*end != ':')
465                         return -1;
466
467                 for (idx = RTE_MIN(min, max);
468                      idx <= RTE_MAX(min, max); idx++)
469                         devargs->proto_xtr[idx] = xtr_type;
470
471                 return 0;
472         }
473
474         /* process a set within brackets */
475         str++;
476         while (isblank(*str))
477                 str++;
478         if (*str == '\0')
479                 return -1;
480
481         min = ICE_MAX_QUEUE_NUM;
482         do {
483                 /* advance to the first digit */
484                 while (isblank(*str))
485                         str++;
486                 if (!isdigit(*str))
487                         return -1;
488
489                 /* get the digit value */
490                 errno = 0;
491                 idx = strtoul(str, &end, 10);
492                 if (errno || end == NULL || idx >= ICE_MAX_QUEUE_NUM)
493                         return -1;
494
495                 /* advance to a separator: '-', ',' or ')' */
496                 while (isblank(*end))
497                         end++;
498                 if (*end == '-') {
499                         if (min == ICE_MAX_QUEUE_NUM)
500                                 min = idx;
501                 else /* reject consecutive '-' */
502                                 return -1;
503                 } else if (*end == ',' || *end == ')') {
504                         max = idx;
505                         if (min == ICE_MAX_QUEUE_NUM)
506                                 min = idx;
507
508                         for (idx = RTE_MIN(min, max);
509                              idx <= RTE_MAX(min, max); idx++)
510                                 devargs->proto_xtr[idx] = xtr_type;
511
512                         min = ICE_MAX_QUEUE_NUM;
513                 } else {
514                         return -1;
515                 }
516
517                 str = end + 1;
518         } while (*end != ')' && *end != '\0');
519
520         return 0;
521 }
522
523 static int
524 parse_queue_proto_xtr(const char *queues, struct ice_devargs *devargs)
525 {
526         const char *queue_start;
527         uint32_t idx;
528         int xtr_type;
529         char xtr_name[32];
530
531         while (isblank(*queues))
532                 queues++;
533
534         if (*queues != '[') {
535                 xtr_type = lookup_proto_xtr_type(queues);
536                 if (xtr_type < 0)
537                         return -1;
538
539                 devargs->proto_xtr_dflt = xtr_type;
540
541                 return 0;
542         }
543
544         queues++;
545         do {
546                 while (isblank(*queues))
547                         queues++;
548                 if (*queues == '\0')
549                         return -1;
550
551                 queue_start = queues;
552
553                 /* skip to the end of a bracketed group */
554                 if (*queue_start == '(') {
555                         queues += strcspn(queues, ")");
556                         if (*queues != ')')
557                                 return -1;
558                 }
559
560                 /* scan for the ':' separator */
561                 queues += strcspn(queues, ":");
562                 if (*queues++ != ':')
563                         return -1;
564                 while (isblank(*queues))
565                         queues++;
566
567                 for (idx = 0; ; idx++) {
568                         if (isblank(queues[idx]) ||
569                             queues[idx] == ',' ||
570                             queues[idx] == ']' ||
571                             queues[idx] == '\0')
572                                 break;
573
574                         if (idx > sizeof(xtr_name) - 2)
575                                 return -1;
576
577                         xtr_name[idx] = queues[idx];
578                 }
579                 xtr_name[idx] = '\0';
580                 xtr_type = lookup_proto_xtr_type(xtr_name);
581                 if (xtr_type < 0)
582                         return -1;
583
584                 queues += idx;
585
586                 while (isblank(*queues) || *queues == ',' || *queues == ']')
587                         queues++;
588
589                 if (parse_queue_set(queue_start, xtr_type, devargs) < 0)
590                         return -1;
591         } while (*queues != '\0');
592
593         return 0;
594 }
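
/*
 * Illustrative proto_xtr devargs accepted by the parsers above (a sketch;
 * see doc/guides/nics/ice.rst for the documented forms), e.g.:
 *
 *   proto_xtr=vlan                            - one type for all queues
 *   proto_xtr='[(1,2-3,8-9):tcp,10-13:vlan]'  - per-queue-set types
 */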
595
596 static int
597 handle_proto_xtr_arg(__rte_unused const char *key, const char *value,
598                      void *extra_args)
599 {
600         struct ice_devargs *devargs = extra_args;
601
602         if (value == NULL || extra_args == NULL)
603                 return -EINVAL;
604
605         if (parse_queue_proto_xtr(value, devargs) < 0) {
606                 PMD_DRV_LOG(ERR,
607                             "Invalid protocol extraction parameter: '%s'",
608                             value);
609                 return -1;
610         }
611
612         return 0;
613 }
614
615 static void
616 ice_check_proto_xtr_support(struct ice_hw *hw)
617 {
618 #define FLX_REG(val, fld, idx) \
619         (((val) & GLFLXP_RXDID_FLX_WRD_##idx##_##fld##_M) >> \
620          GLFLXP_RXDID_FLX_WRD_##idx##_##fld##_S)
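        /*
         * FLX_REG(val, fld, idx) extracts field 'fld' of flexible descriptor
         * word 'idx' from the register value 'val' (mask, then shift down).
         */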
621         static struct {
622                 uint32_t rxdid;
623                 uint8_t opcode;
624                 uint8_t protid_0;
625                 uint8_t protid_1;
626         } xtr_sets[] = {
627                 [PROTO_XTR_VLAN] = { ICE_RXDID_COMMS_AUX_VLAN,
628                                      ICE_RX_OPC_EXTRACT,
629                                      ICE_PROT_EVLAN_O, ICE_PROT_VLAN_O},
630                 [PROTO_XTR_IPV4] = { ICE_RXDID_COMMS_AUX_IPV4,
631                                      ICE_RX_OPC_EXTRACT,
632                                      ICE_PROT_IPV4_OF_OR_S,
633                                      ICE_PROT_IPV4_OF_OR_S },
634                 [PROTO_XTR_IPV6] = { ICE_RXDID_COMMS_AUX_IPV6,
635                                      ICE_RX_OPC_EXTRACT,
636                                      ICE_PROT_IPV6_OF_OR_S,
637                                      ICE_PROT_IPV6_OF_OR_S },
638                 [PROTO_XTR_IPV6_FLOW] = { ICE_RXDID_COMMS_AUX_IPV6_FLOW,
639                                           ICE_RX_OPC_EXTRACT,
640                                           ICE_PROT_IPV6_OF_OR_S,
641                                           ICE_PROT_IPV6_OF_OR_S },
642                 [PROTO_XTR_TCP] = { ICE_RXDID_COMMS_AUX_TCP,
643                                     ICE_RX_OPC_EXTRACT,
644                                     ICE_PROT_TCP_IL, ICE_PROT_ID_INVAL },
645                 [PROTO_XTR_IP_OFFSET] = { ICE_RXDID_COMMS_AUX_IP_OFFSET,
646                                           ICE_RX_OPC_PROTID,
647                                           ICE_PROT_IPV4_OF_OR_S,
648                                           ICE_PROT_IPV6_OF_OR_S },
649         };
650         uint32_t i;
651
652         for (i = 0; i < RTE_DIM(xtr_sets); i++) {
653                 uint32_t rxdid = xtr_sets[i].rxdid;
654                 uint32_t v;
655
656                 if (xtr_sets[i].protid_0 != ICE_PROT_ID_INVAL) {
657                         v = ICE_READ_REG(hw, GLFLXP_RXDID_FLX_WRD_4(rxdid));
658
659                         if (FLX_REG(v, PROT_MDID, 4) == xtr_sets[i].protid_0 &&
660                             FLX_REG(v, RXDID_OPCODE, 4) == xtr_sets[i].opcode)
661                                 ice_proto_xtr_hw_support[i] = true;
662                 }
663
664                 if (xtr_sets[i].protid_1 != ICE_PROT_ID_INVAL) {
665                         v = ICE_READ_REG(hw, GLFLXP_RXDID_FLX_WRD_5(rxdid));
666
667                         if (FLX_REG(v, PROT_MDID, 5) == xtr_sets[i].protid_1 &&
668                             FLX_REG(v, RXDID_OPCODE, 5) == xtr_sets[i].opcode)
669                                 ice_proto_xtr_hw_support[i] = true;
670                 }
671         }
672 }
673
674 static int
675 ice_res_pool_init(struct ice_res_pool_info *pool, uint32_t base,
676                   uint32_t num)
677 {
678         struct pool_entry *entry;
679
680         if (!pool || !num)
681                 return -EINVAL;
682
683         entry = rte_zmalloc(NULL, sizeof(*entry), 0);
684         if (!entry) {
685                 PMD_INIT_LOG(ERR,
686                              "Failed to allocate memory for resource pool");
687                 return -ENOMEM;
688         }
689
690         /* initialize the queue heap */
691         pool->num_free = num;
692         pool->num_alloc = 0;
693         pool->base = base;
694         LIST_INIT(&pool->alloc_list);
695         LIST_INIT(&pool->free_list);
696
697         /* Initialize the element */
698         entry->base = 0;
699         entry->len = num;
700
701         LIST_INSERT_HEAD(&pool->free_list, entry, next);
702         return 0;
703 }
704
705 static int
706 ice_res_pool_alloc(struct ice_res_pool_info *pool,
707                    uint16_t num)
708 {
709         struct pool_entry *entry, *valid_entry;
710
711         if (!pool || !num) {
712                 PMD_INIT_LOG(ERR, "Invalid parameter");
713                 return -EINVAL;
714         }
715
716         if (pool->num_free < num) {
717                 PMD_INIT_LOG(ERR, "Insufficient resources: requested %u, available %u",
718                              num, pool->num_free);
719                 return -ENOMEM;
720         }
721
722         valid_entry = NULL;
723         /* Look up the free list and find the best-fit entry */
724         LIST_FOREACH(entry, &pool->free_list, next) {
725                 if (entry->len >= num) {
726                         /* An exact fit is the best */
727                         if (entry->len == num) {
728                                 valid_entry = entry;
729                                 break;
730                         }
731                         if (!valid_entry ||
732                             valid_entry->len > entry->len)
733                                 valid_entry = entry;
734                 }
735         }
736
737         /* No entry found to satisfy the request; return an error */
738         if (!valid_entry) {
739                 PMD_INIT_LOG(ERR, "No valid entry found");
740                 return -ENOMEM;
741         }
742         /**
743          * The entry has exactly the requested number of queues;
744          * remove it from the free_list.
745          */
746         if (valid_entry->len == num) {
747                 LIST_REMOVE(valid_entry, next);
748         } else {
749                 /**
750                  * The entry has more queues than requested;
751                  * create a new entry for the alloc_list and shrink the
752                  * base and length of the entry left on the free_list.
753                  */
754                 entry = rte_zmalloc(NULL, sizeof(*entry), 0);
755                 if (!entry) {
756                         PMD_INIT_LOG(ERR,
757                                      "Failed to allocate memory for "
758                                      "resource pool");
759                         return -ENOMEM;
760                 }
761                 entry->base = valid_entry->base;
762                 entry->len = num;
763                 valid_entry->base += num;
764                 valid_entry->len -= num;
765                 valid_entry = entry;
766         }
767
768         /* Insert it into the alloc_list (unsorted) */
769         LIST_INSERT_HEAD(&pool->alloc_list, valid_entry, next);
770
771         pool->num_free -= valid_entry->len;
772         pool->num_alloc += valid_entry->len;
773
774         return valid_entry->base + pool->base;
775 }
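
/*
 * Worked example of the best-fit search above (illustrative): with free_list
 * entries of len 8 and len 4, a request for 4 consumes the len-4 entry whole,
 * while a request for 3 splits the len-4 entry into a len-3 alloc_list entry
 * and a len-1 remainder left on the free_list.
 */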
776
777 static void
778 ice_res_pool_destroy(struct ice_res_pool_info *pool)
779 {
780         struct pool_entry *entry, *next_entry;
781
782         if (!pool)
783                 return;
784
785         for (entry = LIST_FIRST(&pool->alloc_list);
786              entry && (next_entry = LIST_NEXT(entry, next), 1);
787              entry = next_entry) {
788                 LIST_REMOVE(entry, next);
789                 rte_free(entry);
790         }
791
792         for (entry = LIST_FIRST(&pool->free_list);
793              entry && (next_entry = LIST_NEXT(entry, next), 1);
794              entry = next_entry) {
795                 LIST_REMOVE(entry, next);
796                 rte_free(entry);
797         }
798
799         pool->num_free = 0;
800         pool->num_alloc = 0;
801         pool->base = 0;
802         LIST_INIT(&pool->alloc_list);
803         LIST_INIT(&pool->free_list);
804 }
805
806 static void
807 ice_vsi_config_default_rss(struct ice_aqc_vsi_props *info)
808 {
809         /* Set VSI LUT selection */
810         info->q_opt_rss = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI &
811                           ICE_AQ_VSI_Q_OPT_RSS_LUT_M;
812         /* Set Hash scheme */
813         info->q_opt_rss |= ICE_AQ_VSI_Q_OPT_RSS_TPLZ &
814                            ICE_AQ_VSI_Q_OPT_RSS_HASH_M;
815         /* enable TC */
816         info->q_opt_tc = ICE_AQ_VSI_Q_OPT_TC_OVR_M;
817 }
818
819 static enum ice_status
820 ice_vsi_config_tc_queue_mapping(struct ice_vsi *vsi,
821                                 struct ice_aqc_vsi_props *info,
822                                 uint8_t enabled_tcmap)
823 {
824         uint16_t fls, qp_idx;
825
826         /* Only TC0 is supported for now; multi-TC support is left for later.
827          * Configure the TC and queue mapping parameters: for each enabled TC,
828          * allocate qpnum_per_tc queues to its traffic.
829          */
830         if (enabled_tcmap != 0x01) {
831                 PMD_INIT_LOG(ERR, "only TC0 is supported");
832                 return -ENOTSUP;
833         }
834
835         /* vector 0 is reserved and one vector is used for the control VSI */
836         if (vsi->adapter->hw.func_caps.common_cap.num_msix_vectors < 2)
837                 vsi->nb_qps = 0;
838         else
839                 vsi->nb_qps = RTE_MIN
840                         ((uint16_t)vsi->adapter->hw.func_caps.common_cap.num_msix_vectors - 2,
841                         RTE_MIN(vsi->nb_qps, ICE_MAX_Q_PER_TC));
842
843         /* nb_qps (hex) -> fls, i.e. ceil(log2(nb_qps)) */
844         /* 0000         -> 0 */
845         /* 0001         -> 0 */
846         /* 0002         -> 1 */
847         /* 0003 ~ 0004  -> 2 */
848         /* 0005 ~ 0008  -> 3 */
849         /* 0009 ~ 0010  -> 4 */
850         /* 0011 ~ 0020  -> 5 */
851         /* 0021 ~ 0040  -> 6 */
852         /* 0041 ~ 0080  -> 7 */
853         /* 0081 ~ 0100  -> 8 */
854         fls = (vsi->nb_qps == 0) ? 0 : rte_fls_u32(vsi->nb_qps - 1);
855
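        /*
         * Example (illustrative): nb_qps = 8 gives fls = 3; the Q_NUM field
         * below encodes the queue count as a power of two, so fls = 3
         * selects 2^3 = 8 queues for TC0.
         */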
856         qp_idx = 0;
857         /* Set tc and queue mapping with VSI */
858         info->tc_mapping[0] = rte_cpu_to_le_16((qp_idx <<
859                                                 ICE_AQ_VSI_TC_Q_OFFSET_S) |
860                                                (fls << ICE_AQ_VSI_TC_Q_NUM_S));
861
862         /* Associate queue number with VSI */
863         info->mapping_flags |= rte_cpu_to_le_16(ICE_AQ_VSI_Q_MAP_CONTIG);
864         info->q_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
865         info->q_mapping[1] = rte_cpu_to_le_16(vsi->nb_qps);
866         info->valid_sections |=
867                 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
868         /* Set info.ingress_table and info.egress_table
869          * (the UP translate tables). For now just use a 1:1 map by default
870          * -- 0b 111 110 101 100 011 010 001 000 == 0xFAC688
871          */
872 #define ICE_TC_QUEUE_TABLE_DFLT 0x00FAC688
873         info->ingress_table  = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT);
874         info->egress_table   = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT);
875         info->outer_up_table = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT);
876         return 0;
877 }
878
879 static int
880 ice_init_mac_address(struct rte_eth_dev *dev)
881 {
882         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
883
884         if (!rte_is_unicast_ether_addr
885                 ((struct rte_ether_addr *)hw->port_info[0].mac.lan_addr)) {
886                 PMD_INIT_LOG(ERR, "Invalid MAC address");
887                 return -EINVAL;
888         }
889
890         rte_ether_addr_copy(
891                 (struct rte_ether_addr *)hw->port_info[0].mac.lan_addr,
892                 (struct rte_ether_addr *)hw->port_info[0].mac.perm_addr);
893
894         dev->data->mac_addrs =
895                 rte_zmalloc(NULL, sizeof(struct rte_ether_addr) * ICE_NUM_MACADDR_MAX, 0);
896         if (!dev->data->mac_addrs) {
897                 PMD_INIT_LOG(ERR,
898                              "Failed to allocate memory to store mac address");
899                 return -ENOMEM;
900         }
901         /* store it in dev data */
902         rte_ether_addr_copy(
903                 (struct rte_ether_addr *)hw->port_info[0].mac.perm_addr,
904                 &dev->data->mac_addrs[0]);
905         return 0;
906 }
907
908 /* Find a specific MAC filter */
909 static struct ice_mac_filter *
910 ice_find_mac_filter(struct ice_vsi *vsi, struct rte_ether_addr *macaddr)
911 {
912         struct ice_mac_filter *f;
913
914         TAILQ_FOREACH(f, &vsi->mac_list, next) {
915                 if (rte_is_same_ether_addr(macaddr, &f->mac_info.mac_addr))
916                         return f;
917         }
918
919         return NULL;
920 }
921
922 static int
923 ice_add_mac_filter(struct ice_vsi *vsi, struct rte_ether_addr *mac_addr)
924 {
925         struct ice_fltr_list_entry *m_list_itr = NULL;
926         struct ice_mac_filter *f;
927         struct LIST_HEAD_TYPE list_head;
928         struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
929         int ret = 0;
930
931         /* If it's added and configured, return */
932         f = ice_find_mac_filter(vsi, mac_addr);
933         if (f) {
934                 PMD_DRV_LOG(INFO, "This MAC filter already exists.");
935                 return 0;
936         }
937
938         INIT_LIST_HEAD(&list_head);
939
940         m_list_itr = (struct ice_fltr_list_entry *)
941                 ice_malloc(hw, sizeof(*m_list_itr));
942         if (!m_list_itr) {
943                 ret = -ENOMEM;
944                 goto DONE;
945         }
946         ice_memcpy(m_list_itr->fltr_info.l_data.mac.mac_addr,
947                    mac_addr, ETH_ALEN, ICE_NONDMA_TO_NONDMA);
948         m_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
949         m_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
950         m_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_MAC;
951         m_list_itr->fltr_info.flag = ICE_FLTR_TX;
952         m_list_itr->fltr_info.vsi_handle = vsi->idx;
953
954         LIST_ADD(&m_list_itr->list_entry, &list_head);
955
956         /* Add the mac */
957         ret = ice_add_mac(hw, &list_head);
958         if (ret != ICE_SUCCESS) {
959                 PMD_DRV_LOG(ERR, "Failed to add MAC filter");
960                 ret = -EINVAL;
961                 goto DONE;
962         }
963         /* Add the mac addr into mac list */
964         f = rte_zmalloc(NULL, sizeof(*f), 0);
965         if (!f) {
966                 PMD_DRV_LOG(ERR, "failed to allocate memory");
967                 ret = -ENOMEM;
968                 goto DONE;
969         }
970         rte_ether_addr_copy(mac_addr, &f->mac_info.mac_addr);
971         TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
972         vsi->mac_num++;
973
974         ret = 0;
975
976 DONE:
977         rte_free(m_list_itr);
978         return ret;
979 }
980
981 static int
982 ice_remove_mac_filter(struct ice_vsi *vsi, struct rte_ether_addr *mac_addr)
983 {
984         struct ice_fltr_list_entry *m_list_itr = NULL;
985         struct ice_mac_filter *f;
986         struct LIST_HEAD_TYPE list_head;
987         struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
988         int ret = 0;
989
990         /* Can't find it, return an error */
991         f = ice_find_mac_filter(vsi, mac_addr);
992         if (!f)
993                 return -EINVAL;
994
995         INIT_LIST_HEAD(&list_head);
996
997         m_list_itr = (struct ice_fltr_list_entry *)
998                 ice_malloc(hw, sizeof(*m_list_itr));
999         if (!m_list_itr) {
1000                 ret = -ENOMEM;
1001                 goto DONE;
1002         }
1003         ice_memcpy(m_list_itr->fltr_info.l_data.mac.mac_addr,
1004                    mac_addr, ETH_ALEN, ICE_NONDMA_TO_NONDMA);
1005         m_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
1006         m_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
1007         m_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_MAC;
1008         m_list_itr->fltr_info.flag = ICE_FLTR_TX;
1009         m_list_itr->fltr_info.vsi_handle = vsi->idx;
1010
1011         LIST_ADD(&m_list_itr->list_entry, &list_head);
1012
1013         /* remove the mac filter */
1014         ret = ice_remove_mac(hw, &list_head);
1015         if (ret != ICE_SUCCESS) {
1016                 PMD_DRV_LOG(ERR, "Failed to remove MAC filter");
1017                 ret = -EINVAL;
1018                 goto DONE;
1019         }
1020
1021         /* Remove the mac addr from mac list */
1022         TAILQ_REMOVE(&vsi->mac_list, f, next);
1023         rte_free(f);
1024         vsi->mac_num--;
1025
1026         ret = 0;
1027 DONE:
1028         rte_free(m_list_itr);
1029         return ret;
1030 }
1031
1032 /* Find a specific VLAN filter */
1033 static struct ice_vlan_filter *
1034 ice_find_vlan_filter(struct ice_vsi *vsi, struct ice_vlan *vlan)
1035 {
1036         struct ice_vlan_filter *f;
1037
1038         TAILQ_FOREACH(f, &vsi->vlan_list, next) {
1039                 if (vlan->tpid == f->vlan_info.vlan.tpid &&
1040                     vlan->vid == f->vlan_info.vlan.vid)
1041                         return f;
1042         }
1043
1044         return NULL;
1045 }
1046
1047 static int
1048 ice_add_vlan_filter(struct ice_vsi *vsi, struct ice_vlan *vlan)
1049 {
1050         struct ice_fltr_list_entry *v_list_itr = NULL;
1051         struct ice_vlan_filter *f;
1052         struct LIST_HEAD_TYPE list_head;
1053         struct ice_hw *hw;
1054         int ret = 0;
1055
1056         if (!vsi || vlan->vid > RTE_ETHER_MAX_VLAN_ID)
1057                 return -EINVAL;
1058
1059         hw = ICE_VSI_TO_HW(vsi);
1060
1061         /* If it's added and configured, return. */
1062         f = ice_find_vlan_filter(vsi, vlan);
1063         if (f) {
1064                 PMD_DRV_LOG(INFO, "This VLAN filter already exists.");
1065                 return 0;
1066         }
1067
1068         if (!vsi->vlan_anti_spoof_on && !vsi->vlan_filter_on)
1069                 return 0;
1070
1071         INIT_LIST_HEAD(&list_head);
1072
1073         v_list_itr = (struct ice_fltr_list_entry *)
1074                       ice_malloc(hw, sizeof(*v_list_itr));
1075         if (!v_list_itr) {
1076                 ret = -ENOMEM;
1077                 goto DONE;
1078         }
1079         v_list_itr->fltr_info.l_data.vlan.vlan_id = vlan->vid;
1080         v_list_itr->fltr_info.l_data.vlan.tpid = vlan->tpid;
1081         v_list_itr->fltr_info.l_data.vlan.tpid_valid = true;
1082         v_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
1083         v_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
1084         v_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
1085         v_list_itr->fltr_info.flag = ICE_FLTR_TX;
1086         v_list_itr->fltr_info.vsi_handle = vsi->idx;
1087
1088         LIST_ADD(&v_list_itr->list_entry, &list_head);
1089
1090         /* Add the vlan */
1091         ret = ice_add_vlan(hw, &list_head);
1092         if (ret != ICE_SUCCESS) {
1093                 PMD_DRV_LOG(ERR, "Failed to add VLAN filter");
1094                 ret = -EINVAL;
1095                 goto DONE;
1096         }
1097
1098         /* Add vlan into vlan list */
1099         f = rte_zmalloc(NULL, sizeof(*f), 0);
1100         if (!f) {
1101                 PMD_DRV_LOG(ERR, "failed to allocate memory");
1102                 ret = -ENOMEM;
1103                 goto DONE;
1104         }
1105         f->vlan_info.vlan.tpid = vlan->tpid;
1106         f->vlan_info.vlan.vid = vlan->vid;
1107         TAILQ_INSERT_TAIL(&vsi->vlan_list, f, next);
1108         vsi->vlan_num++;
1109
1110         ret = 0;
1111
1112 DONE:
1113         rte_free(v_list_itr);
1114         return ret;
1115 }
1116
1117 static int
1118 ice_remove_vlan_filter(struct ice_vsi *vsi, struct ice_vlan *vlan)
1119 {
1120         struct ice_fltr_list_entry *v_list_itr = NULL;
1121         struct ice_vlan_filter *f;
1122         struct LIST_HEAD_TYPE list_head;
1123         struct ice_hw *hw;
1124         int ret = 0;
1125
1126         if (!vsi || vlan->vid > RTE_ETHER_MAX_VLAN_ID)
1127                 return -EINVAL;
1128
1129         hw = ICE_VSI_TO_HW(vsi);
1130
1131         /* Can't find it, return an error */
1132         f = ice_find_vlan_filter(vsi, vlan);
1133         if (!f)
1134                 return -EINVAL;
1135
1136         INIT_LIST_HEAD(&list_head);
1137
1138         v_list_itr = (struct ice_fltr_list_entry *)
1139                       ice_malloc(hw, sizeof(*v_list_itr));
1140         if (!v_list_itr) {
1141                 ret = -ENOMEM;
1142                 goto DONE;
1143         }
1144
1145         v_list_itr->fltr_info.l_data.vlan.vlan_id = vlan->vid;
1146         v_list_itr->fltr_info.l_data.vlan.tpid = vlan->tpid;
1147         v_list_itr->fltr_info.l_data.vlan.tpid_valid = true;
1148         v_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
1149         v_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
1150         v_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
1151         v_list_itr->fltr_info.flag = ICE_FLTR_TX;
1152         v_list_itr->fltr_info.vsi_handle = vsi->idx;
1153
1154         LIST_ADD(&v_list_itr->list_entry, &list_head);
1155
1156         /* remove the vlan filter */
1157         ret = ice_remove_vlan(hw, &list_head);
1158         if (ret != ICE_SUCCESS) {
1159                 PMD_DRV_LOG(ERR, "Failed to remove VLAN filter");
1160                 ret = -EINVAL;
1161                 goto DONE;
1162         }
1163
1164         /* Remove the vlan id from vlan list */
1165         TAILQ_REMOVE(&vsi->vlan_list, f, next);
1166         rte_free(f);
1167         vsi->vlan_num--;
1168
1169         ret = 0;
1170 DONE:
1171         rte_free(v_list_itr);
1172         return ret;
1173 }
1174
1175 static int
1176 ice_remove_all_mac_vlan_filters(struct ice_vsi *vsi)
1177 {
1178         struct ice_mac_filter *m_f;
1179         struct ice_vlan_filter *v_f;
1180         void *temp;
1181         int ret = 0;
1182
1183         if (!vsi || !vsi->mac_num)
1184                 return -EINVAL;
1185
1186         RTE_TAILQ_FOREACH_SAFE(m_f, &vsi->mac_list, next, temp) {
1187                 ret = ice_remove_mac_filter(vsi, &m_f->mac_info.mac_addr);
1188                 if (ret != ICE_SUCCESS) {
1189                         ret = -EINVAL;
1190                         goto DONE;
1191                 }
1192         }
1193
1194         if (vsi->vlan_num == 0)
1195                 return 0;
1196
1197         RTE_TAILQ_FOREACH_SAFE(v_f, &vsi->vlan_list, next, temp) {
1198                 ret = ice_remove_vlan_filter(vsi, &v_f->vlan_info.vlan);
1199                 if (ret != ICE_SUCCESS) {
1200                         ret = -EINVAL;
1201                         goto DONE;
1202                 }
1203         }
1204
1205 DONE:
1206         return ret;
1207 }
1208
1209 /* Enable IRQ0 */
1210 static void
1211 ice_pf_enable_irq0(struct ice_hw *hw)
1212 {
1213         /* reset the registers */
1214         ICE_WRITE_REG(hw, PFINT_OICR_ENA, 0);
1215         ICE_READ_REG(hw, PFINT_OICR);
1216
1217 #ifdef ICE_LSE_SPT
1218         ICE_WRITE_REG(hw, PFINT_OICR_ENA,
1219                       (uint32_t)(PFINT_OICR_ENA_INT_ENA_M &
1220                                  (~PFINT_OICR_LINK_STAT_CHANGE_M)));
1221
1222         ICE_WRITE_REG(hw, PFINT_OICR_CTL,
1223                       (0 & PFINT_OICR_CTL_MSIX_INDX_M) |
1224                       ((0 << PFINT_OICR_CTL_ITR_INDX_S) &
1225                        PFINT_OICR_CTL_ITR_INDX_M) |
1226                       PFINT_OICR_CTL_CAUSE_ENA_M);
1227
1228         ICE_WRITE_REG(hw, PFINT_FW_CTL,
1229                       (0 & PFINT_FW_CTL_MSIX_INDX_M) |
1230                       ((0 << PFINT_FW_CTL_ITR_INDX_S) &
1231                        PFINT_FW_CTL_ITR_INDX_M) |
1232                       PFINT_FW_CTL_CAUSE_ENA_M);
1233 #else
1234         ICE_WRITE_REG(hw, PFINT_OICR_ENA, PFINT_OICR_ENA_INT_ENA_M);
1235 #endif
1236
1237         ICE_WRITE_REG(hw, GLINT_DYN_CTL(0),
1238                       GLINT_DYN_CTL_INTENA_M |
1239                       GLINT_DYN_CTL_CLEARPBA_M |
1240                       GLINT_DYN_CTL_ITR_INDX_M);
1241
1242         ice_flush(hw);
1243 }
1244
1245 /* Disable IRQ0 */
1246 static void
1247 ice_pf_disable_irq0(struct ice_hw *hw)
1248 {
1249         /* Disable all interrupt types */
1250         ICE_WRITE_REG(hw, GLINT_DYN_CTL(0), GLINT_DYN_CTL_WB_ON_ITR_M);
1251         ice_flush(hw);
1252 }
1253
1254 #ifdef ICE_LSE_SPT
1255 static void
1256 ice_handle_aq_msg(struct rte_eth_dev *dev)
1257 {
1258         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1259         struct ice_ctl_q_info *cq = &hw->adminq;
1260         struct ice_rq_event_info event;
1261         uint16_t pending, opcode;
1262         int ret;
1263
1264         event.buf_len = ICE_AQ_MAX_BUF_LEN;
1265         event.msg_buf = rte_zmalloc(NULL, event.buf_len, 0);
1266         if (!event.msg_buf) {
1267                 PMD_DRV_LOG(ERR, "Failed to allocate mem");
1268                 return;
1269         }
1270
1271         pending = 1;
1272         while (pending) {
1273                 ret = ice_clean_rq_elem(hw, cq, &event, &pending);
1274
1275                 if (ret != ICE_SUCCESS) {
1276                         PMD_DRV_LOG(INFO,
1277                                     "Failed to read msg from AdminQ, "
1278                                     "adminq_err: %u",
1279                                     hw->adminq.sq_last_status);
1280                         break;
1281                 }
1282                 opcode = rte_le_to_cpu_16(event.desc.opcode);
1283
1284                 switch (opcode) {
1285                 case ice_aqc_opc_get_link_status:
1286                         ret = ice_link_update(dev, 0);
1287                         if (!ret)
1288                                 rte_eth_dev_callback_process
1289                                         (dev, RTE_ETH_EVENT_INTR_LSC, NULL);
1290                         break;
1291                 default:
1292                         PMD_DRV_LOG(DEBUG, "Request %u is not supported yet",
1293                                     opcode);
1294                         break;
1295                 }
1296         }
1297         rte_free(event.msg_buf);
1298 }
1299 #endif
1300
1301 /**
1302  * Interrupt handler triggered by the NIC to handle a
1303  * specific interrupt cause.
1304  *
1305  * @param handle
1306  *  Pointer to interrupt handle.
1307  * @param param
1308  *  The address of parameter (struct rte_eth_dev *) registered before.
1309  *
1310  * @return
1311  *  void
1312  */
1313 static void
1314 ice_interrupt_handler(void *param)
1315 {
1316         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
1317         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1318         uint32_t oicr;
1319         uint32_t reg;
1320         uint8_t pf_num;
1321         uint8_t event;
1322         uint16_t queue;
1323         int ret;
1324 #ifdef ICE_LSE_SPT
1325         uint32_t int_fw_ctl;
1326 #endif
1327
1328         /* Disable interrupt */
1329         ice_pf_disable_irq0(hw);
1330
1331         /* read out interrupt causes */
1332         oicr = ICE_READ_REG(hw, PFINT_OICR);
1333 #ifdef ICE_LSE_SPT
1334         int_fw_ctl = ICE_READ_REG(hw, PFINT_FW_CTL);
1335 #endif
1336
1337         /* No interrupt event indicated */
1338         if (!(oicr & PFINT_OICR_INTEVENT_M)) {
1339                 PMD_DRV_LOG(INFO, "No interrupt event");
1340                 goto done;
1341         }
1342
1343 #ifdef ICE_LSE_SPT
1344         if (int_fw_ctl & PFINT_FW_CTL_INTEVENT_M) {
1345                 PMD_DRV_LOG(INFO, "FW_CTL: link state change event");
1346                 ice_handle_aq_msg(dev);
1347         }
1348 #else
1349         if (oicr & PFINT_OICR_LINK_STAT_CHANGE_M) {
1350                 PMD_DRV_LOG(INFO, "OICR: link state change event");
1351                 ret = ice_link_update(dev, 0);
1352                 if (!ret)
1353                         rte_eth_dev_callback_process
1354                                 (dev, RTE_ETH_EVENT_INTR_LSC, NULL);
1355         }
1356 #endif
1357
1358         if (oicr & PFINT_OICR_MAL_DETECT_M) {
1359                 PMD_DRV_LOG(WARNING, "OICR: MDD event");
1360                 reg = ICE_READ_REG(hw, GL_MDET_TX_PQM);
1361                 if (reg & GL_MDET_TX_PQM_VALID_M) {
1362                         pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >>
1363                                  GL_MDET_TX_PQM_PF_NUM_S;
1364                         event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >>
1365                                 GL_MDET_TX_PQM_MAL_TYPE_S;
1366                         queue = (reg & GL_MDET_TX_PQM_QNUM_M) >>
1367                                 GL_MDET_TX_PQM_QNUM_S;
1368
1369                         PMD_DRV_LOG(WARNING, "Malicious Driver Detection event "
1370                                     "%d by PQM on TX queue %d PF# %d",
1371                                     event, queue, pf_num);
1372                 }
1373
1374                 reg = ICE_READ_REG(hw, GL_MDET_TX_TCLAN);
1375                 if (reg & GL_MDET_TX_TCLAN_VALID_M) {
1376                         pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >>
1377                                  GL_MDET_TX_TCLAN_PF_NUM_S;
1378                         event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >>
1379                                 GL_MDET_TX_TCLAN_MAL_TYPE_S;
1380                         queue = (reg & GL_MDET_TX_TCLAN_QNUM_M) >>
1381                                 GL_MDET_TX_TCLAN_QNUM_S;
1382
1383                         PMD_DRV_LOG(WARNING, "Malicious Driver Detection event "
1384                                     "%d by TCLAN on TX queue %d PF# %d",
1385                                     event, queue, pf_num);
1386                 }
1387         }
1388 done:
1389         /* Enable interrupt */
1390         ice_pf_enable_irq0(hw);
1391         rte_intr_ack(dev->intr_handle);
1392 }
1393
1394 static void
1395 ice_init_proto_xtr(struct rte_eth_dev *dev)
1396 {
1397         struct ice_adapter *ad =
1398                         ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1399         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1400         struct ice_hw *hw = ICE_PF_TO_HW(pf);
1401         const struct proto_xtr_ol_flag *ol_flag;
1402         bool proto_xtr_enable = false;
1403         int offset;
1404         uint16_t i;
1405
1406         pf->proto_xtr = rte_zmalloc(NULL, pf->lan_nb_qps, 0);
1407         if (unlikely(pf->proto_xtr == NULL)) {
1408                 PMD_DRV_LOG(ERR, "No memory for setting up protocol extraction table");
1409                 return;
1410         }
1411
1412         for (i = 0; i < pf->lan_nb_qps; i++) {
1413                 pf->proto_xtr[i] = ad->devargs.proto_xtr[i] != PROTO_XTR_NONE ?
1414                                    ad->devargs.proto_xtr[i] :
1415                                    ad->devargs.proto_xtr_dflt;
1416
1417                 if (pf->proto_xtr[i] != PROTO_XTR_NONE) {
1418                         uint8_t type = pf->proto_xtr[i];
1419
1420                         ice_proto_xtr_ol_flag_params[type].required = true;
1421                         proto_xtr_enable = true;
1422                 }
1423         }
1424
1425         if (likely(!proto_xtr_enable))
1426                 return;
1427
1428         ice_check_proto_xtr_support(hw);
1429
1430         offset = rte_mbuf_dynfield_register(&ice_proto_xtr_metadata_param);
1431         if (unlikely(offset == -1)) {
1432                 PMD_DRV_LOG(ERR,
1433                             "Protocol extraction metadata is disabled in mbuf with error %d",
1434                             -rte_errno);
1435                 return;
1436         }
1437
1438         PMD_DRV_LOG(DEBUG,
1439                     "Protocol extraction metadata offset in mbuf is: %d",
1440                     offset);
1441         rte_net_ice_dynfield_proto_xtr_metadata_offs = offset;
1442
1443         for (i = 0; i < RTE_DIM(ice_proto_xtr_ol_flag_params); i++) {
1444                 ol_flag = &ice_proto_xtr_ol_flag_params[i];
1445
1446                 if (!ol_flag->required)
1447                         continue;
1448
1449                 if (!ice_proto_xtr_hw_support[i]) {
1450                         PMD_DRV_LOG(ERR,
1451                                     "Protocol extraction type %u is not supported in hardware",
1452                                     i);
1453                         rte_net_ice_dynfield_proto_xtr_metadata_offs = -1;
1454                         break;
1455                 }
1456
1457                 offset = rte_mbuf_dynflag_register(&ol_flag->param);
1458                 if (unlikely(offset == -1)) {
1459                         PMD_DRV_LOG(ERR,
1460                                     "Protocol extraction offload '%s' failed to register with error %d",
1461                                     ol_flag->param.name, -rte_errno);
1462
1463                         rte_net_ice_dynfield_proto_xtr_metadata_offs = -1;
1464                         break;
1465                 }
1466
1467                 PMD_DRV_LOG(DEBUG,
1468                     "Protocol extraction offload '%s' offset in mbuf is: %d",
1469                             ol_flag->param.name, offset);
1470                 *ol_flag->ol_flag = 1ULL << offset;
1471         }
1472 }
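
/* Illustrative sketch, not part of the original file: once the metadata
 * dynfield has been registered above, an application can read the extracted
 * protocol data from a received mbuf through the registered offset. The
 * helper name below is hypothetical; a caller would normally first test the
 * per-type dynflag that ice_init_proto_xtr() registers.
 */
static inline uint32_t
ice_example_read_proto_xtr_metadata(struct rte_mbuf *mbuf)
{
	/* RTE_MBUF_DYNFIELD() turns the registered offset into a typed
	 * pointer inside the mbuf; the field was registered as a uint32_t.
	 */
	return *RTE_MBUF_DYNFIELD(mbuf,
			rte_net_ice_dynfield_proto_xtr_metadata_offs,
			uint32_t *);
}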
1473
1474 /* Initialize SW parameters of the PF */
1475 static int
1476 ice_pf_sw_init(struct rte_eth_dev *dev)
1477 {
1478         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1479         struct ice_hw *hw = ICE_PF_TO_HW(pf);
1480
1481         pf->lan_nb_qp_max =
1482                 (uint16_t)RTE_MIN(hw->func_caps.common_cap.num_txq,
1483                                   hw->func_caps.common_cap.num_rxq);
1484
1485         pf->lan_nb_qps = pf->lan_nb_qp_max;
1486
1487         ice_init_proto_xtr(dev);
1488
1489         if (hw->func_caps.fd_fltr_guar > 0 ||
1490             hw->func_caps.fd_fltr_best_effort > 0) {
1491                 pf->flags |= ICE_FLAG_FDIR;
1492                 pf->fdir_nb_qps = ICE_DEFAULT_QP_NUM_FDIR;
1493                 pf->lan_nb_qps = pf->lan_nb_qp_max - pf->fdir_nb_qps;
1494         } else {
1495                 pf->fdir_nb_qps = 0;
1496         }
1497         pf->fdir_qp_offset = 0;
1498
1499         return 0;
1500 }
1501
1502 struct ice_vsi *
1503 ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type)
1504 {
1505         struct ice_hw *hw = ICE_PF_TO_HW(pf);
1506         struct ice_vsi *vsi = NULL;
1507         struct ice_vsi_ctx vsi_ctx;
1508         int ret;
1509         struct rte_ether_addr broadcast = {
1510                 .addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff} };
1511         struct rte_ether_addr mac_addr;
1512         uint16_t max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
1513         uint8_t tc_bitmap = 0x1;
1514         uint16_t cfg;
1515
1516         /* hw->num_lports = 1 in NIC mode */
1517         vsi = rte_zmalloc(NULL, sizeof(struct ice_vsi), 0);
1518         if (!vsi)
1519                 return NULL;
1520
1521         vsi->idx = pf->next_vsi_idx;
1522         pf->next_vsi_idx++;
1523         vsi->type = type;
1524         vsi->adapter = ICE_PF_TO_ADAPTER(pf);
1525         vsi->max_macaddrs = ICE_NUM_MACADDR_MAX;
1526         vsi->vlan_anti_spoof_on = 0;
1527         vsi->vlan_filter_on = 1;
1528         TAILQ_INIT(&vsi->mac_list);
1529         TAILQ_INIT(&vsi->vlan_list);
1530
1531         /* Keep in sync with the RTE_ETH_RSS_RETA_SIZE_x maximum value definitions */
1532         pf->hash_lut_size = hw->func_caps.common_cap.rss_table_size >
1533                         RTE_ETH_RSS_RETA_SIZE_512 ? RTE_ETH_RSS_RETA_SIZE_512 :
1534                         hw->func_caps.common_cap.rss_table_size;
1535         pf->flags |= ICE_FLAG_RSS_AQ_CAPABLE;
1536
1537         memset(&vsi_ctx, 0, sizeof(vsi_ctx));
1538         switch (type) {
1539         case ICE_VSI_PF:
1540                 vsi->nb_qps = pf->lan_nb_qps;
1541                 vsi->base_queue = 1;
1542                 ice_vsi_config_default_rss(&vsi_ctx.info);
1543                 vsi_ctx.alloc_from_pool = true;
1544                 vsi_ctx.flags = ICE_AQ_VSI_TYPE_PF;
1545                 /* switch_id is queried via the get_switch_config AQ
1546                  * command, which is issued by ice_init_hw
1547                  */
1548                 vsi_ctx.info.sw_id = hw->port_info->sw_id;
1549                 vsi_ctx.info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
1550                 /* Allow all untagged or tagged packets */
1551                 vsi_ctx.info.inner_vlan_flags = ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL;
1552                 vsi_ctx.info.inner_vlan_flags |= ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING;
1553                 vsi_ctx.info.q_opt_rss = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF |
1554                                          ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
1555                 if (ice_is_dvm_ena(hw)) {
1556                         vsi_ctx.info.outer_vlan_flags =
1557                                 (ICE_AQ_VSI_OUTER_VLAN_TX_MODE_ALL <<
1558                                  ICE_AQ_VSI_OUTER_VLAN_TX_MODE_S) &
1559                                 ICE_AQ_VSI_OUTER_VLAN_TX_MODE_M;
1560                         vsi_ctx.info.outer_vlan_flags |=
1561                                 (ICE_AQ_VSI_OUTER_TAG_VLAN_8100 <<
1562                                  ICE_AQ_VSI_OUTER_TAG_TYPE_S) &
1563                                 ICE_AQ_VSI_OUTER_TAG_TYPE_M;
1564                 }
1565
1566                 /* FDIR */
1567                 cfg = ICE_AQ_VSI_PROP_SECURITY_VALID |
1568                         ICE_AQ_VSI_PROP_FLOW_DIR_VALID;
1569                 vsi_ctx.info.valid_sections |= rte_cpu_to_le_16(cfg);
1570                 cfg = ICE_AQ_VSI_FD_ENABLE;
1571                 vsi_ctx.info.fd_options = rte_cpu_to_le_16(cfg);
1572                 vsi_ctx.info.max_fd_fltr_dedicated =
1573                         rte_cpu_to_le_16(hw->func_caps.fd_fltr_guar);
1574                 vsi_ctx.info.max_fd_fltr_shared =
1575                         rte_cpu_to_le_16(hw->func_caps.fd_fltr_best_effort);
1576
1577                 /* Enable VLAN/UP trip */
1578                 ret = ice_vsi_config_tc_queue_mapping(vsi,
1579                                                       &vsi_ctx.info,
1580                                                       ICE_DEFAULT_TCMAP);
1581                 if (ret) {
1582                         PMD_INIT_LOG(ERR,
1583                                      "tc queue mapping with vsi failed, "
1584                                      "err = %d",
1585                                      ret);
1586                         goto fail_mem;
1587                 }
1588
1589                 break;
1590         case ICE_VSI_CTRL:
1591                 vsi->nb_qps = pf->fdir_nb_qps;
1592                 vsi->base_queue = ICE_FDIR_QUEUE_ID;
1593                 vsi_ctx.alloc_from_pool = true;
1594                 vsi_ctx.flags = ICE_AQ_VSI_TYPE_PF;
1595
1596                 cfg = ICE_AQ_VSI_PROP_FLOW_DIR_VALID;
1597                 vsi_ctx.info.valid_sections |= rte_cpu_to_le_16(cfg);
1598                 cfg = ICE_AQ_VSI_FD_PROG_ENABLE;
1599                 vsi_ctx.info.fd_options = rte_cpu_to_le_16(cfg);
1600                 vsi_ctx.info.sw_id = hw->port_info->sw_id;
1601                 vsi_ctx.info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
1602                 ret = ice_vsi_config_tc_queue_mapping(vsi,
1603                                                       &vsi_ctx.info,
1604                                                       ICE_DEFAULT_TCMAP);
1605                 if (ret) {
1606                         PMD_INIT_LOG(ERR,
1607                                      "tc queue mapping with vsi failed, "
1608                                      "err = %d",
1609                                      ret);
1610                         goto fail_mem;
1611                 }
1612                 break;
1613         default:
1614                 /* for other types of VSI */
1615                 PMD_INIT_LOG(ERR, "other types of VSI not supported");
1616                 goto fail_mem;
1617         }
1618
1619         /* VF has MSIX interrupt in VF range, don't allocate here */
1620         if (type == ICE_VSI_PF) {
1621                 ret = ice_res_pool_alloc(&pf->msix_pool,
1622                                          RTE_MIN(vsi->nb_qps,
1623                                                  RTE_MAX_RXTX_INTR_VEC_ID));
1624                 if (ret < 0) {
1625                         PMD_INIT_LOG(ERR, "VSI MAIN %d get heap failed %d",
1626                                      vsi->vsi_id, ret);
1627                 }
1628                 vsi->msix_intr = ret;
1629                 vsi->nb_msix = RTE_MIN(vsi->nb_qps, RTE_MAX_RXTX_INTR_VEC_ID);
1630         } else if (type == ICE_VSI_CTRL) {
1631                 ret = ice_res_pool_alloc(&pf->msix_pool, 1);
1632                 if (ret < 0) {
1633                         PMD_DRV_LOG(ERR, "VSI %d get heap failed %d",
1634                                     vsi->vsi_id, ret);
1635                 }
1636                 vsi->msix_intr = ret;
1637                 vsi->nb_msix = 1;
1638         } else {
1639                 vsi->msix_intr = 0;
1640                 vsi->nb_msix = 0;
1641         }
1642         ret = ice_add_vsi(hw, vsi->idx, &vsi_ctx, NULL);
1643         if (ret != ICE_SUCCESS) {
1644                 PMD_INIT_LOG(ERR, "add vsi failed, err = %d", ret);
1645                 goto fail_mem;
1646         }
1647         /* store VSI information in the SW structure */
1648         vsi->vsi_id = vsi_ctx.vsi_num;
1649         vsi->info = vsi_ctx.info;
1650         pf->vsis_allocated = vsi_ctx.vsis_allocd;
1651         pf->vsis_unallocated = vsi_ctx.vsis_unallocated;
1652
1653         if (type == ICE_VSI_PF) {
1654                 /* MAC configuration */
1655                 rte_ether_addr_copy((struct rte_ether_addr *)
1656                                         hw->port_info->mac.perm_addr,
1657                                     &pf->dev_addr);
1658
1659                 rte_ether_addr_copy(&pf->dev_addr, &mac_addr);
1660                 ret = ice_add_mac_filter(vsi, &mac_addr);
1661                 if (ret != ICE_SUCCESS)
1662                         PMD_INIT_LOG(ERR, "Failed to add dflt MAC filter");
1663
1664                 rte_ether_addr_copy(&broadcast, &mac_addr);
1665                 ret = ice_add_mac_filter(vsi, &mac_addr);
1666                 if (ret != ICE_SUCCESS)
1667                         PMD_INIT_LOG(ERR, "Failed to add MAC filter");
1668         }
1669
1670         /* At the beginning, only TC0. */
1671         /* What we need here is the maximum number of Tx queues;
1672          * currently vsi->nb_qps holds that value.
1673          * Correct this if that ever changes.
1674          */
1675         max_txqs[0] = vsi->nb_qps;
1676         ret = ice_cfg_vsi_lan(hw->port_info, vsi->idx,
1677                               tc_bitmap, max_txqs);
1678         if (ret != ICE_SUCCESS)
1679                 PMD_INIT_LOG(ERR, "Failed to config vsi sched");
1680
1681         return vsi;
1682 fail_mem:
1683         rte_free(vsi);
1684         pf->next_vsi_idx--;
1685         return NULL;
1686 }
1687
1688 static int
1689 ice_send_driver_ver(struct ice_hw *hw)
1690 {
1691         struct ice_driver_ver dv;
1692
1693         /* we don't have a driver version yet, so use 0 as a dummy */
1694         dv.major_ver = 0;
1695         dv.minor_ver = 0;
1696         dv.build_ver = 0;
1697         dv.subbuild_ver = 0;
1698         strncpy((char *)dv.driver_string, "dpdk", sizeof(dv.driver_string));
1699
1700         return ice_aq_send_driver_ver(hw, &dv, NULL);
1701 }
1702
1703 static int
1704 ice_pf_setup(struct ice_pf *pf)
1705 {
1706         struct ice_hw *hw = ICE_PF_TO_HW(pf);
1707         struct ice_vsi *vsi;
1708         uint16_t unused;
1709
1710         /* Clear all stats counters */
1711         pf->offset_loaded = false;
1712         memset(&pf->stats, 0, sizeof(struct ice_hw_port_stats));
1713         memset(&pf->stats_offset, 0, sizeof(struct ice_hw_port_stats));
1714         memset(&pf->internal_stats, 0, sizeof(struct ice_eth_stats));
1715         memset(&pf->internal_stats_offset, 0, sizeof(struct ice_eth_stats));
1716
1717         /* force guaranteed filter pool for PF */
1718         ice_alloc_fd_guar_item(hw, &unused,
1719                                hw->func_caps.fd_fltr_guar);
1720         /* force shared filter pool for PF */
1721         ice_alloc_fd_shrd_item(hw, &unused,
1722                                hw->func_caps.fd_fltr_best_effort);
1723
1724         vsi = ice_setup_vsi(pf, ICE_VSI_PF);
1725         if (!vsi) {
1726                 PMD_INIT_LOG(ERR, "Failed to add vsi for PF");
1727                 return -EINVAL;
1728         }
1729
1730         pf->main_vsi = vsi;
1731
1732         return 0;
1733 }
1734
1735 static enum ice_pkg_type
1736 ice_load_pkg_type(struct ice_hw *hw)
1737 {
1738         enum ice_pkg_type package_type;
1739
1740         /* store the activated package type (OS default or Comms) */
1741         if (!strncmp((char *)hw->active_pkg_name, ICE_OS_DEFAULT_PKG_NAME,
1742                 ICE_PKG_NAME_SIZE))
1743                 package_type = ICE_PKG_TYPE_OS_DEFAULT;
1744         else if (!strncmp((char *)hw->active_pkg_name, ICE_COMMS_PKG_NAME,
1745                 ICE_PKG_NAME_SIZE))
1746                 package_type = ICE_PKG_TYPE_COMMS;
1747         else
1748                 package_type = ICE_PKG_TYPE_UNKNOWN;
1749
1750         PMD_INIT_LOG(NOTICE, "Active package is: %d.%d.%d.%d, %s (%s VLAN mode)",
1751                 hw->active_pkg_ver.major, hw->active_pkg_ver.minor,
1752                 hw->active_pkg_ver.update, hw->active_pkg_ver.draft,
1753                 hw->active_pkg_name,
1754                 ice_is_dvm_ena(hw) ? "double" : "single");
1755
1756         return package_type;
1757 }
1758
1759 int ice_load_pkg(struct ice_adapter *adapter, bool use_dsn, uint64_t dsn)
1760 {
1761         struct ice_hw *hw = &adapter->hw;
1762         char pkg_file[ICE_MAX_PKG_FILENAME_SIZE];
1763         char opt_ddp_filename[ICE_MAX_PKG_FILENAME_SIZE];
1764         void *buf;
1765         size_t bufsz;
1766         int err;
1767
1768         if (!use_dsn)
1769                 goto no_dsn;
1770
1771         memset(opt_ddp_filename, 0, ICE_MAX_PKG_FILENAME_SIZE);
1772         snprintf(opt_ddp_filename, ICE_MAX_PKG_FILENAME_SIZE,
1773                 "ice-%016" PRIx64 ".pkg", dsn);
1774         strncpy(pkg_file, ICE_PKG_FILE_SEARCH_PATH_UPDATES,
1775                 ICE_MAX_PKG_FILENAME_SIZE);
1776         strcat(pkg_file, opt_ddp_filename);
1777         if (rte_firmware_read(pkg_file, &buf, &bufsz) == 0)
1778                 goto load_fw;
1779
1780         strncpy(pkg_file, ICE_PKG_FILE_SEARCH_PATH_DEFAULT,
1781                 ICE_MAX_PKG_FILENAME_SIZE);
1782         strcat(pkg_file, opt_ddp_filename);
1783         if (rte_firmware_read(pkg_file, &buf, &bufsz) == 0)
1784                 goto load_fw;
1785
1786 no_dsn:
1787         strncpy(pkg_file, ICE_PKG_FILE_UPDATES, ICE_MAX_PKG_FILENAME_SIZE);
1788         if (rte_firmware_read(pkg_file, &buf, &bufsz) == 0)
1789                 goto load_fw;
1790
1791         strncpy(pkg_file, ICE_PKG_FILE_DEFAULT, ICE_MAX_PKG_FILENAME_SIZE);
1792         if (rte_firmware_read(pkg_file, &buf, &bufsz) < 0) {
1793                 PMD_INIT_LOG(ERR, "Failed to find the DDP package file in the search paths\n");
1794                 return -1;
1795         }
1796
1797 load_fw:
1798         PMD_INIT_LOG(DEBUG, "DDP package name: %s", pkg_file);
1799
1800         err = ice_copy_and_init_pkg(hw, buf, bufsz);
1801         if (err) {
1802                 PMD_INIT_LOG(ERR, "ice_copy_and_init_pkg failed: %d\n", err);
1803                 goto out;
1804         }
1805
1806         /* store the loaded pkg type info */
1807         adapter->active_pkg_type = ice_load_pkg_type(hw);
1808
1809 out:
1810         free(buf);
1811         return err;
1812 }
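
/* Illustrative note on the search order implemented above: for a device
 * serial number of, say, 0x0123456789abcdef, the driver first tries
 * "ice-0123456789abcdef.pkg" under ICE_PKG_FILE_SEARCH_PATH_UPDATES and
 * then under ICE_PKG_FILE_SEARCH_PATH_DEFAULT, before falling back to the
 * generic ICE_PKG_FILE_UPDATES and ICE_PKG_FILE_DEFAULT locations.
 */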
1813
1814 static void
1815 ice_base_queue_get(struct ice_pf *pf)
1816 {
1817         uint32_t reg;
1818         struct ice_hw *hw = ICE_PF_TO_HW(pf);
1819
1820         reg = ICE_READ_REG(hw, PFLAN_RX_QALLOC);
1821         if (reg & PFLAN_RX_QALLOC_VALID_M) {
1822                 pf->base_queue = reg & PFLAN_RX_QALLOC_FIRSTQ_M;
1823         } else {
1824                 PMD_INIT_LOG(WARNING, "Failed to get Rx base queue"
1825                                         " index");
1826         }
1827 }
1828
1829 static int
1830 parse_bool(const char *key, const char *value, void *args)
1831 {
1832         int *i = (int *)args;
1833         char *end;
1834         int num;
1835
1836         num = strtoul(value, &end, 10);
1837
1838         if (num != 0 && num != 1) {
1839                 PMD_DRV_LOG(WARNING, "invalid value:\"%s\" for key:\"%s\", "
1840                         "value must be 0 or 1",
1841                         value, key);
1842                 return -1;
1843         }
1844
1845         *i = num;
1846         return 0;
1847 }
1848
1849 static int
1850 parse_u64(const char *key, const char *value, void *args)
1851 {
1852         u64 *num = (u64 *)args;
1853         u64 tmp;
1854
1855         errno = 0;
1856         tmp = strtoull(value, NULL, 16);
1857         if (errno) {
1858                 PMD_DRV_LOG(WARNING, "%s: \"%s\" is not a valid u64",
1859                             key, value);
1860                 return -1;
1861         }
1862
1863         *num = tmp;
1864
1865         return 0;
1866 }
1867
1868 static int
1869 lookup_pps_type(const char *pps_name)
1870 {
1871         static struct {
1872                 const char *name;
1873                 enum pps_type type;
1874         } pps_type_map[] = {
1875                 { "pin",  PPS_PIN  },
1876         };
1877
1878         uint32_t i;
1879
1880         for (i = 0; i < RTE_DIM(pps_type_map); i++) {
1881                 if (strcmp(pps_name, pps_type_map[i].name) == 0)
1882                         return pps_type_map[i].type;
1883         }
1884
1885         return -1;
1886 }
1887
1888 static int
1889 parse_pin_set(const char *input, int pps_type, struct ice_devargs *devargs)
1890 {
1891         const char *str = input;
1892         char *end = NULL;
1893         uint32_t idx;
1894
1895         while (isblank(*str))
1896                 str++;
1897
1898         if (!isdigit(*str))
1899                 return -1;
1900
1901         if (pps_type == PPS_PIN) {
1902                 idx = strtoul(str, &end, 10);
1903                 if (end == NULL || idx >= ICE_MAX_PIN_NUM)
1904                         return -1;
1905                 while (isblank(*end))
1906                         end++;
1907                 if (*end != ']')
1908                         return -1;
1909
1910                 devargs->pin_idx = idx;
1911                 devargs->pps_out_ena = 1;
1912
1913                 return 0;
1914         }
1915
1916         return -1;
1917 }
1918
1919 static int
1920 parse_pps_out_parameter(const char *pins, struct ice_devargs *devargs)
1921 {
1922         const char *pin_start;
1923         uint32_t idx;
1924         int pps_type;
1925         char pps_name[32];
1926
1927         while (isblank(*pins))
1928                 pins++;
1929
1930         pins++;
1931         while (isblank(*pins))
1932                 pins++;
1933         if (*pins == '\0')
1934                 return -1;
1935
1936         for (idx = 0; ; idx++) {
1937                 if (isblank(pins[idx]) ||
1938                     pins[idx] == ':' ||
1939                     pins[idx] == '\0')
1940                         break;
1941
1942                 pps_name[idx] = pins[idx];
1943         }
1944         pps_name[idx] = '\0';
1945         pps_type = lookup_pps_type(pps_name);
1946         if (pps_type < 0)
1947                 return -1;
1948
1949         pins += idx;
1950
1951         pins += strcspn(pins, ":");
1952         if (*pins++ != ':')
1953                 return -1;
1954         while (isblank(*pins))
1955                 pins++;
1956
1957         pin_start = pins;
1958
1959         while (isblank(*pins))
1960                 pins++;
1961
1962         if (parse_pin_set(pin_start, pps_type, devargs) < 0)
1963                 return -1;
1964
1965         return 0;
1966 }
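
/* The parser above accepts a bracketed "<type>:<pin>" specification; an
 * illustrative well-formed value is "[pin:0]", which selects pin index 0
 * (indexes must be below ICE_MAX_PIN_NUM) and sets pps_out_ena.
 */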
1967
1968 static int
1969 handle_pps_out_arg(__rte_unused const char *key, const char *value,
1970                    void *extra_args)
1971 {
1972         struct ice_devargs *devargs = extra_args;
1973
1974         if (value == NULL || extra_args == NULL)
1975                 return -EINVAL;
1976
1977         if (parse_pps_out_parameter(value, devargs) < 0) {
1978                 PMD_DRV_LOG(ERR,
1979                             "The GPIO pin parameter is wrong : '%s'",
1980                             value);
1981                 return -1;
1982         }
1983
1984         return 0;
1985 }
1986
1987 static int ice_parse_devargs(struct rte_eth_dev *dev)
1988 {
1989         struct ice_adapter *ad =
1990                 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1991         struct rte_devargs *devargs = dev->device->devargs;
1992         struct rte_kvargs *kvlist;
1993         int ret;
1994
1995         if (devargs == NULL)
1996                 return 0;
1997
1998         kvlist = rte_kvargs_parse(devargs->args, ice_valid_args);
1999         if (kvlist == NULL) {
2000                 PMD_INIT_LOG(ERR, "Invalid kvargs key\n");
2001                 return -EINVAL;
2002         }
2003
2004         ad->devargs.proto_xtr_dflt = PROTO_XTR_NONE;
2005         memset(ad->devargs.proto_xtr, PROTO_XTR_NONE,
2006                sizeof(ad->devargs.proto_xtr));
2007
2008         ret = rte_kvargs_process(kvlist, ICE_PROTO_XTR_ARG,
2009                                  &handle_proto_xtr_arg, &ad->devargs);
2010         if (ret)
2011                 goto bail;
2012
2013         ret = rte_kvargs_process(kvlist, ICE_SAFE_MODE_SUPPORT_ARG,
2014                                  &parse_bool, &ad->devargs.safe_mode_support);
2015         if (ret)
2016                 goto bail;
2017
2018         ret = rte_kvargs_process(kvlist, ICE_PIPELINE_MODE_SUPPORT_ARG,
2019                                  &parse_bool, &ad->devargs.pipe_mode_support);
2020         if (ret)
2021                 goto bail;
2022
2023         ret = rte_kvargs_process(kvlist, ICE_HW_DEBUG_MASK_ARG,
2024                                  &parse_u64, &ad->hw.debug_mask);
2025         if (ret)
2026                 goto bail;
2027
2028         ret = rte_kvargs_process(kvlist, ICE_ONE_PPS_OUT_ARG,
2029                                  &handle_pps_out_arg, &ad->devargs);
2030         if (ret)
2031                 goto bail;
2032
2033         ret = rte_kvargs_process(kvlist, ICE_RX_LOW_LATENCY_ARG,
2034                                  &parse_bool, &ad->devargs.rx_low_latency);
2035
2036 bail:
2037         rte_kvargs_free(kvlist);
2038         return ret;
2039 }
2040
2041 /* Forward LLDP packets to the default VSI by setting switch rules */
2042 static int
2043 ice_vsi_config_sw_lldp(struct ice_vsi *vsi,  bool on)
2044 {
2045         struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
2046         struct ice_fltr_list_entry *s_list_itr = NULL;
2047         struct LIST_HEAD_TYPE list_head;
2048         int ret = 0;
2049
2050         INIT_LIST_HEAD(&list_head);
2051
2052         s_list_itr = (struct ice_fltr_list_entry *)
2053                         ice_malloc(hw, sizeof(*s_list_itr));
2054         if (!s_list_itr)
2055                 return -ENOMEM;
2056         s_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_ETHERTYPE;
2057         s_list_itr->fltr_info.vsi_handle = vsi->idx;
2058         s_list_itr->fltr_info.l_data.ethertype_mac.ethertype =
2059                         RTE_ETHER_TYPE_LLDP;
2060         s_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
2061         s_list_itr->fltr_info.flag = ICE_FLTR_RX;
2062         s_list_itr->fltr_info.src_id = ICE_SRC_ID_LPORT;
2063         LIST_ADD(&s_list_itr->list_entry, &list_head);
2064         if (on)
2065                 ret = ice_add_eth_mac(hw, &list_head);
2066         else
2067                 ret = ice_remove_eth_mac(hw, &list_head);
2068
2069         rte_free(s_list_itr);
2070         return ret;
2071 }
2072
2073 static enum ice_status
2074 ice_get_hw_res(struct ice_hw *hw, uint16_t res_type,
2075                 uint16_t num, uint16_t desc_id,
2076                 uint16_t *prof_buf, uint16_t *num_prof)
2077 {
2078         struct ice_aqc_res_elem *resp_buf;
2079         int ret;
2080         uint16_t buf_len;
2081         bool res_shared = 1;
2082         struct ice_aq_desc aq_desc;
2083         struct ice_sq_cd *cd = NULL;
2084         struct ice_aqc_get_allocd_res_desc *cmd =
2085                         &aq_desc.params.get_res_desc;
2086
2087         buf_len = sizeof(*resp_buf) * num;
2088         resp_buf = ice_malloc(hw, buf_len);
2089         if (!resp_buf)
2090                 return -ENOMEM;
2091
2092         ice_fill_dflt_direct_cmd_desc(&aq_desc,
2093                         ice_aqc_opc_get_allocd_res_desc);
2094
2095         cmd->ops.cmd.res = CPU_TO_LE16(((res_type << ICE_AQC_RES_TYPE_S) &
2096                                 ICE_AQC_RES_TYPE_M) | (res_shared ?
2097                                 ICE_AQC_RES_TYPE_FLAG_SHARED : 0));
2098         cmd->ops.cmd.first_desc = CPU_TO_LE16(desc_id);
2099
2100         ret = ice_aq_send_cmd(hw, &aq_desc, resp_buf, buf_len, cd);
2101         if (!ret)
2102                 *num_prof = LE16_TO_CPU(cmd->ops.resp.num_desc);
2103         else
2104                 goto exit;
2105
2106         ice_memcpy(prof_buf, resp_buf, sizeof(*resp_buf) *
2107                         (*num_prof), ICE_NONDMA_TO_NONDMA);
2108
2109 exit:
2110         rte_free(resp_buf);
2111         return ret;
2112 }
2113 static int
2114 ice_cleanup_resource(struct ice_hw *hw, uint16_t res_type)
2115 {
2116         int ret;
2117         uint16_t prof_id;
2118         uint16_t prof_buf[ICE_MAX_RES_DESC_NUM];
2119         uint16_t first_desc = 1;
2120         uint16_t num_prof = 0;
2121
2122         ret = ice_get_hw_res(hw, res_type, ICE_MAX_RES_DESC_NUM,
2123                         first_desc, prof_buf, &num_prof);
2124         if (ret) {
2125                 PMD_INIT_LOG(ERR, "Failed to get fxp resource");
2126                 return ret;
2127         }
2128
2129         for (prof_id = 0; prof_id < num_prof; prof_id++) {
2130                 ret = ice_free_hw_res(hw, res_type, 1, &prof_buf[prof_id]);
2131                 if (ret) {
2132                         PMD_INIT_LOG(ERR, "Failed to free fxp resource");
2133                         return ret;
2134                 }
2135         }
2136         return 0;
2137 }
2138
2139 static int
2140 ice_reset_fxp_resource(struct ice_hw *hw)
2141 {
2142         int ret;
2143
2144         ret = ice_cleanup_resource(hw, ICE_AQC_RES_TYPE_FD_PROF_BLDR_PROFID);
2145         if (ret) {
2146                 PMD_INIT_LOG(ERR, "Failed to clean up FDIR resource");
2147                 return ret;
2148         }
2149
2150         ret = ice_cleanup_resource(hw, ICE_AQC_RES_TYPE_HASH_PROF_BLDR_PROFID);
2151         if (ret) {
2152                 PMD_INIT_LOG(ERR, "Failed to clean up RSS resource");
2153                 return ret;
2154         }
2155
2156         return 0;
2157 }
2158
2159 static void
2160 ice_rss_ctx_init(struct ice_pf *pf)
2161 {
2162         memset(&pf->hash_ctx, 0, sizeof(pf->hash_ctx));
2163 }
2164
2165 static uint64_t
2166 ice_get_supported_rxdid(struct ice_hw *hw)
2167 {
2168         uint64_t supported_rxdid = 0; /* bitmap for supported RXDID */
2169         uint32_t regval;
2170         int i;
2171
2172         supported_rxdid |= BIT(ICE_RXDID_LEGACY_1);
2173
2174         for (i = ICE_RXDID_FLEX_NIC; i < ICE_FLEX_DESC_RXDID_MAX_NUM; i++) {
2175                 regval = ICE_READ_REG(hw, GLFLXP_RXDID_FLAGS(i, 0));
2176                 if ((regval >> GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S)
2177                         & GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M)
2178                         supported_rxdid |= BIT(i);
2179         }
2180         return supported_rxdid;
2181 }
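
/* Illustrative sketch, not part of the original file: a caller can test
 * whether a given flexible descriptor ID is usable before selecting it
 * for a queue. The helper name is hypothetical.
 */
static inline bool
ice_example_rxdid_is_supported(struct ice_hw *hw, int rxdid)
{
	/* BIT(rxdid) mirrors how ice_get_supported_rxdid() builds the map */
	return (ice_get_supported_rxdid(hw) & BIT(rxdid)) != 0;
}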
2182
2183 static int
2184 ice_dev_init(struct rte_eth_dev *dev)
2185 {
2186         struct rte_pci_device *pci_dev;
2187         struct rte_intr_handle *intr_handle;
2188         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2189         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2190         struct ice_adapter *ad =
2191                 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2192         struct ice_vsi *vsi;
2193         int ret;
2194 #ifndef RTE_EXEC_ENV_WINDOWS
2195         off_t pos;
2196         uint32_t dsn_low, dsn_high;
2197         uint64_t dsn;
2198         bool use_dsn;
2199 #endif
2200
2201         dev->dev_ops = &ice_eth_dev_ops;
2202         dev->rx_queue_count = ice_rx_queue_count;
2203         dev->rx_descriptor_status = ice_rx_descriptor_status;
2204         dev->tx_descriptor_status = ice_tx_descriptor_status;
2205         dev->rx_pkt_burst = ice_recv_pkts;
2206         dev->tx_pkt_burst = ice_xmit_pkts;
2207         dev->tx_pkt_prepare = ice_prep_pkts;
2208
2209         /* for secondary processes, we don't initialise any further as primary
2210          * has already done this work.
2211          */
2212         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2213                 ice_set_rx_function(dev);
2214                 ice_set_tx_function(dev);
2215                 return 0;
2216         }
2217
2218         dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
2219
2220         ice_set_default_ptype_table(dev);
2221         pci_dev = RTE_DEV_TO_PCI(dev->device);
2222         intr_handle = pci_dev->intr_handle;
2223
2224         pf->adapter = ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2225         pf->dev_data = dev->data;
2226         hw->back = pf->adapter;
2227         hw->hw_addr = (uint8_t *)pci_dev->mem_resource[0].addr;
2228         hw->vendor_id = pci_dev->id.vendor_id;
2229         hw->device_id = pci_dev->id.device_id;
2230         hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
2231         hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
2232         hw->bus.device = pci_dev->addr.devid;
2233         hw->bus.func = pci_dev->addr.function;
2234
2235         ret = ice_parse_devargs(dev);
2236         if (ret) {
2237                 PMD_INIT_LOG(ERR, "Failed to parse devargs");
2238                 return -EINVAL;
2239         }
2240
2241         ice_init_controlq_parameter(hw);
2242
2243         ret = ice_init_hw(hw);
2244         if (ret) {
2245                 PMD_INIT_LOG(ERR, "Failed to initialize HW");
2246                 return -EINVAL;
2247         }
2248
2249 #ifndef RTE_EXEC_ENV_WINDOWS
2250         use_dsn = false;
2251         dsn = 0;
2252         pos = rte_pci_find_ext_capability(pci_dev, RTE_PCI_EXT_CAP_ID_DSN);
2253         if (pos) {
2254                 if (rte_pci_read_config(pci_dev, &dsn_low, 4, pos + 4) < 0 ||
2255                                 rte_pci_read_config(pci_dev, &dsn_high, 4, pos + 8) < 0) {
2256                         PMD_INIT_LOG(ERR, "Failed to read pci config space\n");
2257                 } else {
2258                         use_dsn = true;
2259                         dsn = (uint64_t)dsn_high << 32 | dsn_low;
2260                 }
2261         } else {
2262                 PMD_INIT_LOG(ERR, "Failed to read device serial number\n");
2263         }
2264
2265         ret = ice_load_pkg(pf->adapter, use_dsn, dsn);
2266         if (ret == 0) {
2267                 ret = ice_init_hw_tbls(hw);
2268                 if (ret) {
2269                         PMD_INIT_LOG(ERR, "ice_init_hw_tbls failed: %d\n", ret);
2270                         rte_free(hw->pkg_copy);
2271                 }
2272         }
2273
2274         if (ret) {
2275                 if (ad->devargs.safe_mode_support == 0) {
2276                         PMD_INIT_LOG(ERR, "Failed to load the DDP package, "
2277                                         "use safe-mode-support=1 to enter Safe Mode");
2278                         goto err_init_fw;
2279                 }
2280
2281                 PMD_INIT_LOG(WARNING, "Failed to load the DDP package, "
2282                                         "entering Safe Mode");
2283                 ad->is_safe_mode = 1;
2284         }
2285 #endif
2286
2287         PMD_INIT_LOG(INFO, "FW %d.%d.%05d API %d.%d",
2288                      hw->fw_maj_ver, hw->fw_min_ver, hw->fw_build,
2289                      hw->api_maj_ver, hw->api_min_ver);
2290
2291         ice_pf_sw_init(dev);
2292         ret = ice_init_mac_address(dev);
2293         if (ret) {
2294                 PMD_INIT_LOG(ERR, "Failed to initialize mac address");
2295                 goto err_init_mac;
2296         }
2297
2298         ret = ice_res_pool_init(&pf->msix_pool, 1,
2299                                 hw->func_caps.common_cap.num_msix_vectors - 1);
2300         if (ret) {
2301                 PMD_INIT_LOG(ERR, "Failed to init MSIX pool");
2302                 goto err_msix_pool_init;
2303         }
2304
2305         ret = ice_pf_setup(pf);
2306         if (ret) {
2307                 PMD_INIT_LOG(ERR, "Failed to setup PF");
2308                 goto err_pf_setup;
2309         }
2310
2311         ret = ice_send_driver_ver(hw);
2312         if (ret) {
2313                 PMD_INIT_LOG(ERR, "Failed to send driver version");
2314                 goto err_pf_setup;
2315         }
2316
2317         vsi = pf->main_vsi;
2318
2319         ret = ice_aq_stop_lldp(hw, true, false, NULL);
2320         if (ret != ICE_SUCCESS)
2321                 PMD_INIT_LOG(DEBUG, "lldp has already stopped\n");
2322         ret = ice_init_dcb(hw, true);
2323         if (ret != ICE_SUCCESS)
2324                 PMD_INIT_LOG(DEBUG, "Failed to init DCB\n");
2325         /* Forward LLDP packets to default VSI */
2326         ret = ice_vsi_config_sw_lldp(vsi, true);
2327         if (ret != ICE_SUCCESS)
2328                 PMD_INIT_LOG(DEBUG, "Failed to cfg lldp\n");
2329         /* register callback func to eal lib */
2330         rte_intr_callback_register(intr_handle,
2331                                    ice_interrupt_handler, dev);
2332
2333         ice_pf_enable_irq0(hw);
2334
2335         /* enable uio intr after callback register */
2336         rte_intr_enable(intr_handle);
2337
2338         /* get the base queue pair index in the device */
2339         ice_base_queue_get(pf);
2340
2341         /* Initialize RSS context for gtpu_eh */
2342         ice_rss_ctx_init(pf);
2343
2344         /* Initialize TM configuration */
2345         ice_tm_conf_init(dev);
2346
2347         if (!ad->is_safe_mode) {
2348                 ret = ice_flow_init(ad);
2349                 if (ret) {
2350                         PMD_INIT_LOG(ERR, "Failed to initialize flow");
2351                         goto err_flow_init;
2352                 }
2353         }
2354
2355         ret = ice_reset_fxp_resource(hw);
2356         if (ret) {
2357                 PMD_INIT_LOG(ERR, "Failed to reset fxp resource");
2358                 goto err_flow_init;
2359         }
2360
2361         pf->supported_rxdid = ice_get_supported_rxdid(hw);
2362
2363         return 0;
2364
2365 err_flow_init:
2366         ice_flow_uninit(ad);
2367         rte_intr_disable(intr_handle);
2368         ice_pf_disable_irq0(hw);
2369         rte_intr_callback_unregister(intr_handle,
2370                                      ice_interrupt_handler, dev);
2371 err_pf_setup:
2372         ice_res_pool_destroy(&pf->msix_pool);
2373 err_msix_pool_init:
2374         rte_free(dev->data->mac_addrs);
2375         dev->data->mac_addrs = NULL;
2376 err_init_mac:
2377         rte_free(pf->proto_xtr);
2378 #ifndef RTE_EXEC_ENV_WINDOWS
2379 err_init_fw:
2380 #endif
2381         ice_deinit_hw(hw);
2382
2383         return ret;
2384 }
2385
2386 int
2387 ice_release_vsi(struct ice_vsi *vsi)
2388 {
2389         struct ice_hw *hw;
2390         struct ice_vsi_ctx vsi_ctx;
2391         enum ice_status ret;
2392         int error = 0;
2393
2394         if (!vsi)
2395                 return error;
2396
2397         hw = ICE_VSI_TO_HW(vsi);
2398
2399         ice_remove_all_mac_vlan_filters(vsi);
2400
2401         memset(&vsi_ctx, 0, sizeof(vsi_ctx));
2402
2403         vsi_ctx.vsi_num = vsi->vsi_id;
2404         vsi_ctx.info = vsi->info;
2405         ret = ice_free_vsi(hw, vsi->idx, &vsi_ctx, false, NULL);
2406         if (ret != ICE_SUCCESS) {
2407                 PMD_INIT_LOG(ERR, "Failed to free vsi by aq, %u", vsi->vsi_id);
2408                 error = -1;
2409         }
2410
2411         rte_free(vsi->rss_lut);
2412         rte_free(vsi->rss_key);
2413         rte_free(vsi);
2414         return error;
2415 }
2416
2417 void
2418 ice_vsi_disable_queues_intr(struct ice_vsi *vsi)
2419 {
2420         struct rte_eth_dev *dev = &rte_eth_devices[vsi->adapter->pf.dev_data->port_id];
2421         struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
2422         struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
2423         struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
2424         uint16_t msix_intr, i;
2425
2426         /* disable interrupts and also clear all the existing config */
2427         for (i = 0; i < vsi->nb_qps; i++) {
2428                 ICE_WRITE_REG(hw, QINT_TQCTL(vsi->base_queue + i), 0);
2429                 ICE_WRITE_REG(hw, QINT_RQCTL(vsi->base_queue + i), 0);
2430                 rte_wmb();
2431         }
2432
2433         if (rte_intr_allow_others(intr_handle))
2434                 /* vfio-pci */
2435                 for (i = 0; i < vsi->nb_msix; i++) {
2436                         msix_intr = vsi->msix_intr + i;
2437                         ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr),
2438                                       GLINT_DYN_CTL_WB_ON_ITR_M);
2439                 }
2440         else
2441                 /* igb_uio */
2442                 ICE_WRITE_REG(hw, GLINT_DYN_CTL(0), GLINT_DYN_CTL_WB_ON_ITR_M);
2443 }
2444
2445 static int
2446 ice_dev_stop(struct rte_eth_dev *dev)
2447 {
2448         struct rte_eth_dev_data *data = dev->data;
2449         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2450         struct ice_vsi *main_vsi = pf->main_vsi;
2451         struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
2452         struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
2453         uint16_t i;
2454
2455         /* avoid stopping again */
2456         if (pf->adapter_stopped)
2457                 return 0;
2458
2459         /* stop and clear all Rx queues */
2460         for (i = 0; i < data->nb_rx_queues; i++)
2461                 ice_rx_queue_stop(dev, i);
2462
2463         /* stop and clear all Tx queues */
2464         for (i = 0; i < data->nb_tx_queues; i++)
2465                 ice_tx_queue_stop(dev, i);
2466
2467         /* disable all queue interrupts */
2468         ice_vsi_disable_queues_intr(main_vsi);
2469
2470         if (pf->init_link_up)
2471                 ice_dev_set_link_up(dev);
2472         else
2473                 ice_dev_set_link_down(dev);
2474
2475         /* Clean datapath event and queue/vec mapping */
2476         rte_intr_efd_disable(intr_handle);
2477         rte_intr_vec_list_free(intr_handle);
2478
2479         pf->adapter_stopped = true;
2480         dev->data->dev_started = 0;
2481
2482         return 0;
2483 }
2484
2485 static int
2486 ice_dev_close(struct rte_eth_dev *dev)
2487 {
2488         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2489         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2490         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2491         struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
2492         struct ice_adapter *ad =
2493                 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2494         int ret;
2495         uint32_t val;
2496         uint8_t timer = hw->func_caps.ts_func_info.tmr_index_owned;
2497         uint32_t pin_idx = ad->devargs.pin_idx;
2498
2499         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2500                 return 0;
2501
2502         /* Since stop will bring the link down, a link event will be
2503          * triggered; disable the irq first so that deallocating
2504          * port_info and other resources cannot crash the interrupt
2505          * service thread.
2506          */
2507         ice_pf_disable_irq0(hw);
2508
2509         ret = ice_dev_stop(dev);
2510
2511         if (!ad->is_safe_mode)
2512                 ice_flow_uninit(ad);
2513
2514         /* release all queue resource */
2515         ice_free_queues(dev);
2516
2517         ice_res_pool_destroy(&pf->msix_pool);
2518         ice_release_vsi(pf->main_vsi);
2519         ice_sched_cleanup_all(hw);
2520         ice_free_hw_tbls(hw);
2521         rte_free(hw->port_info);
2522         hw->port_info = NULL;
2523         ice_shutdown_all_ctrlq(hw);
2524         rte_free(pf->proto_xtr);
2525         pf->proto_xtr = NULL;
2526
2527         /* Uninit TM configuration */
2528         ice_tm_conf_uninit(dev);
2529
2530         if (ad->devargs.pps_out_ena) {
2531                 ICE_WRITE_REG(hw, GLTSYN_AUX_OUT(pin_idx, timer), 0);
2532                 ICE_WRITE_REG(hw, GLTSYN_CLKO(pin_idx, timer), 0);
2533                 ICE_WRITE_REG(hw, GLTSYN_TGT_L(pin_idx, timer), 0);
2534                 ICE_WRITE_REG(hw, GLTSYN_TGT_H(pin_idx, timer), 0);
2535
2536                 val = GLGEN_GPIO_CTL_PIN_DIR_M;
2537                 ICE_WRITE_REG(hw, GLGEN_GPIO_CTL(pin_idx), val);
2538         }
2539
2540         /* disable uio intr before callback unregister */
2541         rte_intr_disable(intr_handle);
2542
2543         /* unregister callback func from eal lib */
2544         rte_intr_callback_unregister(intr_handle,
2545                                      ice_interrupt_handler, dev);
2546
2547         return ret;
2548 }
2549
2550 static int
2551 ice_dev_uninit(struct rte_eth_dev *dev)
2552 {
2553         ice_dev_close(dev);
2554
2555         return 0;
2556 }
2557
2558 static bool
2559 is_hash_cfg_valid(struct ice_rss_hash_cfg *cfg)
2560 {
2561         return cfg->hash_flds != 0 && cfg->addl_hdrs != 0;
2562 }
2563
2564 static void
2565 hash_cfg_reset(struct ice_rss_hash_cfg *cfg)
2566 {
2567         cfg->hash_flds = 0;
2568         cfg->addl_hdrs = 0;
2569         cfg->symm = 0;
2570         cfg->hdr_type = ICE_RSS_OUTER_HEADERS;
2571 }
2572
2573 static int
2574 ice_hash_moveout(struct ice_pf *pf, struct ice_rss_hash_cfg *cfg)
2575 {
2576         enum ice_status status = ICE_SUCCESS;
2577         struct ice_hw *hw = ICE_PF_TO_HW(pf);
2578         struct ice_vsi *vsi = pf->main_vsi;
2579
2580         if (!is_hash_cfg_valid(cfg))
2581                 return -ENOENT;
2582
2583         status = ice_rem_rss_cfg(hw, vsi->idx, cfg);
2584         if (status && status != ICE_ERR_DOES_NOT_EXIST) {
2585                 PMD_DRV_LOG(ERR,
2586                             "ice_rem_rss_cfg failed for VSI:%d, error:%d\n",
2587                             vsi->idx, status);
2588                 return -EBUSY;
2589         }
2590
2591         return 0;
2592 }
2593
2594 static int
2595 ice_hash_moveback(struct ice_pf *pf, struct ice_rss_hash_cfg *cfg)
2596 {
2597         enum ice_status status = ICE_SUCCESS;
2598         struct ice_hw *hw = ICE_PF_TO_HW(pf);
2599         struct ice_vsi *vsi = pf->main_vsi;
2600
2601         if (!is_hash_cfg_valid(cfg))
2602                 return -ENOENT;
2603
2604         status = ice_add_rss_cfg(hw, vsi->idx, cfg);
2605         if (status) {
2606                 PMD_DRV_LOG(ERR,
2607                             "ice_add_rss_cfg failed for VSI:%d, error:%d\n",
2608                             vsi->idx, status);
2609                 return -EBUSY;
2610         }
2611
2612         return 0;
2613 }
2614
2615 static int
2616 ice_hash_remove(struct ice_pf *pf, struct ice_rss_hash_cfg *cfg)
2617 {
2618         int ret;
2619
2620         ret = ice_hash_moveout(pf, cfg);
2621         if (ret && (ret != -ENOENT))
2622                 return ret;
2623
2624         hash_cfg_reset(cfg);
2625
2626         return 0;
2627 }
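
/* A note on the three helpers above: ice_hash_moveout() removes an RSS
 * configuration from hardware while keeping it cached in the context, so
 * that ice_hash_moveback() can re-apply it later; ice_hash_remove()
 * additionally resets the cached entry. The GTPU pre/post hooks below use
 * these to reshuffle overlapping GTPU hash contexts.
 */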
2628
2629 static int
2630 ice_add_rss_cfg_pre_gtpu(struct ice_pf *pf, struct ice_hash_gtpu_ctx *ctx,
2631                          u8 ctx_idx)
2632 {
2633         int ret;
2634
2635         switch (ctx_idx) {
2636         case ICE_HASH_GTPU_CTX_EH_IP:
2637                 ret = ice_hash_remove(pf,
2638                                       &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_UDP]);
2639                 if (ret && (ret != -ENOENT))
2640                         return ret;
2641
2642                 ret = ice_hash_remove(pf,
2643                                       &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_TCP]);
2644                 if (ret && (ret != -ENOENT))
2645                         return ret;
2646
2647                 ret = ice_hash_remove(pf,
2648                                       &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP]);
2649                 if (ret && (ret != -ENOENT))
2650                         return ret;
2651
2652                 ret = ice_hash_remove(pf,
2653                                       &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_UDP]);
2654                 if (ret && (ret != -ENOENT))
2655                         return ret;
2656
2657                 ret = ice_hash_remove(pf,
2658                                       &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_TCP]);
2659                 if (ret && (ret != -ENOENT))
2660                         return ret;
2661
2662                 ret = ice_hash_remove(pf,
2663                                       &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP]);
2664                 if (ret && (ret != -ENOENT))
2665                         return ret;
2666
2667                 ret = ice_hash_remove(pf,
2668                                       &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_UDP]);
2669                 if (ret && (ret != -ENOENT))
2670                         return ret;
2671
2672                 ret = ice_hash_remove(pf,
2673                                       &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_TCP]);
2674                 if (ret && (ret != -ENOENT))
2675                         return ret;
2676
2677                 break;
2678         case ICE_HASH_GTPU_CTX_EH_IP_UDP:
2679                 ret = ice_hash_remove(pf,
2680                                       &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_UDP]);
2681                 if (ret && (ret != -ENOENT))
2682                         return ret;
2683
2684                 ret = ice_hash_remove(pf,
2685                                       &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_UDP]);
2686                 if (ret && (ret != -ENOENT))
2687                         return ret;
2688
2689                 ret = ice_hash_moveout(pf,
2690                                        &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP]);
2691                 if (ret && (ret != -ENOENT))
2692                         return ret;
2693
2694                 ret = ice_hash_moveout(pf,
2695                                        &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_TCP]);
2696                 if (ret && (ret != -ENOENT))
2697                         return ret;
2698
2699                 ret = ice_hash_moveout(pf,
2700                                        &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP]);
2701                 if (ret && (ret != -ENOENT))
2702                         return ret;
2703
2704                 ret = ice_hash_moveout(pf,
2705                                        &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_TCP]);
2706                 if (ret && (ret != -ENOENT))
2707                         return ret;
2708
2709                 break;
2710         case ICE_HASH_GTPU_CTX_EH_IP_TCP:
2711                 ret = ice_hash_remove(pf,
2712                                       &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_TCP]);
2713                 if (ret && (ret != -ENOENT))
2714                         return ret;
2715
2716                 ret = ice_hash_remove(pf,
2717                                       &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_TCP]);
2718                 if (ret && (ret != -ENOENT))
2719                         return ret;
2720
2721                 ret = ice_hash_moveout(pf,
2722                                        &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP]);
2723                 if (ret && (ret != -ENOENT))
2724                         return ret;
2725
2726                 ret = ice_hash_moveout(pf,
2727                                        &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_UDP]);
2728                 if (ret && (ret != -ENOENT))
2729                         return ret;
2730
2731                 ret = ice_hash_moveout(pf,
2732                                        &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP]);
2733                 if (ret && (ret != -ENOENT))
2734                         return ret;
2735
2736                 ret = ice_hash_moveout(pf,
2737                                        &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_UDP]);
2738                 if (ret && (ret != -ENOENT))
2739                         return ret;
2740
2741                 break;
2742         case ICE_HASH_GTPU_CTX_UP_IP:
2743                 ret = ice_hash_remove(pf,
2744                                       &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_UDP]);
2745                 if (ret && (ret != -ENOENT))
2746                         return ret;
2747
2748                 ret = ice_hash_remove(pf,
2749                                       &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_TCP]);
2750                 if (ret && (ret != -ENOENT))
2751                         return ret;
2752
2753                 ret = ice_hash_moveout(pf,
2754                                        &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP]);
2755                 if (ret && (ret != -ENOENT))
2756                         return ret;
2757
2758                 ret = ice_hash_moveout(pf,
2759                                        &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_UDP]);
2760                 if (ret && (ret != -ENOENT))
2761                         return ret;
2762
2763                 ret = ice_hash_moveout(pf,
2764                                        &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_TCP]);
2765                 if (ret && (ret != -ENOENT))
2766                         return ret;
2767
2768                 break;
2769         case ICE_HASH_GTPU_CTX_UP_IP_UDP:
2770         case ICE_HASH_GTPU_CTX_UP_IP_TCP:
2771                 ret = ice_hash_moveout(pf,
2772                                        &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP]);
2773                 if (ret && (ret != -ENOENT))
2774                         return ret;
2775
2776                 ret = ice_hash_moveout(pf,
2777                                        &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_UDP]);
2778                 if (ret && (ret != -ENOENT))
2779                         return ret;
2780
2781                 ret = ice_hash_moveout(pf,
2782                                        &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_TCP]);
2783                 if (ret && (ret != -ENOENT))
2784                         return ret;
2785
2786                 break;
2787         case ICE_HASH_GTPU_CTX_DW_IP:
2788                 ret = ice_hash_remove(pf,
2789                                       &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_UDP]);
2790                 if (ret && (ret != -ENOENT))
2791                         return ret;
2792
2793                 ret = ice_hash_remove(pf,
2794                                       &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_TCP]);
2795                 if (ret && (ret != -ENOENT))
2796                         return ret;
2797
2798                 ret = ice_hash_moveout(pf,
2799                                        &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP]);
2800                 if (ret && (ret != -ENOENT))
2801                         return ret;
2802
2803                 ret = ice_hash_moveout(pf,
2804                                        &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_UDP]);
2805                 if (ret && (ret != -ENOENT))
2806                         return ret;
2807
2808                 ret = ice_hash_moveout(pf,
2809                                        &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_TCP]);
2810                 if (ret && (ret != -ENOENT))
2811                         return ret;
2812
2813                 break;
2814         case ICE_HASH_GTPU_CTX_DW_IP_UDP:
2815         case ICE_HASH_GTPU_CTX_DW_IP_TCP:
2816                 ret = ice_hash_moveout(pf,
2817                                        &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP]);
2818                 if (ret && (ret != -ENOENT))
2819                         return ret;
2820
2821                 ret = ice_hash_moveout(pf,
2822                                        &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_UDP]);
2823                 if (ret && (ret != -ENOENT))
2824                         return ret;
2825
2826                 ret = ice_hash_moveout(pf,
2827                                        &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_TCP]);
2828                 if (ret && (ret != -ENOENT))
2829                         return ret;
2830
2831                 break;
2832         default:
2833                 break;
2834         }
2835
2836         return 0;
2837 }
2838
2839 static u8 calc_gtpu_ctx_idx(uint32_t hdr)
2840 {
2841         u8 eh_idx, ip_idx;
2842
2843         if (hdr & ICE_FLOW_SEG_HDR_GTPU_EH)
2844                 eh_idx = 0;
2845         else if (hdr & ICE_FLOW_SEG_HDR_GTPU_UP)
2846                 eh_idx = 1;
2847         else if (hdr & ICE_FLOW_SEG_HDR_GTPU_DWN)
2848                 eh_idx = 2;
2849         else
2850                 return ICE_HASH_GTPU_CTX_MAX;
2851
2852         ip_idx = 0;
2853         if (hdr & ICE_FLOW_SEG_HDR_UDP)
2854                 ip_idx = 1;
2855         else if (hdr & ICE_FLOW_SEG_HDR_TCP)
2856                 ip_idx = 2;
2857
2858         if (hdr & (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6))
2859                 return eh_idx * 3 + ip_idx;
2860         else
2861                 return ICE_HASH_GTPU_CTX_MAX;
2862 }
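/*
 * Worked example for the mapping above (illustrative only, derived from
 * the arithmetic: eh_idx selects the row EH=0/UP=1/DWN=2 and ip_idx the
 * column other=0/UDP=1/TCP=2, assuming the ICE_HASH_GTPU_CTX_* values
 * follow that order, as the switch statements in this file do):
 *
 *   calc_gtpu_ctx_idx(ICE_FLOW_SEG_HDR_GTPU_EH |
 *                     ICE_FLOW_SEG_HDR_IPV4 |
 *                     ICE_FLOW_SEG_HDR_UDP)
 *       == 0 * 3 + 1 == ICE_HASH_GTPU_CTX_EH_IP_UDP
 *
 * A hdr without any GTPU flag, or without an IPv4/IPv6 flag, yields
 * ICE_HASH_GTPU_CTX_MAX, i.e. "no GTPU context to track".
 */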
2863
2864 static int
2865 ice_add_rss_cfg_pre(struct ice_pf *pf, uint32_t hdr)
2866 {
2867         u8 gtpu_ctx_idx = calc_gtpu_ctx_idx(hdr);
2868
2869         if (hdr & ICE_FLOW_SEG_HDR_IPV4)
2870                 return ice_add_rss_cfg_pre_gtpu(pf, &pf->hash_ctx.gtpu4,
2871                                                 gtpu_ctx_idx);
2872         else if (hdr & ICE_FLOW_SEG_HDR_IPV6)
2873                 return ice_add_rss_cfg_pre_gtpu(pf, &pf->hash_ctx.gtpu6,
2874                                                 gtpu_ctx_idx);
2875
2876         return 0;
2877 }
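/*
 * Note: IPv4 and IPv6 GTPU flows are tracked in separate context tables
 * (pf->hash_ctx.gtpu4 vs. pf->hash_ctx.gtpu6), so programming an IPv4
 * GTPU rule never displaces an IPv6 one, and vice versa.
 */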
2878
2879 static int
2880 ice_add_rss_cfg_post_gtpu(struct ice_pf *pf, struct ice_hash_gtpu_ctx *ctx,
2881                           u8 ctx_idx, struct ice_rss_hash_cfg *cfg)
2882 {
2883         int ret;
2884
2885         if (ctx_idx < ICE_HASH_GTPU_CTX_MAX)
2886                 ctx->ctx[ctx_idx] = *cfg;
2887
2888         switch (ctx_idx) {
2889         case ICE_HASH_GTPU_CTX_EH_IP:
2890                 break;
2891         case ICE_HASH_GTPU_CTX_EH_IP_UDP:
2892                 ret = ice_hash_moveback(pf,
2893                                         &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP]);
2894                 if (ret && (ret != -ENOENT))
2895                         return ret;
2896
2897                 ret = ice_hash_moveback(pf,
2898                                         &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_TCP]);
2899                 if (ret && (ret != -ENOENT))
2900                         return ret;
2901
2902                 ret = ice_hash_moveback(pf,
2903                                         &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP]);
2904                 if (ret && (ret != -ENOENT))
2905                         return ret;
2906
2907                 ret = ice_hash_moveback(pf,
2908                                         &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_TCP]);
2909                 if (ret && (ret != -ENOENT))
2910                         return ret;
2911
2912                 break;
2913         case ICE_HASH_GTPU_CTX_EH_IP_TCP:
2914                 ret = ice_hash_moveback(pf,
2915                                         &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP]);
2916                 if (ret && (ret != -ENOENT))
2917                         return ret;
2918
2919                 ret = ice_hash_moveback(pf,
2920                                         &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_UDP]);
2921                 if (ret && (ret != -ENOENT))
2922                         return ret;
2923
2924                 ret = ice_hash_moveback(pf,
2925                                         &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP]);
2926                 if (ret && (ret != -ENOENT))
2927                         return ret;
2928
2929                 ret = ice_hash_moveback(pf,
2930                                         &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_UDP]);
2931                 if (ret && (ret != -ENOENT))
2932                         return ret;
2933
2934                 break;
2935         case ICE_HASH_GTPU_CTX_UP_IP:
2936         case ICE_HASH_GTPU_CTX_UP_IP_UDP:
2937         case ICE_HASH_GTPU_CTX_UP_IP_TCP:
2938         case ICE_HASH_GTPU_CTX_DW_IP:
2939         case ICE_HASH_GTPU_CTX_DW_IP_UDP:
2940         case ICE_HASH_GTPU_CTX_DW_IP_TCP:
2941                 ret = ice_hash_moveback(pf,
2942                                         &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP]);
2943                 if (ret && (ret != -ENOENT))
2944                         return ret;
2945
2946                 ret = ice_hash_moveback(pf,
2947                                         &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_UDP]);
2948                 if (ret && (ret != -ENOENT))
2949                         return ret;
2950
2951                 ret = ice_hash_moveback(pf,
2952                                         &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_TCP]);
2953                 if (ret && (ret != -ENOENT))
2954                         return ret;
2955
2956                 break;
2957         default:
2958                 break;
2959         }
2960
2961         return 0;
2962 }
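/*
 * The post hook mirrors ice_add_rss_cfg_pre_gtpu(): contexts that the
 * pre hook moved out to make room for the rule just programmed are moved
 * back in here. -ENOENT is tolerated throughout because an empty context
 * slot simply means no rule of that type was ever added.
 */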
2963
2964 static int
2965 ice_add_rss_cfg_post(struct ice_pf *pf, struct ice_rss_hash_cfg *cfg)
2966 {
2967         u8 gtpu_ctx_idx = calc_gtpu_ctx_idx(cfg->addl_hdrs);
2968
2969         if (cfg->addl_hdrs & ICE_FLOW_SEG_HDR_IPV4)
2970                 return ice_add_rss_cfg_post_gtpu(pf, &pf->hash_ctx.gtpu4,
2971                                                  gtpu_ctx_idx, cfg);
2972         else if (cfg->addl_hdrs & ICE_FLOW_SEG_HDR_IPV6)
2973                 return ice_add_rss_cfg_post_gtpu(pf, &pf->hash_ctx.gtpu6,
2974                                                  gtpu_ctx_idx, cfg);
2975
2976         return 0;
2977 }
2978
2979 static void
2980 ice_rem_rss_cfg_post(struct ice_pf *pf, uint32_t hdr)
2981 {
2982         u8 gtpu_ctx_idx = calc_gtpu_ctx_idx(hdr);
2983
2984         if (gtpu_ctx_idx >= ICE_HASH_GTPU_CTX_MAX)
2985                 return;
2986
2987         if (hdr & ICE_FLOW_SEG_HDR_IPV4)
2988                 hash_cfg_reset(&pf->hash_ctx.gtpu4.ctx[gtpu_ctx_idx]);
2989         else if (hdr & ICE_FLOW_SEG_HDR_IPV6)
2990                 hash_cfg_reset(&pf->hash_ctx.gtpu6.ctx[gtpu_ctx_idx]);
2991 }
2992
2993 int
2994 ice_rem_rss_cfg_wrap(struct ice_pf *pf, uint16_t vsi_id,
2995                      struct ice_rss_hash_cfg *cfg)
2996 {
2997         struct ice_hw *hw = ICE_PF_TO_HW(pf);
2998         int ret;
2999
3000         ret = ice_rem_rss_cfg(hw, vsi_id, cfg);
3001         if (ret && ret != ICE_ERR_DOES_NOT_EXIST)
3002                 PMD_DRV_LOG(ERR, "remove rss cfg failed");
3003
3004         ice_rem_rss_cfg_post(pf, cfg->addl_hdrs);
3005
3006         return 0;
3007 }
3008
3009 int
3010 ice_add_rss_cfg_wrap(struct ice_pf *pf, uint16_t vsi_id,
3011                      struct ice_rss_hash_cfg *cfg)
3012 {
3013         struct ice_hw *hw = ICE_PF_TO_HW(pf);
3014         int ret;
3015
3016         ret = ice_add_rss_cfg_pre(pf, cfg->addl_hdrs);
3017         if (ret)
3018                 PMD_DRV_LOG(ERR, "add rss cfg pre failed");
3019
3020         ret = ice_add_rss_cfg(hw, vsi_id, cfg);
3021         if (ret)
3022                 PMD_DRV_LOG(ERR, "add rss cfg failed");
3023
3024         ret = ice_add_rss_cfg_post(pf, cfg);
3025         if (ret)
3026                 PMD_DRV_LOG(ERR, "add rss cfg post failed");
3027
3028         return 0;
3029 }
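/*
 * Typical usage, mirroring ice_rss_hash_set() below (a sketch, not a new
 * API; all fields and macros are the ones already used in this file).
 * Note that the wrapper always returns 0 and only logs failures:
 *
 *   struct ice_rss_hash_cfg cfg = {
 *           .addl_hdrs = ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER,
 *           .hash_flds = ICE_FLOW_HASH_IPV4,
 *           .hdr_type = ICE_RSS_OUTER_HEADERS,
 *           .symm = 0,
 *   };
 *
 *   ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
 */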
3030
3031 static void
3032 ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
3033 {
3034         struct ice_hw *hw = ICE_PF_TO_HW(pf);
3035         struct ice_vsi *vsi = pf->main_vsi;
3036         struct ice_rss_hash_cfg cfg;
3037         int ret;
3038
3039 #define ICE_RSS_HF_ALL ( \
3040         RTE_ETH_RSS_IPV4 | \
3041         RTE_ETH_RSS_IPV6 | \
3042         RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
3043         RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
3044         RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
3045         RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
3046         RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
3047         RTE_ETH_RSS_NONFRAG_IPV6_SCTP)
3048
3049         ret = ice_rem_vsi_rss_cfg(hw, vsi->idx);
3050         if (ret)
3051                 PMD_DRV_LOG(ERR, "%s Remove rss vsi fail %d",
3052                             __func__, ret);
3053
3054         cfg.symm = 0;
3055         cfg.hdr_type = ICE_RSS_OUTER_HEADERS;
3056         /* Configure RSS for IPv4 with src/dst addr as input set */
3057         if (rss_hf & RTE_ETH_RSS_IPV4) {
3058                 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER;
3059                 cfg.hash_flds = ICE_FLOW_HASH_IPV4;
3060                 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
3061                 if (ret)
3062                         PMD_DRV_LOG(ERR, "%s IPV4 rss flow fail %d",
3063                                     __func__, ret);
3064         }
3065
3066         /* Configure RSS for IPv6 with src/dst addr as input set */
3067         if (rss_hf & RTE_ETH_RSS_IPV6) {
3068                 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER;
3069                 cfg.hash_flds = ICE_FLOW_HASH_IPV6;
3070                 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
3071                 if (ret)
3072                         PMD_DRV_LOG(ERR, "%s IPV6 rss flow fail %d",
3073                                     __func__, ret);
3074         }
3075
3076         /* Configure RSS for udp4 with src/dst addr and port as input set */
3077         if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP) {
3078                 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4 |
3079                                 ICE_FLOW_SEG_HDR_IPV_OTHER;
3080                 cfg.hash_flds = ICE_HASH_UDP_IPV4;
3081                 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
3082                 if (ret)
3083                         PMD_DRV_LOG(ERR, "%s UDP_IPV4 rss flow fail %d",
3084                                     __func__, ret);
3085         }
3086
3087         /* Configure RSS for udp6 with src/dst addr and port as input set */
3088         if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP) {
3089                 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV6 |
3090                                 ICE_FLOW_SEG_HDR_IPV_OTHER;
3091                 cfg.hash_flds = ICE_HASH_UDP_IPV6;
3092                 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
3093                 if (ret)
3094                         PMD_DRV_LOG(ERR, "%s UDP_IPV6 rss flow fail %d",
3095                                     __func__, ret);
3096         }
3097
3098         /* Configure RSS for tcp4 with src/dst addr and port as input set */
3099         if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP) {
3100                 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4 |
3101                                 ICE_FLOW_SEG_HDR_IPV_OTHER;
3102                 cfg.hash_flds = ICE_HASH_TCP_IPV4;
3103                 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
3104                 if (ret)
3105                         PMD_DRV_LOG(ERR, "%s TCP_IPV4 rss flow fail %d",
3106                                     __func__, ret);
3107         }
3108
3109         /* Configure RSS for tcp6 with src/dst addr and port as input set */
3110         if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP) {
3111                 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6 |
3112                                 ICE_FLOW_SEG_HDR_IPV_OTHER;
3113                 cfg.hash_flds = ICE_HASH_TCP_IPV6;
3114                 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
3115                 if (ret)
3116                         PMD_DRV_LOG(ERR, "%s TCP_IPV6 rss flow fail %d",
3117                                     __func__, ret);
3118         }
3119
3120         /* Configure RSS for sctp4 with src/dst addr and port as input set */
3121         if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_SCTP) {
3122                 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4 |
3123                                 ICE_FLOW_SEG_HDR_IPV_OTHER;
3124                 cfg.hash_flds = ICE_HASH_SCTP_IPV4;
3125                 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
3126                 if (ret)
3127                         PMD_DRV_LOG(ERR, "%s SCTP_IPV4 rss flow fail %d",
3128                                     __func__, ret);
3129         }
3130
3131         /* Configure RSS for sctp6 with src/dst addr and port as input set */
3132         if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_SCTP) {
3133                 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV6 |
3134                                 ICE_FLOW_SEG_HDR_IPV_OTHER;
3135                 cfg.hash_flds = ICE_HASH_SCTP_IPV6;
3136                 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
3137                 if (ret)
3138                         PMD_DRV_LOG(ERR, "%s SCTP_IPV6 rss flow fail %d",
3139                                     __func__, ret);
3140         }
3141
3142         if (rss_hf & RTE_ETH_RSS_IPV4) {
3143                 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_IPV4 |
3144                                 ICE_FLOW_SEG_HDR_IPV_OTHER;
3145                 cfg.hash_flds = ICE_FLOW_HASH_IPV4;
3146                 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
3147                 if (ret)
3148                         PMD_DRV_LOG(ERR, "%s PPPoE_IPV4 rss flow fail %d",
3149                                     __func__, ret);
3150         }
3151
3152         if (rss_hf & RTE_ETH_RSS_IPV6) {
3153                 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_IPV6 |
3154                                 ICE_FLOW_SEG_HDR_IPV_OTHER;
3155                 cfg.hash_flds = ICE_FLOW_HASH_IPV6;
3156                 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
3157                 if (ret)
3158                         PMD_DRV_LOG(ERR, "%s PPPoE_IPV6 rss flow fail %d",
3159                                     __func__, ret);
3160         }
3161
3162         if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP) {
3163                 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_UDP |
3164                                 ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER;
3165                 cfg.hash_flds = ICE_HASH_UDP_IPV4;
3166                 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
3167                 if (ret)
3168                         PMD_DRV_LOG(ERR, "%s PPPoE_IPV4_UDP rss flow fail %d",
3169                                     __func__, ret);
3170         }
3171
3172         if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP) {
3173                 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_UDP |
3174                                 ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER;
3175                 cfg.hash_flds = ICE_HASH_UDP_IPV6;
3176                 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
3177                 if (ret)
3178                         PMD_DRV_LOG(ERR, "%s PPPoE_IPV6_UDP rss flow fail %d",
3179                                     __func__, ret);
3180         }
3181
3182         if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP) {
3183                 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_TCP |
3184                                 ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER;
3185                 cfg.hash_flds = ICE_HASH_TCP_IPV4;
3186                 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
3187                 if (ret)
3188                         PMD_DRV_LOG(ERR, "%s PPPoE_IPV4_TCP rss flow fail %d",
3189                                     __func__, ret);
3190         }
3191
3192         if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP) {
3193                 cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_TCP |
3194                                 ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER;
3195                 cfg.hash_flds = ICE_HASH_TCP_IPV6;
3196                 ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
3197                 if (ret)
3198                         PMD_DRV_LOG(ERR, "%s PPPoE_IPV6_TCP rss flow fail %d",
3199                                     __func__, ret);
3200         }
3201
3202         pf->rss_hf = rss_hf & ICE_RSS_HF_ALL;
3203 }
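/*
 * Example: rss_hf = RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_NONFRAG_IPV4_UDP
 * programs the plain IPv4 and IPv4+UDP rules above plus their PPPoE
 * variants, and pf->rss_hf then records the accepted subset
 * (rss_hf & ICE_RSS_HF_ALL).
 */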
3204
3205 static void
3206 ice_get_default_rss_key(uint8_t *rss_key, uint32_t rss_key_size)
3207 {
3208         static struct ice_aqc_get_set_rss_keys default_key;
3209         static bool default_key_done;
3210         uint8_t *key = (uint8_t *)&default_key;
3211         size_t i;
3212
3213         if (rss_key_size > sizeof(default_key)) {
3214                 PMD_DRV_LOG(WARNING,
3215                             "requested size %u is larger than default %zu, "
3216                             "only %zu bytes of the key will be used",
3217                             rss_key_size, sizeof(default_key),
3218                             sizeof(default_key));
3219         }
3220
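        /*
         * The key below is generated once per process and cached, so
         * every port that falls back to the default key hashes
         * identically.
         */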
3221         if (!default_key_done) {
3222                 /* Generate a random default hash key */
3223                 for (i = 0; i < sizeof(default_key); i++)
3224                         key[i] = (uint8_t)rte_rand();
3225                 default_key_done = true;
3226         }
3227         rte_memcpy(rss_key, key, RTE_MIN(rss_key_size, sizeof(default_key)));
3228 }
3229
3230 static int ice_init_rss(struct ice_pf *pf)
3231 {
3232         struct ice_hw *hw = ICE_PF_TO_HW(pf);
3233         struct ice_vsi *vsi = pf->main_vsi;
3234         struct rte_eth_dev_data *dev_data = pf->dev_data;
3235         struct ice_aq_get_set_rss_lut_params lut_params;
3236         struct rte_eth_rss_conf *rss_conf;
3237         struct ice_aqc_get_set_rss_keys key;
3238         uint16_t i, nb_q;
3239         int ret = 0;
3240         bool is_safe_mode = pf->adapter->is_safe_mode;
3241         uint32_t reg;
3242
3243         rss_conf = &dev_data->dev_conf.rx_adv_conf.rss_conf;
3244         nb_q = dev_data->nb_rx_queues;
3245         vsi->rss_key_size = ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE;
3246         vsi->rss_lut_size = pf->hash_lut_size;
3247
3248         if (nb_q == 0) {
3249                 PMD_DRV_LOG(WARNING,
3250                         "RSS is not supported as the number of Rx queues is zero");
3251                 return 0;
3252         }
3253
3254         if (is_safe_mode) {
3255                 PMD_DRV_LOG(WARNING, "RSS is not supported in safe mode");
3256                 return 0;
3257         }
3258
3259         if (!vsi->rss_key) {
3260                 vsi->rss_key = rte_zmalloc(NULL,
3261                                            vsi->rss_key_size, 0);
3262                 if (vsi->rss_key == NULL) {
3263                         PMD_DRV_LOG(ERR, "Failed to allocate memory for rss_key");
3264                         return -ENOMEM;
3265                 }
3266         }
3267         if (!vsi->rss_lut) {
3268                 vsi->rss_lut = rte_zmalloc(NULL,
3269                                            vsi->rss_lut_size, 0);
3270                 if (vsi->rss_lut == NULL) {
3271                         PMD_DRV_LOG(ERR, "Failed to allocate memory for rss_lut");
3272                         rte_free(vsi->rss_key);
3273                         vsi->rss_key = NULL;
3274                         return -ENOMEM;
3275                 }
3276         }
3277         /* configure RSS key */
3278         if (!rss_conf->rss_key)
3279                 ice_get_default_rss_key(vsi->rss_key, vsi->rss_key_size);
3280         else
3281                 rte_memcpy(vsi->rss_key, rss_conf->rss_key,
3282                            RTE_MIN(rss_conf->rss_key_len,
3283                                    vsi->rss_key_size));
3284
3285         rte_memcpy(key.standard_rss_key, vsi->rss_key,
3286                 RTE_MIN(sizeof(key.standard_rss_key), vsi->rss_key_size));
3287         ret = ice_aq_set_rss_key(hw, vsi->idx, &key);
3288         if (ret)
3289                 goto out;
3290
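        /*
         * Example of the round-robin LUT built below: with nb_q = 4 the
         * table reads 0,1,2,3,0,1,2,3,... so each Rx queue owns roughly
         * rss_lut_size / nb_q of the hash buckets.
         */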
3291         /* init RSS LUT table */
3292         for (i = 0; i < vsi->rss_lut_size; i++)
3293                 vsi->rss_lut[i] = i % nb_q;
3294
3295         lut_params.vsi_handle = vsi->idx;
3296         lut_params.lut_size = vsi->rss_lut_size;
3297         lut_params.lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF;
3298         lut_params.lut = vsi->rss_lut;
3299         lut_params.global_lut_id = 0;
3300         ret = ice_aq_set_rss_lut(hw, &lut_params);
3301         if (ret)
3302                 goto out;
3303
3304         /* Select the symmetric Toeplitz hash scheme for this VSI. */
3305         reg = ICE_READ_REG(hw, VSIQF_HASH_CTL(vsi->vsi_id));
3306         reg = (reg & (~VSIQF_HASH_CTL_HASH_SCHEME_M)) |
3307                 (1 << VSIQF_HASH_CTL_HASH_SCHEME_S);
3308         ICE_WRITE_REG(hw, VSIQF_HASH_CTL(vsi->vsi_id), reg);
3309
3310         /* RSS hash configuration */
3311         ice_rss_hash_set(pf, rss_conf->rss_hf);
3312
3313         return 0;
3314 out:
3315         rte_free(vsi->rss_key);
3316         vsi->rss_key = NULL;
3317         rte_free(vsi->rss_lut);
3318         vsi->rss_lut = NULL;
3319         return -EINVAL;
3320 }
3321
3322 static int
3323 ice_dev_configure(struct rte_eth_dev *dev)
3324 {
3325         struct ice_adapter *ad =
3326                 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
3327         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3328         int ret;
3329
3330         /* Initialize to TRUE. If any Rx queue doesn't meet the bulk
3331          * allocation or vector Rx preconditions, the flag will be reset.
3332          */
3333         ad->rx_bulk_alloc_allowed = true;
3334         ad->tx_simple_allowed = true;
3335
3336         if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
3337                 dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
3338
3339         if (dev->data->nb_rx_queues) {
3340                 ret = ice_init_rss(pf);
3341                 if (ret) {
3342                         PMD_DRV_LOG(ERR, "Failed to enable rss for PF");
3343                         return ret;
3344                 }
3345         }
3346
3347         return 0;
3348 }
3349
3350 static void
3351 __vsi_queues_bind_intr(struct ice_vsi *vsi, uint16_t msix_vect,
3352                        int base_queue, int nb_queue)
3353 {
3354         struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
3355         uint32_t val, val_tx;
3356         int rx_low_latency, i;
3357
3358         rx_low_latency = vsi->adapter->devargs.rx_low_latency;
3359         for (i = 0; i < nb_queue; i++) {
3360                 /* do the actual bind */
3361                 val = (msix_vect & QINT_RQCTL_MSIX_INDX_M) |
3362                       (0 << QINT_RQCTL_ITR_INDX_S) | QINT_RQCTL_CAUSE_ENA_M;
3363                 val_tx = (msix_vect & QINT_TQCTL_MSIX_INDX_M) |
3364                          (0 << QINT_TQCTL_ITR_INDX_S) | QINT_TQCTL_CAUSE_ENA_M;
3365
3366                 PMD_DRV_LOG(INFO, "queue %d is binding to vect %d",
3367                             base_queue + i, msix_vect);
3368
3369                 /* set ITR0 value */
3370                 if (rx_low_latency) {
3371                         /**
3372                          * Empirical configuration for optimal real-time
3373                          * latency: reduce interrupt throttling to 2us
3374                          */
3375                         ICE_WRITE_REG(hw, GLINT_ITR(0, msix_vect), 0x1);
3376                         ICE_WRITE_REG(hw, QRX_ITR(base_queue + i),
3377                                       QRX_ITR_NO_EXPR_M);
3378                 } else {
3379                         ICE_WRITE_REG(hw, GLINT_ITR(0, msix_vect), 0x2);
3380                         ICE_WRITE_REG(hw, QRX_ITR(base_queue + i), 0);
3381                 }
3382
3383                 ICE_WRITE_REG(hw, QINT_RQCTL(base_queue + i), val);
3384                 ICE_WRITE_REG(hw, QINT_TQCTL(base_queue + i), val_tx);
3385         }
3386 }
3387
3388 void
3389 ice_vsi_queues_bind_intr(struct ice_vsi *vsi)
3390 {
3391         struct rte_eth_dev *dev = &rte_eth_devices[vsi->adapter->pf.dev_data->port_id];
3392         struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
3393         struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
3394         struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
3395         uint16_t msix_vect = vsi->msix_intr;
3396         uint16_t nb_msix = RTE_MIN(vsi->nb_msix,
3397                                    rte_intr_nb_efd_get(intr_handle));
3398         uint16_t queue_idx = 0;
3399         int record = 0;
3400         int i;
3401
3402         /* clear Rx/Tx queue interrupt */
3403         for (i = 0; i < vsi->nb_used_qps; i++) {
3404                 ICE_WRITE_REG(hw, QINT_TQCTL(vsi->base_queue + i), 0);
3405                 ICE_WRITE_REG(hw, QINT_RQCTL(vsi->base_queue + i), 0);
3406         }
3407
3408         /* PF bind interrupt */
3409         if (rte_intr_dp_is_en(intr_handle)) {
3410                 queue_idx = 0;
3411                 record = 1;
3412         }
3413
3414         for (i = 0; i < vsi->nb_used_qps; i++) {
3415                 if (nb_msix <= 1) {
3416                         if (!rte_intr_allow_others(intr_handle))
3417                                 msix_vect = ICE_MISC_VEC_ID;
3418
3419                         /* uio: map all queues to one msix_vect */
3420                         __vsi_queues_bind_intr(vsi, msix_vect,
3421                                                vsi->base_queue + i,
3422                                                vsi->nb_used_qps - i);
3423
3424                         for (; !!record && i < vsi->nb_used_qps; i++)
3425                                 rte_intr_vec_list_index_set(intr_handle,
3426                                                 queue_idx + i, msix_vect);
3427
3428                         break;
3429                 }
3430
3431                 /* vfio 1:1 queue/msix_vect mapping */
3432                 __vsi_queues_bind_intr(vsi, msix_vect,
3433                                        vsi->base_queue + i, 1);
3434
3435                 if (!!record)
3436                         rte_intr_vec_list_index_set(intr_handle,
3437                                                            queue_idx + i,
3438                                                            msix_vect);
3439
3440                 msix_vect++;
3441                 nb_msix--;
3442         }
3443 }
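/*
 * Mapping summary (derived from the loop above): with vfio and enough
 * vectors, each queue is bound to its own MSI-X vector (1:1); once only
 * one vector remains, or with uio which cannot spread across vectors,
 * all remaining queues are bound to that single vector in one call.
 */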
3444
3445 void
3446 ice_vsi_enable_queues_intr(struct ice_vsi *vsi)
3447 {
3448         struct rte_eth_dev *dev = &rte_eth_devices[vsi->adapter->pf.dev_data->port_id];
3449         struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
3450         struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
3451         struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
3452         uint16_t msix_intr, i;
3453
3454         if (rte_intr_allow_others(intr_handle))
3455                 for (i = 0; i < vsi->nb_used_qps; i++) {
3456                         msix_intr = vsi->msix_intr + i;
3457                         ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr),
3458                                       GLINT_DYN_CTL_INTENA_M |
3459                                       GLINT_DYN_CTL_CLEARPBA_M |
3460                                       GLINT_DYN_CTL_ITR_INDX_M |
3461                                       GLINT_DYN_CTL_WB_ON_ITR_M);
3462                 }
3463         else
3464                 ICE_WRITE_REG(hw, GLINT_DYN_CTL(0),
3465                               GLINT_DYN_CTL_INTENA_M |
3466                               GLINT_DYN_CTL_CLEARPBA_M |
3467                               GLINT_DYN_CTL_ITR_INDX_M |
3468                               GLINT_DYN_CTL_WB_ON_ITR_M);
3469 }
3470
3471 static int
3472 ice_rxq_intr_setup(struct rte_eth_dev *dev)
3473 {
3474         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3475         struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
3476         struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
3477         struct ice_vsi *vsi = pf->main_vsi;
3478         uint32_t intr_vector = 0;
3479
3480         rte_intr_disable(intr_handle);
3481
3482         /* check and configure queue intr-vector mapping */
3483         if ((rte_intr_cap_multiple(intr_handle) ||
3484              !RTE_ETH_DEV_SRIOV(dev).active) &&
3485             dev->data->dev_conf.intr_conf.rxq != 0) {
3486                 intr_vector = dev->data->nb_rx_queues;
3487                 if (intr_vector > ICE_MAX_INTR_QUEUE_NUM) {
3488                         PMD_DRV_LOG(ERR, "At most %d intr queues supported",
3489                                     ICE_MAX_INTR_QUEUE_NUM);
3490                         return -ENOTSUP;
3491                 }
3492                 if (rte_intr_efd_enable(intr_handle, intr_vector))
3493                         return -1;
3494         }
3495
3496         if (rte_intr_dp_is_en(intr_handle)) {
3497                 if (rte_intr_vec_list_alloc(intr_handle, NULL,
3498                                                    dev->data->nb_rx_queues)) {
3499                         PMD_DRV_LOG(ERR,
3500                                     "Failed to allocate %d rx_queues intr_vec",
3501                                     dev->data->nb_rx_queues);
3502                         return -ENOMEM;
3503                 }
3504         }
3505
3506         /* Map queues with MSIX interrupt */
3507         vsi->nb_used_qps = dev->data->nb_rx_queues;
3508         ice_vsi_queues_bind_intr(vsi);
3509
3510         /* Enable interrupts for all the queues */
3511         ice_vsi_enable_queues_intr(vsi);
3512
3513         rte_intr_enable(intr_handle);
3514
3515         return 0;
3516 }
3517
3518 static void
3519 ice_get_init_link_status(struct rte_eth_dev *dev)
3520 {
3521         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3522         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3523         bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false;
3524         struct ice_link_status link_status;
3525         int ret;
3526
3527         ret = ice_aq_get_link_info(hw->port_info, enable_lse,
3528                                    &link_status, NULL);
3529         if (ret != ICE_SUCCESS) {
3530                 PMD_DRV_LOG(ERR, "Failed to get link info");
3531                 pf->init_link_up = false;
3532                 return;
3533         }
3534
3535         if (link_status.link_info & ICE_AQ_LINK_UP)
3536                 pf->init_link_up = true;
3537 }
3538
3539 static int
3540 ice_pps_out_cfg(struct ice_hw *hw, int idx, int timer)
3541 {
3542         uint64_t current_time, start_time;
3543         uint32_t hi, lo, lo2, func, val;
3544
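        /*
         * Read the 64-bit PHC time as two 32-bit halves. If the low half
         * wrapped between the reads (lo2 < lo), the high half may belong
         * to the new cycle, so both halves are re-read to get a coherent
         * value.
         */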
3545         lo = ICE_READ_REG(hw, GLTSYN_TIME_L(timer));
3546         hi = ICE_READ_REG(hw, GLTSYN_TIME_H(timer));
3547         lo2 = ICE_READ_REG(hw, GLTSYN_TIME_L(timer));
3548
3549         if (lo2 < lo) {
3550                 lo = ICE_READ_REG(hw, GLTSYN_TIME_L(timer));
3551                 hi = ICE_READ_REG(hw, GLTSYN_TIME_H(timer));
3552         }
3553
3554         current_time = ((uint64_t)hi << 32) | lo;
3555
3556         start_time = (current_time + NSEC_PER_SEC) /
3557                         NSEC_PER_SEC * NSEC_PER_SEC;
3558         start_time = start_time - PPS_OUT_DELAY_NS;
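        /*
         * Worked example for the rounding above: with current_time = 1.3s,
         * (1300000000 + NSEC_PER_SEC) / NSEC_PER_SEC * NSEC_PER_SEC
         * = 2000000000 ns, i.e. the next full second boundary, from which
         * PPS_OUT_DELAY_NS is then subtracted.
         */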
3559
3560         func = 8 + idx + timer * 4;
3561         val = GLGEN_GPIO_CTL_PIN_DIR_M |
3562                 ((func << GLGEN_GPIO_CTL_PIN_FUNC_S) &
3563                 GLGEN_GPIO_CTL_PIN_FUNC_M);
3564
3565         /* Write clkout with half of period value */
3566         ICE_WRITE_REG(hw, GLTSYN_CLKO(idx, timer), NSEC_PER_SEC / 2);
3567
3568         /* Write TARGET time register */
3569         ICE_WRITE_REG(hw, GLTSYN_TGT_L(idx, timer), start_time & 0xffffffff);
3570         ICE_WRITE_REG(hw, GLTSYN_TGT_H(idx, timer), start_time >> 32);
3571
3572         /* Write AUX_OUT register */
3573         ICE_WRITE_REG(hw, GLTSYN_AUX_OUT(idx, timer),
3574                       GLTSYN_AUX_OUT_0_OUT_ENA_M | GLTSYN_AUX_OUT_0_OUTMOD_M);
3575
3576         /* Write GPIO CTL register */
3577         ICE_WRITE_REG(hw, GLGEN_GPIO_CTL(idx), val);
3578
3579         return 0;
3580 }
3581
3582 static int
3583 ice_dev_start(struct rte_eth_dev *dev)
3584 {
3585         struct rte_eth_dev_data *data = dev->data;
3586         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3587         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3588         struct ice_vsi *vsi = pf->main_vsi;
3589         struct ice_adapter *ad =
3590                         ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
3591         uint16_t nb_rxq = 0;
3592         uint16_t nb_txq, i;
3593         uint16_t max_frame_size;
3594         int mask, ret;
3595         uint8_t timer = hw->func_caps.ts_func_info.tmr_index_owned;
3596         uint32_t pin_idx = ad->devargs.pin_idx;
3597
3598         /* program Tx queues' context in hardware */
3599         for (nb_txq = 0; nb_txq < data->nb_tx_queues; nb_txq++) {
3600                 ret = ice_tx_queue_start(dev, nb_txq);
3601                 if (ret) {
3602                         PMD_DRV_LOG(ERR, "fail to start Tx queue %u", nb_txq);
3603                         goto tx_err;
3604                 }
3605         }
3606
3607         /* program Rx queues' context in hardware */
3608         for (nb_rxq = 0; nb_rxq < data->nb_rx_queues; nb_rxq++) {
3609                 ret = ice_rx_queue_start(dev, nb_rxq);
3610                 if (ret) {
3611                         PMD_DRV_LOG(ERR, "fail to start Rx queue %u", nb_rxq);
3612                         goto rx_err;
3613                 }
3614         }
3615
3616         ice_set_rx_function(dev);
3617         ice_set_tx_function(dev);
3618
3619         mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
3620                         RTE_ETH_VLAN_EXTEND_MASK;
3621         ret = ice_vlan_offload_set(dev, mask);
3622         if (ret) {
3623                 PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
3624                 goto rx_err;
3625         }
3626
3627         /* enable Rx interrupts and map Rx queues to interrupt vectors */
3628         if (ice_rxq_intr_setup(dev))
3629                 return -EIO;
3630
3631         /* Enable receiving broadcast packets and transmitting packets */
3632         ret = ice_set_vsi_promisc(hw, vsi->idx,
3633                                   ICE_PROMISC_BCAST_RX | ICE_PROMISC_BCAST_TX |
3634                                   ICE_PROMISC_UCAST_TX | ICE_PROMISC_MCAST_TX,
3635                                   0);
3636         if (ret != ICE_SUCCESS)
3637                 PMD_DRV_LOG(INFO, "failed to set VSI broadcast mode");
3638
3639         ret = ice_aq_set_event_mask(hw, hw->port_info->lport,
3640                                     ((u16)(ICE_AQ_LINK_EVENT_LINK_FAULT |
3641                                      ICE_AQ_LINK_EVENT_PHY_TEMP_ALARM |
3642                                      ICE_AQ_LINK_EVENT_EXCESSIVE_ERRORS |
3643                                      ICE_AQ_LINK_EVENT_SIGNAL_DETECT |
3644                                      ICE_AQ_LINK_EVENT_AN_COMPLETED |
3645                                      ICE_AQ_LINK_EVENT_PORT_TX_SUSPENDED)),
3646                                      NULL);
3647         if (ret != ICE_SUCCESS)
3648                 PMD_DRV_LOG(WARNING, "Failed to set link event mask");
3649
3650         ice_get_init_link_status(dev);
3651
3652         ice_dev_set_link_up(dev);
3653
3654         /* Call get_link_info aq command to enable/disable LSE */
3655         ice_link_update(dev, 1);
3656
3657         pf->adapter_stopped = false;
3658
3659         /* Set the max frame size to the default value */
3660         max_frame_size = pf->dev_data->mtu ?
3661                 pf->dev_data->mtu + ICE_ETH_OVERHEAD :
3662                 ICE_FRAME_SIZE_MAX;
3663
3664         /* Program the max frame size into HW */
3665         ice_aq_set_mac_cfg(hw, max_frame_size, NULL);
3666
3667         if (ad->devargs.pps_out_ena) {
3668                 ret = ice_pps_out_cfg(hw, pin_idx, timer);
3669                 if (ret) {
3670                         PMD_DRV_LOG(ERR, "Failed to configure 1PPS output");
3671                         goto rx_err;
3672                 }
3673         }
3674
3675         return 0;
3676
3677         /* stop the already-started queues if any queue failed to start */
3678 rx_err:
3679         for (i = 0; i < nb_rxq; i++)
3680                 ice_rx_queue_stop(dev, i);
3681 tx_err:
3682         for (i = 0; i < nb_txq; i++)
3683                 ice_tx_queue_stop(dev, i);
3684
3685         return -EIO;
3686 }
3687
3688 static int
3689 ice_dev_reset(struct rte_eth_dev *dev)
3690 {
3691         int ret;
3692
3693         if (dev->data->sriov.active)
3694                 return -ENOTSUP;
3695
3696         ret = ice_dev_uninit(dev);
3697         if (ret) {
3698                 PMD_INIT_LOG(ERR, "failed to uninit device, status = %d", ret);
3699                 return -ENXIO;
3700         }
3701
3702         ret = ice_dev_init(dev);
3703         if (ret) {
3704                 PMD_INIT_LOG(ERR, "failed to init device, status = %d", ret);
3705                 return -ENXIO;
3706         }
3707
3708         return 0;
3709 }
3710
3711 static int
3712 ice_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
3713 {
3714         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3715         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3716         struct ice_vsi *vsi = pf->main_vsi;
3717         struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
3718         bool is_safe_mode = pf->adapter->is_safe_mode;
3719         u64 phy_type_low;
3720         u64 phy_type_high;
3721
3722         dev_info->min_rx_bufsize = ICE_BUF_SIZE_MIN;
3723         dev_info->max_rx_pktlen = ICE_FRAME_SIZE_MAX;
3724         dev_info->max_rx_queues = vsi->nb_qps;
3725         dev_info->max_tx_queues = vsi->nb_qps;
3726         dev_info->max_mac_addrs = vsi->max_macaddrs;
3727         dev_info->max_vfs = pci_dev->max_vfs;
3728         dev_info->max_mtu = dev_info->max_rx_pktlen - ICE_ETH_OVERHEAD;
3729         dev_info->min_mtu = RTE_ETHER_MIN_MTU;
3730
3731         dev_info->rx_offload_capa =
3732                 RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
3733                 RTE_ETH_RX_OFFLOAD_KEEP_CRC |
3734                 RTE_ETH_RX_OFFLOAD_SCATTER |
3735                 RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
3736         dev_info->tx_offload_capa =
3737                 RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
3738                 RTE_ETH_TX_OFFLOAD_TCP_TSO |
3739                 RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
3740                 RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
3741         dev_info->flow_type_rss_offloads = 0;
3742
3743         if (!is_safe_mode) {
3744                 dev_info->rx_offload_capa |=
3745                         RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
3746                         RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
3747                         RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
3748                         RTE_ETH_RX_OFFLOAD_QINQ_STRIP |
3749                         RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
3750                         RTE_ETH_RX_OFFLOAD_VLAN_EXTEND |
3751                         RTE_ETH_RX_OFFLOAD_RSS_HASH |
3752                         RTE_ETH_RX_OFFLOAD_TIMESTAMP;
3753                 dev_info->tx_offload_capa |=
3754                         RTE_ETH_TX_OFFLOAD_QINQ_INSERT |
3755                         RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
3756                         RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
3757                         RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
3758                         RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
3759                         RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
3760                         RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM;
3761                 dev_info->flow_type_rss_offloads |= ICE_RSS_OFFLOAD_ALL;
3762         }
3763
3764         dev_info->rx_queue_offload_capa = 0;
3765         dev_info->tx_queue_offload_capa = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
3766
3767         dev_info->reta_size = pf->hash_lut_size;
3768         dev_info->hash_key_size = (VSIQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
3769
3770         dev_info->default_rxconf = (struct rte_eth_rxconf) {
3771                 .rx_thresh = {
3772                         .pthresh = ICE_DEFAULT_RX_PTHRESH,
3773                         .hthresh = ICE_DEFAULT_RX_HTHRESH,
3774                         .wthresh = ICE_DEFAULT_RX_WTHRESH,
3775                 },
3776                 .rx_free_thresh = ICE_DEFAULT_RX_FREE_THRESH,
3777                 .rx_drop_en = 0,
3778                 .offloads = 0,
3779         };
3780
3781         dev_info->default_txconf = (struct rte_eth_txconf) {
3782                 .tx_thresh = {
3783                         .pthresh = ICE_DEFAULT_TX_PTHRESH,
3784                         .hthresh = ICE_DEFAULT_TX_HTHRESH,
3785                         .wthresh = ICE_DEFAULT_TX_WTHRESH,
3786                 },
3787                 .tx_free_thresh = ICE_DEFAULT_TX_FREE_THRESH,
3788                 .tx_rs_thresh = ICE_DEFAULT_TX_RSBIT_THRESH,
3789                 .offloads = 0,
3790         };
3791
3792         dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
3793                 .nb_max = ICE_MAX_RING_DESC,
3794                 .nb_min = ICE_MIN_RING_DESC,
3795                 .nb_align = ICE_ALIGN_RING_DESC,
3796         };
3797
3798         dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
3799                 .nb_max = ICE_MAX_RING_DESC,
3800                 .nb_min = ICE_MIN_RING_DESC,
3801                 .nb_align = ICE_ALIGN_RING_DESC,
3802         };
3803
3804         dev_info->speed_capa = RTE_ETH_LINK_SPEED_10M |
3805                                RTE_ETH_LINK_SPEED_100M |
3806                                RTE_ETH_LINK_SPEED_1G |
3807                                RTE_ETH_LINK_SPEED_2_5G |
3808                                RTE_ETH_LINK_SPEED_5G |
3809                                RTE_ETH_LINK_SPEED_10G |
3810                                RTE_ETH_LINK_SPEED_20G |
3811                                RTE_ETH_LINK_SPEED_25G;
3812
3813         phy_type_low = hw->port_info->phy.phy_type_low;
3814         phy_type_high = hw->port_info->phy.phy_type_high;
3815
3816         if (ICE_PHY_TYPE_SUPPORT_50G(phy_type_low))
3817                 dev_info->speed_capa |= RTE_ETH_LINK_SPEED_50G;
3818
3819         if (ICE_PHY_TYPE_SUPPORT_100G_LOW(phy_type_low) ||
3820                         ICE_PHY_TYPE_SUPPORT_100G_HIGH(phy_type_high))
3821                 dev_info->speed_capa |= RTE_ETH_LINK_SPEED_100G;
3822
3823         dev_info->nb_rx_queues = dev->data->nb_rx_queues;
3824         dev_info->nb_tx_queues = dev->data->nb_tx_queues;
3825
3826         dev_info->default_rxportconf.burst_size = ICE_RX_MAX_BURST;
3827         dev_info->default_txportconf.burst_size = ICE_TX_MAX_BURST;
3828         dev_info->default_rxportconf.nb_queues = 1;
3829         dev_info->default_txportconf.nb_queues = 1;
3830         dev_info->default_rxportconf.ring_size = ICE_BUF_SIZE_MIN;
3831         dev_info->default_txportconf.ring_size = ICE_BUF_SIZE_MIN;
3832
3833         return 0;
3834 }
3835
3836 static inline int
3837 ice_atomic_read_link_status(struct rte_eth_dev *dev,
3838                             struct rte_eth_link *link)
3839 {
3840         struct rte_eth_link *dst = link;
3841         struct rte_eth_link *src = &dev->data->dev_link;
3842
3843         if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
3844                                 *(uint64_t *)src) == 0)
3845                 return -1;
3846
3847         return 0;
3848 }
3849
3850 static inline int
3851 ice_atomic_write_link_status(struct rte_eth_dev *dev,
3852                              struct rte_eth_link *link)
3853 {
3854         struct rte_eth_link *dst = &dev->data->dev_link;
3855         struct rte_eth_link *src = link;
3856
3857         if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
3858                                 *(uint64_t *)src) == 0)
3859                 return -1;
3860
3861         return 0;
3862 }
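/*
 * Both helpers above rely on struct rte_eth_link fitting in a single
 * 64-bit word: the compare-and-set makes the read/write of
 * dev->data->dev_link atomic, so no reader can observe a torn
 * speed/duplex/status combination.
 */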
3863
3864 static int
3865 ice_link_update(struct rte_eth_dev *dev, int wait_to_complete)
3866 {
3867 #define CHECK_INTERVAL 100  /* 100ms */
3868 #define MAX_REPEAT_TIME 10  /* 1s (10 * 100ms) in total */
3869         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3870         struct ice_link_status link_status;
3871         struct rte_eth_link link, old;
3872         int status;
3873         unsigned int rep_cnt = MAX_REPEAT_TIME;
3874         bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false;
3875
3876         memset(&link, 0, sizeof(link));
3877         memset(&old, 0, sizeof(old));
3878         memset(&link_status, 0, sizeof(link_status));
3879         ice_atomic_read_link_status(dev, &old);
3880
3881         do {
3882                 /* Get link status information from hardware */
3883                 status = ice_aq_get_link_info(hw->port_info, enable_lse,
3884                                               &link_status, NULL);
3885                 if (status != ICE_SUCCESS) {
3886                         link.link_speed = RTE_ETH_SPEED_NUM_100M;
3887                         link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
3888                         PMD_DRV_LOG(ERR, "Failed to get link info");
3889                         goto out;
3890                 }
3891
3892                 link.link_status = link_status.link_info & ICE_AQ_LINK_UP;
3893                 if (!wait_to_complete || link.link_status)
3894                         break;
3895
3896                 rte_delay_ms(CHECK_INTERVAL);
3897         } while (--rep_cnt);
3898
3899         if (!link.link_status)
3900                 goto out;
3901
3902         /* Full-duplex operation at all supported speeds */
3903         link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
3904
3905         /* Parse the link status */
3906         switch (link_status.link_speed) {
3907         case ICE_AQ_LINK_SPEED_10MB:
3908                 link.link_speed = RTE_ETH_SPEED_NUM_10M;
3909                 break;
3910         case ICE_AQ_LINK_SPEED_100MB:
3911                 link.link_speed = RTE_ETH_SPEED_NUM_100M;
3912                 break;
3913         case ICE_AQ_LINK_SPEED_1000MB:
3914                 link.link_speed = RTE_ETH_SPEED_NUM_1G;
3915                 break;
3916         case ICE_AQ_LINK_SPEED_2500MB:
3917                 link.link_speed = RTE_ETH_SPEED_NUM_2_5G;
3918                 break;
3919         case ICE_AQ_LINK_SPEED_5GB:
3920                 link.link_speed = RTE_ETH_SPEED_NUM_5G;
3921                 break;
3922         case ICE_AQ_LINK_SPEED_10GB:
3923                 link.link_speed = RTE_ETH_SPEED_NUM_10G;
3924                 break;
3925         case ICE_AQ_LINK_SPEED_20GB:
3926                 link.link_speed = RTE_ETH_SPEED_NUM_20G;
3927                 break;
3928         case ICE_AQ_LINK_SPEED_25GB:
3929                 link.link_speed = RTE_ETH_SPEED_NUM_25G;
3930                 break;
3931         case ICE_AQ_LINK_SPEED_40GB:
3932                 link.link_speed = RTE_ETH_SPEED_NUM_40G;
3933                 break;
3934         case ICE_AQ_LINK_SPEED_50GB:
3935                 link.link_speed = RTE_ETH_SPEED_NUM_50G;
3936                 break;
3937         case ICE_AQ_LINK_SPEED_100GB:
3938                 link.link_speed = RTE_ETH_SPEED_NUM_100G;
3939                 break;
3940         case ICE_AQ_LINK_SPEED_UNKNOWN:
3941                 PMD_DRV_LOG(ERR, "Unknown link speed");
3942                 link.link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
3943                 break;
3944         default:
3945                 PMD_DRV_LOG(ERR, "No link speed reported");
3946                 link.link_speed = RTE_ETH_SPEED_NUM_NONE;
3947                 break;
3948         }
3949
3950         link.link_autoneg = !(dev->data->dev_conf.link_speeds &
3951                               RTE_ETH_LINK_SPEED_FIXED);
3952
3953 out:
3954         ice_atomic_write_link_status(dev, &link);
3955         if (link.link_status == old.link_status)
3956                 return -1;
3957
3958         return 0;
3959 }
3960
3961 /* Force the physical link state by getting the current PHY capabilities from
3962  * hardware and setting the PHY config based on the determined capabilities. If
3963  * link changes, link event will be triggered because both the Enable Automatic
3964  * Link Update and LESM Enable bits are set when setting the PHY capabilities.
3965  */
3966 static enum ice_status
3967 ice_force_phys_link_state(struct ice_hw *hw, bool link_up)
3968 {
3969         struct ice_aqc_set_phy_cfg_data cfg = { 0 };
3970         struct ice_aqc_get_phy_caps_data *pcaps;
3971         struct ice_port_info *pi;
3972         enum ice_status status;
3973
3974         if (!hw || !hw->port_info)
3975                 return ICE_ERR_PARAM;
3976
3977         pi = hw->port_info;
3978
3979         pcaps = (struct ice_aqc_get_phy_caps_data *)
3980                 ice_malloc(hw, sizeof(*pcaps));
3981         if (!pcaps)
3982                 return ICE_ERR_NO_MEMORY;
3983
3984         status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG,
3985                                      pcaps, NULL);
3986         if (status)
3987                 goto out;
3988
3989         /* No change in link */
3990         if (link_up == !!(pcaps->caps & ICE_AQC_PHY_EN_LINK) &&
3991             link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP))
3992                 goto out;
3993
3994         cfg.phy_type_low = pcaps->phy_type_low;
3995         cfg.phy_type_high = pcaps->phy_type_high;
3996         cfg.caps = pcaps->caps | ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
3997         cfg.low_power_ctrl_an = pcaps->low_power_ctrl_an;
3998         cfg.eee_cap = pcaps->eee_cap;
3999         cfg.eeer_value = pcaps->eeer_value;
4000         cfg.link_fec_opt = pcaps->link_fec_options;
4001         if (link_up)
4002                 cfg.caps |= ICE_AQ_PHY_ENA_LINK;
4003         else
4004                 cfg.caps &= ~ICE_AQ_PHY_ENA_LINK;
4005
4006         status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL);
4007
4008 out:
4009         ice_free(hw, pcaps);
4010         return status;
4011 }
4012
4013 static int
4014 ice_dev_set_link_up(struct rte_eth_dev *dev)
4015 {
4016         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4017
4018         return ice_force_phys_link_state(hw, true);
4019 }
4020
4021 static int
4022 ice_dev_set_link_down(struct rte_eth_dev *dev)
4023 {
4024         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4025
4026         return ice_force_phys_link_state(hw, false);
4027 }
4028
4029 static int
4030 ice_mtu_set(struct rte_eth_dev *dev, uint16_t mtu __rte_unused)
4031 {
4032         /* MTU setting is forbidden while the port is running */
4033         if (dev->data->dev_started != 0) {
4034                 PMD_DRV_LOG(ERR,
4035                             "port %d must be stopped before configuration",
4036                             dev->data->port_id);
4037                 return -EBUSY;
4038         }
4039
4040         return 0;
4041 }
4042
4043 static int ice_macaddr_set(struct rte_eth_dev *dev,
4044                            struct rte_ether_addr *mac_addr)
4045 {
4046         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4047         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4048         struct ice_vsi *vsi = pf->main_vsi;
4049         struct ice_mac_filter *f;
4050         uint8_t flags = 0;
4051         int ret;
4052
4053         if (!rte_is_valid_assigned_ether_addr(mac_addr)) {
4054                 PMD_DRV_LOG(ERR, "Tried to set invalid MAC address.");
4055                 return -EINVAL;
4056         }
4057
4058         TAILQ_FOREACH(f, &vsi->mac_list, next) {
4059                 if (rte_is_same_ether_addr(&pf->dev_addr, &f->mac_info.mac_addr))
4060                         break;
4061         }
4062
4063         if (!f) {
4064                 PMD_DRV_LOG(ERR, "Failed to find filter for default mac");
4065                 return -EIO;
4066         }
4067
4068         ret = ice_remove_mac_filter(vsi, &f->mac_info.mac_addr);
4069         if (ret != ICE_SUCCESS) {
4070                 PMD_DRV_LOG(ERR, "Failed to delete mac filter");
4071                 return -EIO;
4072         }
4073         ret = ice_add_mac_filter(vsi, mac_addr);
4074         if (ret != ICE_SUCCESS) {
4075                 PMD_DRV_LOG(ERR, "Failed to add mac filter");
4076                 return -EIO;
4077         }
4078         rte_ether_addr_copy(mac_addr, &pf->dev_addr);
4079
4080         flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL;
4081         ret = ice_aq_manage_mac_write(hw, mac_addr->addr_bytes, flags, NULL);
4082         if (ret != ICE_SUCCESS)
4083                 PMD_DRV_LOG(ERR, "Failed to set manage mac");
4084
4085         return 0;
4086 }
4087
4088 /* Add a MAC address, and update filters */
4089 static int
4090 ice_macaddr_add(struct rte_eth_dev *dev,
4091                 struct rte_ether_addr *mac_addr,
4092                 __rte_unused uint32_t index,
4093                 __rte_unused uint32_t pool)
4094 {
4095         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4096         struct ice_vsi *vsi = pf->main_vsi;
4097         int ret;
4098
4099         ret = ice_add_mac_filter(vsi, mac_addr);
4100         if (ret != ICE_SUCCESS) {
4101                 PMD_DRV_LOG(ERR, "Failed to add MAC filter");
4102                 return -EINVAL;
4103         }
4104
4105         return ICE_SUCCESS;
4106 }
4107
4108 /* Remove a MAC address, and update filters */
4109 static void
4110 ice_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
4111 {
4112         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4113         struct ice_vsi *vsi = pf->main_vsi;
4114         struct rte_eth_dev_data *data = dev->data;
4115         struct rte_ether_addr *macaddr;
4116         int ret;
4117
4118         macaddr = &data->mac_addrs[index];
4119         ret = ice_remove_mac_filter(vsi, macaddr);
4120         if (ret) {
4121                 PMD_DRV_LOG(ERR, "Failed to remove MAC filter");
4122                 return;
4123         }
4124 }
4125
4126 static int
4127 ice_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
4128 {
4129         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4130         struct ice_vlan vlan = ICE_VLAN(RTE_ETHER_TYPE_VLAN, vlan_id);
4131         struct ice_vsi *vsi = pf->main_vsi;
4132         int ret;
4133
4134         PMD_INIT_FUNC_TRACE();
4135
4136         /**
4137          * VLAN 0 is the generic filter for untagged packets
4138          * and can't be added or removed by the user.
4139          */
4140         if (vlan_id == 0)
4141                 return 0;
4142
4143         if (on) {
4144                 ret = ice_add_vlan_filter(vsi, &vlan);
4145                 if (ret < 0) {
4146                         PMD_DRV_LOG(ERR, "Failed to add vlan filter");
4147                         return -EINVAL;
4148                 }
4149         } else {
4150                 ret = ice_remove_vlan_filter(vsi, &vlan);
4151                 if (ret < 0) {
4152                         PMD_DRV_LOG(ERR, "Failed to remove vlan filter");
4153                         return -EINVAL;
4154                 }
4155         }
4156
4157         return 0;
4158 }
4159
4160 /* In Single VLAN Mode (SVM), single VLAN filters via ICE_SW_LKUP_VLAN are
4161  * based on the inner VLAN ID, so the VLAN TPID (i.e. 0x8100 or 0x88a8)
4162  * doesn't matter. In Double VLAN Mode (DVM), outer/single VLAN filters via
4163  * ICE_SW_LKUP_VLAN are based on the outer/single VLAN ID + VLAN TPID.
4164  *
4165  * For both modes add a VLAN 0 + no VLAN TPID filter to handle untagged traffic
4166  * when VLAN pruning is enabled. Also, this handles VLAN 0 priority tagged
4167  * traffic in SVM, since the VLAN TPID isn't part of filtering.
4168  *
4169  * If DVM is enabled then an explicit VLAN 0 + VLAN TPID filter needs to be
4170  * added to allow VLAN 0 priority tagged traffic in DVM, since the VLAN TPID is
4171  * part of filtering.
4172  */
4173 static int
4174 ice_vsi_add_vlan_zero(struct ice_vsi *vsi)
4175 {
4176         struct ice_vlan vlan;
4177         int err;
4178
4179         vlan = ICE_VLAN(0, 0);
4180         err = ice_add_vlan_filter(vsi, &vlan);
4181         if (err) {
4182                 PMD_DRV_LOG(DEBUG, "Failed to add VLAN ID 0");
4183                 return err;
4184         }
4185
4186         /* in SVM both VLAN 0 filters are identical */
4187         if (!ice_is_dvm_ena(&vsi->adapter->hw))
4188                 return 0;
4189
4190         vlan = ICE_VLAN(RTE_ETHER_TYPE_VLAN, 0);
4191         err = ice_add_vlan_filter(vsi, &vlan);
4192         if (err) {
4193                 PMD_DRV_LOG(DEBUG, "Failed to add VLAN ID 0 in double VLAN mode");
4194                 return err;
4195         }
4196
4197         return 0;
4198 }
4199
4200 /*
4201  * Delete the VLAN 0 filters in the same manner that they were added in
4202  * ice_vsi_add_vlan_zero.
4203  */
4204 static int
4205 ice_vsi_del_vlan_zero(struct ice_vsi *vsi)
4206 {
4207         struct ice_vlan vlan;
4208         int err;
4209
4210         vlan = ICE_VLAN(0, 0);
4211         err = ice_remove_vlan_filter(vsi, &vlan);
4212         if (err) {
4213                 PMD_DRV_LOG(DEBUG, "Failed to remove VLAN ID 0");
4214                 return err;
4215         }
4216
4217         /* in SVM both VLAN 0 filters are identical */
4218         if (!ice_is_dvm_ena(&vsi->adapter->hw))
4219                 return 0;
4220
4221         vlan = ICE_VLAN(RTE_ETHER_TYPE_VLAN, 0);
4222         err = ice_remove_vlan_filter(vsi, &vlan);
4223         if (err) {
4224                 PMD_DRV_LOG(DEBUG, "Failed to remove VLAN ID 0 in double VLAN mode");
4225                 return err;
4226         }
4227
4228         return 0;
4229 }
4230
4231 /* Configure vlan filter on or off */
4232 static int
4233 ice_vsi_config_vlan_filter(struct ice_vsi *vsi, bool on)
4234 {
4235         struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
4236         struct ice_vsi_ctx ctxt;
4237         uint8_t sw_flags2;
4238         int ret = 0;
4239
4240         sw_flags2 = ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
4241
4242         if (on)
4243                 vsi->info.sw_flags2 |= sw_flags2;
4244         else
4245                 vsi->info.sw_flags2 &= ~sw_flags2;
4246
4247         vsi->info.sw_id = hw->port_info->sw_id;
4248         (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
4249         ctxt.info.valid_sections =
4250                 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_SW_VALID |
4251                                  ICE_AQ_VSI_PROP_SECURITY_VALID);
4252         ctxt.vsi_num = vsi->vsi_id;
4253
4254         ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
4255         if (ret) {
4256                 PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan rx pruning",
4257                             on ? "enable" : "disable");
4258                 return -EINVAL;
4259         } else {
4260                 vsi->info.valid_sections |=
4261                         rte_cpu_to_le_16(ICE_AQ_VSI_PROP_SW_VALID |
4262                                          ICE_AQ_VSI_PROP_SECURITY_VALID);
4263         }
4264
4265         /* to be consistent with other drivers, allow untagged packets when the VLAN filter is on */
4266         if (on)
4267                 ret = ice_vsi_add_vlan_zero(vsi);
4268         else
4269                 ret = ice_vsi_del_vlan_zero(vsi);
4270
4271         return ret;
4272 }
4273
4274 /* Manage VLAN stripping for the VSI for Rx */
4275 static int
4276 ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena)
4277 {
4278         struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
4279         struct ice_vsi_ctx ctxt;
4280         enum ice_status status;
4281         int err = 0;
4282
4283         /* do not allow modifying VLAN stripping when a port VLAN is configured
4284          * on this VSI
4285          */
4286         if (vsi->info.port_based_inner_vlan)
4287                 return 0;
4288
4289         memset(&ctxt, 0, sizeof(ctxt));
4290
4291         if (ena)
4292                 /* Strip VLAN tag from Rx packet and put it in the desc */
4293                 ctxt.info.inner_vlan_flags =
4294                                         ICE_AQ_VSI_INNER_VLAN_EMODE_STR_BOTH;
4295         else
4296                 /* Disable stripping. Leave tag in packet */
4297                 ctxt.info.inner_vlan_flags =
4298                                         ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING;
4299
4300         /* Allow all packets untagged/tagged */
4301         ctxt.info.inner_vlan_flags |= ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL;
4302
4303         ctxt.info.valid_sections = rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID);
4304
4305         status = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
4306         if (status) {
4307                 PMD_DRV_LOG(ERR, "Update VSI failed to %s vlan stripping",
4308                             ena ? "enable" : "disable");
4309                 err = -EIO;
4310         } else {
4311                 vsi->info.inner_vlan_flags = ctxt.info.inner_vlan_flags;
4312         }
4313
4314         return err;
4315 }
4316
4317 static int
4318 ice_vsi_ena_inner_stripping(struct ice_vsi *vsi)
4319 {
4320         return ice_vsi_manage_vlan_stripping(vsi, true);
4321 }
4322
4323 static int
4324 ice_vsi_dis_inner_stripping(struct ice_vsi *vsi)
4325 {
4326         return ice_vsi_manage_vlan_stripping(vsi, false);
4327 }
4328
4329 static int ice_vsi_ena_outer_stripping(struct ice_vsi *vsi)
4330 {
4331         struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
4332         struct ice_vsi_ctx ctxt;
4333         enum ice_status status;
4334         int err = 0;
4335
4336         /* do not allow modifying VLAN stripping when a port VLAN is configured
4337          * on this VSI
4338          */
4339         if (vsi->info.port_based_outer_vlan)
4340                 return 0;
4341
4342         memset(&ctxt, 0, sizeof(ctxt));
4343
4344         ctxt.info.valid_sections =
4345                 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);
4346         /* clear current outer VLAN strip settings */
4347         ctxt.info.outer_vlan_flags = vsi->info.outer_vlan_flags &
4348                 ~(ICE_AQ_VSI_OUTER_VLAN_EMODE_M | ICE_AQ_VSI_OUTER_TAG_TYPE_M);
4349         ctxt.info.outer_vlan_flags |=
4350                 (ICE_AQ_VSI_OUTER_VLAN_EMODE_SHOW_BOTH <<
4351                  ICE_AQ_VSI_OUTER_VLAN_EMODE_S) |
4352                 (ICE_AQ_VSI_OUTER_TAG_VLAN_8100 <<
4353                  ICE_AQ_VSI_OUTER_TAG_TYPE_S);
4354
4355         status = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
4356         if (status) {
4357                 PMD_DRV_LOG(ERR, "Update VSI failed to enable outer VLAN stripping");
4358                 err = -EIO;
4359         } else {
4360                 vsi->info.outer_vlan_flags = ctxt.info.outer_vlan_flags;
4361         }
4362
4363         return err;
4364 }
4365
4366 static int
4367 ice_vsi_dis_outer_stripping(struct ice_vsi *vsi)
4368 {
4369         struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
4370         struct ice_vsi_ctx ctxt;
4371         enum ice_status status;
4372         int err = 0;
4373
4374         if (vsi->info.port_based_outer_vlan)
4375                 return 0;
4376
4377         memset(&ctxt, 0, sizeof(ctxt));
4378
4379         ctxt.info.valid_sections =
4380                 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);
4381         /* clear current outer VLAN strip settings */
4382         ctxt.info.outer_vlan_flags = vsi->info.outer_vlan_flags &
4383                 ~ICE_AQ_VSI_OUTER_VLAN_EMODE_M;
4384         ctxt.info.outer_vlan_flags |= ICE_AQ_VSI_OUTER_VLAN_EMODE_NOTHING <<
4385                 ICE_AQ_VSI_OUTER_VLAN_EMODE_S;
4386
4387         status = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
4388         if (status) {
4389                 PMD_DRV_LOG(ERR, "Update VSI failed to disable outer VLAN stripping");
4390                 err = -EIO;
4391         } else {
4392                 vsi->info.outer_vlan_flags = ctxt.info.outer_vlan_flags;
4393         }
4394
4395         return err;
4396 }
4397
4398 static int
4399 ice_vsi_config_vlan_stripping(struct ice_vsi *vsi, bool ena)
4400 {
4401         struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
4402         int ret;
4403
4404         if (ice_is_dvm_ena(hw)) {
4405                 if (ena)
4406                         ret = ice_vsi_ena_outer_stripping(vsi);
4407                 else
4408                         ret = ice_vsi_dis_outer_stripping(vsi);
4409         } else {
4410                 if (ena)
4411                         ret = ice_vsi_ena_inner_stripping(vsi);
4412                 else
4413                         ret = ice_vsi_dis_inner_stripping(vsi);
4414         }
4415
4416         return ret;
4417 }
4418
4419 static int
4420 ice_vlan_offload_set(struct rte_eth_dev *dev, int mask)
4421 {
4422         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4423         struct ice_vsi *vsi = pf->main_vsi;
4424         struct rte_eth_rxmode *rxmode;
4425
4426         rxmode = &dev->data->dev_conf.rxmode;
4427         if (mask & RTE_ETH_VLAN_FILTER_MASK) {
4428                 if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
4429                         ice_vsi_config_vlan_filter(vsi, true);
4430                 else
4431                         ice_vsi_config_vlan_filter(vsi, false);
4432         }
4433
4434         if (mask & RTE_ETH_VLAN_STRIP_MASK) {
4435                 if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
4436                         ice_vsi_config_vlan_stripping(vsi, true);
4437                 else
4438                         ice_vsi_config_vlan_stripping(vsi, false);
4439         }
4440
4441         return 0;
4442 }
4443
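/*
 * Illustrative sketch (not part of the driver): ice_vlan_offload_set() is
 * invoked via rte_eth_dev_set_vlan_offload(); the ethdev layer passes down
 * a mask of the offload bits that changed. port_id is a placeholder.
 */
static __rte_unused int
ice_example_vlan_offloads(uint16_t port_id)
{
        int offload = rte_eth_dev_get_vlan_offload(port_id);

        if (offload < 0)
                return offload;

        offload |= RTE_ETH_VLAN_STRIP_OFFLOAD | RTE_ETH_VLAN_FILTER_OFFLOAD;
        return rte_eth_dev_set_vlan_offload(port_id, offload);
}
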
4444 static int
4445 ice_get_rss_lut(struct ice_vsi *vsi, uint8_t *lut, uint16_t lut_size)
4446 {
4447         struct ice_aq_get_set_rss_lut_params lut_params;
4448         struct ice_pf *pf = ICE_VSI_TO_PF(vsi);
4449         struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
4450         int ret;
4451
4452         if (!lut)
4453                 return -EINVAL;
4454
4455         if (pf->flags & ICE_FLAG_RSS_AQ_CAPABLE) {
4456                 lut_params.vsi_handle = vsi->idx;
4457                 lut_params.lut_size = lut_size;
4458                 lut_params.lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF;
4459                 lut_params.lut = lut;
4460                 lut_params.global_lut_id = 0;
4461                 ret = ice_aq_get_rss_lut(hw, &lut_params);
4462                 if (ret) {
4463                         PMD_DRV_LOG(ERR, "Failed to get RSS lookup table");
4464                         return -EINVAL;
4465                 }
4466         } else {
4467                 uint64_t *lut_dw = (uint64_t *)lut;
4468                 uint16_t i, lut_size_dw = lut_size / 4;
4469
4470                 for (i = 0; i < lut_size_dw; i++)
4471                         lut_dw[i] = ICE_READ_REG(hw, PFQF_HLUT(i));
4472         }
4473
4474         return 0;
4475 }
4476
4477 static int
4478 ice_set_rss_lut(struct ice_vsi *vsi, uint8_t *lut, uint16_t lut_size)
4479 {
4480         struct ice_aq_get_set_rss_lut_params lut_params;
4481         struct ice_pf *pf;
4482         struct ice_hw *hw;
4483         int ret;
4484
4485         if (!vsi || !lut)
4486                 return -EINVAL;
4487
4488         pf = ICE_VSI_TO_PF(vsi);
4489         hw = ICE_VSI_TO_HW(vsi);
4490
4491         if (pf->flags & ICE_FLAG_RSS_AQ_CAPABLE) {
4492                 lut_params.vsi_handle = vsi->idx;
4493                 lut_params.lut_size = lut_size;
4494                 lut_params.lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF;
4495                 lut_params.lut = lut;
4496                 lut_params.global_lut_id = 0;
4497                 ret = ice_aq_set_rss_lut(hw, &lut_params);
4498                 if (ret) {
4499                         PMD_DRV_LOG(ERR, "Failed to set RSS lookup table");
4500                         return -EINVAL;
4501                 }
4502         } else {
4503                 uint64_t *lut_dw = (uint64_t *)lut;
4504                 uint16_t i, lut_size_dw = lut_size / 4;
4505
4506                 for (i = 0; i < lut_size_dw; i++)
4507                         ICE_WRITE_REG(hw, PFQF_HLUT(i), lut_dw[i]);
4508
4509                 ice_flush(hw);
4510         }
4511
4512         return 0;
4513 }
4514
4515 static int
4516 ice_rss_reta_update(struct rte_eth_dev *dev,
4517                     struct rte_eth_rss_reta_entry64 *reta_conf,
4518                     uint16_t reta_size)
4519 {
4520         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4521         uint16_t i, lut_size = pf->hash_lut_size;
4522         uint16_t idx, shift;
4523         uint8_t *lut;
4524         int ret;
4525
4526         if (reta_size != ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128 &&
4527             reta_size != ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512 &&
4528             reta_size != ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K) {
4529                 PMD_DRV_LOG(ERR,
4530                             "The size of the configured hash lookup table (%d) "
4531                             "doesn't match any size the hardware "
4532                             "supports (128, 512, 2048)",
4533                             reta_size);
4534                 return -EINVAL;
4535         }
4536
4537         /* The current LUT size MUST be used to get the RSS lookup table,
4538          * otherwise the call will fail with a -100 error code.
4539          */
4540         lut = rte_zmalloc(NULL, RTE_MAX(reta_size, lut_size), 0);
4541         if (!lut) {
4542                 PMD_DRV_LOG(ERR, "Failed to allocate memory for RSS LUT");
4543                 return -ENOMEM;
4544         }
4545         ret = ice_get_rss_lut(pf->main_vsi, lut, lut_size);
4546         if (ret)
4547                 goto out;
4548
4549         for (i = 0; i < reta_size; i++) {
4550                 idx = i / RTE_ETH_RETA_GROUP_SIZE;
4551                 shift = i % RTE_ETH_RETA_GROUP_SIZE;
4552                 if (reta_conf[idx].mask & (1ULL << shift))
4553                         lut[i] = reta_conf[idx].reta[shift];
4554         }
4555         ret = ice_set_rss_lut(pf->main_vsi, lut, reta_size);
4556         if (ret == 0 && lut_size != reta_size) {
4557                 PMD_DRV_LOG(INFO,
4558                             "The size of hash lookup table is changed from (%d) to (%d)",
4559                             lut_size, reta_size);
4560                 pf->hash_lut_size = reta_size;
4561         }
4562
4563 out:
4564         rte_free(lut);
4565
4566         return ret;
4567 }
4568
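/*
 * Illustrative sketch (not part of the driver): building the
 * rte_eth_rss_reta_entry64 array that ice_rss_reta_update() consumes.
 * Spreads a table of up to 512 entries round-robin across nb_queues;
 * only entries whose mask bit is set are applied, the rest are ignored.
 */
static __rte_unused int
ice_example_reta_round_robin(uint16_t port_id, uint16_t reta_size,
                             uint16_t nb_queues)
{
        struct rte_eth_rss_reta_entry64 reta_conf[512 / RTE_ETH_RETA_GROUP_SIZE];
        uint16_t i;

        if (nb_queues == 0 || reta_size > 512)
                return -EINVAL;

        memset(reta_conf, 0, sizeof(reta_conf));
        for (i = 0; i < reta_size; i++) {
                uint16_t idx = i / RTE_ETH_RETA_GROUP_SIZE;
                uint16_t shift = i % RTE_ETH_RETA_GROUP_SIZE;

                reta_conf[idx].mask |= 1ULL << shift;      /* entry is valid */
                reta_conf[idx].reta[shift] = i % nb_queues;
        }

        return rte_eth_dev_rss_reta_update(port_id, reta_conf, reta_size);
}
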
4569 static int
4570 ice_rss_reta_query(struct rte_eth_dev *dev,
4571                    struct rte_eth_rss_reta_entry64 *reta_conf,
4572                    uint16_t reta_size)
4573 {
4574         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4575         uint16_t i, lut_size = pf->hash_lut_size;
4576         uint16_t idx, shift;
4577         uint8_t *lut;
4578         int ret;
4579
4580         if (reta_size != lut_size) {
4581                 PMD_DRV_LOG(ERR,
4582                             "The size of the configured hash lookup table (%d) "
4583                             "doesn't match the size the hardware "
4584                             "supports (%d)",
4585                             reta_size, lut_size);
4586                 return -EINVAL;
4587         }
4588
4589         lut = rte_zmalloc(NULL, reta_size, 0);
4590         if (!lut) {
4591                 PMD_DRV_LOG(ERR, "Failed to allocate memory for RSS LUT");
4592                 return -ENOMEM;
4593         }
4594
4595         ret = ice_get_rss_lut(pf->main_vsi, lut, reta_size);
4596         if (ret)
4597                 goto out;
4598
4599         for (i = 0; i < reta_size; i++) {
4600                 idx = i / RTE_ETH_RETA_GROUP_SIZE;
4601                 shift = i % RTE_ETH_RETA_GROUP_SIZE;
4602                 if (reta_conf[idx].mask & (1ULL << shift))
4603                         reta_conf[idx].reta[shift] = lut[i];
4604         }
4605
4606 out:
4607         rte_free(lut);
4608
4609         return ret;
4610 }
4611
4612 static int
4613 ice_set_rss_key(struct ice_vsi *vsi, uint8_t *key, uint8_t key_len)
4614 {
4615         struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
4616         int ret = 0;
4617
4618         if (!key || key_len == 0) {
4619                 PMD_DRV_LOG(DEBUG, "No key to be configured");
4620                 return 0;
4621         } else if (key_len != (VSIQF_HKEY_MAX_INDEX + 1) *
4622                    sizeof(uint32_t)) {
4623                 PMD_DRV_LOG(ERR, "Invalid key length %u", key_len);
4624                 return -EINVAL;
4625         }
4626
4627         struct ice_aqc_get_set_rss_keys *key_dw =
4628                 (struct ice_aqc_get_set_rss_keys *)key;
4629
4630         ret = ice_aq_set_rss_key(hw, vsi->idx, key_dw);
4631         if (ret) {
4632                 PMD_DRV_LOG(ERR, "Failed to configure RSS key via AQ");
4633                 ret = -EINVAL;
4634         }
4635
4636         return ret;
4637 }
4638
4639 static int
4640 ice_get_rss_key(struct ice_vsi *vsi, uint8_t *key, uint8_t *key_len)
4641 {
4642         struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
4643         int ret;
4644
4645         if (!key || !key_len)
4646                 return -EINVAL;
4647
4648         ret = ice_aq_get_rss_key
4649                 (hw, vsi->idx,
4650                  (struct ice_aqc_get_set_rss_keys *)key);
4651         if (ret) {
4652                 PMD_DRV_LOG(ERR, "Failed to get RSS key via AQ");
4653                 return -EINVAL;
4654         }
4655         *key_len = (VSIQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
4656
4657         return 0;
4658 }
4659
4660 static int
4661 ice_rss_hash_update(struct rte_eth_dev *dev,
4662                     struct rte_eth_rss_conf *rss_conf)
4663 {
4664         enum ice_status status = ICE_SUCCESS;
4665         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4666         struct ice_vsi *vsi = pf->main_vsi;
4667
4668         /* set hash key */
4669         status = ice_set_rss_key(vsi, rss_conf->rss_key, rss_conf->rss_key_len);
4670         if (status)
4671                 return status;
4672
4673         if (rss_conf->rss_hf == 0) {
4674                 pf->rss_hf = 0;
4675                 return 0;
4676         }
4677
4678         /* RSS hash configuration */
4679         ice_rss_hash_set(pf, rss_conf->rss_hf);
4680
4681         return 0;
4682 }
4683
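/*
 * Illustrative sketch (not part of the driver): configuring the RSS key
 * and hash types through the ethdev API. ice_set_rss_key() above rejects
 * any key that is not exactly (VSIQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)
 * bytes long; the key bytes here are placeholders.
 */
static __rte_unused int
ice_example_rss_hash_update(uint16_t port_id)
{
        uint8_t key[(VSIQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)] = { 0x6d, 0x5a };
        struct rte_eth_rss_conf rss_conf = {
                .rss_key = key,
                .rss_key_len = sizeof(key),
                .rss_hf = RTE_ETH_RSS_IP | RTE_ETH_RSS_TCP | RTE_ETH_RSS_UDP,
        };

        return rte_eth_dev_rss_hash_update(port_id, &rss_conf);
}
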
4684 static int
4685 ice_rss_hash_conf_get(struct rte_eth_dev *dev,
4686                       struct rte_eth_rss_conf *rss_conf)
4687 {
4688         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4689         struct ice_vsi *vsi = pf->main_vsi;
4690
4691         ice_get_rss_key(vsi, rss_conf->rss_key,
4692                         &rss_conf->rss_key_len);
4693
4694         rss_conf->rss_hf = pf->rss_hf;
4695         return 0;
4696 }
4697
4698 static int
4699 ice_promisc_enable(struct rte_eth_dev *dev)
4700 {
4701         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4702         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4703         struct ice_vsi *vsi = pf->main_vsi;
4704         enum ice_status status;
4705         uint8_t pmask;
4706         int ret = 0;
4707
4708         pmask = ICE_PROMISC_UCAST_RX | ICE_PROMISC_UCAST_TX |
4709                 ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX;
4710
4711         status = ice_set_vsi_promisc(hw, vsi->idx, pmask, 0);
4712         switch (status) {
4713         case ICE_ERR_ALREADY_EXISTS:
4714                 PMD_DRV_LOG(DEBUG, "Promisc mode has already been enabled");
                     /* fall-through */
4715         case ICE_SUCCESS:
4716                 break;
4717         default:
4718                 PMD_DRV_LOG(ERR, "Failed to enable promisc, err=%d", status);
4719                 ret = -EAGAIN;
4720         }
4721
4722         return ret;
4723 }
4724
4725 static int
4726 ice_promisc_disable(struct rte_eth_dev *dev)
4727 {
4728         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4729         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4730         struct ice_vsi *vsi = pf->main_vsi;
4731         enum ice_status status;
4732         uint8_t pmask;
4733         int ret = 0;
4734
4735         if (dev->data->all_multicast == 1)
4736                 pmask = ICE_PROMISC_UCAST_RX | ICE_PROMISC_UCAST_TX;
4737         else
4738                 pmask = ICE_PROMISC_UCAST_RX | ICE_PROMISC_UCAST_TX |
4739                         ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX;
4740
4741         status = ice_clear_vsi_promisc(hw, vsi->idx, pmask, 0);
4742         if (status != ICE_SUCCESS) {
4743                 PMD_DRV_LOG(ERR, "Failed to clear promisc, err=%d", status);
4744                 ret = -EAGAIN;
4745         }
4746
4747         return ret;
4748 }
4749
4750 static int
4751 ice_allmulti_enable(struct rte_eth_dev *dev)
4752 {
4753         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4754         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4755         struct ice_vsi *vsi = pf->main_vsi;
4756         enum ice_status status;
4757         uint8_t pmask;
4758         int ret = 0;
4759
4760         pmask = ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX;
4761
4762         status = ice_set_vsi_promisc(hw, vsi->idx, pmask, 0);
4763
4764         switch (status) {
4765         case ICE_ERR_ALREADY_EXISTS:
4766                 PMD_DRV_LOG(DEBUG, "Allmulti has already been enabled");
                     /* fall-through */
4767         case ICE_SUCCESS:
4768                 break;
4769         default:
4770                 PMD_DRV_LOG(ERR, "Failed to enable allmulti, err=%d", status);
4771                 ret = -EAGAIN;
4772         }
4773
4774         return ret;
4775 }
4776
4777 static int
4778 ice_allmulti_disable(struct rte_eth_dev *dev)
4779 {
4780         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4781         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4782         struct ice_vsi *vsi = pf->main_vsi;
4783         enum ice_status status;
4784         uint8_t pmask;
4785         int ret = 0;
4786
4787         if (dev->data->promiscuous == 1)
4788                 return 0; /* must remain in all_multicast mode */
4789
4790         pmask = ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX;
4791
4792         status = ice_clear_vsi_promisc(hw, vsi->idx, pmask, 0);
4793         if (status != ICE_SUCCESS) {
4794                 PMD_DRV_LOG(ERR, "Failed to clear allmulti, err=%d", status);
4795                 ret = -EAGAIN;
4796         }
4797
4798         return ret;
4799 }
4800
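/*
 * Illustrative sketch (not part of the driver): the ordering dependency
 * between promiscuous and all-multicast mode that the four callbacks above
 * implement. port_id is a placeholder; return values are ignored for
 * brevity.
 */
static __rte_unused void
ice_example_promisc_allmulti(uint16_t port_id)
{
        rte_eth_allmulticast_enable(port_id);  /* multicast promisc on */
        rte_eth_promiscuous_enable(port_id);   /* unicast promisc added */
        rte_eth_allmulticast_disable(port_id); /* hw no-op while promisc is on */
        rte_eth_promiscuous_disable(port_id);  /* clears both, allmulti flag off */
}
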
4801 static int ice_rx_queue_intr_enable(struct rte_eth_dev *dev,
4802                                     uint16_t queue_id)
4803 {
4804         struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
4805         struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
4806         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4807         uint32_t val;
4808         uint16_t msix_intr;
4809
4810         msix_intr = rte_intr_vec_list_index_get(intr_handle, queue_id);
4811
4812         val = GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
4813               GLINT_DYN_CTL_ITR_INDX_M;
4814         val &= ~GLINT_DYN_CTL_WB_ON_ITR_M;
4815
4816         ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr), val);
4817         rte_intr_ack(pci_dev->intr_handle);
4818
4819         return 0;
4820 }
4821
4822 static int ice_rx_queue_intr_disable(struct rte_eth_dev *dev,
4823                                      uint16_t queue_id)
4824 {
4825         struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
4826         struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
4827         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4828         uint16_t msix_intr;
4829
4830         msix_intr = rte_intr_vec_list_index_get(intr_handle, queue_id);
4831
4832         ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr), GLINT_DYN_CTL_WB_ON_ITR_M);
4833
4834         return 0;
4835 }
4836
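/*
 * Illustrative sketch (not part of the driver): the interrupt-assisted
 * polling pattern the two callbacks above serve (cf. the l3fwd-power
 * example). The epoll wait between enable and disable is elided;
 * port_id/queue_id are placeholders.
 */
static __rte_unused void
ice_example_rx_intr(uint16_t port_id, uint16_t queue_id)
{
        struct rte_mbuf *pkts[32];
        uint16_t nb;

        nb = rte_eth_rx_burst(port_id, queue_id, pkts, 32);
        if (nb == 0) {
                /* queue went idle: arm the interrupt, block, then poll again */
                rte_eth_dev_rx_intr_enable(port_id, queue_id);
                /* ... rte_epoll_wait() on the queue's event fd ... */
                rte_eth_dev_rx_intr_disable(port_id, queue_id);
        }

        rte_pktmbuf_free_bulk(pkts, nb);
}
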
4837 static int
4838 ice_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
4839 {
4840         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4841         u8 ver, patch;
4842         u16 build;
4843         int ret;
4844
4845         ver = hw->flash.orom.major;
4846         patch = hw->flash.orom.patch;
4847         build = hw->flash.orom.build;
4848
4849         ret = snprintf(fw_version, fw_size,
4850                         "%x.%02x 0x%08x %d.%d.%d",
4851                         hw->flash.nvm.major,
4852                         hw->flash.nvm.minor,
4853                         hw->flash.nvm.eetrack,
4854                         ver, build, patch);
4855         if (ret < 0)
4856                 return -EINVAL;
4857
4858         /* add the size of '\0' */
4859         ret += 1;
4860         if (fw_size < (size_t)ret)
4861                 return ret;
4862         else
4863                 return 0;
4864 }
4865
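/*
 * Illustrative sketch (not part of the driver): the truncation contract
 * implemented above. rte_eth_dev_fw_version_get() returns 0 on success
 * and the required buffer size (including the '\0') when the supplied
 * buffer is too small, so callers can retry with a larger one.
 */
static __rte_unused void
ice_example_fw_version(uint16_t port_id)
{
        char small[4], full[64];
        int need;

        need = rte_eth_dev_fw_version_get(port_id, small, sizeof(small));
        if (need > 0 && (size_t)need <= sizeof(full))
                need = rte_eth_dev_fw_version_get(port_id, full, sizeof(full));
}
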
4866 static int
4867 ice_vsi_vlan_pvid_set(struct ice_vsi *vsi, struct ice_vsi_vlan_pvid_info *info)
4868 {
4869         struct ice_hw *hw;
4870         struct ice_vsi_ctx ctxt;
4871         uint8_t vlan_flags = 0;
4872         int ret;
4873
4874         if (!vsi || !info) {
4875                 PMD_DRV_LOG(ERR, "invalid parameters");
4876                 return -EINVAL;
4877         }
4878
4879         if (info->on) {
4880                 vsi->info.port_based_inner_vlan = info->config.pvid;
4881                 /**
4882                  * If PVID insertion is enabled, only tagged packets
4883                  * are allowed to be sent out.
4884                  */
4885                 vlan_flags = ICE_AQ_VSI_INNER_VLAN_INSERT_PVID |
4886                              ICE_AQ_VSI_INNER_VLAN_TX_MODE_ACCEPTUNTAGGED;
4887         } else {
4888                 vsi->info.port_based_inner_vlan = 0;
4889                 if (info->config.reject.tagged == 0)
4890                         vlan_flags |= ICE_AQ_VSI_INNER_VLAN_TX_MODE_ACCEPTTAGGED;
4891
4892                 if (info->config.reject.untagged == 0)
4893                         vlan_flags |= ICE_AQ_VSI_INNER_VLAN_TX_MODE_ACCEPTUNTAGGED;
4894         }
4895         vsi->info.inner_vlan_flags &= ~(ICE_AQ_VSI_INNER_VLAN_INSERT_PVID |
4896                                   ICE_AQ_VSI_INNER_VLAN_EMODE_M);
4897         vsi->info.inner_vlan_flags |= vlan_flags;
4898         memset(&ctxt, 0, sizeof(ctxt));
4899         rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
4900         ctxt.info.valid_sections =
4901                 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID);
4902         ctxt.vsi_num = vsi->vsi_id;
4903
4904         hw = ICE_VSI_TO_HW(vsi);
4905         ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
4906         if (ret != ICE_SUCCESS) {
4907                 PMD_DRV_LOG(ERR,
4908                             "update VSI for VLAN insert failed, err %d",
4909                             ret);
4910                 return -EINVAL;
4911         }
4912
4913         vsi->info.valid_sections |=
4914                 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID);
4915
4916         return ret;
4917 }
4918
4919 static int
4920 ice_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
4921 {
4922         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4923         struct ice_vsi *vsi = pf->main_vsi;
4924         struct rte_eth_dev_data *data = pf->dev_data;
4925         struct ice_vsi_vlan_pvid_info info;
4926         int ret;
4927
4928         memset(&info, 0, sizeof(info));
4929         info.on = on;
4930         if (info.on) {
4931                 info.config.pvid = pvid;
4932         } else {
4933                 info.config.reject.tagged =
4934                         data->dev_conf.txmode.hw_vlan_reject_tagged;
4935                 info.config.reject.untagged =
4936                         data->dev_conf.txmode.hw_vlan_reject_untagged;
4937         }
4938
4939         ret = ice_vsi_vlan_pvid_set(vsi, &info);
4940         if (ret < 0) {
4941                 PMD_DRV_LOG(ERR, "Failed to set pvid.");
4942                 return -EINVAL;
4943         }
4944
4945         return 0;
4946 }
4947
4948 static int
4949 ice_get_eeprom_length(struct rte_eth_dev *dev)
4950 {
4951         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4952
4953         return hw->flash.flash_size;
4954 }
4955
4956 static int
4957 ice_get_eeprom(struct rte_eth_dev *dev,
4958                struct rte_dev_eeprom_info *eeprom)
4959 {
4960         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4961         enum ice_status status = ICE_SUCCESS;
4962         uint8_t *data = eeprom->data;
4963
4964         eeprom->magic = hw->vendor_id | (hw->device_id << 16);
4965
4966         status = ice_acquire_nvm(hw, ICE_RES_READ);
4967         if (status) {
4968                 PMD_DRV_LOG(ERR, "acquire nvm failed.");
4969                 return -EIO;
4970         }
4971
4972         status = ice_read_flat_nvm(hw, eeprom->offset, &eeprom->length,
4973                                    data, false);
4974
4975         ice_release_nvm(hw);
4976
4977         if (status) {
4978                 PMD_DRV_LOG(ERR, "EEPROM read failed.");
4979                 return -EIO;
4980         }
4981
4982         return 0;
4983 }
4984
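/*
 * Illustrative sketch (not part of the driver): dumping the whole NVM via
 * the two callbacks above. Buffer handling uses rte_zmalloc/rte_free;
 * port_id is a placeholder.
 */
static __rte_unused int
ice_example_dump_eeprom(uint16_t port_id)
{
        struct rte_dev_eeprom_info info;
        int len = rte_eth_dev_get_eeprom_length(port_id);
        int ret;

        if (len <= 0)
                return len;

        memset(&info, 0, sizeof(info));
        info.data = rte_zmalloc(NULL, len, 0);
        if (info.data == NULL)
                return -ENOMEM;
        info.offset = 0;
        info.length = (uint32_t)len;

        ret = rte_eth_dev_get_eeprom(port_id, &info);
        rte_free(info.data);
        return ret;
}
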
4985 static int
4986 ice_get_module_info(struct rte_eth_dev *dev,
4987                     struct rte_eth_dev_module_info *modinfo)
4988 {
4989         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4990         enum ice_status status;
4991         u8 sff8472_comp = 0;
4992         u8 sff8472_swap = 0;
4993         u8 sff8636_rev = 0;
4994         u8 value = 0;
4995
4996         status = ice_aq_sff_eeprom(hw, 0, ICE_I2C_EEPROM_DEV_ADDR, 0x00, 0x00,
4997                                    0, &value, 1, 0, NULL);
4998         if (status)
4999                 return -EIO;
5000
5001         switch (value) {
5002         case ICE_MODULE_TYPE_SFP:
5003                 status = ice_aq_sff_eeprom(hw, 0, ICE_I2C_EEPROM_DEV_ADDR,
5004                                            ICE_MODULE_SFF_8472_COMP, 0x00, 0,
5005                                            &sff8472_comp, 1, 0, NULL);
5006                 if (status)
5007                         return -EIO;
5008                 status = ice_aq_sff_eeprom(hw, 0, ICE_I2C_EEPROM_DEV_ADDR,
5009                                            ICE_MODULE_SFF_8472_SWAP, 0x00, 0,
5010                                            &sff8472_swap, 1, 0, NULL);
5011                 if (status)
5012                         return -EIO;
5013
5014                 if (sff8472_swap & ICE_MODULE_SFF_ADDR_MODE) {
5015                         modinfo->type = ICE_MODULE_SFF_8079;
5016                         modinfo->eeprom_len = ICE_MODULE_SFF_8079_LEN;
5017                 } else if (sff8472_comp &&
5018                            (sff8472_swap & ICE_MODULE_SFF_DIAG_CAPAB)) {
5019                         modinfo->type = ICE_MODULE_SFF_8472;
5020                         modinfo->eeprom_len = ICE_MODULE_SFF_8472_LEN;
5021                 } else {
5022                         modinfo->type = ICE_MODULE_SFF_8079;
5023                         modinfo->eeprom_len = ICE_MODULE_SFF_8079_LEN;
5024                 }
5025                 break;
5026         case ICE_MODULE_TYPE_QSFP_PLUS:
5027         case ICE_MODULE_TYPE_QSFP28:
5028                 status = ice_aq_sff_eeprom(hw, 0, ICE_I2C_EEPROM_DEV_ADDR,
5029                                            ICE_MODULE_REVISION_ADDR, 0x00, 0,
5030                                            &sff8636_rev, 1, 0, NULL);
5031                 if (status)
5032                         return -EIO;
5033                 /* Check revision compliance */
5034                 if (sff8636_rev > 0x02) {
5035                         /* Module is SFF-8636 compliant */
5036                         modinfo->type = ICE_MODULE_SFF_8636;
5037                         modinfo->eeprom_len = ICE_MODULE_QSFP_MAX_LEN;
5038                 } else {
5039                         modinfo->type = ICE_MODULE_SFF_8436;
5040                         modinfo->eeprom_len = ICE_MODULE_QSFP_MAX_LEN;
5041                 }
5042                 break;
5043         default:
5044                 PMD_DRV_LOG(WARNING, "SFF Module Type not recognized.");
5045                 return -EINVAL;
5046         }
5047         return 0;
5048 }
5049
5050 static int
5051 ice_get_module_eeprom(struct rte_eth_dev *dev,
5052                       struct rte_dev_eeprom_info *info)
5053 {
5054         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5055 #define SFF_READ_BLOCK_SIZE 8
5056 #define I2C_BUSY_TRY_TIMES 4
5057 #define I2C_USLEEP_MIN_TIME 1500
5058 #define I2C_USLEEP_MAX_TIME 2500
5059         uint8_t value[SFF_READ_BLOCK_SIZE] = {0};
5060         uint8_t addr = ICE_I2C_EEPROM_DEV_ADDR;
5061         uint8_t *data = NULL;
5062         enum ice_status status;
5063         bool is_sfp = false;
5064         uint32_t i, j;
5065         uint32_t offset = 0;
5066         uint8_t page = 0;
5067
5068         if (!info || !info->length || !info->data)
5069                 return -EINVAL;
5070
5071         status = ice_aq_sff_eeprom(hw, 0, addr, offset, page, 0, value, 1, 0,
5072                                    NULL);
5073         if (status)
5074                 return -EIO;
5075
5076         if (value[0] == ICE_MODULE_TYPE_SFP)
5077                 is_sfp = true;
5078
5079         data = info->data;
5080         memset(data, 0, info->length);
5081         for (i = 0; i < info->length; i += SFF_READ_BLOCK_SIZE) {
5082                 offset = i + info->offset;
5083                 page = 0;
5084
5085                 /* Check if we need to access the other memory page */
5086                 if (is_sfp) {
5087                         if (offset >= ICE_MODULE_SFF_8079_LEN) {
5088                                 offset -= ICE_MODULE_SFF_8079_LEN;
5089                                 addr = ICE_I2C_EEPROM_DEV_ADDR2;
5090                         }
5091                 } else {
5092                         while (offset >= ICE_MODULE_SFF_8436_LEN) {
5093                                 /* Compute memory page number and offset. */
5094                                 offset -= ICE_MODULE_SFF_8436_LEN / 2;
5095                                 page++;
5096                         }
5097                 }
5098
5099                 /* Bit 2 of eeprom address 0x02 declares upper
5100                  * pages are disabled on QSFP modules.
5101                  * SFP modules only ever use page 0.
5102                  */
5103                 if (page == 0 || !(data[0x2] & 0x4)) {
5104                         /* If the i2c bus is busy due to a slow page change
5105                          * or link management access, the call can fail.
5106                          * This is normal, so retry a few times.
5107                          */
5108                         for (j = 0; j < I2C_BUSY_TRY_TIMES; j++) {
5109                                 status = ice_aq_sff_eeprom(hw, 0, addr, offset,
5110                                                            page, !is_sfp, value,
5111                                                            SFF_READ_BLOCK_SIZE,
5112                                                            0, NULL);
5113                                 PMD_DRV_LOG(DEBUG, "SFF %02X %02X %02X %X = "
5114                                         "%02X%02X%02X%02X."
5115                                         "%02X%02X%02X%02X (%X)",
5116                                         addr, offset, page, is_sfp,
5117                                         value[0], value[1],
5118                                         value[2], value[3],
5119                                         value[4], value[5],
5120                                         value[6], value[7],
5121                                         status);
5122                                 if (status) {
5123                                         usleep_range(I2C_USLEEP_MIN_TIME,
5124                                                      I2C_USLEEP_MAX_TIME);
5125                                         memset(value, 0, SFF_READ_BLOCK_SIZE);
5126                                         continue;
5127                                 }
5128                                 break;
5129                         }
5130
5131                         /* Make sure we have enough room for the new block */
5132                         if ((i + SFF_READ_BLOCK_SIZE) < info->length)
5133                                 memcpy(data + i, value, SFF_READ_BLOCK_SIZE);
5134                 }
5135         }
5136
5137         return 0;
5138 }
5139
5140 static void
5141 ice_stat_update_32(struct ice_hw *hw,
5142                    uint32_t reg,
5143                    bool offset_loaded,
5144                    uint64_t *offset,
5145                    uint64_t *stat)
5146 {
5147         uint64_t new_data;
5148
5149         new_data = (uint64_t)ICE_READ_REG(hw, reg);
5150         if (!offset_loaded)
5151                 *offset = new_data;
5152
5153         if (new_data >= *offset)
5154                 *stat = (uint64_t)(new_data - *offset);
5155         else
5156                 *stat = (uint64_t)((new_data +
5157                                     ((uint64_t)1 << ICE_32_BIT_WIDTH))
5158                                    - *offset);
5159 }
5160
5161 static void
5162 ice_stat_update_40(struct ice_hw *hw,
5163                    uint32_t hireg,
5164                    uint32_t loreg,
5165                    bool offset_loaded,
5166                    uint64_t *offset,
5167                    uint64_t *stat)
5168 {
5169         uint64_t new_data;
5170
5171         new_data = (uint64_t)ICE_READ_REG(hw, loreg);
5172         new_data |= (uint64_t)(ICE_READ_REG(hw, hireg) & ICE_8_BIT_MASK) <<
5173                     ICE_32_BIT_WIDTH;
5174
5175         if (!offset_loaded)
5176                 *offset = new_data;
5177
5178         if (new_data >= *offset)
5179                 *stat = new_data - *offset;
5180         else
5181                 *stat = (uint64_t)((new_data +
5182                                     ((uint64_t)1 << ICE_40_BIT_WIDTH)) -
5183                                    *offset);
5184
5185         *stat &= ICE_40_BIT_MASK;
5186 }
5187
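/*
 * Worked example for the rollover handling above (illustrative only):
 * once a 40-bit counter wraps, the new reading is smaller than the saved
 * offset, so the delta is taken modulo 2^40. E.g. with
 *   offset   = 0xFFFFFFFFF0 (captured at the previous read)
 *   new_data = 0x0000000010 (counter wrapped past 2^40)
 * the reported delta is new_data + 2^40 - offset = 0x20.
 */
static __rte_unused uint64_t
ice_example_wrap_delta_40(uint64_t offset, uint64_t new_data)
{
        if (new_data >= offset)
                return new_data - offset;

        return (new_data + ((uint64_t)1 << ICE_40_BIT_WIDTH)) - offset;
}
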
5188 /* Get all the statistics of a VSI */
5189 static void
5190 ice_update_vsi_stats(struct ice_vsi *vsi)
5191 {
5192         struct ice_eth_stats *oes = &vsi->eth_stats_offset;
5193         struct ice_eth_stats *nes = &vsi->eth_stats;
5194         struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
5195         int idx = rte_le_to_cpu_16(vsi->vsi_id);
5196
5197         ice_stat_update_40(hw, GLV_GORCH(idx), GLV_GORCL(idx),
5198                            vsi->offset_loaded, &oes->rx_bytes,
5199                            &nes->rx_bytes);
5200         ice_stat_update_40(hw, GLV_UPRCH(idx), GLV_UPRCL(idx),
5201                            vsi->offset_loaded, &oes->rx_unicast,
5202                            &nes->rx_unicast);
5203         ice_stat_update_40(hw, GLV_MPRCH(idx), GLV_MPRCL(idx),
5204                            vsi->offset_loaded, &oes->rx_multicast,
5205                            &nes->rx_multicast);
5206         ice_stat_update_40(hw, GLV_BPRCH(idx), GLV_BPRCL(idx),
5207                            vsi->offset_loaded, &oes->rx_broadcast,
5208                            &nes->rx_broadcast);
5209         /* extend the 40-bit hw counter to 64 bits when rx_bytes wraps around */
5210         if (vsi->offset_loaded) {
5211                 if (ICE_RXTX_BYTES_LOW(vsi->old_rx_bytes) > nes->rx_bytes)
5212                         nes->rx_bytes += (uint64_t)1 << ICE_40_BIT_WIDTH;
5213                 nes->rx_bytes += ICE_RXTX_BYTES_HIGH(vsi->old_rx_bytes);
5214         }
5215         vsi->old_rx_bytes = nes->rx_bytes;
5216         /* exclude CRC bytes */
5217         nes->rx_bytes -= (nes->rx_unicast + nes->rx_multicast +
5218                           nes->rx_broadcast) * RTE_ETHER_CRC_LEN;
5219
5220         ice_stat_update_32(hw, GLV_RDPC(idx), vsi->offset_loaded,
5221                            &oes->rx_discards, &nes->rx_discards);
5222         /* GLV_REPC not supported */
5223         /* GLV_RMPC not supported */
5224         ice_stat_update_32(hw, GLSWID_RUPP(idx), vsi->offset_loaded,
5225                            &oes->rx_unknown_protocol,
5226                            &nes->rx_unknown_protocol);
5227         ice_stat_update_40(hw, GLV_GOTCH(idx), GLV_GOTCL(idx),
5228                            vsi->offset_loaded, &oes->tx_bytes,
5229                            &nes->tx_bytes);
5230         ice_stat_update_40(hw, GLV_UPTCH(idx), GLV_UPTCL(idx),
5231                            vsi->offset_loaded, &oes->tx_unicast,
5232                            &nes->tx_unicast);
5233         ice_stat_update_40(hw, GLV_MPTCH(idx), GLV_MPTCL(idx),
5234                            vsi->offset_loaded, &oes->tx_multicast,
5235                            &nes->tx_multicast);
5236         ice_stat_update_40(hw, GLV_BPTCH(idx), GLV_BPTCL(idx),
5237                            vsi->offset_loaded,  &oes->tx_broadcast,
5238                            &nes->tx_broadcast);
5239         /* GLV_TDPC not supported */
5240         ice_stat_update_32(hw, GLV_TEPC(idx), vsi->offset_loaded,
5241                            &oes->tx_errors, &nes->tx_errors);
5242         /* extend the 40-bit hw counter to 64 bits when tx_bytes wraps around */
5243         if (vsi->offset_loaded) {
5244                 if (ICE_RXTX_BYTES_LOW(vsi->old_tx_bytes) > nes->tx_bytes)
5245                         nes->tx_bytes += (uint64_t)1 << ICE_40_BIT_WIDTH;
5246                 nes->tx_bytes += ICE_RXTX_BYTES_HIGH(vsi->old_tx_bytes);
5247         }
5248         vsi->old_tx_bytes = nes->tx_bytes;
5249         vsi->offset_loaded = true;
5250
5251         PMD_DRV_LOG(DEBUG, "************** VSI[%u] stats start **************",
5252                     vsi->vsi_id);
5253         PMD_DRV_LOG(DEBUG, "rx_bytes:            %"PRIu64"", nes->rx_bytes);
5254         PMD_DRV_LOG(DEBUG, "rx_unicast:          %"PRIu64"", nes->rx_unicast);
5255         PMD_DRV_LOG(DEBUG, "rx_multicast:        %"PRIu64"", nes->rx_multicast);
5256         PMD_DRV_LOG(DEBUG, "rx_broadcast:        %"PRIu64"", nes->rx_broadcast);
5257         PMD_DRV_LOG(DEBUG, "rx_discards:         %"PRIu64"", nes->rx_discards);
5258         PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
5259                     nes->rx_unknown_protocol);
5260         PMD_DRV_LOG(DEBUG, "tx_bytes:            %"PRIu64"", nes->tx_bytes);
5261         PMD_DRV_LOG(DEBUG, "tx_unicast:          %"PRIu64"", nes->tx_unicast);
5262         PMD_DRV_LOG(DEBUG, "tx_multicast:        %"PRIu64"", nes->tx_multicast);
5263         PMD_DRV_LOG(DEBUG, "tx_broadcast:        %"PRIu64"", nes->tx_broadcast);
5264         PMD_DRV_LOG(DEBUG, "tx_discards:         %"PRIu64"", nes->tx_discards);
5265         PMD_DRV_LOG(DEBUG, "tx_errors:           %"PRIu64"", nes->tx_errors);
5266         PMD_DRV_LOG(DEBUG, "************** VSI[%u] stats end ****************",
5267                     vsi->vsi_id);
5268 }
5269
5270 static void
5271 ice_read_stats_registers(struct ice_pf *pf, struct ice_hw *hw)
5272 {
5273         struct ice_hw_port_stats *ns = &pf->stats; /* new stats */
5274         struct ice_hw_port_stats *os = &pf->stats_offset; /* old stats */
5275
5276         /* Get statistics of struct ice_eth_stats */
5277         ice_stat_update_40(hw, GLPRT_GORCH(hw->port_info->lport),
5278                            GLPRT_GORCL(hw->port_info->lport),
5279                            pf->offset_loaded, &os->eth.rx_bytes,
5280                            &ns->eth.rx_bytes);
5281         ice_stat_update_40(hw, GLPRT_UPRCH(hw->port_info->lport),
5282                            GLPRT_UPRCL(hw->port_info->lport),
5283                            pf->offset_loaded, &os->eth.rx_unicast,
5284                            &ns->eth.rx_unicast);
5285         ice_stat_update_40(hw, GLPRT_MPRCH(hw->port_info->lport),
5286                            GLPRT_MPRCL(hw->port_info->lport),
5287                            pf->offset_loaded, &os->eth.rx_multicast,
5288                            &ns->eth.rx_multicast);
5289         ice_stat_update_40(hw, GLPRT_BPRCH(hw->port_info->lport),
5290                            GLPRT_BPRCL(hw->port_info->lport),
5291                            pf->offset_loaded, &os->eth.rx_broadcast,
5292                            &ns->eth.rx_broadcast);
5293         ice_stat_update_32(hw, PRTRPB_RDPC,
5294                            pf->offset_loaded, &os->eth.rx_discards,
5295                            &ns->eth.rx_discards);
5296         /* extend the 40-bit hw counter to 64 bits when rx_bytes wraps around */
5297         if (pf->offset_loaded) {
5298                 if (ICE_RXTX_BYTES_LOW(pf->old_rx_bytes) > ns->eth.rx_bytes)
5299                         ns->eth.rx_bytes += (uint64_t)1 << ICE_40_BIT_WIDTH;
5300                 ns->eth.rx_bytes += ICE_RXTX_BYTES_HIGH(pf->old_rx_bytes);
5301         }
5302         pf->old_rx_bytes = ns->eth.rx_bytes;
5303
5304         /* Workaround: CRC size should not be included in byte statistics,
5305          * so subtract RTE_ETHER_CRC_LEN from the byte counter for each rx
5306          * packet.
5307          */
5308         ns->eth.rx_bytes -= (ns->eth.rx_unicast + ns->eth.rx_multicast +
5309                              ns->eth.rx_broadcast) * RTE_ETHER_CRC_LEN;
5310
5311         /* GLPRT_REPC not supported */
5312         /* GLPRT_RMPC not supported */
5313         ice_stat_update_32(hw, GLSWID_RUPP(hw->port_info->lport),
5314                            pf->offset_loaded,
5315                            &os->eth.rx_unknown_protocol,
5316                            &ns->eth.rx_unknown_protocol);
5317         ice_stat_update_40(hw, GLPRT_GOTCH(hw->port_info->lport),
5318                            GLPRT_GOTCL(hw->port_info->lport),
5319                            pf->offset_loaded, &os->eth.tx_bytes,
5320                            &ns->eth.tx_bytes);
5321         ice_stat_update_40(hw, GLPRT_UPTCH(hw->port_info->lport),
5322                            GLPRT_UPTCL(hw->port_info->lport),
5323                            pf->offset_loaded, &os->eth.tx_unicast,
5324                            &ns->eth.tx_unicast);
5325         ice_stat_update_40(hw, GLPRT_MPTCH(hw->port_info->lport),
5326                            GLPRT_MPTCL(hw->port_info->lport),
5327                            pf->offset_loaded, &os->eth.tx_multicast,
5328                            &ns->eth.tx_multicast);
5329         ice_stat_update_40(hw, GLPRT_BPTCH(hw->port_info->lport),
5330                            GLPRT_BPTCL(hw->port_info->lport),
5331                            pf->offset_loaded, &os->eth.tx_broadcast,
5332                            &ns->eth.tx_broadcast);
5333         /* extend the 40-bit hw counter to 64 bits when tx_bytes wraps around */
5334         if (pf->offset_loaded) {
5335                 if (ICE_RXTX_BYTES_LOW(pf->old_tx_bytes) > ns->eth.tx_bytes)
5336                         ns->eth.tx_bytes += (uint64_t)1 << ICE_40_BIT_WIDTH;
5337                 ns->eth.tx_bytes += ICE_RXTX_BYTES_HIGH(pf->old_tx_bytes);
5338         }
5339         pf->old_tx_bytes = ns->eth.tx_bytes;
5340         ns->eth.tx_bytes -= (ns->eth.tx_unicast + ns->eth.tx_multicast +
5341                              ns->eth.tx_broadcast) * RTE_ETHER_CRC_LEN;
5342
5343         /* GLPRT_TEPC not supported */
5344
5345         /* additional port specific stats */
5346         ice_stat_update_32(hw, GLPRT_TDOLD(hw->port_info->lport),
5347                            pf->offset_loaded, &os->tx_dropped_link_down,
5348                            &ns->tx_dropped_link_down);
5349         ice_stat_update_32(hw, GLPRT_CRCERRS(hw->port_info->lport),
5350                            pf->offset_loaded, &os->crc_errors,
5351                            &ns->crc_errors);
5352         ice_stat_update_32(hw, GLPRT_ILLERRC(hw->port_info->lport),
5353                            pf->offset_loaded, &os->illegal_bytes,
5354                            &ns->illegal_bytes);
5355         /* GLPRT_ERRBC not supported */
5356         ice_stat_update_32(hw, GLPRT_MLFC(hw->port_info->lport),
5357                            pf->offset_loaded, &os->mac_local_faults,
5358                            &ns->mac_local_faults);
5359         ice_stat_update_32(hw, GLPRT_MRFC(hw->port_info->lport),
5360                            pf->offset_loaded, &os->mac_remote_faults,
5361                            &ns->mac_remote_faults);
5362
5363         ice_stat_update_32(hw, GLPRT_RLEC(hw->port_info->lport),
5364                            pf->offset_loaded, &os->rx_len_errors,
5365                            &ns->rx_len_errors);
5366
5367         ice_stat_update_32(hw, GLPRT_LXONRXC(hw->port_info->lport),
5368                            pf->offset_loaded, &os->link_xon_rx,
5369                            &ns->link_xon_rx);
5370         ice_stat_update_32(hw, GLPRT_LXOFFRXC(hw->port_info->lport),
5371                            pf->offset_loaded, &os->link_xoff_rx,
5372                            &ns->link_xoff_rx);
5373         ice_stat_update_32(hw, GLPRT_LXONTXC(hw->port_info->lport),
5374                            pf->offset_loaded, &os->link_xon_tx,
5375                            &ns->link_xon_tx);
5376         ice_stat_update_32(hw, GLPRT_LXOFFTXC(hw->port_info->lport),
5377                            pf->offset_loaded, &os->link_xoff_tx,
5378                            &ns->link_xoff_tx);
5379         ice_stat_update_40(hw, GLPRT_PRC64H(hw->port_info->lport),
5380                            GLPRT_PRC64L(hw->port_info->lport),
5381                            pf->offset_loaded, &os->rx_size_64,
5382                            &ns->rx_size_64);
5383         ice_stat_update_40(hw, GLPRT_PRC127H(hw->port_info->lport),
5384                            GLPRT_PRC127L(hw->port_info->lport),
5385                            pf->offset_loaded, &os->rx_size_127,
5386                            &ns->rx_size_127);
5387         ice_stat_update_40(hw, GLPRT_PRC255H(hw->port_info->lport),
5388                            GLPRT_PRC255L(hw->port_info->lport),
5389                            pf->offset_loaded, &os->rx_size_255,
5390                            &ns->rx_size_255);
5391         ice_stat_update_40(hw, GLPRT_PRC511H(hw->port_info->lport),
5392                            GLPRT_PRC511L(hw->port_info->lport),
5393                            pf->offset_loaded, &os->rx_size_511,
5394                            &ns->rx_size_511);
5395         ice_stat_update_40(hw, GLPRT_PRC1023H(hw->port_info->lport),
5396                            GLPRT_PRC1023L(hw->port_info->lport),
5397                            pf->offset_loaded, &os->rx_size_1023,
5398                            &ns->rx_size_1023);
5399         ice_stat_update_40(hw, GLPRT_PRC1522H(hw->port_info->lport),
5400                            GLPRT_PRC1522L(hw->port_info->lport),
5401                            pf->offset_loaded, &os->rx_size_1522,
5402                            &ns->rx_size_1522);
5403         ice_stat_update_40(hw, GLPRT_PRC9522H(hw->port_info->lport),
5404                            GLPRT_PRC9522L(hw->port_info->lport),
5405                            pf->offset_loaded, &os->rx_size_big,
5406                            &ns->rx_size_big);
5407         ice_stat_update_32(hw, GLPRT_RUC(hw->port_info->lport),
5408                            pf->offset_loaded, &os->rx_undersize,
5409                            &ns->rx_undersize);
5410         ice_stat_update_32(hw, GLPRT_RFC(hw->port_info->lport),
5411                            pf->offset_loaded, &os->rx_fragments,
5412                            &ns->rx_fragments);
5413         ice_stat_update_32(hw, GLPRT_ROC(hw->port_info->lport),
5414                            pf->offset_loaded, &os->rx_oversize,
5415                            &ns->rx_oversize);
5416         ice_stat_update_32(hw, GLPRT_RJC(hw->port_info->lport),
5417                            pf->offset_loaded, &os->rx_jabber,
5418                            &ns->rx_jabber);
5419         ice_stat_update_40(hw, GLPRT_PTC64H(hw->port_info->lport),
5420                            GLPRT_PTC64L(hw->port_info->lport),
5421                            pf->offset_loaded, &os->tx_size_64,
5422                            &ns->tx_size_64);
5423         ice_stat_update_40(hw, GLPRT_PTC127H(hw->port_info->lport),
5424                            GLPRT_PTC127L(hw->port_info->lport),
5425                            pf->offset_loaded, &os->tx_size_127,
5426                            &ns->tx_size_127);
5427         ice_stat_update_40(hw, GLPRT_PTC255H(hw->port_info->lport),
5428                            GLPRT_PTC255L(hw->port_info->lport),
5429                            pf->offset_loaded, &os->tx_size_255,
5430                            &ns->tx_size_255);
5431         ice_stat_update_40(hw, GLPRT_PTC511H(hw->port_info->lport),
5432                            GLPRT_PTC511L(hw->port_info->lport),
5433                            pf->offset_loaded, &os->tx_size_511,
5434                            &ns->tx_size_511);
5435         ice_stat_update_40(hw, GLPRT_PTC1023H(hw->port_info->lport),
5436                            GLPRT_PTC1023L(hw->port_info->lport),
5437                            pf->offset_loaded, &os->tx_size_1023,
5438                            &ns->tx_size_1023);
5439         ice_stat_update_40(hw, GLPRT_PTC1522H(hw->port_info->lport),
5440                            GLPRT_PTC1522L(hw->port_info->lport),
5441                            pf->offset_loaded, &os->tx_size_1522,
5442                            &ns->tx_size_1522);
5443         ice_stat_update_40(hw, GLPRT_PTC9522H(hw->port_info->lport),
5444                            GLPRT_PTC9522L(hw->port_info->lport),
5445                            pf->offset_loaded, &os->tx_size_big,
5446                            &ns->tx_size_big);
5447
5448         /* GLPRT_MSPDC not supported */
5449         /* GLPRT_XEC not supported */
5450
5451         pf->offset_loaded = true;
5452
5453         if (pf->main_vsi)
5454                 ice_update_vsi_stats(pf->main_vsi);
5455 }
5456
5457 /* Get all statistics of a port */
5458 static int
5459 ice_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
5460 {
5461         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5462         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5463         struct ice_hw_port_stats *ns = &pf->stats; /* new stats */
5464
5465         /* read the hw registers to refresh pf->stats, then derive the ethdev counters */
5466         ice_read_stats_registers(pf, hw);
5467
        stats->ipackets = pf->main_vsi->eth_stats.rx_unicast +
                          pf->main_vsi->eth_stats.rx_multicast +
                          pf->main_vsi->eth_stats.rx_broadcast -
                          pf->main_vsi->eth_stats.rx_discards;
        stats->opackets = ns->eth.tx_unicast +
                          ns->eth.tx_multicast +
                          ns->eth.tx_broadcast;
        stats->ibytes   = pf->main_vsi->eth_stats.rx_bytes;
        stats->obytes   = ns->eth.tx_bytes;
        stats->oerrors  = ns->eth.tx_errors +
                          pf->main_vsi->eth_stats.tx_errors;

        /* Rx Errors */
        stats->imissed  = ns->eth.rx_discards +
                          pf->main_vsi->eth_stats.rx_discards;
        stats->ierrors  = ns->crc_errors +
                          ns->rx_undersize +
                          ns->rx_oversize + ns->rx_fragments + ns->rx_jabber;

        PMD_DRV_LOG(DEBUG, "*************** PF stats start *****************");
        PMD_DRV_LOG(DEBUG, "rx_bytes:   %"PRIu64"", ns->eth.rx_bytes);
        PMD_DRV_LOG(DEBUG, "rx_unicast: %"PRIu64"", ns->eth.rx_unicast);
        PMD_DRV_LOG(DEBUG, "rx_multicast:%"PRIu64"", ns->eth.rx_multicast);
        PMD_DRV_LOG(DEBUG, "rx_broadcast:%"PRIu64"", ns->eth.rx_broadcast);
        PMD_DRV_LOG(DEBUG, "rx_discards:%"PRIu64"", ns->eth.rx_discards);
        PMD_DRV_LOG(DEBUG, "vsi rx_discards:%"PRIu64"",
                    pf->main_vsi->eth_stats.rx_discards);
        PMD_DRV_LOG(DEBUG, "rx_unknown_protocol:  %"PRIu64"",
                    ns->eth.rx_unknown_protocol);
        PMD_DRV_LOG(DEBUG, "tx_bytes:   %"PRIu64"", ns->eth.tx_bytes);
        PMD_DRV_LOG(DEBUG, "tx_unicast: %"PRIu64"", ns->eth.tx_unicast);
        PMD_DRV_LOG(DEBUG, "tx_multicast:%"PRIu64"", ns->eth.tx_multicast);
        PMD_DRV_LOG(DEBUG, "tx_broadcast:%"PRIu64"", ns->eth.tx_broadcast);
        PMD_DRV_LOG(DEBUG, "tx_discards:%"PRIu64"", ns->eth.tx_discards);
        PMD_DRV_LOG(DEBUG, "vsi tx_discards:%"PRIu64"",
                    pf->main_vsi->eth_stats.tx_discards);
        PMD_DRV_LOG(DEBUG, "tx_errors:          %"PRIu64"", ns->eth.tx_errors);

        PMD_DRV_LOG(DEBUG, "tx_dropped_link_down:       %"PRIu64"",
                    ns->tx_dropped_link_down);
        PMD_DRV_LOG(DEBUG, "crc_errors: %"PRIu64"", ns->crc_errors);
        PMD_DRV_LOG(DEBUG, "illegal_bytes:      %"PRIu64"",
                    ns->illegal_bytes);
        PMD_DRV_LOG(DEBUG, "error_bytes:        %"PRIu64"", ns->error_bytes);
        PMD_DRV_LOG(DEBUG, "mac_local_faults:   %"PRIu64"",
                    ns->mac_local_faults);
        PMD_DRV_LOG(DEBUG, "mac_remote_faults:  %"PRIu64"",
                    ns->mac_remote_faults);
        PMD_DRV_LOG(DEBUG, "link_xon_rx:        %"PRIu64"", ns->link_xon_rx);
        PMD_DRV_LOG(DEBUG, "link_xoff_rx:       %"PRIu64"", ns->link_xoff_rx);
        PMD_DRV_LOG(DEBUG, "link_xon_tx:        %"PRIu64"", ns->link_xon_tx);
        PMD_DRV_LOG(DEBUG, "link_xoff_tx:       %"PRIu64"", ns->link_xoff_tx);
        PMD_DRV_LOG(DEBUG, "rx_size_64:         %"PRIu64"", ns->rx_size_64);
        PMD_DRV_LOG(DEBUG, "rx_size_127:        %"PRIu64"", ns->rx_size_127);
        PMD_DRV_LOG(DEBUG, "rx_size_255:        %"PRIu64"", ns->rx_size_255);
        PMD_DRV_LOG(DEBUG, "rx_size_511:        %"PRIu64"", ns->rx_size_511);
        PMD_DRV_LOG(DEBUG, "rx_size_1023:       %"PRIu64"", ns->rx_size_1023);
        PMD_DRV_LOG(DEBUG, "rx_size_1522:       %"PRIu64"", ns->rx_size_1522);
        PMD_DRV_LOG(DEBUG, "rx_size_big:        %"PRIu64"", ns->rx_size_big);
        PMD_DRV_LOG(DEBUG, "rx_undersize:       %"PRIu64"", ns->rx_undersize);
        PMD_DRV_LOG(DEBUG, "rx_fragments:       %"PRIu64"", ns->rx_fragments);
        PMD_DRV_LOG(DEBUG, "rx_oversize:        %"PRIu64"", ns->rx_oversize);
        PMD_DRV_LOG(DEBUG, "rx_jabber:          %"PRIu64"", ns->rx_jabber);
        PMD_DRV_LOG(DEBUG, "tx_size_64:         %"PRIu64"", ns->tx_size_64);
        PMD_DRV_LOG(DEBUG, "tx_size_127:        %"PRIu64"", ns->tx_size_127);
        PMD_DRV_LOG(DEBUG, "tx_size_255:        %"PRIu64"", ns->tx_size_255);
        PMD_DRV_LOG(DEBUG, "tx_size_511:        %"PRIu64"", ns->tx_size_511);
        PMD_DRV_LOG(DEBUG, "tx_size_1023:       %"PRIu64"", ns->tx_size_1023);
        PMD_DRV_LOG(DEBUG, "tx_size_1522:       %"PRIu64"", ns->tx_size_1522);
        PMD_DRV_LOG(DEBUG, "tx_size_big:        %"PRIu64"", ns->tx_size_big);
        PMD_DRV_LOG(DEBUG, "rx_len_errors:      %"PRIu64"", ns->rx_len_errors);
        PMD_DRV_LOG(DEBUG, "************* PF stats end ****************");
        return 0;
}

/* Reset the statistics */
static int
ice_stats_reset(struct rte_eth_dev *dev)
{
        struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        /*
         * Mark the PF and VSI stats as stale so the next read reloads
         * the offsets, which effectively resets the counters.
         */
        pf->offset_loaded = false;
        if (pf->main_vsi)
                pf->main_vsi->offset_loaded = false;

        /*
         * Read the stats once so the current register values are
         * latched as the new offsets.
         */
        ice_read_stats_registers(pf, hw);

        return 0;
}

static uint32_t
ice_xstats_calc_num(void)
{
        uint32_t num;

        num = ICE_NB_ETH_XSTATS + ICE_NB_HW_PORT_XSTATS;

        return num;
}

static int
ice_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
               unsigned int n)
{
        struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        unsigned int i;
        unsigned int count;
        struct ice_hw_port_stats *hw_stats = &pf->stats;

        count = ice_xstats_calc_num();
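        /*
         * Per the ethdev xstats contract, when the caller's array is too
         * small the required number of entries is returned instead, so the
         * caller can resize and retry.
         */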
        if (n < count)
                return count;

        ice_read_stats_registers(pf, hw);

        if (!xstats)
                return 0;

        count = 0;

        /* Get stats from ice_eth_stats struct */
        for (i = 0; i < ICE_NB_ETH_XSTATS; i++) {
                xstats[count].value =
                        *(uint64_t *)((char *)&hw_stats->eth +
                                      ice_stats_strings[i].offset);
                xstats[count].id = count;
                count++;
        }

        /* Get individual stats from ice_hw_port struct */
        for (i = 0; i < ICE_NB_HW_PORT_XSTATS; i++) {
                xstats[count].value =
                        *(uint64_t *)((char *)hw_stats +
                                      ice_hw_port_strings[i].offset);
                xstats[count].id = count;
                count++;
        }

        return count;
}

static int ice_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
                                struct rte_eth_xstat_name *xstats_names,
                                __rte_unused unsigned int limit)
{
        unsigned int count = 0;
        unsigned int i;

        if (!xstats_names)
                return ice_xstats_calc_num();

        /* Note: limit checked in rte_eth_xstats_get_names() */

        /* Get stats from ice_eth_stats struct */
        for (i = 0; i < ICE_NB_ETH_XSTATS; i++) {
                strlcpy(xstats_names[count].name, ice_stats_strings[i].name,
                        sizeof(xstats_names[count].name));
                count++;
        }

        /* Get individual stats from ice_hw_port struct */
        for (i = 0; i < ICE_NB_HW_PORT_XSTATS; i++) {
                strlcpy(xstats_names[count].name, ice_hw_port_strings[i].name,
                        sizeof(xstats_names[count].name));
                count++;
        }

        return count;
}

static int
ice_dev_flow_ops_get(struct rte_eth_dev *dev,
                     const struct rte_flow_ops **ops)
{
        if (!dev)
                return -EINVAL;

        *ops = &ice_flow_ops;
        return 0;
}

/* Add UDP tunneling port */
static int
ice_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
                            struct rte_eth_udp_tunnel *udp_tunnel)
{
        int ret = 0;
        struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct ice_adapter *ad =
                ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);

        if (udp_tunnel == NULL)
                return -EINVAL;

        switch (udp_tunnel->prot_type) {
        case RTE_ETH_TUNNEL_TYPE_VXLAN:
                ret = ice_create_tunnel(hw, TNL_VXLAN, udp_tunnel->udp_port);
                if (!ret && ad->psr != NULL)
                        ice_parser_vxlan_tunnel_set(ad->psr,
                                        udp_tunnel->udp_port, true);
                break;
        default:
                PMD_DRV_LOG(ERR, "Invalid tunnel type");
                ret = -EINVAL;
                break;
        }

        return ret;
}

/* Delete UDP tunneling port */
static int
ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
                            struct rte_eth_udp_tunnel *udp_tunnel)
{
        int ret = 0;
        struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct ice_adapter *ad =
                ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);

        if (udp_tunnel == NULL)
                return -EINVAL;

        switch (udp_tunnel->prot_type) {
        case RTE_ETH_TUNNEL_TYPE_VXLAN:
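                /*
                 * Destroy only the tunnel entry matching this UDP port;
                 * the final argument leaves the "remove all tunnels"
                 * behaviour disabled.
                 */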
                ret = ice_destroy_tunnel(hw, udp_tunnel->udp_port, 0);
                if (!ret && ad->psr != NULL)
                        ice_parser_vxlan_tunnel_set(ad->psr,
                                        udp_tunnel->udp_port, false);
                break;
        default:
                PMD_DRV_LOG(ERR, "Invalid tunnel type");
                ret = -EINVAL;
                break;
        }

        return ret;
}

static int
ice_timesync_enable(struct rte_eth_dev *dev)
{
        struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct ice_adapter *ad =
                        ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
        int ret;

        if (dev->data->dev_started && !(dev->data->dev_conf.rxmode.offloads &
            RTE_ETH_RX_OFFLOAD_TIMESTAMP)) {
                PMD_DRV_LOG(ERR, "Rx timestamp offload not configured");
                return -1;
        }

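        /*
         * Only the function that owns the source timer initializes the PHC
         * and programs its nominal increment value.
         */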
        if (hw->func_caps.ts_func_info.src_tmr_owned) {
                ret = ice_ptp_init_phc(hw);
                if (ret) {
                        PMD_DRV_LOG(ERR, "Failed to initialize PHC");
                        return -1;
                }

                ret = ice_ptp_write_incval(hw, ICE_PTP_NOMINAL_INCVAL_E810);
                if (ret) {
                        PMD_DRV_LOG(ERR,
                                "Failed to write PHC increment time value");
                        return -1;
                }
        }

        /* Initialize cycle counters for system time/RX/TX timestamp */
        memset(&ad->systime_tc, 0, sizeof(struct rte_timecounter));
        memset(&ad->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
        memset(&ad->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));

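        /*
         * With the nominal increment value the hardware counter ticks in
         * nanoseconds, so the timecounters use a full 64-bit mask and no
         * shift, i.e. a 1:1 cycle-to-nanosecond conversion.
         */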
        ad->systime_tc.cc_mask = ICE_CYCLECOUNTER_MASK;
        ad->systime_tc.cc_shift = 0;
        ad->systime_tc.nsec_mask = 0;

        ad->rx_tstamp_tc.cc_mask = ICE_CYCLECOUNTER_MASK;
        ad->rx_tstamp_tc.cc_shift = 0;
        ad->rx_tstamp_tc.nsec_mask = 0;

        ad->tx_tstamp_tc.cc_mask = ICE_CYCLECOUNTER_MASK;
        ad->tx_tstamp_tc.cc_shift = 0;
        ad->tx_tstamp_tc.nsec_mask = 0;

        ad->ptp_ena = 1;

        return 0;
}

static int
ice_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
                               struct timespec *timestamp, uint32_t flags)
{
        struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct ice_adapter *ad =
                        ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
        struct ice_rx_queue *rxq;
        uint32_t ts_high;
        uint64_t ts_ns, ns;

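        /*
         * For this PMD the device-specific "flags" argument carries the
         * index of the Rx queue whose most recently latched timestamp is
         * to be read back.
         */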
        rxq = dev->data->rx_queues[flags];

        ts_high = rxq->time_high;
        ts_ns = ice_tstamp_convert_32b_64b(hw, ad, 1, ts_high);
        ns = rte_timecounter_update(&ad->rx_tstamp_tc, ts_ns);
        *timestamp = rte_ns_to_timespec(ns);

        return 0;
}

static int
ice_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
                               struct timespec *timestamp)
{
        struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct ice_adapter *ad =
                        ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
        uint8_t lport;
        uint64_t ts_ns, ns, tstamp;
        const uint64_t mask = 0xFFFFFFFF;
        int ret;

        lport = hw->port_info->lport;

        ret = ice_read_phy_tstamp(hw, lport, 0, &tstamp);
        if (ret) {
                PMD_DRV_LOG(ERR, "Failed to read phy timestamp");
                return -1;
        }

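        /*
         * The PHY returns a 40-bit timestamp: 32 bits of nominal
         * nanoseconds plus 8 low-order sub-nanosecond bits. The low bits
         * are shifted out before the value is extended to 64 bits.
         */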
        ts_ns = ice_tstamp_convert_32b_64b(hw, ad, 1, (tstamp >> 8) & mask);
        ns = rte_timecounter_update(&ad->tx_tstamp_tc, ts_ns);
        *timestamp = rte_ns_to_timespec(ns);

        return 0;
}

static int
ice_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
{
        struct ice_adapter *ad =
                        ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);

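        /*
         * Only the software timecounters are adjusted; the PHC itself is
         * left untouched, and the delta is applied whenever raw hardware
         * timestamps are converted.
         */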
        ad->systime_tc.nsec += delta;
        ad->rx_tstamp_tc.nsec += delta;
        ad->tx_tstamp_tc.nsec += delta;

        return 0;
}

static int
ice_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
{
        struct ice_adapter *ad =
                        ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
        uint64_t ns;

        ns = rte_timespec_to_ns(ts);

        ad->systime_tc.nsec = ns;
        ad->rx_tstamp_tc.nsec = ns;
        ad->tx_tstamp_tc.nsec = ns;

        return 0;
}

static int
ice_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
{
        struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct ice_adapter *ad =
                        ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
        uint32_t hi, lo, lo2;
        uint64_t time, ns;

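        /*
         * The 64-bit time is read as two 32-bit registers. Re-reading the
         * low word detects a rollover between the two reads; if it wrapped,
         * both words are sampled again so the pair is consistent.
         */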
        lo = ICE_READ_REG(hw, GLTSYN_TIME_L(0));
        hi = ICE_READ_REG(hw, GLTSYN_TIME_H(0));
        lo2 = ICE_READ_REG(hw, GLTSYN_TIME_L(0));

        if (lo2 < lo) {
                lo = ICE_READ_REG(hw, GLTSYN_TIME_L(0));
                hi = ICE_READ_REG(hw, GLTSYN_TIME_H(0));
        }

        time = ((uint64_t)hi << 32) | lo;
        ns = rte_timecounter_update(&ad->systime_tc, time);
        *ts = rte_ns_to_timespec(ns);

        return 0;
}

static int
ice_timesync_disable(struct rte_eth_dev *dev)
{
        struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct ice_adapter *ad =
                        ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
        uint64_t val;
        uint8_t lport;

        lport = hw->port_info->lport;

        ice_clear_phy_tstamp(hw, lport, 0);

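        /*
         * Disable the timer and zero its increment value so the PHC stops
         * advancing.
         */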
        val = ICE_READ_REG(hw, GLTSYN_ENA(0));
        val &= ~GLTSYN_ENA_TSYN_ENA_M;
        ICE_WRITE_REG(hw, GLTSYN_ENA(0), val);

        ICE_WRITE_REG(hw, GLTSYN_INCVAL_L(0), 0);
        ICE_WRITE_REG(hw, GLTSYN_INCVAL_H(0), 0);

        ad->ptp_ena = 0;

        return 0;
}

static int
ice_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
              struct rte_pci_device *pci_dev)
{
        return rte_eth_dev_pci_generic_probe(pci_dev,
                                             sizeof(struct ice_adapter),
                                             ice_dev_init);
}

static int
ice_pci_remove(struct rte_pci_device *pci_dev)
{
        return rte_eth_dev_pci_generic_remove(pci_dev, ice_dev_uninit);
}

static struct rte_pci_driver rte_ice_pmd = {
        .id_table = pci_id_ice_map,
        .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
        .probe = ice_pci_probe,
        .remove = ice_pci_remove,
};

/**
 * Driver initialization routine.
 * Invoked once at EAL init time.
 * Registers itself as the Poll Mode Driver for ice PCI devices.
 */
RTE_PMD_REGISTER_PCI(net_ice, rte_ice_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_ice, pci_id_ice_map);
RTE_PMD_REGISTER_KMOD_DEP(net_ice, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(net_ice,
                              ICE_HW_DEBUG_MASK_ARG "=0xXXX "
                              ICE_PROTO_XTR_ARG "=[queue:]<vlan|ipv4|ipv6|ipv6_flow|tcp|ip_offset> "
                              ICE_SAFE_MODE_SUPPORT_ARG "=<0|1> "
                              ICE_PIPELINE_MODE_SUPPORT_ARG "=<0|1> "
                              ICE_RX_LOW_LATENCY_ARG "=<0|1>");

RTE_LOG_REGISTER_SUFFIX(ice_logtype_init, init, NOTICE);
RTE_LOG_REGISTER_SUFFIX(ice_logtype_driver, driver, NOTICE);
#ifdef RTE_ETHDEV_DEBUG_RX
RTE_LOG_REGISTER_SUFFIX(ice_logtype_rx, rx, DEBUG);
#endif
#ifdef RTE_ETHDEV_DEBUG_TX
RTE_LOG_REGISTER_SUFFIX(ice_logtype_tx, tx, DEBUG);
#endif