net/ice: add safe mode
[dpdk.git] / drivers / net / ice / ice_ethdev.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018 Intel Corporation
3  */
4
5 #include <rte_ethdev_pci.h>
6
7 #include <stdio.h>
8 #include <sys/types.h>
9 #include <sys/stat.h>
10 #include <unistd.h>
11
12 #include "base/ice_sched.h"
13 #include "ice_ethdev.h"
14 #include "ice_rxtx.h"
15
16 #define ICE_MAX_QP_NUM "max_queue_pair_num"
17 #define ICE_DFLT_OUTER_TAG_TYPE ICE_AQ_VSI_OUTER_TAG_VLAN_9100
18 #define ICE_DFLT_PKG_FILE "/lib/firmware/intel/ice/ddp/ice.pkg"
19
/* Driver-wide dynamic log type identifiers (init-time and runtime). */
int ice_logtype_init;
int ice_logtype_driver;
22
23 static int ice_dev_configure(struct rte_eth_dev *dev);
24 static int ice_dev_start(struct rte_eth_dev *dev);
25 static void ice_dev_stop(struct rte_eth_dev *dev);
26 static void ice_dev_close(struct rte_eth_dev *dev);
27 static int ice_dev_reset(struct rte_eth_dev *dev);
28 static void ice_dev_info_get(struct rte_eth_dev *dev,
29                              struct rte_eth_dev_info *dev_info);
30 static int ice_link_update(struct rte_eth_dev *dev,
31                            int wait_to_complete);
32 static int ice_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
33 static int ice_vlan_offload_set(struct rte_eth_dev *dev, int mask);
34 static int ice_vlan_tpid_set(struct rte_eth_dev *dev,
35                              enum rte_vlan_type vlan_type,
36                              uint16_t tpid);
37 static int ice_rss_reta_update(struct rte_eth_dev *dev,
38                                struct rte_eth_rss_reta_entry64 *reta_conf,
39                                uint16_t reta_size);
40 static int ice_rss_reta_query(struct rte_eth_dev *dev,
41                               struct rte_eth_rss_reta_entry64 *reta_conf,
42                               uint16_t reta_size);
43 static int ice_rss_hash_update(struct rte_eth_dev *dev,
44                                struct rte_eth_rss_conf *rss_conf);
45 static int ice_rss_hash_conf_get(struct rte_eth_dev *dev,
46                                  struct rte_eth_rss_conf *rss_conf);
47 static void ice_promisc_enable(struct rte_eth_dev *dev);
48 static void ice_promisc_disable(struct rte_eth_dev *dev);
49 static void ice_allmulti_enable(struct rte_eth_dev *dev);
50 static void ice_allmulti_disable(struct rte_eth_dev *dev);
51 static int ice_vlan_filter_set(struct rte_eth_dev *dev,
52                                uint16_t vlan_id,
53                                int on);
54 static int ice_macaddr_set(struct rte_eth_dev *dev,
55                            struct ether_addr *mac_addr);
56 static int ice_macaddr_add(struct rte_eth_dev *dev,
57                            struct ether_addr *mac_addr,
58                            __rte_unused uint32_t index,
59                            uint32_t pool);
60 static void ice_macaddr_remove(struct rte_eth_dev *dev, uint32_t index);
61 static int ice_rx_queue_intr_enable(struct rte_eth_dev *dev,
62                                     uint16_t queue_id);
63 static int ice_rx_queue_intr_disable(struct rte_eth_dev *dev,
64                                      uint16_t queue_id);
65 static int ice_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
66                               size_t fw_size);
67 static int ice_vlan_pvid_set(struct rte_eth_dev *dev,
68                              uint16_t pvid, int on);
69 static int ice_get_eeprom_length(struct rte_eth_dev *dev);
70 static int ice_get_eeprom(struct rte_eth_dev *dev,
71                           struct rte_dev_eeprom_info *eeprom);
72 static int ice_stats_get(struct rte_eth_dev *dev,
73                          struct rte_eth_stats *stats);
74 static void ice_stats_reset(struct rte_eth_dev *dev);
75 static int ice_xstats_get(struct rte_eth_dev *dev,
76                           struct rte_eth_xstat *xstats, unsigned int n);
77 static int ice_xstats_get_names(struct rte_eth_dev *dev,
78                                 struct rte_eth_xstat_name *xstats_names,
79                                 unsigned int limit);
80
/* PCI device IDs probed by this driver: Intel E810-C variants. */
static const struct rte_pci_id pci_id_ice_map[] = {
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_BACKPLANE) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP) },
	{ .vendor_id = 0, /* sentinel */ },
};
87
/* ethdev callback table registered for the ice PF driver.
 * Note: .xstats_reset intentionally maps to ice_stats_reset — one reset
 * routine serves both basic and extended statistics.
 */
static const struct eth_dev_ops ice_eth_dev_ops = {
	.dev_configure                = ice_dev_configure,
	.dev_start                    = ice_dev_start,
	.dev_stop                     = ice_dev_stop,
	.dev_close                    = ice_dev_close,
	.dev_reset                    = ice_dev_reset,
	.rx_queue_start               = ice_rx_queue_start,
	.rx_queue_stop                = ice_rx_queue_stop,
	.tx_queue_start               = ice_tx_queue_start,
	.tx_queue_stop                = ice_tx_queue_stop,
	.rx_queue_setup               = ice_rx_queue_setup,
	.rx_queue_release             = ice_rx_queue_release,
	.tx_queue_setup               = ice_tx_queue_setup,
	.tx_queue_release             = ice_tx_queue_release,
	.dev_infos_get                = ice_dev_info_get,
	.dev_supported_ptypes_get     = ice_dev_supported_ptypes_get,
	.link_update                  = ice_link_update,
	.mtu_set                      = ice_mtu_set,
	.mac_addr_set                 = ice_macaddr_set,
	.mac_addr_add                 = ice_macaddr_add,
	.mac_addr_remove              = ice_macaddr_remove,
	.vlan_filter_set              = ice_vlan_filter_set,
	.vlan_offload_set             = ice_vlan_offload_set,
	.vlan_tpid_set                = ice_vlan_tpid_set,
	.reta_update                  = ice_rss_reta_update,
	.reta_query                   = ice_rss_reta_query,
	.rss_hash_update              = ice_rss_hash_update,
	.rss_hash_conf_get            = ice_rss_hash_conf_get,
	.promiscuous_enable           = ice_promisc_enable,
	.promiscuous_disable          = ice_promisc_disable,
	.allmulticast_enable          = ice_allmulti_enable,
	.allmulticast_disable         = ice_allmulti_disable,
	.rx_queue_intr_enable         = ice_rx_queue_intr_enable,
	.rx_queue_intr_disable        = ice_rx_queue_intr_disable,
	.fw_version_get               = ice_fw_version_get,
	.vlan_pvid_set                = ice_vlan_pvid_set,
	.rxq_info_get                 = ice_rxq_info_get,
	.txq_info_get                 = ice_txq_info_get,
	.get_eeprom_length            = ice_get_eeprom_length,
	.get_eeprom                   = ice_get_eeprom,
	.rx_queue_count               = ice_rx_queue_count,
	.rx_descriptor_status         = ice_rx_descriptor_status,
	.tx_descriptor_status         = ice_tx_descriptor_status,
	.stats_get                    = ice_stats_get,
	.stats_reset                  = ice_stats_reset,
	.xstats_get                   = ice_xstats_get,
	.xstats_get_names             = ice_xstats_get_names,
	.xstats_reset                 = ice_stats_reset,
};
137
/* Maps an exported xstat name to its field offset within a statistics
 * structure; used generically for both VSI and port-level tables below.
 */
struct ice_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned int offset;
};

/* Per-VSI (ethernet) statistics exposed through rte_eth_xstats. */
static const struct ice_xstats_name_off ice_stats_strings[] = {
	{"rx_unicast_packets", offsetof(struct ice_eth_stats, rx_unicast)},
	{"rx_multicast_packets", offsetof(struct ice_eth_stats, rx_multicast)},
	{"rx_broadcast_packets", offsetof(struct ice_eth_stats, rx_broadcast)},
	{"rx_dropped", offsetof(struct ice_eth_stats, rx_discards)},
	{"rx_unknown_protocol_packets", offsetof(struct ice_eth_stats,
		rx_unknown_protocol)},
	{"tx_unicast_packets", offsetof(struct ice_eth_stats, tx_unicast)},
	{"tx_multicast_packets", offsetof(struct ice_eth_stats, tx_multicast)},
	{"tx_broadcast_packets", offsetof(struct ice_eth_stats, tx_broadcast)},
	{"tx_dropped", offsetof(struct ice_eth_stats, tx_discards)},
};

/* Number of per-VSI xstats entries. */
#define ICE_NB_ETH_XSTATS (sizeof(ice_stats_strings) / \
		sizeof(ice_stats_strings[0]))

/* Port-level hardware statistics (MAC counters, flow control, RX/TX
 * size histograms) exposed through rte_eth_xstats.
 */
static const struct ice_xstats_name_off ice_hw_port_strings[] = {
	{"tx_link_down_dropped", offsetof(struct ice_hw_port_stats,
		tx_dropped_link_down)},
	{"rx_crc_errors", offsetof(struct ice_hw_port_stats, crc_errors)},
	{"rx_illegal_byte_errors", offsetof(struct ice_hw_port_stats,
		illegal_bytes)},
	{"rx_error_bytes", offsetof(struct ice_hw_port_stats, error_bytes)},
	{"mac_local_errors", offsetof(struct ice_hw_port_stats,
		mac_local_faults)},
	{"mac_remote_errors", offsetof(struct ice_hw_port_stats,
		mac_remote_faults)},
	{"rx_len_errors", offsetof(struct ice_hw_port_stats,
		rx_len_errors)},
	{"tx_xon_packets", offsetof(struct ice_hw_port_stats, link_xon_tx)},
	{"rx_xon_packets", offsetof(struct ice_hw_port_stats, link_xon_rx)},
	{"tx_xoff_packets", offsetof(struct ice_hw_port_stats, link_xoff_tx)},
	{"rx_xoff_packets", offsetof(struct ice_hw_port_stats, link_xoff_rx)},
	{"rx_size_64_packets", offsetof(struct ice_hw_port_stats, rx_size_64)},
	{"rx_size_65_to_127_packets", offsetof(struct ice_hw_port_stats,
		rx_size_127)},
	{"rx_size_128_to_255_packets", offsetof(struct ice_hw_port_stats,
		rx_size_255)},
	{"rx_size_256_to_511_packets", offsetof(struct ice_hw_port_stats,
		rx_size_511)},
	{"rx_size_512_to_1023_packets", offsetof(struct ice_hw_port_stats,
		rx_size_1023)},
	{"rx_size_1024_to_1522_packets", offsetof(struct ice_hw_port_stats,
		rx_size_1522)},
	{"rx_size_1523_to_max_packets", offsetof(struct ice_hw_port_stats,
		rx_size_big)},
	{"rx_undersized_errors", offsetof(struct ice_hw_port_stats,
		rx_undersize)},
	{"rx_oversize_errors", offsetof(struct ice_hw_port_stats,
		rx_oversize)},
	{"rx_mac_short_pkt_dropped", offsetof(struct ice_hw_port_stats,
		mac_short_pkt_dropped)},
	{"rx_fragmented_errors", offsetof(struct ice_hw_port_stats,
		rx_fragments)},
	{"rx_jabber_errors", offsetof(struct ice_hw_port_stats, rx_jabber)},
	{"tx_size_64_packets", offsetof(struct ice_hw_port_stats, tx_size_64)},
	{"tx_size_65_to_127_packets", offsetof(struct ice_hw_port_stats,
		tx_size_127)},
	{"tx_size_128_to_255_packets", offsetof(struct ice_hw_port_stats,
		tx_size_255)},
	{"tx_size_256_to_511_packets", offsetof(struct ice_hw_port_stats,
		tx_size_511)},
	{"tx_size_512_to_1023_packets", offsetof(struct ice_hw_port_stats,
		tx_size_1023)},
	{"tx_size_1024_to_1522_packets", offsetof(struct ice_hw_port_stats,
		tx_size_1522)},
	{"tx_size_1523_to_max_packets", offsetof(struct ice_hw_port_stats,
		tx_size_big)},
};

/* Number of port-level xstats entries. */
#define ICE_NB_HW_PORT_XSTATS (sizeof(ice_hw_port_strings) / \
		sizeof(ice_hw_port_strings[0]))
216
/* Pre-set control queue ring lengths and buffer sizes that the shared
 * base code reads when it allocates the admin queue and the PF/VF
 * mailbox queue.
 */
static void
ice_init_controlq_parameter(struct ice_hw *hw)
{
	/* fields for adminq */
	hw->adminq.num_rq_entries = ICE_ADMINQ_LEN;
	hw->adminq.num_sq_entries = ICE_ADMINQ_LEN;
	hw->adminq.rq_buf_size = ICE_ADMINQ_BUF_SZ;
	hw->adminq.sq_buf_size = ICE_ADMINQ_BUF_SZ;

	/* fields for mailboxq, DPDK used as PF host */
	hw->mailboxq.num_rq_entries = ICE_MAILBOXQ_LEN;
	hw->mailboxq.num_sq_entries = ICE_MAILBOXQ_LEN;
	hw->mailboxq.rq_buf_size = ICE_MAILBOXQ_BUF_SZ;
	hw->mailboxq.sq_buf_size = ICE_MAILBOXQ_BUF_SZ;
}
232
233 static int
234 ice_check_qp_num(const char *key, const char *qp_value,
235                  __rte_unused void *opaque)
236 {
237         char *end = NULL;
238         int num = 0;
239
240         while (isblank(*qp_value))
241                 qp_value++;
242
243         num = strtoul(qp_value, &end, 10);
244
245         if (!num || (*end == '-') || errno) {
246                 PMD_DRV_LOG(WARNING, "invalid value:\"%s\" for key:\"%s\", "
247                             "value must be > 0",
248                             qp_value, key);
249                 return -1;
250         }
251
252         return num;
253 }
254
255 static int
256 ice_config_max_queue_pair_num(struct rte_devargs *devargs)
257 {
258         struct rte_kvargs *kvlist;
259         const char *queue_num_key = ICE_MAX_QP_NUM;
260         int ret;
261
262         if (!devargs)
263                 return 0;
264
265         kvlist = rte_kvargs_parse(devargs->args, NULL);
266         if (!kvlist)
267                 return 0;
268
269         if (!rte_kvargs_count(kvlist, queue_num_key)) {
270                 rte_kvargs_free(kvlist);
271                 return 0;
272         }
273
274         if (rte_kvargs_process(kvlist, queue_num_key,
275                                ice_check_qp_num, NULL) < 0) {
276                 rte_kvargs_free(kvlist);
277                 return 0;
278         }
279         ret = rte_kvargs_process(kvlist, queue_num_key,
280                                  ice_check_qp_num, NULL);
281         rte_kvargs_free(kvlist);
282
283         return ret;
284 }
285
286 static int
287 ice_res_pool_init(struct ice_res_pool_info *pool, uint32_t base,
288                   uint32_t num)
289 {
290         struct pool_entry *entry;
291
292         if (!pool || !num)
293                 return -EINVAL;
294
295         entry = rte_zmalloc(NULL, sizeof(*entry), 0);
296         if (!entry) {
297                 PMD_INIT_LOG(ERR,
298                              "Failed to allocate memory for resource pool");
299                 return -ENOMEM;
300         }
301
302         /* queue heap initialize */
303         pool->num_free = num;
304         pool->num_alloc = 0;
305         pool->base = base;
306         LIST_INIT(&pool->alloc_list);
307         LIST_INIT(&pool->free_list);
308
309         /* Initialize element  */
310         entry->base = 0;
311         entry->len = num;
312
313         LIST_INSERT_HEAD(&pool->free_list, entry, next);
314         return 0;
315 }
316
/* Allocate @num contiguous elements from @pool using a best-fit scan of
 * the free list.
 *
 * Return: the absolute base index (entry base + pool base) of the
 * allocated range on success, -EINVAL on bad arguments, -ENOMEM when no
 * free entry is large enough or internal allocation fails.
 */
static int
ice_res_pool_alloc(struct ice_res_pool_info *pool,
		   uint16_t num)
{
	struct pool_entry *entry, *valid_entry;

	if (!pool || !num) {
		PMD_INIT_LOG(ERR, "Invalid parameter");
		return -EINVAL;
	}

	if (pool->num_free < num) {
		PMD_INIT_LOG(ERR, "No resource. ask:%u, available:%u",
			     num, pool->num_free);
		return -ENOMEM;
	}

	valid_entry = NULL;
	/* Best-fit: prefer an exact-size entry, otherwise the smallest
	 * free entry that can still satisfy the request.
	 */
	LIST_FOREACH(entry, &pool->free_list, next) {
		if (entry->len >= num) {
			/* Find best one */
			if (entry->len == num) {
				valid_entry = entry;
				break;
			}
			if (!valid_entry ||
			    valid_entry->len > entry->len)
				valid_entry = entry;
		}
	}

	/* Not find one to satisfy the request, return */
	if (!valid_entry) {
		PMD_INIT_LOG(ERR, "No valid entry found");
		return -ENOMEM;
	}
	/**
	 * The entry has exactly the requested size: unlink it from the
	 * free_list and reuse the node itself for the allocation.
	 */
	if (valid_entry->len == num) {
		LIST_REMOVE(valid_entry, next);
	} else {
		/**
		 * The entry is larger than requested: carve a new node
		 * of size @num off its front and shrink the free entry
		 * in place (advance its base, reduce its length).
		 */
		entry = rte_zmalloc(NULL, sizeof(*entry), 0);
		if (!entry) {
			PMD_INIT_LOG(ERR,
				     "Failed to allocate memory for "
				     "resource pool");
			return -ENOMEM;
		}
		entry->base = valid_entry->base;
		entry->len = num;
		valid_entry->base += num;
		valid_entry->len -= num;
		valid_entry = entry;
	}

	/* Insert it into alloc list, not sorted */
	LIST_INSERT_HEAD(&pool->alloc_list, valid_entry, next);

	pool->num_free -= valid_entry->len;
	pool->num_alloc += valid_entry->len;

	return valid_entry->base + pool->base;
}
388
389 static void
390 ice_res_pool_destroy(struct ice_res_pool_info *pool)
391 {
392         struct pool_entry *entry, *next_entry;
393
394         if (!pool)
395                 return;
396
397         for (entry = LIST_FIRST(&pool->alloc_list);
398              entry && (next_entry = LIST_NEXT(entry, next), 1);
399              entry = next_entry) {
400                 LIST_REMOVE(entry, next);
401                 rte_free(entry);
402         }
403
404         for (entry = LIST_FIRST(&pool->free_list);
405              entry && (next_entry = LIST_NEXT(entry, next), 1);
406              entry = next_entry) {
407                 LIST_REMOVE(entry, next);
408                 rte_free(entry);
409         }
410
411         pool->num_free = 0;
412         pool->num_alloc = 0;
413         pool->base = 0;
414         LIST_INIT(&pool->alloc_list);
415         LIST_INIT(&pool->free_list);
416 }
417
/* Fill the RSS-related fields of a VSI context with the driver
 * defaults: per-VSI LUT, Toeplitz hashing, and TC override enabled.
 */
static void
ice_vsi_config_default_rss(struct ice_aqc_vsi_props *info)
{
	/* Set VSI LUT selection */
	info->q_opt_rss = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI &
			  ICE_AQ_VSI_Q_OPT_RSS_LUT_M;
	/* Set Hash scheme */
	info->q_opt_rss |= ICE_AQ_VSI_Q_OPT_RSS_TPLZ &
			   ICE_AQ_VSI_Q_OPT_RSS_HASH_M;
	/* enable TC */
	info->q_opt_tc = ICE_AQ_VSI_Q_OPT_TC_OVR_M;
}
430
/* Program the TC-to-queue mapping fields of a VSI context.  Only TC0 is
 * supported; all of the VSI's queues are assigned to it contiguously.
 *
 * Return: 0 on success, -ENOTSUP if @enabled_tcmap enables anything
 * other than TC0 alone.
 */
static enum ice_status
ice_vsi_config_tc_queue_mapping(struct ice_vsi *vsi,
				struct ice_aqc_vsi_props *info,
				uint8_t enabled_tcmap)
{
	uint16_t bsf, qp_idx;

	/* default tc 0 now. Multi-TC supporting need to be done later.
	 * Configure TC and queue mapping parameters, for enabled TC,
	 * allocate qpnum_per_tc queues to this traffic.
	 */
	if (enabled_tcmap != 0x01) {
		PMD_INIT_LOG(ERR, "only TC0 is supported");
		return -ENOTSUP;
	}

	vsi->nb_qps = RTE_MIN(vsi->nb_qps, ICE_MAX_Q_PER_TC);
	/* NOTE(review): rte_bsf32 returns the index of the LOWEST set
	 * bit, so a non-power-of-two nb_qps (e.g. 6) collapses to its
	 * lowest power-of-two factor (2), not the nearest power of two.
	 * Also assumes nb_qps >= 1 — confirm callers guarantee this.
	 */
	bsf = rte_bsf32(vsi->nb_qps);
	/* Adjust the queue number to actual queues that can be applied */
	vsi->nb_qps = 0x1 << bsf;

	qp_idx = 0;
	/* Set tc and queue mapping with VSI: queue offset in the low
	 * bits, log2(queue count) in the NUM field.
	 */
	info->tc_mapping[0] = rte_cpu_to_le_16((qp_idx <<
						ICE_AQ_VSI_TC_Q_OFFSET_S) |
					       (bsf << ICE_AQ_VSI_TC_Q_NUM_S));

	/* Associate queue number with VSI */
	info->mapping_flags |= rte_cpu_to_le_16(ICE_AQ_VSI_Q_MAP_CONTIG);
	info->q_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
	info->q_mapping[1] = rte_cpu_to_le_16(vsi->nb_qps);
	info->valid_sections |=
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
	/* Set the info.ingress_table and info.egress_table
	 * for UP translate table. Now just set it to 1:1 map by default
	 * -- 0b 111 110 101 100 011 010 001 000 == 0xFAC688
	 */
#define ICE_TC_QUEUE_TABLE_DFLT 0x00FAC688
	info->ingress_table  = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT);
	info->egress_table   = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT);
	info->outer_up_table = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT);
	return 0;
}
474
/* Validate the port's factory (LAN) MAC address, mirror it into the
 * permanent-address field, and publish it as dev->data->mac_addrs[0].
 *
 * Return: 0 on success, -EINVAL if the hardware reports a non-unicast
 * address, -ENOMEM if the mac_addrs array cannot be allocated.
 */
static int
ice_init_mac_address(struct rte_eth_dev *dev)
{
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (!is_unicast_ether_addr
		((struct ether_addr *)hw->port_info[0].mac.lan_addr)) {
		PMD_INIT_LOG(ERR, "Invalid MAC address");
		return -EINVAL;
	}

	/* Record the factory address as the permanent address. */
	ether_addr_copy((struct ether_addr *)hw->port_info[0].mac.lan_addr,
			(struct ether_addr *)hw->port_info[0].mac.perm_addr);

	/* Only one MAC slot is allocated here. */
	dev->data->mac_addrs = rte_zmalloc(NULL, sizeof(struct ether_addr), 0);
	if (!dev->data->mac_addrs) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory to store mac address");
		return -ENOMEM;
	}
	/* store it to dev data */
	ether_addr_copy((struct ether_addr *)hw->port_info[0].mac.perm_addr,
			&dev->data->mac_addrs[0]);
	return 0;
}
500
501 /* Find out specific MAC filter */
502 static struct ice_mac_filter *
503 ice_find_mac_filter(struct ice_vsi *vsi, struct ether_addr *macaddr)
504 {
505         struct ice_mac_filter *f;
506
507         TAILQ_FOREACH(f, &vsi->mac_list, next) {
508                 if (is_same_ether_addr(macaddr, &f->mac_info.mac_addr))
509                         return f;
510         }
511
512         return NULL;
513 }
514
/* Install a MAC filter on @vsi in hardware and track it on the VSI's
 * software mac_list.  A duplicate request is a silent success.
 *
 * Return: 0 on success or if the filter already exists, -ENOMEM on
 * allocation failure, -EINVAL when the hardware rejects the filter.
 */
static int
ice_add_mac_filter(struct ice_vsi *vsi, struct ether_addr *mac_addr)
{
	struct ice_fltr_list_entry *m_list_itr = NULL;
	struct ice_mac_filter *f;
	struct LIST_HEAD_TYPE list_head;
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	int ret = 0;

	/* If it's added and configured, return */
	f = ice_find_mac_filter(vsi, mac_addr);
	if (f) {
		PMD_DRV_LOG(INFO, "This MAC filter already exists.");
		return 0;
	}

	INIT_LIST_HEAD(&list_head);

	/* Build a one-element filter list for the base-code request. */
	m_list_itr = (struct ice_fltr_list_entry *)
		ice_malloc(hw, sizeof(*m_list_itr));
	if (!m_list_itr) {
		ret = -ENOMEM;
		goto DONE;
	}
	ice_memcpy(m_list_itr->fltr_info.l_data.mac.mac_addr,
		   mac_addr, ETH_ALEN, ICE_NONDMA_TO_NONDMA);
	m_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
	m_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
	m_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_MAC;
	m_list_itr->fltr_info.flag = ICE_FLTR_TX;
	m_list_itr->fltr_info.vsi_handle = vsi->idx;

	LIST_ADD(&m_list_itr->list_entry, &list_head);

	/* Add the mac */
	ret = ice_add_mac(hw, &list_head);
	if (ret != ICE_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to add MAC filter");
		ret = -EINVAL;
		goto DONE;
	}
	/* Add the mac addr into mac list */
	f = rte_zmalloc(NULL, sizeof(*f), 0);
	if (!f) {
		/* NOTE(review): the hardware filter added above is not
		 * rolled back on this path, leaving it untracked by the
		 * software list — confirm whether that is acceptable.
		 */
		PMD_DRV_LOG(ERR, "failed to allocate memory");
		ret = -ENOMEM;
		goto DONE;
	}
	rte_memcpy(&f->mac_info.mac_addr, mac_addr, ETH_ADDR_LEN);
	TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
	vsi->mac_num++;

	ret = 0;

DONE:
	/* rte_free(NULL) is a no-op, so this covers every path. */
	rte_free(m_list_itr);
	return ret;
}
573
/* Remove a MAC filter from @vsi in hardware and drop its entry from
 * the VSI's software mac_list (the tracked node is freed here).
 *
 * Return: 0 on success, -EINVAL when the filter is not tracked or the
 * hardware rejects the removal, -ENOMEM on allocation failure.
 */
static int
ice_remove_mac_filter(struct ice_vsi *vsi, struct ether_addr *mac_addr)
{
	struct ice_fltr_list_entry *m_list_itr = NULL;
	struct ice_mac_filter *f;
	struct LIST_HEAD_TYPE list_head;
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	int ret = 0;

	/* Can't find it, return an error */
	f = ice_find_mac_filter(vsi, mac_addr);
	if (!f)
		return -EINVAL;

	INIT_LIST_HEAD(&list_head);

	/* Build a one-element filter list mirroring the add request. */
	m_list_itr = (struct ice_fltr_list_entry *)
		ice_malloc(hw, sizeof(*m_list_itr));
	if (!m_list_itr) {
		ret = -ENOMEM;
		goto DONE;
	}
	ice_memcpy(m_list_itr->fltr_info.l_data.mac.mac_addr,
		   mac_addr, ETH_ALEN, ICE_NONDMA_TO_NONDMA);
	m_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
	m_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
	m_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_MAC;
	m_list_itr->fltr_info.flag = ICE_FLTR_TX;
	m_list_itr->fltr_info.vsi_handle = vsi->idx;

	LIST_ADD(&m_list_itr->list_entry, &list_head);

	/* remove the mac filter */
	ret = ice_remove_mac(hw, &list_head);
	if (ret != ICE_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to remove MAC filter");
		ret = -EINVAL;
		goto DONE;
	}

	/* Remove the mac addr from mac list */
	TAILQ_REMOVE(&vsi->mac_list, f, next);
	rte_free(f);
	vsi->mac_num--;

	ret = 0;
DONE:
	/* rte_free(NULL) is a no-op, so this covers every path. */
	rte_free(m_list_itr);
	return ret;
}
624
625 /* Find out specific VLAN filter */
626 static struct ice_vlan_filter *
627 ice_find_vlan_filter(struct ice_vsi *vsi, uint16_t vlan_id)
628 {
629         struct ice_vlan_filter *f;
630
631         TAILQ_FOREACH(f, &vsi->vlan_list, next) {
632                 if (vlan_id == f->vlan_info.vlan_id)
633                         return f;
634         }
635
636         return NULL;
637 }
638
/* Install a VLAN filter for @vlan_id on @vsi and track it on the VSI's
 * software vlan_list.  A duplicate request is a silent success; when
 * neither anti-spoof nor VLAN filtering is enabled on the VSI, no
 * hardware filter is needed and 0 is returned without doing anything.
 *
 * Return: 0 on success or no-op, -EINVAL on bad VLAN ID or hardware
 * rejection, -ENOMEM on allocation failure.
 */
static int
ice_add_vlan_filter(struct ice_vsi *vsi, uint16_t vlan_id)
{
	struct ice_fltr_list_entry *v_list_itr = NULL;
	struct ice_vlan_filter *f;
	struct LIST_HEAD_TYPE list_head;
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	int ret = 0;

	if (!vsi || vlan_id > ETHER_MAX_VLAN_ID)
		return -EINVAL;

	/* If it's added and configured, return. */
	f = ice_find_vlan_filter(vsi, vlan_id);
	if (f) {
		PMD_DRV_LOG(INFO, "This VLAN filter already exists.");
		return 0;
	}

	if (!vsi->vlan_anti_spoof_on && !vsi->vlan_filter_on)
		return 0;

	INIT_LIST_HEAD(&list_head);

	/* Build a one-element filter list for the base-code request. */
	v_list_itr = (struct ice_fltr_list_entry *)
		      ice_malloc(hw, sizeof(*v_list_itr));
	if (!v_list_itr) {
		ret = -ENOMEM;
		goto DONE;
	}
	v_list_itr->fltr_info.l_data.vlan.vlan_id = vlan_id;
	v_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
	v_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
	v_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
	v_list_itr->fltr_info.flag = ICE_FLTR_TX;
	v_list_itr->fltr_info.vsi_handle = vsi->idx;

	LIST_ADD(&v_list_itr->list_entry, &list_head);

	/* Add the vlan */
	ret = ice_add_vlan(hw, &list_head);
	if (ret != ICE_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to add VLAN filter");
		ret = -EINVAL;
		goto DONE;
	}

	/* Add vlan into vlan list */
	f = rte_zmalloc(NULL, sizeof(*f), 0);
	if (!f) {
		/* NOTE(review): the hardware filter added above is not
		 * rolled back on this path — confirm acceptability.
		 */
		PMD_DRV_LOG(ERR, "failed to allocate memory");
		ret = -ENOMEM;
		goto DONE;
	}
	f->vlan_info.vlan_id = vlan_id;
	TAILQ_INSERT_TAIL(&vsi->vlan_list, f, next);
	vsi->vlan_num++;

	ret = 0;

DONE:
	/* rte_free(NULL) is a no-op, so this covers every path. */
	rte_free(v_list_itr);
	return ret;
}
703
/* Remove the VLAN filter for @vlan_id from @vsi in hardware and drop
 * its entry from the VSI's software vlan_list (the node is freed here).
 *
 * Return: 0 on success, -EINVAL on bad VLAN ID, untracked filter, or
 * hardware rejection, -ENOMEM on allocation failure.
 */
static int
ice_remove_vlan_filter(struct ice_vsi *vsi, uint16_t vlan_id)
{
	struct ice_fltr_list_entry *v_list_itr = NULL;
	struct ice_vlan_filter *f;
	struct LIST_HEAD_TYPE list_head;
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	int ret = 0;

	/**
	 * Vlan 0 is the generic filter for untagged packets
	 * and can't be removed.
	 */
	if (!vsi || vlan_id == 0 || vlan_id > ETHER_MAX_VLAN_ID)
		return -EINVAL;

	/* Can't find it, return an error */
	f = ice_find_vlan_filter(vsi, vlan_id);
	if (!f)
		return -EINVAL;

	INIT_LIST_HEAD(&list_head);

	/* Build a one-element filter list mirroring the add request. */
	v_list_itr = (struct ice_fltr_list_entry *)
		      ice_malloc(hw, sizeof(*v_list_itr));
	if (!v_list_itr) {
		ret = -ENOMEM;
		goto DONE;
	}

	v_list_itr->fltr_info.l_data.vlan.vlan_id = vlan_id;
	v_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
	v_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
	v_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
	v_list_itr->fltr_info.flag = ICE_FLTR_TX;
	v_list_itr->fltr_info.vsi_handle = vsi->idx;

	LIST_ADD(&v_list_itr->list_entry, &list_head);

	/* remove the vlan filter */
	ret = ice_remove_vlan(hw, &list_head);
	if (ret != ICE_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to remove VLAN filter");
		ret = -EINVAL;
		goto DONE;
	}

	/* Remove the vlan id from vlan list */
	TAILQ_REMOVE(&vsi->vlan_list, f, next);
	rte_free(f);
	vsi->vlan_num--;

	ret = 0;
DONE:
	/* rte_free(NULL) is a no-op, so this covers every path. */
	rte_free(v_list_itr);
	return ret;
}
761
762 static int
763 ice_remove_all_mac_vlan_filters(struct ice_vsi *vsi)
764 {
765         struct ice_mac_filter *m_f;
766         struct ice_vlan_filter *v_f;
767         int ret = 0;
768
769         if (!vsi || !vsi->mac_num)
770                 return -EINVAL;
771
772         TAILQ_FOREACH(m_f, &vsi->mac_list, next) {
773                 ret = ice_remove_mac_filter(vsi, &m_f->mac_info.mac_addr);
774                 if (ret != ICE_SUCCESS) {
775                         ret = -EINVAL;
776                         goto DONE;
777                 }
778         }
779
780         if (vsi->vlan_num == 0)
781                 return 0;
782
783         TAILQ_FOREACH(v_f, &vsi->vlan_list, next) {
784                 ret = ice_remove_vlan_filter(vsi, v_f->vlan_info.vlan_id);
785                 if (ret != ICE_SUCCESS) {
786                         ret = -EINVAL;
787                         goto DONE;
788                 }
789         }
790
791 DONE:
792         return ret;
793 }
794
795 static int
796 ice_vsi_config_qinq_insertion(struct ice_vsi *vsi, bool on)
797 {
798         struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
799         struct ice_vsi_ctx ctxt;
800         uint8_t qinq_flags;
801         int ret = 0;
802
803         /* Check if it has been already on or off */
804         if (vsi->info.valid_sections &
805                 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID)) {
806                 if (on) {
807                         if ((vsi->info.outer_tag_flags &
808                              ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST) ==
809                             ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST)
810                                 return 0; /* already on */
811                 } else {
812                         if (!(vsi->info.outer_tag_flags &
813                               ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST))
814                                 return 0; /* already off */
815                 }
816         }
817
818         if (on)
819                 qinq_flags = ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST;
820         else
821                 qinq_flags = 0;
822         /* clear global insertion and use per packet insertion */
823         vsi->info.outer_tag_flags &= ~(ICE_AQ_VSI_OUTER_TAG_INSERT);
824         vsi->info.outer_tag_flags &= ~(ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST);
825         vsi->info.outer_tag_flags |= qinq_flags;
826         /* use default vlan type 0x8100 */
827         vsi->info.outer_tag_flags &= ~(ICE_AQ_VSI_OUTER_TAG_TYPE_M);
828         vsi->info.outer_tag_flags |= ICE_DFLT_OUTER_TAG_TYPE <<
829                                      ICE_AQ_VSI_OUTER_TAG_TYPE_S;
830         (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
831         ctxt.info.valid_sections =
832                         rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);
833         ctxt.vsi_num = vsi->vsi_id;
834         ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
835         if (ret) {
836                 PMD_DRV_LOG(INFO,
837                             "Update VSI failed to %s qinq stripping",
838                             on ? "enable" : "disable");
839                 return -EINVAL;
840         }
841
842         vsi->info.valid_sections |=
843                 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);
844
845         return ret;
846 }
847
848 static int
849 ice_vsi_config_qinq_stripping(struct ice_vsi *vsi, bool on)
850 {
851         struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
852         struct ice_vsi_ctx ctxt;
853         uint8_t qinq_flags;
854         int ret = 0;
855
856         /* Check if it has been already on or off */
857         if (vsi->info.valid_sections &
858                 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID)) {
859                 if (on) {
860                         if ((vsi->info.outer_tag_flags &
861                              ICE_AQ_VSI_OUTER_TAG_MODE_M) ==
862                             ICE_AQ_VSI_OUTER_TAG_COPY)
863                                 return 0; /* already on */
864                 } else {
865                         if ((vsi->info.outer_tag_flags &
866                              ICE_AQ_VSI_OUTER_TAG_MODE_M) ==
867                             ICE_AQ_VSI_OUTER_TAG_NOTHING)
868                                 return 0; /* already off */
869                 }
870         }
871
872         if (on)
873                 qinq_flags = ICE_AQ_VSI_OUTER_TAG_COPY;
874         else
875                 qinq_flags = ICE_AQ_VSI_OUTER_TAG_NOTHING;
876         vsi->info.outer_tag_flags &= ~(ICE_AQ_VSI_OUTER_TAG_MODE_M);
877         vsi->info.outer_tag_flags |= qinq_flags;
878         /* use default vlan type 0x8100 */
879         vsi->info.outer_tag_flags &= ~(ICE_AQ_VSI_OUTER_TAG_TYPE_M);
880         vsi->info.outer_tag_flags |= ICE_DFLT_OUTER_TAG_TYPE <<
881                                      ICE_AQ_VSI_OUTER_TAG_TYPE_S;
882         (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
883         ctxt.info.valid_sections =
884                         rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);
885         ctxt.vsi_num = vsi->vsi_id;
886         ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
887         if (ret) {
888                 PMD_DRV_LOG(INFO,
889                             "Update VSI failed to %s qinq stripping",
890                             on ? "enable" : "disable");
891                 return -EINVAL;
892         }
893
894         vsi->info.valid_sections |=
895                 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);
896
897         return ret;
898 }
899
900 static int
901 ice_vsi_config_double_vlan(struct ice_vsi *vsi, int on)
902 {
903         int ret;
904
905         ret = ice_vsi_config_qinq_stripping(vsi, on);
906         if (ret)
907                 PMD_DRV_LOG(ERR, "Fail to set qinq stripping - %d", ret);
908
909         ret = ice_vsi_config_qinq_insertion(vsi, on);
910         if (ret)
911                 PMD_DRV_LOG(ERR, "Fail to set qinq insertion - %d", ret);
912
913         return ret;
914 }
915
/* Enable IRQ0: unmask the PF "other interrupt cause" (OICR) vector.
 * With ICE_LSE_SPT defined, link-state changes are delivered through the
 * firmware AdminQ (PFINT_FW_CTL) instead of the OICR link-status cause,
 * so that cause is masked out of the OICR enable set.
 */
static void
ice_pf_enable_irq0(struct ice_hw *hw)
{
	/* reset the registers */
	ICE_WRITE_REG(hw, PFINT_OICR_ENA, 0);
	/* read back OICR — presumably clears latched causes; confirm
	 * against the hardware spec
	 */
	ICE_READ_REG(hw, PFINT_OICR);

#ifdef ICE_LSE_SPT
	/* enable every OICR cause except the link-status change */
	ICE_WRITE_REG(hw, PFINT_OICR_ENA,
		      (uint32_t)(PFINT_OICR_ENA_INT_ENA_M &
				 (~PFINT_OICR_LINK_STAT_CHANGE_M)));

	/* route the OICR cause to MSI-X vector 0, ITR index 0 */
	ICE_WRITE_REG(hw, PFINT_OICR_CTL,
		      (0 & PFINT_OICR_CTL_MSIX_INDX_M) |
		      ((0 << PFINT_OICR_CTL_ITR_INDX_S) &
		       PFINT_OICR_CTL_ITR_INDX_M) |
		      PFINT_OICR_CTL_CAUSE_ENA_M);

	/* route firmware (AdminQ) events to MSI-X vector 0 as well */
	ICE_WRITE_REG(hw, PFINT_FW_CTL,
		      (0 & PFINT_FW_CTL_MSIX_INDX_M) |
		      ((0 << PFINT_FW_CTL_ITR_INDX_S) &
		       PFINT_FW_CTL_ITR_INDX_M) |
		      PFINT_FW_CTL_CAUSE_ENA_M);
#else
	/* enable all OICR causes */
	ICE_WRITE_REG(hw, PFINT_OICR_ENA, PFINT_OICR_ENA_INT_ENA_M);
#endif

	/* arm vector 0 */
	ICE_WRITE_REG(hw, GLINT_DYN_CTL(0),
		      GLINT_DYN_CTL_INTENA_M |
		      GLINT_DYN_CTL_CLEARPBA_M |
		      GLINT_DYN_CTL_ITR_INDX_M);

	ice_flush(hw);
}
951
/* Disable IRQ0: de-assert MSI-X vector 0 (write-back on ITR, no INTENA) */
static void
ice_pf_disable_irq0(struct ice_hw *hw)
{
	/* Disable all interrupt types */
	ICE_WRITE_REG(hw, GLINT_DYN_CTL(0), GLINT_DYN_CTL_WB_ON_ITR_M);
	ice_flush(hw);
}
960
#ifdef ICE_LSE_SPT
/* Drain pending AdminQ receive events and dispatch the ones we care
 * about. Currently only ice_aqc_opc_get_link_status is handled: the
 * link state is refreshed and an LSC callback is raised on change.
 * Called from the interrupt handler when a firmware event is signaled.
 */
static void
ice_handle_aq_msg(struct rte_eth_dev *dev)
{
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_ctl_q_info *cq = &hw->adminq;
	struct ice_rq_event_info event;
	uint16_t pending, opcode;
	int ret;

	event.buf_len = ICE_AQ_MAX_BUF_LEN;
	event.msg_buf = rte_zmalloc(NULL, event.buf_len, 0);
	if (!event.msg_buf) {
		PMD_DRV_LOG(ERR, "Failed to allocate mem");
		return;
	}

	/* ice_clean_rq_elem updates `pending` with the number of events
	 * still queued; loop until the RQ is empty or a read fails.
	 */
	pending = 1;
	while (pending) {
		ret = ice_clean_rq_elem(hw, cq, &event, &pending);

		if (ret != ICE_SUCCESS) {
			/* NOTE(review): this reads the receive queue but
			 * logs sq_last_status; rq_last_status may be the
			 * intended field — confirm.
			 */
			PMD_DRV_LOG(INFO,
				    "Failed to read msg from AdminQ, "
				    "adminq_err: %u",
				    hw->adminq.sq_last_status);
			break;
		}
		opcode = rte_le_to_cpu_16(event.desc.opcode);

		switch (opcode) {
		case ice_aqc_opc_get_link_status:
			ret = ice_link_update(dev, 0);
			/* notify applications only when the link changed */
			if (!ret)
				_rte_eth_dev_callback_process
					(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
			break;
		default:
			PMD_DRV_LOG(DEBUG, "Request %u is not supported yet",
				    opcode);
			break;
		}
	}
	rte_free(event.msg_buf);
}
#endif
1007
1008 /**
1009  * Interrupt handler triggered by NIC for handling
1010  * specific interrupt.
1011  *
1012  * @param handle
1013  *  Pointer to interrupt handle.
1014  * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
1016  *
1017  * @return
1018  *  void
1019  */
static void
ice_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t oicr;
	uint32_t reg;
	uint8_t pf_num;
	uint8_t event;
	uint16_t queue;
#ifdef ICE_LSE_SPT
	uint32_t int_fw_ctl;
#endif

	/* Disable interrupt */
	ice_pf_disable_irq0(hw);

	/* read out interrupt causes */
	oicr = ICE_READ_REG(hw, PFINT_OICR);
#ifdef ICE_LSE_SPT
	int_fw_ctl = ICE_READ_REG(hw, PFINT_FW_CTL);
#endif

	/* No interrupt event indicated */
	if (!(oicr & PFINT_OICR_INTEVENT_M)) {
		PMD_DRV_LOG(INFO, "No interrupt event");
		goto done;
	}

#ifdef ICE_LSE_SPT
	/* link-state change arrives as a firmware (AdminQ) event */
	if (int_fw_ctl & PFINT_FW_CTL_INTEVENT_M) {
		PMD_DRV_LOG(INFO, "FW_CTL: link state change event");
		ice_handle_aq_msg(dev);
	}
#else
	/* link-state change is a direct OICR cause */
	if (oicr & PFINT_OICR_LINK_STAT_CHANGE_M) {
		PMD_DRV_LOG(INFO, "OICR: link state change event");
		ice_link_update(dev, 0);
	}
#endif

	/* Malicious Driver Detection: decode and log the offending
	 * PF/queue from the Tx PQM and TCLAN detection registers.
	 */
	if (oicr & PFINT_OICR_MAL_DETECT_M) {
		PMD_DRV_LOG(WARNING, "OICR: MDD event");
		reg = ICE_READ_REG(hw, GL_MDET_TX_PQM);
		if (reg & GL_MDET_TX_PQM_VALID_M) {
			pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >>
				 GL_MDET_TX_PQM_PF_NUM_S;
			event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >>
				GL_MDET_TX_PQM_MAL_TYPE_S;
			queue = (reg & GL_MDET_TX_PQM_QNUM_M) >>
				GL_MDET_TX_PQM_QNUM_S;

			PMD_DRV_LOG(WARNING, "Malicious Driver Detection event "
				    "%d by PQM on TX queue %d PF# %d",
				    event, queue, pf_num);
		}

		reg = ICE_READ_REG(hw, GL_MDET_TX_TCLAN);
		if (reg & GL_MDET_TX_TCLAN_VALID_M) {
			pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >>
				 GL_MDET_TX_TCLAN_PF_NUM_S;
			event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >>
				GL_MDET_TX_TCLAN_MAL_TYPE_S;
			queue = (reg & GL_MDET_TX_TCLAN_QNUM_M) >>
				GL_MDET_TX_TCLAN_QNUM_S;

			PMD_DRV_LOG(WARNING, "Malicious Driver Detection event "
				    "%d by TCLAN on TX queue %d PF# %d",
				    event, queue, pf_num);
		}
	}
done:
	/* Enable interrupt */
	ice_pf_enable_irq0(hw);
	rte_intr_enable(dev->intr_handle);
}
1096
1097 /*  Initialize SW parameters of PF */
1098 static int
1099 ice_pf_sw_init(struct rte_eth_dev *dev)
1100 {
1101         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1102         struct ice_hw *hw = ICE_PF_TO_HW(pf);
1103
1104         if (ice_config_max_queue_pair_num(dev->device->devargs) > 0)
1105                 pf->lan_nb_qp_max =
1106                         ice_config_max_queue_pair_num(dev->device->devargs);
1107         else
1108                 pf->lan_nb_qp_max =
1109                         (uint16_t)RTE_MIN(hw->func_caps.common_cap.num_txq,
1110                                           hw->func_caps.common_cap.num_rxq);
1111
1112         pf->lan_nb_qps = pf->lan_nb_qp_max;
1113
1114         return 0;
1115 }
1116
/* Allocate and configure a VSI of the given type (only ICE_VSI_PF is
 * supported). Sets up default RSS, TC/queue mapping, MSI-X resources,
 * default + broadcast MAC filters, and the Tx scheduler. Returns the
 * new VSI, or NULL on failure (allocation is rolled back).
 */
static struct ice_vsi *
ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_vsi *vsi = NULL;
	struct ice_vsi_ctx vsi_ctx;
	int ret;
	struct ether_addr broadcast = {
		.addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff} };
	struct ether_addr mac_addr;
	uint16_t max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
	uint8_t tc_bitmap = 0x1;

	/* hw->num_lports = 1 in NIC mode */
	vsi = rte_zmalloc(NULL, sizeof(struct ice_vsi), 0);
	if (!vsi)
		return NULL;

	vsi->idx = pf->next_vsi_idx;
	pf->next_vsi_idx++;
	vsi->type = type;
	vsi->adapter = ICE_PF_TO_ADAPTER(pf);
	vsi->max_macaddrs = ICE_NUM_MACADDR_MAX;
	vsi->vlan_anti_spoof_on = 0;
	vsi->vlan_filter_on = 1;
	TAILQ_INIT(&vsi->mac_list);
	TAILQ_INIT(&vsi->vlan_list);

	memset(&vsi_ctx, 0, sizeof(vsi_ctx));
	/* base_queue is used in the queue mapping of the VSI add/update
	 * command. Suppose vsi->base_queue is 0 now, don't consider
	 * SRIOV, VMDQ cases in the first stage. Only Main VSI.
	 */
	vsi->base_queue = 0;
	switch (type) {
	case ICE_VSI_PF:
		vsi->nb_qps = pf->lan_nb_qps;
		ice_vsi_config_default_rss(&vsi_ctx.info);
		vsi_ctx.alloc_from_pool = true;
		vsi_ctx.flags = ICE_AQ_VSI_TYPE_PF;
		/* switch_id is queried by get_switch_config aq, which is done
		 * by ice_init_hw
		 */
		vsi_ctx.info.sw_id = hw->port_info->sw_id;
		vsi_ctx.info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
		/* Allow all untagged or tagged packets */
		vsi_ctx.info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL;
		vsi_ctx.info.vlan_flags |= ICE_AQ_VSI_VLAN_EMOD_NOTHING;
		vsi_ctx.info.q_opt_rss = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF |
					 ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
		/* Enable VLAN/UP trip */
		ret = ice_vsi_config_tc_queue_mapping(vsi,
						      &vsi_ctx.info,
						      ICE_DEFAULT_TCMAP);
		if (ret) {
			PMD_INIT_LOG(ERR,
				     "tc queue mapping with vsi failed, "
				     "err = %d",
				     ret);
			goto fail_mem;
		}

		break;
	default:
		/* for other types of VSI */
		PMD_INIT_LOG(ERR, "other types of VSI not supported");
		goto fail_mem;
	}

	/* VF has MSIX interrupt in VF range, don't allocate here */
	if (type == ICE_VSI_PF) {
		ret = ice_res_pool_alloc(&pf->msix_pool,
					 RTE_MIN(vsi->nb_qps,
						 RTE_MAX_RXTX_INTR_VEC_ID));
		if (ret < 0) {
			PMD_INIT_LOG(ERR, "VSI MAIN %d get heap failed %d",
				     vsi->vsi_id, ret);
		}
		/* NOTE(review): on allocation failure a negative ret is
		 * still stored in msix_intr; verify downstream users
		 * tolerate this or whether this should bail out.
		 */
		vsi->msix_intr = ret;
		vsi->nb_msix = RTE_MIN(vsi->nb_qps, RTE_MAX_RXTX_INTR_VEC_ID);
	} else {
		vsi->msix_intr = 0;
		vsi->nb_msix = 0;
	}
	ret = ice_add_vsi(hw, vsi->idx, &vsi_ctx, NULL);
	if (ret != ICE_SUCCESS) {
		PMD_INIT_LOG(ERR, "add vsi failed, err = %d", ret);
		goto fail_mem;
	}
	/* store vsi information in the SW structure */
	vsi->vsi_id = vsi_ctx.vsi_num;
	vsi->info = vsi_ctx.info;
	pf->vsis_allocated = vsi_ctx.vsis_allocd;
	pf->vsis_unallocated = vsi_ctx.vsis_unallocated;

	/* MAC configuration: permanent HW address becomes the default,
	 * plus a broadcast filter.
	 */
	rte_memcpy(pf->dev_addr.addr_bytes,
		   hw->port_info->mac.perm_addr,
		   ETH_ADDR_LEN);

	rte_memcpy(&mac_addr, &pf->dev_addr, ETHER_ADDR_LEN);
	ret = ice_add_mac_filter(vsi, &mac_addr);
	if (ret != ICE_SUCCESS)
		PMD_INIT_LOG(ERR, "Failed to add dflt MAC filter");

	rte_memcpy(&mac_addr, &broadcast, ETHER_ADDR_LEN);
	ret = ice_add_mac_filter(vsi, &mac_addr);
	if (ret != ICE_SUCCESS)
		PMD_INIT_LOG(ERR, "Failed to add MAC filter");

	/* At the beginning, only TC0. */
	/* What we need here is the maximum number of the TX queues.
	 * Currently vsi->nb_qps means it.
	 * Correct it if any change.
	 */
	max_txqs[0] = vsi->nb_qps;
	ret = ice_cfg_vsi_lan(hw->port_info, vsi->idx,
			      tc_bitmap, max_txqs);
	if (ret != ICE_SUCCESS)
		PMD_INIT_LOG(ERR, "Failed to config vsi sched");

	return vsi;
fail_mem:
	/* roll back the allocation and the reserved VSI index */
	rte_free(vsi);
	pf->next_vsi_idx--;
	return NULL;
}
1244
1245 static int
1246 ice_pf_setup(struct ice_pf *pf)
1247 {
1248         struct ice_vsi *vsi;
1249
1250         /* Clear all stats counters */
1251         pf->offset_loaded = FALSE;
1252         memset(&pf->stats, 0, sizeof(struct ice_hw_port_stats));
1253         memset(&pf->stats_offset, 0, sizeof(struct ice_hw_port_stats));
1254         memset(&pf->internal_stats, 0, sizeof(struct ice_eth_stats));
1255         memset(&pf->internal_stats_offset, 0, sizeof(struct ice_eth_stats));
1256
1257         vsi = ice_setup_vsi(pf, ICE_VSI_PF);
1258         if (!vsi) {
1259                 PMD_INIT_LOG(ERR, "Failed to add vsi for PF");
1260                 return -EINVAL;
1261         }
1262
1263         pf->main_vsi = vsi;
1264
1265         return 0;
1266 }
1267
1268 static int ice_load_pkg(struct rte_eth_dev *dev)
1269 {
1270         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1271         const char *pkg_file = ICE_DFLT_PKG_FILE;
1272         int err;
1273         uint8_t *buf;
1274         int buf_len;
1275         FILE *file;
1276         struct stat fstat;
1277
1278         file = fopen(pkg_file, "rb");
1279         if (!file)  {
1280                 PMD_INIT_LOG(ERR, "failed to open file: %s\n", pkg_file);
1281                 return -1;
1282         }
1283
1284         err = stat(pkg_file, &fstat);
1285         if (err) {
1286                 PMD_INIT_LOG(ERR, "failed to get file stats\n");
1287                 fclose(file);
1288                 return err;
1289         }
1290
1291         buf_len = fstat.st_size;
1292         buf = rte_malloc(NULL, buf_len, 0);
1293
1294         if (!buf) {
1295                 PMD_INIT_LOG(ERR, "failed to allocate buf of size %d for package\n",
1296                                 buf_len);
1297                 fclose(file);
1298                 return -1;
1299         }
1300
1301         err = fread(buf, buf_len, 1, file);
1302         if (err != 1) {
1303                 PMD_INIT_LOG(ERR, "failed to read package data\n");
1304                 fclose(file);
1305                 err = -1;
1306                 goto fail_exit;
1307         }
1308
1309         fclose(file);
1310
1311         err = ice_copy_and_init_pkg(hw, buf, buf_len);
1312         if (err) {
1313                 PMD_INIT_LOG(ERR, "ice_copy_and_init_hw failed: %d\n", err);
1314                 goto fail_exit;
1315         }
1316         err = ice_init_hw_tbls(hw);
1317         if (err) {
1318                 PMD_INIT_LOG(ERR, "ice_init_hw_tbls failed: %d\n", err);
1319                 goto fail_init_tbls;
1320         }
1321
1322         return 0;
1323
1324 fail_init_tbls:
1325         rte_free(hw->pkg_copy);
1326 fail_exit:
1327         rte_free(buf);
1328         return err;
1329 }
1330
/* Per-device initialization: wire up dev_ops and burst functions, init
 * the HW (control queues, capabilities), load the DDP package (falling
 * back to safe mode on failure), set up SW parameters, MAC address,
 * MSI-X pool and the PF/main VSI, then register and enable IRQ0.
 * Error paths unwind in reverse order via the goto labels.
 */
static int
ice_dev_init(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev;
	struct rte_intr_handle *intr_handle;
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_adapter *ad =
		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct ice_vsi *vsi;
	int ret;

	dev->dev_ops = &ice_eth_dev_ops;
	dev->rx_pkt_burst = ice_recv_pkts;
	dev->tx_pkt_burst = ice_xmit_pkts;
	dev->tx_pkt_prepare = ice_prep_pkts;

	ice_set_default_ptype_table(dev);
	pci_dev = RTE_DEV_TO_PCI(dev->device);
	intr_handle = &pci_dev->intr_handle;

	pf->adapter = ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	pf->adapter->eth_dev = dev;
	pf->dev_data = dev->data;
	hw->back = pf->adapter;
	hw->hw_addr = (uint8_t *)pci_dev->mem_resource[0].addr;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->device_id = pci_dev->id.device_id;
	hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
	hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
	hw->bus.device = pci_dev->addr.devid;
	hw->bus.func = pci_dev->addr.function;

	ice_init_controlq_parameter(hw);

	ret = ice_init_hw(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to initialize HW");
		return -EINVAL;
	}

	/* a missing/bad DDP package is not fatal: continue with reduced
	 * functionality (safe mode)
	 */
	ret = ice_load_pkg(dev);
	if (ret) {
		PMD_INIT_LOG(WARNING, "Failed to load the DDP package,"
				"Entering Safe Mode");
		ad->is_safe_mode = 1;
	}

	PMD_INIT_LOG(INFO, "FW %d.%d.%05d API %d.%d",
		     hw->fw_maj_ver, hw->fw_min_ver, hw->fw_build,
		     hw->api_maj_ver, hw->api_min_ver);

	ice_pf_sw_init(dev);
	ret = ice_init_mac_address(dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to initialize mac address");
		goto err_init_mac;
	}

	/* vector 0 is reserved for the misc interrupt (IRQ0) */
	ret = ice_res_pool_init(&pf->msix_pool, 1,
				hw->func_caps.common_cap.num_msix_vectors - 1);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to init MSIX pool");
		goto err_msix_pool_init;
	}

	ret = ice_pf_setup(pf);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to setup PF");
		goto err_pf_setup;
	}

	vsi = pf->main_vsi;

	/* Disable double vlan by default */
	ice_vsi_config_double_vlan(vsi, FALSE);

	/* register callback func to eal lib */
	rte_intr_callback_register(intr_handle,
				   ice_interrupt_handler, dev);

	ice_pf_enable_irq0(hw);

	/* enable uio intr after callback register */
	rte_intr_enable(intr_handle);

	return 0;

err_pf_setup:
	ice_res_pool_destroy(&pf->msix_pool);
err_msix_pool_init:
	rte_free(dev->data->mac_addrs);
err_init_mac:
	ice_sched_cleanup_all(hw);
	rte_free(hw->port_info);
	ice_shutdown_all_ctrlq(hw);

	return ret;
}
1430
1431 static int
1432 ice_release_vsi(struct ice_vsi *vsi)
1433 {
1434         struct ice_hw *hw;
1435         struct ice_vsi_ctx vsi_ctx;
1436         enum ice_status ret;
1437
1438         if (!vsi)
1439                 return 0;
1440
1441         hw = ICE_VSI_TO_HW(vsi);
1442
1443         ice_remove_all_mac_vlan_filters(vsi);
1444
1445         memset(&vsi_ctx, 0, sizeof(vsi_ctx));
1446
1447         vsi_ctx.vsi_num = vsi->vsi_id;
1448         vsi_ctx.info = vsi->info;
1449         ret = ice_free_vsi(hw, vsi->idx, &vsi_ctx, false, NULL);
1450         if (ret != ICE_SUCCESS) {
1451                 PMD_INIT_LOG(ERR, "Failed to free vsi by aq, %u", vsi->vsi_id);
1452                 rte_free(vsi);
1453                 return -1;
1454         }
1455
1456         rte_free(vsi);
1457         return 0;
1458 }
1459
1460 static void
1461 ice_vsi_disable_queues_intr(struct ice_vsi *vsi)
1462 {
1463         struct rte_eth_dev *dev = vsi->adapter->eth_dev;
1464         struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
1465         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1466         struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
1467         uint16_t msix_intr, i;
1468
1469         /* disable interrupt and also clear all the exist config */
1470         for (i = 0; i < vsi->nb_qps; i++) {
1471                 ICE_WRITE_REG(hw, QINT_TQCTL(vsi->base_queue + i), 0);
1472                 ICE_WRITE_REG(hw, QINT_RQCTL(vsi->base_queue + i), 0);
1473                 rte_wmb();
1474         }
1475
1476         if (rte_intr_allow_others(intr_handle))
1477                 /* vfio-pci */
1478                 for (i = 0; i < vsi->nb_msix; i++) {
1479                         msix_intr = vsi->msix_intr + i;
1480                         ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr),
1481                                       GLINT_DYN_CTL_WB_ON_ITR_M);
1482                 }
1483         else
1484                 /* igb_uio */
1485                 ICE_WRITE_REG(hw, GLINT_DYN_CTL(0), GLINT_DYN_CTL_WB_ON_ITR_M);
1486 }
1487
/* Stop the device: halt all Rx/Tx queues, disable queue interrupts,
 * release queue mbufs, and tear down the Rx interrupt mapping.
 * Idempotent — returns immediately if the adapter is already stopped.
 */
static void
ice_dev_stop(struct rte_eth_dev *dev)
{
	struct rte_eth_dev_data *data = dev->data;
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_vsi *main_vsi = pf->main_vsi;
	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	uint16_t i;

	/* avoid stopping again */
	if (pf->adapter_stopped)
		return;

	/* stop and clear all Rx queues */
	for (i = 0; i < data->nb_rx_queues; i++)
		ice_rx_queue_stop(dev, i);

	/* stop and clear all Tx queues */
	for (i = 0; i < data->nb_tx_queues; i++)
		ice_tx_queue_stop(dev, i);

	/* disable all queue interrupts */
	ice_vsi_disable_queues_intr(main_vsi);

	/* Clear all queues and release mbufs */
	ice_clear_queues(dev);

	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
	if (intr_handle->intr_vec) {
		rte_free(intr_handle->intr_vec);
		intr_handle->intr_vec = NULL;
	}

	pf->adapter_stopped = true;
}
1525
/* Close the device: stop it, then free all queue resources, the MSI-X
 * pool, the main VSI, the scheduler state, and shut down the control
 * queues — i.e. undo ice_dev_init's resource setup.
 */
static void
ice_dev_close(struct rte_eth_dev *dev)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	ice_dev_stop(dev);

	/* release all queue resource */
	ice_free_queues(dev);

	ice_res_pool_destroy(&pf->msix_pool);
	ice_release_vsi(pf->main_vsi);
	ice_sched_cleanup_all(hw);
	rte_free(hw->port_info);
	ice_shutdown_all_ctrlq(hw);
}
1543
/* Uninitialize the device: close it, clear the burst/ops pointers, free
 * the MAC address table, and detach the interrupt handler.
 */
static int
ice_dev_uninit(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	ice_dev_close(dev);

	dev->dev_ops = NULL;
	dev->rx_pkt_burst = NULL;
	dev->tx_pkt_burst = NULL;

	rte_free(dev->data->mac_addrs);
	dev->data->mac_addrs = NULL;

	/* disable uio intr before callback unregister */
	rte_intr_disable(intr_handle);

	/* unregister the callback func from eal lib */
	rte_intr_callback_unregister(intr_handle,
				     ice_interrupt_handler, dev);

	return 0;
}
1568
1569 static int
1570 ice_dev_configure(__rte_unused struct rte_eth_dev *dev)
1571 {
1572         struct ice_adapter *ad =
1573                 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1574
1575         /* Initialize to TRUE. If any of Rx queues doesn't meet the
1576          * bulk allocation or vector Rx preconditions we will reset it.
1577          */
1578         ad->rx_bulk_alloc_allowed = true;
1579         ad->tx_simple_allowed = true;
1580
1581         return 0;
1582 }
1583
1584 static int ice_init_rss(struct ice_pf *pf)
1585 {
1586         struct ice_hw *hw = ICE_PF_TO_HW(pf);
1587         struct ice_vsi *vsi = pf->main_vsi;
1588         struct rte_eth_dev *dev = pf->adapter->eth_dev;
1589         struct rte_eth_rss_conf *rss_conf;
1590         struct ice_aqc_get_set_rss_keys key;
1591         uint16_t i, nb_q;
1592         int ret = 0;
1593         bool is_safe_mode = pf->adapter->is_safe_mode;
1594
1595         rss_conf = &dev->data->dev_conf.rx_adv_conf.rss_conf;
1596         nb_q = dev->data->nb_rx_queues;
1597         vsi->rss_key_size = ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE;
1598         vsi->rss_lut_size = hw->func_caps.common_cap.rss_table_size;
1599
1600         if (is_safe_mode) {
1601                 PMD_DRV_LOG(WARNING, "RSS is not supported in safe mode\n");
1602                 return 0;
1603         }
1604
1605         if (!vsi->rss_key)
1606                 vsi->rss_key = rte_zmalloc(NULL,
1607                                            vsi->rss_key_size, 0);
1608         if (!vsi->rss_lut)
1609                 vsi->rss_lut = rte_zmalloc(NULL,
1610                                            vsi->rss_lut_size, 0);
1611
1612         /* configure RSS key */
1613         if (!rss_conf->rss_key) {
1614                 /* Calculate the default hash key */
1615                 for (i = 0; i <= vsi->rss_key_size; i++)
1616                         vsi->rss_key[i] = (uint8_t)rte_rand();
1617         } else {
1618                 rte_memcpy(vsi->rss_key, rss_conf->rss_key,
1619                            RTE_MIN(rss_conf->rss_key_len,
1620                                    vsi->rss_key_size));
1621         }
1622         rte_memcpy(key.standard_rss_key, vsi->rss_key, vsi->rss_key_size);
1623         ret = ice_aq_set_rss_key(hw, vsi->idx, &key);
1624         if (ret)
1625                 return -EINVAL;
1626
1627         /* init RSS LUT table */
1628         for (i = 0; i < vsi->rss_lut_size; i++)
1629                 vsi->rss_lut[i] = i % nb_q;
1630
1631         ret = ice_aq_set_rss_lut(hw, vsi->idx,
1632                                  ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF,
1633                                  vsi->rss_lut, vsi->rss_lut_size);
1634         if (ret)
1635                 return -EINVAL;
1636
1637         return 0;
1638 }
1639
1640 static void
1641 __vsi_queues_bind_intr(struct ice_vsi *vsi, uint16_t msix_vect,
1642                        int base_queue, int nb_queue)
1643 {
1644         struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
1645         uint32_t val, val_tx;
1646         int i;
1647
1648         for (i = 0; i < nb_queue; i++) {
1649                 /*do actual bind*/
1650                 val = (msix_vect & QINT_RQCTL_MSIX_INDX_M) |
1651                       (0 < QINT_RQCTL_ITR_INDX_S) | QINT_RQCTL_CAUSE_ENA_M;
1652                 val_tx = (msix_vect & QINT_TQCTL_MSIX_INDX_M) |
1653                          (0 < QINT_TQCTL_ITR_INDX_S) | QINT_TQCTL_CAUSE_ENA_M;
1654
1655                 PMD_DRV_LOG(INFO, "queue %d is binding to vect %d",
1656                             base_queue + i, msix_vect);
1657                 /* set ITR0 value */
1658                 ICE_WRITE_REG(hw, GLINT_ITR(0, msix_vect), 0x10);
1659                 ICE_WRITE_REG(hw, QINT_RQCTL(base_queue + i), val);
1660                 ICE_WRITE_REG(hw, QINT_TQCTL(base_queue + i), val_tx);
1661         }
1662 }
1663
/* Map the VSI's used queues onto MSI-X vectors.
 *
 * Two layouts are supported:
 *  - fewer than two usable vectors (e.g. UIO): every queue is bound to a
 *    single vector in one shot;
 *  - one vector per queue (e.g. VFIO): queues are bound 1:1, consuming one
 *    vector each.
 * When per-queue Rx interrupts are enabled, the chosen vector for each
 * queue is also recorded in intr_handle->intr_vec for the ethdev layer.
 */
static void
ice_vsi_queues_bind_intr(struct ice_vsi *vsi)
{
	struct rte_eth_dev *dev = vsi->adapter->eth_dev;
	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	uint16_t msix_vect = vsi->msix_intr;
	/* cannot use more vectors than event fds set up by the intr layer */
	uint16_t nb_msix = RTE_MIN(vsi->nb_msix, intr_handle->nb_efd);
	uint16_t queue_idx = 0;
	int record = 0;
	int i;

	/* clear Rx/Tx queue interrupt */
	for (i = 0; i < vsi->nb_used_qps; i++) {
		ICE_WRITE_REG(hw, QINT_TQCTL(vsi->base_queue + i), 0);
		ICE_WRITE_REG(hw, QINT_RQCTL(vsi->base_queue + i), 0);
	}

	/* PF bind interrupt: only record per-queue vectors when the
	 * datapath interrupt mode is enabled
	 */
	if (rte_intr_dp_is_en(intr_handle)) {
		queue_idx = 0;
		record = 1;
	}

	for (i = 0; i < vsi->nb_used_qps; i++) {
		if (nb_msix <= 1) {
			if (!rte_intr_allow_others(intr_handle))
				msix_vect = ICE_MISC_VEC_ID;

			/* uio mapping all queue to one msix_vect */
			__vsi_queues_bind_intr(vsi, msix_vect,
					       vsi->base_queue + i,
					       vsi->nb_used_qps - i);

			/* record the shared vector for every remaining queue */
			for (; !!record && i < vsi->nb_used_qps; i++)
				intr_handle->intr_vec[queue_idx + i] =
					msix_vect;
			break;
		}

		/* vfio 1:1 queue/msix_vect mapping */
		__vsi_queues_bind_intr(vsi, msix_vect,
				       vsi->base_queue + i, 1);

		if (!!record)
			intr_handle->intr_vec[queue_idx + i] = msix_vect;

		msix_vect++;
		nb_msix--;
	}
}
1716
1717 static void
1718 ice_vsi_enable_queues_intr(struct ice_vsi *vsi)
1719 {
1720         struct rte_eth_dev *dev = vsi->adapter->eth_dev;
1721         struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
1722         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1723         struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
1724         uint16_t msix_intr, i;
1725
1726         if (rte_intr_allow_others(intr_handle))
1727                 for (i = 0; i < vsi->nb_used_qps; i++) {
1728                         msix_intr = vsi->msix_intr + i;
1729                         ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr),
1730                                       GLINT_DYN_CTL_INTENA_M |
1731                                       GLINT_DYN_CTL_CLEARPBA_M |
1732                                       GLINT_DYN_CTL_ITR_INDX_M |
1733                                       GLINT_DYN_CTL_WB_ON_ITR_M);
1734                 }
1735         else
1736                 ICE_WRITE_REG(hw, GLINT_DYN_CTL(0),
1737                               GLINT_DYN_CTL_INTENA_M |
1738                               GLINT_DYN_CTL_CLEARPBA_M |
1739                               GLINT_DYN_CTL_ITR_INDX_M |
1740                               GLINT_DYN_CTL_WB_ON_ITR_M);
1741 }
1742
1743 static int
1744 ice_rxq_intr_setup(struct rte_eth_dev *dev)
1745 {
1746         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1747         struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
1748         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1749         struct ice_vsi *vsi = pf->main_vsi;
1750         uint32_t intr_vector = 0;
1751
1752         rte_intr_disable(intr_handle);
1753
1754         /* check and configure queue intr-vector mapping */
1755         if ((rte_intr_cap_multiple(intr_handle) ||
1756              !RTE_ETH_DEV_SRIOV(dev).active) &&
1757             dev->data->dev_conf.intr_conf.rxq != 0) {
1758                 intr_vector = dev->data->nb_rx_queues;
1759                 if (intr_vector > ICE_MAX_INTR_QUEUE_NUM) {
1760                         PMD_DRV_LOG(ERR, "At most %d intr queues supported",
1761                                     ICE_MAX_INTR_QUEUE_NUM);
1762                         return -ENOTSUP;
1763                 }
1764                 if (rte_intr_efd_enable(intr_handle, intr_vector))
1765                         return -1;
1766         }
1767
1768         if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
1769                 intr_handle->intr_vec =
1770                 rte_zmalloc(NULL, dev->data->nb_rx_queues * sizeof(int),
1771                             0);
1772                 if (!intr_handle->intr_vec) {
1773                         PMD_DRV_LOG(ERR,
1774                                     "Failed to allocate %d rx_queues intr_vec",
1775                                     dev->data->nb_rx_queues);
1776                         return -ENOMEM;
1777                 }
1778         }
1779
1780         /* Map queues with MSIX interrupt */
1781         vsi->nb_used_qps = dev->data->nb_rx_queues;
1782         ice_vsi_queues_bind_intr(vsi);
1783
1784         /* Enable interrupts for all the queues */
1785         ice_vsi_enable_queues_intr(vsi);
1786
1787         rte_intr_enable(intr_handle);
1788
1789         return 0;
1790 }
1791
1792 static int
1793 ice_dev_start(struct rte_eth_dev *dev)
1794 {
1795         struct rte_eth_dev_data *data = dev->data;
1796         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1797         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1798         struct ice_vsi *vsi = pf->main_vsi;
1799         uint16_t nb_rxq = 0;
1800         uint16_t nb_txq, i;
1801         int mask, ret;
1802
1803         /* program Tx queues' context in hardware */
1804         for (nb_txq = 0; nb_txq < data->nb_tx_queues; nb_txq++) {
1805                 ret = ice_tx_queue_start(dev, nb_txq);
1806                 if (ret) {
1807                         PMD_DRV_LOG(ERR, "fail to start Tx queue %u", nb_txq);
1808                         goto tx_err;
1809                 }
1810         }
1811
1812         /* program Rx queues' context in hardware*/
1813         for (nb_rxq = 0; nb_rxq < data->nb_rx_queues; nb_rxq++) {
1814                 ret = ice_rx_queue_start(dev, nb_rxq);
1815                 if (ret) {
1816                         PMD_DRV_LOG(ERR, "fail to start Rx queue %u", nb_rxq);
1817                         goto rx_err;
1818                 }
1819         }
1820
1821         ret = ice_init_rss(pf);
1822         if (ret) {
1823                 PMD_DRV_LOG(ERR, "Failed to enable rss for PF");
1824                 goto rx_err;
1825         }
1826
1827         ice_set_rx_function(dev);
1828         ice_set_tx_function(dev);
1829
1830         mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
1831                         ETH_VLAN_EXTEND_MASK;
1832         ret = ice_vlan_offload_set(dev, mask);
1833         if (ret) {
1834                 PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
1835                 goto rx_err;
1836         }
1837
1838         /* enable Rx interrput and mapping Rx queue to interrupt vector */
1839         if (ice_rxq_intr_setup(dev))
1840                 return -EIO;
1841
1842         /* Enable receiving broadcast packets and transmitting packets */
1843         ret = ice_set_vsi_promisc(hw, vsi->idx,
1844                                   ICE_PROMISC_BCAST_RX | ICE_PROMISC_BCAST_TX |
1845                                   ICE_PROMISC_UCAST_TX | ICE_PROMISC_MCAST_TX,
1846                                   0);
1847         if (ret != ICE_SUCCESS)
1848                 PMD_DRV_LOG(INFO, "fail to set vsi broadcast");
1849
1850         ret = ice_aq_set_event_mask(hw, hw->port_info->lport,
1851                                     ((u16)(ICE_AQ_LINK_EVENT_LINK_FAULT |
1852                                      ICE_AQ_LINK_EVENT_PHY_TEMP_ALARM |
1853                                      ICE_AQ_LINK_EVENT_EXCESSIVE_ERRORS |
1854                                      ICE_AQ_LINK_EVENT_SIGNAL_DETECT |
1855                                      ICE_AQ_LINK_EVENT_AN_COMPLETED |
1856                                      ICE_AQ_LINK_EVENT_PORT_TX_SUSPENDED)),
1857                                      NULL);
1858         if (ret != ICE_SUCCESS)
1859                 PMD_DRV_LOG(WARNING, "Fail to set phy mask");
1860
1861         /* Call get_link_info aq commond to enable/disable LSE */
1862         ice_link_update(dev, 0);
1863
1864         pf->adapter_stopped = false;
1865
1866         return 0;
1867
1868         /* stop the started queues if failed to start all queues */
1869 rx_err:
1870         for (i = 0; i < nb_rxq; i++)
1871                 ice_rx_queue_stop(dev, i);
1872 tx_err:
1873         for (i = 0; i < nb_txq; i++)
1874                 ice_tx_queue_stop(dev, i);
1875
1876         return -EIO;
1877 }
1878
1879 static int
1880 ice_dev_reset(struct rte_eth_dev *dev)
1881 {
1882         int ret;
1883
1884         if (dev->data->sriov.active)
1885                 return -ENOTSUP;
1886
1887         ret = ice_dev_uninit(dev);
1888         if (ret) {
1889                 PMD_INIT_LOG(ERR, "failed to uninit device, status = %d", ret);
1890                 return -ENXIO;
1891         }
1892
1893         ret = ice_dev_init(dev);
1894         if (ret) {
1895                 PMD_INIT_LOG(ERR, "failed to init device, status = %d", ret);
1896                 return -ENXIO;
1897         }
1898
1899         return 0;
1900 }
1901
/* dev_ops info_get callback: report device capabilities and defaults.
 *
 * In safe mode (no DDP package loaded) only the baseline VLAN/scatter
 * offloads are advertised; checksum, QinQ and RSS capabilities require
 * the package and are added only when not in safe mode.
 */
static void
ice_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_vsi *vsi = pf->main_vsi;
	struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
	bool is_safe_mode = pf->adapter->is_safe_mode;

	dev_info->min_rx_bufsize = ICE_BUF_SIZE_MIN;
	dev_info->max_rx_pktlen = ICE_FRAME_SIZE_MAX;
	dev_info->max_rx_queues = vsi->nb_qps;
	dev_info->max_tx_queues = vsi->nb_qps;
	dev_info->max_mac_addrs = vsi->max_macaddrs;
	dev_info->max_vfs = pci_dev->max_vfs;

	/* offloads available even without the DDP package */
	dev_info->rx_offload_capa =
		DEV_RX_OFFLOAD_VLAN_STRIP |
		DEV_RX_OFFLOAD_JUMBO_FRAME |
		DEV_RX_OFFLOAD_KEEP_CRC |
		DEV_RX_OFFLOAD_SCATTER |
		DEV_RX_OFFLOAD_VLAN_FILTER;
	dev_info->tx_offload_capa =
		DEV_TX_OFFLOAD_VLAN_INSERT |
		DEV_TX_OFFLOAD_TCP_TSO |
		DEV_TX_OFFLOAD_MULTI_SEGS |
		DEV_TX_OFFLOAD_MBUF_FAST_FREE;
	dev_info->flow_type_rss_offloads = 0;

	/* checksum/QinQ offloads and RSS need the DDP package */
	if (!is_safe_mode) {
		dev_info->rx_offload_capa |=
			DEV_RX_OFFLOAD_IPV4_CKSUM |
			DEV_RX_OFFLOAD_UDP_CKSUM |
			DEV_RX_OFFLOAD_TCP_CKSUM |
			DEV_RX_OFFLOAD_QINQ_STRIP |
			DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
			DEV_RX_OFFLOAD_VLAN_EXTEND;
		dev_info->tx_offload_capa |=
			DEV_TX_OFFLOAD_QINQ_INSERT |
			DEV_TX_OFFLOAD_IPV4_CKSUM |
			DEV_TX_OFFLOAD_UDP_CKSUM |
			DEV_TX_OFFLOAD_TCP_CKSUM |
			DEV_TX_OFFLOAD_SCTP_CKSUM |
			DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
		dev_info->flow_type_rss_offloads |= ICE_RSS_OFFLOAD_ALL;
	}

	dev_info->rx_queue_offload_capa = 0;
	dev_info->tx_queue_offload_capa = 0;

	dev_info->reta_size = hw->func_caps.common_cap.rss_table_size;
	dev_info->hash_key_size = (VSIQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_thresh = {
			.pthresh = ICE_DEFAULT_RX_PTHRESH,
			.hthresh = ICE_DEFAULT_RX_HTHRESH,
			.wthresh = ICE_DEFAULT_RX_WTHRESH,
		},
		.rx_free_thresh = ICE_DEFAULT_RX_FREE_THRESH,
		.rx_drop_en = 0,
		.offloads = 0,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_thresh = {
			.pthresh = ICE_DEFAULT_TX_PTHRESH,
			.hthresh = ICE_DEFAULT_TX_HTHRESH,
			.wthresh = ICE_DEFAULT_TX_WTHRESH,
		},
		.tx_free_thresh = ICE_DEFAULT_TX_FREE_THRESH,
		.tx_rs_thresh = ICE_DEFAULT_TX_RSBIT_THRESH,
		.offloads = 0,
	};

	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = ICE_MAX_RING_DESC,
		.nb_min = ICE_MIN_RING_DESC,
		.nb_align = ICE_ALIGN_RING_DESC,
	};

	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = ICE_MAX_RING_DESC,
		.nb_min = ICE_MIN_RING_DESC,
		.nb_align = ICE_ALIGN_RING_DESC,
	};

	dev_info->speed_capa = ETH_LINK_SPEED_10M |
			       ETH_LINK_SPEED_100M |
			       ETH_LINK_SPEED_1G |
			       ETH_LINK_SPEED_2_5G |
			       ETH_LINK_SPEED_5G |
			       ETH_LINK_SPEED_10G |
			       ETH_LINK_SPEED_20G |
			       ETH_LINK_SPEED_25G |
			       ETH_LINK_SPEED_40G |
			       ETH_LINK_SPEED_50G |
			       ETH_LINK_SPEED_100G;

	dev_info->nb_rx_queues = dev->data->nb_rx_queues;
	dev_info->nb_tx_queues = dev->data->nb_tx_queues;

	dev_info->default_rxportconf.burst_size = ICE_RX_MAX_BURST;
	dev_info->default_txportconf.burst_size = ICE_TX_MAX_BURST;
	dev_info->default_rxportconf.nb_queues = 1;
	dev_info->default_txportconf.nb_queues = 1;
	/* NOTE(review): ring_size is set from ICE_BUF_SIZE_MIN (a buffer
	 * size), not a descriptor count — looks intentional here but
	 * worth confirming against the ring-size defaults.
	 */
	dev_info->default_rxportconf.ring_size = ICE_BUF_SIZE_MIN;
	dev_info->default_txportconf.ring_size = ICE_BUF_SIZE_MIN;
}
2011
2012 static inline int
2013 ice_atomic_read_link_status(struct rte_eth_dev *dev,
2014                             struct rte_eth_link *link)
2015 {
2016         struct rte_eth_link *dst = link;
2017         struct rte_eth_link *src = &dev->data->dev_link;
2018
2019         if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
2020                                 *(uint64_t *)src) == 0)
2021                 return -1;
2022
2023         return 0;
2024 }
2025
2026 static inline int
2027 ice_atomic_write_link_status(struct rte_eth_dev *dev,
2028                              struct rte_eth_link *link)
2029 {
2030         struct rte_eth_link *dst = &dev->data->dev_link;
2031         struct rte_eth_link *src = link;
2032
2033         if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
2034                                 *(uint64_t *)src) == 0)
2035                 return -1;
2036
2037         return 0;
2038 }
2039
2040 static int
2041 ice_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
2042 {
2043 #define CHECK_INTERVAL 100  /* 100ms */
2044 #define MAX_REPEAT_TIME 10  /* 1s (10 * 100ms) in total */
2045         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2046         struct ice_link_status link_status;
2047         struct rte_eth_link link, old;
2048         int status;
2049         unsigned int rep_cnt = MAX_REPEAT_TIME;
2050         bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false;
2051
2052         memset(&link, 0, sizeof(link));
2053         memset(&old, 0, sizeof(old));
2054         memset(&link_status, 0, sizeof(link_status));
2055         ice_atomic_read_link_status(dev, &old);
2056
2057         do {
2058                 /* Get link status information from hardware */
2059                 status = ice_aq_get_link_info(hw->port_info, enable_lse,
2060                                               &link_status, NULL);
2061                 if (status != ICE_SUCCESS) {
2062                         link.link_speed = ETH_SPEED_NUM_100M;
2063                         link.link_duplex = ETH_LINK_FULL_DUPLEX;
2064                         PMD_DRV_LOG(ERR, "Failed to get link info");
2065                         goto out;
2066                 }
2067
2068                 link.link_status = link_status.link_info & ICE_AQ_LINK_UP;
2069                 if (!wait_to_complete || link.link_status)
2070                         break;
2071
2072                 rte_delay_ms(CHECK_INTERVAL);
2073         } while (--rep_cnt);
2074
2075         if (!link.link_status)
2076                 goto out;
2077
2078         /* Full-duplex operation at all supported speeds */
2079         link.link_duplex = ETH_LINK_FULL_DUPLEX;
2080
2081         /* Parse the link status */
2082         switch (link_status.link_speed) {
2083         case ICE_AQ_LINK_SPEED_10MB:
2084                 link.link_speed = ETH_SPEED_NUM_10M;
2085                 break;
2086         case ICE_AQ_LINK_SPEED_100MB:
2087                 link.link_speed = ETH_SPEED_NUM_100M;
2088                 break;
2089         case ICE_AQ_LINK_SPEED_1000MB:
2090                 link.link_speed = ETH_SPEED_NUM_1G;
2091                 break;
2092         case ICE_AQ_LINK_SPEED_2500MB:
2093                 link.link_speed = ETH_SPEED_NUM_2_5G;
2094                 break;
2095         case ICE_AQ_LINK_SPEED_5GB:
2096                 link.link_speed = ETH_SPEED_NUM_5G;
2097                 break;
2098         case ICE_AQ_LINK_SPEED_10GB:
2099                 link.link_speed = ETH_SPEED_NUM_10G;
2100                 break;
2101         case ICE_AQ_LINK_SPEED_20GB:
2102                 link.link_speed = ETH_SPEED_NUM_20G;
2103                 break;
2104         case ICE_AQ_LINK_SPEED_25GB:
2105                 link.link_speed = ETH_SPEED_NUM_25G;
2106                 break;
2107         case ICE_AQ_LINK_SPEED_40GB:
2108                 link.link_speed = ETH_SPEED_NUM_40G;
2109                 break;
2110         case ICE_AQ_LINK_SPEED_50GB:
2111                 link.link_speed = ETH_SPEED_NUM_50G;
2112                 break;
2113         case ICE_AQ_LINK_SPEED_100GB:
2114                 link.link_speed = ETH_SPEED_NUM_100G;
2115                 break;
2116         case ICE_AQ_LINK_SPEED_UNKNOWN:
2117         default:
2118                 PMD_DRV_LOG(ERR, "Unknown link speed");
2119                 link.link_speed = ETH_SPEED_NUM_NONE;
2120                 break;
2121         }
2122
2123         link.link_autoneg = !(dev->data->dev_conf.link_speeds &
2124                               ETH_LINK_SPEED_FIXED);
2125
2126 out:
2127         ice_atomic_write_link_status(dev, &link);
2128         if (link.link_status == old.link_status)
2129                 return -1;
2130
2131         return 0;
2132 }
2133
2134 static int
2135 ice_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
2136 {
2137         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2138         struct rte_eth_dev_data *dev_data = pf->dev_data;
2139         uint32_t frame_size = mtu + ETHER_HDR_LEN
2140                               + ETHER_CRC_LEN + ICE_VLAN_TAG_SIZE;
2141
2142         /* check if mtu is within the allowed range */
2143         if (mtu < ETHER_MIN_MTU || frame_size > ICE_FRAME_SIZE_MAX)
2144                 return -EINVAL;
2145
2146         /* mtu setting is forbidden if port is start */
2147         if (dev_data->dev_started) {
2148                 PMD_DRV_LOG(ERR,
2149                             "port %d must be stopped before configuration",
2150                             dev_data->port_id);
2151                 return -EBUSY;
2152         }
2153
2154         if (frame_size > ETHER_MAX_LEN)
2155                 dev_data->dev_conf.rxmode.offloads |=
2156                         DEV_RX_OFFLOAD_JUMBO_FRAME;
2157         else
2158                 dev_data->dev_conf.rxmode.offloads &=
2159                         ~DEV_RX_OFFLOAD_JUMBO_FRAME;
2160
2161         dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
2162
2163         return 0;
2164 }
2165
2166 static int ice_macaddr_set(struct rte_eth_dev *dev,
2167                            struct ether_addr *mac_addr)
2168 {
2169         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2170         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2171         struct ice_vsi *vsi = pf->main_vsi;
2172         struct ice_mac_filter *f;
2173         uint8_t flags = 0;
2174         int ret;
2175
2176         if (!is_valid_assigned_ether_addr(mac_addr)) {
2177                 PMD_DRV_LOG(ERR, "Tried to set invalid MAC address.");
2178                 return -EINVAL;
2179         }
2180
2181         TAILQ_FOREACH(f, &vsi->mac_list, next) {
2182                 if (is_same_ether_addr(&pf->dev_addr, &f->mac_info.mac_addr))
2183                         break;
2184         }
2185
2186         if (!f) {
2187                 PMD_DRV_LOG(ERR, "Failed to find filter for default mac");
2188                 return -EIO;
2189         }
2190
2191         ret = ice_remove_mac_filter(vsi, &f->mac_info.mac_addr);
2192         if (ret != ICE_SUCCESS) {
2193                 PMD_DRV_LOG(ERR, "Failed to delete mac filter");
2194                 return -EIO;
2195         }
2196         ret = ice_add_mac_filter(vsi, mac_addr);
2197         if (ret != ICE_SUCCESS) {
2198                 PMD_DRV_LOG(ERR, "Failed to add mac filter");
2199                 return -EIO;
2200         }
2201         memcpy(&pf->dev_addr, mac_addr, ETH_ADDR_LEN);
2202
2203         flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL;
2204         ret = ice_aq_manage_mac_write(hw, mac_addr->addr_bytes, flags, NULL);
2205         if (ret != ICE_SUCCESS)
2206                 PMD_DRV_LOG(ERR, "Failed to set manage mac");
2207
2208         return 0;
2209 }
2210
2211 /* Add a MAC address, and update filters */
2212 static int
2213 ice_macaddr_add(struct rte_eth_dev *dev,
2214                 struct ether_addr *mac_addr,
2215                 __rte_unused uint32_t index,
2216                 __rte_unused uint32_t pool)
2217 {
2218         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2219         struct ice_vsi *vsi = pf->main_vsi;
2220         int ret;
2221
2222         ret = ice_add_mac_filter(vsi, mac_addr);
2223         if (ret != ICE_SUCCESS) {
2224                 PMD_DRV_LOG(ERR, "Failed to add MAC filter");
2225                 return -EINVAL;
2226         }
2227
2228         return ICE_SUCCESS;
2229 }
2230
2231 /* Remove a MAC address, and update filters */
2232 static void
2233 ice_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
2234 {
2235         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2236         struct ice_vsi *vsi = pf->main_vsi;
2237         struct rte_eth_dev_data *data = dev->data;
2238         struct ether_addr *macaddr;
2239         int ret;
2240
2241         macaddr = &data->mac_addrs[index];
2242         ret = ice_remove_mac_filter(vsi, macaddr);
2243         if (ret) {
2244                 PMD_DRV_LOG(ERR, "Failed to remove MAC filter");
2245                 return;
2246         }
2247 }
2248
2249 static int
2250 ice_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
2251 {
2252         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2253         struct ice_vsi *vsi = pf->main_vsi;
2254         int ret;
2255
2256         PMD_INIT_FUNC_TRACE();
2257
2258         if (on) {
2259                 ret = ice_add_vlan_filter(vsi, vlan_id);
2260                 if (ret < 0) {
2261                         PMD_DRV_LOG(ERR, "Failed to add vlan filter");
2262                         return -EINVAL;
2263                 }
2264         } else {
2265                 ret = ice_remove_vlan_filter(vsi, vlan_id);
2266                 if (ret < 0) {
2267                         PMD_DRV_LOG(ERR, "Failed to remove vlan filter");
2268                         return -EINVAL;
2269                 }
2270         }
2271
2272         return 0;
2273 }
2274
2275 /* Configure vlan filter on or off */
2276 static int
2277 ice_vsi_config_vlan_filter(struct ice_vsi *vsi, bool on)
2278 {
2279         struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
2280         struct ice_vsi_ctx ctxt;
2281         uint8_t sec_flags, sw_flags2;
2282         int ret = 0;
2283
2284         sec_flags = ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
2285                     ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S;
2286         sw_flags2 = ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
2287
2288         if (on) {
2289                 vsi->info.sec_flags |= sec_flags;
2290                 vsi->info.sw_flags2 |= sw_flags2;
2291         } else {
2292                 vsi->info.sec_flags &= ~sec_flags;
2293                 vsi->info.sw_flags2 &= ~sw_flags2;
2294         }
2295         vsi->info.sw_id = hw->port_info->sw_id;
2296         (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
2297         ctxt.info.valid_sections =
2298                 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_SW_VALID |
2299                                  ICE_AQ_VSI_PROP_SECURITY_VALID);
2300         ctxt.vsi_num = vsi->vsi_id;
2301
2302         ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
2303         if (ret) {
2304                 PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan rx pruning",
2305                             on ? "enable" : "disable");
2306                 return -EINVAL;
2307         } else {
2308                 vsi->info.valid_sections |=
2309                         rte_cpu_to_le_16(ICE_AQ_VSI_PROP_SW_VALID |
2310                                          ICE_AQ_VSI_PROP_SECURITY_VALID);
2311         }
2312
2313         /* consist with other drivers, allow untagged packet when vlan filter on */
2314         if (on)
2315                 ret = ice_add_vlan_filter(vsi, 0);
2316         else
2317                 ret = ice_remove_vlan_filter(vsi, 0);
2318
2319         return 0;
2320 }
2321
/* Enable or disable VLAN stripping on the VSI.
 *
 * Programs the EMOD field of the VSI context's vlan_flags:
 * STR_BOTH strips the tag into the descriptor, NOTHING leaves the
 * packet untouched.  Skips the AQ update when the requested mode is
 * already in effect.
 *
 * Return: 0 on success (or no-op), -EINVAL if the VSI update fails.
 */
static int
ice_vsi_config_vlan_stripping(struct ice_vsi *vsi, bool on)
{
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	struct ice_vsi_ctx ctxt;
	uint8_t vlan_flags;
	int ret = 0;

	/* Check if it has been already on or off */
	if (vsi->info.valid_sections &
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID)) {
		if (on) {
			if ((vsi->info.vlan_flags &
			     ICE_AQ_VSI_VLAN_EMOD_M) ==
			    ICE_AQ_VSI_VLAN_EMOD_STR_BOTH)
				return 0; /* already on */
		} else {
			if ((vsi->info.vlan_flags &
			     ICE_AQ_VSI_VLAN_EMOD_M) ==
			    ICE_AQ_VSI_VLAN_EMOD_NOTHING)
				return 0; /* already off */
		}
	}

	if (on)
		vlan_flags = ICE_AQ_VSI_VLAN_EMOD_STR_BOTH;
	else
		vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING;
	/* replace only the EMOD bits, keep the rest of vlan_flags */
	vsi->info.vlan_flags &= ~(ICE_AQ_VSI_VLAN_EMOD_M);
	vsi->info.vlan_flags |= vlan_flags;
	(void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
	ctxt.info.valid_sections =
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID);
	ctxt.vsi_num = vsi->vsi_id;
	ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
	if (ret) {
		PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan stripping",
			    on ? "enable" : "disable");
		return -EINVAL;
	}

	/* remember that the VLAN section of the cached context is valid */
	vsi->info.valid_sections |=
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID);

	return ret;
}
2368
2369 static int
2370 ice_vlan_offload_set(struct rte_eth_dev *dev, int mask)
2371 {
2372         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2373         struct ice_vsi *vsi = pf->main_vsi;
2374         struct rte_eth_rxmode *rxmode;
2375
2376         rxmode = &dev->data->dev_conf.rxmode;
2377         if (mask & ETH_VLAN_FILTER_MASK) {
2378                 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
2379                         ice_vsi_config_vlan_filter(vsi, TRUE);
2380                 else
2381                         ice_vsi_config_vlan_filter(vsi, FALSE);
2382         }
2383
2384         if (mask & ETH_VLAN_STRIP_MASK) {
2385                 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
2386                         ice_vsi_config_vlan_stripping(vsi, TRUE);
2387                 else
2388                         ice_vsi_config_vlan_stripping(vsi, FALSE);
2389         }
2390
2391         if (mask & ETH_VLAN_EXTEND_MASK) {
2392                 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
2393                         ice_vsi_config_double_vlan(vsi, TRUE);
2394                 else
2395                         ice_vsi_config_double_vlan(vsi, FALSE);
2396         }
2397
2398         return 0;
2399 }
2400
/* dev_ops vlan_tpid_set callback: program the TPID (ethertype) used for
 * outer/inner VLAN tags.
 *
 * Writes the ETHERTYPE field of a GL_SWT_L2TAGCTRL register; which
 * register index is used depends on whether QinQ (VLAN extend) is
 * enabled.  NOTE(review): indexes 3 and 5 appear to select the outer and
 * single/inner tag-control entries respectively — confirm against the
 * ice register map.
 *
 * Return: 0 on success (including a no-op write), -EINVAL for an
 * unsupported vlan_type combination.
 */
static int
ice_vlan_tpid_set(struct rte_eth_dev *dev,
		  enum rte_vlan_type vlan_type,
		  uint16_t tpid)
{
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint64_t reg_r = 0, reg_w = 0;
	uint16_t reg_id = 0;
	int ret = 0;
	int qinq = dev->data->dev_conf.rxmode.offloads &
		   DEV_RX_OFFLOAD_VLAN_EXTEND;

	switch (vlan_type) {
	case ETH_VLAN_TYPE_OUTER:
		if (qinq)
			reg_id = 3;
		else
			reg_id = 5;
	break;
	case ETH_VLAN_TYPE_INNER:
		if (qinq) {
			reg_id = 5;
		} else {
			/* inner TPID only meaningful with QinQ enabled */
			PMD_DRV_LOG(ERR,
				    "Unsupported vlan type in single vlan.");
			return -EINVAL;
		}
		break;
	default:
		PMD_DRV_LOG(ERR, "Unsupported vlan type %d", vlan_type);
		return -EINVAL;
	}
	reg_r = ICE_READ_REG(hw, GL_SWT_L2TAGCTRL(reg_id));
	PMD_DRV_LOG(DEBUG, "Debug read from ICE GL_SWT_L2TAGCTRL[%d]: "
		    "0x%08"PRIx64"", reg_id, reg_r);

	/* replace only the ethertype field, keep the other control bits */
	reg_w = reg_r & (~(GL_SWT_L2TAGCTRL_ETHERTYPE_M));
	reg_w |= ((uint64_t)tpid << GL_SWT_L2TAGCTRL_ETHERTYPE_S);
	if (reg_r == reg_w) {
		PMD_DRV_LOG(DEBUG, "No need to write");
		return 0;
	}

	ICE_WRITE_REG(hw, GL_SWT_L2TAGCTRL(reg_id), reg_w);
	PMD_DRV_LOG(DEBUG, "Debug write 0x%08"PRIx64" to "
		    "ICE GL_SWT_L2TAGCTRL[%d]", reg_w, reg_id);

	return ret;
}
2450
2451 static int
2452 ice_get_rss_lut(struct ice_vsi *vsi, uint8_t *lut, uint16_t lut_size)
2453 {
2454         struct ice_pf *pf = ICE_VSI_TO_PF(vsi);
2455         struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
2456         int ret;
2457
2458         if (!lut)
2459                 return -EINVAL;
2460
2461         if (pf->flags & ICE_FLAG_RSS_AQ_CAPABLE) {
2462                 ret = ice_aq_get_rss_lut(hw, vsi->idx, TRUE,
2463                                          lut, lut_size);
2464                 if (ret) {
2465                         PMD_DRV_LOG(ERR, "Failed to get RSS lookup table");
2466                         return -EINVAL;
2467                 }
2468         } else {
2469                 uint64_t *lut_dw = (uint64_t *)lut;
2470                 uint16_t i, lut_size_dw = lut_size / 4;
2471
2472                 for (i = 0; i < lut_size_dw; i++)
2473                         lut_dw[i] = ICE_READ_REG(hw, PFQF_HLUT(i));
2474         }
2475
2476         return 0;
2477 }
2478
2479 static int
2480 ice_set_rss_lut(struct ice_vsi *vsi, uint8_t *lut, uint16_t lut_size)
2481 {
2482         struct ice_pf *pf = ICE_VSI_TO_PF(vsi);
2483         struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
2484         int ret;
2485
2486         if (!vsi || !lut)
2487                 return -EINVAL;
2488
2489         if (pf->flags & ICE_FLAG_RSS_AQ_CAPABLE) {
2490                 ret = ice_aq_set_rss_lut(hw, vsi->idx, TRUE,
2491                                          lut, lut_size);
2492                 if (ret) {
2493                         PMD_DRV_LOG(ERR, "Failed to set RSS lookup table");
2494                         return -EINVAL;
2495                 }
2496         } else {
2497                 uint64_t *lut_dw = (uint64_t *)lut;
2498                 uint16_t i, lut_size_dw = lut_size / 4;
2499
2500                 for (i = 0; i < lut_size_dw; i++)
2501                         ICE_WRITE_REG(hw, PFQF_HLUT(i), lut_dw[i]);
2502
2503                 ice_flush(hw);
2504         }
2505
2506         return 0;
2507 }
2508
2509 static int
2510 ice_rss_reta_update(struct rte_eth_dev *dev,
2511                     struct rte_eth_rss_reta_entry64 *reta_conf,
2512                     uint16_t reta_size)
2513 {
2514         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2515         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2516         uint16_t i, lut_size = hw->func_caps.common_cap.rss_table_size;
2517         uint16_t idx, shift;
2518         uint8_t *lut;
2519         int ret;
2520
2521         if (reta_size != lut_size ||
2522             reta_size > ETH_RSS_RETA_SIZE_512) {
2523                 PMD_DRV_LOG(ERR,
2524                             "The size of hash lookup table configured (%d)"
2525                             "doesn't match the number hardware can "
2526                             "supported (%d)",
2527                             reta_size, lut_size);
2528                 return -EINVAL;
2529         }
2530
2531         lut = rte_zmalloc(NULL, reta_size, 0);
2532         if (!lut) {
2533                 PMD_DRV_LOG(ERR, "No memory can be allocated");
2534                 return -ENOMEM;
2535         }
2536         ret = ice_get_rss_lut(pf->main_vsi, lut, reta_size);
2537         if (ret)
2538                 goto out;
2539
2540         for (i = 0; i < reta_size; i++) {
2541                 idx = i / RTE_RETA_GROUP_SIZE;
2542                 shift = i % RTE_RETA_GROUP_SIZE;
2543                 if (reta_conf[idx].mask & (1ULL << shift))
2544                         lut[i] = reta_conf[idx].reta[shift];
2545         }
2546         ret = ice_set_rss_lut(pf->main_vsi, lut, reta_size);
2547
2548 out:
2549         rte_free(lut);
2550
2551         return ret;
2552 }
2553
2554 static int
2555 ice_rss_reta_query(struct rte_eth_dev *dev,
2556                    struct rte_eth_rss_reta_entry64 *reta_conf,
2557                    uint16_t reta_size)
2558 {
2559         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2560         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2561         uint16_t i, lut_size = hw->func_caps.common_cap.rss_table_size;
2562         uint16_t idx, shift;
2563         uint8_t *lut;
2564         int ret;
2565
2566         if (reta_size != lut_size ||
2567             reta_size > ETH_RSS_RETA_SIZE_512) {
2568                 PMD_DRV_LOG(ERR,
2569                             "The size of hash lookup table configured (%d)"
2570                             "doesn't match the number hardware can "
2571                             "supported (%d)",
2572                             reta_size, lut_size);
2573                 return -EINVAL;
2574         }
2575
2576         lut = rte_zmalloc(NULL, reta_size, 0);
2577         if (!lut) {
2578                 PMD_DRV_LOG(ERR, "No memory can be allocated");
2579                 return -ENOMEM;
2580         }
2581
2582         ret = ice_get_rss_lut(pf->main_vsi, lut, reta_size);
2583         if (ret)
2584                 goto out;
2585
2586         for (i = 0; i < reta_size; i++) {
2587                 idx = i / RTE_RETA_GROUP_SIZE;
2588                 shift = i % RTE_RETA_GROUP_SIZE;
2589                 if (reta_conf[idx].mask & (1ULL << shift))
2590                         reta_conf[idx].reta[shift] = lut[i];
2591         }
2592
2593 out:
2594         rte_free(lut);
2595
2596         return ret;
2597 }
2598
2599 static int
2600 ice_set_rss_key(struct ice_vsi *vsi, uint8_t *key, uint8_t key_len)
2601 {
2602         struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
2603         int ret = 0;
2604
2605         if (!key || key_len == 0) {
2606                 PMD_DRV_LOG(DEBUG, "No key to be configured");
2607                 return 0;
2608         } else if (key_len != (VSIQF_HKEY_MAX_INDEX + 1) *
2609                    sizeof(uint32_t)) {
2610                 PMD_DRV_LOG(ERR, "Invalid key length %u", key_len);
2611                 return -EINVAL;
2612         }
2613
2614         struct ice_aqc_get_set_rss_keys *key_dw =
2615                 (struct ice_aqc_get_set_rss_keys *)key;
2616
2617         ret = ice_aq_set_rss_key(hw, vsi->idx, key_dw);
2618         if (ret) {
2619                 PMD_DRV_LOG(ERR, "Failed to configure RSS key via AQ");
2620                 ret = -EINVAL;
2621         }
2622
2623         return ret;
2624 }
2625
2626 static int
2627 ice_get_rss_key(struct ice_vsi *vsi, uint8_t *key, uint8_t *key_len)
2628 {
2629         struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
2630         int ret;
2631
2632         if (!key || !key_len)
2633                 return -EINVAL;
2634
2635         ret = ice_aq_get_rss_key
2636                 (hw, vsi->idx,
2637                  (struct ice_aqc_get_set_rss_keys *)key);
2638         if (ret) {
2639                 PMD_DRV_LOG(ERR, "Failed to get RSS key via AQ");
2640                 return -EINVAL;
2641         }
2642         *key_len = (VSIQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
2643
2644         return 0;
2645 }
2646
2647 static int
2648 ice_rss_hash_update(struct rte_eth_dev *dev,
2649                     struct rte_eth_rss_conf *rss_conf)
2650 {
2651         enum ice_status status = ICE_SUCCESS;
2652         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2653         struct ice_vsi *vsi = pf->main_vsi;
2654
2655         /* set hash key */
2656         status = ice_set_rss_key(vsi, rss_conf->rss_key, rss_conf->rss_key_len);
2657         if (status)
2658                 return status;
2659
2660         /* TODO: hash enable config, ice_add_rss_cfg */
2661         return 0;
2662 }
2663
2664 static int
2665 ice_rss_hash_conf_get(struct rte_eth_dev *dev,
2666                       struct rte_eth_rss_conf *rss_conf)
2667 {
2668         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2669         struct ice_vsi *vsi = pf->main_vsi;
2670
2671         ice_get_rss_key(vsi, rss_conf->rss_key,
2672                         &rss_conf->rss_key_len);
2673
2674         /* TODO: default set to 0 as hf config is not supported now */
2675         rss_conf->rss_hf = 0;
2676         return 0;
2677 }
2678
2679 static void
2680 ice_promisc_enable(struct rte_eth_dev *dev)
2681 {
2682         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2683         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2684         struct ice_vsi *vsi = pf->main_vsi;
2685         uint8_t pmask;
2686         uint16_t status;
2687
2688         pmask = ICE_PROMISC_UCAST_RX | ICE_PROMISC_UCAST_TX |
2689                 ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX;
2690
2691         status = ice_set_vsi_promisc(hw, vsi->idx, pmask, 0);
2692         if (status != ICE_SUCCESS)
2693                 PMD_DRV_LOG(ERR, "Failed to enable promisc, err=%d", status);
2694 }
2695
2696 static void
2697 ice_promisc_disable(struct rte_eth_dev *dev)
2698 {
2699         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2700         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2701         struct ice_vsi *vsi = pf->main_vsi;
2702         uint16_t status;
2703         uint8_t pmask;
2704
2705         pmask = ICE_PROMISC_UCAST_RX | ICE_PROMISC_UCAST_TX |
2706                 ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX;
2707
2708         status = ice_clear_vsi_promisc(hw, vsi->idx, pmask, 0);
2709         if (status != ICE_SUCCESS)
2710                 PMD_DRV_LOG(ERR, "Failed to clear promisc, err=%d", status);
2711 }
2712
2713 static void
2714 ice_allmulti_enable(struct rte_eth_dev *dev)
2715 {
2716         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2717         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2718         struct ice_vsi *vsi = pf->main_vsi;
2719         uint8_t pmask;
2720         uint16_t status;
2721
2722         pmask = ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX;
2723
2724         status = ice_set_vsi_promisc(hw, vsi->idx, pmask, 0);
2725         if (status != ICE_SUCCESS)
2726                 PMD_DRV_LOG(ERR, "Failed to enable allmulti, err=%d", status);
2727 }
2728
2729 static void
2730 ice_allmulti_disable(struct rte_eth_dev *dev)
2731 {
2732         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2733         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2734         struct ice_vsi *vsi = pf->main_vsi;
2735         uint16_t status;
2736         uint8_t pmask;
2737
2738         if (dev->data->promiscuous == 1)
2739                 return; /* must remain in all_multicast mode */
2740
2741         pmask = ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX;
2742
2743         status = ice_clear_vsi_promisc(hw, vsi->idx, pmask, 0);
2744         if (status != ICE_SUCCESS)
2745                 PMD_DRV_LOG(ERR, "Failed to clear allmulti, err=%d", status);
2746 }
2747
2748 static int ice_rx_queue_intr_enable(struct rte_eth_dev *dev,
2749                                     uint16_t queue_id)
2750 {
2751         struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
2752         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2753         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2754         uint32_t val;
2755         uint16_t msix_intr;
2756
2757         msix_intr = intr_handle->intr_vec[queue_id];
2758
2759         val = GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
2760               GLINT_DYN_CTL_ITR_INDX_M;
2761         val &= ~GLINT_DYN_CTL_WB_ON_ITR_M;
2762
2763         ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr), val);
2764         rte_intr_enable(&pci_dev->intr_handle);
2765
2766         return 0;
2767 }
2768
2769 static int ice_rx_queue_intr_disable(struct rte_eth_dev *dev,
2770                                      uint16_t queue_id)
2771 {
2772         struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
2773         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2774         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2775         uint16_t msix_intr;
2776
2777         msix_intr = intr_handle->intr_vec[queue_id];
2778
2779         ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr), GLINT_DYN_CTL_WB_ON_ITR_M);
2780
2781         return 0;
2782 }
2783
2784 static int
2785 ice_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
2786 {
2787         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2788         u32 full_ver;
2789         u8 ver, patch;
2790         u16 build;
2791         int ret;
2792
2793         full_ver = hw->nvm.oem_ver;
2794         ver = (u8)(full_ver >> 24);
2795         build = (u16)((full_ver >> 8) & 0xffff);
2796         patch = (u8)(full_ver & 0xff);
2797
2798         ret = snprintf(fw_version, fw_size,
2799                         "%d.%d%d 0x%08x %d.%d.%d",
2800                         ((hw->nvm.ver >> 12) & 0xf),
2801                         ((hw->nvm.ver >> 4) & 0xff),
2802                         (hw->nvm.ver & 0xf), hw->nvm.eetrack,
2803                         ver, build, patch);
2804
2805         /* add the size of '\0' */
2806         ret += 1;
2807         if (fw_size < (u32)ret)
2808                 return ret;
2809         else
2810                 return 0;
2811 }
2812
/**
 * Configure port-based VLAN (PVID) insertion on a VSI.
 *
 * When info->on is set, the given PVID is inserted on transmit and only
 * tagged packets may be sent.  When cleared, the PVID is removed and the
 * accept-tagged/accept-untagged modes are derived from info->config.reject.
 *
 * The cached vsi->info is updated first, then pushed to firmware through
 * an ice_update_vsi() admin-queue call.
 *
 * Return 0 (ICE_SUCCESS) on success, -EINVAL on bad arguments or AQ
 * failure.  NOTE(review): on AQ failure vsi->info has already been
 * modified and is not rolled back — confirm callers tolerate this.
 */
static int
ice_vsi_vlan_pvid_set(struct ice_vsi *vsi, struct ice_vsi_vlan_pvid_info *info)
{
	struct ice_hw *hw;
	struct ice_vsi_ctx ctxt;
	uint8_t vlan_flags = 0;
	int ret;

	if (!vsi || !info) {
		PMD_DRV_LOG(ERR, "invalid parameters");
		return -EINVAL;
	}

	if (info->on) {
		vsi->info.pvid = info->config.pvid;
		/**
		 * If insert pvid is enabled, only tagged pkts are
		 * allowed to be sent out.
		 */
		vlan_flags = ICE_AQ_VSI_PVLAN_INSERT_PVID |
			     ICE_AQ_VSI_VLAN_MODE_UNTAGGED;
	} else {
		vsi->info.pvid = 0;
		/* Accept whichever frame classes are NOT rejected. */
		if (info->config.reject.tagged == 0)
			vlan_flags |= ICE_AQ_VSI_VLAN_MODE_TAGGED;

		if (info->config.reject.untagged == 0)
			vlan_flags |= ICE_AQ_VSI_VLAN_MODE_UNTAGGED;
	}
	/* Clear the old PVID/mode bits before merging the new flags. */
	vsi->info.vlan_flags &= ~(ICE_AQ_VSI_PVLAN_INSERT_PVID |
				  ICE_AQ_VSI_VLAN_MODE_M);
	vsi->info.vlan_flags |= vlan_flags;
	/* Build a VSI context carrying only the VLAN section. */
	memset(&ctxt, 0, sizeof(ctxt));
	rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
	ctxt.info.valid_sections =
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID);
	ctxt.vsi_num = vsi->vsi_id;

	hw = ICE_VSI_TO_HW(vsi);
	ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
	if (ret != ICE_SUCCESS) {
		PMD_DRV_LOG(ERR,
			    "update VSI for VLAN insert failed, err %d",
			    ret);
		return -EINVAL;
	}

	/* Remember that the cached VLAN section is now authoritative. */
	vsi->info.valid_sections |=
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID);

	return ret;
}
2865
2866 static int
2867 ice_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
2868 {
2869         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2870         struct ice_vsi *vsi = pf->main_vsi;
2871         struct rte_eth_dev_data *data = pf->dev_data;
2872         struct ice_vsi_vlan_pvid_info info;
2873         int ret;
2874
2875         memset(&info, 0, sizeof(info));
2876         info.on = on;
2877         if (info.on) {
2878                 info.config.pvid = pvid;
2879         } else {
2880                 info.config.reject.tagged =
2881                         data->dev_conf.txmode.hw_vlan_reject_tagged;
2882                 info.config.reject.untagged =
2883                         data->dev_conf.txmode.hw_vlan_reject_untagged;
2884         }
2885
2886         ret = ice_vsi_vlan_pvid_set(vsi, &info);
2887         if (ret < 0) {
2888                 PMD_DRV_LOG(ERR, "Failed to set pvid.");
2889                 return -EINVAL;
2890         }
2891
2892         return 0;
2893 }
2894
2895 static int
2896 ice_get_eeprom_length(struct rte_eth_dev *dev)
2897 {
2898         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2899
2900         /* Convert word count to byte count */
2901         return hw->nvm.sr_words << 1;
2902 }
2903
2904 static int
2905 ice_get_eeprom(struct rte_eth_dev *dev,
2906                struct rte_dev_eeprom_info *eeprom)
2907 {
2908         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2909         uint16_t *data = eeprom->data;
2910         uint16_t first_word, last_word, nwords;
2911         enum ice_status status = ICE_SUCCESS;
2912
2913         first_word = eeprom->offset >> 1;
2914         last_word = (eeprom->offset + eeprom->length - 1) >> 1;
2915         nwords = last_word - first_word + 1;
2916
2917         if (first_word > hw->nvm.sr_words ||
2918             last_word > hw->nvm.sr_words) {
2919                 PMD_DRV_LOG(ERR, "Requested EEPROM bytes out of range.");
2920                 return -EINVAL;
2921         }
2922
2923         eeprom->magic = hw->vendor_id | (hw->device_id << 16);
2924
2925         status = ice_read_sr_buf(hw, first_word, &nwords, data);
2926         if (status) {
2927                 PMD_DRV_LOG(ERR, "EEPROM read failed.");
2928                 eeprom->length = sizeof(uint16_t) * nwords;
2929                 return -EIO;
2930         }
2931
2932         return 0;
2933 }
2934
2935 static void
2936 ice_stat_update_32(struct ice_hw *hw,
2937                    uint32_t reg,
2938                    bool offset_loaded,
2939                    uint64_t *offset,
2940                    uint64_t *stat)
2941 {
2942         uint64_t new_data;
2943
2944         new_data = (uint64_t)ICE_READ_REG(hw, reg);
2945         if (!offset_loaded)
2946                 *offset = new_data;
2947
2948         if (new_data >= *offset)
2949                 *stat = (uint64_t)(new_data - *offset);
2950         else
2951                 *stat = (uint64_t)((new_data +
2952                                     ((uint64_t)1 << ICE_32_BIT_WIDTH))
2953                                    - *offset);
2954 }
2955
2956 static void
2957 ice_stat_update_40(struct ice_hw *hw,
2958                    uint32_t hireg,
2959                    uint32_t loreg,
2960                    bool offset_loaded,
2961                    uint64_t *offset,
2962                    uint64_t *stat)
2963 {
2964         uint64_t new_data;
2965
2966         new_data = (uint64_t)ICE_READ_REG(hw, loreg);
2967         new_data |= (uint64_t)(ICE_READ_REG(hw, hireg) & ICE_8_BIT_MASK) <<
2968                     ICE_32_BIT_WIDTH;
2969
2970         if (!offset_loaded)
2971                 *offset = new_data;
2972
2973         if (new_data >= *offset)
2974                 *stat = new_data - *offset;
2975         else
2976                 *stat = (uint64_t)((new_data +
2977                                     ((uint64_t)1 << ICE_40_BIT_WIDTH)) -
2978                                    *offset);
2979
2980         *stat &= ICE_40_BIT_MASK;
2981 }
2982
/* Get all the statistics of a VSI.
 *
 * Reads the per-VSI HW counters into vsi->eth_stats as deltas against
 * vsi->eth_stats_offset (the snapshot taken the first time this runs,
 * when offset_loaded is still false).  Byte counters are adjusted to
 * exclude the Ethernet CRC, which HW includes.
 */
static void
ice_update_vsi_stats(struct ice_vsi *vsi)
{
	struct ice_eth_stats *oes = &vsi->eth_stats_offset;
	struct ice_eth_stats *nes = &vsi->eth_stats;
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	int idx = rte_le_to_cpu_16(vsi->vsi_id);

	/* Rx byte and per-class packet counters (40-bit split regs). */
	ice_stat_update_40(hw, GLV_GORCH(idx), GLV_GORCL(idx),
			   vsi->offset_loaded, &oes->rx_bytes,
			   &nes->rx_bytes);
	ice_stat_update_40(hw, GLV_UPRCH(idx), GLV_UPRCL(idx),
			   vsi->offset_loaded, &oes->rx_unicast,
			   &nes->rx_unicast);
	ice_stat_update_40(hw, GLV_MPRCH(idx), GLV_MPRCL(idx),
			   vsi->offset_loaded, &oes->rx_multicast,
			   &nes->rx_multicast);
	ice_stat_update_40(hw, GLV_BPRCH(idx), GLV_BPRCL(idx),
			   vsi->offset_loaded, &oes->rx_broadcast,
			   &nes->rx_broadcast);
	/* exclude CRC bytes */
	nes->rx_bytes -= (nes->rx_unicast + nes->rx_multicast +
			  nes->rx_broadcast) * ETHER_CRC_LEN;

	ice_stat_update_32(hw, GLV_RDPC(idx), vsi->offset_loaded,
			   &oes->rx_discards, &nes->rx_discards);
	/* GLV_REPC not supported */
	/* GLV_RMPC not supported */
	ice_stat_update_32(hw, GLSWID_RUPP(idx), vsi->offset_loaded,
			   &oes->rx_unknown_protocol,
			   &nes->rx_unknown_protocol);
	/* Tx byte and per-class packet counters (40-bit split regs). */
	ice_stat_update_40(hw, GLV_GOTCH(idx), GLV_GOTCL(idx),
			   vsi->offset_loaded, &oes->tx_bytes,
			   &nes->tx_bytes);
	ice_stat_update_40(hw, GLV_UPTCH(idx), GLV_UPTCL(idx),
			   vsi->offset_loaded, &oes->tx_unicast,
			   &nes->tx_unicast);
	ice_stat_update_40(hw, GLV_MPTCH(idx), GLV_MPTCL(idx),
			   vsi->offset_loaded, &oes->tx_multicast,
			   &nes->tx_multicast);
	ice_stat_update_40(hw, GLV_BPTCH(idx), GLV_BPTCL(idx),
			   vsi->offset_loaded,  &oes->tx_broadcast,
			   &nes->tx_broadcast);
	/* GLV_TDPC not supported */
	ice_stat_update_32(hw, GLV_TEPC(idx), vsi->offset_loaded,
			   &oes->tx_errors, &nes->tx_errors);
	/* Subsequent reads will be deltas against this snapshot. */
	vsi->offset_loaded = true;

	PMD_DRV_LOG(DEBUG, "************** VSI[%u] stats start **************",
		    vsi->vsi_id);
	PMD_DRV_LOG(DEBUG, "rx_bytes:            %"PRIu64"", nes->rx_bytes);
	PMD_DRV_LOG(DEBUG, "rx_unicast:          %"PRIu64"", nes->rx_unicast);
	PMD_DRV_LOG(DEBUG, "rx_multicast:        %"PRIu64"", nes->rx_multicast);
	PMD_DRV_LOG(DEBUG, "rx_broadcast:        %"PRIu64"", nes->rx_broadcast);
	PMD_DRV_LOG(DEBUG, "rx_discards:         %"PRIu64"", nes->rx_discards);
	PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
		    nes->rx_unknown_protocol);
	PMD_DRV_LOG(DEBUG, "tx_bytes:            %"PRIu64"", nes->tx_bytes);
	PMD_DRV_LOG(DEBUG, "tx_unicast:          %"PRIu64"", nes->tx_unicast);
	PMD_DRV_LOG(DEBUG, "tx_multicast:        %"PRIu64"", nes->tx_multicast);
	PMD_DRV_LOG(DEBUG, "tx_broadcast:        %"PRIu64"", nes->tx_broadcast);
	PMD_DRV_LOG(DEBUG, "tx_discards:         %"PRIu64"", nes->tx_discards);
	PMD_DRV_LOG(DEBUG, "tx_errors:           %"PRIu64"", nes->tx_errors);
	PMD_DRV_LOG(DEBUG, "************** VSI[%u] stats end ****************",
		    vsi->vsi_id);
}
3050
3051 static void
3052 ice_read_stats_registers(struct ice_pf *pf, struct ice_hw *hw)
3053 {
3054         struct ice_hw_port_stats *ns = &pf->stats; /* new stats */
3055         struct ice_hw_port_stats *os = &pf->stats_offset; /* old stats */
3056
3057         /* Get statistics of struct ice_eth_stats */
3058         ice_stat_update_40(hw, GLPRT_GORCH(hw->port_info->lport),
3059                            GLPRT_GORCL(hw->port_info->lport),
3060                            pf->offset_loaded, &os->eth.rx_bytes,
3061                            &ns->eth.rx_bytes);
3062         ice_stat_update_40(hw, GLPRT_UPRCH(hw->port_info->lport),
3063                            GLPRT_UPRCL(hw->port_info->lport),
3064                            pf->offset_loaded, &os->eth.rx_unicast,
3065                            &ns->eth.rx_unicast);
3066         ice_stat_update_40(hw, GLPRT_MPRCH(hw->port_info->lport),
3067                            GLPRT_MPRCL(hw->port_info->lport),
3068                            pf->offset_loaded, &os->eth.rx_multicast,
3069                            &ns->eth.rx_multicast);
3070         ice_stat_update_40(hw, GLPRT_BPRCH(hw->port_info->lport),
3071                            GLPRT_BPRCL(hw->port_info->lport),
3072                            pf->offset_loaded, &os->eth.rx_broadcast,
3073                            &ns->eth.rx_broadcast);
3074         ice_stat_update_32(hw, PRTRPB_RDPC,
3075                            pf->offset_loaded, &os->eth.rx_discards,
3076                            &ns->eth.rx_discards);
3077
3078         /* Workaround: CRC size should not be included in byte statistics,
3079          * so subtract ETHER_CRC_LEN from the byte counter for each rx packet.
3080          */
3081         ns->eth.rx_bytes -= (ns->eth.rx_unicast + ns->eth.rx_multicast +
3082                              ns->eth.rx_broadcast) * ETHER_CRC_LEN;
3083
3084         /* GLPRT_REPC not supported */
3085         /* GLPRT_RMPC not supported */
3086         ice_stat_update_32(hw, GLSWID_RUPP(hw->port_info->lport),
3087                            pf->offset_loaded,
3088                            &os->eth.rx_unknown_protocol,
3089                            &ns->eth.rx_unknown_protocol);
3090         ice_stat_update_40(hw, GLPRT_GOTCH(hw->port_info->lport),
3091                            GLPRT_GOTCL(hw->port_info->lport),
3092                            pf->offset_loaded, &os->eth.tx_bytes,
3093                            &ns->eth.tx_bytes);
3094         ice_stat_update_40(hw, GLPRT_UPTCH(hw->port_info->lport),
3095                            GLPRT_UPTCL(hw->port_info->lport),
3096                            pf->offset_loaded, &os->eth.tx_unicast,
3097                            &ns->eth.tx_unicast);
3098         ice_stat_update_40(hw, GLPRT_MPTCH(hw->port_info->lport),
3099                            GLPRT_MPTCL(hw->port_info->lport),
3100                            pf->offset_loaded, &os->eth.tx_multicast,
3101                            &ns->eth.tx_multicast);
3102         ice_stat_update_40(hw, GLPRT_BPTCH(hw->port_info->lport),
3103                            GLPRT_BPTCL(hw->port_info->lport),
3104                            pf->offset_loaded, &os->eth.tx_broadcast,
3105                            &ns->eth.tx_broadcast);
3106         ns->eth.tx_bytes -= (ns->eth.tx_unicast + ns->eth.tx_multicast +
3107                              ns->eth.tx_broadcast) * ETHER_CRC_LEN;
3108
3109         /* GLPRT_TEPC not supported */
3110
3111         /* additional port specific stats */
3112         ice_stat_update_32(hw, GLPRT_TDOLD(hw->port_info->lport),
3113                            pf->offset_loaded, &os->tx_dropped_link_down,
3114                            &ns->tx_dropped_link_down);
3115         ice_stat_update_32(hw, GLPRT_CRCERRS(hw->port_info->lport),
3116                            pf->offset_loaded, &os->crc_errors,
3117                            &ns->crc_errors);
3118         ice_stat_update_32(hw, GLPRT_ILLERRC(hw->port_info->lport),
3119                            pf->offset_loaded, &os->illegal_bytes,
3120                            &ns->illegal_bytes);
3121         /* GLPRT_ERRBC not supported */
3122         ice_stat_update_32(hw, GLPRT_MLFC(hw->port_info->lport),
3123                            pf->offset_loaded, &os->mac_local_faults,
3124                            &ns->mac_local_faults);
3125         ice_stat_update_32(hw, GLPRT_MRFC(hw->port_info->lport),
3126                            pf->offset_loaded, &os->mac_remote_faults,
3127                            &ns->mac_remote_faults);
3128
3129         ice_stat_update_32(hw, GLPRT_RLEC(hw->port_info->lport),
3130                            pf->offset_loaded, &os->rx_len_errors,
3131                            &ns->rx_len_errors);
3132
3133         ice_stat_update_32(hw, GLPRT_LXONRXC(hw->port_info->lport),
3134                            pf->offset_loaded, &os->link_xon_rx,
3135                            &ns->link_xon_rx);
3136         ice_stat_update_32(hw, GLPRT_LXOFFRXC(hw->port_info->lport),
3137                            pf->offset_loaded, &os->link_xoff_rx,
3138                            &ns->link_xoff_rx);
3139         ice_stat_update_32(hw, GLPRT_LXONTXC(hw->port_info->lport),
3140                            pf->offset_loaded, &os->link_xon_tx,
3141                            &ns->link_xon_tx);
3142         ice_stat_update_32(hw, GLPRT_LXOFFTXC(hw->port_info->lport),
3143                            pf->offset_loaded, &os->link_xoff_tx,
3144                            &ns->link_xoff_tx);
3145         ice_stat_update_40(hw, GLPRT_PRC64H(hw->port_info->lport),
3146                            GLPRT_PRC64L(hw->port_info->lport),
3147                            pf->offset_loaded, &os->rx_size_64,
3148                            &ns->rx_size_64);
3149         ice_stat_update_40(hw, GLPRT_PRC127H(hw->port_info->lport),
3150                            GLPRT_PRC127L(hw->port_info->lport),
3151                            pf->offset_loaded, &os->rx_size_127,
3152                            &ns->rx_size_127);
3153         ice_stat_update_40(hw, GLPRT_PRC255H(hw->port_info->lport),
3154                            GLPRT_PRC255L(hw->port_info->lport),
3155                            pf->offset_loaded, &os->rx_size_255,
3156                            &ns->rx_size_255);
3157         ice_stat_update_40(hw, GLPRT_PRC511H(hw->port_info->lport),
3158                            GLPRT_PRC511L(hw->port_info->lport),
3159                            pf->offset_loaded, &os->rx_size_511,
3160                            &ns->rx_size_511);
3161         ice_stat_update_40(hw, GLPRT_PRC1023H(hw->port_info->lport),
3162                            GLPRT_PRC1023L(hw->port_info->lport),
3163                            pf->offset_loaded, &os->rx_size_1023,
3164                            &ns->rx_size_1023);
3165         ice_stat_update_40(hw, GLPRT_PRC1522H(hw->port_info->lport),
3166                            GLPRT_PRC1522L(hw->port_info->lport),
3167                            pf->offset_loaded, &os->rx_size_1522,
3168                            &ns->rx_size_1522);
3169         ice_stat_update_40(hw, GLPRT_PRC9522H(hw->port_info->lport),
3170                            GLPRT_PRC9522L(hw->port_info->lport),
3171                            pf->offset_loaded, &os->rx_size_big,
3172                            &ns->rx_size_big);
3173         ice_stat_update_32(hw, GLPRT_RUC(hw->port_info->lport),
3174                            pf->offset_loaded, &os->rx_undersize,
3175                            &ns->rx_undersize);
3176         ice_stat_update_32(hw, GLPRT_RFC(hw->port_info->lport),
3177                            pf->offset_loaded, &os->rx_fragments,
3178                            &ns->rx_fragments);
3179         ice_stat_update_32(hw, GLPRT_ROC(hw->port_info->lport),
3180                            pf->offset_loaded, &os->rx_oversize,
3181                            &ns->rx_oversize);
3182         ice_stat_update_32(hw, GLPRT_RJC(hw->port_info->lport),
3183                            pf->offset_loaded, &os->rx_jabber,
3184                            &ns->rx_jabber);
3185         ice_stat_update_40(hw, GLPRT_PTC64H(hw->port_info->lport),
3186                            GLPRT_PTC64L(hw->port_info->lport),
3187                            pf->offset_loaded, &os->tx_size_64,
3188                            &ns->tx_size_64);
3189         ice_stat_update_40(hw, GLPRT_PTC127H(hw->port_info->lport),
3190                            GLPRT_PTC127L(hw->port_info->lport),
3191                            pf->offset_loaded, &os->tx_size_127,
3192                            &ns->tx_size_127);
3193         ice_stat_update_40(hw, GLPRT_PTC255H(hw->port_info->lport),
3194                            GLPRT_PTC255L(hw->port_info->lport),
3195                            pf->offset_loaded, &os->tx_size_255,
3196                            &ns->tx_size_255);
3197         ice_stat_update_40(hw, GLPRT_PTC511H(hw->port_info->lport),
3198                            GLPRT_PTC511L(hw->port_info->lport),
3199                            pf->offset_loaded, &os->tx_size_511,
3200                            &ns->tx_size_511);
3201         ice_stat_update_40(hw, GLPRT_PTC1023H(hw->port_info->lport),
3202                            GLPRT_PTC1023L(hw->port_info->lport),
3203                            pf->offset_loaded, &os->tx_size_1023,
3204                            &ns->tx_size_1023);
3205         ice_stat_update_40(hw, GLPRT_PTC1522H(hw->port_info->lport),
3206                            GLPRT_PTC1522L(hw->port_info->lport),
3207                            pf->offset_loaded, &os->tx_size_1522,
3208                            &ns->tx_size_1522);
3209         ice_stat_update_40(hw, GLPRT_PTC9522H(hw->port_info->lport),
3210                            GLPRT_PTC9522L(hw->port_info->lport),
3211                            pf->offset_loaded, &os->tx_size_big,
3212                            &ns->tx_size_big);
3213
3214         /* GLPRT_MSPDC not supported */
3215         /* GLPRT_XEC not supported */
3216
3217         pf->offset_loaded = true;
3218
3219         if (pf->main_vsi)
3220                 ice_update_vsi_stats(pf->main_vsi);
3221 }
3222
3223 /* Get all statistics of a port */
3224 static int
3225 ice_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
3226 {
3227         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3228         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3229         struct ice_hw_port_stats *ns = &pf->stats; /* new stats */
3230
3231         /* call read registers - updates values, now write them to struct */
3232         ice_read_stats_registers(pf, hw);
3233
3234         stats->ipackets = ns->eth.rx_unicast +
3235                           ns->eth.rx_multicast +
3236                           ns->eth.rx_broadcast -
3237                           ns->eth.rx_discards -
3238                           pf->main_vsi->eth_stats.rx_discards;
3239         stats->opackets = ns->eth.tx_unicast +
3240                           ns->eth.tx_multicast +
3241                           ns->eth.tx_broadcast;
3242         stats->ibytes   = ns->eth.rx_bytes;
3243         stats->obytes   = ns->eth.tx_bytes;
3244         stats->oerrors  = ns->eth.tx_errors +
3245                           pf->main_vsi->eth_stats.tx_errors;
3246
3247         /* Rx Errors */
3248         stats->imissed  = ns->eth.rx_discards +
3249                           pf->main_vsi->eth_stats.rx_discards;
3250         stats->ierrors  = ns->crc_errors +
3251                           ns->rx_undersize +
3252                           ns->rx_oversize + ns->rx_fragments + ns->rx_jabber;
3253
3254         PMD_DRV_LOG(DEBUG, "*************** PF stats start *****************");
3255         PMD_DRV_LOG(DEBUG, "rx_bytes:   %"PRIu64"", ns->eth.rx_bytes);
3256         PMD_DRV_LOG(DEBUG, "rx_unicast: %"PRIu64"", ns->eth.rx_unicast);
3257         PMD_DRV_LOG(DEBUG, "rx_multicast:%"PRIu64"", ns->eth.rx_multicast);
3258         PMD_DRV_LOG(DEBUG, "rx_broadcast:%"PRIu64"", ns->eth.rx_broadcast);
3259         PMD_DRV_LOG(DEBUG, "rx_discards:%"PRIu64"", ns->eth.rx_discards);
3260         PMD_DRV_LOG(DEBUG, "vsi rx_discards:%"PRIu64"",
3261                     pf->main_vsi->eth_stats.rx_discards);
3262         PMD_DRV_LOG(DEBUG, "rx_unknown_protocol:  %"PRIu64"",
3263                     ns->eth.rx_unknown_protocol);
3264         PMD_DRV_LOG(DEBUG, "tx_bytes:   %"PRIu64"", ns->eth.tx_bytes);
3265         PMD_DRV_LOG(DEBUG, "tx_unicast: %"PRIu64"", ns->eth.tx_unicast);
3266         PMD_DRV_LOG(DEBUG, "tx_multicast:%"PRIu64"", ns->eth.tx_multicast);
3267         PMD_DRV_LOG(DEBUG, "tx_broadcast:%"PRIu64"", ns->eth.tx_broadcast);
3268         PMD_DRV_LOG(DEBUG, "tx_discards:%"PRIu64"", ns->eth.tx_discards);
3269         PMD_DRV_LOG(DEBUG, "vsi tx_discards:%"PRIu64"",
3270                     pf->main_vsi->eth_stats.tx_discards);
3271         PMD_DRV_LOG(DEBUG, "tx_errors:          %"PRIu64"", ns->eth.tx_errors);
3272
3273         PMD_DRV_LOG(DEBUG, "tx_dropped_link_down:       %"PRIu64"",
3274                     ns->tx_dropped_link_down);
3275         PMD_DRV_LOG(DEBUG, "crc_errors: %"PRIu64"", ns->crc_errors);
3276         PMD_DRV_LOG(DEBUG, "illegal_bytes:      %"PRIu64"",
3277                     ns->illegal_bytes);
3278         PMD_DRV_LOG(DEBUG, "error_bytes:        %"PRIu64"", ns->error_bytes);
3279         PMD_DRV_LOG(DEBUG, "mac_local_faults:   %"PRIu64"",
3280                     ns->mac_local_faults);
3281         PMD_DRV_LOG(DEBUG, "mac_remote_faults:  %"PRIu64"",
3282                     ns->mac_remote_faults);
3283         PMD_DRV_LOG(DEBUG, "link_xon_rx:        %"PRIu64"", ns->link_xon_rx);
3284         PMD_DRV_LOG(DEBUG, "link_xoff_rx:       %"PRIu64"", ns->link_xoff_rx);
3285         PMD_DRV_LOG(DEBUG, "link_xon_tx:        %"PRIu64"", ns->link_xon_tx);
3286         PMD_DRV_LOG(DEBUG, "link_xoff_tx:       %"PRIu64"", ns->link_xoff_tx);
3287         PMD_DRV_LOG(DEBUG, "rx_size_64:         %"PRIu64"", ns->rx_size_64);
3288         PMD_DRV_LOG(DEBUG, "rx_size_127:        %"PRIu64"", ns->rx_size_127);
3289         PMD_DRV_LOG(DEBUG, "rx_size_255:        %"PRIu64"", ns->rx_size_255);
3290         PMD_DRV_LOG(DEBUG, "rx_size_511:        %"PRIu64"", ns->rx_size_511);
3291         PMD_DRV_LOG(DEBUG, "rx_size_1023:       %"PRIu64"", ns->rx_size_1023);
3292         PMD_DRV_LOG(DEBUG, "rx_size_1522:       %"PRIu64"", ns->rx_size_1522);
3293         PMD_DRV_LOG(DEBUG, "rx_size_big:        %"PRIu64"", ns->rx_size_big);
3294         PMD_DRV_LOG(DEBUG, "rx_undersize:       %"PRIu64"", ns->rx_undersize);
3295         PMD_DRV_LOG(DEBUG, "rx_fragments:       %"PRIu64"", ns->rx_fragments);
3296         PMD_DRV_LOG(DEBUG, "rx_oversize:        %"PRIu64"", ns->rx_oversize);
3297         PMD_DRV_LOG(DEBUG, "rx_jabber:          %"PRIu64"", ns->rx_jabber);
3298         PMD_DRV_LOG(DEBUG, "tx_size_64:         %"PRIu64"", ns->tx_size_64);
3299         PMD_DRV_LOG(DEBUG, "tx_size_127:        %"PRIu64"", ns->tx_size_127);
3300         PMD_DRV_LOG(DEBUG, "tx_size_255:        %"PRIu64"", ns->tx_size_255);
3301         PMD_DRV_LOG(DEBUG, "tx_size_511:        %"PRIu64"", ns->tx_size_511);
3302         PMD_DRV_LOG(DEBUG, "tx_size_1023:       %"PRIu64"", ns->tx_size_1023);
3303         PMD_DRV_LOG(DEBUG, "tx_size_1522:       %"PRIu64"", ns->tx_size_1522);
3304         PMD_DRV_LOG(DEBUG, "tx_size_big:        %"PRIu64"", ns->tx_size_big);
3305         PMD_DRV_LOG(DEBUG, "rx_len_errors:      %"PRIu64"", ns->rx_len_errors);
3306         PMD_DRV_LOG(DEBUG, "************* PF stats end ****************");
3307         return 0;
3308 }
3309
3310 /* Reset the statistics */
3311 static void
3312 ice_stats_reset(struct rte_eth_dev *dev)
3313 {
3314         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3315         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3316
3317         /* Mark PF and VSI stats to update the offset, aka "reset" */
3318         pf->offset_loaded = false;
3319         if (pf->main_vsi)
3320                 pf->main_vsi->offset_loaded = false;
3321
3322         /* read the stats, reading current register values into offset */
3323         ice_read_stats_registers(pf, hw);
3324 }
3325
3326 static uint32_t
3327 ice_xstats_calc_num(void)
3328 {
3329         uint32_t num;
3330
3331         num = ICE_NB_ETH_XSTATS + ICE_NB_HW_PORT_XSTATS;
3332
3333         return num;
3334 }
3335
3336 static int
3337 ice_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
3338                unsigned int n)
3339 {
3340         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3341         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3342         unsigned int i;
3343         unsigned int count;
3344         struct ice_hw_port_stats *hw_stats = &pf->stats;
3345
3346         count = ice_xstats_calc_num();
3347         if (n < count)
3348                 return count;
3349
3350         ice_read_stats_registers(pf, hw);
3351
3352         if (!xstats)
3353                 return 0;
3354
3355         count = 0;
3356
3357         /* Get stats from ice_eth_stats struct */
3358         for (i = 0; i < ICE_NB_ETH_XSTATS; i++) {
3359                 xstats[count].value =
3360                         *(uint64_t *)((char *)&hw_stats->eth +
3361                                       ice_stats_strings[i].offset);
3362                 xstats[count].id = count;
3363                 count++;
3364         }
3365
3366         /* Get individiual stats from ice_hw_port struct */
3367         for (i = 0; i < ICE_NB_HW_PORT_XSTATS; i++) {
3368                 xstats[count].value =
3369                         *(uint64_t *)((char *)hw_stats +
3370                                       ice_hw_port_strings[i].offset);
3371                 xstats[count].id = count;
3372                 count++;
3373         }
3374
3375         return count;
3376 }
3377
3378 static int ice_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
3379                                 struct rte_eth_xstat_name *xstats_names,
3380                                 __rte_unused unsigned int limit)
3381 {
3382         unsigned int count = 0;
3383         unsigned int i;
3384
3385         if (!xstats_names)
3386                 return ice_xstats_calc_num();
3387
3388         /* Note: limit checked in rte_eth_xstats_names() */
3389
3390         /* Get stats from ice_eth_stats struct */
3391         for (i = 0; i < ICE_NB_ETH_XSTATS; i++) {
3392                 snprintf(xstats_names[count].name,
3393                          sizeof(xstats_names[count].name),
3394                          "%s", ice_stats_strings[i].name);
3395                 count++;
3396         }
3397
3398         /* Get individiual stats from ice_hw_port struct */
3399         for (i = 0; i < ICE_NB_HW_PORT_XSTATS; i++) {
3400                 snprintf(xstats_names[count].name,
3401                          sizeof(xstats_names[count].name),
3402                          "%s", ice_hw_port_strings[i].name);
3403                 count++;
3404         }
3405
3406         return count;
3407 }
3408
/* PCI probe callback: allocate an ethdev whose private area holds a
 * struct ice_adapter and initialize it via ice_dev_init().
 */
static int
ice_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
              struct rte_pci_device *pci_dev)
{
        return rte_eth_dev_pci_generic_probe(pci_dev,
                                             sizeof(struct ice_adapter),
                                             ice_dev_init);
}
3417
/* PCI remove callback: tear the ethdev down via ice_dev_uninit(). */
static int
ice_pci_remove(struct rte_pci_device *pci_dev)
{
        return rte_eth_dev_pci_generic_remove(pci_dev, ice_dev_uninit);
}
3423
/* PCI driver descriptor: devices matched by pci_id_ice_map need BAR
 * mapping, link-status-change interrupts, and can use VA as IOVA.
 */
static struct rte_pci_driver rte_ice_pmd = {
        .id_table = pci_id_ice_map,
        .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
                     RTE_PCI_DRV_IOVA_AS_VA,
        .probe = ice_pci_probe,
        .remove = ice_pci_remove,
};
3431
/**
 * Driver initialization routine.
 * Invoked once at EAL init time.
 * Register itself as the [Poll Mode] Driver of PCI devices.
 */
RTE_PMD_REGISTER_PCI(net_ice, rte_ice_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_ice, pci_id_ice_map);
/* Kernel modules this PMD can bind through */
RTE_PMD_REGISTER_KMOD_DEP(net_ice, "* igb_uio | uio_pci_generic | vfio-pci");
/* Supported devargs: max_queue_pair_num=<int> */
RTE_PMD_REGISTER_PARAM_STRING(net_ice,
                              ICE_MAX_QP_NUM "=<int>");
3442
3443 RTE_INIT(ice_init_log)
3444 {
3445         ice_logtype_init = rte_log_register("pmd.net.ice.init");
3446         if (ice_logtype_init >= 0)
3447                 rte_log_set_level(ice_logtype_init, RTE_LOG_NOTICE);
3448         ice_logtype_driver = rte_log_register("pmd.net.ice.driver");
3449         if (ice_logtype_driver >= 0)
3450                 rte_log_set_level(ice_logtype_driver, RTE_LOG_NOTICE);
3451 }