net/ice: load OS default package
[dpdk.git] / drivers / net / ice / ice_ethdev.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018 Intel Corporation
3  */
4
5 #include <rte_ethdev_pci.h>
6
7 #include <stdio.h>
8 #include <sys/types.h>
9 #include <sys/stat.h>
10 #include <unistd.h>
11
12 #include "base/ice_sched.h"
13 #include "ice_ethdev.h"
14 #include "ice_rxtx.h"
15
16 #define ICE_MAX_QP_NUM "max_queue_pair_num"
17 #define ICE_DFLT_OUTER_TAG_TYPE ICE_AQ_VSI_OUTER_TAG_VLAN_9100
18 #define ICE_DFLT_PKG_FILE "/lib/firmware/intel/ice/ddp/ice.pkg"
19
20 int ice_logtype_init;
21 int ice_logtype_driver;
22
23 static int ice_dev_configure(struct rte_eth_dev *dev);
24 static int ice_dev_start(struct rte_eth_dev *dev);
25 static void ice_dev_stop(struct rte_eth_dev *dev);
26 static void ice_dev_close(struct rte_eth_dev *dev);
27 static int ice_dev_reset(struct rte_eth_dev *dev);
28 static void ice_dev_info_get(struct rte_eth_dev *dev,
29                              struct rte_eth_dev_info *dev_info);
30 static int ice_link_update(struct rte_eth_dev *dev,
31                            int wait_to_complete);
32 static int ice_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
33 static int ice_vlan_offload_set(struct rte_eth_dev *dev, int mask);
34 static int ice_vlan_tpid_set(struct rte_eth_dev *dev,
35                              enum rte_vlan_type vlan_type,
36                              uint16_t tpid);
37 static int ice_rss_reta_update(struct rte_eth_dev *dev,
38                                struct rte_eth_rss_reta_entry64 *reta_conf,
39                                uint16_t reta_size);
40 static int ice_rss_reta_query(struct rte_eth_dev *dev,
41                               struct rte_eth_rss_reta_entry64 *reta_conf,
42                               uint16_t reta_size);
43 static int ice_rss_hash_update(struct rte_eth_dev *dev,
44                                struct rte_eth_rss_conf *rss_conf);
45 static int ice_rss_hash_conf_get(struct rte_eth_dev *dev,
46                                  struct rte_eth_rss_conf *rss_conf);
47 static void ice_promisc_enable(struct rte_eth_dev *dev);
48 static void ice_promisc_disable(struct rte_eth_dev *dev);
49 static void ice_allmulti_enable(struct rte_eth_dev *dev);
50 static void ice_allmulti_disable(struct rte_eth_dev *dev);
51 static int ice_vlan_filter_set(struct rte_eth_dev *dev,
52                                uint16_t vlan_id,
53                                int on);
54 static int ice_macaddr_set(struct rte_eth_dev *dev,
55                            struct ether_addr *mac_addr);
56 static int ice_macaddr_add(struct rte_eth_dev *dev,
57                            struct ether_addr *mac_addr,
58                            __rte_unused uint32_t index,
59                            uint32_t pool);
60 static void ice_macaddr_remove(struct rte_eth_dev *dev, uint32_t index);
61 static int ice_rx_queue_intr_enable(struct rte_eth_dev *dev,
62                                     uint16_t queue_id);
63 static int ice_rx_queue_intr_disable(struct rte_eth_dev *dev,
64                                      uint16_t queue_id);
65 static int ice_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
66                               size_t fw_size);
67 static int ice_vlan_pvid_set(struct rte_eth_dev *dev,
68                              uint16_t pvid, int on);
69 static int ice_get_eeprom_length(struct rte_eth_dev *dev);
70 static int ice_get_eeprom(struct rte_eth_dev *dev,
71                           struct rte_dev_eeprom_info *eeprom);
72 static int ice_stats_get(struct rte_eth_dev *dev,
73                          struct rte_eth_stats *stats);
74 static void ice_stats_reset(struct rte_eth_dev *dev);
75 static int ice_xstats_get(struct rte_eth_dev *dev,
76                           struct rte_eth_xstat *xstats, unsigned int n);
77 static int ice_xstats_get_names(struct rte_eth_dev *dev,
78                                 struct rte_eth_xstat_name *xstats_names,
79                                 unsigned int limit);
80
/* PCI IDs (all E810-C variants) this driver binds to; the zeroed
 * entry terminates the table for the PCI bus scan.
 */
static const struct rte_pci_id pci_id_ice_map[] = {
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_BACKPLANE) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP) },
	{ .vendor_id = 0, /* sentinel */ },
};
87
/* ethdev callback table wiring the generic rte_eth API to the ice PMD.
 * Note: .xstats_reset maps to ice_stats_reset, so resetting extended
 * stats clears the basic stats counters as well.
 */
static const struct eth_dev_ops ice_eth_dev_ops = {
	.dev_configure                = ice_dev_configure,
	.dev_start                    = ice_dev_start,
	.dev_stop                     = ice_dev_stop,
	.dev_close                    = ice_dev_close,
	.dev_reset                    = ice_dev_reset,
	.rx_queue_start               = ice_rx_queue_start,
	.rx_queue_stop                = ice_rx_queue_stop,
	.tx_queue_start               = ice_tx_queue_start,
	.tx_queue_stop                = ice_tx_queue_stop,
	.rx_queue_setup               = ice_rx_queue_setup,
	.rx_queue_release             = ice_rx_queue_release,
	.tx_queue_setup               = ice_tx_queue_setup,
	.tx_queue_release             = ice_tx_queue_release,
	.dev_infos_get                = ice_dev_info_get,
	.dev_supported_ptypes_get     = ice_dev_supported_ptypes_get,
	.link_update                  = ice_link_update,
	.mtu_set                      = ice_mtu_set,
	.mac_addr_set                 = ice_macaddr_set,
	.mac_addr_add                 = ice_macaddr_add,
	.mac_addr_remove              = ice_macaddr_remove,
	.vlan_filter_set              = ice_vlan_filter_set,
	.vlan_offload_set             = ice_vlan_offload_set,
	.vlan_tpid_set                = ice_vlan_tpid_set,
	.reta_update                  = ice_rss_reta_update,
	.reta_query                   = ice_rss_reta_query,
	.rss_hash_update              = ice_rss_hash_update,
	.rss_hash_conf_get            = ice_rss_hash_conf_get,
	.promiscuous_enable           = ice_promisc_enable,
	.promiscuous_disable          = ice_promisc_disable,
	.allmulticast_enable          = ice_allmulti_enable,
	.allmulticast_disable         = ice_allmulti_disable,
	.rx_queue_intr_enable         = ice_rx_queue_intr_enable,
	.rx_queue_intr_disable        = ice_rx_queue_intr_disable,
	.fw_version_get               = ice_fw_version_get,
	.vlan_pvid_set                = ice_vlan_pvid_set,
	.rxq_info_get                 = ice_rxq_info_get,
	.txq_info_get                 = ice_txq_info_get,
	.get_eeprom_length            = ice_get_eeprom_length,
	.get_eeprom                   = ice_get_eeprom,
	.rx_queue_count               = ice_rx_queue_count,
	.rx_descriptor_status         = ice_rx_descriptor_status,
	.tx_descriptor_status         = ice_tx_descriptor_status,
	.stats_get                    = ice_stats_get,
	.stats_reset                  = ice_stats_reset,
	.xstats_get                   = ice_xstats_get,
	.xstats_get_names             = ice_xstats_get_names,
	.xstats_reset                 = ice_stats_reset,
};
137
/* Maps an extended-statistics name exposed to applications to the byte
 * offset of the matching counter inside a stats structure.
 */
struct ice_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];	/* xstat name shown to apps */
	unsigned int offset;	/* counter offset within the stats struct */
};
143
/* Per-VSI xstats: names paired with offsets into struct ice_eth_stats. */
static const struct ice_xstats_name_off ice_stats_strings[] = {
	{"rx_unicast_packets", offsetof(struct ice_eth_stats, rx_unicast)},
	{"rx_multicast_packets", offsetof(struct ice_eth_stats, rx_multicast)},
	{"rx_broadcast_packets", offsetof(struct ice_eth_stats, rx_broadcast)},
	{"rx_dropped", offsetof(struct ice_eth_stats, rx_discards)},
	{"rx_unknown_protocol_packets", offsetof(struct ice_eth_stats,
		rx_unknown_protocol)},
	{"tx_unicast_packets", offsetof(struct ice_eth_stats, tx_unicast)},
	{"tx_multicast_packets", offsetof(struct ice_eth_stats, tx_multicast)},
	{"tx_broadcast_packets", offsetof(struct ice_eth_stats, tx_broadcast)},
	{"tx_dropped", offsetof(struct ice_eth_stats, tx_discards)},
};

/* Number of per-VSI xstats entries in ice_stats_strings. */
#define ICE_NB_ETH_XSTATS (sizeof(ice_stats_strings) / \
		sizeof(ice_stats_strings[0]))
159
/* Port-level HW xstats: names paired with offsets into
 * struct ice_hw_port_stats (MAC/link error counters, pause frames and
 * RX/TX size-bucket histograms).
 */
static const struct ice_xstats_name_off ice_hw_port_strings[] = {
	{"tx_link_down_dropped", offsetof(struct ice_hw_port_stats,
		tx_dropped_link_down)},
	{"rx_crc_errors", offsetof(struct ice_hw_port_stats, crc_errors)},
	{"rx_illegal_byte_errors", offsetof(struct ice_hw_port_stats,
		illegal_bytes)},
	{"rx_error_bytes", offsetof(struct ice_hw_port_stats, error_bytes)},
	{"mac_local_errors", offsetof(struct ice_hw_port_stats,
		mac_local_faults)},
	{"mac_remote_errors", offsetof(struct ice_hw_port_stats,
		mac_remote_faults)},
	{"rx_len_errors", offsetof(struct ice_hw_port_stats,
		rx_len_errors)},
	{"tx_xon_packets", offsetof(struct ice_hw_port_stats, link_xon_tx)},
	{"rx_xon_packets", offsetof(struct ice_hw_port_stats, link_xon_rx)},
	{"tx_xoff_packets", offsetof(struct ice_hw_port_stats, link_xoff_tx)},
	{"rx_xoff_packets", offsetof(struct ice_hw_port_stats, link_xoff_rx)},
	{"rx_size_64_packets", offsetof(struct ice_hw_port_stats, rx_size_64)},
	{"rx_size_65_to_127_packets", offsetof(struct ice_hw_port_stats,
		rx_size_127)},
	{"rx_size_128_to_255_packets", offsetof(struct ice_hw_port_stats,
		rx_size_255)},
	{"rx_size_256_to_511_packets", offsetof(struct ice_hw_port_stats,
		rx_size_511)},
	{"rx_size_512_to_1023_packets", offsetof(struct ice_hw_port_stats,
		rx_size_1023)},
	{"rx_size_1024_to_1522_packets", offsetof(struct ice_hw_port_stats,
		rx_size_1522)},
	{"rx_size_1523_to_max_packets", offsetof(struct ice_hw_port_stats,
		rx_size_big)},
	{"rx_undersized_errors", offsetof(struct ice_hw_port_stats,
		rx_undersize)},
	{"rx_oversize_errors", offsetof(struct ice_hw_port_stats,
		rx_oversize)},
	{"rx_mac_short_pkt_dropped", offsetof(struct ice_hw_port_stats,
		mac_short_pkt_dropped)},
	{"rx_fragmented_errors", offsetof(struct ice_hw_port_stats,
		rx_fragments)},
	{"rx_jabber_errors", offsetof(struct ice_hw_port_stats, rx_jabber)},
	{"tx_size_64_packets", offsetof(struct ice_hw_port_stats, tx_size_64)},
	{"tx_size_65_to_127_packets", offsetof(struct ice_hw_port_stats,
		tx_size_127)},
	{"tx_size_128_to_255_packets", offsetof(struct ice_hw_port_stats,
		tx_size_255)},
	{"tx_size_256_to_511_packets", offsetof(struct ice_hw_port_stats,
		tx_size_511)},
	{"tx_size_512_to_1023_packets", offsetof(struct ice_hw_port_stats,
		tx_size_1023)},
	{"tx_size_1024_to_1522_packets", offsetof(struct ice_hw_port_stats,
		tx_size_1522)},
	{"tx_size_1523_to_max_packets", offsetof(struct ice_hw_port_stats,
		tx_size_big)},
};

/* Number of port-level xstats entries in ice_hw_port_strings. */
#define ICE_NB_HW_PORT_XSTATS (sizeof(ice_hw_port_strings) / \
		sizeof(ice_hw_port_strings[0]))
216
217 static void
218 ice_init_controlq_parameter(struct ice_hw *hw)
219 {
220         /* fields for adminq */
221         hw->adminq.num_rq_entries = ICE_ADMINQ_LEN;
222         hw->adminq.num_sq_entries = ICE_ADMINQ_LEN;
223         hw->adminq.rq_buf_size = ICE_ADMINQ_BUF_SZ;
224         hw->adminq.sq_buf_size = ICE_ADMINQ_BUF_SZ;
225
226         /* fields for mailboxq, DPDK used as PF host */
227         hw->mailboxq.num_rq_entries = ICE_MAILBOXQ_LEN;
228         hw->mailboxq.num_sq_entries = ICE_MAILBOXQ_LEN;
229         hw->mailboxq.rq_buf_size = ICE_MAILBOXQ_BUF_SZ;
230         hw->mailboxq.sq_buf_size = ICE_MAILBOXQ_BUF_SZ;
231 }
232
233 static int
234 ice_check_qp_num(const char *key, const char *qp_value,
235                  __rte_unused void *opaque)
236 {
237         char *end = NULL;
238         int num = 0;
239
240         while (isblank(*qp_value))
241                 qp_value++;
242
243         num = strtoul(qp_value, &end, 10);
244
245         if (!num || (*end == '-') || errno) {
246                 PMD_DRV_LOG(WARNING, "invalid value:\"%s\" for key:\"%s\", "
247                             "value must be > 0",
248                             qp_value, key);
249                 return -1;
250         }
251
252         return num;
253 }
254
255 static int
256 ice_config_max_queue_pair_num(struct rte_devargs *devargs)
257 {
258         struct rte_kvargs *kvlist;
259         const char *queue_num_key = ICE_MAX_QP_NUM;
260         int ret;
261
262         if (!devargs)
263                 return 0;
264
265         kvlist = rte_kvargs_parse(devargs->args, NULL);
266         if (!kvlist)
267                 return 0;
268
269         if (!rte_kvargs_count(kvlist, queue_num_key)) {
270                 rte_kvargs_free(kvlist);
271                 return 0;
272         }
273
274         if (rte_kvargs_process(kvlist, queue_num_key,
275                                ice_check_qp_num, NULL) < 0) {
276                 rte_kvargs_free(kvlist);
277                 return 0;
278         }
279         ret = rte_kvargs_process(kvlist, queue_num_key,
280                                  ice_check_qp_num, NULL);
281         rte_kvargs_free(kvlist);
282
283         return ret;
284 }
285
286 static int
287 ice_res_pool_init(struct ice_res_pool_info *pool, uint32_t base,
288                   uint32_t num)
289 {
290         struct pool_entry *entry;
291
292         if (!pool || !num)
293                 return -EINVAL;
294
295         entry = rte_zmalloc(NULL, sizeof(*entry), 0);
296         if (!entry) {
297                 PMD_INIT_LOG(ERR,
298                              "Failed to allocate memory for resource pool");
299                 return -ENOMEM;
300         }
301
302         /* queue heap initialize */
303         pool->num_free = num;
304         pool->num_alloc = 0;
305         pool->base = base;
306         LIST_INIT(&pool->alloc_list);
307         LIST_INIT(&pool->free_list);
308
309         /* Initialize element  */
310         entry->base = 0;
311         entry->len = num;
312
313         LIST_INSERT_HEAD(&pool->free_list, entry, next);
314         return 0;
315 }
316
317 static int
318 ice_res_pool_alloc(struct ice_res_pool_info *pool,
319                    uint16_t num)
320 {
321         struct pool_entry *entry, *valid_entry;
322
323         if (!pool || !num) {
324                 PMD_INIT_LOG(ERR, "Invalid parameter");
325                 return -EINVAL;
326         }
327
328         if (pool->num_free < num) {
329                 PMD_INIT_LOG(ERR, "No resource. ask:%u, available:%u",
330                              num, pool->num_free);
331                 return -ENOMEM;
332         }
333
334         valid_entry = NULL;
335         /* Lookup  in free list and find most fit one */
336         LIST_FOREACH(entry, &pool->free_list, next) {
337                 if (entry->len >= num) {
338                         /* Find best one */
339                         if (entry->len == num) {
340                                 valid_entry = entry;
341                                 break;
342                         }
343                         if (!valid_entry ||
344                             valid_entry->len > entry->len)
345                                 valid_entry = entry;
346                 }
347         }
348
349         /* Not find one to satisfy the request, return */
350         if (!valid_entry) {
351                 PMD_INIT_LOG(ERR, "No valid entry found");
352                 return -ENOMEM;
353         }
354         /**
355          * The entry have equal queue number as requested,
356          * remove it from alloc_list.
357          */
358         if (valid_entry->len == num) {
359                 LIST_REMOVE(valid_entry, next);
360         } else {
361                 /**
362                  * The entry have more numbers than requested,
363                  * create a new entry for alloc_list and minus its
364                  * queue base and number in free_list.
365                  */
366                 entry = rte_zmalloc(NULL, sizeof(*entry), 0);
367                 if (!entry) {
368                         PMD_INIT_LOG(ERR,
369                                      "Failed to allocate memory for "
370                                      "resource pool");
371                         return -ENOMEM;
372                 }
373                 entry->base = valid_entry->base;
374                 entry->len = num;
375                 valid_entry->base += num;
376                 valid_entry->len -= num;
377                 valid_entry = entry;
378         }
379
380         /* Insert it into alloc list, not sorted */
381         LIST_INSERT_HEAD(&pool->alloc_list, valid_entry, next);
382
383         pool->num_free -= valid_entry->len;
384         pool->num_alloc += valid_entry->len;
385
386         return valid_entry->base + pool->base;
387 }
388
389 static void
390 ice_res_pool_destroy(struct ice_res_pool_info *pool)
391 {
392         struct pool_entry *entry, *next_entry;
393
394         if (!pool)
395                 return;
396
397         for (entry = LIST_FIRST(&pool->alloc_list);
398              entry && (next_entry = LIST_NEXT(entry, next), 1);
399              entry = next_entry) {
400                 LIST_REMOVE(entry, next);
401                 rte_free(entry);
402         }
403
404         for (entry = LIST_FIRST(&pool->free_list);
405              entry && (next_entry = LIST_NEXT(entry, next), 1);
406              entry = next_entry) {
407                 LIST_REMOVE(entry, next);
408                 rte_free(entry);
409         }
410
411         pool->num_free = 0;
412         pool->num_alloc = 0;
413         pool->base = 0;
414         LIST_INIT(&pool->alloc_list);
415         LIST_INIT(&pool->free_list);
416 }
417
418 static void
419 ice_vsi_config_default_rss(struct ice_aqc_vsi_props *info)
420 {
421         /* Set VSI LUT selection */
422         info->q_opt_rss = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI &
423                           ICE_AQ_VSI_Q_OPT_RSS_LUT_M;
424         /* Set Hash scheme */
425         info->q_opt_rss |= ICE_AQ_VSI_Q_OPT_RSS_TPLZ &
426                            ICE_AQ_VSI_Q_OPT_RSS_HASH_M;
427         /* enable TC */
428         info->q_opt_tc = ICE_AQ_VSI_Q_OPT_TC_OVR_M;
429 }
430
/* Program the TC/queue mapping section of the VSI context.
 * Only TC0 is supported; the per-TC queue count is written as a
 * power-of-two exponent (bsf), so nb_qps is clamped accordingly.
 */
static enum ice_status
ice_vsi_config_tc_queue_mapping(struct ice_vsi *vsi,
				struct ice_aqc_vsi_props *info,
				uint8_t enabled_tcmap)
{
	uint16_t bsf, qp_idx;

	/* default tc 0 now. Multi-TC supporting need to be done later.
	 * Configure TC and queue mapping parameters, for enabled TC,
	 * allocate qpnum_per_tc queues to this traffic.
	 */
	if (enabled_tcmap != 0x01) {
		PMD_INIT_LOG(ERR, "only TC0 is supported");
		return -ENOTSUP;
	}

	vsi->nb_qps = RTE_MIN(vsi->nb_qps, ICE_MAX_Q_PER_TC);
	/* NOTE(review): rte_bsf32() returns the least-significant set bit,
	 * so a non-power-of-two nb_qps collapses to its lowest power-of-two
	 * factor (e.g. 6 -> 2) rather than rounding to the nearest power of
	 * two — confirm this is the intended clamping.
	 */
	bsf = rte_bsf32(vsi->nb_qps);
	/* Adjust the queue number to actual queues that can be applied */
	vsi->nb_qps = 0x1 << bsf;

	qp_idx = 0;
	/* Set tc and queue mapping with VSI: queue offset and the
	 * queue-count exponent share one 16-bit field.
	 */
	info->tc_mapping[0] = rte_cpu_to_le_16((qp_idx <<
						ICE_AQ_VSI_TC_Q_OFFSET_S) |
					       (bsf << ICE_AQ_VSI_TC_Q_NUM_S));

	/* Associate queue number with VSI (contiguous queue range) */
	info->mapping_flags |= rte_cpu_to_le_16(ICE_AQ_VSI_Q_MAP_CONTIG);
	info->q_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
	info->q_mapping[1] = rte_cpu_to_le_16(vsi->nb_qps);
	info->valid_sections |=
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
	/* Set the info.ingress_table and info.egress_table
	 * for UP translate table. Now just set it to 1:1 map by default
	 * -- 0b 111 110 101 100 011 010 001 000 == 0xFAC688
	 */
#define ICE_TC_QUEUE_TABLE_DFLT 0x00FAC688
	info->ingress_table  = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT);
	info->egress_table   = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT);
	info->outer_up_table = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT);
	return 0;
}
474
475 static int
476 ice_init_mac_address(struct rte_eth_dev *dev)
477 {
478         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
479
480         if (!is_unicast_ether_addr
481                 ((struct ether_addr *)hw->port_info[0].mac.lan_addr)) {
482                 PMD_INIT_LOG(ERR, "Invalid MAC address");
483                 return -EINVAL;
484         }
485
486         ether_addr_copy((struct ether_addr *)hw->port_info[0].mac.lan_addr,
487                         (struct ether_addr *)hw->port_info[0].mac.perm_addr);
488
489         dev->data->mac_addrs = rte_zmalloc(NULL, sizeof(struct ether_addr), 0);
490         if (!dev->data->mac_addrs) {
491                 PMD_INIT_LOG(ERR,
492                              "Failed to allocate memory to store mac address");
493                 return -ENOMEM;
494         }
495         /* store it to dev data */
496         ether_addr_copy((struct ether_addr *)hw->port_info[0].mac.perm_addr,
497                         &dev->data->mac_addrs[0]);
498         return 0;
499 }
500
501 /* Find out specific MAC filter */
502 static struct ice_mac_filter *
503 ice_find_mac_filter(struct ice_vsi *vsi, struct ether_addr *macaddr)
504 {
505         struct ice_mac_filter *f;
506
507         TAILQ_FOREACH(f, &vsi->mac_list, next) {
508                 if (is_same_ether_addr(macaddr, &f->mac_info.mac_addr))
509                         return f;
510         }
511
512         return NULL;
513 }
514
/* Install a MAC filter in HW for this VSI and shadow it in the VSI's
 * software mac_list. Idempotent: an already-known address returns 0.
 * Returns 0 on success or a negative errno.
 */
static int
ice_add_mac_filter(struct ice_vsi *vsi, struct ether_addr *mac_addr)
{
	struct ice_fltr_list_entry *m_list_itr = NULL;
	struct ice_mac_filter *f;
	struct LIST_HEAD_TYPE list_head;
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	int ret = 0;

	/* If it's added and configured, return */
	f = ice_find_mac_filter(vsi, mac_addr);
	if (f) {
		PMD_DRV_LOG(INFO, "This MAC filter already exists.");
		return 0;
	}

	INIT_LIST_HEAD(&list_head);

	m_list_itr = (struct ice_fltr_list_entry *)
		ice_malloc(hw, sizeof(*m_list_itr));
	if (!m_list_itr) {
		ret = -ENOMEM;
		goto DONE;
	}
	/* Describe the filter: MAC lookup, forward to this VSI, TX side. */
	ice_memcpy(m_list_itr->fltr_info.l_data.mac.mac_addr,
		   mac_addr, ETH_ALEN, ICE_NONDMA_TO_NONDMA);
	m_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
	m_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
	m_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_MAC;
	m_list_itr->fltr_info.flag = ICE_FLTR_TX;
	m_list_itr->fltr_info.vsi_handle = vsi->idx;

	LIST_ADD(&m_list_itr->list_entry, &list_head);

	/* Add the mac */
	ret = ice_add_mac(hw, &list_head);
	if (ret != ICE_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to add MAC filter");
		ret = -EINVAL;
		goto DONE;
	}
	/* Add the mac addr into mac list */
	f = rte_zmalloc(NULL, sizeof(*f), 0);
	if (!f) {
		/* NOTE(review): the HW filter added above is left installed
		 * when this allocation fails, so HW and the SW shadow list
		 * diverge — confirm whether a rollback is needed here.
		 */
		PMD_DRV_LOG(ERR, "failed to allocate memory");
		ret = -ENOMEM;
		goto DONE;
	}
	rte_memcpy(&f->mac_info.mac_addr, mac_addr, ETH_ADDR_LEN);
	TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
	vsi->mac_num++;

	ret = 0;

DONE:
	rte_free(m_list_itr);
	return ret;
}
573
574 static int
575 ice_remove_mac_filter(struct ice_vsi *vsi, struct ether_addr *mac_addr)
576 {
577         struct ice_fltr_list_entry *m_list_itr = NULL;
578         struct ice_mac_filter *f;
579         struct LIST_HEAD_TYPE list_head;
580         struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
581         int ret = 0;
582
583         /* Can't find it, return an error */
584         f = ice_find_mac_filter(vsi, mac_addr);
585         if (!f)
586                 return -EINVAL;
587
588         INIT_LIST_HEAD(&list_head);
589
590         m_list_itr = (struct ice_fltr_list_entry *)
591                 ice_malloc(hw, sizeof(*m_list_itr));
592         if (!m_list_itr) {
593                 ret = -ENOMEM;
594                 goto DONE;
595         }
596         ice_memcpy(m_list_itr->fltr_info.l_data.mac.mac_addr,
597                    mac_addr, ETH_ALEN, ICE_NONDMA_TO_NONDMA);
598         m_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
599         m_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
600         m_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_MAC;
601         m_list_itr->fltr_info.flag = ICE_FLTR_TX;
602         m_list_itr->fltr_info.vsi_handle = vsi->idx;
603
604         LIST_ADD(&m_list_itr->list_entry, &list_head);
605
606         /* remove the mac filter */
607         ret = ice_remove_mac(hw, &list_head);
608         if (ret != ICE_SUCCESS) {
609                 PMD_DRV_LOG(ERR, "Failed to remove MAC filter");
610                 ret = -EINVAL;
611                 goto DONE;
612         }
613
614         /* Remove the mac addr from mac list */
615         TAILQ_REMOVE(&vsi->mac_list, f, next);
616         rte_free(f);
617         vsi->mac_num--;
618
619         ret = 0;
620 DONE:
621         rte_free(m_list_itr);
622         return ret;
623 }
624
625 /* Find out specific VLAN filter */
626 static struct ice_vlan_filter *
627 ice_find_vlan_filter(struct ice_vsi *vsi, uint16_t vlan_id)
628 {
629         struct ice_vlan_filter *f;
630
631         TAILQ_FOREACH(f, &vsi->vlan_list, next) {
632                 if (vlan_id == f->vlan_info.vlan_id)
633                         return f;
634         }
635
636         return NULL;
637 }
638
639 static int
640 ice_add_vlan_filter(struct ice_vsi *vsi, uint16_t vlan_id)
641 {
642         struct ice_fltr_list_entry *v_list_itr = NULL;
643         struct ice_vlan_filter *f;
644         struct LIST_HEAD_TYPE list_head;
645         struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
646         int ret = 0;
647
648         if (!vsi || vlan_id > ETHER_MAX_VLAN_ID)
649                 return -EINVAL;
650
651         /* If it's added and configured, return. */
652         f = ice_find_vlan_filter(vsi, vlan_id);
653         if (f) {
654                 PMD_DRV_LOG(INFO, "This VLAN filter already exists.");
655                 return 0;
656         }
657
658         if (!vsi->vlan_anti_spoof_on && !vsi->vlan_filter_on)
659                 return 0;
660
661         INIT_LIST_HEAD(&list_head);
662
663         v_list_itr = (struct ice_fltr_list_entry *)
664                       ice_malloc(hw, sizeof(*v_list_itr));
665         if (!v_list_itr) {
666                 ret = -ENOMEM;
667                 goto DONE;
668         }
669         v_list_itr->fltr_info.l_data.vlan.vlan_id = vlan_id;
670         v_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
671         v_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
672         v_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
673         v_list_itr->fltr_info.flag = ICE_FLTR_TX;
674         v_list_itr->fltr_info.vsi_handle = vsi->idx;
675
676         LIST_ADD(&v_list_itr->list_entry, &list_head);
677
678         /* Add the vlan */
679         ret = ice_add_vlan(hw, &list_head);
680         if (ret != ICE_SUCCESS) {
681                 PMD_DRV_LOG(ERR, "Failed to add VLAN filter");
682                 ret = -EINVAL;
683                 goto DONE;
684         }
685
686         /* Add vlan into vlan list */
687         f = rte_zmalloc(NULL, sizeof(*f), 0);
688         if (!f) {
689                 PMD_DRV_LOG(ERR, "failed to allocate memory");
690                 ret = -ENOMEM;
691                 goto DONE;
692         }
693         f->vlan_info.vlan_id = vlan_id;
694         TAILQ_INSERT_TAIL(&vsi->vlan_list, f, next);
695         vsi->vlan_num++;
696
697         ret = 0;
698
699 DONE:
700         rte_free(v_list_itr);
701         return ret;
702 }
703
704 static int
705 ice_remove_vlan_filter(struct ice_vsi *vsi, uint16_t vlan_id)
706 {
707         struct ice_fltr_list_entry *v_list_itr = NULL;
708         struct ice_vlan_filter *f;
709         struct LIST_HEAD_TYPE list_head;
710         struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
711         int ret = 0;
712
713         /**
714          * Vlan 0 is the generic filter for untagged packets
715          * and can't be removed.
716          */
717         if (!vsi || vlan_id == 0 || vlan_id > ETHER_MAX_VLAN_ID)
718                 return -EINVAL;
719
720         /* Can't find it, return an error */
721         f = ice_find_vlan_filter(vsi, vlan_id);
722         if (!f)
723                 return -EINVAL;
724
725         INIT_LIST_HEAD(&list_head);
726
727         v_list_itr = (struct ice_fltr_list_entry *)
728                       ice_malloc(hw, sizeof(*v_list_itr));
729         if (!v_list_itr) {
730                 ret = -ENOMEM;
731                 goto DONE;
732         }
733
734         v_list_itr->fltr_info.l_data.vlan.vlan_id = vlan_id;
735         v_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
736         v_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
737         v_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
738         v_list_itr->fltr_info.flag = ICE_FLTR_TX;
739         v_list_itr->fltr_info.vsi_handle = vsi->idx;
740
741         LIST_ADD(&v_list_itr->list_entry, &list_head);
742
743         /* remove the vlan filter */
744         ret = ice_remove_vlan(hw, &list_head);
745         if (ret != ICE_SUCCESS) {
746                 PMD_DRV_LOG(ERR, "Failed to remove VLAN filter");
747                 ret = -EINVAL;
748                 goto DONE;
749         }
750
751         /* Remove the vlan id from vlan list */
752         TAILQ_REMOVE(&vsi->vlan_list, f, next);
753         rte_free(f);
754         vsi->vlan_num--;
755
756         ret = 0;
757 DONE:
758         rte_free(v_list_itr);
759         return ret;
760 }
761
762 static int
763 ice_remove_all_mac_vlan_filters(struct ice_vsi *vsi)
764 {
765         struct ice_mac_filter *m_f;
766         struct ice_vlan_filter *v_f;
767         int ret = 0;
768
769         if (!vsi || !vsi->mac_num)
770                 return -EINVAL;
771
772         TAILQ_FOREACH(m_f, &vsi->mac_list, next) {
773                 ret = ice_remove_mac_filter(vsi, &m_f->mac_info.mac_addr);
774                 if (ret != ICE_SUCCESS) {
775                         ret = -EINVAL;
776                         goto DONE;
777                 }
778         }
779
780         if (vsi->vlan_num == 0)
781                 return 0;
782
783         TAILQ_FOREACH(v_f, &vsi->vlan_list, next) {
784                 ret = ice_remove_vlan_filter(vsi, v_f->vlan_info.vlan_id);
785                 if (ret != ICE_SUCCESS) {
786                         ret = -EINVAL;
787                         goto DONE;
788                 }
789         }
790
791 DONE:
792         return ret;
793 }
794
795 static int
796 ice_vsi_config_qinq_insertion(struct ice_vsi *vsi, bool on)
797 {
798         struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
799         struct ice_vsi_ctx ctxt;
800         uint8_t qinq_flags;
801         int ret = 0;
802
803         /* Check if it has been already on or off */
804         if (vsi->info.valid_sections &
805                 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID)) {
806                 if (on) {
807                         if ((vsi->info.outer_tag_flags &
808                              ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST) ==
809                             ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST)
810                                 return 0; /* already on */
811                 } else {
812                         if (!(vsi->info.outer_tag_flags &
813                               ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST))
814                                 return 0; /* already off */
815                 }
816         }
817
818         if (on)
819                 qinq_flags = ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST;
820         else
821                 qinq_flags = 0;
822         /* clear global insertion and use per packet insertion */
823         vsi->info.outer_tag_flags &= ~(ICE_AQ_VSI_OUTER_TAG_INSERT);
824         vsi->info.outer_tag_flags &= ~(ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST);
825         vsi->info.outer_tag_flags |= qinq_flags;
826         /* use default vlan type 0x8100 */
827         vsi->info.outer_tag_flags &= ~(ICE_AQ_VSI_OUTER_TAG_TYPE_M);
828         vsi->info.outer_tag_flags |= ICE_DFLT_OUTER_TAG_TYPE <<
829                                      ICE_AQ_VSI_OUTER_TAG_TYPE_S;
830         (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
831         ctxt.info.valid_sections =
832                         rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);
833         ctxt.vsi_num = vsi->vsi_id;
834         ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
835         if (ret) {
836                 PMD_DRV_LOG(INFO,
837                             "Update VSI failed to %s qinq stripping",
838                             on ? "enable" : "disable");
839                 return -EINVAL;
840         }
841
842         vsi->info.valid_sections |=
843                 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);
844
845         return ret;
846 }
847
848 static int
849 ice_vsi_config_qinq_stripping(struct ice_vsi *vsi, bool on)
850 {
851         struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
852         struct ice_vsi_ctx ctxt;
853         uint8_t qinq_flags;
854         int ret = 0;
855
856         /* Check if it has been already on or off */
857         if (vsi->info.valid_sections &
858                 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID)) {
859                 if (on) {
860                         if ((vsi->info.outer_tag_flags &
861                              ICE_AQ_VSI_OUTER_TAG_MODE_M) ==
862                             ICE_AQ_VSI_OUTER_TAG_COPY)
863                                 return 0; /* already on */
864                 } else {
865                         if ((vsi->info.outer_tag_flags &
866                              ICE_AQ_VSI_OUTER_TAG_MODE_M) ==
867                             ICE_AQ_VSI_OUTER_TAG_NOTHING)
868                                 return 0; /* already off */
869                 }
870         }
871
872         if (on)
873                 qinq_flags = ICE_AQ_VSI_OUTER_TAG_COPY;
874         else
875                 qinq_flags = ICE_AQ_VSI_OUTER_TAG_NOTHING;
876         vsi->info.outer_tag_flags &= ~(ICE_AQ_VSI_OUTER_TAG_MODE_M);
877         vsi->info.outer_tag_flags |= qinq_flags;
878         /* use default vlan type 0x8100 */
879         vsi->info.outer_tag_flags &= ~(ICE_AQ_VSI_OUTER_TAG_TYPE_M);
880         vsi->info.outer_tag_flags |= ICE_DFLT_OUTER_TAG_TYPE <<
881                                      ICE_AQ_VSI_OUTER_TAG_TYPE_S;
882         (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
883         ctxt.info.valid_sections =
884                         rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);
885         ctxt.vsi_num = vsi->vsi_id;
886         ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
887         if (ret) {
888                 PMD_DRV_LOG(INFO,
889                             "Update VSI failed to %s qinq stripping",
890                             on ? "enable" : "disable");
891                 return -EINVAL;
892         }
893
894         vsi->info.valid_sections |=
895                 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);
896
897         return ret;
898 }
899
900 static int
901 ice_vsi_config_double_vlan(struct ice_vsi *vsi, int on)
902 {
903         int ret;
904
905         ret = ice_vsi_config_qinq_stripping(vsi, on);
906         if (ret)
907                 PMD_DRV_LOG(ERR, "Fail to set qinq stripping - %d", ret);
908
909         ret = ice_vsi_config_qinq_insertion(vsi, on);
910         if (ret)
911                 PMD_DRV_LOG(ERR, "Fail to set qinq insertion - %d", ret);
912
913         return ret;
914 }
915
/* Enable IRQ0: unmask the miscellaneous "other interrupt cause" vector
 * (admin queue, link events, MDD, ...) mapped to MSI-X vector 0.
 */
static void
ice_pf_enable_irq0(struct ice_hw *hw)
{
	/* reset the registers: mask all causes, then read OICR
	 * (presumably read-to-clear — TODO confirm against the datasheet)
	 * so no stale cause bits remain latched
	 */
	ICE_WRITE_REG(hw, PFINT_OICR_ENA, 0);
	ICE_READ_REG(hw, PFINT_OICR);

#ifdef ICE_LSE_SPT
	/* LSE mode: enable every cause except the link-status-change bit;
	 * link events are delivered through the FW admin queue instead
	 */
	ICE_WRITE_REG(hw, PFINT_OICR_ENA,
		      (uint32_t)(PFINT_OICR_ENA_INT_ENA_M &
				 (~PFINT_OICR_LINK_STAT_CHANGE_M)));

	/* map the OICR cause to MSI-X vector 0 / ITR index 0 and enable it */
	ICE_WRITE_REG(hw, PFINT_OICR_CTL,
		      (0 & PFINT_OICR_CTL_MSIX_INDX_M) |
		      ((0 << PFINT_OICR_CTL_ITR_INDX_S) &
		       PFINT_OICR_CTL_ITR_INDX_M) |
		      PFINT_OICR_CTL_CAUSE_ENA_M);

	/* map the FW (admin queue) cause to MSI-X vector 0 as well */
	ICE_WRITE_REG(hw, PFINT_FW_CTL,
		      (0 & PFINT_FW_CTL_MSIX_INDX_M) |
		      ((0 << PFINT_FW_CTL_ITR_INDX_S) &
		       PFINT_FW_CTL_ITR_INDX_M) |
		      PFINT_FW_CTL_CAUSE_ENA_M);
#else
	/* non-LSE mode: enable all interrupt causes */
	ICE_WRITE_REG(hw, PFINT_OICR_ENA, PFINT_OICR_ENA_INT_ENA_M);
#endif

	/* re-arm MSI-X vector 0 */
	ICE_WRITE_REG(hw, GLINT_DYN_CTL(0),
		      GLINT_DYN_CTL_INTENA_M |
		      GLINT_DYN_CTL_CLEARPBA_M |
		      GLINT_DYN_CTL_ITR_INDX_M);

	ice_flush(hw);
}
951
/* Disable IRQ0: mask MSI-X vector 0 (counterpart of ice_pf_enable_irq0). */
static void
ice_pf_disable_irq0(struct ice_hw *hw)
{
	/* Disable all interrupt types */
	ICE_WRITE_REG(hw, GLINT_DYN_CTL(0), GLINT_DYN_CTL_WB_ON_ITR_M);
	ice_flush(hw);
}
960
961 #ifdef ICE_LSE_SPT
/* Drain the admin receive queue and dispatch pending firmware events.
 * Only get_link_status events are handled (triggering an LSC callback);
 * all other opcodes are logged at DEBUG level and dropped.
 */
static void
ice_handle_aq_msg(struct rte_eth_dev *dev)
{
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_ctl_q_info *cq = &hw->adminq;
	struct ice_rq_event_info event;
	uint16_t pending, opcode;
	int ret;

	/* one reusable buffer for all events drained in this pass */
	event.buf_len = ICE_AQ_MAX_BUF_LEN;
	event.msg_buf = rte_zmalloc(NULL, event.buf_len, 0);
	if (!event.msg_buf) {
		PMD_DRV_LOG(ERR, "Failed to allocate mem");
		return;
	}

	/* keep reading while the FW reports more queued events */
	pending = 1;
	while (pending) {
		ret = ice_clean_rq_elem(hw, cq, &event, &pending);

		if (ret != ICE_SUCCESS) {
			/* NOTE(review): sq_last_status is logged although the
			 * failure came from a receive-queue read — confirm
			 * whether the RQ status field would be more accurate.
			 */
			PMD_DRV_LOG(INFO,
				    "Failed to read msg from AdminQ, "
				    "adminq_err: %u",
				    hw->adminq.sq_last_status);
			break;
		}
		opcode = rte_le_to_cpu_16(event.desc.opcode);

		switch (opcode) {
		case ice_aqc_opc_get_link_status:
			/* refresh cached link state; notify LSC callbacks
			 * only when the update succeeded (returned 0)
			 */
			ret = ice_link_update(dev, 0);
			if (!ret)
				_rte_eth_dev_callback_process
					(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
			break;
		default:
			PMD_DRV_LOG(DEBUG, "Request %u is not supported yet",
				    opcode);
			break;
		}
	}
	rte_free(event.msg_buf);
}
1006 #endif
1007
1008 /**
1009  * Interrupt handler triggered by NIC for handling
1010  * specific interrupt.
1011  *
1012  * @param handle
1013  *  Pointer to interrupt handle.
1014  * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
1016  *
1017  * @return
1018  *  void
1019  */
static void
ice_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t oicr;
	uint32_t reg;
	uint8_t pf_num;
	uint8_t event;
	uint16_t queue;
#ifdef ICE_LSE_SPT
	uint32_t int_fw_ctl;
#endif

	/* Disable interrupt while handling; re-enabled at 'done' below */
	ice_pf_disable_irq0(hw);

	/* read out interrupt causes */
	oicr = ICE_READ_REG(hw, PFINT_OICR);
#ifdef ICE_LSE_SPT
	int_fw_ctl = ICE_READ_REG(hw, PFINT_FW_CTL);
#endif

	/* No interrupt event indicated */
	if (!(oicr & PFINT_OICR_INTEVENT_M)) {
		PMD_DRV_LOG(INFO, "No interrupt event");
		goto done;
	}

#ifdef ICE_LSE_SPT
	/* LSE mode: link events arrive via the FW admin queue */
	if (int_fw_ctl & PFINT_FW_CTL_INTEVENT_M) {
		PMD_DRV_LOG(INFO, "FW_CTL: link state change event");
		ice_handle_aq_msg(dev);
	}
#else
	/* non-LSE mode: link state change is reported directly in OICR */
	if (oicr & PFINT_OICR_LINK_STAT_CHANGE_M) {
		PMD_DRV_LOG(INFO, "OICR: link state change event");
		ice_link_update(dev, 0);
	}
#endif

	/* Malicious Driver Detection: decode which PF/queue triggered the
	 * event on the TX PQM and TCLAN units and log it (no recovery
	 * action is taken here).
	 */
	if (oicr & PFINT_OICR_MAL_DETECT_M) {
		PMD_DRV_LOG(WARNING, "OICR: MDD event");
		reg = ICE_READ_REG(hw, GL_MDET_TX_PQM);
		if (reg & GL_MDET_TX_PQM_VALID_M) {
			pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >>
				 GL_MDET_TX_PQM_PF_NUM_S;
			event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >>
				GL_MDET_TX_PQM_MAL_TYPE_S;
			queue = (reg & GL_MDET_TX_PQM_QNUM_M) >>
				GL_MDET_TX_PQM_QNUM_S;

			PMD_DRV_LOG(WARNING, "Malicious Driver Detection event "
				    "%d by PQM on TX queue %d PF# %d",
				    event, queue, pf_num);
		}

		reg = ICE_READ_REG(hw, GL_MDET_TX_TCLAN);
		if (reg & GL_MDET_TX_TCLAN_VALID_M) {
			pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >>
				 GL_MDET_TX_TCLAN_PF_NUM_S;
			event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >>
				GL_MDET_TX_TCLAN_MAL_TYPE_S;
			queue = (reg & GL_MDET_TX_TCLAN_QNUM_M) >>
				GL_MDET_TX_TCLAN_QNUM_S;

			PMD_DRV_LOG(WARNING, "Malicious Driver Detection event "
				    "%d by TCLAN on TX queue %d PF# %d",
				    event, queue, pf_num);
		}
	}
done:
	/* Enable interrupt */
	ice_pf_enable_irq0(hw);
	rte_intr_enable(dev->intr_handle);
}
1096
1097 /*  Initialize SW parameters of PF */
1098 static int
1099 ice_pf_sw_init(struct rte_eth_dev *dev)
1100 {
1101         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1102         struct ice_hw *hw = ICE_PF_TO_HW(pf);
1103
1104         if (ice_config_max_queue_pair_num(dev->device->devargs) > 0)
1105                 pf->lan_nb_qp_max =
1106                         ice_config_max_queue_pair_num(dev->device->devargs);
1107         else
1108                 pf->lan_nb_qp_max =
1109                         (uint16_t)RTE_MIN(hw->func_caps.common_cap.num_txq,
1110                                           hw->func_caps.common_cap.num_rxq);
1111
1112         pf->lan_nb_qps = pf->lan_nb_qp_max;
1113
1114         return 0;
1115 }
1116
/* Allocate and configure a VSI (only ICE_VSI_PF is supported), add it
 * to HW via admin queue, install the default unicast and broadcast MAC
 * filters and configure TX scheduling for TC0.
 *
 * @return the new VSI, or NULL on failure (allocation released and
 *         next_vsi_idx rolled back).
 */
static struct ice_vsi *
ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_vsi *vsi = NULL;
	struct ice_vsi_ctx vsi_ctx;
	int ret;
	struct ether_addr broadcast = {
		.addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff} };
	struct ether_addr mac_addr;
	uint16_t max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
	uint8_t tc_bitmap = 0x1;	/* only TC0 at this stage */

	/* hw->num_lports = 1 in NIC mode */
	vsi = rte_zmalloc(NULL, sizeof(struct ice_vsi), 0);
	if (!vsi)
		return NULL;

	vsi->idx = pf->next_vsi_idx;
	pf->next_vsi_idx++;
	vsi->type = type;
	vsi->adapter = ICE_PF_TO_ADAPTER(pf);
	vsi->max_macaddrs = ICE_NUM_MACADDR_MAX;
	vsi->vlan_anti_spoof_on = 0;
	vsi->vlan_filter_on = 1;
	TAILQ_INIT(&vsi->mac_list);
	TAILQ_INIT(&vsi->vlan_list);

	memset(&vsi_ctx, 0, sizeof(vsi_ctx));
	/* base_queue is used in queue mapping of VSI add/update command.
	 * Suppose vsi->base_queue is 0 now, don't consider SRIOV, VMDQ
	 * cases in the first stage. Only Main VSI.
	 */
	vsi->base_queue = 0;
	switch (type) {
	case ICE_VSI_PF:
		vsi->nb_qps = pf->lan_nb_qps;
		ice_vsi_config_default_rss(&vsi_ctx.info);
		vsi_ctx.alloc_from_pool = true;
		vsi_ctx.flags = ICE_AQ_VSI_TYPE_PF;
		/* switch_id is queried by get_switch_config aq, which is done
		 * by ice_init_hw
		 */
		vsi_ctx.info.sw_id = hw->port_info->sw_id;
		vsi_ctx.info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
		/* Allow all untagged or tagged packets */
		vsi_ctx.info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL;
		vsi_ctx.info.vlan_flags |= ICE_AQ_VSI_VLAN_EMOD_NOTHING;
		vsi_ctx.info.q_opt_rss = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF |
					 ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
		/* Enable VLAN/UP trip */
		ret = ice_vsi_config_tc_queue_mapping(vsi,
						      &vsi_ctx.info,
						      ICE_DEFAULT_TCMAP);
		if (ret) {
			PMD_INIT_LOG(ERR,
				     "tc queue mapping with vsi failed, "
				     "err = %d",
				     ret);
			goto fail_mem;
		}

		break;
	default:
		/* for other types of VSI */
		PMD_INIT_LOG(ERR, "other types of VSI not supported");
		goto fail_mem;
	}

	/* VF has MSIX interrupt in VF range, don't allocate here */
	if (type == ICE_VSI_PF) {
		ret = ice_res_pool_alloc(&pf->msix_pool,
					 RTE_MIN(vsi->nb_qps,
						 RTE_MAX_RXTX_INTR_VEC_ID));
		if (ret < 0) {
			PMD_INIT_LOG(ERR, "VSI MAIN %d get heap failed %d",
				     vsi->vsi_id, ret);
		}
		/* NOTE(review): on pool-alloc failure a negative ret is still
		 * stored into msix_intr here — confirm downstream users treat
		 * that as "no vector assigned".
		 */
		vsi->msix_intr = ret;
		vsi->nb_msix = RTE_MIN(vsi->nb_qps, RTE_MAX_RXTX_INTR_VEC_ID);
	} else {
		vsi->msix_intr = 0;
		vsi->nb_msix = 0;
	}
	ret = ice_add_vsi(hw, vsi->idx, &vsi_ctx, NULL);
	if (ret != ICE_SUCCESS) {
		PMD_INIT_LOG(ERR, "add vsi failed, err = %d", ret);
		goto fail_mem;
	}
	/* store vsi information in SW structure */
	vsi->vsi_id = vsi_ctx.vsi_num;
	vsi->info = vsi_ctx.info;
	pf->vsis_allocated = vsi_ctx.vsis_allocd;
	pf->vsis_unallocated = vsi_ctx.vsis_unallocated;

	/* MAC configuration: adopt the port's permanent MAC as the default
	 * address, then install unicast and broadcast filters
	 */
	rte_memcpy(pf->dev_addr.addr_bytes,
		   hw->port_info->mac.perm_addr,
		   ETH_ADDR_LEN);

	rte_memcpy(&mac_addr, &pf->dev_addr, ETHER_ADDR_LEN);
	ret = ice_add_mac_filter(vsi, &mac_addr);
	if (ret != ICE_SUCCESS)
		PMD_INIT_LOG(ERR, "Failed to add dflt MAC filter");

	rte_memcpy(&mac_addr, &broadcast, ETHER_ADDR_LEN);
	ret = ice_add_mac_filter(vsi, &mac_addr);
	if (ret != ICE_SUCCESS)
		PMD_INIT_LOG(ERR, "Failed to add MAC filter");

	/* At the beginning, only TC0. */
	/* What we need here is the maximum number of the TX queues.
	 * Currently vsi->nb_qps means it.
	 * Correct it if any change.
	 */
	max_txqs[0] = vsi->nb_qps;
	ret = ice_cfg_vsi_lan(hw->port_info, vsi->idx,
			      tc_bitmap, max_txqs);
	if (ret != ICE_SUCCESS)
		PMD_INIT_LOG(ERR, "Failed to config vsi sched");

	return vsi;
fail_mem:
	rte_free(vsi);
	pf->next_vsi_idx--;
	return NULL;
}
1244
1245 static int
1246 ice_pf_setup(struct ice_pf *pf)
1247 {
1248         struct ice_vsi *vsi;
1249
1250         /* Clear all stats counters */
1251         pf->offset_loaded = FALSE;
1252         memset(&pf->stats, 0, sizeof(struct ice_hw_port_stats));
1253         memset(&pf->stats_offset, 0, sizeof(struct ice_hw_port_stats));
1254         memset(&pf->internal_stats, 0, sizeof(struct ice_eth_stats));
1255         memset(&pf->internal_stats_offset, 0, sizeof(struct ice_eth_stats));
1256
1257         vsi = ice_setup_vsi(pf, ICE_VSI_PF);
1258         if (!vsi) {
1259                 PMD_INIT_LOG(ERR, "Failed to add vsi for PF");
1260                 return -EINVAL;
1261         }
1262
1263         pf->main_vsi = vsi;
1264
1265         return 0;
1266 }
1267
1268 static int ice_load_pkg(struct rte_eth_dev *dev)
1269 {
1270         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1271         const char *pkg_file = ICE_DFLT_PKG_FILE;
1272         int err;
1273         uint8_t *buf;
1274         int buf_len;
1275         FILE *file;
1276         struct stat fstat;
1277
1278         file = fopen(pkg_file, "rb");
1279         if (!file)  {
1280                 PMD_INIT_LOG(ERR, "failed to open file: %s\n", pkg_file);
1281                 return -1;
1282         }
1283
1284         err = stat(pkg_file, &fstat);
1285         if (err) {
1286                 PMD_INIT_LOG(ERR, "failed to get file stats\n");
1287                 fclose(file);
1288                 return err;
1289         }
1290
1291         buf_len = fstat.st_size;
1292         buf = rte_malloc(NULL, buf_len, 0);
1293
1294         if (!buf) {
1295                 PMD_INIT_LOG(ERR, "failed to allocate buf of size %d for package\n",
1296                                 buf_len);
1297                 fclose(file);
1298                 return -1;
1299         }
1300
1301         err = fread(buf, buf_len, 1, file);
1302         if (err != 1) {
1303                 PMD_INIT_LOG(ERR, "failed to read package data\n");
1304                 fclose(file);
1305                 err = -1;
1306                 goto fail_exit;
1307         }
1308
1309         fclose(file);
1310
1311         err = ice_copy_and_init_pkg(hw, buf, buf_len);
1312         if (err) {
1313                 PMD_INIT_LOG(ERR, "ice_copy_and_init_hw failed: %d\n", err);
1314                 goto fail_exit;
1315         }
1316         err = ice_init_hw_tbls(hw);
1317         if (err) {
1318                 PMD_INIT_LOG(ERR, "ice_init_hw_tbls failed: %d\n", err);
1319                 goto fail_init_tbls;
1320         }
1321
1322         return 0;
1323
1324 fail_init_tbls:
1325         rte_free(hw->pkg_copy);
1326 fail_exit:
1327         rte_free(buf);
1328         return err;
1329 }
1330
/* Initialize the PF ethdev: hook datapath ops, bring up the shared HW
 * code (admin queues), load the DDP package, allocate the MSIX pool,
 * create the main VSI and register/enable the IRQ0 handler.
 *
 * @return 0 on success, negative on failure (partial init rolled back).
 */
static int
ice_dev_init(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev;
	struct rte_intr_handle *intr_handle;
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_vsi *vsi;
	int ret;

	dev->dev_ops = &ice_eth_dev_ops;
	dev->rx_pkt_burst = ice_recv_pkts;
	dev->tx_pkt_burst = ice_xmit_pkts;
	dev->tx_pkt_prepare = ice_prep_pkts;

	ice_set_default_ptype_table(dev);
	pci_dev = RTE_DEV_TO_PCI(dev->device);
	intr_handle = &pci_dev->intr_handle;

	/* wire the adapter/PF/HW back-pointers and PCI identity */
	pf->adapter = ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	pf->adapter->eth_dev = dev;
	pf->dev_data = dev->data;
	hw->back = pf->adapter;
	hw->hw_addr = (uint8_t *)pci_dev->mem_resource[0].addr;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->device_id = pci_dev->id.device_id;
	hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
	hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
	hw->bus.device = pci_dev->addr.devid;
	hw->bus.func = pci_dev->addr.function;

	ice_init_controlq_parameter(hw);

	ret = ice_init_hw(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to initialize HW");
		return -EINVAL;
	}

	/* the DDP package carries the parser/pipeline configuration;
	 * initialization cannot proceed without it
	 */
	ret = ice_load_pkg(dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to load the DDP package");
		goto err_load_pkg;
	}

	PMD_INIT_LOG(INFO, "FW %d.%d.%05d API %d.%d",
		     hw->fw_maj_ver, hw->fw_min_ver, hw->fw_build,
		     hw->api_maj_ver, hw->api_min_ver);

	/* NOTE(review): return value ignored — currently always 0 */
	ice_pf_sw_init(dev);
	ret = ice_init_mac_address(dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to initialize mac address");
		goto err_init_mac;
	}

	/* vector 0 is reserved for IRQ0 (misc/admin causes), so the pool
	 * starts at 1
	 */
	ret = ice_res_pool_init(&pf->msix_pool, 1,
				hw->func_caps.common_cap.num_msix_vectors - 1);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to init MSIX pool");
		goto err_msix_pool_init;
	}

	ret = ice_pf_setup(pf);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to setup PF");
		goto err_pf_setup;
	}

	vsi = pf->main_vsi;

	/* Disable double vlan by default */
	ice_vsi_config_double_vlan(vsi, FALSE);

	/* register callback func to eal lib */
	rte_intr_callback_register(intr_handle,
				   ice_interrupt_handler, dev);

	ice_pf_enable_irq0(hw);

	/* enable uio intr after callback register */
	rte_intr_enable(intr_handle);

	return 0;

err_pf_setup:
	ice_res_pool_destroy(&pf->msix_pool);
err_msix_pool_init:
	rte_free(dev->data->mac_addrs);
err_init_mac:
err_load_pkg:
	ice_sched_cleanup_all(hw);
	rte_free(hw->port_info);
	ice_shutdown_all_ctrlq(hw);

	return ret;
}
1428
1429 static int
1430 ice_release_vsi(struct ice_vsi *vsi)
1431 {
1432         struct ice_hw *hw;
1433         struct ice_vsi_ctx vsi_ctx;
1434         enum ice_status ret;
1435
1436         if (!vsi)
1437                 return 0;
1438
1439         hw = ICE_VSI_TO_HW(vsi);
1440
1441         ice_remove_all_mac_vlan_filters(vsi);
1442
1443         memset(&vsi_ctx, 0, sizeof(vsi_ctx));
1444
1445         vsi_ctx.vsi_num = vsi->vsi_id;
1446         vsi_ctx.info = vsi->info;
1447         ret = ice_free_vsi(hw, vsi->idx, &vsi_ctx, false, NULL);
1448         if (ret != ICE_SUCCESS) {
1449                 PMD_INIT_LOG(ERR, "Failed to free vsi by aq, %u", vsi->vsi_id);
1450                 rte_free(vsi);
1451                 return -1;
1452         }
1453
1454         rte_free(vsi);
1455         return 0;
1456 }
1457
/* Disable all RX/TX queue interrupts of a VSI: clear the per-queue
 * cause-control registers, then mask the associated MSI-X vectors
 * (all data vectors under vfio-pci, only vector 0 under igb_uio).
 */
static void
ice_vsi_disable_queues_intr(struct ice_vsi *vsi)
{
	struct rte_eth_dev *dev = vsi->adapter->eth_dev;
	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	uint16_t msix_intr, i;

	/* disable interrupt and also clear all the exist config */
	for (i = 0; i < vsi->nb_qps; i++) {
		ICE_WRITE_REG(hw, QINT_TQCTL(vsi->base_queue + i), 0);
		ICE_WRITE_REG(hw, QINT_RQCTL(vsi->base_queue + i), 0);
		rte_wmb();	/* make each clear visible before the next */
	}

	if (rte_intr_allow_others(intr_handle))
		/* vfio-pci: mask every vector assigned to this VSI */
		for (i = 0; i < vsi->nb_msix; i++) {
			msix_intr = vsi->msix_intr + i;
			ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr),
				      GLINT_DYN_CTL_WB_ON_ITR_M);
		}
	else
		/* igb_uio: everything shares vector 0 */
		ICE_WRITE_REG(hw, GLINT_DYN_CTL(0), GLINT_DYN_CTL_WB_ON_ITR_M);
}
1485
/* Stop the device: halt all RX/TX queues, disable queue interrupts,
 * release queue mbufs and tear down the event-fd/vector mapping.
 * Idempotent: returns immediately if the adapter is already stopped.
 */
static void
ice_dev_stop(struct rte_eth_dev *dev)
{
	struct rte_eth_dev_data *data = dev->data;
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_vsi *main_vsi = pf->main_vsi;
	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	uint16_t i;

	/* avoid stopping again */
	if (pf->adapter_stopped)
		return;

	/* stop and clear all Rx queues */
	for (i = 0; i < data->nb_rx_queues; i++)
		ice_rx_queue_stop(dev, i);

	/* stop and clear all Tx queues */
	for (i = 0; i < data->nb_tx_queues; i++)
		ice_tx_queue_stop(dev, i);

	/* disable all queue interrupts */
	ice_vsi_disable_queues_intr(main_vsi);

	/* Clear all queues and release mbufs */
	ice_clear_queues(dev);

	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
	if (intr_handle->intr_vec) {
		rte_free(intr_handle->intr_vec);
		intr_handle->intr_vec = NULL;
	}

	pf->adapter_stopped = true;
}
1523
/* Close the device: stop it first, then release queues, the MSIX pool,
 * the main VSI, the scheduler state and finally the control queues.
 * The order matters: queues before VSI, VSI before scheduler/ctrlq.
 */
static void
ice_dev_close(struct rte_eth_dev *dev)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	ice_dev_stop(dev);

	/* release all queue resource */
	ice_free_queues(dev);

	ice_res_pool_destroy(&pf->msix_pool);
	ice_release_vsi(pf->main_vsi);
	ice_sched_cleanup_all(hw);
	rte_free(hw->port_info);
	ice_shutdown_all_ctrlq(hw);
}
1541
/* Uninitialize the ethdev: close the device, detach the burst ops and
 * unhook the interrupt callback from the EAL.
 *
 * @return always 0.
 */
static int
ice_dev_uninit(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	ice_dev_close(dev);

	dev->dev_ops = NULL;
	dev->rx_pkt_burst = NULL;
	dev->tx_pkt_burst = NULL;

	rte_free(dev->data->mac_addrs);
	dev->data->mac_addrs = NULL;

	/* disable uio intr before callback unregister */
	rte_intr_disable(intr_handle);

	/* unregister callback func from eal lib */
	rte_intr_callback_unregister(intr_handle,
				     ice_interrupt_handler, dev);

	return 0;
}
1566
1567 static int
1568 ice_dev_configure(__rte_unused struct rte_eth_dev *dev)
1569 {
1570         struct ice_adapter *ad =
1571                 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1572
1573         /* Initialize to TRUE. If any of Rx queues doesn't meet the
1574          * bulk allocation or vector Rx preconditions we will reset it.
1575          */
1576         ad->rx_bulk_alloc_allowed = true;
1577         ad->tx_simple_allowed = true;
1578
1579         return 0;
1580 }
1581
1582 static int ice_init_rss(struct ice_pf *pf)
1583 {
1584         struct ice_hw *hw = ICE_PF_TO_HW(pf);
1585         struct ice_vsi *vsi = pf->main_vsi;
1586         struct rte_eth_dev *dev = pf->adapter->eth_dev;
1587         struct rte_eth_rss_conf *rss_conf;
1588         struct ice_aqc_get_set_rss_keys key;
1589         uint16_t i, nb_q;
1590         int ret = 0;
1591
1592         rss_conf = &dev->data->dev_conf.rx_adv_conf.rss_conf;
1593         nb_q = dev->data->nb_rx_queues;
1594         vsi->rss_key_size = ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE;
1595         vsi->rss_lut_size = hw->func_caps.common_cap.rss_table_size;
1596
1597         if (!vsi->rss_key)
1598                 vsi->rss_key = rte_zmalloc(NULL,
1599                                            vsi->rss_key_size, 0);
1600         if (!vsi->rss_lut)
1601                 vsi->rss_lut = rte_zmalloc(NULL,
1602                                            vsi->rss_lut_size, 0);
1603
1604         /* configure RSS key */
1605         if (!rss_conf->rss_key) {
1606                 /* Calculate the default hash key */
1607                 for (i = 0; i <= vsi->rss_key_size; i++)
1608                         vsi->rss_key[i] = (uint8_t)rte_rand();
1609         } else {
1610                 rte_memcpy(vsi->rss_key, rss_conf->rss_key,
1611                            RTE_MIN(rss_conf->rss_key_len,
1612                                    vsi->rss_key_size));
1613         }
1614         rte_memcpy(key.standard_rss_key, vsi->rss_key, vsi->rss_key_size);
1615         ret = ice_aq_set_rss_key(hw, vsi->idx, &key);
1616         if (ret)
1617                 return -EINVAL;
1618
1619         /* init RSS LUT table */
1620         for (i = 0; i < vsi->rss_lut_size; i++)
1621                 vsi->rss_lut[i] = i % nb_q;
1622
1623         ret = ice_aq_set_rss_lut(hw, vsi->idx,
1624                                  ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF,
1625                                  vsi->rss_lut, vsi->rss_lut_size);
1626         if (ret)
1627                 return -EINVAL;
1628
1629         return 0;
1630 }
1631
1632 static void
1633 __vsi_queues_bind_intr(struct ice_vsi *vsi, uint16_t msix_vect,
1634                        int base_queue, int nb_queue)
1635 {
1636         struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
1637         uint32_t val, val_tx;
1638         int i;
1639
1640         for (i = 0; i < nb_queue; i++) {
1641                 /*do actual bind*/
1642                 val = (msix_vect & QINT_RQCTL_MSIX_INDX_M) |
1643                       (0 < QINT_RQCTL_ITR_INDX_S) | QINT_RQCTL_CAUSE_ENA_M;
1644                 val_tx = (msix_vect & QINT_TQCTL_MSIX_INDX_M) |
1645                          (0 < QINT_TQCTL_ITR_INDX_S) | QINT_TQCTL_CAUSE_ENA_M;
1646
1647                 PMD_DRV_LOG(INFO, "queue %d is binding to vect %d",
1648                             base_queue + i, msix_vect);
1649                 /* set ITR0 value */
1650                 ICE_WRITE_REG(hw, GLINT_ITR(0, msix_vect), 0x10);
1651                 ICE_WRITE_REG(hw, QINT_RQCTL(base_queue + i), val);
1652                 ICE_WRITE_REG(hw, QINT_TQCTL(base_queue + i), val_tx);
1653         }
1654 }
1655
/* Bind all used Rx/Tx queues of the VSI to MSI-X vectors.
 *
 * First clears any previous queue->vector binding, then either maps every
 * queue onto a single vector (when only one vector is available, e.g. UIO)
 * or maps queues to vectors 1:1 (VFIO/MSI-X). When the Rx interrupt
 * data path is enabled, the chosen vector is also recorded in
 * intr_handle->intr_vec[] per queue for the ethdev Rx-interrupt API.
 */
static void
ice_vsi_queues_bind_intr(struct ice_vsi *vsi)
{
	struct rte_eth_dev *dev = vsi->adapter->eth_dev;
	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	uint16_t msix_vect = vsi->msix_intr;
	/* never hand out more vectors than event fds were enabled for */
	uint16_t nb_msix = RTE_MIN(vsi->nb_msix, intr_handle->nb_efd);
	uint16_t queue_idx = 0;
	int record = 0;
	int i;

	/* clear Rx/Tx queue interrupt */
	for (i = 0; i < vsi->nb_used_qps; i++) {
		ICE_WRITE_REG(hw, QINT_TQCTL(vsi->base_queue + i), 0);
		ICE_WRITE_REG(hw, QINT_RQCTL(vsi->base_queue + i), 0);
	}

	/* PF bind interrupt */
	if (rte_intr_dp_is_en(intr_handle)) {
		queue_idx = 0;
		record = 1;	/* mirror the mapping into intr_vec[] below */
	}

	for (i = 0; i < vsi->nb_used_qps; i++) {
		if (nb_msix <= 1) {
			if (!rte_intr_allow_others(intr_handle))
				msix_vect = ICE_MISC_VEC_ID;

			/* uio mapping all queue to one msix_vect */
			__vsi_queues_bind_intr(vsi, msix_vect,
					       vsi->base_queue + i,
					       vsi->nb_used_qps - i);

			/* record that single vector for every remaining
			 * queue (reuses the outer loop index on purpose)
			 */
			for (; !!record && i < vsi->nb_used_qps; i++)
				intr_handle->intr_vec[queue_idx + i] =
					msix_vect;
			break;
		}

		/* vfio 1:1 queue/msix_vect mapping */
		__vsi_queues_bind_intr(vsi, msix_vect,
				       vsi->base_queue + i, 1);

		if (!!record)
			intr_handle->intr_vec[queue_idx + i] = msix_vect;

		msix_vect++;
		nb_msix--;
	}
}
1708
1709 static void
1710 ice_vsi_enable_queues_intr(struct ice_vsi *vsi)
1711 {
1712         struct rte_eth_dev *dev = vsi->adapter->eth_dev;
1713         struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
1714         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1715         struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
1716         uint16_t msix_intr, i;
1717
1718         if (rte_intr_allow_others(intr_handle))
1719                 for (i = 0; i < vsi->nb_used_qps; i++) {
1720                         msix_intr = vsi->msix_intr + i;
1721                         ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr),
1722                                       GLINT_DYN_CTL_INTENA_M |
1723                                       GLINT_DYN_CTL_CLEARPBA_M |
1724                                       GLINT_DYN_CTL_ITR_INDX_M |
1725                                       GLINT_DYN_CTL_WB_ON_ITR_M);
1726                 }
1727         else
1728                 ICE_WRITE_REG(hw, GLINT_DYN_CTL(0),
1729                               GLINT_DYN_CTL_INTENA_M |
1730                               GLINT_DYN_CTL_CLEARPBA_M |
1731                               GLINT_DYN_CTL_ITR_INDX_M |
1732                               GLINT_DYN_CTL_WB_ON_ITR_M);
1733 }
1734
/* Set up Rx-queue interrupts for the port.
 *
 * Sizes and enables the event fds when per-queue Rx interrupts are
 * requested, allocates the queue->vector table on first use, binds every
 * Rx queue of the main VSI to a vector and enables those vectors.
 * Returns 0 on success, -ENOTSUP when more queues than supported vectors
 * are requested, -1/-ENOMEM on efd or allocation failure.
 */
static int
ice_rxq_intr_setup(struct rte_eth_dev *dev)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct ice_vsi *vsi = pf->main_vsi;
	uint32_t intr_vector = 0;

	/* quiesce while the mapping is reprogrammed */
	rte_intr_disable(intr_handle);

	/* check and configure queue intr-vector mapping */
	if ((rte_intr_cap_multiple(intr_handle) ||
	     !RTE_ETH_DEV_SRIOV(dev).active) &&
	    dev->data->dev_conf.intr_conf.rxq != 0) {
		intr_vector = dev->data->nb_rx_queues;
		if (intr_vector > ICE_MAX_INTR_QUEUE_NUM) {
			PMD_DRV_LOG(ERR, "At most %d intr queues supported",
				    ICE_MAX_INTR_QUEUE_NUM);
			return -ENOTSUP;
		}
		if (rte_intr_efd_enable(intr_handle, intr_vector))
			return -1;
	}

	/* lazily allocate the per-queue vector table used by the
	 * ethdev Rx-interrupt API
	 */
	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
		intr_handle->intr_vec =
		rte_zmalloc(NULL, dev->data->nb_rx_queues * sizeof(int),
			    0);
		if (!intr_handle->intr_vec) {
			PMD_DRV_LOG(ERR,
				    "Failed to allocate %d rx_queues intr_vec",
				    dev->data->nb_rx_queues);
			return -ENOMEM;
		}
	}

	/* Map queues with MSIX interrupt */
	vsi->nb_used_qps = dev->data->nb_rx_queues;
	ice_vsi_queues_bind_intr(vsi);

	/* Enable interrupts for all the queues */
	ice_vsi_enable_queues_intr(vsi);

	rte_intr_enable(intr_handle);

	return 0;
}
1783
1784 static int
1785 ice_dev_start(struct rte_eth_dev *dev)
1786 {
1787         struct rte_eth_dev_data *data = dev->data;
1788         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1789         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1790         struct ice_vsi *vsi = pf->main_vsi;
1791         uint16_t nb_rxq = 0;
1792         uint16_t nb_txq, i;
1793         int mask, ret;
1794
1795         /* program Tx queues' context in hardware */
1796         for (nb_txq = 0; nb_txq < data->nb_tx_queues; nb_txq++) {
1797                 ret = ice_tx_queue_start(dev, nb_txq);
1798                 if (ret) {
1799                         PMD_DRV_LOG(ERR, "fail to start Tx queue %u", nb_txq);
1800                         goto tx_err;
1801                 }
1802         }
1803
1804         /* program Rx queues' context in hardware*/
1805         for (nb_rxq = 0; nb_rxq < data->nb_rx_queues; nb_rxq++) {
1806                 ret = ice_rx_queue_start(dev, nb_rxq);
1807                 if (ret) {
1808                         PMD_DRV_LOG(ERR, "fail to start Rx queue %u", nb_rxq);
1809                         goto rx_err;
1810                 }
1811         }
1812
1813         ret = ice_init_rss(pf);
1814         if (ret) {
1815                 PMD_DRV_LOG(ERR, "Failed to enable rss for PF");
1816                 goto rx_err;
1817         }
1818
1819         ice_set_rx_function(dev);
1820         ice_set_tx_function(dev);
1821
1822         mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
1823                         ETH_VLAN_EXTEND_MASK;
1824         ret = ice_vlan_offload_set(dev, mask);
1825         if (ret) {
1826                 PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
1827                 goto rx_err;
1828         }
1829
1830         /* enable Rx interrput and mapping Rx queue to interrupt vector */
1831         if (ice_rxq_intr_setup(dev))
1832                 return -EIO;
1833
1834         /* Enable receiving broadcast packets and transmitting packets */
1835         ret = ice_set_vsi_promisc(hw, vsi->idx,
1836                                   ICE_PROMISC_BCAST_RX | ICE_PROMISC_BCAST_TX |
1837                                   ICE_PROMISC_UCAST_TX | ICE_PROMISC_MCAST_TX,
1838                                   0);
1839         if (ret != ICE_SUCCESS)
1840                 PMD_DRV_LOG(INFO, "fail to set vsi broadcast");
1841
1842         ret = ice_aq_set_event_mask(hw, hw->port_info->lport,
1843                                     ((u16)(ICE_AQ_LINK_EVENT_LINK_FAULT |
1844                                      ICE_AQ_LINK_EVENT_PHY_TEMP_ALARM |
1845                                      ICE_AQ_LINK_EVENT_EXCESSIVE_ERRORS |
1846                                      ICE_AQ_LINK_EVENT_SIGNAL_DETECT |
1847                                      ICE_AQ_LINK_EVENT_AN_COMPLETED |
1848                                      ICE_AQ_LINK_EVENT_PORT_TX_SUSPENDED)),
1849                                      NULL);
1850         if (ret != ICE_SUCCESS)
1851                 PMD_DRV_LOG(WARNING, "Fail to set phy mask");
1852
1853         /* Call get_link_info aq commond to enable/disable LSE */
1854         ice_link_update(dev, 0);
1855
1856         pf->adapter_stopped = false;
1857
1858         return 0;
1859
1860         /* stop the started queues if failed to start all queues */
1861 rx_err:
1862         for (i = 0; i < nb_rxq; i++)
1863                 ice_rx_queue_stop(dev, i);
1864 tx_err:
1865         for (i = 0; i < nb_txq; i++)
1866                 ice_tx_queue_stop(dev, i);
1867
1868         return -EIO;
1869 }
1870
1871 static int
1872 ice_dev_reset(struct rte_eth_dev *dev)
1873 {
1874         int ret;
1875
1876         if (dev->data->sriov.active)
1877                 return -ENOTSUP;
1878
1879         ret = ice_dev_uninit(dev);
1880         if (ret) {
1881                 PMD_INIT_LOG(ERR, "failed to uninit device, status = %d", ret);
1882                 return -ENXIO;
1883         }
1884
1885         ret = ice_dev_init(dev);
1886         if (ret) {
1887                 PMD_INIT_LOG(ERR, "failed to init device, status = %d", ret);
1888                 return -ENXIO;
1889         }
1890
1891         return 0;
1892 }
1893
/* Report static device capabilities and defaults to the ethdev layer:
 * queue/MAC limits, Rx/Tx offload capabilities, RSS parameters, default
 * queue configuration, descriptor limits and supported link speeds.
 */
static void
ice_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_vsi *vsi = pf->main_vsi;
	struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);

	dev_info->min_rx_bufsize = ICE_BUF_SIZE_MIN;
	dev_info->max_rx_pktlen = ICE_FRAME_SIZE_MAX;
	dev_info->max_rx_queues = vsi->nb_qps;
	dev_info->max_tx_queues = vsi->nb_qps;
	dev_info->max_mac_addrs = vsi->max_macaddrs;
	dev_info->max_vfs = pci_dev->max_vfs;

	dev_info->rx_offload_capa =
		DEV_RX_OFFLOAD_VLAN_STRIP |
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM |
		DEV_RX_OFFLOAD_QINQ_STRIP |
		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
		DEV_RX_OFFLOAD_VLAN_EXTEND |
		DEV_RX_OFFLOAD_JUMBO_FRAME |
		DEV_RX_OFFLOAD_KEEP_CRC |
		DEV_RX_OFFLOAD_SCATTER |
		DEV_RX_OFFLOAD_VLAN_FILTER;
	dev_info->tx_offload_capa =
		DEV_TX_OFFLOAD_VLAN_INSERT |
		DEV_TX_OFFLOAD_QINQ_INSERT |
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM |
		DEV_TX_OFFLOAD_SCTP_CKSUM |
		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
		DEV_TX_OFFLOAD_TCP_TSO |
		DEV_TX_OFFLOAD_MULTI_SEGS |
		DEV_TX_OFFLOAD_MBUF_FAST_FREE;
	/* no per-queue-only offloads beyond the port-level ones */
	dev_info->rx_queue_offload_capa = 0;
	dev_info->tx_queue_offload_capa = 0;

	dev_info->reta_size = hw->func_caps.common_cap.rss_table_size;
	/* key registers are 32-bit words; index is inclusive, hence +1 */
	dev_info->hash_key_size = (VSIQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
	dev_info->flow_type_rss_offloads = ICE_RSS_OFFLOAD_ALL;

	/* defaults applied when the application passes NULL queue conf */
	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_thresh = {
			.pthresh = ICE_DEFAULT_RX_PTHRESH,
			.hthresh = ICE_DEFAULT_RX_HTHRESH,
			.wthresh = ICE_DEFAULT_RX_WTHRESH,
		},
		.rx_free_thresh = ICE_DEFAULT_RX_FREE_THRESH,
		.rx_drop_en = 0,
		.offloads = 0,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_thresh = {
			.pthresh = ICE_DEFAULT_TX_PTHRESH,
			.hthresh = ICE_DEFAULT_TX_HTHRESH,
			.wthresh = ICE_DEFAULT_TX_WTHRESH,
		},
		.tx_free_thresh = ICE_DEFAULT_TX_FREE_THRESH,
		.tx_rs_thresh = ICE_DEFAULT_TX_RSBIT_THRESH,
		.offloads = 0,
	};

	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = ICE_MAX_RING_DESC,
		.nb_min = ICE_MIN_RING_DESC,
		.nb_align = ICE_ALIGN_RING_DESC,
	};

	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = ICE_MAX_RING_DESC,
		.nb_min = ICE_MIN_RING_DESC,
		.nb_align = ICE_ALIGN_RING_DESC,
	};

	dev_info->speed_capa = ETH_LINK_SPEED_10M |
			       ETH_LINK_SPEED_100M |
			       ETH_LINK_SPEED_1G |
			       ETH_LINK_SPEED_2_5G |
			       ETH_LINK_SPEED_5G |
			       ETH_LINK_SPEED_10G |
			       ETH_LINK_SPEED_20G |
			       ETH_LINK_SPEED_25G |
			       ETH_LINK_SPEED_40G |
			       ETH_LINK_SPEED_50G |
			       ETH_LINK_SPEED_100G;

	dev_info->nb_rx_queues = dev->data->nb_rx_queues;
	dev_info->nb_tx_queues = dev->data->nb_tx_queues;

	dev_info->default_rxportconf.burst_size = ICE_RX_MAX_BURST;
	dev_info->default_txportconf.burst_size = ICE_TX_MAX_BURST;
	dev_info->default_rxportconf.nb_queues = 1;
	dev_info->default_txportconf.nb_queues = 1;
	dev_info->default_rxportconf.ring_size = ICE_BUF_SIZE_MIN;
	dev_info->default_txportconf.ring_size = ICE_BUF_SIZE_MIN;
}
1995
1996 static inline int
1997 ice_atomic_read_link_status(struct rte_eth_dev *dev,
1998                             struct rte_eth_link *link)
1999 {
2000         struct rte_eth_link *dst = link;
2001         struct rte_eth_link *src = &dev->data->dev_link;
2002
2003         if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
2004                                 *(uint64_t *)src) == 0)
2005                 return -1;
2006
2007         return 0;
2008 }
2009
2010 static inline int
2011 ice_atomic_write_link_status(struct rte_eth_dev *dev,
2012                              struct rte_eth_link *link)
2013 {
2014         struct rte_eth_link *dst = &dev->data->dev_link;
2015         struct rte_eth_link *src = link;
2016
2017         if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
2018                                 *(uint64_t *)src) == 0)
2019                 return -1;
2020
2021         return 0;
2022 }
2023
/* Query link status from firmware and publish it to the ethdev layer.
 *
 * Polls the AQ up to MAX_REPEAT_TIME times (when waiting is requested and
 * the link is down), decodes the firmware speed code into an ethdev speed,
 * and atomically stores the result. Returns 0 when the status changed,
 * -1 when it is unchanged (ethdev convention for link_update callbacks).
 *
 * NOTE(review): wait_to_complete is marked __rte_unused but IS read in the
 * polling loop below — the attribute is stale (harmless, it only suppresses
 * a warning).
 */
static int
ice_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
{
#define CHECK_INTERVAL 100  /* 100ms */
#define MAX_REPEAT_TIME 10  /* 1s (10 * 100ms) in total */
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_link_status link_status;
	struct rte_eth_link link, old;
	int status;
	unsigned int rep_cnt = MAX_REPEAT_TIME;
	/* link-status-event reporting follows the LSC interrupt config */
	bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false;

	memset(&link, 0, sizeof(link));
	memset(&old, 0, sizeof(old));
	memset(&link_status, 0, sizeof(link_status));
	/* keep the previous status so we can report "changed" vs "same" */
	ice_atomic_read_link_status(dev, &old);

	do {
		/* Get link status information from hardware */
		status = ice_aq_get_link_info(hw->port_info, enable_lse,
					      &link_status, NULL);
		if (status != ICE_SUCCESS) {
			/* fall back to a conservative default on AQ error */
			link.link_speed = ETH_SPEED_NUM_100M;
			link.link_duplex = ETH_LINK_FULL_DUPLEX;
			PMD_DRV_LOG(ERR, "Failed to get link info");
			goto out;
		}

		link.link_status = link_status.link_info & ICE_AQ_LINK_UP;
		if (!wait_to_complete || link.link_status)
			break;

		rte_delay_ms(CHECK_INTERVAL);
	} while (--rep_cnt);

	if (!link.link_status)
		goto out;

	/* Full-duplex operation at all supported speeds */
	link.link_duplex = ETH_LINK_FULL_DUPLEX;

	/* Parse the link status */
	switch (link_status.link_speed) {
	case ICE_AQ_LINK_SPEED_10MB:
		link.link_speed = ETH_SPEED_NUM_10M;
		break;
	case ICE_AQ_LINK_SPEED_100MB:
		link.link_speed = ETH_SPEED_NUM_100M;
		break;
	case ICE_AQ_LINK_SPEED_1000MB:
		link.link_speed = ETH_SPEED_NUM_1G;
		break;
	case ICE_AQ_LINK_SPEED_2500MB:
		link.link_speed = ETH_SPEED_NUM_2_5G;
		break;
	case ICE_AQ_LINK_SPEED_5GB:
		link.link_speed = ETH_SPEED_NUM_5G;
		break;
	case ICE_AQ_LINK_SPEED_10GB:
		link.link_speed = ETH_SPEED_NUM_10G;
		break;
	case ICE_AQ_LINK_SPEED_20GB:
		link.link_speed = ETH_SPEED_NUM_20G;
		break;
	case ICE_AQ_LINK_SPEED_25GB:
		link.link_speed = ETH_SPEED_NUM_25G;
		break;
	case ICE_AQ_LINK_SPEED_40GB:
		link.link_speed = ETH_SPEED_NUM_40G;
		break;
	case ICE_AQ_LINK_SPEED_50GB:
		link.link_speed = ETH_SPEED_NUM_50G;
		break;
	case ICE_AQ_LINK_SPEED_100GB:
		link.link_speed = ETH_SPEED_NUM_100G;
		break;
	case ICE_AQ_LINK_SPEED_UNKNOWN:
	default:
		PMD_DRV_LOG(ERR, "Unknown link speed");
		link.link_speed = ETH_SPEED_NUM_NONE;
		break;
	}

	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
			      ETH_LINK_SPEED_FIXED);

out:
	ice_atomic_write_link_status(dev, &link);
	if (link.link_status == old.link_status)
		return -1;

	return 0;
}
2117
2118 static int
2119 ice_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
2120 {
2121         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2122         struct rte_eth_dev_data *dev_data = pf->dev_data;
2123         uint32_t frame_size = mtu + ETHER_HDR_LEN
2124                               + ETHER_CRC_LEN + ICE_VLAN_TAG_SIZE;
2125
2126         /* check if mtu is within the allowed range */
2127         if (mtu < ETHER_MIN_MTU || frame_size > ICE_FRAME_SIZE_MAX)
2128                 return -EINVAL;
2129
2130         /* mtu setting is forbidden if port is start */
2131         if (dev_data->dev_started) {
2132                 PMD_DRV_LOG(ERR,
2133                             "port %d must be stopped before configuration",
2134                             dev_data->port_id);
2135                 return -EBUSY;
2136         }
2137
2138         if (frame_size > ETHER_MAX_LEN)
2139                 dev_data->dev_conf.rxmode.offloads |=
2140                         DEV_RX_OFFLOAD_JUMBO_FRAME;
2141         else
2142                 dev_data->dev_conf.rxmode.offloads &=
2143                         ~DEV_RX_OFFLOAD_JUMBO_FRAME;
2144
2145         dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
2146
2147         return 0;
2148 }
2149
/* Replace the port's default MAC address.
 *
 * Looks up the filter for the current default address, swaps it for a
 * filter on the new address, records the new address in pf->dev_addr and
 * tells the firmware about the locally-administered address.
 * Returns 0 on success, -EINVAL/-EIO on failure.
 *
 * NOTE(review): the remove/add swap is not atomic — if ice_add_mac_filter
 * fails after the remove succeeded, the port is left with no default MAC
 * filter; worth confirming whether a rollback is needed.
 */
static int ice_macaddr_set(struct rte_eth_dev *dev,
			   struct ether_addr *mac_addr)
{
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_vsi *vsi = pf->main_vsi;
	struct ice_mac_filter *f;
	uint8_t flags = 0;
	int ret;

	if (!is_valid_assigned_ether_addr(mac_addr)) {
		PMD_DRV_LOG(ERR, "Tried to set invalid MAC address.");
		return -EINVAL;
	}

	/* find the filter currently holding the default address */
	TAILQ_FOREACH(f, &vsi->mac_list, next) {
		if (is_same_ether_addr(&pf->dev_addr, &f->mac_info.mac_addr))
			break;
	}

	if (!f) {
		PMD_DRV_LOG(ERR, "Failed to find filter for default mac");
		return -EIO;
	}

	ret = ice_remove_mac_filter(vsi, &f->mac_info.mac_addr);
	if (ret != ICE_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to delete mac filter");
		return -EIO;
	}
	ret = ice_add_mac_filter(vsi, mac_addr);
	if (ret != ICE_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to add mac filter");
		return -EIO;
	}
	memcpy(&pf->dev_addr, mac_addr, ETH_ADDR_LEN);

	/* inform firmware of the new locally-administered address */
	flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL;
	ret = ice_aq_manage_mac_write(hw, mac_addr->addr_bytes, flags, NULL);
	if (ret != ICE_SUCCESS)
		PMD_DRV_LOG(ERR, "Failed to set manage mac");

	return 0;
}
2194
2195 /* Add a MAC address, and update filters */
2196 static int
2197 ice_macaddr_add(struct rte_eth_dev *dev,
2198                 struct ether_addr *mac_addr,
2199                 __rte_unused uint32_t index,
2200                 __rte_unused uint32_t pool)
2201 {
2202         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2203         struct ice_vsi *vsi = pf->main_vsi;
2204         int ret;
2205
2206         ret = ice_add_mac_filter(vsi, mac_addr);
2207         if (ret != ICE_SUCCESS) {
2208                 PMD_DRV_LOG(ERR, "Failed to add MAC filter");
2209                 return -EINVAL;
2210         }
2211
2212         return ICE_SUCCESS;
2213 }
2214
2215 /* Remove a MAC address, and update filters */
2216 static void
2217 ice_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
2218 {
2219         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2220         struct ice_vsi *vsi = pf->main_vsi;
2221         struct rte_eth_dev_data *data = dev->data;
2222         struct ether_addr *macaddr;
2223         int ret;
2224
2225         macaddr = &data->mac_addrs[index];
2226         ret = ice_remove_mac_filter(vsi, macaddr);
2227         if (ret) {
2228                 PMD_DRV_LOG(ERR, "Failed to remove MAC filter");
2229                 return;
2230         }
2231 }
2232
2233 static int
2234 ice_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
2235 {
2236         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2237         struct ice_vsi *vsi = pf->main_vsi;
2238         int ret;
2239
2240         PMD_INIT_FUNC_TRACE();
2241
2242         if (on) {
2243                 ret = ice_add_vlan_filter(vsi, vlan_id);
2244                 if (ret < 0) {
2245                         PMD_DRV_LOG(ERR, "Failed to add vlan filter");
2246                         return -EINVAL;
2247                 }
2248         } else {
2249                 ret = ice_remove_vlan_filter(vsi, vlan_id);
2250                 if (ret < 0) {
2251                         PMD_DRV_LOG(ERR, "Failed to remove vlan filter");
2252                         return -EINVAL;
2253                 }
2254         }
2255
2256         return 0;
2257 }
2258
2259 /* Configure vlan filter on or off */
2260 static int
2261 ice_vsi_config_vlan_filter(struct ice_vsi *vsi, bool on)
2262 {
2263         struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
2264         struct ice_vsi_ctx ctxt;
2265         uint8_t sec_flags, sw_flags2;
2266         int ret = 0;
2267
2268         sec_flags = ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
2269                     ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S;
2270         sw_flags2 = ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
2271
2272         if (on) {
2273                 vsi->info.sec_flags |= sec_flags;
2274                 vsi->info.sw_flags2 |= sw_flags2;
2275         } else {
2276                 vsi->info.sec_flags &= ~sec_flags;
2277                 vsi->info.sw_flags2 &= ~sw_flags2;
2278         }
2279         vsi->info.sw_id = hw->port_info->sw_id;
2280         (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
2281         ctxt.info.valid_sections =
2282                 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_SW_VALID |
2283                                  ICE_AQ_VSI_PROP_SECURITY_VALID);
2284         ctxt.vsi_num = vsi->vsi_id;
2285
2286         ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
2287         if (ret) {
2288                 PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan rx pruning",
2289                             on ? "enable" : "disable");
2290                 return -EINVAL;
2291         } else {
2292                 vsi->info.valid_sections |=
2293                         rte_cpu_to_le_16(ICE_AQ_VSI_PROP_SW_VALID |
2294                                          ICE_AQ_VSI_PROP_SECURITY_VALID);
2295         }
2296
2297         /* consist with other drivers, allow untagged packet when vlan filter on */
2298         if (on)
2299                 ret = ice_add_vlan_filter(vsi, 0);
2300         else
2301                 ret = ice_remove_vlan_filter(vsi, 0);
2302
2303         return 0;
2304 }
2305
/* Enable or disable VLAN stripping on the VSI by updating the inner-VLAN
 * emulation mode in the VSI context. A no-op if the requested mode is
 * already programmed. Returns 0 on success, -EINVAL on update failure.
 */
static int
ice_vsi_config_vlan_stripping(struct ice_vsi *vsi, bool on)
{
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	struct ice_vsi_ctx ctxt;
	uint8_t vlan_flags;
	int ret = 0;

	/* Check if it has been already on or off */
	if (vsi->info.valid_sections &
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID)) {
		if (on) {
			if ((vsi->info.vlan_flags &
			     ICE_AQ_VSI_VLAN_EMOD_M) ==
			    ICE_AQ_VSI_VLAN_EMOD_STR_BOTH)
				return 0; /* already on */
		} else {
			if ((vsi->info.vlan_flags &
			     ICE_AQ_VSI_VLAN_EMOD_M) ==
			    ICE_AQ_VSI_VLAN_EMOD_NOTHING)
				return 0; /* already off */
		}
	}

	if (on)
		vlan_flags = ICE_AQ_VSI_VLAN_EMOD_STR_BOTH;
	else
		vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING;
	/* replace only the emulation-mode bits, keep the rest */
	vsi->info.vlan_flags &= ~(ICE_AQ_VSI_VLAN_EMOD_M);
	vsi->info.vlan_flags |= vlan_flags;
	/* NOTE(review): ctxt fields other than info/vsi_num are left
	 * uninitialized — confirm ice_update_vsi() reads only these.
	 */
	(void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
	ctxt.info.valid_sections =
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID);
	ctxt.vsi_num = vsi->vsi_id;
	ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
	if (ret) {
		PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan stripping",
			    on ? "enable" : "disable");
		return -EINVAL;
	}

	vsi->info.valid_sections |=
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID);

	return ret;
}
2352
2353 static int
2354 ice_vlan_offload_set(struct rte_eth_dev *dev, int mask)
2355 {
2356         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2357         struct ice_vsi *vsi = pf->main_vsi;
2358         struct rte_eth_rxmode *rxmode;
2359
2360         rxmode = &dev->data->dev_conf.rxmode;
2361         if (mask & ETH_VLAN_FILTER_MASK) {
2362                 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
2363                         ice_vsi_config_vlan_filter(vsi, TRUE);
2364                 else
2365                         ice_vsi_config_vlan_filter(vsi, FALSE);
2366         }
2367
2368         if (mask & ETH_VLAN_STRIP_MASK) {
2369                 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
2370                         ice_vsi_config_vlan_stripping(vsi, TRUE);
2371                 else
2372                         ice_vsi_config_vlan_stripping(vsi, FALSE);
2373         }
2374
2375         if (mask & ETH_VLAN_EXTEND_MASK) {
2376                 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
2377                         ice_vsi_config_double_vlan(vsi, TRUE);
2378                 else
2379                         ice_vsi_config_double_vlan(vsi, FALSE);
2380         }
2381
2382         return 0;
2383 }
2384
/* Program the TPID (EtherType) used for outer/inner VLAN tag matching.
 *
 * Selects the GL_SWT_L2TAGCTRL register index by vlan_type and the QinQ
 * (VLAN extend) offload state, then rewrites only the EtherType field of
 * that register. Inner TPID is only meaningful with QinQ enabled.
 * Returns 0 on success, -EINVAL for unsupported combinations.
 */
static int
ice_vlan_tpid_set(struct rte_eth_dev *dev,
		  enum rte_vlan_type vlan_type,
		  uint16_t tpid)
{
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint64_t reg_r = 0, reg_w = 0;
	uint16_t reg_id = 0;
	int ret = 0;
	int qinq = dev->data->dev_conf.rxmode.offloads &
		   DEV_RX_OFFLOAD_VLAN_EXTEND;

	/* L2TAGCTRL register indices: 3 = outer tag (QinQ),
	 * 5 = single/inner tag
	 */
	switch (vlan_type) {
	case ETH_VLAN_TYPE_OUTER:
		if (qinq)
			reg_id = 3;
		else
			reg_id = 5;
	break;
	case ETH_VLAN_TYPE_INNER:
		if (qinq) {
			reg_id = 5;
		} else {
			PMD_DRV_LOG(ERR,
				    "Unsupported vlan type in single vlan.");
			return -EINVAL;
		}
		break;
	default:
		PMD_DRV_LOG(ERR, "Unsupported vlan type %d", vlan_type);
		return -EINVAL;
	}
	reg_r = ICE_READ_REG(hw, GL_SWT_L2TAGCTRL(reg_id));
	PMD_DRV_LOG(DEBUG, "Debug read from ICE GL_SWT_L2TAGCTRL[%d]: "
		    "0x%08"PRIx64"", reg_id, reg_r);

	/* read-modify-write: replace only the EtherType field */
	reg_w = reg_r & (~(GL_SWT_L2TAGCTRL_ETHERTYPE_M));
	reg_w |= ((uint64_t)tpid << GL_SWT_L2TAGCTRL_ETHERTYPE_S);
	if (reg_r == reg_w) {
		PMD_DRV_LOG(DEBUG, "No need to write");
		return 0;
	}

	ICE_WRITE_REG(hw, GL_SWT_L2TAGCTRL(reg_id), reg_w);
	PMD_DRV_LOG(DEBUG, "Debug write 0x%08"PRIx64" to "
		    "ICE GL_SWT_L2TAGCTRL[%d]", reg_w, reg_id);

	return ret;
}
2434
2435 static int
2436 ice_get_rss_lut(struct ice_vsi *vsi, uint8_t *lut, uint16_t lut_size)
2437 {
2438         struct ice_pf *pf = ICE_VSI_TO_PF(vsi);
2439         struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
2440         int ret;
2441
2442         if (!lut)
2443                 return -EINVAL;
2444
2445         if (pf->flags & ICE_FLAG_RSS_AQ_CAPABLE) {
2446                 ret = ice_aq_get_rss_lut(hw, vsi->idx, TRUE,
2447                                          lut, lut_size);
2448                 if (ret) {
2449                         PMD_DRV_LOG(ERR, "Failed to get RSS lookup table");
2450                         return -EINVAL;
2451                 }
2452         } else {
2453                 uint64_t *lut_dw = (uint64_t *)lut;
2454                 uint16_t i, lut_size_dw = lut_size / 4;
2455
2456                 for (i = 0; i < lut_size_dw; i++)
2457                         lut_dw[i] = ICE_READ_REG(hw, PFQF_HLUT(i));
2458         }
2459
2460         return 0;
2461 }
2462
2463 static int
2464 ice_set_rss_lut(struct ice_vsi *vsi, uint8_t *lut, uint16_t lut_size)
2465 {
2466         struct ice_pf *pf = ICE_VSI_TO_PF(vsi);
2467         struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
2468         int ret;
2469
2470         if (!vsi || !lut)
2471                 return -EINVAL;
2472
2473         if (pf->flags & ICE_FLAG_RSS_AQ_CAPABLE) {
2474                 ret = ice_aq_set_rss_lut(hw, vsi->idx, TRUE,
2475                                          lut, lut_size);
2476                 if (ret) {
2477                         PMD_DRV_LOG(ERR, "Failed to set RSS lookup table");
2478                         return -EINVAL;
2479                 }
2480         } else {
2481                 uint64_t *lut_dw = (uint64_t *)lut;
2482                 uint16_t i, lut_size_dw = lut_size / 4;
2483
2484                 for (i = 0; i < lut_size_dw; i++)
2485                         ICE_WRITE_REG(hw, PFQF_HLUT(i), lut_dw[i]);
2486
2487                 ice_flush(hw);
2488         }
2489
2490         return 0;
2491 }
2492
2493 static int
2494 ice_rss_reta_update(struct rte_eth_dev *dev,
2495                     struct rte_eth_rss_reta_entry64 *reta_conf,
2496                     uint16_t reta_size)
2497 {
2498         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2499         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2500         uint16_t i, lut_size = hw->func_caps.common_cap.rss_table_size;
2501         uint16_t idx, shift;
2502         uint8_t *lut;
2503         int ret;
2504
2505         if (reta_size != lut_size ||
2506             reta_size > ETH_RSS_RETA_SIZE_512) {
2507                 PMD_DRV_LOG(ERR,
2508                             "The size of hash lookup table configured (%d)"
2509                             "doesn't match the number hardware can "
2510                             "supported (%d)",
2511                             reta_size, lut_size);
2512                 return -EINVAL;
2513         }
2514
2515         lut = rte_zmalloc(NULL, reta_size, 0);
2516         if (!lut) {
2517                 PMD_DRV_LOG(ERR, "No memory can be allocated");
2518                 return -ENOMEM;
2519         }
2520         ret = ice_get_rss_lut(pf->main_vsi, lut, reta_size);
2521         if (ret)
2522                 goto out;
2523
2524         for (i = 0; i < reta_size; i++) {
2525                 idx = i / RTE_RETA_GROUP_SIZE;
2526                 shift = i % RTE_RETA_GROUP_SIZE;
2527                 if (reta_conf[idx].mask & (1ULL << shift))
2528                         lut[i] = reta_conf[idx].reta[shift];
2529         }
2530         ret = ice_set_rss_lut(pf->main_vsi, lut, reta_size);
2531
2532 out:
2533         rte_free(lut);
2534
2535         return ret;
2536 }
2537
2538 static int
2539 ice_rss_reta_query(struct rte_eth_dev *dev,
2540                    struct rte_eth_rss_reta_entry64 *reta_conf,
2541                    uint16_t reta_size)
2542 {
2543         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2544         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2545         uint16_t i, lut_size = hw->func_caps.common_cap.rss_table_size;
2546         uint16_t idx, shift;
2547         uint8_t *lut;
2548         int ret;
2549
2550         if (reta_size != lut_size ||
2551             reta_size > ETH_RSS_RETA_SIZE_512) {
2552                 PMD_DRV_LOG(ERR,
2553                             "The size of hash lookup table configured (%d)"
2554                             "doesn't match the number hardware can "
2555                             "supported (%d)",
2556                             reta_size, lut_size);
2557                 return -EINVAL;
2558         }
2559
2560         lut = rte_zmalloc(NULL, reta_size, 0);
2561         if (!lut) {
2562                 PMD_DRV_LOG(ERR, "No memory can be allocated");
2563                 return -ENOMEM;
2564         }
2565
2566         ret = ice_get_rss_lut(pf->main_vsi, lut, reta_size);
2567         if (ret)
2568                 goto out;
2569
2570         for (i = 0; i < reta_size; i++) {
2571                 idx = i / RTE_RETA_GROUP_SIZE;
2572                 shift = i % RTE_RETA_GROUP_SIZE;
2573                 if (reta_conf[idx].mask & (1ULL << shift))
2574                         reta_conf[idx].reta[shift] = lut[i];
2575         }
2576
2577 out:
2578         rte_free(lut);
2579
2580         return ret;
2581 }
2582
2583 static int
2584 ice_set_rss_key(struct ice_vsi *vsi, uint8_t *key, uint8_t key_len)
2585 {
2586         struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
2587         int ret = 0;
2588
2589         if (!key || key_len == 0) {
2590                 PMD_DRV_LOG(DEBUG, "No key to be configured");
2591                 return 0;
2592         } else if (key_len != (VSIQF_HKEY_MAX_INDEX + 1) *
2593                    sizeof(uint32_t)) {
2594                 PMD_DRV_LOG(ERR, "Invalid key length %u", key_len);
2595                 return -EINVAL;
2596         }
2597
2598         struct ice_aqc_get_set_rss_keys *key_dw =
2599                 (struct ice_aqc_get_set_rss_keys *)key;
2600
2601         ret = ice_aq_set_rss_key(hw, vsi->idx, key_dw);
2602         if (ret) {
2603                 PMD_DRV_LOG(ERR, "Failed to configure RSS key via AQ");
2604                 ret = -EINVAL;
2605         }
2606
2607         return ret;
2608 }
2609
2610 static int
2611 ice_get_rss_key(struct ice_vsi *vsi, uint8_t *key, uint8_t *key_len)
2612 {
2613         struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
2614         int ret;
2615
2616         if (!key || !key_len)
2617                 return -EINVAL;
2618
2619         ret = ice_aq_get_rss_key
2620                 (hw, vsi->idx,
2621                  (struct ice_aqc_get_set_rss_keys *)key);
2622         if (ret) {
2623                 PMD_DRV_LOG(ERR, "Failed to get RSS key via AQ");
2624                 return -EINVAL;
2625         }
2626         *key_len = (VSIQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
2627
2628         return 0;
2629 }
2630
2631 static int
2632 ice_rss_hash_update(struct rte_eth_dev *dev,
2633                     struct rte_eth_rss_conf *rss_conf)
2634 {
2635         enum ice_status status = ICE_SUCCESS;
2636         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2637         struct ice_vsi *vsi = pf->main_vsi;
2638
2639         /* set hash key */
2640         status = ice_set_rss_key(vsi, rss_conf->rss_key, rss_conf->rss_key_len);
2641         if (status)
2642                 return status;
2643
2644         /* TODO: hash enable config, ice_add_rss_cfg */
2645         return 0;
2646 }
2647
2648 static int
2649 ice_rss_hash_conf_get(struct rte_eth_dev *dev,
2650                       struct rte_eth_rss_conf *rss_conf)
2651 {
2652         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2653         struct ice_vsi *vsi = pf->main_vsi;
2654
2655         ice_get_rss_key(vsi, rss_conf->rss_key,
2656                         &rss_conf->rss_key_len);
2657
2658         /* TODO: default set to 0 as hf config is not supported now */
2659         rss_conf->rss_hf = 0;
2660         return 0;
2661 }
2662
2663 static void
2664 ice_promisc_enable(struct rte_eth_dev *dev)
2665 {
2666         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2667         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2668         struct ice_vsi *vsi = pf->main_vsi;
2669         uint8_t pmask;
2670         uint16_t status;
2671
2672         pmask = ICE_PROMISC_UCAST_RX | ICE_PROMISC_UCAST_TX |
2673                 ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX;
2674
2675         status = ice_set_vsi_promisc(hw, vsi->idx, pmask, 0);
2676         if (status != ICE_SUCCESS)
2677                 PMD_DRV_LOG(ERR, "Failed to enable promisc, err=%d", status);
2678 }
2679
2680 static void
2681 ice_promisc_disable(struct rte_eth_dev *dev)
2682 {
2683         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2684         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2685         struct ice_vsi *vsi = pf->main_vsi;
2686         uint16_t status;
2687         uint8_t pmask;
2688
2689         pmask = ICE_PROMISC_UCAST_RX | ICE_PROMISC_UCAST_TX |
2690                 ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX;
2691
2692         status = ice_clear_vsi_promisc(hw, vsi->idx, pmask, 0);
2693         if (status != ICE_SUCCESS)
2694                 PMD_DRV_LOG(ERR, "Failed to clear promisc, err=%d", status);
2695 }
2696
2697 static void
2698 ice_allmulti_enable(struct rte_eth_dev *dev)
2699 {
2700         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2701         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2702         struct ice_vsi *vsi = pf->main_vsi;
2703         uint8_t pmask;
2704         uint16_t status;
2705
2706         pmask = ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX;
2707
2708         status = ice_set_vsi_promisc(hw, vsi->idx, pmask, 0);
2709         if (status != ICE_SUCCESS)
2710                 PMD_DRV_LOG(ERR, "Failed to enable allmulti, err=%d", status);
2711 }
2712
2713 static void
2714 ice_allmulti_disable(struct rte_eth_dev *dev)
2715 {
2716         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2717         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2718         struct ice_vsi *vsi = pf->main_vsi;
2719         uint16_t status;
2720         uint8_t pmask;
2721
2722         if (dev->data->promiscuous == 1)
2723                 return; /* must remain in all_multicast mode */
2724
2725         pmask = ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX;
2726
2727         status = ice_clear_vsi_promisc(hw, vsi->idx, pmask, 0);
2728         if (status != ICE_SUCCESS)
2729                 PMD_DRV_LOG(ERR, "Failed to clear allmulti, err=%d", status);
2730 }
2731
2732 static int ice_rx_queue_intr_enable(struct rte_eth_dev *dev,
2733                                     uint16_t queue_id)
2734 {
2735         struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
2736         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2737         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2738         uint32_t val;
2739         uint16_t msix_intr;
2740
2741         msix_intr = intr_handle->intr_vec[queue_id];
2742
2743         val = GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
2744               GLINT_DYN_CTL_ITR_INDX_M;
2745         val &= ~GLINT_DYN_CTL_WB_ON_ITR_M;
2746
2747         ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr), val);
2748         rte_intr_enable(&pci_dev->intr_handle);
2749
2750         return 0;
2751 }
2752
2753 static int ice_rx_queue_intr_disable(struct rte_eth_dev *dev,
2754                                      uint16_t queue_id)
2755 {
2756         struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
2757         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2758         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2759         uint16_t msix_intr;
2760
2761         msix_intr = intr_handle->intr_vec[queue_id];
2762
2763         ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr), GLINT_DYN_CTL_WB_ON_ITR_M);
2764
2765         return 0;
2766 }
2767
2768 static int
2769 ice_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
2770 {
2771         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2772         u32 full_ver;
2773         u8 ver, patch;
2774         u16 build;
2775         int ret;
2776
2777         full_ver = hw->nvm.oem_ver;
2778         ver = (u8)(full_ver >> 24);
2779         build = (u16)((full_ver >> 8) & 0xffff);
2780         patch = (u8)(full_ver & 0xff);
2781
2782         ret = snprintf(fw_version, fw_size,
2783                         "%d.%d%d 0x%08x %d.%d.%d",
2784                         ((hw->nvm.ver >> 12) & 0xf),
2785                         ((hw->nvm.ver >> 4) & 0xff),
2786                         (hw->nvm.ver & 0xf), hw->nvm.eetrack,
2787                         ver, build, patch);
2788
2789         /* add the size of '\0' */
2790         ret += 1;
2791         if (fw_size < (u32)ret)
2792                 return ret;
2793         else
2794                 return 0;
2795 }
2796
/**
 * Configure port-based VLAN (PVID) insertion on a VSI.
 *
 * When info->on is set, the given PVID is inserted on transmit and only
 * untagged packets are accepted from the host; otherwise insertion is
 * cleared and the accepted tag modes follow info->config.reject.  The
 * change is pushed to firmware via an update-VSI admin queue command.
 *
 * Returns ICE_SUCCESS (0) on success, -EINVAL on bad arguments or when
 * the update-VSI command fails.
 */
static int
ice_vsi_vlan_pvid_set(struct ice_vsi *vsi, struct ice_vsi_vlan_pvid_info *info)
{
	struct ice_hw *hw;
	struct ice_vsi_ctx ctxt;
	uint8_t vlan_flags = 0;
	int ret;

	if (!vsi || !info) {
		PMD_DRV_LOG(ERR, "invalid parameters");
		return -EINVAL;
	}

	if (info->on) {
		vsi->info.pvid = info->config.pvid;
		/**
		 * If insert pvid is enabled, only tagged pkts are
		 * allowed to be sent out.
		 */
		vlan_flags = ICE_AQ_VSI_PVLAN_INSERT_PVID |
			     ICE_AQ_VSI_VLAN_MODE_UNTAGGED;
	} else {
		vsi->info.pvid = 0;
		/* reject.* == 0 means "accept that kind of frame": build
		 * the allowed-mode mask from what is NOT rejected.
		 */
		if (info->config.reject.tagged == 0)
			vlan_flags |= ICE_AQ_VSI_VLAN_MODE_TAGGED;

		if (info->config.reject.untagged == 0)
			vlan_flags |= ICE_AQ_VSI_VLAN_MODE_UNTAGGED;
	}
	/* Replace only the PVID-insert and VLAN-mode fields in the cached
	 * VSI properties, then send the whole (updated) info block to FW
	 * with just the VLAN section marked valid.
	 */
	vsi->info.vlan_flags &= ~(ICE_AQ_VSI_PVLAN_INSERT_PVID |
				  ICE_AQ_VSI_VLAN_MODE_M);
	vsi->info.vlan_flags |= vlan_flags;
	memset(&ctxt, 0, sizeof(ctxt));
	rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
	ctxt.info.valid_sections =
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID);
	ctxt.vsi_num = vsi->vsi_id;

	hw = ICE_VSI_TO_HW(vsi);
	ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
	if (ret != ICE_SUCCESS) {
		PMD_DRV_LOG(ERR,
			    "update VSI for VLAN insert failed, err %d",
			    ret);
		return -EINVAL;
	}

	/* Remember locally that the VLAN section is now valid in FW. */
	vsi->info.valid_sections |=
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID);

	return ret;
}
2849
2850 static int
2851 ice_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
2852 {
2853         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2854         struct ice_vsi *vsi = pf->main_vsi;
2855         struct rte_eth_dev_data *data = pf->dev_data;
2856         struct ice_vsi_vlan_pvid_info info;
2857         int ret;
2858
2859         memset(&info, 0, sizeof(info));
2860         info.on = on;
2861         if (info.on) {
2862                 info.config.pvid = pvid;
2863         } else {
2864                 info.config.reject.tagged =
2865                         data->dev_conf.txmode.hw_vlan_reject_tagged;
2866                 info.config.reject.untagged =
2867                         data->dev_conf.txmode.hw_vlan_reject_untagged;
2868         }
2869
2870         ret = ice_vsi_vlan_pvid_set(vsi, &info);
2871         if (ret < 0) {
2872                 PMD_DRV_LOG(ERR, "Failed to set pvid.");
2873                 return -EINVAL;
2874         }
2875
2876         return 0;
2877 }
2878
2879 static int
2880 ice_get_eeprom_length(struct rte_eth_dev *dev)
2881 {
2882         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2883
2884         /* Convert word count to byte count */
2885         return hw->nvm.sr_words << 1;
2886 }
2887
2888 static int
2889 ice_get_eeprom(struct rte_eth_dev *dev,
2890                struct rte_dev_eeprom_info *eeprom)
2891 {
2892         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2893         uint16_t *data = eeprom->data;
2894         uint16_t first_word, last_word, nwords;
2895         enum ice_status status = ICE_SUCCESS;
2896
2897         first_word = eeprom->offset >> 1;
2898         last_word = (eeprom->offset + eeprom->length - 1) >> 1;
2899         nwords = last_word - first_word + 1;
2900
2901         if (first_word > hw->nvm.sr_words ||
2902             last_word > hw->nvm.sr_words) {
2903                 PMD_DRV_LOG(ERR, "Requested EEPROM bytes out of range.");
2904                 return -EINVAL;
2905         }
2906
2907         eeprom->magic = hw->vendor_id | (hw->device_id << 16);
2908
2909         status = ice_read_sr_buf(hw, first_word, &nwords, data);
2910         if (status) {
2911                 PMD_DRV_LOG(ERR, "EEPROM read failed.");
2912                 eeprom->length = sizeof(uint16_t) * nwords;
2913                 return -EIO;
2914         }
2915
2916         return 0;
2917 }
2918
2919 static void
2920 ice_stat_update_32(struct ice_hw *hw,
2921                    uint32_t reg,
2922                    bool offset_loaded,
2923                    uint64_t *offset,
2924                    uint64_t *stat)
2925 {
2926         uint64_t new_data;
2927
2928         new_data = (uint64_t)ICE_READ_REG(hw, reg);
2929         if (!offset_loaded)
2930                 *offset = new_data;
2931
2932         if (new_data >= *offset)
2933                 *stat = (uint64_t)(new_data - *offset);
2934         else
2935                 *stat = (uint64_t)((new_data +
2936                                     ((uint64_t)1 << ICE_32_BIT_WIDTH))
2937                                    - *offset);
2938 }
2939
2940 static void
2941 ice_stat_update_40(struct ice_hw *hw,
2942                    uint32_t hireg,
2943                    uint32_t loreg,
2944                    bool offset_loaded,
2945                    uint64_t *offset,
2946                    uint64_t *stat)
2947 {
2948         uint64_t new_data;
2949
2950         new_data = (uint64_t)ICE_READ_REG(hw, loreg);
2951         new_data |= (uint64_t)(ICE_READ_REG(hw, hireg) & ICE_8_BIT_MASK) <<
2952                     ICE_32_BIT_WIDTH;
2953
2954         if (!offset_loaded)
2955                 *offset = new_data;
2956
2957         if (new_data >= *offset)
2958                 *stat = new_data - *offset;
2959         else
2960                 *stat = (uint64_t)((new_data +
2961                                     ((uint64_t)1 << ICE_40_BIT_WIDTH)) -
2962                                    *offset);
2963
2964         *stat &= ICE_40_BIT_MASK;
2965 }
2966
/* Get all the statistics of a VSI */
/**
 * Refresh vsi->eth_stats from the per-VSI GLV_* counter registers.
 *
 * Each counter is read as a delta against the stored baseline in
 * vsi->eth_stats_offset (latched on the first read — see the stat
 * helpers).  RX byte counts are corrected to exclude the CRC, and the
 * resulting values are dumped at DEBUG level.
 */
static void
ice_update_vsi_stats(struct ice_vsi *vsi)
{
	struct ice_eth_stats *oes = &vsi->eth_stats_offset;
	struct ice_eth_stats *nes = &vsi->eth_stats;
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	int idx = rte_le_to_cpu_16(vsi->vsi_id);

	/* 40-bit byte/packet counters: hi/lo register pairs per VSI. */
	ice_stat_update_40(hw, GLV_GORCH(idx), GLV_GORCL(idx),
			   vsi->offset_loaded, &oes->rx_bytes,
			   &nes->rx_bytes);
	ice_stat_update_40(hw, GLV_UPRCH(idx), GLV_UPRCL(idx),
			   vsi->offset_loaded, &oes->rx_unicast,
			   &nes->rx_unicast);
	ice_stat_update_40(hw, GLV_MPRCH(idx), GLV_MPRCL(idx),
			   vsi->offset_loaded, &oes->rx_multicast,
			   &nes->rx_multicast);
	ice_stat_update_40(hw, GLV_BPRCH(idx), GLV_BPRCL(idx),
			   vsi->offset_loaded, &oes->rx_broadcast,
			   &nes->rx_broadcast);
	/* exclude CRC bytes */
	nes->rx_bytes -= (nes->rx_unicast + nes->rx_multicast +
			  nes->rx_broadcast) * ETHER_CRC_LEN;

	ice_stat_update_32(hw, GLV_RDPC(idx), vsi->offset_loaded,
			   &oes->rx_discards, &nes->rx_discards);
	/* GLV_REPC not supported */
	/* GLV_RMPC not supported */
	ice_stat_update_32(hw, GLSWID_RUPP(idx), vsi->offset_loaded,
			   &oes->rx_unknown_protocol,
			   &nes->rx_unknown_protocol);
	ice_stat_update_40(hw, GLV_GOTCH(idx), GLV_GOTCL(idx),
			   vsi->offset_loaded, &oes->tx_bytes,
			   &nes->tx_bytes);
	ice_stat_update_40(hw, GLV_UPTCH(idx), GLV_UPTCL(idx),
			   vsi->offset_loaded, &oes->tx_unicast,
			   &nes->tx_unicast);
	ice_stat_update_40(hw, GLV_MPTCH(idx), GLV_MPTCL(idx),
			   vsi->offset_loaded, &oes->tx_multicast,
			   &nes->tx_multicast);
	ice_stat_update_40(hw, GLV_BPTCH(idx), GLV_BPTCL(idx),
			   vsi->offset_loaded,  &oes->tx_broadcast,
			   &nes->tx_broadcast);
	/* GLV_TDPC not supported */
	ice_stat_update_32(hw, GLV_TEPC(idx), vsi->offset_loaded,
			   &oes->tx_errors, &nes->tx_errors);
	/* Baselines are latched now; later calls report true deltas. */
	vsi->offset_loaded = true;

	PMD_DRV_LOG(DEBUG, "************** VSI[%u] stats start **************",
		    vsi->vsi_id);
	PMD_DRV_LOG(DEBUG, "rx_bytes:            %"PRIu64"", nes->rx_bytes);
	PMD_DRV_LOG(DEBUG, "rx_unicast:          %"PRIu64"", nes->rx_unicast);
	PMD_DRV_LOG(DEBUG, "rx_multicast:        %"PRIu64"", nes->rx_multicast);
	PMD_DRV_LOG(DEBUG, "rx_broadcast:        %"PRIu64"", nes->rx_broadcast);
	PMD_DRV_LOG(DEBUG, "rx_discards:         %"PRIu64"", nes->rx_discards);
	PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
		    nes->rx_unknown_protocol);
	PMD_DRV_LOG(DEBUG, "tx_bytes:            %"PRIu64"", nes->tx_bytes);
	PMD_DRV_LOG(DEBUG, "tx_unicast:          %"PRIu64"", nes->tx_unicast);
	PMD_DRV_LOG(DEBUG, "tx_multicast:        %"PRIu64"", nes->tx_multicast);
	PMD_DRV_LOG(DEBUG, "tx_broadcast:        %"PRIu64"", nes->tx_broadcast);
	PMD_DRV_LOG(DEBUG, "tx_discards:         %"PRIu64"", nes->tx_discards);
	PMD_DRV_LOG(DEBUG, "tx_errors:           %"PRIu64"", nes->tx_errors);
	PMD_DRV_LOG(DEBUG, "************** VSI[%u] stats end ****************",
		    vsi->vsi_id);
}
3034
3035 static void
3036 ice_read_stats_registers(struct ice_pf *pf, struct ice_hw *hw)
3037 {
3038         struct ice_hw_port_stats *ns = &pf->stats; /* new stats */
3039         struct ice_hw_port_stats *os = &pf->stats_offset; /* old stats */
3040
3041         /* Get statistics of struct ice_eth_stats */
3042         ice_stat_update_40(hw, GLPRT_GORCH(hw->port_info->lport),
3043                            GLPRT_GORCL(hw->port_info->lport),
3044                            pf->offset_loaded, &os->eth.rx_bytes,
3045                            &ns->eth.rx_bytes);
3046         ice_stat_update_40(hw, GLPRT_UPRCH(hw->port_info->lport),
3047                            GLPRT_UPRCL(hw->port_info->lport),
3048                            pf->offset_loaded, &os->eth.rx_unicast,
3049                            &ns->eth.rx_unicast);
3050         ice_stat_update_40(hw, GLPRT_MPRCH(hw->port_info->lport),
3051                            GLPRT_MPRCL(hw->port_info->lport),
3052                            pf->offset_loaded, &os->eth.rx_multicast,
3053                            &ns->eth.rx_multicast);
3054         ice_stat_update_40(hw, GLPRT_BPRCH(hw->port_info->lport),
3055                            GLPRT_BPRCL(hw->port_info->lport),
3056                            pf->offset_loaded, &os->eth.rx_broadcast,
3057                            &ns->eth.rx_broadcast);
3058         ice_stat_update_32(hw, PRTRPB_RDPC,
3059                            pf->offset_loaded, &os->eth.rx_discards,
3060                            &ns->eth.rx_discards);
3061
3062         /* Workaround: CRC size should not be included in byte statistics,
3063          * so subtract ETHER_CRC_LEN from the byte counter for each rx packet.
3064          */
3065         ns->eth.rx_bytes -= (ns->eth.rx_unicast + ns->eth.rx_multicast +
3066                              ns->eth.rx_broadcast) * ETHER_CRC_LEN;
3067
3068         /* GLPRT_REPC not supported */
3069         /* GLPRT_RMPC not supported */
3070         ice_stat_update_32(hw, GLSWID_RUPP(hw->port_info->lport),
3071                            pf->offset_loaded,
3072                            &os->eth.rx_unknown_protocol,
3073                            &ns->eth.rx_unknown_protocol);
3074         ice_stat_update_40(hw, GLPRT_GOTCH(hw->port_info->lport),
3075                            GLPRT_GOTCL(hw->port_info->lport),
3076                            pf->offset_loaded, &os->eth.tx_bytes,
3077                            &ns->eth.tx_bytes);
3078         ice_stat_update_40(hw, GLPRT_UPTCH(hw->port_info->lport),
3079                            GLPRT_UPTCL(hw->port_info->lport),
3080                            pf->offset_loaded, &os->eth.tx_unicast,
3081                            &ns->eth.tx_unicast);
3082         ice_stat_update_40(hw, GLPRT_MPTCH(hw->port_info->lport),
3083                            GLPRT_MPTCL(hw->port_info->lport),
3084                            pf->offset_loaded, &os->eth.tx_multicast,
3085                            &ns->eth.tx_multicast);
3086         ice_stat_update_40(hw, GLPRT_BPTCH(hw->port_info->lport),
3087                            GLPRT_BPTCL(hw->port_info->lport),
3088                            pf->offset_loaded, &os->eth.tx_broadcast,
3089                            &ns->eth.tx_broadcast);
3090         ns->eth.tx_bytes -= (ns->eth.tx_unicast + ns->eth.tx_multicast +
3091                              ns->eth.tx_broadcast) * ETHER_CRC_LEN;
3092
3093         /* GLPRT_TEPC not supported */
3094
3095         /* additional port specific stats */
3096         ice_stat_update_32(hw, GLPRT_TDOLD(hw->port_info->lport),
3097                            pf->offset_loaded, &os->tx_dropped_link_down,
3098                            &ns->tx_dropped_link_down);
3099         ice_stat_update_32(hw, GLPRT_CRCERRS(hw->port_info->lport),
3100                            pf->offset_loaded, &os->crc_errors,
3101                            &ns->crc_errors);
3102         ice_stat_update_32(hw, GLPRT_ILLERRC(hw->port_info->lport),
3103                            pf->offset_loaded, &os->illegal_bytes,
3104                            &ns->illegal_bytes);
3105         /* GLPRT_ERRBC not supported */
3106         ice_stat_update_32(hw, GLPRT_MLFC(hw->port_info->lport),
3107                            pf->offset_loaded, &os->mac_local_faults,
3108                            &ns->mac_local_faults);
3109         ice_stat_update_32(hw, GLPRT_MRFC(hw->port_info->lport),
3110                            pf->offset_loaded, &os->mac_remote_faults,
3111                            &ns->mac_remote_faults);
3112
3113         ice_stat_update_32(hw, GLPRT_RLEC(hw->port_info->lport),
3114                            pf->offset_loaded, &os->rx_len_errors,
3115                            &ns->rx_len_errors);
3116
3117         ice_stat_update_32(hw, GLPRT_LXONRXC(hw->port_info->lport),
3118                            pf->offset_loaded, &os->link_xon_rx,
3119                            &ns->link_xon_rx);
3120         ice_stat_update_32(hw, GLPRT_LXOFFRXC(hw->port_info->lport),
3121                            pf->offset_loaded, &os->link_xoff_rx,
3122                            &ns->link_xoff_rx);
3123         ice_stat_update_32(hw, GLPRT_LXONTXC(hw->port_info->lport),
3124                            pf->offset_loaded, &os->link_xon_tx,
3125                            &ns->link_xon_tx);
3126         ice_stat_update_32(hw, GLPRT_LXOFFTXC(hw->port_info->lport),
3127                            pf->offset_loaded, &os->link_xoff_tx,
3128                            &ns->link_xoff_tx);
3129         ice_stat_update_40(hw, GLPRT_PRC64H(hw->port_info->lport),
3130                            GLPRT_PRC64L(hw->port_info->lport),
3131                            pf->offset_loaded, &os->rx_size_64,
3132                            &ns->rx_size_64);
3133         ice_stat_update_40(hw, GLPRT_PRC127H(hw->port_info->lport),
3134                            GLPRT_PRC127L(hw->port_info->lport),
3135                            pf->offset_loaded, &os->rx_size_127,
3136                            &ns->rx_size_127);
3137         ice_stat_update_40(hw, GLPRT_PRC255H(hw->port_info->lport),
3138                            GLPRT_PRC255L(hw->port_info->lport),
3139                            pf->offset_loaded, &os->rx_size_255,
3140                            &ns->rx_size_255);
3141         ice_stat_update_40(hw, GLPRT_PRC511H(hw->port_info->lport),
3142                            GLPRT_PRC511L(hw->port_info->lport),
3143                            pf->offset_loaded, &os->rx_size_511,
3144                            &ns->rx_size_511);
3145         ice_stat_update_40(hw, GLPRT_PRC1023H(hw->port_info->lport),
3146                            GLPRT_PRC1023L(hw->port_info->lport),
3147                            pf->offset_loaded, &os->rx_size_1023,
3148                            &ns->rx_size_1023);
3149         ice_stat_update_40(hw, GLPRT_PRC1522H(hw->port_info->lport),
3150                            GLPRT_PRC1522L(hw->port_info->lport),
3151                            pf->offset_loaded, &os->rx_size_1522,
3152                            &ns->rx_size_1522);
3153         ice_stat_update_40(hw, GLPRT_PRC9522H(hw->port_info->lport),
3154                            GLPRT_PRC9522L(hw->port_info->lport),
3155                            pf->offset_loaded, &os->rx_size_big,
3156                            &ns->rx_size_big);
3157         ice_stat_update_32(hw, GLPRT_RUC(hw->port_info->lport),
3158                            pf->offset_loaded, &os->rx_undersize,
3159                            &ns->rx_undersize);
3160         ice_stat_update_32(hw, GLPRT_RFC(hw->port_info->lport),
3161                            pf->offset_loaded, &os->rx_fragments,
3162                            &ns->rx_fragments);
3163         ice_stat_update_32(hw, GLPRT_ROC(hw->port_info->lport),
3164                            pf->offset_loaded, &os->rx_oversize,
3165                            &ns->rx_oversize);
3166         ice_stat_update_32(hw, GLPRT_RJC(hw->port_info->lport),
3167                            pf->offset_loaded, &os->rx_jabber,
3168                            &ns->rx_jabber);
3169         ice_stat_update_40(hw, GLPRT_PTC64H(hw->port_info->lport),
3170                            GLPRT_PTC64L(hw->port_info->lport),
3171                            pf->offset_loaded, &os->tx_size_64,
3172                            &ns->tx_size_64);
3173         ice_stat_update_40(hw, GLPRT_PTC127H(hw->port_info->lport),
3174                            GLPRT_PTC127L(hw->port_info->lport),
3175                            pf->offset_loaded, &os->tx_size_127,
3176                            &ns->tx_size_127);
3177         ice_stat_update_40(hw, GLPRT_PTC255H(hw->port_info->lport),
3178                            GLPRT_PTC255L(hw->port_info->lport),
3179                            pf->offset_loaded, &os->tx_size_255,
3180                            &ns->tx_size_255);
3181         ice_stat_update_40(hw, GLPRT_PTC511H(hw->port_info->lport),
3182                            GLPRT_PTC511L(hw->port_info->lport),
3183                            pf->offset_loaded, &os->tx_size_511,
3184                            &ns->tx_size_511);
3185         ice_stat_update_40(hw, GLPRT_PTC1023H(hw->port_info->lport),
3186                            GLPRT_PTC1023L(hw->port_info->lport),
3187                            pf->offset_loaded, &os->tx_size_1023,
3188                            &ns->tx_size_1023);
3189         ice_stat_update_40(hw, GLPRT_PTC1522H(hw->port_info->lport),
3190                            GLPRT_PTC1522L(hw->port_info->lport),
3191                            pf->offset_loaded, &os->tx_size_1522,
3192                            &ns->tx_size_1522);
3193         ice_stat_update_40(hw, GLPRT_PTC9522H(hw->port_info->lport),
3194                            GLPRT_PTC9522L(hw->port_info->lport),
3195                            pf->offset_loaded, &os->tx_size_big,
3196                            &ns->tx_size_big);
3197
3198         /* GLPRT_MSPDC not supported */
3199         /* GLPRT_XEC not supported */
3200
3201         pf->offset_loaded = true;
3202
3203         if (pf->main_vsi)
3204                 ice_update_vsi_stats(pf->main_vsi);
3205 }
3206
3207 /* Get all statistics of a port */
3208 static int
3209 ice_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
3210 {
3211         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3212         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3213         struct ice_hw_port_stats *ns = &pf->stats; /* new stats */
3214
3215         /* call read registers - updates values, now write them to struct */
3216         ice_read_stats_registers(pf, hw);
3217
3218         stats->ipackets = ns->eth.rx_unicast +
3219                           ns->eth.rx_multicast +
3220                           ns->eth.rx_broadcast -
3221                           ns->eth.rx_discards -
3222                           pf->main_vsi->eth_stats.rx_discards;
3223         stats->opackets = ns->eth.tx_unicast +
3224                           ns->eth.tx_multicast +
3225                           ns->eth.tx_broadcast;
3226         stats->ibytes   = ns->eth.rx_bytes;
3227         stats->obytes   = ns->eth.tx_bytes;
3228         stats->oerrors  = ns->eth.tx_errors +
3229                           pf->main_vsi->eth_stats.tx_errors;
3230
3231         /* Rx Errors */
3232         stats->imissed  = ns->eth.rx_discards +
3233                           pf->main_vsi->eth_stats.rx_discards;
3234         stats->ierrors  = ns->crc_errors +
3235                           ns->rx_undersize +
3236                           ns->rx_oversize + ns->rx_fragments + ns->rx_jabber;
3237
3238         PMD_DRV_LOG(DEBUG, "*************** PF stats start *****************");
3239         PMD_DRV_LOG(DEBUG, "rx_bytes:   %"PRIu64"", ns->eth.rx_bytes);
3240         PMD_DRV_LOG(DEBUG, "rx_unicast: %"PRIu64"", ns->eth.rx_unicast);
3241         PMD_DRV_LOG(DEBUG, "rx_multicast:%"PRIu64"", ns->eth.rx_multicast);
3242         PMD_DRV_LOG(DEBUG, "rx_broadcast:%"PRIu64"", ns->eth.rx_broadcast);
3243         PMD_DRV_LOG(DEBUG, "rx_discards:%"PRIu64"", ns->eth.rx_discards);
3244         PMD_DRV_LOG(DEBUG, "vsi rx_discards:%"PRIu64"",
3245                     pf->main_vsi->eth_stats.rx_discards);
3246         PMD_DRV_LOG(DEBUG, "rx_unknown_protocol:  %"PRIu64"",
3247                     ns->eth.rx_unknown_protocol);
3248         PMD_DRV_LOG(DEBUG, "tx_bytes:   %"PRIu64"", ns->eth.tx_bytes);
3249         PMD_DRV_LOG(DEBUG, "tx_unicast: %"PRIu64"", ns->eth.tx_unicast);
3250         PMD_DRV_LOG(DEBUG, "tx_multicast:%"PRIu64"", ns->eth.tx_multicast);
3251         PMD_DRV_LOG(DEBUG, "tx_broadcast:%"PRIu64"", ns->eth.tx_broadcast);
3252         PMD_DRV_LOG(DEBUG, "tx_discards:%"PRIu64"", ns->eth.tx_discards);
3253         PMD_DRV_LOG(DEBUG, "vsi tx_discards:%"PRIu64"",
3254                     pf->main_vsi->eth_stats.tx_discards);
3255         PMD_DRV_LOG(DEBUG, "tx_errors:          %"PRIu64"", ns->eth.tx_errors);
3256
3257         PMD_DRV_LOG(DEBUG, "tx_dropped_link_down:       %"PRIu64"",
3258                     ns->tx_dropped_link_down);
3259         PMD_DRV_LOG(DEBUG, "crc_errors: %"PRIu64"", ns->crc_errors);
3260         PMD_DRV_LOG(DEBUG, "illegal_bytes:      %"PRIu64"",
3261                     ns->illegal_bytes);
3262         PMD_DRV_LOG(DEBUG, "error_bytes:        %"PRIu64"", ns->error_bytes);
3263         PMD_DRV_LOG(DEBUG, "mac_local_faults:   %"PRIu64"",
3264                     ns->mac_local_faults);
3265         PMD_DRV_LOG(DEBUG, "mac_remote_faults:  %"PRIu64"",
3266                     ns->mac_remote_faults);
3267         PMD_DRV_LOG(DEBUG, "link_xon_rx:        %"PRIu64"", ns->link_xon_rx);
3268         PMD_DRV_LOG(DEBUG, "link_xoff_rx:       %"PRIu64"", ns->link_xoff_rx);
3269         PMD_DRV_LOG(DEBUG, "link_xon_tx:        %"PRIu64"", ns->link_xon_tx);
3270         PMD_DRV_LOG(DEBUG, "link_xoff_tx:       %"PRIu64"", ns->link_xoff_tx);
3271         PMD_DRV_LOG(DEBUG, "rx_size_64:         %"PRIu64"", ns->rx_size_64);
3272         PMD_DRV_LOG(DEBUG, "rx_size_127:        %"PRIu64"", ns->rx_size_127);
3273         PMD_DRV_LOG(DEBUG, "rx_size_255:        %"PRIu64"", ns->rx_size_255);
3274         PMD_DRV_LOG(DEBUG, "rx_size_511:        %"PRIu64"", ns->rx_size_511);
3275         PMD_DRV_LOG(DEBUG, "rx_size_1023:       %"PRIu64"", ns->rx_size_1023);
3276         PMD_DRV_LOG(DEBUG, "rx_size_1522:       %"PRIu64"", ns->rx_size_1522);
3277         PMD_DRV_LOG(DEBUG, "rx_size_big:        %"PRIu64"", ns->rx_size_big);
3278         PMD_DRV_LOG(DEBUG, "rx_undersize:       %"PRIu64"", ns->rx_undersize);
3279         PMD_DRV_LOG(DEBUG, "rx_fragments:       %"PRIu64"", ns->rx_fragments);
3280         PMD_DRV_LOG(DEBUG, "rx_oversize:        %"PRIu64"", ns->rx_oversize);
3281         PMD_DRV_LOG(DEBUG, "rx_jabber:          %"PRIu64"", ns->rx_jabber);
3282         PMD_DRV_LOG(DEBUG, "tx_size_64:         %"PRIu64"", ns->tx_size_64);
3283         PMD_DRV_LOG(DEBUG, "tx_size_127:        %"PRIu64"", ns->tx_size_127);
3284         PMD_DRV_LOG(DEBUG, "tx_size_255:        %"PRIu64"", ns->tx_size_255);
3285         PMD_DRV_LOG(DEBUG, "tx_size_511:        %"PRIu64"", ns->tx_size_511);
3286         PMD_DRV_LOG(DEBUG, "tx_size_1023:       %"PRIu64"", ns->tx_size_1023);
3287         PMD_DRV_LOG(DEBUG, "tx_size_1522:       %"PRIu64"", ns->tx_size_1522);
3288         PMD_DRV_LOG(DEBUG, "tx_size_big:        %"PRIu64"", ns->tx_size_big);
3289         PMD_DRV_LOG(DEBUG, "rx_len_errors:      %"PRIu64"", ns->rx_len_errors);
3290         PMD_DRV_LOG(DEBUG, "************* PF stats end ****************");
3291         return 0;
3292 }
3293
3294 /* Reset the statistics */
3295 static void
3296 ice_stats_reset(struct rte_eth_dev *dev)
3297 {
3298         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3299         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3300
3301         /* Mark PF and VSI stats to update the offset, aka "reset" */
3302         pf->offset_loaded = false;
3303         if (pf->main_vsi)
3304                 pf->main_vsi->offset_loaded = false;
3305
3306         /* read the stats, reading current register values into offset */
3307         ice_read_stats_registers(pf, hw);
3308 }
3309
3310 static uint32_t
3311 ice_xstats_calc_num(void)
3312 {
3313         uint32_t num;
3314
3315         num = ICE_NB_ETH_XSTATS + ICE_NB_HW_PORT_XSTATS;
3316
3317         return num;
3318 }
3319
3320 static int
3321 ice_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
3322                unsigned int n)
3323 {
3324         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3325         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3326         unsigned int i;
3327         unsigned int count;
3328         struct ice_hw_port_stats *hw_stats = &pf->stats;
3329
3330         count = ice_xstats_calc_num();
3331         if (n < count)
3332                 return count;
3333
3334         ice_read_stats_registers(pf, hw);
3335
3336         if (!xstats)
3337                 return 0;
3338
3339         count = 0;
3340
3341         /* Get stats from ice_eth_stats struct */
3342         for (i = 0; i < ICE_NB_ETH_XSTATS; i++) {
3343                 xstats[count].value =
3344                         *(uint64_t *)((char *)&hw_stats->eth +
3345                                       ice_stats_strings[i].offset);
3346                 xstats[count].id = count;
3347                 count++;
3348         }
3349
3350         /* Get individiual stats from ice_hw_port struct */
3351         for (i = 0; i < ICE_NB_HW_PORT_XSTATS; i++) {
3352                 xstats[count].value =
3353                         *(uint64_t *)((char *)hw_stats +
3354                                       ice_hw_port_strings[i].offset);
3355                 xstats[count].id = count;
3356                 count++;
3357         }
3358
3359         return count;
3360 }
3361
3362 static int ice_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
3363                                 struct rte_eth_xstat_name *xstats_names,
3364                                 __rte_unused unsigned int limit)
3365 {
3366         unsigned int count = 0;
3367         unsigned int i;
3368
3369         if (!xstats_names)
3370                 return ice_xstats_calc_num();
3371
3372         /* Note: limit checked in rte_eth_xstats_names() */
3373
3374         /* Get stats from ice_eth_stats struct */
3375         for (i = 0; i < ICE_NB_ETH_XSTATS; i++) {
3376                 snprintf(xstats_names[count].name,
3377                          sizeof(xstats_names[count].name),
3378                          "%s", ice_stats_strings[i].name);
3379                 count++;
3380         }
3381
3382         /* Get individiual stats from ice_hw_port struct */
3383         for (i = 0; i < ICE_NB_HW_PORT_XSTATS; i++) {
3384                 snprintf(xstats_names[count].name,
3385                          sizeof(xstats_names[count].name),
3386                          "%s", ice_hw_port_strings[i].name);
3387                 count++;
3388         }
3389
3390         return count;
3391 }
3392
3393 static int
3394 ice_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
3395               struct rte_pci_device *pci_dev)
3396 {
3397         return rte_eth_dev_pci_generic_probe(pci_dev,
3398                                              sizeof(struct ice_adapter),
3399                                              ice_dev_init);
3400 }
3401
3402 static int
3403 ice_pci_remove(struct rte_pci_device *pci_dev)
3404 {
3405         return rte_eth_dev_pci_generic_remove(pci_dev, ice_dev_uninit);
3406 }
3407
/* PCI driver definition for the ice PMD, registered with the EAL below. */
static struct rte_pci_driver rte_ice_pmd = {
	.id_table = pci_id_ice_map,
	/* Needs BARs mapped, supports link-status interrupts and IOVA-as-VA */
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
		     RTE_PCI_DRV_IOVA_AS_VA,
	.probe = ice_pci_probe,
	.remove = ice_pci_remove,
};
3415
3416 /**
3417  * Driver initialization routine.
3418  * Invoked once at EAL init time.
3419  * Register itself as the [Poll Mode] Driver of PCI devices.
3420  */
3421 RTE_PMD_REGISTER_PCI(net_ice, rte_ice_pmd);
3422 RTE_PMD_REGISTER_PCI_TABLE(net_ice, pci_id_ice_map);
3423 RTE_PMD_REGISTER_KMOD_DEP(net_ice, "* igb_uio | uio_pci_generic | vfio-pci");
3424 RTE_PMD_REGISTER_PARAM_STRING(net_ice,
3425                               ICE_MAX_QP_NUM "=<int>");
3426
3427 RTE_INIT(ice_init_log)
3428 {
3429         ice_logtype_init = rte_log_register("pmd.net.ice.init");
3430         if (ice_logtype_init >= 0)
3431                 rte_log_set_level(ice_logtype_init, RTE_LOG_NOTICE);
3432         ice_logtype_driver = rte_log_register("pmd.net.ice.driver");
3433         if (ice_logtype_driver >= 0)
3434                 rte_log_set_level(ice_logtype_driver, RTE_LOG_NOTICE);
3435 }