net/ice: fix speed capability
[dpdk.git] / drivers / net / ice / ice_ethdev.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018 Intel Corporation
3  */
4
5 #include <rte_ethdev_pci.h>
6
7 #include <stdio.h>
8 #include <sys/types.h>
9 #include <sys/stat.h>
10 #include <unistd.h>
11
12 #include "base/ice_sched.h"
13 #include "base/ice_flow.h"
14 #include "ice_ethdev.h"
15 #include "ice_rxtx.h"
16
17 #define ICE_MAX_QP_NUM "max_queue_pair_num"
18 #define ICE_DFLT_OUTER_TAG_TYPE ICE_AQ_VSI_OUTER_TAG_VLAN_9100
19 #define ICE_DFLT_PKG_FILE "/lib/firmware/intel/ice/ddp/ice.pkg"
20
21 int ice_logtype_init;
22 int ice_logtype_driver;
23
24 static int ice_dev_configure(struct rte_eth_dev *dev);
25 static int ice_dev_start(struct rte_eth_dev *dev);
26 static void ice_dev_stop(struct rte_eth_dev *dev);
27 static void ice_dev_close(struct rte_eth_dev *dev);
28 static int ice_dev_reset(struct rte_eth_dev *dev);
29 static void ice_dev_info_get(struct rte_eth_dev *dev,
30                              struct rte_eth_dev_info *dev_info);
31 static int ice_link_update(struct rte_eth_dev *dev,
32                            int wait_to_complete);
33 static int ice_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
34 static int ice_vlan_offload_set(struct rte_eth_dev *dev, int mask);
35 static int ice_vlan_tpid_set(struct rte_eth_dev *dev,
36                              enum rte_vlan_type vlan_type,
37                              uint16_t tpid);
38 static int ice_rss_reta_update(struct rte_eth_dev *dev,
39                                struct rte_eth_rss_reta_entry64 *reta_conf,
40                                uint16_t reta_size);
41 static int ice_rss_reta_query(struct rte_eth_dev *dev,
42                               struct rte_eth_rss_reta_entry64 *reta_conf,
43                               uint16_t reta_size);
44 static int ice_rss_hash_update(struct rte_eth_dev *dev,
45                                struct rte_eth_rss_conf *rss_conf);
46 static int ice_rss_hash_conf_get(struct rte_eth_dev *dev,
47                                  struct rte_eth_rss_conf *rss_conf);
48 static void ice_promisc_enable(struct rte_eth_dev *dev);
49 static void ice_promisc_disable(struct rte_eth_dev *dev);
50 static void ice_allmulti_enable(struct rte_eth_dev *dev);
51 static void ice_allmulti_disable(struct rte_eth_dev *dev);
52 static int ice_vlan_filter_set(struct rte_eth_dev *dev,
53                                uint16_t vlan_id,
54                                int on);
55 static int ice_macaddr_set(struct rte_eth_dev *dev,
56                            struct ether_addr *mac_addr);
57 static int ice_macaddr_add(struct rte_eth_dev *dev,
58                            struct ether_addr *mac_addr,
59                            __rte_unused uint32_t index,
60                            uint32_t pool);
61 static void ice_macaddr_remove(struct rte_eth_dev *dev, uint32_t index);
62 static int ice_rx_queue_intr_enable(struct rte_eth_dev *dev,
63                                     uint16_t queue_id);
64 static int ice_rx_queue_intr_disable(struct rte_eth_dev *dev,
65                                      uint16_t queue_id);
66 static int ice_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
67                               size_t fw_size);
68 static int ice_vlan_pvid_set(struct rte_eth_dev *dev,
69                              uint16_t pvid, int on);
70 static int ice_get_eeprom_length(struct rte_eth_dev *dev);
71 static int ice_get_eeprom(struct rte_eth_dev *dev,
72                           struct rte_dev_eeprom_info *eeprom);
73 static int ice_stats_get(struct rte_eth_dev *dev,
74                          struct rte_eth_stats *stats);
75 static void ice_stats_reset(struct rte_eth_dev *dev);
76 static int ice_xstats_get(struct rte_eth_dev *dev,
77                           struct rte_eth_xstat *xstats, unsigned int n);
78 static int ice_xstats_get_names(struct rte_eth_dev *dev,
79                                 struct rte_eth_xstat_name *xstats_names,
80                                 unsigned int limit);
81
/* PCI IDs (E810-C family) this PMD binds to; the zeroed vendor_id
 * entry terminates the table for the PCI probe scan.
 */
static const struct rte_pci_id pci_id_ice_map[] = {
        { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_BACKPLANE) },
        { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP) },
        { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP) },
        { .vendor_id = 0, /* sentinel */ },
};
88
/* ethdev ops table wiring the generic rte_ethdev API to the ice
 * implementations above / in ice_rxtx.c.
 */
static const struct eth_dev_ops ice_eth_dev_ops = {
        .dev_configure                = ice_dev_configure,
        .dev_start                    = ice_dev_start,
        .dev_stop                     = ice_dev_stop,
        .dev_close                    = ice_dev_close,
        .dev_reset                    = ice_dev_reset,
        .rx_queue_start               = ice_rx_queue_start,
        .rx_queue_stop                = ice_rx_queue_stop,
        .tx_queue_start               = ice_tx_queue_start,
        .tx_queue_stop                = ice_tx_queue_stop,
        .rx_queue_setup               = ice_rx_queue_setup,
        .rx_queue_release             = ice_rx_queue_release,
        .tx_queue_setup               = ice_tx_queue_setup,
        .tx_queue_release             = ice_tx_queue_release,
        .dev_infos_get                = ice_dev_info_get,
        .dev_supported_ptypes_get     = ice_dev_supported_ptypes_get,
        .link_update                  = ice_link_update,
        .mtu_set                      = ice_mtu_set,
        .mac_addr_set                 = ice_macaddr_set,
        .mac_addr_add                 = ice_macaddr_add,
        .mac_addr_remove              = ice_macaddr_remove,
        .vlan_filter_set              = ice_vlan_filter_set,
        .vlan_offload_set             = ice_vlan_offload_set,
        .vlan_tpid_set                = ice_vlan_tpid_set,
        .reta_update                  = ice_rss_reta_update,
        .reta_query                   = ice_rss_reta_query,
        .rss_hash_update              = ice_rss_hash_update,
        .rss_hash_conf_get            = ice_rss_hash_conf_get,
        .promiscuous_enable           = ice_promisc_enable,
        .promiscuous_disable          = ice_promisc_disable,
        .allmulticast_enable          = ice_allmulti_enable,
        .allmulticast_disable         = ice_allmulti_disable,
        .rx_queue_intr_enable         = ice_rx_queue_intr_enable,
        .rx_queue_intr_disable        = ice_rx_queue_intr_disable,
        .fw_version_get               = ice_fw_version_get,
        .vlan_pvid_set                = ice_vlan_pvid_set,
        .rxq_info_get                 = ice_rxq_info_get,
        .txq_info_get                 = ice_txq_info_get,
        .get_eeprom_length            = ice_get_eeprom_length,
        .get_eeprom                   = ice_get_eeprom,
        .rx_queue_count               = ice_rx_queue_count,
        .rx_descriptor_status         = ice_rx_descriptor_status,
        .tx_descriptor_status         = ice_tx_descriptor_status,
        .stats_get                    = ice_stats_get,
        .stats_reset                  = ice_stats_reset,
        .xstats_get                   = ice_xstats_get,
        .xstats_get_names             = ice_xstats_get_names,
        /* xstats share the same reset handler as the basic stats */
        .xstats_reset                 = ice_stats_reset,
};
138
/* store statistics names and its offset in stats structure */
struct ice_xstats_name_off {
        char name[RTE_ETH_XSTATS_NAME_SIZE];    /* xstat name exposed to the application */
        unsigned int offset;                    /* byte offset of the counter in its stats struct */
};
144
/* Per-VSI ethernet counters: maps each xstat name to its byte offset
 * within struct ice_eth_stats (offsets consumed by ice_xstats_get).
 */
static const struct ice_xstats_name_off ice_stats_strings[] = {
        {"rx_unicast_packets", offsetof(struct ice_eth_stats, rx_unicast)},
        {"rx_multicast_packets", offsetof(struct ice_eth_stats, rx_multicast)},
        {"rx_broadcast_packets", offsetof(struct ice_eth_stats, rx_broadcast)},
        {"rx_dropped", offsetof(struct ice_eth_stats, rx_discards)},
        {"rx_unknown_protocol_packets", offsetof(struct ice_eth_stats,
                rx_unknown_protocol)},
        {"tx_unicast_packets", offsetof(struct ice_eth_stats, tx_unicast)},
        {"tx_multicast_packets", offsetof(struct ice_eth_stats, tx_multicast)},
        {"tx_broadcast_packets", offsetof(struct ice_eth_stats, tx_broadcast)},
        {"tx_dropped", offsetof(struct ice_eth_stats, tx_discards)},
};

/* Number of per-VSI xstats entries above */
#define ICE_NB_ETH_XSTATS (sizeof(ice_stats_strings) / \
                sizeof(ice_stats_strings[0]))
160
/* Per-port hardware counters: maps each xstat name to its byte offset
 * within struct ice_hw_port_stats.
 */
static const struct ice_xstats_name_off ice_hw_port_strings[] = {
        {"tx_link_down_dropped", offsetof(struct ice_hw_port_stats,
                tx_dropped_link_down)},
        {"rx_crc_errors", offsetof(struct ice_hw_port_stats, crc_errors)},
        {"rx_illegal_byte_errors", offsetof(struct ice_hw_port_stats,
                illegal_bytes)},
        {"rx_error_bytes", offsetof(struct ice_hw_port_stats, error_bytes)},
        {"mac_local_errors", offsetof(struct ice_hw_port_stats,
                mac_local_faults)},
        {"mac_remote_errors", offsetof(struct ice_hw_port_stats,
                mac_remote_faults)},
        {"rx_len_errors", offsetof(struct ice_hw_port_stats,
                rx_len_errors)},
        {"tx_xon_packets", offsetof(struct ice_hw_port_stats, link_xon_tx)},
        {"rx_xon_packets", offsetof(struct ice_hw_port_stats, link_xon_rx)},
        {"tx_xoff_packets", offsetof(struct ice_hw_port_stats, link_xoff_tx)},
        {"rx_xoff_packets", offsetof(struct ice_hw_port_stats, link_xoff_rx)},
        {"rx_size_64_packets", offsetof(struct ice_hw_port_stats, rx_size_64)},
        {"rx_size_65_to_127_packets", offsetof(struct ice_hw_port_stats,
                rx_size_127)},
        {"rx_size_128_to_255_packets", offsetof(struct ice_hw_port_stats,
                rx_size_255)},
        {"rx_size_256_to_511_packets", offsetof(struct ice_hw_port_stats,
                rx_size_511)},
        {"rx_size_512_to_1023_packets", offsetof(struct ice_hw_port_stats,
                rx_size_1023)},
        {"rx_size_1024_to_1522_packets", offsetof(struct ice_hw_port_stats,
                rx_size_1522)},
        {"rx_size_1523_to_max_packets", offsetof(struct ice_hw_port_stats,
                rx_size_big)},
        {"rx_undersized_errors", offsetof(struct ice_hw_port_stats,
                rx_undersize)},
        {"rx_oversize_errors", offsetof(struct ice_hw_port_stats,
                rx_oversize)},
        {"rx_mac_short_pkt_dropped", offsetof(struct ice_hw_port_stats,
                mac_short_pkt_dropped)},
        {"rx_fragmented_errors", offsetof(struct ice_hw_port_stats,
                rx_fragments)},
        {"rx_jabber_errors", offsetof(struct ice_hw_port_stats, rx_jabber)},
        {"tx_size_64_packets", offsetof(struct ice_hw_port_stats, tx_size_64)},
        {"tx_size_65_to_127_packets", offsetof(struct ice_hw_port_stats,
                tx_size_127)},
        {"tx_size_128_to_255_packets", offsetof(struct ice_hw_port_stats,
                tx_size_255)},
        {"tx_size_256_to_511_packets", offsetof(struct ice_hw_port_stats,
                tx_size_511)},
        {"tx_size_512_to_1023_packets", offsetof(struct ice_hw_port_stats,
                tx_size_1023)},
        {"tx_size_1024_to_1522_packets", offsetof(struct ice_hw_port_stats,
                tx_size_1522)},
        {"tx_size_1523_to_max_packets", offsetof(struct ice_hw_port_stats,
                tx_size_big)},
};

/* Number of per-port xstats entries above */
#define ICE_NB_HW_PORT_XSTATS (sizeof(ice_hw_port_strings) / \
                sizeof(ice_hw_port_strings[0]))
217
218 static void
219 ice_init_controlq_parameter(struct ice_hw *hw)
220 {
221         /* fields for adminq */
222         hw->adminq.num_rq_entries = ICE_ADMINQ_LEN;
223         hw->adminq.num_sq_entries = ICE_ADMINQ_LEN;
224         hw->adminq.rq_buf_size = ICE_ADMINQ_BUF_SZ;
225         hw->adminq.sq_buf_size = ICE_ADMINQ_BUF_SZ;
226
227         /* fields for mailboxq, DPDK used as PF host */
228         hw->mailboxq.num_rq_entries = ICE_MAILBOXQ_LEN;
229         hw->mailboxq.num_sq_entries = ICE_MAILBOXQ_LEN;
230         hw->mailboxq.rq_buf_size = ICE_MAILBOXQ_BUF_SZ;
231         hw->mailboxq.sq_buf_size = ICE_MAILBOXQ_BUF_SZ;
232 }
233
234 static int
235 ice_check_qp_num(const char *key, const char *qp_value,
236                  __rte_unused void *opaque)
237 {
238         char *end = NULL;
239         int num = 0;
240
241         while (isblank(*qp_value))
242                 qp_value++;
243
244         num = strtoul(qp_value, &end, 10);
245
246         if (!num || (*end == '-') || errno) {
247                 PMD_DRV_LOG(WARNING, "invalid value:\"%s\" for key:\"%s\", "
248                             "value must be > 0",
249                             qp_value, key);
250                 return -1;
251         }
252
253         return num;
254 }
255
256 static int
257 ice_config_max_queue_pair_num(struct rte_devargs *devargs)
258 {
259         struct rte_kvargs *kvlist;
260         const char *queue_num_key = ICE_MAX_QP_NUM;
261         int ret;
262
263         if (!devargs)
264                 return 0;
265
266         kvlist = rte_kvargs_parse(devargs->args, NULL);
267         if (!kvlist)
268                 return 0;
269
270         if (!rte_kvargs_count(kvlist, queue_num_key)) {
271                 rte_kvargs_free(kvlist);
272                 return 0;
273         }
274
275         if (rte_kvargs_process(kvlist, queue_num_key,
276                                ice_check_qp_num, NULL) < 0) {
277                 rte_kvargs_free(kvlist);
278                 return 0;
279         }
280         ret = rte_kvargs_process(kvlist, queue_num_key,
281                                  ice_check_qp_num, NULL);
282         rte_kvargs_free(kvlist);
283
284         return ret;
285 }
286
287 static int
288 ice_res_pool_init(struct ice_res_pool_info *pool, uint32_t base,
289                   uint32_t num)
290 {
291         struct pool_entry *entry;
292
293         if (!pool || !num)
294                 return -EINVAL;
295
296         entry = rte_zmalloc(NULL, sizeof(*entry), 0);
297         if (!entry) {
298                 PMD_INIT_LOG(ERR,
299                              "Failed to allocate memory for resource pool");
300                 return -ENOMEM;
301         }
302
303         /* queue heap initialize */
304         pool->num_free = num;
305         pool->num_alloc = 0;
306         pool->base = base;
307         LIST_INIT(&pool->alloc_list);
308         LIST_INIT(&pool->free_list);
309
310         /* Initialize element  */
311         entry->base = 0;
312         entry->len = num;
313
314         LIST_INSERT_HEAD(&pool->free_list, entry, next);
315         return 0;
316 }
317
/**
 * Allocate 'num' contiguous resources from the pool using best-fit.
 *
 * @param pool  pool previously set up by ice_res_pool_init()
 * @param num   number of contiguous resources requested
 *
 * @return the absolute base index (pool->base + offset) of the
 *         allocated range, or -EINVAL / -ENOMEM on failure.
 */
static int
ice_res_pool_alloc(struct ice_res_pool_info *pool,
                   uint16_t num)
{
        struct pool_entry *entry, *valid_entry;

        if (!pool || !num) {
                PMD_INIT_LOG(ERR, "Invalid parameter");
                return -EINVAL;
        }

        if (pool->num_free < num) {
                PMD_INIT_LOG(ERR, "No resource. ask:%u, available:%u",
                             num, pool->num_free);
                return -ENOMEM;
        }

        valid_entry = NULL;
        /* Best-fit scan of the free list: an exact-size entry wins
         * immediately, otherwise keep the smallest entry that still fits.
         */
        LIST_FOREACH(entry, &pool->free_list, next) {
                if (entry->len >= num) {
                        /* Find best one */
                        if (entry->len == num) {
                                valid_entry = entry;
                                break;
                        }
                        if (!valid_entry ||
                            valid_entry->len > entry->len)
                                valid_entry = entry;
                }
        }

        /* Not find one to satisfy the request, return */
        if (!valid_entry) {
                PMD_INIT_LOG(ERR, "No valid entry found");
                return -ENOMEM;
        }
        /**
         * The entry has exactly the requested length: unlink it from
         * the free list and reuse the node itself on the alloc list.
         */
        if (valid_entry->len == num) {
                LIST_REMOVE(valid_entry, next);
        } else {
                /**
                 * The entry is larger than requested: carve a new node
                 * for the alloc list and shrink the free-list entry by
                 * advancing its base and reducing its length.
                 */
                entry = rte_zmalloc(NULL, sizeof(*entry), 0);
                if (!entry) {
                        PMD_INIT_LOG(ERR,
                                     "Failed to allocate memory for "
                                     "resource pool");
                        return -ENOMEM;
                }
                entry->base = valid_entry->base;
                entry->len = num;
                valid_entry->base += num;
                valid_entry->len -= num;
                valid_entry = entry;
        }

        /* Insert it into alloc list, not sorted */
        LIST_INSERT_HEAD(&pool->alloc_list, valid_entry, next);

        pool->num_free -= valid_entry->len;
        pool->num_alloc += valid_entry->len;

        /* Convert the pool-relative offset into an absolute index */
        return valid_entry->base + pool->base;
}
389
390 static void
391 ice_res_pool_destroy(struct ice_res_pool_info *pool)
392 {
393         struct pool_entry *entry, *next_entry;
394
395         if (!pool)
396                 return;
397
398         for (entry = LIST_FIRST(&pool->alloc_list);
399              entry && (next_entry = LIST_NEXT(entry, next), 1);
400              entry = next_entry) {
401                 LIST_REMOVE(entry, next);
402                 rte_free(entry);
403         }
404
405         for (entry = LIST_FIRST(&pool->free_list);
406              entry && (next_entry = LIST_NEXT(entry, next), 1);
407              entry = next_entry) {
408                 LIST_REMOVE(entry, next);
409                 rte_free(entry);
410         }
411
412         pool->num_free = 0;
413         pool->num_alloc = 0;
414         pool->base = 0;
415         LIST_INIT(&pool->alloc_list);
416         LIST_INIT(&pool->free_list);
417 }
418
/**
 * Fill the RSS-related fields of a VSI context with driver defaults:
 * VSI-specific LUT, Toeplitz hash scheme, and TC override enabled.
 */
static void
ice_vsi_config_default_rss(struct ice_aqc_vsi_props *info)
{
        /* Set VSI LUT selection */
        info->q_opt_rss = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI &
                          ICE_AQ_VSI_Q_OPT_RSS_LUT_M;
        /* Set Hash scheme (Toeplitz) */
        info->q_opt_rss |= ICE_AQ_VSI_Q_OPT_RSS_TPLZ &
                           ICE_AQ_VSI_Q_OPT_RSS_HASH_M;
        /* enable TC override */
        info->q_opt_tc = ICE_AQ_VSI_Q_OPT_TC_OVR_M;
}
431
/**
 * Configure the TC/queue mapping section of a VSI context.
 * Only TC0 is supported; all of the VSI's queues are assigned to it
 * as one contiguous range starting at vsi->base_queue.
 *
 * @return 0 on success, -ENOTSUP when a TC other than TC0 is enabled.
 */
static enum ice_status
ice_vsi_config_tc_queue_mapping(struct ice_vsi *vsi,
                                struct ice_aqc_vsi_props *info,
                                uint8_t enabled_tcmap)
{
        uint16_t bsf, qp_idx;

        /* default tc 0 now. Multi-TC supporting need to be done later.
         * Configure TC and queue mapping parameters, for enabled TC,
         * allocate qpnum_per_tc queues to this traffic.
         */
        if (enabled_tcmap != 0x01) {
                PMD_INIT_LOG(ERR, "only TC0 is supported");
                return -ENOTSUP;
        }

        vsi->nb_qps = RTE_MIN(vsi->nb_qps, ICE_MAX_Q_PER_TC);
        /* NOTE(review): rte_bsf32 returns the LOWEST set bit, so a
         * non-power-of-2 nb_qps is reduced to its lowest set bit
         * (e.g. 6 -> 2), not the nearest power of two — confirm this
         * rounding is intended.
         */
        bsf = rte_bsf32(vsi->nb_qps);
        /* Adjust the queue number to actual queues that can be applied */
        vsi->nb_qps = 0x1 << bsf;

        qp_idx = 0;
        /* Set tc and queue mapping with VSI: queue offset plus the
         * log2 queue count, packed into the TC0 mapping word.
         */
        info->tc_mapping[0] = rte_cpu_to_le_16((qp_idx <<
                                                ICE_AQ_VSI_TC_Q_OFFSET_S) |
                                               (bsf << ICE_AQ_VSI_TC_Q_NUM_S));

        /* Associate queue number with VSI (contiguous mapping:
         * q_mapping[0] = first queue, q_mapping[1] = queue count)
         */
        info->mapping_flags |= rte_cpu_to_le_16(ICE_AQ_VSI_Q_MAP_CONTIG);
        info->q_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
        info->q_mapping[1] = rte_cpu_to_le_16(vsi->nb_qps);
        info->valid_sections |=
                rte_cpu_to_le_16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
        /* Set the info.ingress_table and info.egress_table
         * for UP translate table. Now just set it to 1:1 map by default
         * -- 0b 111 110 101 100 011 010 001 000 == 0xFAC688
         */
#define ICE_TC_QUEUE_TABLE_DFLT 0x00FAC688
        info->ingress_table  = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT);
        info->egress_table   = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT);
        info->outer_up_table = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT);
        return 0;
}
475
/**
 * Validate the port's factory LAN MAC, mirror it into perm_addr, and
 * publish it as the single entry of dev->data->mac_addrs.
 *
 * @return 0 on success, -EINVAL for a non-unicast MAC, -ENOMEM when
 *         the mac_addrs array cannot be allocated.
 */
static int
ice_init_mac_address(struct rte_eth_dev *dev)
{
        struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        if (!is_unicast_ether_addr
                ((struct ether_addr *)hw->port_info[0].mac.lan_addr)) {
                PMD_INIT_LOG(ERR, "Invalid MAC address");
                return -EINVAL;
        }

        /* record the factory address as the permanent one */
        ether_addr_copy((struct ether_addr *)hw->port_info[0].mac.lan_addr,
                        (struct ether_addr *)hw->port_info[0].mac.perm_addr);

        /* room for exactly one MAC address in dev data */
        dev->data->mac_addrs = rte_zmalloc(NULL, sizeof(struct ether_addr), 0);
        if (!dev->data->mac_addrs) {
                PMD_INIT_LOG(ERR,
                             "Failed to allocate memory to store mac address");
                return -ENOMEM;
        }
        /* store it to dev data */
        ether_addr_copy((struct ether_addr *)hw->port_info[0].mac.perm_addr,
                        &dev->data->mac_addrs[0]);
        return 0;
}
501
502 /* Find out specific MAC filter */
503 static struct ice_mac_filter *
504 ice_find_mac_filter(struct ice_vsi *vsi, struct ether_addr *macaddr)
505 {
506         struct ice_mac_filter *f;
507
508         TAILQ_FOREACH(f, &vsi->mac_list, next) {
509                 if (is_same_ether_addr(macaddr, &f->mac_info.mac_addr))
510                         return f;
511         }
512
513         return NULL;
514 }
515
/**
 * Add a unicast MAC filter to the VSI: program it in hardware via the
 * shared switch code, then record it on the VSI's software mac_list.
 * Idempotent — an already-present address returns 0 without HW access.
 *
 * @return 0 on success (or duplicate), -ENOMEM on allocation failure,
 *         -EINVAL when the HW add fails.
 */
static int
ice_add_mac_filter(struct ice_vsi *vsi, struct ether_addr *mac_addr)
{
        struct ice_fltr_list_entry *m_list_itr = NULL;
        struct ice_mac_filter *f;
        struct LIST_HEAD_TYPE list_head;
        struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
        int ret = 0;

        /* If it's added and configured, return */
        f = ice_find_mac_filter(vsi, mac_addr);
        if (f) {
                PMD_DRV_LOG(INFO, "This MAC filter already exists.");
                return 0;
        }

        INIT_LIST_HEAD(&list_head);

        /* build a one-entry filter list for the shared code */
        m_list_itr = (struct ice_fltr_list_entry *)
                ice_malloc(hw, sizeof(*m_list_itr));
        if (!m_list_itr) {
                ret = -ENOMEM;
                goto DONE;
        }
        ice_memcpy(m_list_itr->fltr_info.l_data.mac.mac_addr,
                   mac_addr, ETH_ALEN, ICE_NONDMA_TO_NONDMA);
        m_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
        m_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
        m_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_MAC;
        m_list_itr->fltr_info.flag = ICE_FLTR_TX;
        m_list_itr->fltr_info.vsi_handle = vsi->idx;

        LIST_ADD(&m_list_itr->list_entry, &list_head);

        /* Add the mac */
        ret = ice_add_mac(hw, &list_head);
        if (ret != ICE_SUCCESS) {
                PMD_DRV_LOG(ERR, "Failed to add MAC filter");
                ret = -EINVAL;
                goto DONE;
        }
        /* Add the mac addr into mac list */
        f = rte_zmalloc(NULL, sizeof(*f), 0);
        if (!f) {
                /* NOTE(review): at this point the HW filter was added but
                 * is not tracked on mac_list — HW/SW state diverges on this
                 * path; consider rolling back with ice_remove_mac().
                 */
                PMD_DRV_LOG(ERR, "failed to allocate memory");
                ret = -ENOMEM;
                goto DONE;
        }
        rte_memcpy(&f->mac_info.mac_addr, mac_addr, ETH_ADDR_LEN);
        TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
        vsi->mac_num++;

        ret = 0;

DONE:
        /* the temporary list entry is freed on every path; free(NULL) ok */
        rte_free(m_list_itr);
        return ret;
}
574
/**
 * Remove a MAC filter from the VSI: delete it from hardware via the
 * shared switch code, then unlink and free its software list node.
 *
 * @return 0 on success, -EINVAL when the address is not on the list or
 *         the HW removal fails, -ENOMEM on allocation failure.
 */
static int
ice_remove_mac_filter(struct ice_vsi *vsi, struct ether_addr *mac_addr)
{
        struct ice_fltr_list_entry *m_list_itr = NULL;
        struct ice_mac_filter *f;
        struct LIST_HEAD_TYPE list_head;
        struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
        int ret = 0;

        /* Can't find it, return an error */
        f = ice_find_mac_filter(vsi, mac_addr);
        if (!f)
                return -EINVAL;

        INIT_LIST_HEAD(&list_head);

        /* build a one-entry filter list mirroring the add path */
        m_list_itr = (struct ice_fltr_list_entry *)
                ice_malloc(hw, sizeof(*m_list_itr));
        if (!m_list_itr) {
                ret = -ENOMEM;
                goto DONE;
        }
        ice_memcpy(m_list_itr->fltr_info.l_data.mac.mac_addr,
                   mac_addr, ETH_ALEN, ICE_NONDMA_TO_NONDMA);
        m_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
        m_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
        m_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_MAC;
        m_list_itr->fltr_info.flag = ICE_FLTR_TX;
        m_list_itr->fltr_info.vsi_handle = vsi->idx;

        LIST_ADD(&m_list_itr->list_entry, &list_head);

        /* remove the mac filter */
        ret = ice_remove_mac(hw, &list_head);
        if (ret != ICE_SUCCESS) {
                PMD_DRV_LOG(ERR, "Failed to remove MAC filter");
                ret = -EINVAL;
                goto DONE;
        }

        /* Remove the mac addr from mac list (frees the tracking node) */
        TAILQ_REMOVE(&vsi->mac_list, f, next);
        rte_free(f);
        vsi->mac_num--;

        ret = 0;
DONE:
        rte_free(m_list_itr);
        return ret;
}
625
626 /* Find out specific VLAN filter */
627 static struct ice_vlan_filter *
628 ice_find_vlan_filter(struct ice_vsi *vsi, uint16_t vlan_id)
629 {
630         struct ice_vlan_filter *f;
631
632         TAILQ_FOREACH(f, &vsi->vlan_list, next) {
633                 if (vlan_id == f->vlan_info.vlan_id)
634                         return f;
635         }
636
637         return NULL;
638 }
639
/**
 * Add a VLAN filter to the VSI: program it in hardware via the shared
 * switch code, then record it on the VSI's software vlan_list.
 * Idempotent, and a no-op while neither anti-spoof nor VLAN filtering
 * is enabled on the VSI.
 *
 * @return 0 on success / duplicate / filtering disabled, -EINVAL for a
 *         bad VSI or VLAN ID or a HW failure, -ENOMEM on allocation
 *         failure.
 */
static int
ice_add_vlan_filter(struct ice_vsi *vsi, uint16_t vlan_id)
{
        struct ice_fltr_list_entry *v_list_itr = NULL;
        struct ice_vlan_filter *f;
        struct LIST_HEAD_TYPE list_head;
        struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
        int ret = 0;

        if (!vsi || vlan_id > ETHER_MAX_VLAN_ID)
                return -EINVAL;

        /* If it's added and configured, return. */
        f = ice_find_vlan_filter(vsi, vlan_id);
        if (f) {
                PMD_DRV_LOG(INFO, "This VLAN filter already exists.");
                return 0;
        }

        /* nothing to program while VLAN filtering is not in use */
        if (!vsi->vlan_anti_spoof_on && !vsi->vlan_filter_on)
                return 0;

        INIT_LIST_HEAD(&list_head);

        /* build a one-entry filter list for the shared code */
        v_list_itr = (struct ice_fltr_list_entry *)
                      ice_malloc(hw, sizeof(*v_list_itr));
        if (!v_list_itr) {
                ret = -ENOMEM;
                goto DONE;
        }
        v_list_itr->fltr_info.l_data.vlan.vlan_id = vlan_id;
        v_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
        v_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
        v_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
        v_list_itr->fltr_info.flag = ICE_FLTR_TX;
        v_list_itr->fltr_info.vsi_handle = vsi->idx;

        LIST_ADD(&v_list_itr->list_entry, &list_head);

        /* Add the vlan */
        ret = ice_add_vlan(hw, &list_head);
        if (ret != ICE_SUCCESS) {
                PMD_DRV_LOG(ERR, "Failed to add VLAN filter");
                ret = -EINVAL;
                goto DONE;
        }

        /* Add vlan into vlan list */
        f = rte_zmalloc(NULL, sizeof(*f), 0);
        if (!f) {
                /* NOTE(review): HW filter added but not tracked on
                 * vlan_list on this path — HW/SW state diverges.
                 */
                PMD_DRV_LOG(ERR, "failed to allocate memory");
                ret = -ENOMEM;
                goto DONE;
        }
        f->vlan_info.vlan_id = vlan_id;
        TAILQ_INSERT_TAIL(&vsi->vlan_list, f, next);
        vsi->vlan_num++;

        ret = 0;

DONE:
        /* the temporary list entry is freed on every path; free(NULL) ok */
        rte_free(v_list_itr);
        return ret;
}
704
/**
 * Remove a VLAN filter from the VSI: delete it from hardware via the
 * shared switch code, then unlink and free its software list node.
 * VLAN 0 is rejected — it is the implicit filter for untagged traffic.
 *
 * @return 0 on success, -EINVAL for a bad VSI/VLAN ID, an absent
 *         filter, or a HW failure, -ENOMEM on allocation failure.
 */
static int
ice_remove_vlan_filter(struct ice_vsi *vsi, uint16_t vlan_id)
{
        struct ice_fltr_list_entry *v_list_itr = NULL;
        struct ice_vlan_filter *f;
        struct LIST_HEAD_TYPE list_head;
        struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
        int ret = 0;

        /**
         * Vlan 0 is the generic filter for untagged packets
         * and can't be removed.
         */
        if (!vsi || vlan_id == 0 || vlan_id > ETHER_MAX_VLAN_ID)
                return -EINVAL;

        /* Can't find it, return an error */
        f = ice_find_vlan_filter(vsi, vlan_id);
        if (!f)
                return -EINVAL;

        INIT_LIST_HEAD(&list_head);

        /* build a one-entry filter list mirroring the add path */
        v_list_itr = (struct ice_fltr_list_entry *)
                      ice_malloc(hw, sizeof(*v_list_itr));
        if (!v_list_itr) {
                ret = -ENOMEM;
                goto DONE;
        }

        v_list_itr->fltr_info.l_data.vlan.vlan_id = vlan_id;
        v_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
        v_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
        v_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
        v_list_itr->fltr_info.flag = ICE_FLTR_TX;
        v_list_itr->fltr_info.vsi_handle = vsi->idx;

        LIST_ADD(&v_list_itr->list_entry, &list_head);

        /* remove the vlan filter */
        ret = ice_remove_vlan(hw, &list_head);
        if (ret != ICE_SUCCESS) {
                PMD_DRV_LOG(ERR, "Failed to remove VLAN filter");
                ret = -EINVAL;
                goto DONE;
        }

        /* Remove the vlan id from vlan list (frees the tracking node) */
        TAILQ_REMOVE(&vsi->vlan_list, f, next);
        rte_free(f);
        vsi->vlan_num--;

        ret = 0;
DONE:
        rte_free(v_list_itr);
        return ret;
}
762
763 static int
764 ice_remove_all_mac_vlan_filters(struct ice_vsi *vsi)
765 {
766         struct ice_mac_filter *m_f;
767         struct ice_vlan_filter *v_f;
768         int ret = 0;
769
770         if (!vsi || !vsi->mac_num)
771                 return -EINVAL;
772
773         TAILQ_FOREACH(m_f, &vsi->mac_list, next) {
774                 ret = ice_remove_mac_filter(vsi, &m_f->mac_info.mac_addr);
775                 if (ret != ICE_SUCCESS) {
776                         ret = -EINVAL;
777                         goto DONE;
778                 }
779         }
780
781         if (vsi->vlan_num == 0)
782                 return 0;
783
784         TAILQ_FOREACH(v_f, &vsi->vlan_list, next) {
785                 ret = ice_remove_vlan_filter(vsi, v_f->vlan_info.vlan_id);
786                 if (ret != ICE_SUCCESS) {
787                         ret = -EINVAL;
788                         goto DONE;
789                 }
790         }
791
792 DONE:
793         return ret;
794 }
795
796 static int
797 ice_vsi_config_qinq_insertion(struct ice_vsi *vsi, bool on)
798 {
799         struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
800         struct ice_vsi_ctx ctxt;
801         uint8_t qinq_flags;
802         int ret = 0;
803
804         /* Check if it has been already on or off */
805         if (vsi->info.valid_sections &
806                 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID)) {
807                 if (on) {
808                         if ((vsi->info.outer_tag_flags &
809                              ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST) ==
810                             ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST)
811                                 return 0; /* already on */
812                 } else {
813                         if (!(vsi->info.outer_tag_flags &
814                               ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST))
815                                 return 0; /* already off */
816                 }
817         }
818
819         if (on)
820                 qinq_flags = ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST;
821         else
822                 qinq_flags = 0;
823         /* clear global insertion and use per packet insertion */
824         vsi->info.outer_tag_flags &= ~(ICE_AQ_VSI_OUTER_TAG_INSERT);
825         vsi->info.outer_tag_flags &= ~(ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST);
826         vsi->info.outer_tag_flags |= qinq_flags;
827         /* use default vlan type 0x8100 */
828         vsi->info.outer_tag_flags &= ~(ICE_AQ_VSI_OUTER_TAG_TYPE_M);
829         vsi->info.outer_tag_flags |= ICE_DFLT_OUTER_TAG_TYPE <<
830                                      ICE_AQ_VSI_OUTER_TAG_TYPE_S;
831         (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
832         ctxt.info.valid_sections =
833                         rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);
834         ctxt.vsi_num = vsi->vsi_id;
835         ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
836         if (ret) {
837                 PMD_DRV_LOG(INFO,
838                             "Update VSI failed to %s qinq stripping",
839                             on ? "enable" : "disable");
840                 return -EINVAL;
841         }
842
843         vsi->info.valid_sections |=
844                 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);
845
846         return ret;
847 }
848
849 static int
850 ice_vsi_config_qinq_stripping(struct ice_vsi *vsi, bool on)
851 {
852         struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
853         struct ice_vsi_ctx ctxt;
854         uint8_t qinq_flags;
855         int ret = 0;
856
857         /* Check if it has been already on or off */
858         if (vsi->info.valid_sections &
859                 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID)) {
860                 if (on) {
861                         if ((vsi->info.outer_tag_flags &
862                              ICE_AQ_VSI_OUTER_TAG_MODE_M) ==
863                             ICE_AQ_VSI_OUTER_TAG_COPY)
864                                 return 0; /* already on */
865                 } else {
866                         if ((vsi->info.outer_tag_flags &
867                              ICE_AQ_VSI_OUTER_TAG_MODE_M) ==
868                             ICE_AQ_VSI_OUTER_TAG_NOTHING)
869                                 return 0; /* already off */
870                 }
871         }
872
873         if (on)
874                 qinq_flags = ICE_AQ_VSI_OUTER_TAG_COPY;
875         else
876                 qinq_flags = ICE_AQ_VSI_OUTER_TAG_NOTHING;
877         vsi->info.outer_tag_flags &= ~(ICE_AQ_VSI_OUTER_TAG_MODE_M);
878         vsi->info.outer_tag_flags |= qinq_flags;
879         /* use default vlan type 0x8100 */
880         vsi->info.outer_tag_flags &= ~(ICE_AQ_VSI_OUTER_TAG_TYPE_M);
881         vsi->info.outer_tag_flags |= ICE_DFLT_OUTER_TAG_TYPE <<
882                                      ICE_AQ_VSI_OUTER_TAG_TYPE_S;
883         (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
884         ctxt.info.valid_sections =
885                         rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);
886         ctxt.vsi_num = vsi->vsi_id;
887         ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
888         if (ret) {
889                 PMD_DRV_LOG(INFO,
890                             "Update VSI failed to %s qinq stripping",
891                             on ? "enable" : "disable");
892                 return -EINVAL;
893         }
894
895         vsi->info.valid_sections |=
896                 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);
897
898         return ret;
899 }
900
901 static int
902 ice_vsi_config_double_vlan(struct ice_vsi *vsi, int on)
903 {
904         int ret;
905
906         ret = ice_vsi_config_qinq_stripping(vsi, on);
907         if (ret)
908                 PMD_DRV_LOG(ERR, "Fail to set qinq stripping - %d", ret);
909
910         ret = ice_vsi_config_qinq_insertion(vsi, on);
911         if (ret)
912                 PMD_DRV_LOG(ERR, "Fail to set qinq insertion - %d", ret);
913
914         return ret;
915 }
916
/* Enable IRQ0 (the misc/admin interrupt on MSI-X vector 0) */
static void
ice_pf_enable_irq0(struct ice_hw *hw)
{
	/* reset the registers: mask all causes, then read OICR
	 * (presumably read-to-clear — confirm against the datasheet)
	 */
	ICE_WRITE_REG(hw, PFINT_OICR_ENA, 0);
	ICE_READ_REG(hw, PFINT_OICR);

#ifdef ICE_LSE_SPT
	/* With link-state-event support, link changes are delivered via
	 * the FW control queue, so the OICR link-status cause is masked.
	 */
	ICE_WRITE_REG(hw, PFINT_OICR_ENA,
		      (uint32_t)(PFINT_OICR_ENA_INT_ENA_M &
				 (~PFINT_OICR_LINK_STAT_CHANGE_M)));

	/* route the "other interrupt cause" to MSI-X vector 0, ITR 0 */
	ICE_WRITE_REG(hw, PFINT_OICR_CTL,
		      (0 & PFINT_OICR_CTL_MSIX_INDX_M) |
		      ((0 << PFINT_OICR_CTL_ITR_INDX_S) &
		       PFINT_OICR_CTL_ITR_INDX_M) |
		      PFINT_OICR_CTL_CAUSE_ENA_M);

	/* route FW (admin queue) events to MSI-X vector 0 as well */
	ICE_WRITE_REG(hw, PFINT_FW_CTL,
		      (0 & PFINT_FW_CTL_MSIX_INDX_M) |
		      ((0 << PFINT_FW_CTL_ITR_INDX_S) &
		       PFINT_FW_CTL_ITR_INDX_M) |
		      PFINT_FW_CTL_CAUSE_ENA_M);
#else
	/* no LSE support: enable every "other" interrupt cause */
	ICE_WRITE_REG(hw, PFINT_OICR_ENA, PFINT_OICR_ENA_INT_ENA_M);
#endif

	/* arm MSI-X vector 0 */
	ICE_WRITE_REG(hw, GLINT_DYN_CTL(0),
		      GLINT_DYN_CTL_INTENA_M |
		      GLINT_DYN_CTL_CLEARPBA_M |
		      GLINT_DYN_CTL_ITR_INDX_M);

	ice_flush(hw);
}
952
/* Disable IRQ0 (misc vector 0 only; per-queue vectors are handled by
 * ice_vsi_disable_queues_intr())
 */
static void
ice_pf_disable_irq0(struct ice_hw *hw)
{
	/* Disable all interrupt types */
	ICE_WRITE_REG(hw, GLINT_DYN_CTL(0), GLINT_DYN_CTL_WB_ON_ITR_M);
	ice_flush(hw);
}
961
#ifdef ICE_LSE_SPT
/* Drain the admin receive queue and dispatch each event. Only
 * link-status changes are handled today (refresh the link and fire the
 * LSC callback). Called from ice_interrupt_handler() context.
 */
static void
ice_handle_aq_msg(struct rte_eth_dev *dev)
{
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_ctl_q_info *cq = &hw->adminq;
	struct ice_rq_event_info event;
	uint16_t pending, opcode;
	int ret;

	event.buf_len = ICE_AQ_MAX_BUF_LEN;
	event.msg_buf = rte_zmalloc(NULL, event.buf_len, 0);
	if (!event.msg_buf) {
		PMD_DRV_LOG(ERR, "Failed to allocate mem");
		return;
	}

	/* loop until the control queue reports no more pending events */
	pending = 1;
	while (pending) {
		ret = ice_clean_rq_elem(hw, cq, &event, &pending);

		if (ret != ICE_SUCCESS) {
			/* NOTE(review): logs sq_last_status although this is
			 * an RQ read — confirm this is intended.
			 */
			PMD_DRV_LOG(INFO,
				    "Failed to read msg from AdminQ, "
				    "adminq_err: %u",
				    hw->adminq.sq_last_status);
			break;
		}
		opcode = rte_le_to_cpu_16(event.desc.opcode);

		switch (opcode) {
		case ice_aqc_opc_get_link_status:
			/* refresh link state, then notify applications */
			ret = ice_link_update(dev, 0);
			if (!ret)
				_rte_eth_dev_callback_process
					(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
			break;
		default:
			PMD_DRV_LOG(DEBUG, "Request %u is not supported yet",
				    opcode);
			break;
		}
	}
	rte_free(event.msg_buf);
}
#endif
1008
/**
 * Interrupt handler triggered by NIC for handling
 * specific interrupt.
 *
 * Disables IRQ0, reads the "other interrupt cause" register (OICR)
 * and dispatches: link-state changes (via OICR, or via the FW control
 * queue when ICE_LSE_SPT is defined) and malicious-driver-detection
 * (MDD) events, then re-enables the interrupt.
 *
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
ice_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t oicr;
	uint32_t reg;
	uint8_t pf_num;
	uint8_t event;
	uint16_t queue;
#ifdef ICE_LSE_SPT
	uint32_t int_fw_ctl;
#endif

	/* Disable interrupt */
	ice_pf_disable_irq0(hw);

	/* read out interrupt causes */
	oicr = ICE_READ_REG(hw, PFINT_OICR);
#ifdef ICE_LSE_SPT
	int_fw_ctl = ICE_READ_REG(hw, PFINT_FW_CTL);
#endif

	/* No interrupt event indicated */
	if (!(oicr & PFINT_OICR_INTEVENT_M)) {
		PMD_DRV_LOG(INFO, "No interrupt event");
		goto done;
	}

#ifdef ICE_LSE_SPT
	/* link events arrive through the FW admin queue */
	if (int_fw_ctl & PFINT_FW_CTL_INTEVENT_M) {
		PMD_DRV_LOG(INFO, "FW_CTL: link state change event");
		ice_handle_aq_msg(dev);
	}
#else
	/* link events are signalled directly in OICR */
	if (oicr & PFINT_OICR_LINK_STAT_CHANGE_M) {
		PMD_DRV_LOG(INFO, "OICR: link state change event");
		ice_link_update(dev, 0);
	}
#endif

	if (oicr & PFINT_OICR_MAL_DETECT_M) {
		PMD_DRV_LOG(WARNING, "OICR: MDD event");
		/* decode the PQM MDD report: PF number, event type, queue */
		reg = ICE_READ_REG(hw, GL_MDET_TX_PQM);
		if (reg & GL_MDET_TX_PQM_VALID_M) {
			pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >>
				 GL_MDET_TX_PQM_PF_NUM_S;
			event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >>
				GL_MDET_TX_PQM_MAL_TYPE_S;
			queue = (reg & GL_MDET_TX_PQM_QNUM_M) >>
				GL_MDET_TX_PQM_QNUM_S;

			PMD_DRV_LOG(WARNING, "Malicious Driver Detection event "
				    "%d by PQM on TX queue %d PF# %d",
				    event, queue, pf_num);
		}

		/* same decoding for the TCLAN MDD report */
		reg = ICE_READ_REG(hw, GL_MDET_TX_TCLAN);
		if (reg & GL_MDET_TX_TCLAN_VALID_M) {
			pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >>
				 GL_MDET_TX_TCLAN_PF_NUM_S;
			event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >>
				GL_MDET_TX_TCLAN_MAL_TYPE_S;
			queue = (reg & GL_MDET_TX_TCLAN_QNUM_M) >>
				GL_MDET_TX_TCLAN_QNUM_S;

			PMD_DRV_LOG(WARNING, "Malicious Driver Detection event "
				    "%d by TCLAN on TX queue %d PF# %d",
				    event, queue, pf_num);
		}
	}
done:
	/* Enable interrupt */
	ice_pf_enable_irq0(hw);
	rte_intr_enable(dev->intr_handle);
}
1097
1098 /*  Initialize SW parameters of PF */
1099 static int
1100 ice_pf_sw_init(struct rte_eth_dev *dev)
1101 {
1102         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1103         struct ice_hw *hw = ICE_PF_TO_HW(pf);
1104
1105         if (ice_config_max_queue_pair_num(dev->device->devargs) > 0)
1106                 pf->lan_nb_qp_max =
1107                         ice_config_max_queue_pair_num(dev->device->devargs);
1108         else
1109                 pf->lan_nb_qp_max =
1110                         (uint16_t)RTE_MIN(hw->func_caps.common_cap.num_txq,
1111                                           hw->func_caps.common_cap.num_rxq);
1112
1113         pf->lan_nb_qps = pf->lan_nb_qp_max;
1114
1115         return 0;
1116 }
1117
/* Create and set up a VSI of the given type (only ICE_VSI_PF is
 * supported today): allocate the SW representation, build the VSI
 * context, reserve MSI-X vectors, add the VSI via admin queue, install
 * the default unicast and broadcast MAC filters and configure the TX
 * scheduler. Returns the new VSI, or NULL on failure.
 */
static struct ice_vsi *
ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_vsi *vsi = NULL;
	struct ice_vsi_ctx vsi_ctx;
	int ret;
	struct ether_addr broadcast = {
		.addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff} };
	struct ether_addr mac_addr;
	uint16_t max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
	uint8_t tc_bitmap = 0x1;

	/* hw->num_lports = 1 in NIC mode */
	vsi = rte_zmalloc(NULL, sizeof(struct ice_vsi), 0);
	if (!vsi)
		return NULL;

	vsi->idx = pf->next_vsi_idx;
	pf->next_vsi_idx++;
	vsi->type = type;
	vsi->adapter = ICE_PF_TO_ADAPTER(pf);
	vsi->max_macaddrs = ICE_NUM_MACADDR_MAX;
	vsi->vlan_anti_spoof_on = 0;
	vsi->vlan_filter_on = 1;
	TAILQ_INIT(&vsi->mac_list);
	TAILQ_INIT(&vsi->vlan_list);

	memset(&vsi_ctx, 0, sizeof(vsi_ctx));
	/* base_queue is used in queue mapping of VSI add/update command.
	 * Suppose vsi->base_queue is 0 now, don't consider SRIOV, VMDQ
	 * cases in the first stage. Only Main VSI.
	 */
	vsi->base_queue = 0;
	switch (type) {
	case ICE_VSI_PF:
		vsi->nb_qps = pf->lan_nb_qps;
		ice_vsi_config_default_rss(&vsi_ctx.info);
		vsi_ctx.alloc_from_pool = true;
		vsi_ctx.flags = ICE_AQ_VSI_TYPE_PF;
		/* switch_id is queried by get_switch_config aq, which is done
		 * by ice_init_hw
		 */
		vsi_ctx.info.sw_id = hw->port_info->sw_id;
		vsi_ctx.info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
		/* Allow all untagged or tagged packets */
		vsi_ctx.info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL;
		vsi_ctx.info.vlan_flags |= ICE_AQ_VSI_VLAN_EMOD_NOTHING;
		vsi_ctx.info.q_opt_rss = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF |
					 ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
		/* Enable VLAN/UP trip */
		ret = ice_vsi_config_tc_queue_mapping(vsi,
						      &vsi_ctx.info,
						      ICE_DEFAULT_TCMAP);
		if (ret) {
			PMD_INIT_LOG(ERR,
				     "tc queue mapping with vsi failed, "
				     "err = %d",
				     ret);
			goto fail_mem;
		}

		break;
	default:
		/* for other types of VSI */
		PMD_INIT_LOG(ERR, "other types of VSI not supported");
		goto fail_mem;
	}

	/* VF has MSIX interrupt in VF range, don't allocate here */
	if (type == ICE_VSI_PF) {
		ret = ice_res_pool_alloc(&pf->msix_pool,
					 RTE_MIN(vsi->nb_qps,
						 RTE_MAX_RXTX_INTR_VEC_ID));
		if (ret < 0) {
			/* NOTE(review): failure is only logged and the
			 * negative ret is still stored in msix_intr —
			 * confirm downstream users tolerate this.
			 */
			PMD_INIT_LOG(ERR, "VSI MAIN %d get heap failed %d",
				     vsi->vsi_id, ret);
		}
		vsi->msix_intr = ret;
		vsi->nb_msix = RTE_MIN(vsi->nb_qps, RTE_MAX_RXTX_INTR_VEC_ID);
	} else {
		vsi->msix_intr = 0;
		vsi->nb_msix = 0;
	}
	ret = ice_add_vsi(hw, vsi->idx, &vsi_ctx, NULL);
	if (ret != ICE_SUCCESS) {
		PMD_INIT_LOG(ERR, "add vsi failed, err = %d", ret);
		goto fail_mem;
	}
	/* store vsi information in SW structure */
	vsi->vsi_id = vsi_ctx.vsi_num;
	vsi->info = vsi_ctx.info;
	pf->vsis_allocated = vsi_ctx.vsis_allocd;
	pf->vsis_unallocated = vsi_ctx.vsis_unallocated;

	/* MAC configuration: start from the port's permanent MAC */
	rte_memcpy(pf->dev_addr.addr_bytes,
		   hw->port_info->mac.perm_addr,
		   ETH_ADDR_LEN);

	rte_memcpy(&mac_addr, &pf->dev_addr, ETHER_ADDR_LEN);
	ret = ice_add_mac_filter(vsi, &mac_addr);
	if (ret != ICE_SUCCESS)
		PMD_INIT_LOG(ERR, "Failed to add dflt MAC filter");

	/* also accept broadcast frames */
	rte_memcpy(&mac_addr, &broadcast, ETHER_ADDR_LEN);
	ret = ice_add_mac_filter(vsi, &mac_addr);
	if (ret != ICE_SUCCESS)
		PMD_INIT_LOG(ERR, "Failed to add MAC filter");

	/* At the beginning, only TC0. */
	/* What we need here is the maximum number of the TX queues.
	 * Currently vsi->nb_qps means it.
	 * Correct it if any change.
	 */
	max_txqs[0] = vsi->nb_qps;
	ret = ice_cfg_vsi_lan(hw->port_info, vsi->idx,
			      tc_bitmap, max_txqs);
	if (ret != ICE_SUCCESS)
		PMD_INIT_LOG(ERR, "Failed to config vsi sched");

	return vsi;
fail_mem:
	rte_free(vsi);
	pf->next_vsi_idx--;
	return NULL;
}
1245
1246 static int
1247 ice_pf_setup(struct ice_pf *pf)
1248 {
1249         struct ice_vsi *vsi;
1250
1251         /* Clear all stats counters */
1252         pf->offset_loaded = FALSE;
1253         memset(&pf->stats, 0, sizeof(struct ice_hw_port_stats));
1254         memset(&pf->stats_offset, 0, sizeof(struct ice_hw_port_stats));
1255         memset(&pf->internal_stats, 0, sizeof(struct ice_eth_stats));
1256         memset(&pf->internal_stats_offset, 0, sizeof(struct ice_eth_stats));
1257
1258         vsi = ice_setup_vsi(pf, ICE_VSI_PF);
1259         if (!vsi) {
1260                 PMD_INIT_LOG(ERR, "Failed to add vsi for PF");
1261                 return -EINVAL;
1262         }
1263
1264         pf->main_vsi = vsi;
1265
1266         return 0;
1267 }
1268
1269 static int ice_load_pkg(struct rte_eth_dev *dev)
1270 {
1271         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1272         const char *pkg_file = ICE_DFLT_PKG_FILE;
1273         int err;
1274         uint8_t *buf;
1275         int buf_len;
1276         FILE *file;
1277         struct stat fstat;
1278
1279         file = fopen(pkg_file, "rb");
1280         if (!file)  {
1281                 PMD_INIT_LOG(ERR, "failed to open file: %s\n", pkg_file);
1282                 return -1;
1283         }
1284
1285         err = stat(pkg_file, &fstat);
1286         if (err) {
1287                 PMD_INIT_LOG(ERR, "failed to get file stats\n");
1288                 fclose(file);
1289                 return err;
1290         }
1291
1292         buf_len = fstat.st_size;
1293         buf = rte_malloc(NULL, buf_len, 0);
1294
1295         if (!buf) {
1296                 PMD_INIT_LOG(ERR, "failed to allocate buf of size %d for package\n",
1297                                 buf_len);
1298                 fclose(file);
1299                 return -1;
1300         }
1301
1302         err = fread(buf, buf_len, 1, file);
1303         if (err != 1) {
1304                 PMD_INIT_LOG(ERR, "failed to read package data\n");
1305                 fclose(file);
1306                 err = -1;
1307                 goto fail_exit;
1308         }
1309
1310         fclose(file);
1311
1312         err = ice_copy_and_init_pkg(hw, buf, buf_len);
1313         if (err) {
1314                 PMD_INIT_LOG(ERR, "ice_copy_and_init_hw failed: %d\n", err);
1315                 goto fail_exit;
1316         }
1317         err = ice_init_hw_tbls(hw);
1318         if (err) {
1319                 PMD_INIT_LOG(ERR, "ice_init_hw_tbls failed: %d\n", err);
1320                 goto fail_init_tbls;
1321         }
1322
1323         return 0;
1324
1325 fail_init_tbls:
1326         rte_free(hw->pkg_copy);
1327 fail_exit:
1328         rte_free(buf);
1329         return err;
1330 }
1331
/* ethdev init hook: install ops and burst functions, bring up the
 * shared HW layer, load the DDP package (falling back to safe mode on
 * failure), initialize SW parameters, the MSI-X pool and the main PF
 * VSI, then register and enable the IRQ0 handler.
 */
static int
ice_dev_init(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev;
	struct rte_intr_handle *intr_handle;
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_adapter *ad =
		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct ice_vsi *vsi;
	int ret;

	dev->dev_ops = &ice_eth_dev_ops;
	dev->rx_pkt_burst = ice_recv_pkts;
	dev->tx_pkt_burst = ice_xmit_pkts;
	dev->tx_pkt_prepare = ice_prep_pkts;

	ice_set_default_ptype_table(dev);
	pci_dev = RTE_DEV_TO_PCI(dev->device);
	intr_handle = &pci_dev->intr_handle;

	/* wire up the adapter/PF/HW back-pointers and PCI identity */
	pf->adapter = ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	pf->adapter->eth_dev = dev;
	pf->dev_data = dev->data;
	hw->back = pf->adapter;
	hw->hw_addr = (uint8_t *)pci_dev->mem_resource[0].addr;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->device_id = pci_dev->id.device_id;
	hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
	hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
	hw->bus.device = pci_dev->addr.devid;
	hw->bus.func = pci_dev->addr.function;

	ice_init_controlq_parameter(hw);

	ret = ice_init_hw(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to initialize HW");
		return -EINVAL;
	}

	/* a missing/unreadable DDP package is not fatal: continue in
	 * safe mode with reduced functionality
	 */
	ret = ice_load_pkg(dev);
	if (ret) {
		PMD_INIT_LOG(WARNING, "Failed to load the DDP package,"
				"Entering Safe Mode");
		ad->is_safe_mode = 1;
	}

	PMD_INIT_LOG(INFO, "FW %d.%d.%05d API %d.%d",
		     hw->fw_maj_ver, hw->fw_min_ver, hw->fw_build,
		     hw->api_maj_ver, hw->api_min_ver);

	/* NOTE(review): return value ignored; ice_pf_sw_init() currently
	 * always returns 0 — revisit if that changes.
	 */
	ice_pf_sw_init(dev);
	ret = ice_init_mac_address(dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to initialize mac address");
		goto err_init_mac;
	}

	/* pool starts at vector 1; vector 0 is kept for the misc/admin
	 * interrupt (see ice_pf_enable_irq0)
	 */
	ret = ice_res_pool_init(&pf->msix_pool, 1,
				hw->func_caps.common_cap.num_msix_vectors - 1);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to init MSIX pool");
		goto err_msix_pool_init;
	}

	ret = ice_pf_setup(pf);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to setup PF");
		goto err_pf_setup;
	}

	vsi = pf->main_vsi;

	/* Disable double vlan by default */
	ice_vsi_config_double_vlan(vsi, FALSE);

	/* register callback func to eal lib */
	rte_intr_callback_register(intr_handle,
				   ice_interrupt_handler, dev);

	ice_pf_enable_irq0(hw);

	/* enable uio intr after callback register */
	rte_intr_enable(intr_handle);

	return 0;

err_pf_setup:
	ice_res_pool_destroy(&pf->msix_pool);
err_msix_pool_init:
	rte_free(dev->data->mac_addrs);
err_init_mac:
	ice_sched_cleanup_all(hw);
	rte_free(hw->port_info);
	ice_shutdown_all_ctrlq(hw);

	return ret;
}
1431
1432 static int
1433 ice_release_vsi(struct ice_vsi *vsi)
1434 {
1435         struct ice_hw *hw;
1436         struct ice_vsi_ctx vsi_ctx;
1437         enum ice_status ret;
1438
1439         if (!vsi)
1440                 return 0;
1441
1442         hw = ICE_VSI_TO_HW(vsi);
1443
1444         ice_remove_all_mac_vlan_filters(vsi);
1445
1446         memset(&vsi_ctx, 0, sizeof(vsi_ctx));
1447
1448         vsi_ctx.vsi_num = vsi->vsi_id;
1449         vsi_ctx.info = vsi->info;
1450         ret = ice_free_vsi(hw, vsi->idx, &vsi_ctx, false, NULL);
1451         if (ret != ICE_SUCCESS) {
1452                 PMD_INIT_LOG(ERR, "Failed to free vsi by aq, %u", vsi->vsi_id);
1453                 rte_free(vsi);
1454                 return -1;
1455         }
1456
1457         rte_free(vsi);
1458         return 0;
1459 }
1460
1461 static void
1462 ice_vsi_disable_queues_intr(struct ice_vsi *vsi)
1463 {
1464         struct rte_eth_dev *dev = vsi->adapter->eth_dev;
1465         struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
1466         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1467         struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
1468         uint16_t msix_intr, i;
1469
1470         /* disable interrupt and also clear all the exist config */
1471         for (i = 0; i < vsi->nb_qps; i++) {
1472                 ICE_WRITE_REG(hw, QINT_TQCTL(vsi->base_queue + i), 0);
1473                 ICE_WRITE_REG(hw, QINT_RQCTL(vsi->base_queue + i), 0);
1474                 rte_wmb();
1475         }
1476
1477         if (rte_intr_allow_others(intr_handle))
1478                 /* vfio-pci */
1479                 for (i = 0; i < vsi->nb_msix; i++) {
1480                         msix_intr = vsi->msix_intr + i;
1481                         ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr),
1482                                       GLINT_DYN_CTL_WB_ON_ITR_M);
1483                 }
1484         else
1485                 /* igb_uio */
1486                 ICE_WRITE_REG(hw, GLINT_DYN_CTL(0), GLINT_DYN_CTL_WB_ON_ITR_M);
1487 }
1488
/* ethdev stop hook: halt all Rx/Tx queues, disable queue interrupts,
 * release queue mbufs and the Rx interrupt vector mapping. Idempotent
 * via the adapter_stopped flag.
 */
static void
ice_dev_stop(struct rte_eth_dev *dev)
{
	struct rte_eth_dev_data *data = dev->data;
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_vsi *main_vsi = pf->main_vsi;
	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	uint16_t i;

	/* avoid stopping again */
	if (pf->adapter_stopped)
		return;

	/* stop and clear all Rx queues */
	for (i = 0; i < data->nb_rx_queues; i++)
		ice_rx_queue_stop(dev, i);

	/* stop and clear all Tx queues */
	for (i = 0; i < data->nb_tx_queues; i++)
		ice_tx_queue_stop(dev, i);

	/* disable all queue interrupts */
	ice_vsi_disable_queues_intr(main_vsi);

	/* Clear all queues and release mbufs */
	ice_clear_queues(dev);

	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
	if (intr_handle->intr_vec) {
		rte_free(intr_handle->intr_vec);
		intr_handle->intr_vec = NULL;
	}

	pf->adapter_stopped = true;
}
1526
/* ethdev close hook: stop the port, then tear down resources in
 * roughly the reverse order of ice_dev_init()/queue setup — queues,
 * MSI-X pool, main VSI, scheduler state, port info, control queues.
 */
static void
ice_dev_close(struct rte_eth_dev *dev)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	ice_dev_stop(dev);

	/* release all queue resource */
	ice_free_queues(dev);

	ice_res_pool_destroy(&pf->msix_pool);
	ice_release_vsi(pf->main_vsi);
	ice_sched_cleanup_all(hw);
	rte_free(hw->port_info);
	ice_shutdown_all_ctrlq(hw);
}
1544
/* ethdev uninit hook: close the device, clear the burst pointers,
 * free the MAC address table and remove the interrupt callback.
 */
static int
ice_dev_uninit(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	ice_dev_close(dev);

	dev->dev_ops = NULL;
	dev->rx_pkt_burst = NULL;
	dev->tx_pkt_burst = NULL;

	rte_free(dev->data->mac_addrs);
	dev->data->mac_addrs = NULL;

	/* disable uio intr before callback unregister */
	rte_intr_disable(intr_handle);

	/* unregister callback func from eal lib */
	rte_intr_callback_unregister(intr_handle,
				     ice_interrupt_handler, dev);

	return 0;
}
1569
1570 static int
1571 ice_dev_configure(__rte_unused struct rte_eth_dev *dev)
1572 {
1573         struct ice_adapter *ad =
1574                 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1575
1576         /* Initialize to TRUE. If any of Rx queues doesn't meet the
1577          * bulk allocation or vector Rx preconditions we will reset it.
1578          */
1579         ad->rx_bulk_alloc_allowed = true;
1580         ad->tx_simple_allowed = true;
1581
1582         return 0;
1583 }
1584
1585 static int ice_init_rss(struct ice_pf *pf)
1586 {
1587         struct ice_hw *hw = ICE_PF_TO_HW(pf);
1588         struct ice_vsi *vsi = pf->main_vsi;
1589         struct rte_eth_dev *dev = pf->adapter->eth_dev;
1590         struct rte_eth_rss_conf *rss_conf;
1591         struct ice_aqc_get_set_rss_keys key;
1592         uint16_t i, nb_q;
1593         int ret = 0;
1594         bool is_safe_mode = pf->adapter->is_safe_mode;
1595
1596         rss_conf = &dev->data->dev_conf.rx_adv_conf.rss_conf;
1597         nb_q = dev->data->nb_rx_queues;
1598         vsi->rss_key_size = ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE;
1599         vsi->rss_lut_size = hw->func_caps.common_cap.rss_table_size;
1600
1601         if (is_safe_mode) {
1602                 PMD_DRV_LOG(WARNING, "RSS is not supported in safe mode\n");
1603                 return 0;
1604         }
1605
1606         if (!vsi->rss_key)
1607                 vsi->rss_key = rte_zmalloc(NULL,
1608                                            vsi->rss_key_size, 0);
1609         if (!vsi->rss_lut)
1610                 vsi->rss_lut = rte_zmalloc(NULL,
1611                                            vsi->rss_lut_size, 0);
1612
1613         /* configure RSS key */
1614         if (!rss_conf->rss_key) {
1615                 /* Calculate the default hash key */
1616                 for (i = 0; i <= vsi->rss_key_size; i++)
1617                         vsi->rss_key[i] = (uint8_t)rte_rand();
1618         } else {
1619                 rte_memcpy(vsi->rss_key, rss_conf->rss_key,
1620                            RTE_MIN(rss_conf->rss_key_len,
1621                                    vsi->rss_key_size));
1622         }
1623         rte_memcpy(key.standard_rss_key, vsi->rss_key, vsi->rss_key_size);
1624         ret = ice_aq_set_rss_key(hw, vsi->idx, &key);
1625         if (ret)
1626                 return -EINVAL;
1627
1628         /* init RSS LUT table */
1629         for (i = 0; i < vsi->rss_lut_size; i++)
1630                 vsi->rss_lut[i] = i % nb_q;
1631
1632         ret = ice_aq_set_rss_lut(hw, vsi->idx,
1633                                  ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF,
1634                                  vsi->rss_lut, vsi->rss_lut_size);
1635         if (ret)
1636                 return -EINVAL;
1637
1638         /* configure RSS for tcp6 with input set IPv6 src/dst, TCP src/dst */
1639         ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_TCP_IPV6,
1640                               ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6);
1641         if (ret)
1642                 PMD_DRV_LOG(ERR, "%s TCP_IPV6 rss flow fail %d", __func__, ret);
1643
1644         /* configure RSS for udp6 with input set IPv6 src/dst, UDP src/dst */
1645         ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_UDP_IPV6,
1646                               ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV6);
1647         if (ret)
1648                 PMD_DRV_LOG(ERR, "%s UDP_IPV6 rss flow fail %d", __func__, ret);
1649
1650         /* configure RSS for sctp6 with input set IPv6 src/dst */
1651         ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV6,
1652                               ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV6);
1653         if (ret)
1654                 PMD_DRV_LOG(ERR, "%s SCTP_IPV6 rss flow fail %d",
1655                                 __func__, ret);
1656
1657         /* configure RSS for tcp4 with input set IP src/dst, TCP src/dst */
1658         ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_TCP_IPV4,
1659                               ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4);
1660         if (ret)
1661                 PMD_DRV_LOG(ERR, "%s TCP_IPV4 rss flow fail %d", __func__, ret);
1662
1663         /* configure RSS for udp4 with input set IP src/dst, UDP src/dst */
1664         ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_UDP_IPV4,
1665                               ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4);
1666         if (ret)
1667                 PMD_DRV_LOG(ERR, "%s UDP_IPV4 rss flow fail %d", __func__, ret);
1668
1669         /* configure RSS for sctp4 with input set IP src/dst */
1670         ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV4,
1671                               ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4);
1672         if (ret)
1673                 PMD_DRV_LOG(ERR, "%s SCTP_IPV4 rss flow fail %d",
1674                                 __func__, ret);
1675
1676         return 0;
1677 }
1678
1679 static void
1680 __vsi_queues_bind_intr(struct ice_vsi *vsi, uint16_t msix_vect,
1681                        int base_queue, int nb_queue)
1682 {
1683         struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
1684         uint32_t val, val_tx;
1685         int i;
1686
1687         for (i = 0; i < nb_queue; i++) {
1688                 /*do actual bind*/
1689                 val = (msix_vect & QINT_RQCTL_MSIX_INDX_M) |
1690                       (0 < QINT_RQCTL_ITR_INDX_S) | QINT_RQCTL_CAUSE_ENA_M;
1691                 val_tx = (msix_vect & QINT_TQCTL_MSIX_INDX_M) |
1692                          (0 < QINT_TQCTL_ITR_INDX_S) | QINT_TQCTL_CAUSE_ENA_M;
1693
1694                 PMD_DRV_LOG(INFO, "queue %d is binding to vect %d",
1695                             base_queue + i, msix_vect);
1696                 /* set ITR0 value */
1697                 ICE_WRITE_REG(hw, GLINT_ITR(0, msix_vect), 0x10);
1698                 ICE_WRITE_REG(hw, QINT_RQCTL(base_queue + i), val);
1699                 ICE_WRITE_REG(hw, QINT_TQCTL(base_queue + i), val_tx);
1700         }
1701 }
1702
/* Distribute the VSI's used queues over its MSI-X vectors and record the
 * queue->vector mapping in intr_handle->intr_vec (only when data-path
 * interrupts are enabled).  With only one usable vector all queues are
 * bound to it; otherwise queues and vectors are mapped 1:1.
 */
static void
ice_vsi_queues_bind_intr(struct ice_vsi *vsi)
{
	struct rte_eth_dev *dev = vsi->adapter->eth_dev;
	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	uint16_t msix_vect = vsi->msix_intr;
	/* cannot use more vectors than event fds set up on the handle */
	uint16_t nb_msix = RTE_MIN(vsi->nb_msix, intr_handle->nb_efd);
	uint16_t queue_idx = 0;
	int record = 0;
	int i;

	/* clear Rx/Tx queue interrupt */
	for (i = 0; i < vsi->nb_used_qps; i++) {
		ICE_WRITE_REG(hw, QINT_TQCTL(vsi->base_queue + i), 0);
		ICE_WRITE_REG(hw, QINT_RQCTL(vsi->base_queue + i), 0);
	}

	/* PF bind interrupt: only fill intr_vec when data-path interrupts
	 * are enabled on the handle
	 */
	if (rte_intr_dp_is_en(intr_handle)) {
		queue_idx = 0;
		record = 1;
	}

	for (i = 0; i < vsi->nb_used_qps; i++) {
		if (nb_msix <= 1) {
			if (!rte_intr_allow_others(intr_handle))
				msix_vect = ICE_MISC_VEC_ID;

			/* uio mapping all queue to one msix_vect */
			__vsi_queues_bind_intr(vsi, msix_vect,
					       vsi->base_queue + i,
					       vsi->nb_used_qps - i);

			/* note: reuses the outer loop index i to walk the
			 * remaining queues, then exits the outer loop
			 */
			for (; !!record && i < vsi->nb_used_qps; i++)
				intr_handle->intr_vec[queue_idx + i] =
					msix_vect;
			break;
		}

		/* vfio 1:1 queue/msix_vect mapping */
		__vsi_queues_bind_intr(vsi, msix_vect,
				       vsi->base_queue + i, 1);

		if (!!record)
			intr_handle->intr_vec[queue_idx + i] = msix_vect;

		msix_vect++;
		nb_msix--;
	}
}
1755
1756 static void
1757 ice_vsi_enable_queues_intr(struct ice_vsi *vsi)
1758 {
1759         struct rte_eth_dev *dev = vsi->adapter->eth_dev;
1760         struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
1761         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1762         struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
1763         uint16_t msix_intr, i;
1764
1765         if (rte_intr_allow_others(intr_handle))
1766                 for (i = 0; i < vsi->nb_used_qps; i++) {
1767                         msix_intr = vsi->msix_intr + i;
1768                         ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr),
1769                                       GLINT_DYN_CTL_INTENA_M |
1770                                       GLINT_DYN_CTL_CLEARPBA_M |
1771                                       GLINT_DYN_CTL_ITR_INDX_M |
1772                                       GLINT_DYN_CTL_WB_ON_ITR_M);
1773                 }
1774         else
1775                 ICE_WRITE_REG(hw, GLINT_DYN_CTL(0),
1776                               GLINT_DYN_CTL_INTENA_M |
1777                               GLINT_DYN_CTL_CLEARPBA_M |
1778                               GLINT_DYN_CTL_ITR_INDX_M |
1779                               GLINT_DYN_CTL_WB_ON_ITR_M);
1780 }
1781
/* Set up Rx queue interrupts for the device: enable event fds when
 * per-queue (data path) interrupts are requested, allocate the
 * queue->vector map, bind queues to MSI-X vectors and enable them.
 * Returns 0 on success or a negative value on failure.
 */
static int
ice_rxq_intr_setup(struct rte_eth_dev *dev)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct ice_vsi *vsi = pf->main_vsi;
	uint32_t intr_vector = 0;

	rte_intr_disable(intr_handle);

	/* check and configure queue intr-vector mapping */
	if ((rte_intr_cap_multiple(intr_handle) ||
	     !RTE_ETH_DEV_SRIOV(dev).active) &&
	    dev->data->dev_conf.intr_conf.rxq != 0) {
		/* one vector (event fd) per Rx queue */
		intr_vector = dev->data->nb_rx_queues;
		if (intr_vector > ICE_MAX_INTR_QUEUE_NUM) {
			PMD_DRV_LOG(ERR, "At most %d intr queues supported",
				    ICE_MAX_INTR_QUEUE_NUM);
			return -ENOTSUP;
		}
		if (rte_intr_efd_enable(intr_handle, intr_vector))
			return -1;
	}

	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
		/* queue-index -> MSI-X vector map, filled in later by
		 * ice_vsi_queues_bind_intr()
		 */
		intr_handle->intr_vec =
		rte_zmalloc(NULL, dev->data->nb_rx_queues * sizeof(int),
			    0);
		if (!intr_handle->intr_vec) {
			PMD_DRV_LOG(ERR,
				    "Failed to allocate %d rx_queues intr_vec",
				    dev->data->nb_rx_queues);
			return -ENOMEM;
		}
	}

	/* Map queues with MSIX interrupt */
	vsi->nb_used_qps = dev->data->nb_rx_queues;
	ice_vsi_queues_bind_intr(vsi);

	/* Enable interrupts for all the queues */
	ice_vsi_enable_queues_intr(vsi);

	rte_intr_enable(intr_handle);

	return 0;
}
1830
1831 static int
1832 ice_dev_start(struct rte_eth_dev *dev)
1833 {
1834         struct rte_eth_dev_data *data = dev->data;
1835         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1836         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1837         struct ice_vsi *vsi = pf->main_vsi;
1838         uint16_t nb_rxq = 0;
1839         uint16_t nb_txq, i;
1840         int mask, ret;
1841
1842         /* program Tx queues' context in hardware */
1843         for (nb_txq = 0; nb_txq < data->nb_tx_queues; nb_txq++) {
1844                 ret = ice_tx_queue_start(dev, nb_txq);
1845                 if (ret) {
1846                         PMD_DRV_LOG(ERR, "fail to start Tx queue %u", nb_txq);
1847                         goto tx_err;
1848                 }
1849         }
1850
1851         /* program Rx queues' context in hardware*/
1852         for (nb_rxq = 0; nb_rxq < data->nb_rx_queues; nb_rxq++) {
1853                 ret = ice_rx_queue_start(dev, nb_rxq);
1854                 if (ret) {
1855                         PMD_DRV_LOG(ERR, "fail to start Rx queue %u", nb_rxq);
1856                         goto rx_err;
1857                 }
1858         }
1859
1860         ret = ice_init_rss(pf);
1861         if (ret) {
1862                 PMD_DRV_LOG(ERR, "Failed to enable rss for PF");
1863                 goto rx_err;
1864         }
1865
1866         ice_set_rx_function(dev);
1867         ice_set_tx_function(dev);
1868
1869         mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
1870                         ETH_VLAN_EXTEND_MASK;
1871         ret = ice_vlan_offload_set(dev, mask);
1872         if (ret) {
1873                 PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
1874                 goto rx_err;
1875         }
1876
1877         /* enable Rx interrput and mapping Rx queue to interrupt vector */
1878         if (ice_rxq_intr_setup(dev))
1879                 return -EIO;
1880
1881         /* Enable receiving broadcast packets and transmitting packets */
1882         ret = ice_set_vsi_promisc(hw, vsi->idx,
1883                                   ICE_PROMISC_BCAST_RX | ICE_PROMISC_BCAST_TX |
1884                                   ICE_PROMISC_UCAST_TX | ICE_PROMISC_MCAST_TX,
1885                                   0);
1886         if (ret != ICE_SUCCESS)
1887                 PMD_DRV_LOG(INFO, "fail to set vsi broadcast");
1888
1889         ret = ice_aq_set_event_mask(hw, hw->port_info->lport,
1890                                     ((u16)(ICE_AQ_LINK_EVENT_LINK_FAULT |
1891                                      ICE_AQ_LINK_EVENT_PHY_TEMP_ALARM |
1892                                      ICE_AQ_LINK_EVENT_EXCESSIVE_ERRORS |
1893                                      ICE_AQ_LINK_EVENT_SIGNAL_DETECT |
1894                                      ICE_AQ_LINK_EVENT_AN_COMPLETED |
1895                                      ICE_AQ_LINK_EVENT_PORT_TX_SUSPENDED)),
1896                                      NULL);
1897         if (ret != ICE_SUCCESS)
1898                 PMD_DRV_LOG(WARNING, "Fail to set phy mask");
1899
1900         /* Call get_link_info aq commond to enable/disable LSE */
1901         ice_link_update(dev, 0);
1902
1903         pf->adapter_stopped = false;
1904
1905         return 0;
1906
1907         /* stop the started queues if failed to start all queues */
1908 rx_err:
1909         for (i = 0; i < nb_rxq; i++)
1910                 ice_rx_queue_stop(dev, i);
1911 tx_err:
1912         for (i = 0; i < nb_txq; i++)
1913                 ice_tx_queue_stop(dev, i);
1914
1915         return -EIO;
1916 }
1917
1918 static int
1919 ice_dev_reset(struct rte_eth_dev *dev)
1920 {
1921         int ret;
1922
1923         if (dev->data->sriov.active)
1924                 return -ENOTSUP;
1925
1926         ret = ice_dev_uninit(dev);
1927         if (ret) {
1928                 PMD_INIT_LOG(ERR, "failed to uninit device, status = %d", ret);
1929                 return -ENXIO;
1930         }
1931
1932         ret = ice_dev_init(dev);
1933         if (ret) {
1934                 PMD_INIT_LOG(ERR, "failed to init device, status = %d", ret);
1935                 return -ENXIO;
1936         }
1937
1938         return 0;
1939 }
1940
/* Report device capabilities and defaults: queue/MAC limits, offload
 * capabilities (reduced in safe mode), RSS parameters, default queue
 * configuration, descriptor limits and supported link speeds.
 */
static void
ice_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_vsi *vsi = pf->main_vsi;
	struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
	bool is_safe_mode = pf->adapter->is_safe_mode;
	u64 phy_type_low;
	u64 phy_type_high;

	dev_info->min_rx_bufsize = ICE_BUF_SIZE_MIN;
	dev_info->max_rx_pktlen = ICE_FRAME_SIZE_MAX;
	dev_info->max_rx_queues = vsi->nb_qps;
	dev_info->max_tx_queues = vsi->nb_qps;
	dev_info->max_mac_addrs = vsi->max_macaddrs;
	dev_info->max_vfs = pci_dev->max_vfs;

	/* offloads available in every mode, including safe mode */
	dev_info->rx_offload_capa =
		DEV_RX_OFFLOAD_VLAN_STRIP |
		DEV_RX_OFFLOAD_JUMBO_FRAME |
		DEV_RX_OFFLOAD_KEEP_CRC |
		DEV_RX_OFFLOAD_SCATTER |
		DEV_RX_OFFLOAD_VLAN_FILTER;
	dev_info->tx_offload_capa =
		DEV_TX_OFFLOAD_VLAN_INSERT |
		DEV_TX_OFFLOAD_TCP_TSO |
		DEV_TX_OFFLOAD_MULTI_SEGS |
		DEV_TX_OFFLOAD_MBUF_FAST_FREE;
	dev_info->flow_type_rss_offloads = 0;

	/* checksum/QinQ offloads and RSS need the DDP package, which is
	 * absent in safe mode
	 */
	if (!is_safe_mode) {
		dev_info->rx_offload_capa |=
			DEV_RX_OFFLOAD_IPV4_CKSUM |
			DEV_RX_OFFLOAD_UDP_CKSUM |
			DEV_RX_OFFLOAD_TCP_CKSUM |
			DEV_RX_OFFLOAD_QINQ_STRIP |
			DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
			DEV_RX_OFFLOAD_VLAN_EXTEND;
		dev_info->tx_offload_capa |=
			DEV_TX_OFFLOAD_QINQ_INSERT |
			DEV_TX_OFFLOAD_IPV4_CKSUM |
			DEV_TX_OFFLOAD_UDP_CKSUM |
			DEV_TX_OFFLOAD_TCP_CKSUM |
			DEV_TX_OFFLOAD_SCTP_CKSUM |
			DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
		dev_info->flow_type_rss_offloads |= ICE_RSS_OFFLOAD_ALL;
	}

	dev_info->rx_queue_offload_capa = 0;
	dev_info->tx_queue_offload_capa = 0;

	dev_info->reta_size = hw->func_caps.common_cap.rss_table_size;
	dev_info->hash_key_size = (VSIQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_thresh = {
			.pthresh = ICE_DEFAULT_RX_PTHRESH,
			.hthresh = ICE_DEFAULT_RX_HTHRESH,
			.wthresh = ICE_DEFAULT_RX_WTHRESH,
		},
		.rx_free_thresh = ICE_DEFAULT_RX_FREE_THRESH,
		.rx_drop_en = 0,
		.offloads = 0,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_thresh = {
			.pthresh = ICE_DEFAULT_TX_PTHRESH,
			.hthresh = ICE_DEFAULT_TX_HTHRESH,
			.wthresh = ICE_DEFAULT_TX_WTHRESH,
		},
		.tx_free_thresh = ICE_DEFAULT_TX_FREE_THRESH,
		.tx_rs_thresh = ICE_DEFAULT_TX_RSBIT_THRESH,
		.offloads = 0,
	};

	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = ICE_MAX_RING_DESC,
		.nb_min = ICE_MIN_RING_DESC,
		.nb_align = ICE_ALIGN_RING_DESC,
	};

	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = ICE_MAX_RING_DESC,
		.nb_min = ICE_MIN_RING_DESC,
		.nb_align = ICE_ALIGN_RING_DESC,
	};

	/* speeds up to 25G are always advertised; 50G/100G only when the
	 * PHY types reported by firmware support them
	 */
	dev_info->speed_capa = ETH_LINK_SPEED_10M |
			       ETH_LINK_SPEED_100M |
			       ETH_LINK_SPEED_1G |
			       ETH_LINK_SPEED_2_5G |
			       ETH_LINK_SPEED_5G |
			       ETH_LINK_SPEED_10G |
			       ETH_LINK_SPEED_20G |
			       ETH_LINK_SPEED_25G;

	phy_type_low = hw->port_info->phy.phy_type_low;
	phy_type_high = hw->port_info->phy.phy_type_high;

	if (ICE_PHY_TYPE_SUPPORT_50G(phy_type_low))
		dev_info->speed_capa |= ETH_LINK_SPEED_50G;

	if (ICE_PHY_TYPE_SUPPORT_100G_LOW(phy_type_low) ||
			ICE_PHY_TYPE_SUPPORT_100G_HIGH(phy_type_high))
		dev_info->speed_capa |= ETH_LINK_SPEED_100G;

	dev_info->nb_rx_queues = dev->data->nb_rx_queues;
	dev_info->nb_tx_queues = dev->data->nb_tx_queues;

	dev_info->default_rxportconf.burst_size = ICE_RX_MAX_BURST;
	dev_info->default_txportconf.burst_size = ICE_TX_MAX_BURST;
	dev_info->default_rxportconf.nb_queues = 1;
	dev_info->default_txportconf.nb_queues = 1;
	/* NOTE(review): ring_size set from ICE_BUF_SIZE_MIN (a buffer size)
	 * rather than a descriptor-count constant — confirm intent
	 */
	dev_info->default_rxportconf.ring_size = ICE_BUF_SIZE_MIN;
	dev_info->default_txportconf.ring_size = ICE_BUF_SIZE_MIN;
}
2059
2060 static inline int
2061 ice_atomic_read_link_status(struct rte_eth_dev *dev,
2062                             struct rte_eth_link *link)
2063 {
2064         struct rte_eth_link *dst = link;
2065         struct rte_eth_link *src = &dev->data->dev_link;
2066
2067         if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
2068                                 *(uint64_t *)src) == 0)
2069                 return -1;
2070
2071         return 0;
2072 }
2073
2074 static inline int
2075 ice_atomic_write_link_status(struct rte_eth_dev *dev,
2076                              struct rte_eth_link *link)
2077 {
2078         struct rte_eth_link *dst = &dev->data->dev_link;
2079         struct rte_eth_link *src = link;
2080
2081         if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
2082                                 *(uint64_t *)src) == 0)
2083                 return -1;
2084
2085         return 0;
2086 }
2087
2088 static int
2089 ice_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
2090 {
2091 #define CHECK_INTERVAL 100  /* 100ms */
2092 #define MAX_REPEAT_TIME 10  /* 1s (10 * 100ms) in total */
2093         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2094         struct ice_link_status link_status;
2095         struct rte_eth_link link, old;
2096         int status;
2097         unsigned int rep_cnt = MAX_REPEAT_TIME;
2098         bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false;
2099
2100         memset(&link, 0, sizeof(link));
2101         memset(&old, 0, sizeof(old));
2102         memset(&link_status, 0, sizeof(link_status));
2103         ice_atomic_read_link_status(dev, &old);
2104
2105         do {
2106                 /* Get link status information from hardware */
2107                 status = ice_aq_get_link_info(hw->port_info, enable_lse,
2108                                               &link_status, NULL);
2109                 if (status != ICE_SUCCESS) {
2110                         link.link_speed = ETH_SPEED_NUM_100M;
2111                         link.link_duplex = ETH_LINK_FULL_DUPLEX;
2112                         PMD_DRV_LOG(ERR, "Failed to get link info");
2113                         goto out;
2114                 }
2115
2116                 link.link_status = link_status.link_info & ICE_AQ_LINK_UP;
2117                 if (!wait_to_complete || link.link_status)
2118                         break;
2119
2120                 rte_delay_ms(CHECK_INTERVAL);
2121         } while (--rep_cnt);
2122
2123         if (!link.link_status)
2124                 goto out;
2125
2126         /* Full-duplex operation at all supported speeds */
2127         link.link_duplex = ETH_LINK_FULL_DUPLEX;
2128
2129         /* Parse the link status */
2130         switch (link_status.link_speed) {
2131         case ICE_AQ_LINK_SPEED_10MB:
2132                 link.link_speed = ETH_SPEED_NUM_10M;
2133                 break;
2134         case ICE_AQ_LINK_SPEED_100MB:
2135                 link.link_speed = ETH_SPEED_NUM_100M;
2136                 break;
2137         case ICE_AQ_LINK_SPEED_1000MB:
2138                 link.link_speed = ETH_SPEED_NUM_1G;
2139                 break;
2140         case ICE_AQ_LINK_SPEED_2500MB:
2141                 link.link_speed = ETH_SPEED_NUM_2_5G;
2142                 break;
2143         case ICE_AQ_LINK_SPEED_5GB:
2144                 link.link_speed = ETH_SPEED_NUM_5G;
2145                 break;
2146         case ICE_AQ_LINK_SPEED_10GB:
2147                 link.link_speed = ETH_SPEED_NUM_10G;
2148                 break;
2149         case ICE_AQ_LINK_SPEED_20GB:
2150                 link.link_speed = ETH_SPEED_NUM_20G;
2151                 break;
2152         case ICE_AQ_LINK_SPEED_25GB:
2153                 link.link_speed = ETH_SPEED_NUM_25G;
2154                 break;
2155         case ICE_AQ_LINK_SPEED_40GB:
2156                 link.link_speed = ETH_SPEED_NUM_40G;
2157                 break;
2158         case ICE_AQ_LINK_SPEED_50GB:
2159                 link.link_speed = ETH_SPEED_NUM_50G;
2160                 break;
2161         case ICE_AQ_LINK_SPEED_100GB:
2162                 link.link_speed = ETH_SPEED_NUM_100G;
2163                 break;
2164         case ICE_AQ_LINK_SPEED_UNKNOWN:
2165         default:
2166                 PMD_DRV_LOG(ERR, "Unknown link speed");
2167                 link.link_speed = ETH_SPEED_NUM_NONE;
2168                 break;
2169         }
2170
2171         link.link_autoneg = !(dev->data->dev_conf.link_speeds &
2172                               ETH_LINK_SPEED_FIXED);
2173
2174 out:
2175         ice_atomic_write_link_status(dev, &link);
2176         if (link.link_status == old.link_status)
2177                 return -1;
2178
2179         return 0;
2180 }
2181
2182 static int
2183 ice_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
2184 {
2185         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2186         struct rte_eth_dev_data *dev_data = pf->dev_data;
2187         uint32_t frame_size = mtu + ETHER_HDR_LEN
2188                               + ETHER_CRC_LEN + ICE_VLAN_TAG_SIZE;
2189
2190         /* check if mtu is within the allowed range */
2191         if (mtu < ETHER_MIN_MTU || frame_size > ICE_FRAME_SIZE_MAX)
2192                 return -EINVAL;
2193
2194         /* mtu setting is forbidden if port is start */
2195         if (dev_data->dev_started) {
2196                 PMD_DRV_LOG(ERR,
2197                             "port %d must be stopped before configuration",
2198                             dev_data->port_id);
2199                 return -EBUSY;
2200         }
2201
2202         if (frame_size > ETHER_MAX_LEN)
2203                 dev_data->dev_conf.rxmode.offloads |=
2204                         DEV_RX_OFFLOAD_JUMBO_FRAME;
2205         else
2206                 dev_data->dev_conf.rxmode.offloads &=
2207                         ~DEV_RX_OFFLOAD_JUMBO_FRAME;
2208
2209         dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
2210
2211         return 0;
2212 }
2213
/* Replace the default MAC address: swap the VSI MAC filter from the
 * currently stored default address to mac_addr and program the new
 * address into firmware.  Returns 0 on success, -EINVAL for an invalid
 * address, -EIO when the filter lookup/update fails.
 */
static int ice_macaddr_set(struct rte_eth_dev *dev,
			   struct ether_addr *mac_addr)
{
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_vsi *vsi = pf->main_vsi;
	struct ice_mac_filter *f;
	uint8_t flags = 0;
	int ret;

	if (!is_valid_assigned_ether_addr(mac_addr)) {
		PMD_DRV_LOG(ERR, "Tried to set invalid MAC address.");
		return -EINVAL;
	}

	/* find the filter entry matching the current default address */
	TAILQ_FOREACH(f, &vsi->mac_list, next) {
		if (is_same_ether_addr(&pf->dev_addr, &f->mac_info.mac_addr))
			break;
	}

	if (!f) {
		PMD_DRV_LOG(ERR, "Failed to find filter for default mac");
		return -EIO;
	}

	/* swap filters: remove old default, then add the new address.
	 * NOTE(review): if the add below fails, the old filter has already
	 * been removed and is not restored here.
	 */
	ret = ice_remove_mac_filter(vsi, &f->mac_info.mac_addr);
	if (ret != ICE_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to delete mac filter");
		return -EIO;
	}
	ret = ice_add_mac_filter(vsi, mac_addr);
	if (ret != ICE_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to add mac filter");
		return -EIO;
	}
	/* remember the new default address */
	memcpy(&pf->dev_addr, mac_addr, ETH_ADDR_LEN);

	/* program the address into firmware; the flag name suggests
	 * LAA/WoL handling (ICE_AQC_MAN_MAC_UPDATE_LAA_WOL)
	 */
	flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL;
	ret = ice_aq_manage_mac_write(hw, mac_addr->addr_bytes, flags, NULL);
	if (ret != ICE_SUCCESS)
		PMD_DRV_LOG(ERR, "Failed to set manage mac");

	return 0;
}
2258
2259 /* Add a MAC address, and update filters */
2260 static int
2261 ice_macaddr_add(struct rte_eth_dev *dev,
2262                 struct ether_addr *mac_addr,
2263                 __rte_unused uint32_t index,
2264                 __rte_unused uint32_t pool)
2265 {
2266         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2267         struct ice_vsi *vsi = pf->main_vsi;
2268         int ret;
2269
2270         ret = ice_add_mac_filter(vsi, mac_addr);
2271         if (ret != ICE_SUCCESS) {
2272                 PMD_DRV_LOG(ERR, "Failed to add MAC filter");
2273                 return -EINVAL;
2274         }
2275
2276         return ICE_SUCCESS;
2277 }
2278
2279 /* Remove a MAC address, and update filters */
2280 static void
2281 ice_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
2282 {
2283         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2284         struct ice_vsi *vsi = pf->main_vsi;
2285         struct rte_eth_dev_data *data = dev->data;
2286         struct ether_addr *macaddr;
2287         int ret;
2288
2289         macaddr = &data->mac_addrs[index];
2290         ret = ice_remove_mac_filter(vsi, macaddr);
2291         if (ret) {
2292                 PMD_DRV_LOG(ERR, "Failed to remove MAC filter");
2293                 return;
2294         }
2295 }
2296
2297 static int
2298 ice_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
2299 {
2300         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2301         struct ice_vsi *vsi = pf->main_vsi;
2302         int ret;
2303
2304         PMD_INIT_FUNC_TRACE();
2305
2306         if (on) {
2307                 ret = ice_add_vlan_filter(vsi, vlan_id);
2308                 if (ret < 0) {
2309                         PMD_DRV_LOG(ERR, "Failed to add vlan filter");
2310                         return -EINVAL;
2311                 }
2312         } else {
2313                 ret = ice_remove_vlan_filter(vsi, vlan_id);
2314                 if (ret < 0) {
2315                         PMD_DRV_LOG(ERR, "Failed to remove vlan filter");
2316                         return -EINVAL;
2317                 }
2318         }
2319
2320         return 0;
2321 }
2322
2323 /* Configure vlan filter on or off */
2324 static int
2325 ice_vsi_config_vlan_filter(struct ice_vsi *vsi, bool on)
2326 {
2327         struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
2328         struct ice_vsi_ctx ctxt;
2329         uint8_t sec_flags, sw_flags2;
2330         int ret = 0;
2331
2332         sec_flags = ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
2333                     ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S;
2334         sw_flags2 = ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
2335
2336         if (on) {
2337                 vsi->info.sec_flags |= sec_flags;
2338                 vsi->info.sw_flags2 |= sw_flags2;
2339         } else {
2340                 vsi->info.sec_flags &= ~sec_flags;
2341                 vsi->info.sw_flags2 &= ~sw_flags2;
2342         }
2343         vsi->info.sw_id = hw->port_info->sw_id;
2344         (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
2345         ctxt.info.valid_sections =
2346                 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_SW_VALID |
2347                                  ICE_AQ_VSI_PROP_SECURITY_VALID);
2348         ctxt.vsi_num = vsi->vsi_id;
2349
2350         ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
2351         if (ret) {
2352                 PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan rx pruning",
2353                             on ? "enable" : "disable");
2354                 return -EINVAL;
2355         } else {
2356                 vsi->info.valid_sections |=
2357                         rte_cpu_to_le_16(ICE_AQ_VSI_PROP_SW_VALID |
2358                                          ICE_AQ_VSI_PROP_SECURITY_VALID);
2359         }
2360
2361         /* consist with other drivers, allow untagged packet when vlan filter on */
2362         if (on)
2363                 ret = ice_add_vlan_filter(vsi, 0);
2364         else
2365                 ret = ice_remove_vlan_filter(vsi, 0);
2366
2367         return 0;
2368 }
2369
/* Enable or disable VLAN stripping on the VSI by updating the inner
 * VLAN emulation mode (EMOD) field of the VSI context.  A no-op when
 * the requested mode is already programmed.  Returns 0 on success,
 * -EINVAL when the VSI update fails.
 */
static int
ice_vsi_config_vlan_stripping(struct ice_vsi *vsi, bool on)
{
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	struct ice_vsi_ctx ctxt;
	uint8_t vlan_flags;
	int ret = 0;

	/* Check if it has been already on or off */
	if (vsi->info.valid_sections &
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID)) {
		if (on) {
			if ((vsi->info.vlan_flags &
			     ICE_AQ_VSI_VLAN_EMOD_M) ==
			    ICE_AQ_VSI_VLAN_EMOD_STR_BOTH)
				return 0; /* already on */
		} else {
			if ((vsi->info.vlan_flags &
			     ICE_AQ_VSI_VLAN_EMOD_M) ==
			    ICE_AQ_VSI_VLAN_EMOD_NOTHING)
				return 0; /* already off */
		}
	}

	/* select the new EMOD value and splice it into the flags field */
	if (on)
		vlan_flags = ICE_AQ_VSI_VLAN_EMOD_STR_BOTH;
	else
		vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING;
	vsi->info.vlan_flags &= ~(ICE_AQ_VSI_VLAN_EMOD_M);
	vsi->info.vlan_flags |= vlan_flags;
	(void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
	ctxt.info.valid_sections =
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID);
	ctxt.vsi_num = vsi->vsi_id;
	ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
	if (ret) {
		PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan stripping",
			    on ? "enable" : "disable");
		return -EINVAL;
	}

	/* mark the VLAN section of the cached context as valid */
	vsi->info.valid_sections |=
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID);

	return ret;
}
2416
2417 static int
2418 ice_vlan_offload_set(struct rte_eth_dev *dev, int mask)
2419 {
2420         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2421         struct ice_vsi *vsi = pf->main_vsi;
2422         struct rte_eth_rxmode *rxmode;
2423
2424         rxmode = &dev->data->dev_conf.rxmode;
2425         if (mask & ETH_VLAN_FILTER_MASK) {
2426                 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
2427                         ice_vsi_config_vlan_filter(vsi, TRUE);
2428                 else
2429                         ice_vsi_config_vlan_filter(vsi, FALSE);
2430         }
2431
2432         if (mask & ETH_VLAN_STRIP_MASK) {
2433                 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
2434                         ice_vsi_config_vlan_stripping(vsi, TRUE);
2435                 else
2436                         ice_vsi_config_vlan_stripping(vsi, FALSE);
2437         }
2438
2439         if (mask & ETH_VLAN_EXTEND_MASK) {
2440                 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
2441                         ice_vsi_config_double_vlan(vsi, TRUE);
2442                 else
2443                         ice_vsi_config_double_vlan(vsi, FALSE);
2444         }
2445
2446         return 0;
2447 }
2448
2449 static int
2450 ice_vlan_tpid_set(struct rte_eth_dev *dev,
2451                   enum rte_vlan_type vlan_type,
2452                   uint16_t tpid)
2453 {
2454         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2455         uint64_t reg_r = 0, reg_w = 0;
2456         uint16_t reg_id = 0;
2457         int ret = 0;
2458         int qinq = dev->data->dev_conf.rxmode.offloads &
2459                    DEV_RX_OFFLOAD_VLAN_EXTEND;
2460
2461         switch (vlan_type) {
2462         case ETH_VLAN_TYPE_OUTER:
2463                 if (qinq)
2464                         reg_id = 3;
2465                 else
2466                         reg_id = 5;
2467         break;
2468         case ETH_VLAN_TYPE_INNER:
2469                 if (qinq) {
2470                         reg_id = 5;
2471                 } else {
2472                         PMD_DRV_LOG(ERR,
2473                                     "Unsupported vlan type in single vlan.");
2474                         return -EINVAL;
2475                 }
2476                 break;
2477         default:
2478                 PMD_DRV_LOG(ERR, "Unsupported vlan type %d", vlan_type);
2479                 return -EINVAL;
2480         }
2481         reg_r = ICE_READ_REG(hw, GL_SWT_L2TAGCTRL(reg_id));
2482         PMD_DRV_LOG(DEBUG, "Debug read from ICE GL_SWT_L2TAGCTRL[%d]: "
2483                     "0x%08"PRIx64"", reg_id, reg_r);
2484
2485         reg_w = reg_r & (~(GL_SWT_L2TAGCTRL_ETHERTYPE_M));
2486         reg_w |= ((uint64_t)tpid << GL_SWT_L2TAGCTRL_ETHERTYPE_S);
2487         if (reg_r == reg_w) {
2488                 PMD_DRV_LOG(DEBUG, "No need to write");
2489                 return 0;
2490         }
2491
2492         ICE_WRITE_REG(hw, GL_SWT_L2TAGCTRL(reg_id), reg_w);
2493         PMD_DRV_LOG(DEBUG, "Debug write 0x%08"PRIx64" to "
2494                     "ICE GL_SWT_L2TAGCTRL[%d]", reg_w, reg_id);
2495
2496         return ret;
2497 }
2498
2499 static int
2500 ice_get_rss_lut(struct ice_vsi *vsi, uint8_t *lut, uint16_t lut_size)
2501 {
2502         struct ice_pf *pf = ICE_VSI_TO_PF(vsi);
2503         struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
2504         int ret;
2505
2506         if (!lut)
2507                 return -EINVAL;
2508
2509         if (pf->flags & ICE_FLAG_RSS_AQ_CAPABLE) {
2510                 ret = ice_aq_get_rss_lut(hw, vsi->idx, TRUE,
2511                                          lut, lut_size);
2512                 if (ret) {
2513                         PMD_DRV_LOG(ERR, "Failed to get RSS lookup table");
2514                         return -EINVAL;
2515                 }
2516         } else {
2517                 uint64_t *lut_dw = (uint64_t *)lut;
2518                 uint16_t i, lut_size_dw = lut_size / 4;
2519
2520                 for (i = 0; i < lut_size_dw; i++)
2521                         lut_dw[i] = ICE_READ_REG(hw, PFQF_HLUT(i));
2522         }
2523
2524         return 0;
2525 }
2526
2527 static int
2528 ice_set_rss_lut(struct ice_vsi *vsi, uint8_t *lut, uint16_t lut_size)
2529 {
2530         struct ice_pf *pf = ICE_VSI_TO_PF(vsi);
2531         struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
2532         int ret;
2533
2534         if (!vsi || !lut)
2535                 return -EINVAL;
2536
2537         if (pf->flags & ICE_FLAG_RSS_AQ_CAPABLE) {
2538                 ret = ice_aq_set_rss_lut(hw, vsi->idx, TRUE,
2539                                          lut, lut_size);
2540                 if (ret) {
2541                         PMD_DRV_LOG(ERR, "Failed to set RSS lookup table");
2542                         return -EINVAL;
2543                 }
2544         } else {
2545                 uint64_t *lut_dw = (uint64_t *)lut;
2546                 uint16_t i, lut_size_dw = lut_size / 4;
2547
2548                 for (i = 0; i < lut_size_dw; i++)
2549                         ICE_WRITE_REG(hw, PFQF_HLUT(i), lut_dw[i]);
2550
2551                 ice_flush(hw);
2552         }
2553
2554         return 0;
2555 }
2556
2557 static int
2558 ice_rss_reta_update(struct rte_eth_dev *dev,
2559                     struct rte_eth_rss_reta_entry64 *reta_conf,
2560                     uint16_t reta_size)
2561 {
2562         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2563         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2564         uint16_t i, lut_size = hw->func_caps.common_cap.rss_table_size;
2565         uint16_t idx, shift;
2566         uint8_t *lut;
2567         int ret;
2568
2569         if (reta_size != lut_size ||
2570             reta_size > ETH_RSS_RETA_SIZE_512) {
2571                 PMD_DRV_LOG(ERR,
2572                             "The size of hash lookup table configured (%d)"
2573                             "doesn't match the number hardware can "
2574                             "supported (%d)",
2575                             reta_size, lut_size);
2576                 return -EINVAL;
2577         }
2578
2579         lut = rte_zmalloc(NULL, reta_size, 0);
2580         if (!lut) {
2581                 PMD_DRV_LOG(ERR, "No memory can be allocated");
2582                 return -ENOMEM;
2583         }
2584         ret = ice_get_rss_lut(pf->main_vsi, lut, reta_size);
2585         if (ret)
2586                 goto out;
2587
2588         for (i = 0; i < reta_size; i++) {
2589                 idx = i / RTE_RETA_GROUP_SIZE;
2590                 shift = i % RTE_RETA_GROUP_SIZE;
2591                 if (reta_conf[idx].mask & (1ULL << shift))
2592                         lut[i] = reta_conf[idx].reta[shift];
2593         }
2594         ret = ice_set_rss_lut(pf->main_vsi, lut, reta_size);
2595
2596 out:
2597         rte_free(lut);
2598
2599         return ret;
2600 }
2601
2602 static int
2603 ice_rss_reta_query(struct rte_eth_dev *dev,
2604                    struct rte_eth_rss_reta_entry64 *reta_conf,
2605                    uint16_t reta_size)
2606 {
2607         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2608         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2609         uint16_t i, lut_size = hw->func_caps.common_cap.rss_table_size;
2610         uint16_t idx, shift;
2611         uint8_t *lut;
2612         int ret;
2613
2614         if (reta_size != lut_size ||
2615             reta_size > ETH_RSS_RETA_SIZE_512) {
2616                 PMD_DRV_LOG(ERR,
2617                             "The size of hash lookup table configured (%d)"
2618                             "doesn't match the number hardware can "
2619                             "supported (%d)",
2620                             reta_size, lut_size);
2621                 return -EINVAL;
2622         }
2623
2624         lut = rte_zmalloc(NULL, reta_size, 0);
2625         if (!lut) {
2626                 PMD_DRV_LOG(ERR, "No memory can be allocated");
2627                 return -ENOMEM;
2628         }
2629
2630         ret = ice_get_rss_lut(pf->main_vsi, lut, reta_size);
2631         if (ret)
2632                 goto out;
2633
2634         for (i = 0; i < reta_size; i++) {
2635                 idx = i / RTE_RETA_GROUP_SIZE;
2636                 shift = i % RTE_RETA_GROUP_SIZE;
2637                 if (reta_conf[idx].mask & (1ULL << shift))
2638                         reta_conf[idx].reta[shift] = lut[i];
2639         }
2640
2641 out:
2642         rte_free(lut);
2643
2644         return ret;
2645 }
2646
2647 static int
2648 ice_set_rss_key(struct ice_vsi *vsi, uint8_t *key, uint8_t key_len)
2649 {
2650         struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
2651         int ret = 0;
2652
2653         if (!key || key_len == 0) {
2654                 PMD_DRV_LOG(DEBUG, "No key to be configured");
2655                 return 0;
2656         } else if (key_len != (VSIQF_HKEY_MAX_INDEX + 1) *
2657                    sizeof(uint32_t)) {
2658                 PMD_DRV_LOG(ERR, "Invalid key length %u", key_len);
2659                 return -EINVAL;
2660         }
2661
2662         struct ice_aqc_get_set_rss_keys *key_dw =
2663                 (struct ice_aqc_get_set_rss_keys *)key;
2664
2665         ret = ice_aq_set_rss_key(hw, vsi->idx, key_dw);
2666         if (ret) {
2667                 PMD_DRV_LOG(ERR, "Failed to configure RSS key via AQ");
2668                 ret = -EINVAL;
2669         }
2670
2671         return ret;
2672 }
2673
2674 static int
2675 ice_get_rss_key(struct ice_vsi *vsi, uint8_t *key, uint8_t *key_len)
2676 {
2677         struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
2678         int ret;
2679
2680         if (!key || !key_len)
2681                 return -EINVAL;
2682
2683         ret = ice_aq_get_rss_key
2684                 (hw, vsi->idx,
2685                  (struct ice_aqc_get_set_rss_keys *)key);
2686         if (ret) {
2687                 PMD_DRV_LOG(ERR, "Failed to get RSS key via AQ");
2688                 return -EINVAL;
2689         }
2690         *key_len = (VSIQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
2691
2692         return 0;
2693 }
2694
2695 static int
2696 ice_rss_hash_update(struct rte_eth_dev *dev,
2697                     struct rte_eth_rss_conf *rss_conf)
2698 {
2699         enum ice_status status = ICE_SUCCESS;
2700         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2701         struct ice_vsi *vsi = pf->main_vsi;
2702
2703         /* set hash key */
2704         status = ice_set_rss_key(vsi, rss_conf->rss_key, rss_conf->rss_key_len);
2705         if (status)
2706                 return status;
2707
2708         /* TODO: hash enable config, ice_add_rss_cfg */
2709         return 0;
2710 }
2711
2712 static int
2713 ice_rss_hash_conf_get(struct rte_eth_dev *dev,
2714                       struct rte_eth_rss_conf *rss_conf)
2715 {
2716         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2717         struct ice_vsi *vsi = pf->main_vsi;
2718
2719         ice_get_rss_key(vsi, rss_conf->rss_key,
2720                         &rss_conf->rss_key_len);
2721
2722         /* TODO: default set to 0 as hf config is not supported now */
2723         rss_conf->rss_hf = 0;
2724         return 0;
2725 }
2726
2727 static void
2728 ice_promisc_enable(struct rte_eth_dev *dev)
2729 {
2730         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2731         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2732         struct ice_vsi *vsi = pf->main_vsi;
2733         uint8_t pmask;
2734         uint16_t status;
2735
2736         pmask = ICE_PROMISC_UCAST_RX | ICE_PROMISC_UCAST_TX |
2737                 ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX;
2738
2739         status = ice_set_vsi_promisc(hw, vsi->idx, pmask, 0);
2740         if (status != ICE_SUCCESS)
2741                 PMD_DRV_LOG(ERR, "Failed to enable promisc, err=%d", status);
2742 }
2743
2744 static void
2745 ice_promisc_disable(struct rte_eth_dev *dev)
2746 {
2747         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2748         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2749         struct ice_vsi *vsi = pf->main_vsi;
2750         uint16_t status;
2751         uint8_t pmask;
2752
2753         pmask = ICE_PROMISC_UCAST_RX | ICE_PROMISC_UCAST_TX |
2754                 ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX;
2755
2756         status = ice_clear_vsi_promisc(hw, vsi->idx, pmask, 0);
2757         if (status != ICE_SUCCESS)
2758                 PMD_DRV_LOG(ERR, "Failed to clear promisc, err=%d", status);
2759 }
2760
2761 static void
2762 ice_allmulti_enable(struct rte_eth_dev *dev)
2763 {
2764         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2765         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2766         struct ice_vsi *vsi = pf->main_vsi;
2767         uint8_t pmask;
2768         uint16_t status;
2769
2770         pmask = ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX;
2771
2772         status = ice_set_vsi_promisc(hw, vsi->idx, pmask, 0);
2773         if (status != ICE_SUCCESS)
2774                 PMD_DRV_LOG(ERR, "Failed to enable allmulti, err=%d", status);
2775 }
2776
2777 static void
2778 ice_allmulti_disable(struct rte_eth_dev *dev)
2779 {
2780         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2781         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2782         struct ice_vsi *vsi = pf->main_vsi;
2783         uint16_t status;
2784         uint8_t pmask;
2785
2786         if (dev->data->promiscuous == 1)
2787                 return; /* must remain in all_multicast mode */
2788
2789         pmask = ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX;
2790
2791         status = ice_clear_vsi_promisc(hw, vsi->idx, pmask, 0);
2792         if (status != ICE_SUCCESS)
2793                 PMD_DRV_LOG(ERR, "Failed to clear allmulti, err=%d", status);
2794 }
2795
2796 static int ice_rx_queue_intr_enable(struct rte_eth_dev *dev,
2797                                     uint16_t queue_id)
2798 {
2799         struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
2800         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2801         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2802         uint32_t val;
2803         uint16_t msix_intr;
2804
2805         msix_intr = intr_handle->intr_vec[queue_id];
2806
2807         val = GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
2808               GLINT_DYN_CTL_ITR_INDX_M;
2809         val &= ~GLINT_DYN_CTL_WB_ON_ITR_M;
2810
2811         ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr), val);
2812         rte_intr_enable(&pci_dev->intr_handle);
2813
2814         return 0;
2815 }
2816
2817 static int ice_rx_queue_intr_disable(struct rte_eth_dev *dev,
2818                                      uint16_t queue_id)
2819 {
2820         struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
2821         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2822         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2823         uint16_t msix_intr;
2824
2825         msix_intr = intr_handle->intr_vec[queue_id];
2826
2827         ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr), GLINT_DYN_CTL_WB_ON_ITR_M);
2828
2829         return 0;
2830 }
2831
2832 static int
2833 ice_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
2834 {
2835         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2836         u32 full_ver;
2837         u8 ver, patch;
2838         u16 build;
2839         int ret;
2840
2841         full_ver = hw->nvm.oem_ver;
2842         ver = (u8)(full_ver >> 24);
2843         build = (u16)((full_ver >> 8) & 0xffff);
2844         patch = (u8)(full_ver & 0xff);
2845
2846         ret = snprintf(fw_version, fw_size,
2847                         "%d.%d%d 0x%08x %d.%d.%d",
2848                         ((hw->nvm.ver >> 12) & 0xf),
2849                         ((hw->nvm.ver >> 4) & 0xff),
2850                         (hw->nvm.ver & 0xf), hw->nvm.eetrack,
2851                         ver, build, patch);
2852
2853         /* add the size of '\0' */
2854         ret += 1;
2855         if (fw_size < (u32)ret)
2856                 return ret;
2857         else
2858                 return 0;
2859 }
2860
/**
 * Configure port-based VLAN (PVID) insertion for a VSI.
 *
 * When info->on is set, the given PVID is inserted on TX and only
 * tagged packets may be sent; when clear, PVID insertion is disabled
 * and the accepted TX tag modes follow info->config.reject.
 *
 * @param vsi   VSI to configure; must be non-NULL
 * @param info  requested PVID state; must be non-NULL
 * @return 0 on success, -EINVAL on bad arguments or AQ failure
 */
static int
ice_vsi_vlan_pvid_set(struct ice_vsi *vsi, struct ice_vsi_vlan_pvid_info *info)
{
	struct ice_hw *hw;
	struct ice_vsi_ctx ctxt;
	uint8_t vlan_flags = 0;
	int ret;

	if (!vsi || !info) {
		PMD_DRV_LOG(ERR, "invalid parameters");
		return -EINVAL;
	}

	if (info->on) {
		vsi->info.pvid = info->config.pvid;
		/**
		 * If insert pvid is enabled, only tagged pkts are
		 * allowed to be sent out.
		 */
		vlan_flags = ICE_AQ_VSI_PVLAN_INSERT_PVID |
			     ICE_AQ_VSI_VLAN_MODE_UNTAGGED;
	} else {
		vsi->info.pvid = 0;
		/* reject flags select which TX tag modes remain allowed */
		if (info->config.reject.tagged == 0)
			vlan_flags |= ICE_AQ_VSI_VLAN_MODE_TAGGED;

		if (info->config.reject.untagged == 0)
			vlan_flags |= ICE_AQ_VSI_VLAN_MODE_UNTAGGED;
	}
	/* replace only the PVID-insert and VLAN-mode bits in the mirror */
	vsi->info.vlan_flags &= ~(ICE_AQ_VSI_PVLAN_INSERT_PVID |
				  ICE_AQ_VSI_VLAN_MODE_M);
	vsi->info.vlan_flags |= vlan_flags;
	/* build a clean context carrying only the VLAN section */
	memset(&ctxt, 0, sizeof(ctxt));
	rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
	ctxt.info.valid_sections =
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID);
	ctxt.vsi_num = vsi->vsi_id;

	hw = ICE_VSI_TO_HW(vsi);
	ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
	if (ret != ICE_SUCCESS) {
		PMD_DRV_LOG(ERR,
			    "update VSI for VLAN insert failed, err %d",
			    ret);
		return -EINVAL;
	}

	/* remember the VLAN section of the cached info is now valid */
	vsi->info.valid_sections |=
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID);

	return ret;
}
2913
2914 static int
2915 ice_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
2916 {
2917         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2918         struct ice_vsi *vsi = pf->main_vsi;
2919         struct rte_eth_dev_data *data = pf->dev_data;
2920         struct ice_vsi_vlan_pvid_info info;
2921         int ret;
2922
2923         memset(&info, 0, sizeof(info));
2924         info.on = on;
2925         if (info.on) {
2926                 info.config.pvid = pvid;
2927         } else {
2928                 info.config.reject.tagged =
2929                         data->dev_conf.txmode.hw_vlan_reject_tagged;
2930                 info.config.reject.untagged =
2931                         data->dev_conf.txmode.hw_vlan_reject_untagged;
2932         }
2933
2934         ret = ice_vsi_vlan_pvid_set(vsi, &info);
2935         if (ret < 0) {
2936                 PMD_DRV_LOG(ERR, "Failed to set pvid.");
2937                 return -EINVAL;
2938         }
2939
2940         return 0;
2941 }
2942
2943 static int
2944 ice_get_eeprom_length(struct rte_eth_dev *dev)
2945 {
2946         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2947
2948         /* Convert word count to byte count */
2949         return hw->nvm.sr_words << 1;
2950 }
2951
2952 static int
2953 ice_get_eeprom(struct rte_eth_dev *dev,
2954                struct rte_dev_eeprom_info *eeprom)
2955 {
2956         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2957         uint16_t *data = eeprom->data;
2958         uint16_t first_word, last_word, nwords;
2959         enum ice_status status = ICE_SUCCESS;
2960
2961         first_word = eeprom->offset >> 1;
2962         last_word = (eeprom->offset + eeprom->length - 1) >> 1;
2963         nwords = last_word - first_word + 1;
2964
2965         if (first_word > hw->nvm.sr_words ||
2966             last_word > hw->nvm.sr_words) {
2967                 PMD_DRV_LOG(ERR, "Requested EEPROM bytes out of range.");
2968                 return -EINVAL;
2969         }
2970
2971         eeprom->magic = hw->vendor_id | (hw->device_id << 16);
2972
2973         status = ice_read_sr_buf(hw, first_word, &nwords, data);
2974         if (status) {
2975                 PMD_DRV_LOG(ERR, "EEPROM read failed.");
2976                 eeprom->length = sizeof(uint16_t) * nwords;
2977                 return -EIO;
2978         }
2979
2980         return 0;
2981 }
2982
2983 static void
2984 ice_stat_update_32(struct ice_hw *hw,
2985                    uint32_t reg,
2986                    bool offset_loaded,
2987                    uint64_t *offset,
2988                    uint64_t *stat)
2989 {
2990         uint64_t new_data;
2991
2992         new_data = (uint64_t)ICE_READ_REG(hw, reg);
2993         if (!offset_loaded)
2994                 *offset = new_data;
2995
2996         if (new_data >= *offset)
2997                 *stat = (uint64_t)(new_data - *offset);
2998         else
2999                 *stat = (uint64_t)((new_data +
3000                                     ((uint64_t)1 << ICE_32_BIT_WIDTH))
3001                                    - *offset);
3002 }
3003
3004 static void
3005 ice_stat_update_40(struct ice_hw *hw,
3006                    uint32_t hireg,
3007                    uint32_t loreg,
3008                    bool offset_loaded,
3009                    uint64_t *offset,
3010                    uint64_t *stat)
3011 {
3012         uint64_t new_data;
3013
3014         new_data = (uint64_t)ICE_READ_REG(hw, loreg);
3015         new_data |= (uint64_t)(ICE_READ_REG(hw, hireg) & ICE_8_BIT_MASK) <<
3016                     ICE_32_BIT_WIDTH;
3017
3018         if (!offset_loaded)
3019                 *offset = new_data;
3020
3021         if (new_data >= *offset)
3022                 *stat = new_data - *offset;
3023         else
3024                 *stat = (uint64_t)((new_data +
3025                                     ((uint64_t)1 << ICE_40_BIT_WIDTH)) -
3026                                    *offset);
3027
3028         *stat &= ICE_40_BIT_MASK;
3029 }
3030
/* Get all the statistics of a VSI.
 *
 * Reads every per-VSI HW counter (GLV_* register pairs) and accumulates
 * the deltas into vsi->eth_stats, using vsi->eth_stats_offset as the
 * baseline. The first call only seeds the offsets (offset_loaded is
 * false until set at the end). Results are also dumped at DEBUG level.
 */
static void
ice_update_vsi_stats(struct ice_vsi *vsi)
{
	struct ice_eth_stats *oes = &vsi->eth_stats_offset;
	struct ice_eth_stats *nes = &vsi->eth_stats;
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	int idx = rte_le_to_cpu_16(vsi->vsi_id);

	/* RX byte/packet counters, 40-bit split across H/L registers */
	ice_stat_update_40(hw, GLV_GORCH(idx), GLV_GORCL(idx),
			   vsi->offset_loaded, &oes->rx_bytes,
			   &nes->rx_bytes);
	ice_stat_update_40(hw, GLV_UPRCH(idx), GLV_UPRCL(idx),
			   vsi->offset_loaded, &oes->rx_unicast,
			   &nes->rx_unicast);
	ice_stat_update_40(hw, GLV_MPRCH(idx), GLV_MPRCL(idx),
			   vsi->offset_loaded, &oes->rx_multicast,
			   &nes->rx_multicast);
	ice_stat_update_40(hw, GLV_BPRCH(idx), GLV_BPRCL(idx),
			   vsi->offset_loaded, &oes->rx_broadcast,
			   &nes->rx_broadcast);
	/* exclude CRC bytes */
	nes->rx_bytes -= (nes->rx_unicast + nes->rx_multicast +
			  nes->rx_broadcast) * ETHER_CRC_LEN;

	ice_stat_update_32(hw, GLV_RDPC(idx), vsi->offset_loaded,
			   &oes->rx_discards, &nes->rx_discards);
	/* GLV_REPC not supported */
	/* GLV_RMPC not supported */
	ice_stat_update_32(hw, GLSWID_RUPP(idx), vsi->offset_loaded,
			   &oes->rx_unknown_protocol,
			   &nes->rx_unknown_protocol);
	/* TX byte/packet counters, 40-bit split across H/L registers */
	ice_stat_update_40(hw, GLV_GOTCH(idx), GLV_GOTCL(idx),
			   vsi->offset_loaded, &oes->tx_bytes,
			   &nes->tx_bytes);
	ice_stat_update_40(hw, GLV_UPTCH(idx), GLV_UPTCL(idx),
			   vsi->offset_loaded, &oes->tx_unicast,
			   &nes->tx_unicast);
	ice_stat_update_40(hw, GLV_MPTCH(idx), GLV_MPTCL(idx),
			   vsi->offset_loaded, &oes->tx_multicast,
			   &nes->tx_multicast);
	ice_stat_update_40(hw, GLV_BPTCH(idx), GLV_BPTCL(idx),
			   vsi->offset_loaded,  &oes->tx_broadcast,
			   &nes->tx_broadcast);
	/* GLV_TDPC not supported */
	ice_stat_update_32(hw, GLV_TEPC(idx), vsi->offset_loaded,
			   &oes->tx_errors, &nes->tx_errors);
	/* subsequent calls compute deltas against the seeded offsets */
	vsi->offset_loaded = true;

	PMD_DRV_LOG(DEBUG, "************** VSI[%u] stats start **************",
		    vsi->vsi_id);
	PMD_DRV_LOG(DEBUG, "rx_bytes:            %"PRIu64"", nes->rx_bytes);
	PMD_DRV_LOG(DEBUG, "rx_unicast:          %"PRIu64"", nes->rx_unicast);
	PMD_DRV_LOG(DEBUG, "rx_multicast:        %"PRIu64"", nes->rx_multicast);
	PMD_DRV_LOG(DEBUG, "rx_broadcast:        %"PRIu64"", nes->rx_broadcast);
	PMD_DRV_LOG(DEBUG, "rx_discards:         %"PRIu64"", nes->rx_discards);
	PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
		    nes->rx_unknown_protocol);
	PMD_DRV_LOG(DEBUG, "tx_bytes:            %"PRIu64"", nes->tx_bytes);
	PMD_DRV_LOG(DEBUG, "tx_unicast:          %"PRIu64"", nes->tx_unicast);
	PMD_DRV_LOG(DEBUG, "tx_multicast:        %"PRIu64"", nes->tx_multicast);
	PMD_DRV_LOG(DEBUG, "tx_broadcast:        %"PRIu64"", nes->tx_broadcast);
	PMD_DRV_LOG(DEBUG, "tx_discards:         %"PRIu64"", nes->tx_discards);
	PMD_DRV_LOG(DEBUG, "tx_errors:           %"PRIu64"", nes->tx_errors);
	PMD_DRV_LOG(DEBUG, "************** VSI[%u] stats end ****************",
		    vsi->vsi_id);
}
3098
3099 static void
3100 ice_read_stats_registers(struct ice_pf *pf, struct ice_hw *hw)
3101 {
3102         struct ice_hw_port_stats *ns = &pf->stats; /* new stats */
3103         struct ice_hw_port_stats *os = &pf->stats_offset; /* old stats */
3104
3105         /* Get statistics of struct ice_eth_stats */
3106         ice_stat_update_40(hw, GLPRT_GORCH(hw->port_info->lport),
3107                            GLPRT_GORCL(hw->port_info->lport),
3108                            pf->offset_loaded, &os->eth.rx_bytes,
3109                            &ns->eth.rx_bytes);
3110         ice_stat_update_40(hw, GLPRT_UPRCH(hw->port_info->lport),
3111                            GLPRT_UPRCL(hw->port_info->lport),
3112                            pf->offset_loaded, &os->eth.rx_unicast,
3113                            &ns->eth.rx_unicast);
3114         ice_stat_update_40(hw, GLPRT_MPRCH(hw->port_info->lport),
3115                            GLPRT_MPRCL(hw->port_info->lport),
3116                            pf->offset_loaded, &os->eth.rx_multicast,
3117                            &ns->eth.rx_multicast);
3118         ice_stat_update_40(hw, GLPRT_BPRCH(hw->port_info->lport),
3119                            GLPRT_BPRCL(hw->port_info->lport),
3120                            pf->offset_loaded, &os->eth.rx_broadcast,
3121                            &ns->eth.rx_broadcast);
3122         ice_stat_update_32(hw, PRTRPB_RDPC,
3123                            pf->offset_loaded, &os->eth.rx_discards,
3124                            &ns->eth.rx_discards);
3125
3126         /* Workaround: CRC size should not be included in byte statistics,
3127          * so subtract ETHER_CRC_LEN from the byte counter for each rx packet.
3128          */
3129         ns->eth.rx_bytes -= (ns->eth.rx_unicast + ns->eth.rx_multicast +
3130                              ns->eth.rx_broadcast) * ETHER_CRC_LEN;
3131
3132         /* GLPRT_REPC not supported */
3133         /* GLPRT_RMPC not supported */
3134         ice_stat_update_32(hw, GLSWID_RUPP(hw->port_info->lport),
3135                            pf->offset_loaded,
3136                            &os->eth.rx_unknown_protocol,
3137                            &ns->eth.rx_unknown_protocol);
3138         ice_stat_update_40(hw, GLPRT_GOTCH(hw->port_info->lport),
3139                            GLPRT_GOTCL(hw->port_info->lport),
3140                            pf->offset_loaded, &os->eth.tx_bytes,
3141                            &ns->eth.tx_bytes);
3142         ice_stat_update_40(hw, GLPRT_UPTCH(hw->port_info->lport),
3143                            GLPRT_UPTCL(hw->port_info->lport),
3144                            pf->offset_loaded, &os->eth.tx_unicast,
3145                            &ns->eth.tx_unicast);
3146         ice_stat_update_40(hw, GLPRT_MPTCH(hw->port_info->lport),
3147                            GLPRT_MPTCL(hw->port_info->lport),
3148                            pf->offset_loaded, &os->eth.tx_multicast,
3149                            &ns->eth.tx_multicast);
3150         ice_stat_update_40(hw, GLPRT_BPTCH(hw->port_info->lport),
3151                            GLPRT_BPTCL(hw->port_info->lport),
3152                            pf->offset_loaded, &os->eth.tx_broadcast,
3153                            &ns->eth.tx_broadcast);
3154         ns->eth.tx_bytes -= (ns->eth.tx_unicast + ns->eth.tx_multicast +
3155                              ns->eth.tx_broadcast) * ETHER_CRC_LEN;
3156
3157         /* GLPRT_TEPC not supported */
3158
3159         /* additional port specific stats */
3160         ice_stat_update_32(hw, GLPRT_TDOLD(hw->port_info->lport),
3161                            pf->offset_loaded, &os->tx_dropped_link_down,
3162                            &ns->tx_dropped_link_down);
3163         ice_stat_update_32(hw, GLPRT_CRCERRS(hw->port_info->lport),
3164                            pf->offset_loaded, &os->crc_errors,
3165                            &ns->crc_errors);
3166         ice_stat_update_32(hw, GLPRT_ILLERRC(hw->port_info->lport),
3167                            pf->offset_loaded, &os->illegal_bytes,
3168                            &ns->illegal_bytes);
3169         /* GLPRT_ERRBC not supported */
3170         ice_stat_update_32(hw, GLPRT_MLFC(hw->port_info->lport),
3171                            pf->offset_loaded, &os->mac_local_faults,
3172                            &ns->mac_local_faults);
3173         ice_stat_update_32(hw, GLPRT_MRFC(hw->port_info->lport),
3174                            pf->offset_loaded, &os->mac_remote_faults,
3175                            &ns->mac_remote_faults);
3176
3177         ice_stat_update_32(hw, GLPRT_RLEC(hw->port_info->lport),
3178                            pf->offset_loaded, &os->rx_len_errors,
3179                            &ns->rx_len_errors);
3180
3181         ice_stat_update_32(hw, GLPRT_LXONRXC(hw->port_info->lport),
3182                            pf->offset_loaded, &os->link_xon_rx,
3183                            &ns->link_xon_rx);
3184         ice_stat_update_32(hw, GLPRT_LXOFFRXC(hw->port_info->lport),
3185                            pf->offset_loaded, &os->link_xoff_rx,
3186                            &ns->link_xoff_rx);
3187         ice_stat_update_32(hw, GLPRT_LXONTXC(hw->port_info->lport),
3188                            pf->offset_loaded, &os->link_xon_tx,
3189                            &ns->link_xon_tx);
3190         ice_stat_update_32(hw, GLPRT_LXOFFTXC(hw->port_info->lport),
3191                            pf->offset_loaded, &os->link_xoff_tx,
3192                            &ns->link_xoff_tx);
3193         ice_stat_update_40(hw, GLPRT_PRC64H(hw->port_info->lport),
3194                            GLPRT_PRC64L(hw->port_info->lport),
3195                            pf->offset_loaded, &os->rx_size_64,
3196                            &ns->rx_size_64);
3197         ice_stat_update_40(hw, GLPRT_PRC127H(hw->port_info->lport),
3198                            GLPRT_PRC127L(hw->port_info->lport),
3199                            pf->offset_loaded, &os->rx_size_127,
3200                            &ns->rx_size_127);
3201         ice_stat_update_40(hw, GLPRT_PRC255H(hw->port_info->lport),
3202                            GLPRT_PRC255L(hw->port_info->lport),
3203                            pf->offset_loaded, &os->rx_size_255,
3204                            &ns->rx_size_255);
3205         ice_stat_update_40(hw, GLPRT_PRC511H(hw->port_info->lport),
3206                            GLPRT_PRC511L(hw->port_info->lport),
3207                            pf->offset_loaded, &os->rx_size_511,
3208                            &ns->rx_size_511);
3209         ice_stat_update_40(hw, GLPRT_PRC1023H(hw->port_info->lport),
3210                            GLPRT_PRC1023L(hw->port_info->lport),
3211                            pf->offset_loaded, &os->rx_size_1023,
3212                            &ns->rx_size_1023);
3213         ice_stat_update_40(hw, GLPRT_PRC1522H(hw->port_info->lport),
3214                            GLPRT_PRC1522L(hw->port_info->lport),
3215                            pf->offset_loaded, &os->rx_size_1522,
3216                            &ns->rx_size_1522);
3217         ice_stat_update_40(hw, GLPRT_PRC9522H(hw->port_info->lport),
3218                            GLPRT_PRC9522L(hw->port_info->lport),
3219                            pf->offset_loaded, &os->rx_size_big,
3220                            &ns->rx_size_big);
3221         ice_stat_update_32(hw, GLPRT_RUC(hw->port_info->lport),
3222                            pf->offset_loaded, &os->rx_undersize,
3223                            &ns->rx_undersize);
3224         ice_stat_update_32(hw, GLPRT_RFC(hw->port_info->lport),
3225                            pf->offset_loaded, &os->rx_fragments,
3226                            &ns->rx_fragments);
3227         ice_stat_update_32(hw, GLPRT_ROC(hw->port_info->lport),
3228                            pf->offset_loaded, &os->rx_oversize,
3229                            &ns->rx_oversize);
3230         ice_stat_update_32(hw, GLPRT_RJC(hw->port_info->lport),
3231                            pf->offset_loaded, &os->rx_jabber,
3232                            &ns->rx_jabber);
3233         ice_stat_update_40(hw, GLPRT_PTC64H(hw->port_info->lport),
3234                            GLPRT_PTC64L(hw->port_info->lport),
3235                            pf->offset_loaded, &os->tx_size_64,
3236                            &ns->tx_size_64);
3237         ice_stat_update_40(hw, GLPRT_PTC127H(hw->port_info->lport),
3238                            GLPRT_PTC127L(hw->port_info->lport),
3239                            pf->offset_loaded, &os->tx_size_127,
3240                            &ns->tx_size_127);
3241         ice_stat_update_40(hw, GLPRT_PTC255H(hw->port_info->lport),
3242                            GLPRT_PTC255L(hw->port_info->lport),
3243                            pf->offset_loaded, &os->tx_size_255,
3244                            &ns->tx_size_255);
3245         ice_stat_update_40(hw, GLPRT_PTC511H(hw->port_info->lport),
3246                            GLPRT_PTC511L(hw->port_info->lport),
3247                            pf->offset_loaded, &os->tx_size_511,
3248                            &ns->tx_size_511);
3249         ice_stat_update_40(hw, GLPRT_PTC1023H(hw->port_info->lport),
3250                            GLPRT_PTC1023L(hw->port_info->lport),
3251                            pf->offset_loaded, &os->tx_size_1023,
3252                            &ns->tx_size_1023);
3253         ice_stat_update_40(hw, GLPRT_PTC1522H(hw->port_info->lport),
3254                            GLPRT_PTC1522L(hw->port_info->lport),
3255                            pf->offset_loaded, &os->tx_size_1522,
3256                            &ns->tx_size_1522);
3257         ice_stat_update_40(hw, GLPRT_PTC9522H(hw->port_info->lport),
3258                            GLPRT_PTC9522L(hw->port_info->lport),
3259                            pf->offset_loaded, &os->tx_size_big,
3260                            &ns->tx_size_big);
3261
3262         /* GLPRT_MSPDC not supported */
3263         /* GLPRT_XEC not supported */
3264
3265         pf->offset_loaded = true;
3266
3267         if (pf->main_vsi)
3268                 ice_update_vsi_stats(pf->main_vsi);
3269 }
3270
3271 /* Get all statistics of a port */
3272 static int
3273 ice_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
3274 {
3275         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3276         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3277         struct ice_hw_port_stats *ns = &pf->stats; /* new stats */
3278
3279         /* call read registers - updates values, now write them to struct */
3280         ice_read_stats_registers(pf, hw);
3281
3282         stats->ipackets = ns->eth.rx_unicast +
3283                           ns->eth.rx_multicast +
3284                           ns->eth.rx_broadcast -
3285                           ns->eth.rx_discards -
3286                           pf->main_vsi->eth_stats.rx_discards;
3287         stats->opackets = ns->eth.tx_unicast +
3288                           ns->eth.tx_multicast +
3289                           ns->eth.tx_broadcast;
3290         stats->ibytes   = ns->eth.rx_bytes;
3291         stats->obytes   = ns->eth.tx_bytes;
3292         stats->oerrors  = ns->eth.tx_errors +
3293                           pf->main_vsi->eth_stats.tx_errors;
3294
3295         /* Rx Errors */
3296         stats->imissed  = ns->eth.rx_discards +
3297                           pf->main_vsi->eth_stats.rx_discards;
3298         stats->ierrors  = ns->crc_errors +
3299                           ns->rx_undersize +
3300                           ns->rx_oversize + ns->rx_fragments + ns->rx_jabber;
3301
3302         PMD_DRV_LOG(DEBUG, "*************** PF stats start *****************");
3303         PMD_DRV_LOG(DEBUG, "rx_bytes:   %"PRIu64"", ns->eth.rx_bytes);
3304         PMD_DRV_LOG(DEBUG, "rx_unicast: %"PRIu64"", ns->eth.rx_unicast);
3305         PMD_DRV_LOG(DEBUG, "rx_multicast:%"PRIu64"", ns->eth.rx_multicast);
3306         PMD_DRV_LOG(DEBUG, "rx_broadcast:%"PRIu64"", ns->eth.rx_broadcast);
3307         PMD_DRV_LOG(DEBUG, "rx_discards:%"PRIu64"", ns->eth.rx_discards);
3308         PMD_DRV_LOG(DEBUG, "vsi rx_discards:%"PRIu64"",
3309                     pf->main_vsi->eth_stats.rx_discards);
3310         PMD_DRV_LOG(DEBUG, "rx_unknown_protocol:  %"PRIu64"",
3311                     ns->eth.rx_unknown_protocol);
3312         PMD_DRV_LOG(DEBUG, "tx_bytes:   %"PRIu64"", ns->eth.tx_bytes);
3313         PMD_DRV_LOG(DEBUG, "tx_unicast: %"PRIu64"", ns->eth.tx_unicast);
3314         PMD_DRV_LOG(DEBUG, "tx_multicast:%"PRIu64"", ns->eth.tx_multicast);
3315         PMD_DRV_LOG(DEBUG, "tx_broadcast:%"PRIu64"", ns->eth.tx_broadcast);
3316         PMD_DRV_LOG(DEBUG, "tx_discards:%"PRIu64"", ns->eth.tx_discards);
3317         PMD_DRV_LOG(DEBUG, "vsi tx_discards:%"PRIu64"",
3318                     pf->main_vsi->eth_stats.tx_discards);
3319         PMD_DRV_LOG(DEBUG, "tx_errors:          %"PRIu64"", ns->eth.tx_errors);
3320
3321         PMD_DRV_LOG(DEBUG, "tx_dropped_link_down:       %"PRIu64"",
3322                     ns->tx_dropped_link_down);
3323         PMD_DRV_LOG(DEBUG, "crc_errors: %"PRIu64"", ns->crc_errors);
3324         PMD_DRV_LOG(DEBUG, "illegal_bytes:      %"PRIu64"",
3325                     ns->illegal_bytes);
3326         PMD_DRV_LOG(DEBUG, "error_bytes:        %"PRIu64"", ns->error_bytes);
3327         PMD_DRV_LOG(DEBUG, "mac_local_faults:   %"PRIu64"",
3328                     ns->mac_local_faults);
3329         PMD_DRV_LOG(DEBUG, "mac_remote_faults:  %"PRIu64"",
3330                     ns->mac_remote_faults);
3331         PMD_DRV_LOG(DEBUG, "link_xon_rx:        %"PRIu64"", ns->link_xon_rx);
3332         PMD_DRV_LOG(DEBUG, "link_xoff_rx:       %"PRIu64"", ns->link_xoff_rx);
3333         PMD_DRV_LOG(DEBUG, "link_xon_tx:        %"PRIu64"", ns->link_xon_tx);
3334         PMD_DRV_LOG(DEBUG, "link_xoff_tx:       %"PRIu64"", ns->link_xoff_tx);
3335         PMD_DRV_LOG(DEBUG, "rx_size_64:         %"PRIu64"", ns->rx_size_64);
3336         PMD_DRV_LOG(DEBUG, "rx_size_127:        %"PRIu64"", ns->rx_size_127);
3337         PMD_DRV_LOG(DEBUG, "rx_size_255:        %"PRIu64"", ns->rx_size_255);
3338         PMD_DRV_LOG(DEBUG, "rx_size_511:        %"PRIu64"", ns->rx_size_511);
3339         PMD_DRV_LOG(DEBUG, "rx_size_1023:       %"PRIu64"", ns->rx_size_1023);
3340         PMD_DRV_LOG(DEBUG, "rx_size_1522:       %"PRIu64"", ns->rx_size_1522);
3341         PMD_DRV_LOG(DEBUG, "rx_size_big:        %"PRIu64"", ns->rx_size_big);
3342         PMD_DRV_LOG(DEBUG, "rx_undersize:       %"PRIu64"", ns->rx_undersize);
3343         PMD_DRV_LOG(DEBUG, "rx_fragments:       %"PRIu64"", ns->rx_fragments);
3344         PMD_DRV_LOG(DEBUG, "rx_oversize:        %"PRIu64"", ns->rx_oversize);
3345         PMD_DRV_LOG(DEBUG, "rx_jabber:          %"PRIu64"", ns->rx_jabber);
3346         PMD_DRV_LOG(DEBUG, "tx_size_64:         %"PRIu64"", ns->tx_size_64);
3347         PMD_DRV_LOG(DEBUG, "tx_size_127:        %"PRIu64"", ns->tx_size_127);
3348         PMD_DRV_LOG(DEBUG, "tx_size_255:        %"PRIu64"", ns->tx_size_255);
3349         PMD_DRV_LOG(DEBUG, "tx_size_511:        %"PRIu64"", ns->tx_size_511);
3350         PMD_DRV_LOG(DEBUG, "tx_size_1023:       %"PRIu64"", ns->tx_size_1023);
3351         PMD_DRV_LOG(DEBUG, "tx_size_1522:       %"PRIu64"", ns->tx_size_1522);
3352         PMD_DRV_LOG(DEBUG, "tx_size_big:        %"PRIu64"", ns->tx_size_big);
3353         PMD_DRV_LOG(DEBUG, "rx_len_errors:      %"PRIu64"", ns->rx_len_errors);
3354         PMD_DRV_LOG(DEBUG, "************* PF stats end ****************");
3355         return 0;
3356 }
3357
3358 /* Reset the statistics */
3359 static void
3360 ice_stats_reset(struct rte_eth_dev *dev)
3361 {
3362         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3363         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3364
3365         /* Mark PF and VSI stats to update the offset, aka "reset" */
3366         pf->offset_loaded = false;
3367         if (pf->main_vsi)
3368                 pf->main_vsi->offset_loaded = false;
3369
3370         /* read the stats, reading current register values into offset */
3371         ice_read_stats_registers(pf, hw);
3372 }
3373
3374 static uint32_t
3375 ice_xstats_calc_num(void)
3376 {
3377         uint32_t num;
3378
3379         num = ICE_NB_ETH_XSTATS + ICE_NB_HW_PORT_XSTATS;
3380
3381         return num;
3382 }
3383
3384 static int
3385 ice_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
3386                unsigned int n)
3387 {
3388         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3389         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3390         unsigned int i;
3391         unsigned int count;
3392         struct ice_hw_port_stats *hw_stats = &pf->stats;
3393
3394         count = ice_xstats_calc_num();
3395         if (n < count)
3396                 return count;
3397
3398         ice_read_stats_registers(pf, hw);
3399
3400         if (!xstats)
3401                 return 0;
3402
3403         count = 0;
3404
3405         /* Get stats from ice_eth_stats struct */
3406         for (i = 0; i < ICE_NB_ETH_XSTATS; i++) {
3407                 xstats[count].value =
3408                         *(uint64_t *)((char *)&hw_stats->eth +
3409                                       ice_stats_strings[i].offset);
3410                 xstats[count].id = count;
3411                 count++;
3412         }
3413
3414         /* Get individiual stats from ice_hw_port struct */
3415         for (i = 0; i < ICE_NB_HW_PORT_XSTATS; i++) {
3416                 xstats[count].value =
3417                         *(uint64_t *)((char *)hw_stats +
3418                                       ice_hw_port_strings[i].offset);
3419                 xstats[count].id = count;
3420                 count++;
3421         }
3422
3423         return count;
3424 }
3425
3426 static int ice_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
3427                                 struct rte_eth_xstat_name *xstats_names,
3428                                 __rte_unused unsigned int limit)
3429 {
3430         unsigned int count = 0;
3431         unsigned int i;
3432
3433         if (!xstats_names)
3434                 return ice_xstats_calc_num();
3435
3436         /* Note: limit checked in rte_eth_xstats_names() */
3437
3438         /* Get stats from ice_eth_stats struct */
3439         for (i = 0; i < ICE_NB_ETH_XSTATS; i++) {
3440                 snprintf(xstats_names[count].name,
3441                          sizeof(xstats_names[count].name),
3442                          "%s", ice_stats_strings[i].name);
3443                 count++;
3444         }
3445
3446         /* Get individiual stats from ice_hw_port struct */
3447         for (i = 0; i < ICE_NB_HW_PORT_XSTATS; i++) {
3448                 snprintf(xstats_names[count].name,
3449                          sizeof(xstats_names[count].name),
3450                          "%s", ice_hw_port_strings[i].name);
3451                 count++;
3452         }
3453
3454         return count;
3455 }
3456
/* PCI probe callback: allocate an ethdev whose private area holds a
 * struct ice_adapter and initialize it via ice_dev_init().
 */
static int
ice_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	      struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
					     sizeof(struct ice_adapter),
					     ice_dev_init);
}
3465
/* PCI remove callback: tear down the ethdev via ice_dev_uninit(). */
static int
ice_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, ice_dev_uninit);
}
3471
/* PCI driver descriptor for the ice PMD: the devices it binds to
 * (pci_id_ice_map) and its probe/remove entry points.
 */
static struct rte_pci_driver rte_ice_pmd = {
	.id_table = pci_id_ice_map,
	/* Needs BARs mapped, supports link-status interrupts and
	 * IOVA-as-VA mode.
	 */
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
		     RTE_PCI_DRV_IOVA_AS_VA,
	.probe = ice_pci_probe,
	.remove = ice_pci_remove,
};
3479
/**
 * Driver initialization routine.
 * Invoked once at EAL init time.
 * Register itself as the [Poll Mode] Driver of PCI devices.
 */
RTE_PMD_REGISTER_PCI(net_ice, rte_ice_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_ice, pci_id_ice_map);
/* Kernel modules this PMD can work on top of. */
RTE_PMD_REGISTER_KMOD_DEP(net_ice, "* igb_uio | uio_pci_generic | vfio-pci");
/* Devargs accepted by this PMD (see ICE_MAX_QP_NUM handling). */
RTE_PMD_REGISTER_PARAM_STRING(net_ice,
			      ICE_MAX_QP_NUM "=<int>");
3490
/* Constructor: register the PMD's dynamic log types at startup and
 * default both to NOTICE level. Registration order is kept as-is
 * since it determines the assigned log type IDs.
 */
RTE_INIT(ice_init_log)
{
	ice_logtype_init = rte_log_register("pmd.net.ice.init");
	if (ice_logtype_init >= 0)
		rte_log_set_level(ice_logtype_init, RTE_LOG_NOTICE);
	ice_logtype_driver = rte_log_register("pmd.net.ice.driver");
	if (ice_logtype_driver >= 0)
		rte_log_set_level(ice_logtype_driver, RTE_LOG_NOTICE);
}