net/ice: enable RSS when device init
[dpdk.git] / drivers / net / ice / ice_ethdev.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018 Intel Corporation
3  */
4
5 #include <rte_ethdev_pci.h>
6
7 #include <stdio.h>
8 #include <sys/types.h>
9 #include <sys/stat.h>
10 #include <unistd.h>
11
12 #include "base/ice_sched.h"
13 #include "base/ice_flow.h"
14 #include "ice_ethdev.h"
15 #include "ice_rxtx.h"
16
/* devargs key used to cap the number of queue pairs per VSI */
#define ICE_MAX_QP_NUM "max_queue_pair_num"
/* default outer VLAN TPID (0x9100) programmed for QinQ outer tags */
#define ICE_DFLT_OUTER_TAG_TYPE ICE_AQ_VSI_OUTER_TAG_VLAN_9100
/* default filesystem path of the ice DDP package (use not visible in this chunk) */
#define ICE_DFLT_PKG_FILE "/lib/firmware/intel/ice/ddp/ice.pkg"

/* dynamic log type ids for init- and driver-time logging */
int ice_logtype_init;
int ice_logtype_driver;
23
/* Forward declarations for the eth_dev ops implemented in this file */
static int ice_dev_configure(struct rte_eth_dev *dev);
static int ice_dev_start(struct rte_eth_dev *dev);
static void ice_dev_stop(struct rte_eth_dev *dev);
static void ice_dev_close(struct rte_eth_dev *dev);
static int ice_dev_reset(struct rte_eth_dev *dev);
static void ice_dev_info_get(struct rte_eth_dev *dev,
			     struct rte_eth_dev_info *dev_info);
static int ice_link_update(struct rte_eth_dev *dev,
			   int wait_to_complete);
static int ice_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
static int ice_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static int ice_vlan_tpid_set(struct rte_eth_dev *dev,
			     enum rte_vlan_type vlan_type,
			     uint16_t tpid);
static int ice_rss_reta_update(struct rte_eth_dev *dev,
			       struct rte_eth_rss_reta_entry64 *reta_conf,
			       uint16_t reta_size);
static int ice_rss_reta_query(struct rte_eth_dev *dev,
			      struct rte_eth_rss_reta_entry64 *reta_conf,
			      uint16_t reta_size);
static int ice_rss_hash_update(struct rte_eth_dev *dev,
			       struct rte_eth_rss_conf *rss_conf);
static int ice_rss_hash_conf_get(struct rte_eth_dev *dev,
				 struct rte_eth_rss_conf *rss_conf);
static void ice_promisc_enable(struct rte_eth_dev *dev);
static void ice_promisc_disable(struct rte_eth_dev *dev);
static void ice_allmulti_enable(struct rte_eth_dev *dev);
static void ice_allmulti_disable(struct rte_eth_dev *dev);
static int ice_vlan_filter_set(struct rte_eth_dev *dev,
			       uint16_t vlan_id,
			       int on);
static int ice_macaddr_set(struct rte_eth_dev *dev,
			   struct ether_addr *mac_addr);
static int ice_macaddr_add(struct rte_eth_dev *dev,
			   struct ether_addr *mac_addr,
			   __rte_unused uint32_t index,
			   uint32_t pool);
static void ice_macaddr_remove(struct rte_eth_dev *dev, uint32_t index);
static int ice_rx_queue_intr_enable(struct rte_eth_dev *dev,
				    uint16_t queue_id);
static int ice_rx_queue_intr_disable(struct rte_eth_dev *dev,
				     uint16_t queue_id);
static int ice_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
			      size_t fw_size);
static int ice_vlan_pvid_set(struct rte_eth_dev *dev,
			     uint16_t pvid, int on);
static int ice_get_eeprom_length(struct rte_eth_dev *dev);
static int ice_get_eeprom(struct rte_eth_dev *dev,
			  struct rte_dev_eeprom_info *eeprom);
static int ice_stats_get(struct rte_eth_dev *dev,
			 struct rte_eth_stats *stats);
static void ice_stats_reset(struct rte_eth_dev *dev);
static int ice_xstats_get(struct rte_eth_dev *dev,
			  struct rte_eth_xstat *xstats, unsigned int n);
static int ice_xstats_get_names(struct rte_eth_dev *dev,
				struct rte_eth_xstat_name *xstats_names,
				unsigned int limit);
81
/* PCI device IDs probed by this driver; zeroed entry terminates the table */
static const struct rte_pci_id pci_id_ice_map[] = {
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_BACKPLANE) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP) },
	{ .vendor_id = 0, /* sentinel */ },
};
88
/* ethdev callback table; note xstats_reset reuses ice_stats_reset */
static const struct eth_dev_ops ice_eth_dev_ops = {
	.dev_configure                = ice_dev_configure,
	.dev_start                    = ice_dev_start,
	.dev_stop                     = ice_dev_stop,
	.dev_close                    = ice_dev_close,
	.dev_reset                    = ice_dev_reset,
	.rx_queue_start               = ice_rx_queue_start,
	.rx_queue_stop                = ice_rx_queue_stop,
	.tx_queue_start               = ice_tx_queue_start,
	.tx_queue_stop                = ice_tx_queue_stop,
	.rx_queue_setup               = ice_rx_queue_setup,
	.rx_queue_release             = ice_rx_queue_release,
	.tx_queue_setup               = ice_tx_queue_setup,
	.tx_queue_release             = ice_tx_queue_release,
	.dev_infos_get                = ice_dev_info_get,
	.dev_supported_ptypes_get     = ice_dev_supported_ptypes_get,
	.link_update                  = ice_link_update,
	.mtu_set                      = ice_mtu_set,
	.mac_addr_set                 = ice_macaddr_set,
	.mac_addr_add                 = ice_macaddr_add,
	.mac_addr_remove              = ice_macaddr_remove,
	.vlan_filter_set              = ice_vlan_filter_set,
	.vlan_offload_set             = ice_vlan_offload_set,
	.vlan_tpid_set                = ice_vlan_tpid_set,
	.reta_update                  = ice_rss_reta_update,
	.reta_query                   = ice_rss_reta_query,
	.rss_hash_update              = ice_rss_hash_update,
	.rss_hash_conf_get            = ice_rss_hash_conf_get,
	.promiscuous_enable           = ice_promisc_enable,
	.promiscuous_disable          = ice_promisc_disable,
	.allmulticast_enable          = ice_allmulti_enable,
	.allmulticast_disable         = ice_allmulti_disable,
	.rx_queue_intr_enable         = ice_rx_queue_intr_enable,
	.rx_queue_intr_disable        = ice_rx_queue_intr_disable,
	.fw_version_get               = ice_fw_version_get,
	.vlan_pvid_set                = ice_vlan_pvid_set,
	.rxq_info_get                 = ice_rxq_info_get,
	.txq_info_get                 = ice_txq_info_get,
	.get_eeprom_length            = ice_get_eeprom_length,
	.get_eeprom                   = ice_get_eeprom,
	.rx_queue_count               = ice_rx_queue_count,
	.rx_descriptor_status         = ice_rx_descriptor_status,
	.tx_descriptor_status         = ice_tx_descriptor_status,
	.stats_get                    = ice_stats_get,
	.stats_reset                  = ice_stats_reset,
	.xstats_get                   = ice_xstats_get,
	.xstats_get_names             = ice_xstats_get_names,
	.xstats_reset                 = ice_stats_reset,
};
138
/* Maps an xstats display name to the field offset inside a stats struct;
 * entries are read with offsetof() so the structs can evolve safely.
 */
struct ice_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned int offset;
};

/* per-VSI ethernet statistics exposed through xstats */
static const struct ice_xstats_name_off ice_stats_strings[] = {
	{"rx_unicast_packets", offsetof(struct ice_eth_stats, rx_unicast)},
	{"rx_multicast_packets", offsetof(struct ice_eth_stats, rx_multicast)},
	{"rx_broadcast_packets", offsetof(struct ice_eth_stats, rx_broadcast)},
	{"rx_dropped", offsetof(struct ice_eth_stats, rx_discards)},
	{"rx_unknown_protocol_packets", offsetof(struct ice_eth_stats,
		rx_unknown_protocol)},
	{"tx_unicast_packets", offsetof(struct ice_eth_stats, tx_unicast)},
	{"tx_multicast_packets", offsetof(struct ice_eth_stats, tx_multicast)},
	{"tx_broadcast_packets", offsetof(struct ice_eth_stats, tx_broadcast)},
	{"tx_dropped", offsetof(struct ice_eth_stats, tx_discards)},
};

/* number of entries in ice_stats_strings */
#define ICE_NB_ETH_XSTATS (sizeof(ice_stats_strings) / \
		sizeof(ice_stats_strings[0]))
160
/* per-port hardware statistics exposed through xstats */
static const struct ice_xstats_name_off ice_hw_port_strings[] = {
	{"tx_link_down_dropped", offsetof(struct ice_hw_port_stats,
		tx_dropped_link_down)},
	{"rx_crc_errors", offsetof(struct ice_hw_port_stats, crc_errors)},
	{"rx_illegal_byte_errors", offsetof(struct ice_hw_port_stats,
		illegal_bytes)},
	{"rx_error_bytes", offsetof(struct ice_hw_port_stats, error_bytes)},
	{"mac_local_errors", offsetof(struct ice_hw_port_stats,
		mac_local_faults)},
	{"mac_remote_errors", offsetof(struct ice_hw_port_stats,
		mac_remote_faults)},
	{"rx_len_errors", offsetof(struct ice_hw_port_stats,
		rx_len_errors)},
	{"tx_xon_packets", offsetof(struct ice_hw_port_stats, link_xon_tx)},
	{"rx_xon_packets", offsetof(struct ice_hw_port_stats, link_xon_rx)},
	{"tx_xoff_packets", offsetof(struct ice_hw_port_stats, link_xoff_tx)},
	{"rx_xoff_packets", offsetof(struct ice_hw_port_stats, link_xoff_rx)},
	{"rx_size_64_packets", offsetof(struct ice_hw_port_stats, rx_size_64)},
	{"rx_size_65_to_127_packets", offsetof(struct ice_hw_port_stats,
		rx_size_127)},
	{"rx_size_128_to_255_packets", offsetof(struct ice_hw_port_stats,
		rx_size_255)},
	{"rx_size_256_to_511_packets", offsetof(struct ice_hw_port_stats,
		rx_size_511)},
	{"rx_size_512_to_1023_packets", offsetof(struct ice_hw_port_stats,
		rx_size_1023)},
	{"rx_size_1024_to_1522_packets", offsetof(struct ice_hw_port_stats,
		rx_size_1522)},
	{"rx_size_1523_to_max_packets", offsetof(struct ice_hw_port_stats,
		rx_size_big)},
	{"rx_undersized_errors", offsetof(struct ice_hw_port_stats,
		rx_undersize)},
	{"rx_oversize_errors", offsetof(struct ice_hw_port_stats,
		rx_oversize)},
	{"rx_mac_short_pkt_dropped", offsetof(struct ice_hw_port_stats,
		mac_short_pkt_dropped)},
	{"rx_fragmented_errors", offsetof(struct ice_hw_port_stats,
		rx_fragments)},
	{"rx_jabber_errors", offsetof(struct ice_hw_port_stats, rx_jabber)},
	{"tx_size_64_packets", offsetof(struct ice_hw_port_stats, tx_size_64)},
	{"tx_size_65_to_127_packets", offsetof(struct ice_hw_port_stats,
		tx_size_127)},
	{"tx_size_128_to_255_packets", offsetof(struct ice_hw_port_stats,
		tx_size_255)},
	{"tx_size_256_to_511_packets", offsetof(struct ice_hw_port_stats,
		tx_size_511)},
	{"tx_size_512_to_1023_packets", offsetof(struct ice_hw_port_stats,
		tx_size_1023)},
	{"tx_size_1024_to_1522_packets", offsetof(struct ice_hw_port_stats,
		tx_size_1522)},
	{"tx_size_1523_to_max_packets", offsetof(struct ice_hw_port_stats,
		tx_size_big)},
};

/* number of entries in ice_hw_port_strings */
#define ICE_NB_HW_PORT_XSTATS (sizeof(ice_hw_port_strings) / \
		sizeof(ice_hw_port_strings[0]))
217
/**
 * Fill in the ring lengths and buffer sizes for the control queues
 * (admin queue and PF<->VF mailbox queue) that the shared code reads
 * when it later allocates those queues.
 */
static void
ice_init_controlq_parameter(struct ice_hw *hw)
{
	/* fields for adminq */
	hw->adminq.num_rq_entries = ICE_ADMINQ_LEN;
	hw->adminq.num_sq_entries = ICE_ADMINQ_LEN;
	hw->adminq.rq_buf_size = ICE_ADMINQ_BUF_SZ;
	hw->adminq.sq_buf_size = ICE_ADMINQ_BUF_SZ;

	/* fields for mailboxq, DPDK used as PF host */
	hw->mailboxq.num_rq_entries = ICE_MAILBOXQ_LEN;
	hw->mailboxq.num_sq_entries = ICE_MAILBOXQ_LEN;
	hw->mailboxq.rq_buf_size = ICE_MAILBOXQ_BUF_SZ;
	hw->mailboxq.sq_buf_size = ICE_MAILBOXQ_BUF_SZ;
}
233
234 static int
235 ice_check_qp_num(const char *key, const char *qp_value,
236                  __rte_unused void *opaque)
237 {
238         char *end = NULL;
239         int num = 0;
240
241         while (isblank(*qp_value))
242                 qp_value++;
243
244         num = strtoul(qp_value, &end, 10);
245
246         if (!num || (*end == '-') || errno) {
247                 PMD_DRV_LOG(WARNING, "invalid value:\"%s\" for key:\"%s\", "
248                             "value must be > 0",
249                             qp_value, key);
250                 return -1;
251         }
252
253         return num;
254 }
255
256 static int
257 ice_config_max_queue_pair_num(struct rte_devargs *devargs)
258 {
259         struct rte_kvargs *kvlist;
260         const char *queue_num_key = ICE_MAX_QP_NUM;
261         int ret;
262
263         if (!devargs)
264                 return 0;
265
266         kvlist = rte_kvargs_parse(devargs->args, NULL);
267         if (!kvlist)
268                 return 0;
269
270         if (!rte_kvargs_count(kvlist, queue_num_key)) {
271                 rte_kvargs_free(kvlist);
272                 return 0;
273         }
274
275         if (rte_kvargs_process(kvlist, queue_num_key,
276                                ice_check_qp_num, NULL) < 0) {
277                 rte_kvargs_free(kvlist);
278                 return 0;
279         }
280         ret = rte_kvargs_process(kvlist, queue_num_key,
281                                  ice_check_qp_num, NULL);
282         rte_kvargs_free(kvlist);
283
284         return ret;
285 }
286
287 static int
288 ice_res_pool_init(struct ice_res_pool_info *pool, uint32_t base,
289                   uint32_t num)
290 {
291         struct pool_entry *entry;
292
293         if (!pool || !num)
294                 return -EINVAL;
295
296         entry = rte_zmalloc(NULL, sizeof(*entry), 0);
297         if (!entry) {
298                 PMD_INIT_LOG(ERR,
299                              "Failed to allocate memory for resource pool");
300                 return -ENOMEM;
301         }
302
303         /* queue heap initialize */
304         pool->num_free = num;
305         pool->num_alloc = 0;
306         pool->base = base;
307         LIST_INIT(&pool->alloc_list);
308         LIST_INIT(&pool->free_list);
309
310         /* Initialize element  */
311         entry->base = 0;
312         entry->len = num;
313
314         LIST_INSERT_HEAD(&pool->free_list, entry, next);
315         return 0;
316 }
317
/**
 * Allocate @num contiguous resources from the pool using a best-fit
 * search over the free list.
 *
 * @return the absolute base index of the allocated range (free-list
 *         offset plus pool->base) on success; -EINVAL on bad arguments,
 *         -ENOMEM when no free entry is large enough or memory for the
 *         tracking entry cannot be allocated
 */
static int
ice_res_pool_alloc(struct ice_res_pool_info *pool,
		   uint16_t num)
{
	struct pool_entry *entry, *valid_entry;

	if (!pool || !num) {
		PMD_INIT_LOG(ERR, "Invalid parameter");
		return -EINVAL;
	}

	if (pool->num_free < num) {
		PMD_INIT_LOG(ERR, "No resource. ask:%u, available:%u",
			     num, pool->num_free);
		return -ENOMEM;
	}

	valid_entry = NULL;
	/* Lookup in free list and find the best-fit (smallest sufficient)
	 * entry; an exact-size match wins immediately.
	 */
	LIST_FOREACH(entry, &pool->free_list, next) {
		if (entry->len >= num) {
			/* Find best one */
			if (entry->len == num) {
				valid_entry = entry;
				break;
			}
			if (!valid_entry ||
			    valid_entry->len > entry->len)
				valid_entry = entry;
		}
	}

	/* Not find one to satisfy the request, return */
	if (!valid_entry) {
		PMD_INIT_LOG(ERR, "No valid entry found");
		return -ENOMEM;
	}
	/**
	 * The entry has exactly the requested size: unlink it from the
	 * free list and move it wholesale to the alloc list below.
	 */
	if (valid_entry->len == num) {
		LIST_REMOVE(valid_entry, next);
	} else {
		/**
		 * The entry has more resources than requested: create a
		 * new entry for the alloc list and shrink the free-list
		 * entry's base/length by the amount taken.
		 */
		entry = rte_zmalloc(NULL, sizeof(*entry), 0);
		if (!entry) {
			PMD_INIT_LOG(ERR,
				     "Failed to allocate memory for "
				     "resource pool");
			return -ENOMEM;
		}
		entry->base = valid_entry->base;
		entry->len = num;
		valid_entry->base += num;
		valid_entry->len -= num;
		valid_entry = entry;
	}

	/* Insert it into alloc list, not sorted */
	LIST_INSERT_HEAD(&pool->alloc_list, valid_entry, next);

	pool->num_free -= valid_entry->len;
	pool->num_alloc += valid_entry->len;

	return valid_entry->base + pool->base;
}
389
390 static void
391 ice_res_pool_destroy(struct ice_res_pool_info *pool)
392 {
393         struct pool_entry *entry, *next_entry;
394
395         if (!pool)
396                 return;
397
398         for (entry = LIST_FIRST(&pool->alloc_list);
399              entry && (next_entry = LIST_NEXT(entry, next), 1);
400              entry = next_entry) {
401                 LIST_REMOVE(entry, next);
402                 rte_free(entry);
403         }
404
405         for (entry = LIST_FIRST(&pool->free_list);
406              entry && (next_entry = LIST_NEXT(entry, next), 1);
407              entry = next_entry) {
408                 LIST_REMOVE(entry, next);
409                 rte_free(entry);
410         }
411
412         pool->num_free = 0;
413         pool->num_alloc = 0;
414         pool->base = 0;
415         LIST_INIT(&pool->alloc_list);
416         LIST_INIT(&pool->free_list);
417 }
418
/**
 * Fill the queueing-option fields of a VSI context with the default RSS
 * configuration: per-VSI lookup table, Toeplitz hash, and TC override.
 */
static void
ice_vsi_config_default_rss(struct ice_aqc_vsi_props *info)
{
	/* Set VSI LUT selection (masked into the LUT field) */
	info->q_opt_rss = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI &
			  ICE_AQ_VSI_Q_OPT_RSS_LUT_M;
	/* Set hash scheme to Toeplitz (masked into the hash field) */
	info->q_opt_rss |= ICE_AQ_VSI_Q_OPT_RSS_TPLZ &
			   ICE_AQ_VSI_Q_OPT_RSS_HASH_M;
	/* enable TC override */
	info->q_opt_tc = ICE_AQ_VSI_Q_OPT_TC_OVR_M;
}
431
/**
 * Configure the TC-to-queue mapping of a VSI context.  Only TC0 is
 * supported; the VSI queue count is clamped to ICE_MAX_Q_PER_TC and
 * rounded to a power of two as required by the TC mapping encoding.
 *
 * @return 0 on success, -ENOTSUP when a TC other than TC0 is requested
 */
static enum ice_status
ice_vsi_config_tc_queue_mapping(struct ice_vsi *vsi,
				struct ice_aqc_vsi_props *info,
				uint8_t enabled_tcmap)
{
	uint16_t bsf, qp_idx;

	/* default tc 0 now. Multi-TC supporting need to be done later.
	 * Configure TC and queue mapping parameters, for enabled TC,
	 * allocate qpnum_per_tc queues to this traffic.
	 */
	if (enabled_tcmap != 0x01) {
		PMD_INIT_LOG(ERR, "only TC0 is supported");
		return -ENOTSUP;
	}

	vsi->nb_qps = RTE_MIN(vsi->nb_qps, ICE_MAX_Q_PER_TC);
	/* NOTE(review): rte_bsf32() yields the lowest set bit, which equals
	 * log2(nb_qps) only when nb_qps is a power of two -- confirm callers
	 * guarantee that, otherwise the queue count shrinks unexpectedly.
	 */
	bsf = rte_bsf32(vsi->nb_qps);
	/* Adjust the queue number to actual queues that can be applied */
	vsi->nb_qps = 0x1 << bsf;

	qp_idx = 0;
	/* Set tc and queue mapping with VSI: queue offset and the
	 * power-of-two queue count exponent packed into one word.
	 */
	info->tc_mapping[0] = rte_cpu_to_le_16((qp_idx <<
						ICE_AQ_VSI_TC_Q_OFFSET_S) |
					       (bsf << ICE_AQ_VSI_TC_Q_NUM_S));

	/* Associate queue number with VSI using the contiguous mapping mode */
	info->mapping_flags |= rte_cpu_to_le_16(ICE_AQ_VSI_Q_MAP_CONTIG);
	info->q_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
	info->q_mapping[1] = rte_cpu_to_le_16(vsi->nb_qps);
	info->valid_sections |=
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
	/* Set the info.ingress_table and info.egress_table
	 * for UP translate table. Now just set it to 1:1 map by default
	 * -- 0b 111 110 101 100 011 010 001 000 == 0xFAC688
	 */
#define ICE_TC_QUEUE_TABLE_DFLT 0x00FAC688
	info->ingress_table  = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT);
	info->egress_table   = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT);
	info->outer_up_table = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT);
	return 0;
}
475
/**
 * Validate the port's hardware MAC address, mirror it into the permanent
 * address field, and publish it in dev->data->mac_addrs[0].
 *
 * @return 0 on success, -EINVAL if the hardware address is not a valid
 *         unicast address, -ENOMEM if the address array cannot be
 *         allocated
 */
static int
ice_init_mac_address(struct rte_eth_dev *dev)
{
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (!is_unicast_ether_addr
		((struct ether_addr *)hw->port_info[0].mac.lan_addr)) {
		PMD_INIT_LOG(ERR, "Invalid MAC address");
		return -EINVAL;
	}

	ether_addr_copy((struct ether_addr *)hw->port_info[0].mac.lan_addr,
			(struct ether_addr *)hw->port_info[0].mac.perm_addr);

	/* room for a single MAC address only */
	dev->data->mac_addrs = rte_zmalloc(NULL, sizeof(struct ether_addr), 0);
	if (!dev->data->mac_addrs) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory to store mac address");
		return -ENOMEM;
	}
	/* store it to dev data */
	ether_addr_copy((struct ether_addr *)hw->port_info[0].mac.perm_addr,
			&dev->data->mac_addrs[0]);
	return 0;
}
501
502 /* Find out specific MAC filter */
503 static struct ice_mac_filter *
504 ice_find_mac_filter(struct ice_vsi *vsi, struct ether_addr *macaddr)
505 {
506         struct ice_mac_filter *f;
507
508         TAILQ_FOREACH(f, &vsi->mac_list, next) {
509                 if (is_same_ether_addr(macaddr, &f->mac_info.mac_addr))
510                         return f;
511         }
512
513         return NULL;
514 }
515
/**
 * Program a unicast MAC filter into the switch and track it on the VSI's
 * software MAC list.  Adding an address that is already tracked is a
 * successful no-op.
 *
 * @return 0 on success; -ENOMEM on allocation failure; -EINVAL if the
 *         switch rejects the filter
 *
 * NOTE(review): if the rte_zmalloc for the list node fails after
 * ice_add_mac() succeeded, the hardware filter stays programmed but is
 * not tracked -- confirm whether that is acceptable to callers.
 */
static int
ice_add_mac_filter(struct ice_vsi *vsi, struct ether_addr *mac_addr)
{
	struct ice_fltr_list_entry *m_list_itr = NULL;
	struct ice_mac_filter *f;
	struct LIST_HEAD_TYPE list_head;
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	int ret = 0;

	/* If it's added and configured, return */
	f = ice_find_mac_filter(vsi, mac_addr);
	if (f) {
		PMD_DRV_LOG(INFO, "This MAC filter already exists.");
		return 0;
	}

	INIT_LIST_HEAD(&list_head);

	/* build the single-entry request list for the shared code */
	m_list_itr = (struct ice_fltr_list_entry *)
		ice_malloc(hw, sizeof(*m_list_itr));
	if (!m_list_itr) {
		ret = -ENOMEM;
		goto DONE;
	}
	ice_memcpy(m_list_itr->fltr_info.l_data.mac.mac_addr,
		   mac_addr, ETH_ALEN, ICE_NONDMA_TO_NONDMA);
	m_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
	m_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
	m_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_MAC;
	m_list_itr->fltr_info.flag = ICE_FLTR_TX;
	m_list_itr->fltr_info.vsi_handle = vsi->idx;

	LIST_ADD(&m_list_itr->list_entry, &list_head);

	/* Add the mac */
	ret = ice_add_mac(hw, &list_head);
	if (ret != ICE_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to add MAC filter");
		ret = -EINVAL;
		goto DONE;
	}
	/* Add the mac addr into mac list */
	f = rte_zmalloc(NULL, sizeof(*f), 0);
	if (!f) {
		PMD_DRV_LOG(ERR, "failed to allocate memory");
		ret = -ENOMEM;
		goto DONE;
	}
	rte_memcpy(&f->mac_info.mac_addr, mac_addr, ETH_ADDR_LEN);
	TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
	vsi->mac_num++;

	ret = 0;

DONE:
	/* the request-list node is always ours to free (rte_free(NULL) ok) */
	rte_free(m_list_itr);
	return ret;
}
574
/**
 * Remove a MAC filter from the switch and drop its entry from the VSI's
 * software MAC list.
 *
 * @return 0 on success; -EINVAL if the address is not tracked or the
 *         switch rejects the removal; -ENOMEM on allocation failure
 */
static int
ice_remove_mac_filter(struct ice_vsi *vsi, struct ether_addr *mac_addr)
{
	struct ice_fltr_list_entry *m_list_itr = NULL;
	struct ice_mac_filter *f;
	struct LIST_HEAD_TYPE list_head;
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	int ret = 0;

	/* Can't find it, return an error */
	f = ice_find_mac_filter(vsi, mac_addr);
	if (!f)
		return -EINVAL;

	INIT_LIST_HEAD(&list_head);

	/* build the single-entry request list mirroring the add path */
	m_list_itr = (struct ice_fltr_list_entry *)
		ice_malloc(hw, sizeof(*m_list_itr));
	if (!m_list_itr) {
		ret = -ENOMEM;
		goto DONE;
	}
	ice_memcpy(m_list_itr->fltr_info.l_data.mac.mac_addr,
		   mac_addr, ETH_ALEN, ICE_NONDMA_TO_NONDMA);
	m_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
	m_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
	m_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_MAC;
	m_list_itr->fltr_info.flag = ICE_FLTR_TX;
	m_list_itr->fltr_info.vsi_handle = vsi->idx;

	LIST_ADD(&m_list_itr->list_entry, &list_head);

	/* remove the mac filter */
	ret = ice_remove_mac(hw, &list_head);
	if (ret != ICE_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to remove MAC filter");
		ret = -EINVAL;
		goto DONE;
	}

	/* Remove the mac addr from mac list */
	TAILQ_REMOVE(&vsi->mac_list, f, next);
	rte_free(f);
	vsi->mac_num--;

	ret = 0;
DONE:
	rte_free(m_list_itr);
	return ret;
}
625
626 /* Find out specific VLAN filter */
627 static struct ice_vlan_filter *
628 ice_find_vlan_filter(struct ice_vsi *vsi, uint16_t vlan_id)
629 {
630         struct ice_vlan_filter *f;
631
632         TAILQ_FOREACH(f, &vsi->vlan_list, next) {
633                 if (vlan_id == f->vlan_info.vlan_id)
634                         return f;
635         }
636
637         return NULL;
638 }
639
/**
 * Program a VLAN filter into the switch and track it on the VSI's
 * software VLAN list.  Adding an already-tracked VLAN is a successful
 * no-op, and nothing is programmed unless VLAN anti-spoofing or VLAN
 * filtering is enabled on the VSI.
 *
 * @return 0 on success (including the deliberate no-op cases);
 *         -EINVAL on bad arguments or switch rejection; -ENOMEM on
 *         allocation failure
 */
static int
ice_add_vlan_filter(struct ice_vsi *vsi, uint16_t vlan_id)
{
	struct ice_fltr_list_entry *v_list_itr = NULL;
	struct ice_vlan_filter *f;
	struct LIST_HEAD_TYPE list_head;
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	int ret = 0;

	if (!vsi || vlan_id > ETHER_MAX_VLAN_ID)
		return -EINVAL;

	/* If it's added and configured, return. */
	f = ice_find_vlan_filter(vsi, vlan_id);
	if (f) {
		PMD_DRV_LOG(INFO, "This VLAN filter already exists.");
		return 0;
	}

	/* no VLAN feature active -> deliberately skip HW programming */
	if (!vsi->vlan_anti_spoof_on && !vsi->vlan_filter_on)
		return 0;

	INIT_LIST_HEAD(&list_head);

	/* build the single-entry request list for the shared code */
	v_list_itr = (struct ice_fltr_list_entry *)
		      ice_malloc(hw, sizeof(*v_list_itr));
	if (!v_list_itr) {
		ret = -ENOMEM;
		goto DONE;
	}
	v_list_itr->fltr_info.l_data.vlan.vlan_id = vlan_id;
	v_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
	v_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
	v_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
	v_list_itr->fltr_info.flag = ICE_FLTR_TX;
	v_list_itr->fltr_info.vsi_handle = vsi->idx;

	LIST_ADD(&v_list_itr->list_entry, &list_head);

	/* Add the vlan */
	ret = ice_add_vlan(hw, &list_head);
	if (ret != ICE_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to add VLAN filter");
		ret = -EINVAL;
		goto DONE;
	}

	/* Add vlan into vlan list */
	f = rte_zmalloc(NULL, sizeof(*f), 0);
	if (!f) {
		PMD_DRV_LOG(ERR, "failed to allocate memory");
		ret = -ENOMEM;
		goto DONE;
	}
	f->vlan_info.vlan_id = vlan_id;
	TAILQ_INSERT_TAIL(&vsi->vlan_list, f, next);
	vsi->vlan_num++;

	ret = 0;

DONE:
	rte_free(v_list_itr);
	return ret;
}
704
/**
 * Remove a VLAN filter from the switch and drop its entry from the
 * VSI's software VLAN list.  VLAN 0 is the generic untagged-traffic
 * filter and is refused.
 *
 * @return 0 on success; -EINVAL on bad arguments, untracked VLAN, or
 *         switch rejection; -ENOMEM on allocation failure
 */
static int
ice_remove_vlan_filter(struct ice_vsi *vsi, uint16_t vlan_id)
{
	struct ice_fltr_list_entry *v_list_itr = NULL;
	struct ice_vlan_filter *f;
	struct LIST_HEAD_TYPE list_head;
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	int ret = 0;

	/**
	 * Vlan 0 is the generic filter for untagged packets
	 * and can't be removed.
	 */
	if (!vsi || vlan_id == 0 || vlan_id > ETHER_MAX_VLAN_ID)
		return -EINVAL;

	/* Can't find it, return an error */
	f = ice_find_vlan_filter(vsi, vlan_id);
	if (!f)
		return -EINVAL;

	INIT_LIST_HEAD(&list_head);

	/* build the single-entry request list mirroring the add path */
	v_list_itr = (struct ice_fltr_list_entry *)
		      ice_malloc(hw, sizeof(*v_list_itr));
	if (!v_list_itr) {
		ret = -ENOMEM;
		goto DONE;
	}

	v_list_itr->fltr_info.l_data.vlan.vlan_id = vlan_id;
	v_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
	v_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
	v_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
	v_list_itr->fltr_info.flag = ICE_FLTR_TX;
	v_list_itr->fltr_info.vsi_handle = vsi->idx;

	LIST_ADD(&v_list_itr->list_entry, &list_head);

	/* remove the vlan filter */
	ret = ice_remove_vlan(hw, &list_head);
	if (ret != ICE_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to remove VLAN filter");
		ret = -EINVAL;
		goto DONE;
	}

	/* Remove the vlan id from vlan list */
	TAILQ_REMOVE(&vsi->vlan_list, f, next);
	rte_free(f);
	vsi->vlan_num--;

	ret = 0;
DONE:
	rte_free(v_list_itr);
	return ret;
}
762
763 static int
764 ice_remove_all_mac_vlan_filters(struct ice_vsi *vsi)
765 {
766         struct ice_mac_filter *m_f;
767         struct ice_vlan_filter *v_f;
768         int ret = 0;
769
770         if (!vsi || !vsi->mac_num)
771                 return -EINVAL;
772
773         TAILQ_FOREACH(m_f, &vsi->mac_list, next) {
774                 ret = ice_remove_mac_filter(vsi, &m_f->mac_info.mac_addr);
775                 if (ret != ICE_SUCCESS) {
776                         ret = -EINVAL;
777                         goto DONE;
778                 }
779         }
780
781         if (vsi->vlan_num == 0)
782                 return 0;
783
784         TAILQ_FOREACH(v_f, &vsi->vlan_list, next) {
785                 ret = ice_remove_vlan_filter(vsi, v_f->vlan_info.vlan_id);
786                 if (ret != ICE_SUCCESS) {
787                         ret = -EINVAL;
788                         goto DONE;
789                 }
790         }
791
792 DONE:
793         return ret;
794 }
795
796 static int
797 ice_vsi_config_qinq_insertion(struct ice_vsi *vsi, bool on)
798 {
799         struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
800         struct ice_vsi_ctx ctxt;
801         uint8_t qinq_flags;
802         int ret = 0;
803
804         /* Check if it has been already on or off */
805         if (vsi->info.valid_sections &
806                 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID)) {
807                 if (on) {
808                         if ((vsi->info.outer_tag_flags &
809                              ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST) ==
810                             ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST)
811                                 return 0; /* already on */
812                 } else {
813                         if (!(vsi->info.outer_tag_flags &
814                               ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST))
815                                 return 0; /* already off */
816                 }
817         }
818
819         if (on)
820                 qinq_flags = ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST;
821         else
822                 qinq_flags = 0;
823         /* clear global insertion and use per packet insertion */
824         vsi->info.outer_tag_flags &= ~(ICE_AQ_VSI_OUTER_TAG_INSERT);
825         vsi->info.outer_tag_flags &= ~(ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST);
826         vsi->info.outer_tag_flags |= qinq_flags;
827         /* use default vlan type 0x8100 */
828         vsi->info.outer_tag_flags &= ~(ICE_AQ_VSI_OUTER_TAG_TYPE_M);
829         vsi->info.outer_tag_flags |= ICE_DFLT_OUTER_TAG_TYPE <<
830                                      ICE_AQ_VSI_OUTER_TAG_TYPE_S;
831         (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
832         ctxt.info.valid_sections =
833                         rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);
834         ctxt.vsi_num = vsi->vsi_id;
835         ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
836         if (ret) {
837                 PMD_DRV_LOG(INFO,
838                             "Update VSI failed to %s qinq stripping",
839                             on ? "enable" : "disable");
840                 return -EINVAL;
841         }
842
843         vsi->info.valid_sections |=
844                 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);
845
846         return ret;
847 }
848
849 static int
850 ice_vsi_config_qinq_stripping(struct ice_vsi *vsi, bool on)
851 {
852         struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
853         struct ice_vsi_ctx ctxt;
854         uint8_t qinq_flags;
855         int ret = 0;
856
857         /* Check if it has been already on or off */
858         if (vsi->info.valid_sections &
859                 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID)) {
860                 if (on) {
861                         if ((vsi->info.outer_tag_flags &
862                              ICE_AQ_VSI_OUTER_TAG_MODE_M) ==
863                             ICE_AQ_VSI_OUTER_TAG_COPY)
864                                 return 0; /* already on */
865                 } else {
866                         if ((vsi->info.outer_tag_flags &
867                              ICE_AQ_VSI_OUTER_TAG_MODE_M) ==
868                             ICE_AQ_VSI_OUTER_TAG_NOTHING)
869                                 return 0; /* already off */
870                 }
871         }
872
873         if (on)
874                 qinq_flags = ICE_AQ_VSI_OUTER_TAG_COPY;
875         else
876                 qinq_flags = ICE_AQ_VSI_OUTER_TAG_NOTHING;
877         vsi->info.outer_tag_flags &= ~(ICE_AQ_VSI_OUTER_TAG_MODE_M);
878         vsi->info.outer_tag_flags |= qinq_flags;
879         /* use default vlan type 0x8100 */
880         vsi->info.outer_tag_flags &= ~(ICE_AQ_VSI_OUTER_TAG_TYPE_M);
881         vsi->info.outer_tag_flags |= ICE_DFLT_OUTER_TAG_TYPE <<
882                                      ICE_AQ_VSI_OUTER_TAG_TYPE_S;
883         (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
884         ctxt.info.valid_sections =
885                         rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);
886         ctxt.vsi_num = vsi->vsi_id;
887         ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
888         if (ret) {
889                 PMD_DRV_LOG(INFO,
890                             "Update VSI failed to %s qinq stripping",
891                             on ? "enable" : "disable");
892                 return -EINVAL;
893         }
894
895         vsi->info.valid_sections |=
896                 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);
897
898         return ret;
899 }
900
901 static int
902 ice_vsi_config_double_vlan(struct ice_vsi *vsi, int on)
903 {
904         int ret;
905
906         ret = ice_vsi_config_qinq_stripping(vsi, on);
907         if (ret)
908                 PMD_DRV_LOG(ERR, "Fail to set qinq stripping - %d", ret);
909
910         ret = ice_vsi_config_qinq_insertion(vsi, on);
911         if (ret)
912                 PMD_DRV_LOG(ERR, "Fail to set qinq insertion - %d", ret);
913
914         return ret;
915 }
916
/* Enable IRQ0: arm the "other interrupt cause" (OICR) sources and
 * re-enable MSI-X vector 0.  Called after init and at the end of the
 * interrupt handler to re-arm the vector.
 */
static void
ice_pf_enable_irq0(struct ice_hw *hw)
{
	/* reset the registers */
	ICE_WRITE_REG(hw, PFINT_OICR_ENA, 0);
	/* value discarded — presumably a read-to-clear of the pending
	 * cause bits; confirm against the datasheet
	 */
	ICE_READ_REG(hw, PFINT_OICR);

#ifdef ICE_LSE_SPT
	/* enable every OICR cause except link-status-change, which is
	 * delivered via the FW/admin-queue cause in this build
	 */
	ICE_WRITE_REG(hw, PFINT_OICR_ENA,
		      (uint32_t)(PFINT_OICR_ENA_INT_ENA_M &
				 (~PFINT_OICR_LINK_STAT_CHANGE_M)));

	/* map the OICR cause to MSI-X vector 0, ITR index 0, and enable */
	ICE_WRITE_REG(hw, PFINT_OICR_CTL,
		      (0 & PFINT_OICR_CTL_MSIX_INDX_M) |
		      ((0 << PFINT_OICR_CTL_ITR_INDX_S) &
		       PFINT_OICR_CTL_ITR_INDX_M) |
		      PFINT_OICR_CTL_CAUSE_ENA_M);

	/* map the FW/admin-queue cause to MSI-X vector 0, ITR index 0 */
	ICE_WRITE_REG(hw, PFINT_FW_CTL,
		      (0 & PFINT_FW_CTL_MSIX_INDX_M) |
		      ((0 << PFINT_FW_CTL_ITR_INDX_S) &
		       PFINT_FW_CTL_ITR_INDX_M) |
		      PFINT_FW_CTL_CAUSE_ENA_M);
#else
	ICE_WRITE_REG(hw, PFINT_OICR_ENA, PFINT_OICR_ENA_INT_ENA_M);
#endif

	/* re-arm MSI-X vector 0 */
	ICE_WRITE_REG(hw, GLINT_DYN_CTL(0),
		      GLINT_DYN_CTL_INTENA_M |
		      GLINT_DYN_CTL_CLEARPBA_M |
		      GLINT_DYN_CTL_ITR_INDX_M);

	ice_flush(hw);
}
952
/* Disable IRQ0: quiesce MSI-X vector 0 (write-back on ITR, interrupt
 * not re-armed) and flush the write.
 */
static void
ice_pf_disable_irq0(struct ice_hw *hw)
{
	/* Disable all interrupt types */
	ICE_WRITE_REG(hw, GLINT_DYN_CTL(0), GLINT_DYN_CTL_WB_ON_ITR_M);
	ice_flush(hw);
}
961
#ifdef ICE_LSE_SPT
/* Drain the admin receive queue and dispatch the events found.
 * Only link-status-change events are acted upon (link update plus LSC
 * callback); every other opcode is logged at DEBUG and dropped.
 * Called from the interrupt handler when the FW control cause fired.
 */
static void
ice_handle_aq_msg(struct rte_eth_dev *dev)
{
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_ctl_q_info *cq = &hw->adminq;
	struct ice_rq_event_info event;
	uint16_t pending, opcode;
	int ret;

	event.buf_len = ICE_AQ_MAX_BUF_LEN;
	event.msg_buf = rte_zmalloc(NULL, event.buf_len, 0);
	if (!event.msg_buf) {
		PMD_DRV_LOG(ERR, "Failed to allocate mem");
		return;
	}

	/* keep reading until the queue reports nothing pending */
	pending = 1;
	while (pending) {
		ret = ice_clean_rq_elem(hw, cq, &event, &pending);

		if (ret != ICE_SUCCESS) {
			/* NOTE(review): logs sq_last_status while reading
			 * the receive queue — rq_last_status may be the
			 * intended field; confirm.
			 */
			PMD_DRV_LOG(INFO,
				    "Failed to read msg from AdminQ, "
				    "adminq_err: %u",
				    hw->adminq.sq_last_status);
			break;
		}
		opcode = rte_le_to_cpu_16(event.desc.opcode);

		switch (opcode) {
		case ice_aqc_opc_get_link_status:
			ret = ice_link_update(dev, 0);
			if (!ret)
				/* notify applications registered for LSC */
				_rte_eth_dev_callback_process
					(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
			break;
		default:
			PMD_DRV_LOG(DEBUG, "Request %u is not supported yet",
				    opcode);
			break;
		}
	}
	rte_free(event.msg_buf);
}
#endif
1008
/**
 * Interrupt handler triggered by NIC for handling
 * specific interrupt.
 *
 * @param handle
 *  Pointer to interrupt handle.
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
ice_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t oicr;
	uint32_t reg;
	uint8_t pf_num;
	uint8_t event;
	uint16_t queue;
#ifdef ICE_LSE_SPT
	uint32_t int_fw_ctl;
#endif

	/* Disable interrupt */
	ice_pf_disable_irq0(hw);

	/* read out interrupt causes */
	oicr = ICE_READ_REG(hw, PFINT_OICR);
#ifdef ICE_LSE_SPT
	int_fw_ctl = ICE_READ_REG(hw, PFINT_FW_CTL);
#endif

	/* No interrupt event indicated */
	if (!(oicr & PFINT_OICR_INTEVENT_M)) {
		PMD_DRV_LOG(INFO, "No interrupt event");
		goto done;
	}

#ifdef ICE_LSE_SPT
	/* link-status changes arrive through the FW/admin-queue cause */
	if (int_fw_ctl & PFINT_FW_CTL_INTEVENT_M) {
		PMD_DRV_LOG(INFO, "FW_CTL: link state change event");
		ice_handle_aq_msg(dev);
	}
#else
	/* link-status changes arrive directly as an OICR cause */
	if (oicr & PFINT_OICR_LINK_STAT_CHANGE_M) {
		PMD_DRV_LOG(INFO, "OICR: link state change event");
		ice_link_update(dev, 0);
	}
#endif

	/* Malicious Driver Detection: decode and log the offending
	 * PF/queue from the two Tx MDD detection registers
	 */
	if (oicr & PFINT_OICR_MAL_DETECT_M) {
		PMD_DRV_LOG(WARNING, "OICR: MDD event");
		reg = ICE_READ_REG(hw, GL_MDET_TX_PQM);
		if (reg & GL_MDET_TX_PQM_VALID_M) {
			pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >>
				 GL_MDET_TX_PQM_PF_NUM_S;
			event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >>
				GL_MDET_TX_PQM_MAL_TYPE_S;
			queue = (reg & GL_MDET_TX_PQM_QNUM_M) >>
				GL_MDET_TX_PQM_QNUM_S;

			PMD_DRV_LOG(WARNING, "Malicious Driver Detection event "
				    "%d by PQM on TX queue %d PF# %d",
				    event, queue, pf_num);
		}

		reg = ICE_READ_REG(hw, GL_MDET_TX_TCLAN);
		if (reg & GL_MDET_TX_TCLAN_VALID_M) {
			pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >>
				 GL_MDET_TX_TCLAN_PF_NUM_S;
			event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >>
				GL_MDET_TX_TCLAN_MAL_TYPE_S;
			queue = (reg & GL_MDET_TX_TCLAN_QNUM_M) >>
				GL_MDET_TX_TCLAN_QNUM_S;

			PMD_DRV_LOG(WARNING, "Malicious Driver Detection event "
				    "%d by TCLAN on TX queue %d PF# %d",
				    event, queue, pf_num);
		}
	}
done:
	/* Enable interrupt */
	ice_pf_enable_irq0(hw);
	rte_intr_enable(dev->intr_handle);
}
1097
1098 /*  Initialize SW parameters of PF */
1099 static int
1100 ice_pf_sw_init(struct rte_eth_dev *dev)
1101 {
1102         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1103         struct ice_hw *hw = ICE_PF_TO_HW(pf);
1104
1105         if (ice_config_max_queue_pair_num(dev->device->devargs) > 0)
1106                 pf->lan_nb_qp_max =
1107                         ice_config_max_queue_pair_num(dev->device->devargs);
1108         else
1109                 pf->lan_nb_qp_max =
1110                         (uint16_t)RTE_MIN(hw->func_caps.common_cap.num_txq,
1111                                           hw->func_caps.common_cap.num_rxq);
1112
1113         pf->lan_nb_qps = pf->lan_nb_qp_max;
1114
1115         return 0;
1116 }
1117
/* Allocate and configure a VSI of the given type (only ICE_VSI_PF is
 * supported).  Creates the VSI via admin queue, installs the default
 * unicast and broadcast MAC filters, and configures the Tx scheduler
 * for TC0.  Returns the new VSI, or NULL on failure.
 */
static struct ice_vsi *
ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_vsi *vsi = NULL;
	struct ice_vsi_ctx vsi_ctx;
	int ret;
	struct ether_addr broadcast = {
		.addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff} };
	struct ether_addr mac_addr;
	uint16_t max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
	uint8_t tc_bitmap = 0x1;

	/* hw->num_lports = 1 in NIC mode */
	vsi = rte_zmalloc(NULL, sizeof(struct ice_vsi), 0);
	if (!vsi)
		return NULL;

	vsi->idx = pf->next_vsi_idx;
	pf->next_vsi_idx++;
	vsi->type = type;
	vsi->adapter = ICE_PF_TO_ADAPTER(pf);
	vsi->max_macaddrs = ICE_NUM_MACADDR_MAX;
	vsi->vlan_anti_spoof_on = 0;
	vsi->vlan_filter_on = 1;
	TAILQ_INIT(&vsi->mac_list);
	TAILQ_INIT(&vsi->vlan_list);

	memset(&vsi_ctx, 0, sizeof(vsi_ctx));
	/* base_queue is used in queue mapping of VSI add/update command.
	 * Suppose vsi->base_queue is 0 now, don't consider SRIOV, VMDQ
	 * cases in the first stage. Only Main VSI.
	 */
	vsi->base_queue = 0;
	switch (type) {
	case ICE_VSI_PF:
		vsi->nb_qps = pf->lan_nb_qps;
		ice_vsi_config_default_rss(&vsi_ctx.info);
		vsi_ctx.alloc_from_pool = true;
		vsi_ctx.flags = ICE_AQ_VSI_TYPE_PF;
		/* switch_id is queried by get_switch_config aq, which is done
		 * by ice_init_hw
		 */
		vsi_ctx.info.sw_id = hw->port_info->sw_id;
		vsi_ctx.info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
		/* Allow all untagged or tagged packets */
		vsi_ctx.info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL;
		vsi_ctx.info.vlan_flags |= ICE_AQ_VSI_VLAN_EMOD_NOTHING;
		/* PF-owned RSS LUT, Toeplitz hash */
		vsi_ctx.info.q_opt_rss = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF |
					 ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
		/* Enable VLAN/UP trip */
		ret = ice_vsi_config_tc_queue_mapping(vsi,
						      &vsi_ctx.info,
						      ICE_DEFAULT_TCMAP);
		if (ret) {
			PMD_INIT_LOG(ERR,
				     "tc queue mapping with vsi failed, "
				     "err = %d",
				     ret);
			goto fail_mem;
		}

		break;
	default:
		/* for other types of VSI */
		PMD_INIT_LOG(ERR, "other types of VSI not supported");
		goto fail_mem;
	}

	/* VF has MSIX interrupt in VF range, don't allocate here */
	if (type == ICE_VSI_PF) {
		ret = ice_res_pool_alloc(&pf->msix_pool,
					 RTE_MIN(vsi->nb_qps,
						 RTE_MAX_RXTX_INTR_VEC_ID));
		if (ret < 0) {
			/* NOTE(review): on failure ret is negative yet is
			 * still stored in msix_intr below — confirm users
			 * of msix_intr tolerate that.
			 */
			PMD_INIT_LOG(ERR, "VSI MAIN %d get heap failed %d",
				     vsi->vsi_id, ret);
		}
		vsi->msix_intr = ret;
		vsi->nb_msix = RTE_MIN(vsi->nb_qps, RTE_MAX_RXTX_INTR_VEC_ID);
	} else {
		vsi->msix_intr = 0;
		vsi->nb_msix = 0;
	}
	ret = ice_add_vsi(hw, vsi->idx, &vsi_ctx, NULL);
	if (ret != ICE_SUCCESS) {
		PMD_INIT_LOG(ERR, "add vsi failed, err = %d", ret);
		goto fail_mem;
	}
	/* store vsi information in SW structure */
	vsi->vsi_id = vsi_ctx.vsi_num;
	vsi->info = vsi_ctx.info;
	pf->vsis_allocated = vsi_ctx.vsis_allocd;
	pf->vsis_unallocated = vsi_ctx.vsis_unallocated;

	/* MAC configuration: use the port's permanent MAC as default */
	rte_memcpy(pf->dev_addr.addr_bytes,
		   hw->port_info->mac.perm_addr,
		   ETH_ADDR_LEN);

	rte_memcpy(&mac_addr, &pf->dev_addr, ETHER_ADDR_LEN);
	ret = ice_add_mac_filter(vsi, &mac_addr);
	if (ret != ICE_SUCCESS)
		PMD_INIT_LOG(ERR, "Failed to add dflt MAC filter");

	rte_memcpy(&mac_addr, &broadcast, ETHER_ADDR_LEN);
	ret = ice_add_mac_filter(vsi, &mac_addr);
	if (ret != ICE_SUCCESS)
		PMD_INIT_LOG(ERR, "Failed to add MAC filter");

	/* At the beginning, only TC0. */
	/* What we need here is the maximum number of the TX queues.
	 * Currently vsi->nb_qps means it.
	 * Correct it if any change.
	 */
	max_txqs[0] = vsi->nb_qps;
	ret = ice_cfg_vsi_lan(hw->port_info, vsi->idx,
			      tc_bitmap, max_txqs);
	if (ret != ICE_SUCCESS)
		PMD_INIT_LOG(ERR, "Failed to config vsi sched");

	return vsi;
fail_mem:
	/* release the VSI struct and give back the reserved index */
	rte_free(vsi);
	pf->next_vsi_idx--;
	return NULL;
}
1245
1246 static int
1247 ice_pf_setup(struct ice_pf *pf)
1248 {
1249         struct ice_vsi *vsi;
1250
1251         /* Clear all stats counters */
1252         pf->offset_loaded = FALSE;
1253         memset(&pf->stats, 0, sizeof(struct ice_hw_port_stats));
1254         memset(&pf->stats_offset, 0, sizeof(struct ice_hw_port_stats));
1255         memset(&pf->internal_stats, 0, sizeof(struct ice_eth_stats));
1256         memset(&pf->internal_stats_offset, 0, sizeof(struct ice_eth_stats));
1257
1258         vsi = ice_setup_vsi(pf, ICE_VSI_PF);
1259         if (!vsi) {
1260                 PMD_INIT_LOG(ERR, "Failed to add vsi for PF");
1261                 return -EINVAL;
1262         }
1263
1264         pf->main_vsi = vsi;
1265
1266         return 0;
1267 }
1268
1269 static int ice_load_pkg(struct rte_eth_dev *dev)
1270 {
1271         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1272         const char *pkg_file = ICE_DFLT_PKG_FILE;
1273         int err;
1274         uint8_t *buf;
1275         int buf_len;
1276         FILE *file;
1277         struct stat fstat;
1278
1279         file = fopen(pkg_file, "rb");
1280         if (!file)  {
1281                 PMD_INIT_LOG(ERR, "failed to open file: %s\n", pkg_file);
1282                 return -1;
1283         }
1284
1285         err = stat(pkg_file, &fstat);
1286         if (err) {
1287                 PMD_INIT_LOG(ERR, "failed to get file stats\n");
1288                 fclose(file);
1289                 return err;
1290         }
1291
1292         buf_len = fstat.st_size;
1293         buf = rte_malloc(NULL, buf_len, 0);
1294
1295         if (!buf) {
1296                 PMD_INIT_LOG(ERR, "failed to allocate buf of size %d for package\n",
1297                                 buf_len);
1298                 fclose(file);
1299                 return -1;
1300         }
1301
1302         err = fread(buf, buf_len, 1, file);
1303         if (err != 1) {
1304                 PMD_INIT_LOG(ERR, "failed to read package data\n");
1305                 fclose(file);
1306                 err = -1;
1307                 goto fail_exit;
1308         }
1309
1310         fclose(file);
1311
1312         err = ice_copy_and_init_pkg(hw, buf, buf_len);
1313         if (err) {
1314                 PMD_INIT_LOG(ERR, "ice_copy_and_init_hw failed: %d\n", err);
1315                 goto fail_exit;
1316         }
1317         err = ice_init_hw_tbls(hw);
1318         if (err) {
1319                 PMD_INIT_LOG(ERR, "ice_init_hw_tbls failed: %d\n", err);
1320                 goto fail_init_tbls;
1321         }
1322
1323         return 0;
1324
1325 fail_init_tbls:
1326         rte_free(hw->pkg_copy);
1327 fail_exit:
1328         rte_free(buf);
1329         return err;
1330 }
1331
/* Device init entry point: wire up the ops/burst functions, bring up
 * the shared-code HW layer, load the DDP package (falling back to safe
 * mode on failure), create the PF's main VSI, and register/enable the
 * misc interrupt.  Returns 0 on success, negative errno-style code on
 * failure with all previously acquired resources released.
 */
static int
ice_dev_init(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev;
	struct rte_intr_handle *intr_handle;
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_adapter *ad =
		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct ice_vsi *vsi;
	int ret;

	dev->dev_ops = &ice_eth_dev_ops;
	dev->rx_pkt_burst = ice_recv_pkts;
	dev->tx_pkt_burst = ice_xmit_pkts;
	dev->tx_pkt_prepare = ice_prep_pkts;

	ice_set_default_ptype_table(dev);
	pci_dev = RTE_DEV_TO_PCI(dev->device);
	intr_handle = &pci_dev->intr_handle;

	pf->adapter = ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	pf->adapter->eth_dev = dev;
	pf->dev_data = dev->data;
	/* mirror the PCI identity into the shared-code HW struct */
	hw->back = pf->adapter;
	hw->hw_addr = (uint8_t *)pci_dev->mem_resource[0].addr;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->device_id = pci_dev->id.device_id;
	hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
	hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
	hw->bus.device = pci_dev->addr.devid;
	hw->bus.func = pci_dev->addr.function;

	ice_init_controlq_parameter(hw);

	ret = ice_init_hw(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to initialize HW");
		return -EINVAL;
	}

	/* DDP load failure is non-fatal: run with reduced features */
	ret = ice_load_pkg(dev);
	if (ret) {
		PMD_INIT_LOG(WARNING, "Failed to load the DDP package,"
				"Entering Safe Mode");
		ad->is_safe_mode = 1;
	}

	PMD_INIT_LOG(INFO, "FW %d.%d.%05d API %d.%d",
		     hw->fw_maj_ver, hw->fw_min_ver, hw->fw_build,
		     hw->api_maj_ver, hw->api_min_ver);

	ice_pf_sw_init(dev);
	ret = ice_init_mac_address(dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to initialize mac address");
		goto err_init_mac;
	}

	/* vector 0 is reserved for the misc/admin interrupt */
	ret = ice_res_pool_init(&pf->msix_pool, 1,
				hw->func_caps.common_cap.num_msix_vectors - 1);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to init MSIX pool");
		goto err_msix_pool_init;
	}

	ret = ice_pf_setup(pf);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to setup PF");
		goto err_pf_setup;
	}

	vsi = pf->main_vsi;

	/* Disable double vlan by default */
	ice_vsi_config_double_vlan(vsi, FALSE);

	/* register callback func to eal lib */
	rte_intr_callback_register(intr_handle,
				   ice_interrupt_handler, dev);

	ice_pf_enable_irq0(hw);

	/* enable uio intr after callback register */
	rte_intr_enable(intr_handle);

	return 0;

	/* unwind in reverse order of acquisition */
err_pf_setup:
	ice_res_pool_destroy(&pf->msix_pool);
err_msix_pool_init:
	rte_free(dev->data->mac_addrs);
err_init_mac:
	ice_sched_cleanup_all(hw);
	rte_free(hw->port_info);
	ice_shutdown_all_ctrlq(hw);

	return ret;
}
1431
1432 static int
1433 ice_release_vsi(struct ice_vsi *vsi)
1434 {
1435         struct ice_hw *hw;
1436         struct ice_vsi_ctx vsi_ctx;
1437         enum ice_status ret;
1438
1439         if (!vsi)
1440                 return 0;
1441
1442         hw = ICE_VSI_TO_HW(vsi);
1443
1444         ice_remove_all_mac_vlan_filters(vsi);
1445
1446         memset(&vsi_ctx, 0, sizeof(vsi_ctx));
1447
1448         vsi_ctx.vsi_num = vsi->vsi_id;
1449         vsi_ctx.info = vsi->info;
1450         ret = ice_free_vsi(hw, vsi->idx, &vsi_ctx, false, NULL);
1451         if (ret != ICE_SUCCESS) {
1452                 PMD_INIT_LOG(ERR, "Failed to free vsi by aq, %u", vsi->vsi_id);
1453                 rte_free(vsi);
1454                 return -1;
1455         }
1456
1457         rte_free(vsi);
1458         return 0;
1459 }
1460
1461 static void
1462 ice_vsi_disable_queues_intr(struct ice_vsi *vsi)
1463 {
1464         struct rte_eth_dev *dev = vsi->adapter->eth_dev;
1465         struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
1466         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1467         struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
1468         uint16_t msix_intr, i;
1469
1470         /* disable interrupt and also clear all the exist config */
1471         for (i = 0; i < vsi->nb_qps; i++) {
1472                 ICE_WRITE_REG(hw, QINT_TQCTL(vsi->base_queue + i), 0);
1473                 ICE_WRITE_REG(hw, QINT_RQCTL(vsi->base_queue + i), 0);
1474                 rte_wmb();
1475         }
1476
1477         if (rte_intr_allow_others(intr_handle))
1478                 /* vfio-pci */
1479                 for (i = 0; i < vsi->nb_msix; i++) {
1480                         msix_intr = vsi->msix_intr + i;
1481                         ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr),
1482                                       GLINT_DYN_CTL_WB_ON_ITR_M);
1483                 }
1484         else
1485                 /* igb_uio */
1486                 ICE_WRITE_REG(hw, GLINT_DYN_CTL(0), GLINT_DYN_CTL_WB_ON_ITR_M);
1487 }
1488
/* Stop the device: stop/clear all Rx and Tx queues, disable queue
 * interrupts, release queued mbufs and the interrupt vector mapping.
 * Idempotent via the pf->adapter_stopped guard.
 */
static void
ice_dev_stop(struct rte_eth_dev *dev)
{
	struct rte_eth_dev_data *data = dev->data;
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_vsi *main_vsi = pf->main_vsi;
	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	uint16_t i;

	/* avoid stopping again */
	if (pf->adapter_stopped)
		return;

	/* stop and clear all Rx queues */
	for (i = 0; i < data->nb_rx_queues; i++)
		ice_rx_queue_stop(dev, i);

	/* stop and clear all Tx queues */
	for (i = 0; i < data->nb_tx_queues; i++)
		ice_tx_queue_stop(dev, i);

	/* disable all queue interrupts */
	ice_vsi_disable_queues_intr(main_vsi);

	/* Clear all queues and release mbufs */
	ice_clear_queues(dev);

	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
	if (intr_handle->intr_vec) {
		rte_free(intr_handle->intr_vec);
		intr_handle->intr_vec = NULL;
	}

	pf->adapter_stopped = true;
}
1526
/* Close the device: stop it, then release queues, the MSI-X pool, the
 * main VSI, the scheduler state and the control queues, in that order.
 */
static void
ice_dev_close(struct rte_eth_dev *dev)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	ice_dev_stop(dev);

	/* release all queue resource */
	ice_free_queues(dev);

	ice_res_pool_destroy(&pf->msix_pool);
	ice_release_vsi(pf->main_vsi);
	ice_sched_cleanup_all(hw);
	rte_free(hw->port_info);
	ice_shutdown_all_ctrlq(hw);
}
1544
/* Undo ice_dev_init(): close the device, detach the burst/ops
 * pointers, free the MAC address table and tear down the interrupt
 * callback.  Always returns 0.
 */
static int
ice_dev_uninit(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	ice_dev_close(dev);

	dev->dev_ops = NULL;
	dev->rx_pkt_burst = NULL;
	dev->tx_pkt_burst = NULL;

	rte_free(dev->data->mac_addrs);
	dev->data->mac_addrs = NULL;

	/* disable uio intr before callback unregister */
	rte_intr_disable(intr_handle);

	/* unregister callback func from eal lib */
	rte_intr_callback_unregister(intr_handle,
				     ice_interrupt_handler, dev);

	return 0;
}
1569
1570 static int
1571 ice_dev_configure(__rte_unused struct rte_eth_dev *dev)
1572 {
1573         struct ice_adapter *ad =
1574                 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1575
1576         /* Initialize to TRUE. If any of Rx queues doesn't meet the
1577          * bulk allocation or vector Rx preconditions we will reset it.
1578          */
1579         ad->rx_bulk_alloc_allowed = true;
1580         ad->tx_simple_allowed = true;
1581
1582         return 0;
1583 }
1584
1585 static int ice_init_rss(struct ice_pf *pf)
1586 {
1587         struct ice_hw *hw = ICE_PF_TO_HW(pf);
1588         struct ice_vsi *vsi = pf->main_vsi;
1589         struct rte_eth_dev *dev = pf->adapter->eth_dev;
1590         struct rte_eth_rss_conf *rss_conf;
1591         struct ice_aqc_get_set_rss_keys key;
1592         uint16_t i, nb_q;
1593         int ret = 0;
1594         bool is_safe_mode = pf->adapter->is_safe_mode;
1595
1596         rss_conf = &dev->data->dev_conf.rx_adv_conf.rss_conf;
1597         nb_q = dev->data->nb_rx_queues;
1598         vsi->rss_key_size = ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE;
1599         vsi->rss_lut_size = hw->func_caps.common_cap.rss_table_size;
1600
1601         if (is_safe_mode) {
1602                 PMD_DRV_LOG(WARNING, "RSS is not supported in safe mode\n");
1603                 return 0;
1604         }
1605
1606         if (!vsi->rss_key)
1607                 vsi->rss_key = rte_zmalloc(NULL,
1608                                            vsi->rss_key_size, 0);
1609         if (!vsi->rss_lut)
1610                 vsi->rss_lut = rte_zmalloc(NULL,
1611                                            vsi->rss_lut_size, 0);
1612
1613         /* configure RSS key */
1614         if (!rss_conf->rss_key) {
1615                 /* Calculate the default hash key */
1616                 for (i = 0; i <= vsi->rss_key_size; i++)
1617                         vsi->rss_key[i] = (uint8_t)rte_rand();
1618         } else {
1619                 rte_memcpy(vsi->rss_key, rss_conf->rss_key,
1620                            RTE_MIN(rss_conf->rss_key_len,
1621                                    vsi->rss_key_size));
1622         }
1623         rte_memcpy(key.standard_rss_key, vsi->rss_key, vsi->rss_key_size);
1624         ret = ice_aq_set_rss_key(hw, vsi->idx, &key);
1625         if (ret)
1626                 return -EINVAL;
1627
1628         /* init RSS LUT table */
1629         for (i = 0; i < vsi->rss_lut_size; i++)
1630                 vsi->rss_lut[i] = i % nb_q;
1631
1632         ret = ice_aq_set_rss_lut(hw, vsi->idx,
1633                                  ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF,
1634                                  vsi->rss_lut, vsi->rss_lut_size);
1635         if (ret)
1636                 return -EINVAL;
1637
1638         /* configure RSS for tcp6 with input set IPv6 src/dst, TCP src/dst */
1639         ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_TCP_IPV6,
1640                               ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6);
1641         if (ret)
1642                 PMD_DRV_LOG(ERR, "%s TCP_IPV6 rss flow fail %d", __func__, ret);
1643
1644         /* configure RSS for udp6 with input set IPv6 src/dst, UDP src/dst */
1645         ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_UDP_IPV6,
1646                               ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV6);
1647         if (ret)
1648                 PMD_DRV_LOG(ERR, "%s UDP_IPV6 rss flow fail %d", __func__, ret);
1649
1650         /* configure RSS for sctp6 with input set IPv6 src/dst */
1651         ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV6,
1652                               ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV6);
1653         if (ret)
1654                 PMD_DRV_LOG(ERR, "%s SCTP_IPV6 rss flow fail %d",
1655                                 __func__, ret);
1656
1657         /* configure RSS for tcp4 with input set IP src/dst, TCP src/dst */
1658         ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_TCP_IPV4,
1659                               ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4);
1660         if (ret)
1661                 PMD_DRV_LOG(ERR, "%s TCP_IPV4 rss flow fail %d", __func__, ret);
1662
1663         /* configure RSS for udp4 with input set IP src/dst, UDP src/dst */
1664         ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_UDP_IPV4,
1665                               ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4);
1666         if (ret)
1667                 PMD_DRV_LOG(ERR, "%s UDP_IPV4 rss flow fail %d", __func__, ret);
1668
1669         /* configure RSS for sctp4 with input set IP src/dst */
1670         ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV4,
1671                               ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4);
1672         if (ret)
1673                 PMD_DRV_LOG(ERR, "%s SCTP_IPV4 rss flow fail %d",
1674                                 __func__, ret);
1675
1676         return 0;
1677 }
1678
1679 static void
1680 __vsi_queues_bind_intr(struct ice_vsi *vsi, uint16_t msix_vect,
1681                        int base_queue, int nb_queue)
1682 {
1683         struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
1684         uint32_t val, val_tx;
1685         int i;
1686
1687         for (i = 0; i < nb_queue; i++) {
1688                 /*do actual bind*/
1689                 val = (msix_vect & QINT_RQCTL_MSIX_INDX_M) |
1690                       (0 < QINT_RQCTL_ITR_INDX_S) | QINT_RQCTL_CAUSE_ENA_M;
1691                 val_tx = (msix_vect & QINT_TQCTL_MSIX_INDX_M) |
1692                          (0 < QINT_TQCTL_ITR_INDX_S) | QINT_TQCTL_CAUSE_ENA_M;
1693
1694                 PMD_DRV_LOG(INFO, "queue %d is binding to vect %d",
1695                             base_queue + i, msix_vect);
1696                 /* set ITR0 value */
1697                 ICE_WRITE_REG(hw, GLINT_ITR(0, msix_vect), 0x10);
1698                 ICE_WRITE_REG(hw, QINT_RQCTL(base_queue + i), val);
1699                 ICE_WRITE_REG(hw, QINT_TQCTL(base_queue + i), val_tx);
1700         }
1701 }
1702
/**
 * Map the VSI's used queues onto its MSI-X vectors.
 *
 * With a single available vector (uio case, or no "other" interrupts
 * allowed) every queue shares one vector; otherwise (vfio) queues and
 * vectors are mapped 1:1. When datapath interrupts are enabled, the
 * chosen vector for each queue is also recorded in
 * intr_handle->intr_vec so ethdev Rx-interrupt APIs can find it.
 */
static void
ice_vsi_queues_bind_intr(struct ice_vsi *vsi)
{
	struct rte_eth_dev *dev = vsi->adapter->eth_dev;
	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	uint16_t msix_vect = vsi->msix_intr;
	/* cannot use more vectors than event fds were set up for */
	uint16_t nb_msix = RTE_MIN(vsi->nb_msix, intr_handle->nb_efd);
	uint16_t queue_idx = 0;
	int record = 0;
	int i;

	/* clear Rx/Tx queue interrupt */
	for (i = 0; i < vsi->nb_used_qps; i++) {
		ICE_WRITE_REG(hw, QINT_TQCTL(vsi->base_queue + i), 0);
		ICE_WRITE_REG(hw, QINT_RQCTL(vsi->base_queue + i), 0);
	}

	/* PF bind interrupt: remember mappings only when the datapath
	 * interrupt mode is enabled
	 */
	if (rte_intr_dp_is_en(intr_handle)) {
		queue_idx = 0;
		record = 1;
	}

	for (i = 0; i < vsi->nb_used_qps; i++) {
		if (nb_msix <= 1) {
			if (!rte_intr_allow_others(intr_handle))
				msix_vect = ICE_MISC_VEC_ID;

			/* uio mapping all queue to one msix_vect */
			__vsi_queues_bind_intr(vsi, msix_vect,
					       vsi->base_queue + i,
					       vsi->nb_used_qps - i);

			/* all remaining queues share this last vector */
			for (; !!record && i < vsi->nb_used_qps; i++)
				intr_handle->intr_vec[queue_idx + i] =
					msix_vect;
			break;
		}

		/* vfio 1:1 queue/msix_vect mapping */
		__vsi_queues_bind_intr(vsi, msix_vect,
				       vsi->base_queue + i, 1);

		if (!!record)
			intr_handle->intr_vec[queue_idx + i] = msix_vect;

		msix_vect++;
		nb_msix--;
	}
}
1755
1756 static void
1757 ice_vsi_enable_queues_intr(struct ice_vsi *vsi)
1758 {
1759         struct rte_eth_dev *dev = vsi->adapter->eth_dev;
1760         struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
1761         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1762         struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
1763         uint16_t msix_intr, i;
1764
1765         if (rte_intr_allow_others(intr_handle))
1766                 for (i = 0; i < vsi->nb_used_qps; i++) {
1767                         msix_intr = vsi->msix_intr + i;
1768                         ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr),
1769                                       GLINT_DYN_CTL_INTENA_M |
1770                                       GLINT_DYN_CTL_CLEARPBA_M |
1771                                       GLINT_DYN_CTL_ITR_INDX_M |
1772                                       GLINT_DYN_CTL_WB_ON_ITR_M);
1773                 }
1774         else
1775                 ICE_WRITE_REG(hw, GLINT_DYN_CTL(0),
1776                               GLINT_DYN_CTL_INTENA_M |
1777                               GLINT_DYN_CTL_CLEARPBA_M |
1778                               GLINT_DYN_CTL_ITR_INDX_M |
1779                               GLINT_DYN_CTL_WB_ON_ITR_M);
1780 }
1781
/**
 * Set up Rx queue interrupts for the port.
 *
 * Allocates event fds and the queue->vector table when Rx interrupts are
 * requested, binds queues to MSI-X vectors and enables them.
 *
 * Returns 0 on success, -ENOTSUP when too many queues need vectors,
 * -ENOMEM on allocation failure, -1 when efd enabling fails.
 */
static int
ice_rxq_intr_setup(struct rte_eth_dev *dev)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct ice_vsi *vsi = pf->main_vsi;
	uint32_t intr_vector = 0;

	/* quiesce interrupts while reprogramming the mappings */
	rte_intr_disable(intr_handle);

	/* check and configure queue intr-vector mapping */
	if ((rte_intr_cap_multiple(intr_handle) ||
	     !RTE_ETH_DEV_SRIOV(dev).active) &&
	    dev->data->dev_conf.intr_conf.rxq != 0) {
		/* one vector per Rx queue */
		intr_vector = dev->data->nb_rx_queues;
		if (intr_vector > ICE_MAX_INTR_QUEUE_NUM) {
			PMD_DRV_LOG(ERR, "At most %d intr queues supported",
				    ICE_MAX_INTR_QUEUE_NUM);
			return -ENOTSUP;
		}
		if (rte_intr_efd_enable(intr_handle, intr_vector))
			return -1;
	}

	/* allocate the queue->vector table once, if datapath intr is on */
	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
		intr_handle->intr_vec =
		rte_zmalloc(NULL, dev->data->nb_rx_queues * sizeof(int),
			    0);
		if (!intr_handle->intr_vec) {
			PMD_DRV_LOG(ERR,
				    "Failed to allocate %d rx_queues intr_vec",
				    dev->data->nb_rx_queues);
			return -ENOMEM;
		}
	}

	/* Map queues with MSIX interrupt */
	vsi->nb_used_qps = dev->data->nb_rx_queues;
	ice_vsi_queues_bind_intr(vsi);

	/* Enable interrupts for all the queues */
	ice_vsi_enable_queues_intr(vsi);

	rte_intr_enable(intr_handle);

	return 0;
}
1830
1831 static int
1832 ice_dev_start(struct rte_eth_dev *dev)
1833 {
1834         struct rte_eth_dev_data *data = dev->data;
1835         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1836         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1837         struct ice_vsi *vsi = pf->main_vsi;
1838         uint16_t nb_rxq = 0;
1839         uint16_t nb_txq, i;
1840         int mask, ret;
1841
1842         /* program Tx queues' context in hardware */
1843         for (nb_txq = 0; nb_txq < data->nb_tx_queues; nb_txq++) {
1844                 ret = ice_tx_queue_start(dev, nb_txq);
1845                 if (ret) {
1846                         PMD_DRV_LOG(ERR, "fail to start Tx queue %u", nb_txq);
1847                         goto tx_err;
1848                 }
1849         }
1850
1851         /* program Rx queues' context in hardware*/
1852         for (nb_rxq = 0; nb_rxq < data->nb_rx_queues; nb_rxq++) {
1853                 ret = ice_rx_queue_start(dev, nb_rxq);
1854                 if (ret) {
1855                         PMD_DRV_LOG(ERR, "fail to start Rx queue %u", nb_rxq);
1856                         goto rx_err;
1857                 }
1858         }
1859
1860         ret = ice_init_rss(pf);
1861         if (ret) {
1862                 PMD_DRV_LOG(ERR, "Failed to enable rss for PF");
1863                 goto rx_err;
1864         }
1865
1866         ice_set_rx_function(dev);
1867         ice_set_tx_function(dev);
1868
1869         mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
1870                         ETH_VLAN_EXTEND_MASK;
1871         ret = ice_vlan_offload_set(dev, mask);
1872         if (ret) {
1873                 PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
1874                 goto rx_err;
1875         }
1876
1877         /* enable Rx interrput and mapping Rx queue to interrupt vector */
1878         if (ice_rxq_intr_setup(dev))
1879                 return -EIO;
1880
1881         /* Enable receiving broadcast packets and transmitting packets */
1882         ret = ice_set_vsi_promisc(hw, vsi->idx,
1883                                   ICE_PROMISC_BCAST_RX | ICE_PROMISC_BCAST_TX |
1884                                   ICE_PROMISC_UCAST_TX | ICE_PROMISC_MCAST_TX,
1885                                   0);
1886         if (ret != ICE_SUCCESS)
1887                 PMD_DRV_LOG(INFO, "fail to set vsi broadcast");
1888
1889         ret = ice_aq_set_event_mask(hw, hw->port_info->lport,
1890                                     ((u16)(ICE_AQ_LINK_EVENT_LINK_FAULT |
1891                                      ICE_AQ_LINK_EVENT_PHY_TEMP_ALARM |
1892                                      ICE_AQ_LINK_EVENT_EXCESSIVE_ERRORS |
1893                                      ICE_AQ_LINK_EVENT_SIGNAL_DETECT |
1894                                      ICE_AQ_LINK_EVENT_AN_COMPLETED |
1895                                      ICE_AQ_LINK_EVENT_PORT_TX_SUSPENDED)),
1896                                      NULL);
1897         if (ret != ICE_SUCCESS)
1898                 PMD_DRV_LOG(WARNING, "Fail to set phy mask");
1899
1900         /* Call get_link_info aq commond to enable/disable LSE */
1901         ice_link_update(dev, 0);
1902
1903         pf->adapter_stopped = false;
1904
1905         return 0;
1906
1907         /* stop the started queues if failed to start all queues */
1908 rx_err:
1909         for (i = 0; i < nb_rxq; i++)
1910                 ice_rx_queue_stop(dev, i);
1911 tx_err:
1912         for (i = 0; i < nb_txq; i++)
1913                 ice_tx_queue_stop(dev, i);
1914
1915         return -EIO;
1916 }
1917
1918 static int
1919 ice_dev_reset(struct rte_eth_dev *dev)
1920 {
1921         int ret;
1922
1923         if (dev->data->sriov.active)
1924                 return -ENOTSUP;
1925
1926         ret = ice_dev_uninit(dev);
1927         if (ret) {
1928                 PMD_INIT_LOG(ERR, "failed to uninit device, status = %d", ret);
1929                 return -ENXIO;
1930         }
1931
1932         ret = ice_dev_init(dev);
1933         if (ret) {
1934                 PMD_INIT_LOG(ERR, "failed to init device, status = %d", ret);
1935                 return -ENXIO;
1936         }
1937
1938         return 0;
1939 }
1940
1941 static void
1942 ice_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1943 {
1944         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1945         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1946         struct ice_vsi *vsi = pf->main_vsi;
1947         struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
1948         bool is_safe_mode = pf->adapter->is_safe_mode;
1949
1950         dev_info->min_rx_bufsize = ICE_BUF_SIZE_MIN;
1951         dev_info->max_rx_pktlen = ICE_FRAME_SIZE_MAX;
1952         dev_info->max_rx_queues = vsi->nb_qps;
1953         dev_info->max_tx_queues = vsi->nb_qps;
1954         dev_info->max_mac_addrs = vsi->max_macaddrs;
1955         dev_info->max_vfs = pci_dev->max_vfs;
1956
1957         dev_info->rx_offload_capa =
1958                 DEV_RX_OFFLOAD_VLAN_STRIP |
1959                 DEV_RX_OFFLOAD_JUMBO_FRAME |
1960                 DEV_RX_OFFLOAD_KEEP_CRC |
1961                 DEV_RX_OFFLOAD_SCATTER |
1962                 DEV_RX_OFFLOAD_VLAN_FILTER;
1963         dev_info->tx_offload_capa =
1964                 DEV_TX_OFFLOAD_VLAN_INSERT |
1965                 DEV_TX_OFFLOAD_TCP_TSO |
1966                 DEV_TX_OFFLOAD_MULTI_SEGS |
1967                 DEV_TX_OFFLOAD_MBUF_FAST_FREE;
1968         dev_info->flow_type_rss_offloads = 0;
1969
1970         if (!is_safe_mode) {
1971                 dev_info->rx_offload_capa |=
1972                         DEV_RX_OFFLOAD_IPV4_CKSUM |
1973                         DEV_RX_OFFLOAD_UDP_CKSUM |
1974                         DEV_RX_OFFLOAD_TCP_CKSUM |
1975                         DEV_RX_OFFLOAD_QINQ_STRIP |
1976                         DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
1977                         DEV_RX_OFFLOAD_VLAN_EXTEND;
1978                 dev_info->tx_offload_capa |=
1979                         DEV_TX_OFFLOAD_QINQ_INSERT |
1980                         DEV_TX_OFFLOAD_IPV4_CKSUM |
1981                         DEV_TX_OFFLOAD_UDP_CKSUM |
1982                         DEV_TX_OFFLOAD_TCP_CKSUM |
1983                         DEV_TX_OFFLOAD_SCTP_CKSUM |
1984                         DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
1985                 dev_info->flow_type_rss_offloads |= ICE_RSS_OFFLOAD_ALL;
1986         }
1987
1988         dev_info->rx_queue_offload_capa = 0;
1989         dev_info->tx_queue_offload_capa = 0;
1990
1991         dev_info->reta_size = hw->func_caps.common_cap.rss_table_size;
1992         dev_info->hash_key_size = (VSIQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
1993
1994         dev_info->default_rxconf = (struct rte_eth_rxconf) {
1995                 .rx_thresh = {
1996                         .pthresh = ICE_DEFAULT_RX_PTHRESH,
1997                         .hthresh = ICE_DEFAULT_RX_HTHRESH,
1998                         .wthresh = ICE_DEFAULT_RX_WTHRESH,
1999                 },
2000                 .rx_free_thresh = ICE_DEFAULT_RX_FREE_THRESH,
2001                 .rx_drop_en = 0,
2002                 .offloads = 0,
2003         };
2004
2005         dev_info->default_txconf = (struct rte_eth_txconf) {
2006                 .tx_thresh = {
2007                         .pthresh = ICE_DEFAULT_TX_PTHRESH,
2008                         .hthresh = ICE_DEFAULT_TX_HTHRESH,
2009                         .wthresh = ICE_DEFAULT_TX_WTHRESH,
2010                 },
2011                 .tx_free_thresh = ICE_DEFAULT_TX_FREE_THRESH,
2012                 .tx_rs_thresh = ICE_DEFAULT_TX_RSBIT_THRESH,
2013                 .offloads = 0,
2014         };
2015
2016         dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
2017                 .nb_max = ICE_MAX_RING_DESC,
2018                 .nb_min = ICE_MIN_RING_DESC,
2019                 .nb_align = ICE_ALIGN_RING_DESC,
2020         };
2021
2022         dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
2023                 .nb_max = ICE_MAX_RING_DESC,
2024                 .nb_min = ICE_MIN_RING_DESC,
2025                 .nb_align = ICE_ALIGN_RING_DESC,
2026         };
2027
2028         dev_info->speed_capa = ETH_LINK_SPEED_10M |
2029                                ETH_LINK_SPEED_100M |
2030                                ETH_LINK_SPEED_1G |
2031                                ETH_LINK_SPEED_2_5G |
2032                                ETH_LINK_SPEED_5G |
2033                                ETH_LINK_SPEED_10G |
2034                                ETH_LINK_SPEED_20G |
2035                                ETH_LINK_SPEED_25G |
2036                                ETH_LINK_SPEED_40G |
2037                                ETH_LINK_SPEED_50G |
2038                                ETH_LINK_SPEED_100G;
2039
2040         dev_info->nb_rx_queues = dev->data->nb_rx_queues;
2041         dev_info->nb_tx_queues = dev->data->nb_tx_queues;
2042
2043         dev_info->default_rxportconf.burst_size = ICE_RX_MAX_BURST;
2044         dev_info->default_txportconf.burst_size = ICE_TX_MAX_BURST;
2045         dev_info->default_rxportconf.nb_queues = 1;
2046         dev_info->default_txportconf.nb_queues = 1;
2047         dev_info->default_rxportconf.ring_size = ICE_BUF_SIZE_MIN;
2048         dev_info->default_txportconf.ring_size = ICE_BUF_SIZE_MIN;
2049 }
2050
2051 static inline int
2052 ice_atomic_read_link_status(struct rte_eth_dev *dev,
2053                             struct rte_eth_link *link)
2054 {
2055         struct rte_eth_link *dst = link;
2056         struct rte_eth_link *src = &dev->data->dev_link;
2057
2058         if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
2059                                 *(uint64_t *)src) == 0)
2060                 return -1;
2061
2062         return 0;
2063 }
2064
2065 static inline int
2066 ice_atomic_write_link_status(struct rte_eth_dev *dev,
2067                              struct rte_eth_link *link)
2068 {
2069         struct rte_eth_link *dst = &dev->data->dev_link;
2070         struct rte_eth_link *src = link;
2071
2072         if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
2073                                 *(uint64_t *)src) == 0)
2074                 return -1;
2075
2076         return 0;
2077 }
2078
2079 static int
2080 ice_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
2081 {
2082 #define CHECK_INTERVAL 100  /* 100ms */
2083 #define MAX_REPEAT_TIME 10  /* 1s (10 * 100ms) in total */
2084         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2085         struct ice_link_status link_status;
2086         struct rte_eth_link link, old;
2087         int status;
2088         unsigned int rep_cnt = MAX_REPEAT_TIME;
2089         bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false;
2090
2091         memset(&link, 0, sizeof(link));
2092         memset(&old, 0, sizeof(old));
2093         memset(&link_status, 0, sizeof(link_status));
2094         ice_atomic_read_link_status(dev, &old);
2095
2096         do {
2097                 /* Get link status information from hardware */
2098                 status = ice_aq_get_link_info(hw->port_info, enable_lse,
2099                                               &link_status, NULL);
2100                 if (status != ICE_SUCCESS) {
2101                         link.link_speed = ETH_SPEED_NUM_100M;
2102                         link.link_duplex = ETH_LINK_FULL_DUPLEX;
2103                         PMD_DRV_LOG(ERR, "Failed to get link info");
2104                         goto out;
2105                 }
2106
2107                 link.link_status = link_status.link_info & ICE_AQ_LINK_UP;
2108                 if (!wait_to_complete || link.link_status)
2109                         break;
2110
2111                 rte_delay_ms(CHECK_INTERVAL);
2112         } while (--rep_cnt);
2113
2114         if (!link.link_status)
2115                 goto out;
2116
2117         /* Full-duplex operation at all supported speeds */
2118         link.link_duplex = ETH_LINK_FULL_DUPLEX;
2119
2120         /* Parse the link status */
2121         switch (link_status.link_speed) {
2122         case ICE_AQ_LINK_SPEED_10MB:
2123                 link.link_speed = ETH_SPEED_NUM_10M;
2124                 break;
2125         case ICE_AQ_LINK_SPEED_100MB:
2126                 link.link_speed = ETH_SPEED_NUM_100M;
2127                 break;
2128         case ICE_AQ_LINK_SPEED_1000MB:
2129                 link.link_speed = ETH_SPEED_NUM_1G;
2130                 break;
2131         case ICE_AQ_LINK_SPEED_2500MB:
2132                 link.link_speed = ETH_SPEED_NUM_2_5G;
2133                 break;
2134         case ICE_AQ_LINK_SPEED_5GB:
2135                 link.link_speed = ETH_SPEED_NUM_5G;
2136                 break;
2137         case ICE_AQ_LINK_SPEED_10GB:
2138                 link.link_speed = ETH_SPEED_NUM_10G;
2139                 break;
2140         case ICE_AQ_LINK_SPEED_20GB:
2141                 link.link_speed = ETH_SPEED_NUM_20G;
2142                 break;
2143         case ICE_AQ_LINK_SPEED_25GB:
2144                 link.link_speed = ETH_SPEED_NUM_25G;
2145                 break;
2146         case ICE_AQ_LINK_SPEED_40GB:
2147                 link.link_speed = ETH_SPEED_NUM_40G;
2148                 break;
2149         case ICE_AQ_LINK_SPEED_50GB:
2150                 link.link_speed = ETH_SPEED_NUM_50G;
2151                 break;
2152         case ICE_AQ_LINK_SPEED_100GB:
2153                 link.link_speed = ETH_SPEED_NUM_100G;
2154                 break;
2155         case ICE_AQ_LINK_SPEED_UNKNOWN:
2156         default:
2157                 PMD_DRV_LOG(ERR, "Unknown link speed");
2158                 link.link_speed = ETH_SPEED_NUM_NONE;
2159                 break;
2160         }
2161
2162         link.link_autoneg = !(dev->data->dev_conf.link_speeds &
2163                               ETH_LINK_SPEED_FIXED);
2164
2165 out:
2166         ice_atomic_write_link_status(dev, &link);
2167         if (link.link_status == old.link_status)
2168                 return -1;
2169
2170         return 0;
2171 }
2172
2173 static int
2174 ice_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
2175 {
2176         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2177         struct rte_eth_dev_data *dev_data = pf->dev_data;
2178         uint32_t frame_size = mtu + ETHER_HDR_LEN
2179                               + ETHER_CRC_LEN + ICE_VLAN_TAG_SIZE;
2180
2181         /* check if mtu is within the allowed range */
2182         if (mtu < ETHER_MIN_MTU || frame_size > ICE_FRAME_SIZE_MAX)
2183                 return -EINVAL;
2184
2185         /* mtu setting is forbidden if port is start */
2186         if (dev_data->dev_started) {
2187                 PMD_DRV_LOG(ERR,
2188                             "port %d must be stopped before configuration",
2189                             dev_data->port_id);
2190                 return -EBUSY;
2191         }
2192
2193         if (frame_size > ETHER_MAX_LEN)
2194                 dev_data->dev_conf.rxmode.offloads |=
2195                         DEV_RX_OFFLOAD_JUMBO_FRAME;
2196         else
2197                 dev_data->dev_conf.rxmode.offloads &=
2198                         ~DEV_RX_OFFLOAD_JUMBO_FRAME;
2199
2200         dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
2201
2202         return 0;
2203 }
2204
2205 static int ice_macaddr_set(struct rte_eth_dev *dev,
2206                            struct ether_addr *mac_addr)
2207 {
2208         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2209         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2210         struct ice_vsi *vsi = pf->main_vsi;
2211         struct ice_mac_filter *f;
2212         uint8_t flags = 0;
2213         int ret;
2214
2215         if (!is_valid_assigned_ether_addr(mac_addr)) {
2216                 PMD_DRV_LOG(ERR, "Tried to set invalid MAC address.");
2217                 return -EINVAL;
2218         }
2219
2220         TAILQ_FOREACH(f, &vsi->mac_list, next) {
2221                 if (is_same_ether_addr(&pf->dev_addr, &f->mac_info.mac_addr))
2222                         break;
2223         }
2224
2225         if (!f) {
2226                 PMD_DRV_LOG(ERR, "Failed to find filter for default mac");
2227                 return -EIO;
2228         }
2229
2230         ret = ice_remove_mac_filter(vsi, &f->mac_info.mac_addr);
2231         if (ret != ICE_SUCCESS) {
2232                 PMD_DRV_LOG(ERR, "Failed to delete mac filter");
2233                 return -EIO;
2234         }
2235         ret = ice_add_mac_filter(vsi, mac_addr);
2236         if (ret != ICE_SUCCESS) {
2237                 PMD_DRV_LOG(ERR, "Failed to add mac filter");
2238                 return -EIO;
2239         }
2240         memcpy(&pf->dev_addr, mac_addr, ETH_ADDR_LEN);
2241
2242         flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL;
2243         ret = ice_aq_manage_mac_write(hw, mac_addr->addr_bytes, flags, NULL);
2244         if (ret != ICE_SUCCESS)
2245                 PMD_DRV_LOG(ERR, "Failed to set manage mac");
2246
2247         return 0;
2248 }
2249
2250 /* Add a MAC address, and update filters */
2251 static int
2252 ice_macaddr_add(struct rte_eth_dev *dev,
2253                 struct ether_addr *mac_addr,
2254                 __rte_unused uint32_t index,
2255                 __rte_unused uint32_t pool)
2256 {
2257         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2258         struct ice_vsi *vsi = pf->main_vsi;
2259         int ret;
2260
2261         ret = ice_add_mac_filter(vsi, mac_addr);
2262         if (ret != ICE_SUCCESS) {
2263                 PMD_DRV_LOG(ERR, "Failed to add MAC filter");
2264                 return -EINVAL;
2265         }
2266
2267         return ICE_SUCCESS;
2268 }
2269
2270 /* Remove a MAC address, and update filters */
2271 static void
2272 ice_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
2273 {
2274         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2275         struct ice_vsi *vsi = pf->main_vsi;
2276         struct rte_eth_dev_data *data = dev->data;
2277         struct ether_addr *macaddr;
2278         int ret;
2279
2280         macaddr = &data->mac_addrs[index];
2281         ret = ice_remove_mac_filter(vsi, macaddr);
2282         if (ret) {
2283                 PMD_DRV_LOG(ERR, "Failed to remove MAC filter");
2284                 return;
2285         }
2286 }
2287
2288 static int
2289 ice_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
2290 {
2291         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2292         struct ice_vsi *vsi = pf->main_vsi;
2293         int ret;
2294
2295         PMD_INIT_FUNC_TRACE();
2296
2297         if (on) {
2298                 ret = ice_add_vlan_filter(vsi, vlan_id);
2299                 if (ret < 0) {
2300                         PMD_DRV_LOG(ERR, "Failed to add vlan filter");
2301                         return -EINVAL;
2302                 }
2303         } else {
2304                 ret = ice_remove_vlan_filter(vsi, vlan_id);
2305                 if (ret < 0) {
2306                         PMD_DRV_LOG(ERR, "Failed to remove vlan filter");
2307                         return -EINVAL;
2308                 }
2309         }
2310
2311         return 0;
2312 }
2313
2314 /* Configure vlan filter on or off */
2315 static int
2316 ice_vsi_config_vlan_filter(struct ice_vsi *vsi, bool on)
2317 {
2318         struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
2319         struct ice_vsi_ctx ctxt;
2320         uint8_t sec_flags, sw_flags2;
2321         int ret = 0;
2322
2323         sec_flags = ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
2324                     ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S;
2325         sw_flags2 = ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
2326
2327         if (on) {
2328                 vsi->info.sec_flags |= sec_flags;
2329                 vsi->info.sw_flags2 |= sw_flags2;
2330         } else {
2331                 vsi->info.sec_flags &= ~sec_flags;
2332                 vsi->info.sw_flags2 &= ~sw_flags2;
2333         }
2334         vsi->info.sw_id = hw->port_info->sw_id;
2335         (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
2336         ctxt.info.valid_sections =
2337                 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_SW_VALID |
2338                                  ICE_AQ_VSI_PROP_SECURITY_VALID);
2339         ctxt.vsi_num = vsi->vsi_id;
2340
2341         ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
2342         if (ret) {
2343                 PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan rx pruning",
2344                             on ? "enable" : "disable");
2345                 return -EINVAL;
2346         } else {
2347                 vsi->info.valid_sections |=
2348                         rte_cpu_to_le_16(ICE_AQ_VSI_PROP_SW_VALID |
2349                                          ICE_AQ_VSI_PROP_SECURITY_VALID);
2350         }
2351
2352         /* consist with other drivers, allow untagged packet when vlan filter on */
2353         if (on)
2354                 ret = ice_add_vlan_filter(vsi, 0);
2355         else
2356                 ret = ice_remove_vlan_filter(vsi, 0);
2357
2358         return 0;
2359 }
2360
/**
 * Enable/disable VLAN stripping on the VSI.
 *
 * Skips the firmware update when the VSI context already reflects the
 * requested state; otherwise programs the VLAN emulation mode via an
 * update-VSI AQ command and records the change locally.
 *
 * Returns 0 on success (or no-op), -EINVAL when the VSI update fails.
 */
static int
ice_vsi_config_vlan_stripping(struct ice_vsi *vsi, bool on)
{
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	struct ice_vsi_ctx ctxt;
	uint8_t vlan_flags;
	int ret = 0;

	/* Check if it has been already on or off */
	if (vsi->info.valid_sections &
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID)) {
		if (on) {
			if ((vsi->info.vlan_flags &
			     ICE_AQ_VSI_VLAN_EMOD_M) ==
			    ICE_AQ_VSI_VLAN_EMOD_STR_BOTH)
				return 0; /* already on */
		} else {
			if ((vsi->info.vlan_flags &
			     ICE_AQ_VSI_VLAN_EMOD_M) ==
			    ICE_AQ_VSI_VLAN_EMOD_NOTHING)
				return 0; /* already off */
		}
	}

	/* select the new emulation mode: strip to both descriptors, or none */
	if (on)
		vlan_flags = ICE_AQ_VSI_VLAN_EMOD_STR_BOTH;
	else
		vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING;
	vsi->info.vlan_flags &= ~(ICE_AQ_VSI_VLAN_EMOD_M);
	vsi->info.vlan_flags |= vlan_flags;
	(void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
	ctxt.info.valid_sections =
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID);
	ctxt.vsi_num = vsi->vsi_id;
	ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
	if (ret) {
		PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan stripping",
			    on ? "enable" : "disable");
		return -EINVAL;
	}

	/* remember that the VLAN section of the local context is now valid */
	vsi->info.valid_sections |=
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID);

	return ret;
}
2407
2408 static int
2409 ice_vlan_offload_set(struct rte_eth_dev *dev, int mask)
2410 {
2411         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2412         struct ice_vsi *vsi = pf->main_vsi;
2413         struct rte_eth_rxmode *rxmode;
2414
2415         rxmode = &dev->data->dev_conf.rxmode;
2416         if (mask & ETH_VLAN_FILTER_MASK) {
2417                 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
2418                         ice_vsi_config_vlan_filter(vsi, TRUE);
2419                 else
2420                         ice_vsi_config_vlan_filter(vsi, FALSE);
2421         }
2422
2423         if (mask & ETH_VLAN_STRIP_MASK) {
2424                 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
2425                         ice_vsi_config_vlan_stripping(vsi, TRUE);
2426                 else
2427                         ice_vsi_config_vlan_stripping(vsi, FALSE);
2428         }
2429
2430         if (mask & ETH_VLAN_EXTEND_MASK) {
2431                 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
2432                         ice_vsi_config_double_vlan(vsi, TRUE);
2433                 else
2434                         ice_vsi_config_double_vlan(vsi, FALSE);
2435         }
2436
2437         return 0;
2438 }
2439
/**
 * Program the TPID (VLAN ethertype) for outer or inner tags.
 *
 * The TPID lives in GL_SWT_L2TAGCTRL[reg_id]; which index is used depends
 * on whether QinQ (DEV_RX_OFFLOAD_VLAN_EXTEND) is configured:
 *   outer tag with QinQ -> index 3, outer tag single VLAN -> index 5,
 *   inner tag with QinQ -> index 5; inner tag without QinQ is rejected.
 * NOTE(review): the 3/5 index assignment mirrors the hardware L2 tag
 * table layout — confirm against the ice register specification.
 *
 * @return 0 on success (or when the register already holds the value),
 *         -EINVAL for unsupported vlan_type combinations
 */
static int
ice_vlan_tpid_set(struct rte_eth_dev *dev,
		  enum rte_vlan_type vlan_type,
		  uint16_t tpid)
{
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint64_t reg_r = 0, reg_w = 0;
	uint16_t reg_id = 0;
	int ret = 0;
	/* Non-zero when QinQ (double VLAN) is enabled in the RX offloads */
	int qinq = dev->data->dev_conf.rxmode.offloads &
		   DEV_RX_OFFLOAD_VLAN_EXTEND;

	switch (vlan_type) {
	case ETH_VLAN_TYPE_OUTER:
		if (qinq)
			reg_id = 3;
		else
			reg_id = 5;
	break;
	case ETH_VLAN_TYPE_INNER:
		if (qinq) {
			reg_id = 5;
		} else {
			/* A single-VLAN setup has no inner tag to retype */
			PMD_DRV_LOG(ERR,
				    "Unsupported vlan type in single vlan.");
			return -EINVAL;
		}
		break;
	default:
		PMD_DRV_LOG(ERR, "Unsupported vlan type %d", vlan_type);
		return -EINVAL;
	}
	reg_r = ICE_READ_REG(hw, GL_SWT_L2TAGCTRL(reg_id));
	PMD_DRV_LOG(DEBUG, "Debug read from ICE GL_SWT_L2TAGCTRL[%d]: "
		    "0x%08"PRIx64"", reg_id, reg_r);

	/* Read-modify-write: replace only the ethertype field */
	reg_w = reg_r & (~(GL_SWT_L2TAGCTRL_ETHERTYPE_M));
	reg_w |= ((uint64_t)tpid << GL_SWT_L2TAGCTRL_ETHERTYPE_S);
	if (reg_r == reg_w) {
		PMD_DRV_LOG(DEBUG, "No need to write");
		return 0;
	}

	ICE_WRITE_REG(hw, GL_SWT_L2TAGCTRL(reg_id), reg_w);
	PMD_DRV_LOG(DEBUG, "Debug write 0x%08"PRIx64" to "
		    "ICE GL_SWT_L2TAGCTRL[%d]", reg_w, reg_id);

	return ret;
}
2489
2490 static int
2491 ice_get_rss_lut(struct ice_vsi *vsi, uint8_t *lut, uint16_t lut_size)
2492 {
2493         struct ice_pf *pf = ICE_VSI_TO_PF(vsi);
2494         struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
2495         int ret;
2496
2497         if (!lut)
2498                 return -EINVAL;
2499
2500         if (pf->flags & ICE_FLAG_RSS_AQ_CAPABLE) {
2501                 ret = ice_aq_get_rss_lut(hw, vsi->idx, TRUE,
2502                                          lut, lut_size);
2503                 if (ret) {
2504                         PMD_DRV_LOG(ERR, "Failed to get RSS lookup table");
2505                         return -EINVAL;
2506                 }
2507         } else {
2508                 uint64_t *lut_dw = (uint64_t *)lut;
2509                 uint16_t i, lut_size_dw = lut_size / 4;
2510
2511                 for (i = 0; i < lut_size_dw; i++)
2512                         lut_dw[i] = ICE_READ_REG(hw, PFQF_HLUT(i));
2513         }
2514
2515         return 0;
2516 }
2517
2518 static int
2519 ice_set_rss_lut(struct ice_vsi *vsi, uint8_t *lut, uint16_t lut_size)
2520 {
2521         struct ice_pf *pf = ICE_VSI_TO_PF(vsi);
2522         struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
2523         int ret;
2524
2525         if (!vsi || !lut)
2526                 return -EINVAL;
2527
2528         if (pf->flags & ICE_FLAG_RSS_AQ_CAPABLE) {
2529                 ret = ice_aq_set_rss_lut(hw, vsi->idx, TRUE,
2530                                          lut, lut_size);
2531                 if (ret) {
2532                         PMD_DRV_LOG(ERR, "Failed to set RSS lookup table");
2533                         return -EINVAL;
2534                 }
2535         } else {
2536                 uint64_t *lut_dw = (uint64_t *)lut;
2537                 uint16_t i, lut_size_dw = lut_size / 4;
2538
2539                 for (i = 0; i < lut_size_dw; i++)
2540                         ICE_WRITE_REG(hw, PFQF_HLUT(i), lut_dw[i]);
2541
2542                 ice_flush(hw);
2543         }
2544
2545         return 0;
2546 }
2547
2548 static int
2549 ice_rss_reta_update(struct rte_eth_dev *dev,
2550                     struct rte_eth_rss_reta_entry64 *reta_conf,
2551                     uint16_t reta_size)
2552 {
2553         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2554         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2555         uint16_t i, lut_size = hw->func_caps.common_cap.rss_table_size;
2556         uint16_t idx, shift;
2557         uint8_t *lut;
2558         int ret;
2559
2560         if (reta_size != lut_size ||
2561             reta_size > ETH_RSS_RETA_SIZE_512) {
2562                 PMD_DRV_LOG(ERR,
2563                             "The size of hash lookup table configured (%d)"
2564                             "doesn't match the number hardware can "
2565                             "supported (%d)",
2566                             reta_size, lut_size);
2567                 return -EINVAL;
2568         }
2569
2570         lut = rte_zmalloc(NULL, reta_size, 0);
2571         if (!lut) {
2572                 PMD_DRV_LOG(ERR, "No memory can be allocated");
2573                 return -ENOMEM;
2574         }
2575         ret = ice_get_rss_lut(pf->main_vsi, lut, reta_size);
2576         if (ret)
2577                 goto out;
2578
2579         for (i = 0; i < reta_size; i++) {
2580                 idx = i / RTE_RETA_GROUP_SIZE;
2581                 shift = i % RTE_RETA_GROUP_SIZE;
2582                 if (reta_conf[idx].mask & (1ULL << shift))
2583                         lut[i] = reta_conf[idx].reta[shift];
2584         }
2585         ret = ice_set_rss_lut(pf->main_vsi, lut, reta_size);
2586
2587 out:
2588         rte_free(lut);
2589
2590         return ret;
2591 }
2592
2593 static int
2594 ice_rss_reta_query(struct rte_eth_dev *dev,
2595                    struct rte_eth_rss_reta_entry64 *reta_conf,
2596                    uint16_t reta_size)
2597 {
2598         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2599         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2600         uint16_t i, lut_size = hw->func_caps.common_cap.rss_table_size;
2601         uint16_t idx, shift;
2602         uint8_t *lut;
2603         int ret;
2604
2605         if (reta_size != lut_size ||
2606             reta_size > ETH_RSS_RETA_SIZE_512) {
2607                 PMD_DRV_LOG(ERR,
2608                             "The size of hash lookup table configured (%d)"
2609                             "doesn't match the number hardware can "
2610                             "supported (%d)",
2611                             reta_size, lut_size);
2612                 return -EINVAL;
2613         }
2614
2615         lut = rte_zmalloc(NULL, reta_size, 0);
2616         if (!lut) {
2617                 PMD_DRV_LOG(ERR, "No memory can be allocated");
2618                 return -ENOMEM;
2619         }
2620
2621         ret = ice_get_rss_lut(pf->main_vsi, lut, reta_size);
2622         if (ret)
2623                 goto out;
2624
2625         for (i = 0; i < reta_size; i++) {
2626                 idx = i / RTE_RETA_GROUP_SIZE;
2627                 shift = i % RTE_RETA_GROUP_SIZE;
2628                 if (reta_conf[idx].mask & (1ULL << shift))
2629                         reta_conf[idx].reta[shift] = lut[i];
2630         }
2631
2632 out:
2633         rte_free(lut);
2634
2635         return ret;
2636 }
2637
2638 static int
2639 ice_set_rss_key(struct ice_vsi *vsi, uint8_t *key, uint8_t key_len)
2640 {
2641         struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
2642         int ret = 0;
2643
2644         if (!key || key_len == 0) {
2645                 PMD_DRV_LOG(DEBUG, "No key to be configured");
2646                 return 0;
2647         } else if (key_len != (VSIQF_HKEY_MAX_INDEX + 1) *
2648                    sizeof(uint32_t)) {
2649                 PMD_DRV_LOG(ERR, "Invalid key length %u", key_len);
2650                 return -EINVAL;
2651         }
2652
2653         struct ice_aqc_get_set_rss_keys *key_dw =
2654                 (struct ice_aqc_get_set_rss_keys *)key;
2655
2656         ret = ice_aq_set_rss_key(hw, vsi->idx, key_dw);
2657         if (ret) {
2658                 PMD_DRV_LOG(ERR, "Failed to configure RSS key via AQ");
2659                 ret = -EINVAL;
2660         }
2661
2662         return ret;
2663 }
2664
2665 static int
2666 ice_get_rss_key(struct ice_vsi *vsi, uint8_t *key, uint8_t *key_len)
2667 {
2668         struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
2669         int ret;
2670
2671         if (!key || !key_len)
2672                 return -EINVAL;
2673
2674         ret = ice_aq_get_rss_key
2675                 (hw, vsi->idx,
2676                  (struct ice_aqc_get_set_rss_keys *)key);
2677         if (ret) {
2678                 PMD_DRV_LOG(ERR, "Failed to get RSS key via AQ");
2679                 return -EINVAL;
2680         }
2681         *key_len = (VSIQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
2682
2683         return 0;
2684 }
2685
2686 static int
2687 ice_rss_hash_update(struct rte_eth_dev *dev,
2688                     struct rte_eth_rss_conf *rss_conf)
2689 {
2690         enum ice_status status = ICE_SUCCESS;
2691         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2692         struct ice_vsi *vsi = pf->main_vsi;
2693
2694         /* set hash key */
2695         status = ice_set_rss_key(vsi, rss_conf->rss_key, rss_conf->rss_key_len);
2696         if (status)
2697                 return status;
2698
2699         /* TODO: hash enable config, ice_add_rss_cfg */
2700         return 0;
2701 }
2702
2703 static int
2704 ice_rss_hash_conf_get(struct rte_eth_dev *dev,
2705                       struct rte_eth_rss_conf *rss_conf)
2706 {
2707         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2708         struct ice_vsi *vsi = pf->main_vsi;
2709
2710         ice_get_rss_key(vsi, rss_conf->rss_key,
2711                         &rss_conf->rss_key_len);
2712
2713         /* TODO: default set to 0 as hf config is not supported now */
2714         rss_conf->rss_hf = 0;
2715         return 0;
2716 }
2717
2718 static void
2719 ice_promisc_enable(struct rte_eth_dev *dev)
2720 {
2721         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2722         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2723         struct ice_vsi *vsi = pf->main_vsi;
2724         uint8_t pmask;
2725         uint16_t status;
2726
2727         pmask = ICE_PROMISC_UCAST_RX | ICE_PROMISC_UCAST_TX |
2728                 ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX;
2729
2730         status = ice_set_vsi_promisc(hw, vsi->idx, pmask, 0);
2731         if (status != ICE_SUCCESS)
2732                 PMD_DRV_LOG(ERR, "Failed to enable promisc, err=%d", status);
2733 }
2734
2735 static void
2736 ice_promisc_disable(struct rte_eth_dev *dev)
2737 {
2738         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2739         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2740         struct ice_vsi *vsi = pf->main_vsi;
2741         uint16_t status;
2742         uint8_t pmask;
2743
2744         pmask = ICE_PROMISC_UCAST_RX | ICE_PROMISC_UCAST_TX |
2745                 ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX;
2746
2747         status = ice_clear_vsi_promisc(hw, vsi->idx, pmask, 0);
2748         if (status != ICE_SUCCESS)
2749                 PMD_DRV_LOG(ERR, "Failed to clear promisc, err=%d", status);
2750 }
2751
2752 static void
2753 ice_allmulti_enable(struct rte_eth_dev *dev)
2754 {
2755         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2756         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2757         struct ice_vsi *vsi = pf->main_vsi;
2758         uint8_t pmask;
2759         uint16_t status;
2760
2761         pmask = ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX;
2762
2763         status = ice_set_vsi_promisc(hw, vsi->idx, pmask, 0);
2764         if (status != ICE_SUCCESS)
2765                 PMD_DRV_LOG(ERR, "Failed to enable allmulti, err=%d", status);
2766 }
2767
2768 static void
2769 ice_allmulti_disable(struct rte_eth_dev *dev)
2770 {
2771         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2772         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2773         struct ice_vsi *vsi = pf->main_vsi;
2774         uint16_t status;
2775         uint8_t pmask;
2776
2777         if (dev->data->promiscuous == 1)
2778                 return; /* must remain in all_multicast mode */
2779
2780         pmask = ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX;
2781
2782         status = ice_clear_vsi_promisc(hw, vsi->idx, pmask, 0);
2783         if (status != ICE_SUCCESS)
2784                 PMD_DRV_LOG(ERR, "Failed to clear allmulti, err=%d", status);
2785 }
2786
2787 static int ice_rx_queue_intr_enable(struct rte_eth_dev *dev,
2788                                     uint16_t queue_id)
2789 {
2790         struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
2791         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2792         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2793         uint32_t val;
2794         uint16_t msix_intr;
2795
2796         msix_intr = intr_handle->intr_vec[queue_id];
2797
2798         val = GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
2799               GLINT_DYN_CTL_ITR_INDX_M;
2800         val &= ~GLINT_DYN_CTL_WB_ON_ITR_M;
2801
2802         ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr), val);
2803         rte_intr_enable(&pci_dev->intr_handle);
2804
2805         return 0;
2806 }
2807
2808 static int ice_rx_queue_intr_disable(struct rte_eth_dev *dev,
2809                                      uint16_t queue_id)
2810 {
2811         struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
2812         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2813         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2814         uint16_t msix_intr;
2815
2816         msix_intr = intr_handle->intr_vec[queue_id];
2817
2818         ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr), GLINT_DYN_CTL_WB_ON_ITR_M);
2819
2820         return 0;
2821 }
2822
2823 static int
2824 ice_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
2825 {
2826         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2827         u32 full_ver;
2828         u8 ver, patch;
2829         u16 build;
2830         int ret;
2831
2832         full_ver = hw->nvm.oem_ver;
2833         ver = (u8)(full_ver >> 24);
2834         build = (u16)((full_ver >> 8) & 0xffff);
2835         patch = (u8)(full_ver & 0xff);
2836
2837         ret = snprintf(fw_version, fw_size,
2838                         "%d.%d%d 0x%08x %d.%d.%d",
2839                         ((hw->nvm.ver >> 12) & 0xf),
2840                         ((hw->nvm.ver >> 4) & 0xff),
2841                         (hw->nvm.ver & 0xf), hw->nvm.eetrack,
2842                         ver, build, patch);
2843
2844         /* add the size of '\0' */
2845         ret += 1;
2846         if (fw_size < (u32)ret)
2847                 return ret;
2848         else
2849                 return 0;
2850 }
2851
/**
 * Configure port-based VLAN (PVID) insertion on a VSI.
 *
 * When info->on is set, the PVID from info->config.pvid is inserted on
 * transmit and only tagged packets may be sent. When off, the accept
 * policy for tagged/untagged frames comes from info->config.reject.
 *
 * @param vsi  VSI to reconfigure (must be non-NULL)
 * @param info PVID settings (must be non-NULL)
 * @return 0 on success, -EINVAL on bad arguments or AQ failure
 */
static int
ice_vsi_vlan_pvid_set(struct ice_vsi *vsi, struct ice_vsi_vlan_pvid_info *info)
{
	struct ice_hw *hw;
	struct ice_vsi_ctx ctxt;
	uint8_t vlan_flags = 0;
	int ret;

	if (!vsi || !info) {
		PMD_DRV_LOG(ERR, "invalid parameters");
		return -EINVAL;
	}

	if (info->on) {
		vsi->info.pvid = info->config.pvid;
		/**
		 * If insert pvid is enabled, only tagged pkts are
		 * allowed to be sent out.
		 */
		vlan_flags = ICE_AQ_VSI_PVLAN_INSERT_PVID |
			     ICE_AQ_VSI_VLAN_MODE_UNTAGGED;
	} else {
		vsi->info.pvid = 0;
		/* reject.* == 0 means that traffic class is allowed */
		if (info->config.reject.tagged == 0)
			vlan_flags |= ICE_AQ_VSI_VLAN_MODE_TAGGED;

		if (info->config.reject.untagged == 0)
			vlan_flags |= ICE_AQ_VSI_VLAN_MODE_UNTAGGED;
	}
	/* Replace only the PVID-insert and VLAN-mode fields */
	vsi->info.vlan_flags &= ~(ICE_AQ_VSI_PVLAN_INSERT_PVID |
				  ICE_AQ_VSI_VLAN_MODE_M);
	vsi->info.vlan_flags |= vlan_flags;
	memset(&ctxt, 0, sizeof(ctxt));
	rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
	ctxt.info.valid_sections =
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID);
	ctxt.vsi_num = vsi->vsi_id;

	hw = ICE_VSI_TO_HW(vsi);
	ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
	if (ret != ICE_SUCCESS) {
		PMD_DRV_LOG(ERR,
			    "update VSI for VLAN insert failed, err %d",
			    ret);
		return -EINVAL;
	}

	/* Remember that the VLAN section of the cached info is now valid */
	vsi->info.valid_sections |=
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID);

	return ret;
}
2904
2905 static int
2906 ice_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
2907 {
2908         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2909         struct ice_vsi *vsi = pf->main_vsi;
2910         struct rte_eth_dev_data *data = pf->dev_data;
2911         struct ice_vsi_vlan_pvid_info info;
2912         int ret;
2913
2914         memset(&info, 0, sizeof(info));
2915         info.on = on;
2916         if (info.on) {
2917                 info.config.pvid = pvid;
2918         } else {
2919                 info.config.reject.tagged =
2920                         data->dev_conf.txmode.hw_vlan_reject_tagged;
2921                 info.config.reject.untagged =
2922                         data->dev_conf.txmode.hw_vlan_reject_untagged;
2923         }
2924
2925         ret = ice_vsi_vlan_pvid_set(vsi, &info);
2926         if (ret < 0) {
2927                 PMD_DRV_LOG(ERR, "Failed to set pvid.");
2928                 return -EINVAL;
2929         }
2930
2931         return 0;
2932 }
2933
2934 static int
2935 ice_get_eeprom_length(struct rte_eth_dev *dev)
2936 {
2937         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2938
2939         /* Convert word count to byte count */
2940         return hw->nvm.sr_words << 1;
2941 }
2942
/**
 * ethdev get_eeprom callback: read eeprom->length bytes starting at
 * eeprom->offset from the NVM shadow RAM into eeprom->data.
 *
 * @return 0 on success, -EINVAL if the requested range exceeds the NVM,
 *         -EIO on a shadow-RAM read failure
 */
static int
ice_get_eeprom(struct rte_eth_dev *dev,
	       struct rte_dev_eeprom_info *eeprom)
{
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint16_t *data = eeprom->data;
	uint16_t first_word, last_word, nwords;
	enum ice_status status = ICE_SUCCESS;

	/* Convert the byte offset/length into 16-bit word bounds */
	first_word = eeprom->offset >> 1;
	last_word = (eeprom->offset + eeprom->length - 1) >> 1;
	nwords = last_word - first_word + 1;

	/* NOTE(review): sr_words is a word count, so valid indices are
	 * 0..sr_words-1; a '>=' comparison looks intended here — confirm.
	 */
	if (first_word > hw->nvm.sr_words ||
	    last_word > hw->nvm.sr_words) {
		PMD_DRV_LOG(ERR, "Requested EEPROM bytes out of range.");
		return -EINVAL;
	}

	eeprom->magic = hw->vendor_id | (hw->device_id << 16);

	/* nwords may be updated by the read to reflect words actually read */
	status = ice_read_sr_buf(hw, first_word, &nwords, data);
	if (status) {
		PMD_DRV_LOG(ERR, "EEPROM read failed.");
		eeprom->length = sizeof(uint16_t) * nwords;
		return -EIO;
	}

	return 0;
}
2973
2974 static void
2975 ice_stat_update_32(struct ice_hw *hw,
2976                    uint32_t reg,
2977                    bool offset_loaded,
2978                    uint64_t *offset,
2979                    uint64_t *stat)
2980 {
2981         uint64_t new_data;
2982
2983         new_data = (uint64_t)ICE_READ_REG(hw, reg);
2984         if (!offset_loaded)
2985                 *offset = new_data;
2986
2987         if (new_data >= *offset)
2988                 *stat = (uint64_t)(new_data - *offset);
2989         else
2990                 *stat = (uint64_t)((new_data +
2991                                     ((uint64_t)1 << ICE_32_BIT_WIDTH))
2992                                    - *offset);
2993 }
2994
2995 static void
2996 ice_stat_update_40(struct ice_hw *hw,
2997                    uint32_t hireg,
2998                    uint32_t loreg,
2999                    bool offset_loaded,
3000                    uint64_t *offset,
3001                    uint64_t *stat)
3002 {
3003         uint64_t new_data;
3004
3005         new_data = (uint64_t)ICE_READ_REG(hw, loreg);
3006         new_data |= (uint64_t)(ICE_READ_REG(hw, hireg) & ICE_8_BIT_MASK) <<
3007                     ICE_32_BIT_WIDTH;
3008
3009         if (!offset_loaded)
3010                 *offset = new_data;
3011
3012         if (new_data >= *offset)
3013                 *stat = new_data - *offset;
3014         else
3015                 *stat = (uint64_t)((new_data +
3016                                     ((uint64_t)1 << ICE_40_BIT_WIDTH)) -
3017                                    *offset);
3018
3019         *stat &= ICE_40_BIT_MASK;
3020 }
3021
/**
 * Get all the statistics of a VSI.
 *
 * Reads the per-VSI GLV_* counters into vsi->eth_stats as deltas against
 * vsi->eth_stats_offset (the baseline is captured on the first call, when
 * offset_loaded is false), then dumps them at DEBUG level.
 */
static void
ice_update_vsi_stats(struct ice_vsi *vsi)
{
	struct ice_eth_stats *oes = &vsi->eth_stats_offset;
	struct ice_eth_stats *nes = &vsi->eth_stats;
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	int idx = rte_le_to_cpu_16(vsi->vsi_id);

	ice_stat_update_40(hw, GLV_GORCH(idx), GLV_GORCL(idx),
			   vsi->offset_loaded, &oes->rx_bytes,
			   &nes->rx_bytes);
	ice_stat_update_40(hw, GLV_UPRCH(idx), GLV_UPRCL(idx),
			   vsi->offset_loaded, &oes->rx_unicast,
			   &nes->rx_unicast);
	ice_stat_update_40(hw, GLV_MPRCH(idx), GLV_MPRCL(idx),
			   vsi->offset_loaded, &oes->rx_multicast,
			   &nes->rx_multicast);
	ice_stat_update_40(hw, GLV_BPRCH(idx), GLV_BPRCL(idx),
			   vsi->offset_loaded, &oes->rx_broadcast,
			   &nes->rx_broadcast);
	/* exclude CRC bytes: HW byte counters include the 4-byte CRC of
	 * every received frame
	 */
	nes->rx_bytes -= (nes->rx_unicast + nes->rx_multicast +
			  nes->rx_broadcast) * ETHER_CRC_LEN;

	ice_stat_update_32(hw, GLV_RDPC(idx), vsi->offset_loaded,
			   &oes->rx_discards, &nes->rx_discards);
	/* GLV_REPC not supported */
	/* GLV_RMPC not supported */
	ice_stat_update_32(hw, GLSWID_RUPP(idx), vsi->offset_loaded,
			   &oes->rx_unknown_protocol,
			   &nes->rx_unknown_protocol);
	ice_stat_update_40(hw, GLV_GOTCH(idx), GLV_GOTCL(idx),
			   vsi->offset_loaded, &oes->tx_bytes,
			   &nes->tx_bytes);
	ice_stat_update_40(hw, GLV_UPTCH(idx), GLV_UPTCL(idx),
			   vsi->offset_loaded, &oes->tx_unicast,
			   &nes->tx_unicast);
	ice_stat_update_40(hw, GLV_MPTCH(idx), GLV_MPTCL(idx),
			   vsi->offset_loaded, &oes->tx_multicast,
			   &nes->tx_multicast);
	ice_stat_update_40(hw, GLV_BPTCH(idx), GLV_BPTCL(idx),
			   vsi->offset_loaded,  &oes->tx_broadcast,
			   &nes->tx_broadcast);
	/* GLV_TDPC not supported */
	ice_stat_update_32(hw, GLV_TEPC(idx), vsi->offset_loaded,
			   &oes->tx_errors, &nes->tx_errors);
	/* Subsequent calls compute deltas against this baseline */
	vsi->offset_loaded = true;

	PMD_DRV_LOG(DEBUG, "************** VSI[%u] stats start **************",
		    vsi->vsi_id);
	PMD_DRV_LOG(DEBUG, "rx_bytes:            %"PRIu64"", nes->rx_bytes);
	PMD_DRV_LOG(DEBUG, "rx_unicast:          %"PRIu64"", nes->rx_unicast);
	PMD_DRV_LOG(DEBUG, "rx_multicast:        %"PRIu64"", nes->rx_multicast);
	PMD_DRV_LOG(DEBUG, "rx_broadcast:        %"PRIu64"", nes->rx_broadcast);
	PMD_DRV_LOG(DEBUG, "rx_discards:         %"PRIu64"", nes->rx_discards);
	PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
		    nes->rx_unknown_protocol);
	PMD_DRV_LOG(DEBUG, "tx_bytes:            %"PRIu64"", nes->tx_bytes);
	PMD_DRV_LOG(DEBUG, "tx_unicast:          %"PRIu64"", nes->tx_unicast);
	PMD_DRV_LOG(DEBUG, "tx_multicast:        %"PRIu64"", nes->tx_multicast);
	PMD_DRV_LOG(DEBUG, "tx_broadcast:        %"PRIu64"", nes->tx_broadcast);
	PMD_DRV_LOG(DEBUG, "tx_discards:         %"PRIu64"", nes->tx_discards);
	PMD_DRV_LOG(DEBUG, "tx_errors:           %"PRIu64"", nes->tx_errors);
	PMD_DRV_LOG(DEBUG, "************** VSI[%u] stats end ****************",
		    vsi->vsi_id);
}
3089
3090 static void
3091 ice_read_stats_registers(struct ice_pf *pf, struct ice_hw *hw)
3092 {
3093         struct ice_hw_port_stats *ns = &pf->stats; /* new stats */
3094         struct ice_hw_port_stats *os = &pf->stats_offset; /* old stats */
3095
3096         /* Get statistics of struct ice_eth_stats */
3097         ice_stat_update_40(hw, GLPRT_GORCH(hw->port_info->lport),
3098                            GLPRT_GORCL(hw->port_info->lport),
3099                            pf->offset_loaded, &os->eth.rx_bytes,
3100                            &ns->eth.rx_bytes);
3101         ice_stat_update_40(hw, GLPRT_UPRCH(hw->port_info->lport),
3102                            GLPRT_UPRCL(hw->port_info->lport),
3103                            pf->offset_loaded, &os->eth.rx_unicast,
3104                            &ns->eth.rx_unicast);
3105         ice_stat_update_40(hw, GLPRT_MPRCH(hw->port_info->lport),
3106                            GLPRT_MPRCL(hw->port_info->lport),
3107                            pf->offset_loaded, &os->eth.rx_multicast,
3108                            &ns->eth.rx_multicast);
3109         ice_stat_update_40(hw, GLPRT_BPRCH(hw->port_info->lport),
3110                            GLPRT_BPRCL(hw->port_info->lport),
3111                            pf->offset_loaded, &os->eth.rx_broadcast,
3112                            &ns->eth.rx_broadcast);
3113         ice_stat_update_32(hw, PRTRPB_RDPC,
3114                            pf->offset_loaded, &os->eth.rx_discards,
3115                            &ns->eth.rx_discards);
3116
3117         /* Workaround: CRC size should not be included in byte statistics,
3118          * so subtract ETHER_CRC_LEN from the byte counter for each rx packet.
3119          */
3120         ns->eth.rx_bytes -= (ns->eth.rx_unicast + ns->eth.rx_multicast +
3121                              ns->eth.rx_broadcast) * ETHER_CRC_LEN;
3122
3123         /* GLPRT_REPC not supported */
3124         /* GLPRT_RMPC not supported */
3125         ice_stat_update_32(hw, GLSWID_RUPP(hw->port_info->lport),
3126                            pf->offset_loaded,
3127                            &os->eth.rx_unknown_protocol,
3128                            &ns->eth.rx_unknown_protocol);
3129         ice_stat_update_40(hw, GLPRT_GOTCH(hw->port_info->lport),
3130                            GLPRT_GOTCL(hw->port_info->lport),
3131                            pf->offset_loaded, &os->eth.tx_bytes,
3132                            &ns->eth.tx_bytes);
3133         ice_stat_update_40(hw, GLPRT_UPTCH(hw->port_info->lport),
3134                            GLPRT_UPTCL(hw->port_info->lport),
3135                            pf->offset_loaded, &os->eth.tx_unicast,
3136                            &ns->eth.tx_unicast);
3137         ice_stat_update_40(hw, GLPRT_MPTCH(hw->port_info->lport),
3138                            GLPRT_MPTCL(hw->port_info->lport),
3139                            pf->offset_loaded, &os->eth.tx_multicast,
3140                            &ns->eth.tx_multicast);
3141         ice_stat_update_40(hw, GLPRT_BPTCH(hw->port_info->lport),
3142                            GLPRT_BPTCL(hw->port_info->lport),
3143                            pf->offset_loaded, &os->eth.tx_broadcast,
3144                            &ns->eth.tx_broadcast);
3145         ns->eth.tx_bytes -= (ns->eth.tx_unicast + ns->eth.tx_multicast +
3146                              ns->eth.tx_broadcast) * ETHER_CRC_LEN;
3147
3148         /* GLPRT_TEPC not supported */
3149
3150         /* additional port specific stats */
3151         ice_stat_update_32(hw, GLPRT_TDOLD(hw->port_info->lport),
3152                            pf->offset_loaded, &os->tx_dropped_link_down,
3153                            &ns->tx_dropped_link_down);
3154         ice_stat_update_32(hw, GLPRT_CRCERRS(hw->port_info->lport),
3155                            pf->offset_loaded, &os->crc_errors,
3156                            &ns->crc_errors);
3157         ice_stat_update_32(hw, GLPRT_ILLERRC(hw->port_info->lport),
3158                            pf->offset_loaded, &os->illegal_bytes,
3159                            &ns->illegal_bytes);
3160         /* GLPRT_ERRBC not supported */
3161         ice_stat_update_32(hw, GLPRT_MLFC(hw->port_info->lport),
3162                            pf->offset_loaded, &os->mac_local_faults,
3163                            &ns->mac_local_faults);
3164         ice_stat_update_32(hw, GLPRT_MRFC(hw->port_info->lport),
3165                            pf->offset_loaded, &os->mac_remote_faults,
3166                            &ns->mac_remote_faults);
3167
3168         ice_stat_update_32(hw, GLPRT_RLEC(hw->port_info->lport),
3169                            pf->offset_loaded, &os->rx_len_errors,
3170                            &ns->rx_len_errors);
3171
3172         ice_stat_update_32(hw, GLPRT_LXONRXC(hw->port_info->lport),
3173                            pf->offset_loaded, &os->link_xon_rx,
3174                            &ns->link_xon_rx);
3175         ice_stat_update_32(hw, GLPRT_LXOFFRXC(hw->port_info->lport),
3176                            pf->offset_loaded, &os->link_xoff_rx,
3177                            &ns->link_xoff_rx);
3178         ice_stat_update_32(hw, GLPRT_LXONTXC(hw->port_info->lport),
3179                            pf->offset_loaded, &os->link_xon_tx,
3180                            &ns->link_xon_tx);
3181         ice_stat_update_32(hw, GLPRT_LXOFFTXC(hw->port_info->lport),
3182                            pf->offset_loaded, &os->link_xoff_tx,
3183                            &ns->link_xoff_tx);
3184         ice_stat_update_40(hw, GLPRT_PRC64H(hw->port_info->lport),
3185                            GLPRT_PRC64L(hw->port_info->lport),
3186                            pf->offset_loaded, &os->rx_size_64,
3187                            &ns->rx_size_64);
3188         ice_stat_update_40(hw, GLPRT_PRC127H(hw->port_info->lport),
3189                            GLPRT_PRC127L(hw->port_info->lport),
3190                            pf->offset_loaded, &os->rx_size_127,
3191                            &ns->rx_size_127);
3192         ice_stat_update_40(hw, GLPRT_PRC255H(hw->port_info->lport),
3193                            GLPRT_PRC255L(hw->port_info->lport),
3194                            pf->offset_loaded, &os->rx_size_255,
3195                            &ns->rx_size_255);
3196         ice_stat_update_40(hw, GLPRT_PRC511H(hw->port_info->lport),
3197                            GLPRT_PRC511L(hw->port_info->lport),
3198                            pf->offset_loaded, &os->rx_size_511,
3199                            &ns->rx_size_511);
3200         ice_stat_update_40(hw, GLPRT_PRC1023H(hw->port_info->lport),
3201                            GLPRT_PRC1023L(hw->port_info->lport),
3202                            pf->offset_loaded, &os->rx_size_1023,
3203                            &ns->rx_size_1023);
3204         ice_stat_update_40(hw, GLPRT_PRC1522H(hw->port_info->lport),
3205                            GLPRT_PRC1522L(hw->port_info->lport),
3206                            pf->offset_loaded, &os->rx_size_1522,
3207                            &ns->rx_size_1522);
3208         ice_stat_update_40(hw, GLPRT_PRC9522H(hw->port_info->lport),
3209                            GLPRT_PRC9522L(hw->port_info->lport),
3210                            pf->offset_loaded, &os->rx_size_big,
3211                            &ns->rx_size_big);
3212         ice_stat_update_32(hw, GLPRT_RUC(hw->port_info->lport),
3213                            pf->offset_loaded, &os->rx_undersize,
3214                            &ns->rx_undersize);
3215         ice_stat_update_32(hw, GLPRT_RFC(hw->port_info->lport),
3216                            pf->offset_loaded, &os->rx_fragments,
3217                            &ns->rx_fragments);
3218         ice_stat_update_32(hw, GLPRT_ROC(hw->port_info->lport),
3219                            pf->offset_loaded, &os->rx_oversize,
3220                            &ns->rx_oversize);
3221         ice_stat_update_32(hw, GLPRT_RJC(hw->port_info->lport),
3222                            pf->offset_loaded, &os->rx_jabber,
3223                            &ns->rx_jabber);
3224         ice_stat_update_40(hw, GLPRT_PTC64H(hw->port_info->lport),
3225                            GLPRT_PTC64L(hw->port_info->lport),
3226                            pf->offset_loaded, &os->tx_size_64,
3227                            &ns->tx_size_64);
3228         ice_stat_update_40(hw, GLPRT_PTC127H(hw->port_info->lport),
3229                            GLPRT_PTC127L(hw->port_info->lport),
3230                            pf->offset_loaded, &os->tx_size_127,
3231                            &ns->tx_size_127);
3232         ice_stat_update_40(hw, GLPRT_PTC255H(hw->port_info->lport),
3233                            GLPRT_PTC255L(hw->port_info->lport),
3234                            pf->offset_loaded, &os->tx_size_255,
3235                            &ns->tx_size_255);
3236         ice_stat_update_40(hw, GLPRT_PTC511H(hw->port_info->lport),
3237                            GLPRT_PTC511L(hw->port_info->lport),
3238                            pf->offset_loaded, &os->tx_size_511,
3239                            &ns->tx_size_511);
3240         ice_stat_update_40(hw, GLPRT_PTC1023H(hw->port_info->lport),
3241                            GLPRT_PTC1023L(hw->port_info->lport),
3242                            pf->offset_loaded, &os->tx_size_1023,
3243                            &ns->tx_size_1023);
3244         ice_stat_update_40(hw, GLPRT_PTC1522H(hw->port_info->lport),
3245                            GLPRT_PTC1522L(hw->port_info->lport),
3246                            pf->offset_loaded, &os->tx_size_1522,
3247                            &ns->tx_size_1522);
3248         ice_stat_update_40(hw, GLPRT_PTC9522H(hw->port_info->lport),
3249                            GLPRT_PTC9522L(hw->port_info->lport),
3250                            pf->offset_loaded, &os->tx_size_big,
3251                            &ns->tx_size_big);
3252
3253         /* GLPRT_MSPDC not supported */
3254         /* GLPRT_XEC not supported */
3255
3256         pf->offset_loaded = true;
3257
3258         if (pf->main_vsi)
3259                 ice_update_vsi_stats(pf->main_vsi);
3260 }
3261
3262 /* Get all statistics of a port */
3263 static int
3264 ice_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
3265 {
3266         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3267         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3268         struct ice_hw_port_stats *ns = &pf->stats; /* new stats */
3269
3270         /* call read registers - updates values, now write them to struct */
3271         ice_read_stats_registers(pf, hw);
3272
3273         stats->ipackets = ns->eth.rx_unicast +
3274                           ns->eth.rx_multicast +
3275                           ns->eth.rx_broadcast -
3276                           ns->eth.rx_discards -
3277                           pf->main_vsi->eth_stats.rx_discards;
3278         stats->opackets = ns->eth.tx_unicast +
3279                           ns->eth.tx_multicast +
3280                           ns->eth.tx_broadcast;
3281         stats->ibytes   = ns->eth.rx_bytes;
3282         stats->obytes   = ns->eth.tx_bytes;
3283         stats->oerrors  = ns->eth.tx_errors +
3284                           pf->main_vsi->eth_stats.tx_errors;
3285
3286         /* Rx Errors */
3287         stats->imissed  = ns->eth.rx_discards +
3288                           pf->main_vsi->eth_stats.rx_discards;
3289         stats->ierrors  = ns->crc_errors +
3290                           ns->rx_undersize +
3291                           ns->rx_oversize + ns->rx_fragments + ns->rx_jabber;
3292
3293         PMD_DRV_LOG(DEBUG, "*************** PF stats start *****************");
3294         PMD_DRV_LOG(DEBUG, "rx_bytes:   %"PRIu64"", ns->eth.rx_bytes);
3295         PMD_DRV_LOG(DEBUG, "rx_unicast: %"PRIu64"", ns->eth.rx_unicast);
3296         PMD_DRV_LOG(DEBUG, "rx_multicast:%"PRIu64"", ns->eth.rx_multicast);
3297         PMD_DRV_LOG(DEBUG, "rx_broadcast:%"PRIu64"", ns->eth.rx_broadcast);
3298         PMD_DRV_LOG(DEBUG, "rx_discards:%"PRIu64"", ns->eth.rx_discards);
3299         PMD_DRV_LOG(DEBUG, "vsi rx_discards:%"PRIu64"",
3300                     pf->main_vsi->eth_stats.rx_discards);
3301         PMD_DRV_LOG(DEBUG, "rx_unknown_protocol:  %"PRIu64"",
3302                     ns->eth.rx_unknown_protocol);
3303         PMD_DRV_LOG(DEBUG, "tx_bytes:   %"PRIu64"", ns->eth.tx_bytes);
3304         PMD_DRV_LOG(DEBUG, "tx_unicast: %"PRIu64"", ns->eth.tx_unicast);
3305         PMD_DRV_LOG(DEBUG, "tx_multicast:%"PRIu64"", ns->eth.tx_multicast);
3306         PMD_DRV_LOG(DEBUG, "tx_broadcast:%"PRIu64"", ns->eth.tx_broadcast);
3307         PMD_DRV_LOG(DEBUG, "tx_discards:%"PRIu64"", ns->eth.tx_discards);
3308         PMD_DRV_LOG(DEBUG, "vsi tx_discards:%"PRIu64"",
3309                     pf->main_vsi->eth_stats.tx_discards);
3310         PMD_DRV_LOG(DEBUG, "tx_errors:          %"PRIu64"", ns->eth.tx_errors);
3311
3312         PMD_DRV_LOG(DEBUG, "tx_dropped_link_down:       %"PRIu64"",
3313                     ns->tx_dropped_link_down);
3314         PMD_DRV_LOG(DEBUG, "crc_errors: %"PRIu64"", ns->crc_errors);
3315         PMD_DRV_LOG(DEBUG, "illegal_bytes:      %"PRIu64"",
3316                     ns->illegal_bytes);
3317         PMD_DRV_LOG(DEBUG, "error_bytes:        %"PRIu64"", ns->error_bytes);
3318         PMD_DRV_LOG(DEBUG, "mac_local_faults:   %"PRIu64"",
3319                     ns->mac_local_faults);
3320         PMD_DRV_LOG(DEBUG, "mac_remote_faults:  %"PRIu64"",
3321                     ns->mac_remote_faults);
3322         PMD_DRV_LOG(DEBUG, "link_xon_rx:        %"PRIu64"", ns->link_xon_rx);
3323         PMD_DRV_LOG(DEBUG, "link_xoff_rx:       %"PRIu64"", ns->link_xoff_rx);
3324         PMD_DRV_LOG(DEBUG, "link_xon_tx:        %"PRIu64"", ns->link_xon_tx);
3325         PMD_DRV_LOG(DEBUG, "link_xoff_tx:       %"PRIu64"", ns->link_xoff_tx);
3326         PMD_DRV_LOG(DEBUG, "rx_size_64:         %"PRIu64"", ns->rx_size_64);
3327         PMD_DRV_LOG(DEBUG, "rx_size_127:        %"PRIu64"", ns->rx_size_127);
3328         PMD_DRV_LOG(DEBUG, "rx_size_255:        %"PRIu64"", ns->rx_size_255);
3329         PMD_DRV_LOG(DEBUG, "rx_size_511:        %"PRIu64"", ns->rx_size_511);
3330         PMD_DRV_LOG(DEBUG, "rx_size_1023:       %"PRIu64"", ns->rx_size_1023);
3331         PMD_DRV_LOG(DEBUG, "rx_size_1522:       %"PRIu64"", ns->rx_size_1522);
3332         PMD_DRV_LOG(DEBUG, "rx_size_big:        %"PRIu64"", ns->rx_size_big);
3333         PMD_DRV_LOG(DEBUG, "rx_undersize:       %"PRIu64"", ns->rx_undersize);
3334         PMD_DRV_LOG(DEBUG, "rx_fragments:       %"PRIu64"", ns->rx_fragments);
3335         PMD_DRV_LOG(DEBUG, "rx_oversize:        %"PRIu64"", ns->rx_oversize);
3336         PMD_DRV_LOG(DEBUG, "rx_jabber:          %"PRIu64"", ns->rx_jabber);
3337         PMD_DRV_LOG(DEBUG, "tx_size_64:         %"PRIu64"", ns->tx_size_64);
3338         PMD_DRV_LOG(DEBUG, "tx_size_127:        %"PRIu64"", ns->tx_size_127);
3339         PMD_DRV_LOG(DEBUG, "tx_size_255:        %"PRIu64"", ns->tx_size_255);
3340         PMD_DRV_LOG(DEBUG, "tx_size_511:        %"PRIu64"", ns->tx_size_511);
3341         PMD_DRV_LOG(DEBUG, "tx_size_1023:       %"PRIu64"", ns->tx_size_1023);
3342         PMD_DRV_LOG(DEBUG, "tx_size_1522:       %"PRIu64"", ns->tx_size_1522);
3343         PMD_DRV_LOG(DEBUG, "tx_size_big:        %"PRIu64"", ns->tx_size_big);
3344         PMD_DRV_LOG(DEBUG, "rx_len_errors:      %"PRIu64"", ns->rx_len_errors);
3345         PMD_DRV_LOG(DEBUG, "************* PF stats end ****************");
3346         return 0;
3347 }
3348
3349 /* Reset the statistics */
3350 static void
3351 ice_stats_reset(struct rte_eth_dev *dev)
3352 {
3353         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3354         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3355
3356         /* Mark PF and VSI stats to update the offset, aka "reset" */
3357         pf->offset_loaded = false;
3358         if (pf->main_vsi)
3359                 pf->main_vsi->offset_loaded = false;
3360
3361         /* read the stats, reading current register values into offset */
3362         ice_read_stats_registers(pf, hw);
3363 }
3364
3365 static uint32_t
3366 ice_xstats_calc_num(void)
3367 {
3368         uint32_t num;
3369
3370         num = ICE_NB_ETH_XSTATS + ICE_NB_HW_PORT_XSTATS;
3371
3372         return num;
3373 }
3374
3375 static int
3376 ice_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
3377                unsigned int n)
3378 {
3379         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3380         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3381         unsigned int i;
3382         unsigned int count;
3383         struct ice_hw_port_stats *hw_stats = &pf->stats;
3384
3385         count = ice_xstats_calc_num();
3386         if (n < count)
3387                 return count;
3388
3389         ice_read_stats_registers(pf, hw);
3390
3391         if (!xstats)
3392                 return 0;
3393
3394         count = 0;
3395
3396         /* Get stats from ice_eth_stats struct */
3397         for (i = 0; i < ICE_NB_ETH_XSTATS; i++) {
3398                 xstats[count].value =
3399                         *(uint64_t *)((char *)&hw_stats->eth +
3400                                       ice_stats_strings[i].offset);
3401                 xstats[count].id = count;
3402                 count++;
3403         }
3404
3405         /* Get individiual stats from ice_hw_port struct */
3406         for (i = 0; i < ICE_NB_HW_PORT_XSTATS; i++) {
3407                 xstats[count].value =
3408                         *(uint64_t *)((char *)hw_stats +
3409                                       ice_hw_port_strings[i].offset);
3410                 xstats[count].id = count;
3411                 count++;
3412         }
3413
3414         return count;
3415 }
3416
3417 static int ice_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
3418                                 struct rte_eth_xstat_name *xstats_names,
3419                                 __rte_unused unsigned int limit)
3420 {
3421         unsigned int count = 0;
3422         unsigned int i;
3423
3424         if (!xstats_names)
3425                 return ice_xstats_calc_num();
3426
3427         /* Note: limit checked in rte_eth_xstats_names() */
3428
3429         /* Get stats from ice_eth_stats struct */
3430         for (i = 0; i < ICE_NB_ETH_XSTATS; i++) {
3431                 snprintf(xstats_names[count].name,
3432                          sizeof(xstats_names[count].name),
3433                          "%s", ice_stats_strings[i].name);
3434                 count++;
3435         }
3436
3437         /* Get individiual stats from ice_hw_port struct */
3438         for (i = 0; i < ICE_NB_HW_PORT_XSTATS; i++) {
3439                 snprintf(xstats_names[count].name,
3440                          sizeof(xstats_names[count].name),
3441                          "%s", ice_hw_port_strings[i].name);
3442                 count++;
3443         }
3444
3445         return count;
3446 }
3447
3448 static int
3449 ice_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
3450               struct rte_pci_device *pci_dev)
3451 {
3452         return rte_eth_dev_pci_generic_probe(pci_dev,
3453                                              sizeof(struct ice_adapter),
3454                                              ice_dev_init);
3455 }
3456
3457 static int
3458 ice_pci_remove(struct rte_pci_device *pci_dev)
3459 {
3460         return rte_eth_dev_pci_generic_remove(pci_dev, ice_dev_uninit);
3461 }
3462
/* PCI driver descriptor registered with the EAL below. */
static struct rte_pci_driver rte_ice_pmd = {
	.id_table = pci_id_ice_map,
	/* Needs BARs mapped, uses link-status interrupts, and can run
	 * with VA-based IOVA addressing.
	 */
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
		     RTE_PCI_DRV_IOVA_AS_VA,
	.probe = ice_pci_probe,
	.remove = ice_pci_remove,
};
3470
/**
 * Driver initialization routine.
 * Invoked once at EAL init time.
 * Register itself as the [Poll Mode] Driver of PCI devices.
 */
RTE_PMD_REGISTER_PCI(net_ice, rte_ice_pmd);
/* Expose the supported PCI ID table for tooling (pmdinfo). */
RTE_PMD_REGISTER_PCI_TABLE(net_ice, pci_id_ice_map);
/* Kernel modules this PMD can bind through. */
RTE_PMD_REGISTER_KMOD_DEP(net_ice, "* igb_uio | uio_pci_generic | vfio-pci");
/* Devargs accepted by this driver (max queue-pair count). */
RTE_PMD_REGISTER_PARAM_STRING(net_ice,
			      ICE_MAX_QP_NUM "=<int>");
3481
3482 RTE_INIT(ice_init_log)
3483 {
3484         ice_logtype_init = rte_log_register("pmd.net.ice.init");
3485         if (ice_logtype_init >= 0)
3486                 rte_log_set_level(ice_logtype_init, RTE_LOG_NOTICE);
3487         ice_logtype_driver = rte_log_register("pmd.net.ice.driver");
3488         if (ice_logtype_driver >= 0)
3489                 rte_log_set_level(ice_logtype_driver, RTE_LOG_NOTICE);
3490 }