net/ice: support RSS
[dpdk.git] / drivers / net / ice / ice_ethdev.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018 Intel Corporation
3  */
4
5 #include <rte_ethdev_pci.h>
6
7 #include "base/ice_sched.h"
8 #include "ice_ethdev.h"
9 #include "ice_rxtx.h"
10
/* devargs key used to cap the number of queue pairs per VSI */
#define ICE_MAX_QP_NUM "max_queue_pair_num"
/* Default outer-tag TPID selection for QinQ; per the macro name this is the
 * ICE_AQ_VSI_OUTER_TAG_VLAN_9100 (0x9100) variant.
 */
#define ICE_DFLT_OUTER_TAG_TYPE ICE_AQ_VSI_OUTER_TAG_VLAN_9100

/* Dynamic log type IDs for init- and driver-level logging */
int ice_logtype_init;
int ice_logtype_driver;

/* Forward declarations for the rte_eth_dev_ops callbacks wired up below */
static int ice_dev_configure(struct rte_eth_dev *dev);
static int ice_dev_start(struct rte_eth_dev *dev);
static void ice_dev_stop(struct rte_eth_dev *dev);
static void ice_dev_close(struct rte_eth_dev *dev);
static int ice_dev_reset(struct rte_eth_dev *dev);
static void ice_dev_info_get(struct rte_eth_dev *dev,
                             struct rte_eth_dev_info *dev_info);
static int ice_link_update(struct rte_eth_dev *dev,
                           int wait_to_complete);
static int ice_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
static int ice_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static int ice_vlan_tpid_set(struct rte_eth_dev *dev,
                             enum rte_vlan_type vlan_type,
                             uint16_t tpid);
static int ice_rss_reta_update(struct rte_eth_dev *dev,
                               struct rte_eth_rss_reta_entry64 *reta_conf,
                               uint16_t reta_size);
static int ice_rss_reta_query(struct rte_eth_dev *dev,
                              struct rte_eth_rss_reta_entry64 *reta_conf,
                              uint16_t reta_size);
static int ice_rss_hash_update(struct rte_eth_dev *dev,
                               struct rte_eth_rss_conf *rss_conf);
static int ice_rss_hash_conf_get(struct rte_eth_dev *dev,
                                 struct rte_eth_rss_conf *rss_conf);
static int ice_vlan_filter_set(struct rte_eth_dev *dev,
                               uint16_t vlan_id,
                               int on);
static int ice_macaddr_set(struct rte_eth_dev *dev,
                           struct ether_addr *mac_addr);
static int ice_macaddr_add(struct rte_eth_dev *dev,
                           struct ether_addr *mac_addr,
                           __rte_unused uint32_t index,
                           uint32_t pool);
static void ice_macaddr_remove(struct rte_eth_dev *dev, uint32_t index);
static int ice_vlan_pvid_set(struct rte_eth_dev *dev,
                             uint16_t pvid, int on);
53
/* PCI IDs of the Intel E810 (100G "Columbiaville") devices bound by this PMD */
static const struct rte_pci_id pci_id_ice_map[] = {
        { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_BACKPLANE) },
        { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP) },
        { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP) },
        { .vendor_id = 0, /* sentinel */ },
};
60
/* ethdev API callback table for the ice PMD */
static const struct eth_dev_ops ice_eth_dev_ops = {
        /* device lifecycle */
        .dev_configure                = ice_dev_configure,
        .dev_start                    = ice_dev_start,
        .dev_stop                     = ice_dev_stop,
        .dev_close                    = ice_dev_close,
        .dev_reset                    = ice_dev_reset,
        /* per-queue setup and control (implemented in ice_rxtx.c) */
        .rx_queue_start               = ice_rx_queue_start,
        .rx_queue_stop                = ice_rx_queue_stop,
        .tx_queue_start               = ice_tx_queue_start,
        .tx_queue_stop                = ice_tx_queue_stop,
        .rx_queue_setup               = ice_rx_queue_setup,
        .rx_queue_release             = ice_rx_queue_release,
        .tx_queue_setup               = ice_tx_queue_setup,
        .tx_queue_release             = ice_tx_queue_release,
        /* device info and link */
        .dev_infos_get                = ice_dev_info_get,
        .dev_supported_ptypes_get     = ice_dev_supported_ptypes_get,
        .link_update                  = ice_link_update,
        .mtu_set                      = ice_mtu_set,
        /* MAC address management */
        .mac_addr_set                 = ice_macaddr_set,
        .mac_addr_add                 = ice_macaddr_add,
        .mac_addr_remove              = ice_macaddr_remove,
        /* VLAN filtering / offloads */
        .vlan_filter_set              = ice_vlan_filter_set,
        .vlan_offload_set             = ice_vlan_offload_set,
        .vlan_tpid_set                = ice_vlan_tpid_set,
        /* RSS redirection table and hash configuration */
        .reta_update                  = ice_rss_reta_update,
        .reta_query                   = ice_rss_reta_query,
        .rss_hash_update              = ice_rss_hash_update,
        .rss_hash_conf_get            = ice_rss_hash_conf_get,
        .vlan_pvid_set                = ice_vlan_pvid_set,
        /* queue introspection */
        .rxq_info_get                 = ice_rxq_info_get,
        .txq_info_get                 = ice_txq_info_get,
        .rx_queue_count               = ice_rx_queue_count,
};
94
95 static void
96 ice_init_controlq_parameter(struct ice_hw *hw)
97 {
98         /* fields for adminq */
99         hw->adminq.num_rq_entries = ICE_ADMINQ_LEN;
100         hw->adminq.num_sq_entries = ICE_ADMINQ_LEN;
101         hw->adminq.rq_buf_size = ICE_ADMINQ_BUF_SZ;
102         hw->adminq.sq_buf_size = ICE_ADMINQ_BUF_SZ;
103
104         /* fields for mailboxq, DPDK used as PF host */
105         hw->mailboxq.num_rq_entries = ICE_MAILBOXQ_LEN;
106         hw->mailboxq.num_sq_entries = ICE_MAILBOXQ_LEN;
107         hw->mailboxq.rq_buf_size = ICE_MAILBOXQ_BUF_SZ;
108         hw->mailboxq.sq_buf_size = ICE_MAILBOXQ_BUF_SZ;
109 }
110
111 static int
112 ice_check_qp_num(const char *key, const char *qp_value,
113                  __rte_unused void *opaque)
114 {
115         char *end = NULL;
116         int num = 0;
117
118         while (isblank(*qp_value))
119                 qp_value++;
120
121         num = strtoul(qp_value, &end, 10);
122
123         if (!num || (*end == '-') || errno) {
124                 PMD_DRV_LOG(WARNING, "invalid value:\"%s\" for key:\"%s\", "
125                             "value must be > 0",
126                             qp_value, key);
127                 return -1;
128         }
129
130         return num;
131 }
132
133 static int
134 ice_config_max_queue_pair_num(struct rte_devargs *devargs)
135 {
136         struct rte_kvargs *kvlist;
137         const char *queue_num_key = ICE_MAX_QP_NUM;
138         int ret;
139
140         if (!devargs)
141                 return 0;
142
143         kvlist = rte_kvargs_parse(devargs->args, NULL);
144         if (!kvlist)
145                 return 0;
146
147         if (!rte_kvargs_count(kvlist, queue_num_key)) {
148                 rte_kvargs_free(kvlist);
149                 return 0;
150         }
151
152         if (rte_kvargs_process(kvlist, queue_num_key,
153                                ice_check_qp_num, NULL) < 0) {
154                 rte_kvargs_free(kvlist);
155                 return 0;
156         }
157         ret = rte_kvargs_process(kvlist, queue_num_key,
158                                  ice_check_qp_num, NULL);
159         rte_kvargs_free(kvlist);
160
161         return ret;
162 }
163
164 static int
165 ice_res_pool_init(struct ice_res_pool_info *pool, uint32_t base,
166                   uint32_t num)
167 {
168         struct pool_entry *entry;
169
170         if (!pool || !num)
171                 return -EINVAL;
172
173         entry = rte_zmalloc(NULL, sizeof(*entry), 0);
174         if (!entry) {
175                 PMD_INIT_LOG(ERR,
176                              "Failed to allocate memory for resource pool");
177                 return -ENOMEM;
178         }
179
180         /* queue heap initialize */
181         pool->num_free = num;
182         pool->num_alloc = 0;
183         pool->base = base;
184         LIST_INIT(&pool->alloc_list);
185         LIST_INIT(&pool->free_list);
186
187         /* Initialize element  */
188         entry->base = 0;
189         entry->len = num;
190
191         LIST_INSERT_HEAD(&pool->free_list, entry, next);
192         return 0;
193 }
194
/* Allocate 'num' contiguous resources from the pool using best-fit search.
 *
 * Returns the absolute base index (entry base + pool base) on success, or
 * a negative errno (-EINVAL / -ENOMEM) on failure. Note: a valid base can
 * collide with negative error codes only if base space overlapped them;
 * callers here use small non-negative bases.
 */
static int
ice_res_pool_alloc(struct ice_res_pool_info *pool,
                   uint16_t num)
{
        struct pool_entry *entry, *valid_entry;

        if (!pool || !num) {
                PMD_INIT_LOG(ERR, "Invalid parameter");
                return -EINVAL;
        }

        if (pool->num_free < num) {
                PMD_INIT_LOG(ERR, "No resource. ask:%u, available:%u",
                             num, pool->num_free);
                return -ENOMEM;
        }

        valid_entry = NULL;
        /* Lookup  in free list and find most fit one */
        LIST_FOREACH(entry, &pool->free_list, next) {
                if (entry->len >= num) {
                        /* Find best one */
                        if (entry->len == num) {
                                valid_entry = entry;
                                break;
                        }
                        /* otherwise keep the smallest entry that still fits */
                        if (!valid_entry ||
                            valid_entry->len > entry->len)
                                valid_entry = entry;
                }
        }

        /* Not find one to satisfy the request, return */
        if (!valid_entry) {
                PMD_INIT_LOG(ERR, "No valid entry found");
                return -ENOMEM;
        }
        /**
         * The entry has exactly the requested length,
         * unlink it from the free_list; it is re-inserted
         * into alloc_list below.
         */
        if (valid_entry->len == num) {
                LIST_REMOVE(valid_entry, next);
        } else {
                /**
                 * The entry have more numbers than requested,
                 * create a new entry for alloc_list and minus its
                 * queue base and number in free_list.
                 */
                entry = rte_zmalloc(NULL, sizeof(*entry), 0);
                if (!entry) {
                        PMD_INIT_LOG(ERR,
                                     "Failed to allocate memory for "
                                     "resource pool");
                        return -ENOMEM;
                }
                /* carve the front of the free entry off for this allocation */
                entry->base = valid_entry->base;
                entry->len = num;
                valid_entry->base += num;
                valid_entry->len -= num;
                valid_entry = entry;
        }

        /* Insert it into alloc list, not sorted */
        LIST_INSERT_HEAD(&pool->alloc_list, valid_entry, next);

        pool->num_free -= valid_entry->len;
        pool->num_alloc += valid_entry->len;

        return valid_entry->base + pool->base;
}
266
267 static void
268 ice_res_pool_destroy(struct ice_res_pool_info *pool)
269 {
270         struct pool_entry *entry, *next_entry;
271
272         if (!pool)
273                 return;
274
275         for (entry = LIST_FIRST(&pool->alloc_list);
276              entry && (next_entry = LIST_NEXT(entry, next), 1);
277              entry = next_entry) {
278                 LIST_REMOVE(entry, next);
279                 rte_free(entry);
280         }
281
282         for (entry = LIST_FIRST(&pool->free_list);
283              entry && (next_entry = LIST_NEXT(entry, next), 1);
284              entry = next_entry) {
285                 LIST_REMOVE(entry, next);
286                 rte_free(entry);
287         }
288
289         pool->num_free = 0;
290         pool->num_alloc = 0;
291         pool->base = 0;
292         LIST_INIT(&pool->alloc_list);
293         LIST_INIT(&pool->free_list);
294 }
295
/* Fill the VSI properties with the default RSS configuration:
 * per-VSI lookup table, Toeplitz hash, and TC override enabled.
 */
static void
ice_vsi_config_default_rss(struct ice_aqc_vsi_props *info)
{
        /* Set VSI LUT selection (masked into the LUT field of q_opt_rss) */
        info->q_opt_rss = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI &
                          ICE_AQ_VSI_Q_OPT_RSS_LUT_M;
        /* Set Hash scheme (Toeplitz) */
        info->q_opt_rss |= ICE_AQ_VSI_Q_OPT_RSS_TPLZ &
                           ICE_AQ_VSI_Q_OPT_RSS_HASH_M;
        /* enable TC */
        info->q_opt_tc = ICE_AQ_VSI_Q_OPT_TC_OVR_M;
}
308
/* Configure TC-to-queue mapping in the VSI properties. Only TC0 is
 * supported; vsi->nb_qps may be adjusted downward to a power of two.
 *
 * NOTE(review): the function returns -ENOTSUP through the enum ice_status
 * return type; callers appear to treat nonzero as failure — confirm this
 * mixing of errno values and ice_status is intentional.
 */
static enum ice_status
ice_vsi_config_tc_queue_mapping(struct ice_vsi *vsi,
                                struct ice_aqc_vsi_props *info,
                                uint8_t enabled_tcmap)
{
        uint16_t bsf, qp_idx;

        /* default tc 0 now. Multi-TC supporting need to be done later.
         * Configure TC and queue mapping parameters, for enabled TC,
         * allocate qpnum_per_tc queues to this traffic.
         */
        if (enabled_tcmap != 0x01) {
                PMD_INIT_LOG(ERR, "only TC0 is supported");
                return -ENOTSUP;
        }

        vsi->nb_qps = RTE_MIN(vsi->nb_qps, ICE_MAX_Q_PER_TC);
        /* NOTE(review): rte_bsf32() yields the index of the LOWEST set bit,
         * so for a non-power-of-two nb_qps (e.g. 6) this shrinks the queue
         * count to the lowest power-of-two factor (2), not the largest
         * power of two <= nb_qps — confirm this is the intended rounding.
         */
        bsf = rte_bsf32(vsi->nb_qps);
        /* Adjust the queue number to actual queues that can be applied */
        vsi->nb_qps = 0x1 << bsf;

        qp_idx = 0;
        /* Set tc and queue mapping with VSI: queue offset and the
         * log2 queue count packed into the TC0 mapping word.
         */
        info->tc_mapping[0] = rte_cpu_to_le_16((qp_idx <<
                                                ICE_AQ_VSI_TC_Q_OFFSET_S) |
                                               (bsf << ICE_AQ_VSI_TC_Q_NUM_S));

        /* Associate queue number with VSI (contiguous mapping) */
        info->mapping_flags |= rte_cpu_to_le_16(ICE_AQ_VSI_Q_MAP_CONTIG);
        info->q_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
        info->q_mapping[1] = rte_cpu_to_le_16(vsi->nb_qps);
        info->valid_sections |=
                rte_cpu_to_le_16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
        /* Set the info.ingress_table and info.egress_table
         * for UP translate table. Now just set it to 1:1 map by default
         * -- 0b 111 110 101 100 011 010 001 000 == 0xFAC688
         */
#define ICE_TC_QUEUE_TABLE_DFLT 0x00FAC688
        info->ingress_table  = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT);
        info->egress_table   = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT);
        info->outer_up_table = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT);
        return 0;
}
352
353 static int
354 ice_init_mac_address(struct rte_eth_dev *dev)
355 {
356         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
357
358         if (!is_unicast_ether_addr
359                 ((struct ether_addr *)hw->port_info[0].mac.lan_addr)) {
360                 PMD_INIT_LOG(ERR, "Invalid MAC address");
361                 return -EINVAL;
362         }
363
364         ether_addr_copy((struct ether_addr *)hw->port_info[0].mac.lan_addr,
365                         (struct ether_addr *)hw->port_info[0].mac.perm_addr);
366
367         dev->data->mac_addrs = rte_zmalloc(NULL, sizeof(struct ether_addr), 0);
368         if (!dev->data->mac_addrs) {
369                 PMD_INIT_LOG(ERR,
370                              "Failed to allocate memory to store mac address");
371                 return -ENOMEM;
372         }
373         /* store it to dev data */
374         ether_addr_copy((struct ether_addr *)hw->port_info[0].mac.perm_addr,
375                         &dev->data->mac_addrs[0]);
376         return 0;
377 }
378
379 /* Find out specific MAC filter */
380 static struct ice_mac_filter *
381 ice_find_mac_filter(struct ice_vsi *vsi, struct ether_addr *macaddr)
382 {
383         struct ice_mac_filter *f;
384
385         TAILQ_FOREACH(f, &vsi->mac_list, next) {
386                 if (is_same_ether_addr(macaddr, &f->mac_info.mac_addr))
387                         return f;
388         }
389
390         return NULL;
391 }
392
/* Program a MAC filter into hardware and record it on the VSI's software
 * list. Idempotent: returns 0 without touching hardware if the filter is
 * already present.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, -EINVAL if the
 * hardware add fails.
 */
static int
ice_add_mac_filter(struct ice_vsi *vsi, struct ether_addr *mac_addr)
{
        struct ice_fltr_list_entry *m_list_itr = NULL;
        struct ice_mac_filter *f;
        struct LIST_HEAD_TYPE list_head;
        struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
        int ret = 0;

        /* If it's added and configured, return */
        f = ice_find_mac_filter(vsi, mac_addr);
        if (f) {
                PMD_DRV_LOG(INFO, "This MAC filter already exists.");
                return 0;
        }

        INIT_LIST_HEAD(&list_head);

        /* build a one-element filter list for the shared-code add API */
        m_list_itr = (struct ice_fltr_list_entry *)
                ice_malloc(hw, sizeof(*m_list_itr));
        if (!m_list_itr) {
                ret = -ENOMEM;
                goto DONE;
        }
        ice_memcpy(m_list_itr->fltr_info.l_data.mac.mac_addr,
                   mac_addr, ETH_ALEN, ICE_NONDMA_TO_NONDMA);
        m_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
        m_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
        m_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_MAC;
        m_list_itr->fltr_info.flag = ICE_FLTR_TX;
        m_list_itr->fltr_info.vsi_handle = vsi->idx;

        LIST_ADD(&m_list_itr->list_entry, &list_head);

        /* Add the mac */
        ret = ice_add_mac(hw, &list_head);
        if (ret != ICE_SUCCESS) {
                PMD_DRV_LOG(ERR, "Failed to add MAC filter");
                ret = -EINVAL;
                goto DONE;
        }
        /* Add the mac addr into mac list */
        f = rte_zmalloc(NULL, sizeof(*f), 0);
        if (!f) {
                /* NOTE(review): the hardware filter was already added above;
                 * on this failure HW and the software list diverge — confirm
                 * whether a rollback (ice_remove_mac) is needed here.
                 */
                PMD_DRV_LOG(ERR, "failed to allocate memory");
                ret = -ENOMEM;
                goto DONE;
        }
        rte_memcpy(&f->mac_info.mac_addr, mac_addr, ETH_ADDR_LEN);
        TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
        vsi->mac_num++;

        ret = 0;

DONE:
        /* the temporary list element is always freed; rte_free(NULL) is ok */
        rte_free(m_list_itr);
        return ret;
}
451
/* Remove a MAC filter from hardware and unlink/free its entry on the VSI's
 * software list.
 *
 * Returns 0 on success, -EINVAL if the filter is unknown or the hardware
 * removal fails, -ENOMEM on allocation failure.
 */
static int
ice_remove_mac_filter(struct ice_vsi *vsi, struct ether_addr *mac_addr)
{
        struct ice_fltr_list_entry *m_list_itr = NULL;
        struct ice_mac_filter *f;
        struct LIST_HEAD_TYPE list_head;
        struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
        int ret = 0;

        /* Can't find it, return an error */
        f = ice_find_mac_filter(vsi, mac_addr);
        if (!f)
                return -EINVAL;

        INIT_LIST_HEAD(&list_head);

        /* build a one-element filter list mirroring the add-side parameters */
        m_list_itr = (struct ice_fltr_list_entry *)
                ice_malloc(hw, sizeof(*m_list_itr));
        if (!m_list_itr) {
                ret = -ENOMEM;
                goto DONE;
        }
        ice_memcpy(m_list_itr->fltr_info.l_data.mac.mac_addr,
                   mac_addr, ETH_ALEN, ICE_NONDMA_TO_NONDMA);
        m_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
        m_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
        m_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_MAC;
        m_list_itr->fltr_info.flag = ICE_FLTR_TX;
        m_list_itr->fltr_info.vsi_handle = vsi->idx;

        LIST_ADD(&m_list_itr->list_entry, &list_head);

        /* remove the mac filter */
        ret = ice_remove_mac(hw, &list_head);
        if (ret != ICE_SUCCESS) {
                PMD_DRV_LOG(ERR, "Failed to remove MAC filter");
                ret = -EINVAL;
                goto DONE;
        }

        /* Remove the mac addr from mac list */
        TAILQ_REMOVE(&vsi->mac_list, f, next);
        rte_free(f);
        vsi->mac_num--;

        ret = 0;
DONE:
        /* rte_free(NULL) is a no-op, so this is safe on every path */
        rte_free(m_list_itr);
        return ret;
}
502
503 /* Find out specific VLAN filter */
504 static struct ice_vlan_filter *
505 ice_find_vlan_filter(struct ice_vsi *vsi, uint16_t vlan_id)
506 {
507         struct ice_vlan_filter *f;
508
509         TAILQ_FOREACH(f, &vsi->vlan_list, next) {
510                 if (vlan_id == f->vlan_info.vlan_id)
511                         return f;
512         }
513
514         return NULL;
515 }
516
/* Program a VLAN filter into hardware and record it on the VSI's software
 * list. Returns 0 without touching hardware if the filter already exists,
 * or if neither anti-spoof nor VLAN filtering is enabled on the VSI.
 *
 * Returns 0 on success, -EINVAL for bad arguments or a hardware failure,
 * -ENOMEM on allocation failure.
 */
static int
ice_add_vlan_filter(struct ice_vsi *vsi, uint16_t vlan_id)
{
        struct ice_fltr_list_entry *v_list_itr = NULL;
        struct ice_vlan_filter *f;
        struct LIST_HEAD_TYPE list_head;
        struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
        int ret = 0;

        if (!vsi || vlan_id > ETHER_MAX_VLAN_ID)
                return -EINVAL;

        /* If it's added and configured, return. */
        f = ice_find_vlan_filter(vsi, vlan_id);
        if (f) {
                PMD_DRV_LOG(INFO, "This VLAN filter already exists.");
                return 0;
        }

        /* no HW filter needed when neither feature uses VLAN filters */
        if (!vsi->vlan_anti_spoof_on && !vsi->vlan_filter_on)
                return 0;

        INIT_LIST_HEAD(&list_head);

        /* build a one-element filter list for the shared-code add API */
        v_list_itr = (struct ice_fltr_list_entry *)
                      ice_malloc(hw, sizeof(*v_list_itr));
        if (!v_list_itr) {
                ret = -ENOMEM;
                goto DONE;
        }
        v_list_itr->fltr_info.l_data.vlan.vlan_id = vlan_id;
        v_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
        v_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
        v_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
        v_list_itr->fltr_info.flag = ICE_FLTR_TX;
        v_list_itr->fltr_info.vsi_handle = vsi->idx;

        LIST_ADD(&v_list_itr->list_entry, &list_head);

        /* Add the vlan */
        ret = ice_add_vlan(hw, &list_head);
        if (ret != ICE_SUCCESS) {
                PMD_DRV_LOG(ERR, "Failed to add VLAN filter");
                ret = -EINVAL;
                goto DONE;
        }

        /* Add vlan into vlan list */
        f = rte_zmalloc(NULL, sizeof(*f), 0);
        if (!f) {
                /* NOTE(review): the hardware filter was added above; on this
                 * failure HW and the software list diverge — confirm whether
                 * a rollback is needed.
                 */
                PMD_DRV_LOG(ERR, "failed to allocate memory");
                ret = -ENOMEM;
                goto DONE;
        }
        f->vlan_info.vlan_id = vlan_id;
        TAILQ_INSERT_TAIL(&vsi->vlan_list, f, next);
        vsi->vlan_num++;

        ret = 0;

DONE:
        /* the temporary list element is always freed; rte_free(NULL) is ok */
        rte_free(v_list_itr);
        return ret;
}
581
/* Remove a VLAN filter from hardware and unlink/free its entry on the
 * VSI's software list.
 *
 * Returns 0 on success, -EINVAL for bad arguments (including VLAN 0, which
 * is the permanent untagged-traffic filter), an unknown filter, or a
 * hardware failure; -ENOMEM on allocation failure.
 */
static int
ice_remove_vlan_filter(struct ice_vsi *vsi, uint16_t vlan_id)
{
        struct ice_fltr_list_entry *v_list_itr = NULL;
        struct ice_vlan_filter *f;
        struct LIST_HEAD_TYPE list_head;
        struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
        int ret = 0;

        /**
         * Vlan 0 is the generic filter for untagged packets
         * and can't be removed.
         */
        if (!vsi || vlan_id == 0 || vlan_id > ETHER_MAX_VLAN_ID)
                return -EINVAL;

        /* Can't find it, return an error */
        f = ice_find_vlan_filter(vsi, vlan_id);
        if (!f)
                return -EINVAL;

        INIT_LIST_HEAD(&list_head);

        /* build a one-element filter list mirroring the add-side parameters */
        v_list_itr = (struct ice_fltr_list_entry *)
                      ice_malloc(hw, sizeof(*v_list_itr));
        if (!v_list_itr) {
                ret = -ENOMEM;
                goto DONE;
        }

        v_list_itr->fltr_info.l_data.vlan.vlan_id = vlan_id;
        v_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
        v_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
        v_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
        v_list_itr->fltr_info.flag = ICE_FLTR_TX;
        v_list_itr->fltr_info.vsi_handle = vsi->idx;

        LIST_ADD(&v_list_itr->list_entry, &list_head);

        /* remove the vlan filter */
        ret = ice_remove_vlan(hw, &list_head);
        if (ret != ICE_SUCCESS) {
                PMD_DRV_LOG(ERR, "Failed to remove VLAN filter");
                ret = -EINVAL;
                goto DONE;
        }

        /* Remove the vlan id from vlan list */
        TAILQ_REMOVE(&vsi->vlan_list, f, next);
        rte_free(f);
        vsi->vlan_num--;

        ret = 0;
DONE:
        /* rte_free(NULL) is a no-op, so this is safe on every path */
        rte_free(v_list_itr);
        return ret;
}
639
640 static int
641 ice_remove_all_mac_vlan_filters(struct ice_vsi *vsi)
642 {
643         struct ice_mac_filter *m_f;
644         struct ice_vlan_filter *v_f;
645         int ret = 0;
646
647         if (!vsi || !vsi->mac_num)
648                 return -EINVAL;
649
650         TAILQ_FOREACH(m_f, &vsi->mac_list, next) {
651                 ret = ice_remove_mac_filter(vsi, &m_f->mac_info.mac_addr);
652                 if (ret != ICE_SUCCESS) {
653                         ret = -EINVAL;
654                         goto DONE;
655                 }
656         }
657
658         if (vsi->vlan_num == 0)
659                 return 0;
660
661         TAILQ_FOREACH(v_f, &vsi->vlan_list, next) {
662                 ret = ice_remove_vlan_filter(vsi, v_f->vlan_info.vlan_id);
663                 if (ret != ICE_SUCCESS) {
664                         ret = -EINVAL;
665                         goto DONE;
666                 }
667         }
668
669 DONE:
670         return ret;
671 }
672
673 static int
674 ice_vsi_config_qinq_insertion(struct ice_vsi *vsi, bool on)
675 {
676         struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
677         struct ice_vsi_ctx ctxt;
678         uint8_t qinq_flags;
679         int ret = 0;
680
681         /* Check if it has been already on or off */
682         if (vsi->info.valid_sections &
683                 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID)) {
684                 if (on) {
685                         if ((vsi->info.outer_tag_flags &
686                              ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST) ==
687                             ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST)
688                                 return 0; /* already on */
689                 } else {
690                         if (!(vsi->info.outer_tag_flags &
691                               ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST))
692                                 return 0; /* already off */
693                 }
694         }
695
696         if (on)
697                 qinq_flags = ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST;
698         else
699                 qinq_flags = 0;
700         /* clear global insertion and use per packet insertion */
701         vsi->info.outer_tag_flags &= ~(ICE_AQ_VSI_OUTER_TAG_INSERT);
702         vsi->info.outer_tag_flags &= ~(ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST);
703         vsi->info.outer_tag_flags |= qinq_flags;
704         /* use default vlan type 0x8100 */
705         vsi->info.outer_tag_flags &= ~(ICE_AQ_VSI_OUTER_TAG_TYPE_M);
706         vsi->info.outer_tag_flags |= ICE_DFLT_OUTER_TAG_TYPE <<
707                                      ICE_AQ_VSI_OUTER_TAG_TYPE_S;
708         (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
709         ctxt.info.valid_sections =
710                         rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);
711         ctxt.vsi_num = vsi->vsi_id;
712         ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
713         if (ret) {
714                 PMD_DRV_LOG(INFO,
715                             "Update VSI failed to %s qinq stripping",
716                             on ? "enable" : "disable");
717                 return -EINVAL;
718         }
719
720         vsi->info.valid_sections |=
721                 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);
722
723         return ret;
724 }
725
/* Enable or disable QinQ (outer tag) stripping on the VSI by updating the
 * outer-tag mode (COPY = strip to descriptor, NOTHING = leave in packet)
 * through an Update-VSI admin command. No-op if the requested state is
 * already active.
 *
 * Returns 0 on success (or when already in the requested state), -EINVAL
 * if the Update-VSI command fails.
 */
static int
ice_vsi_config_qinq_stripping(struct ice_vsi *vsi, bool on)
{
        struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
        struct ice_vsi_ctx ctxt;
        uint8_t qinq_flags;
        int ret = 0;

        /* Check if it has been already on or off */
        if (vsi->info.valid_sections &
                rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID)) {
                if (on) {
                        if ((vsi->info.outer_tag_flags &
                             ICE_AQ_VSI_OUTER_TAG_MODE_M) ==
                            ICE_AQ_VSI_OUTER_TAG_COPY)
                                return 0; /* already on */
                } else {
                        if ((vsi->info.outer_tag_flags &
                             ICE_AQ_VSI_OUTER_TAG_MODE_M) ==
                            ICE_AQ_VSI_OUTER_TAG_NOTHING)
                                return 0; /* already off */
                }
        }

        if (on)
                qinq_flags = ICE_AQ_VSI_OUTER_TAG_COPY;
        else
                qinq_flags = ICE_AQ_VSI_OUTER_TAG_NOTHING;
        vsi->info.outer_tag_flags &= ~(ICE_AQ_VSI_OUTER_TAG_MODE_M);
        vsi->info.outer_tag_flags |= qinq_flags;
        /* use the default outer TPID (ICE_DFLT_OUTER_TAG_TYPE, the
         * 0x9100 variant per its definition)
         */
        vsi->info.outer_tag_flags &= ~(ICE_AQ_VSI_OUTER_TAG_TYPE_M);
        vsi->info.outer_tag_flags |= ICE_DFLT_OUTER_TAG_TYPE <<
                                     ICE_AQ_VSI_OUTER_TAG_TYPE_S;
        (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
        ctxt.info.valid_sections =
                        rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);
        ctxt.vsi_num = vsi->vsi_id;
        ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
        if (ret) {
                PMD_DRV_LOG(INFO,
                            "Update VSI failed to %s qinq stripping",
                            on ? "enable" : "disable");
                return -EINVAL;
        }

        /* mark the cached outer-tag section valid only after HW accepted it */
        vsi->info.valid_sections |=
                rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);

        return ret;
}
777
778 static int
779 ice_vsi_config_double_vlan(struct ice_vsi *vsi, int on)
780 {
781         int ret;
782
783         ret = ice_vsi_config_qinq_stripping(vsi, on);
784         if (ret)
785                 PMD_DRV_LOG(ERR, "Fail to set qinq stripping - %d", ret);
786
787         ret = ice_vsi_config_qinq_insertion(vsi, on);
788         if (ret)
789                 PMD_DRV_LOG(ERR, "Fail to set qinq insertion - %d", ret);
790
791         return ret;
792 }
793
/* Enable IRQ0 (the PF's "other interrupt cause" vector):
 * clear the cause-enable register, read PFINT_OICR (presumably to clear
 * pending causes — confirm against the datasheet), program the enabled
 * causes, then arm vector 0 via GLINT_DYN_CTL.
 */
static void
ice_pf_enable_irq0(struct ice_hw *hw)
{
        /* reset the registers */
        ICE_WRITE_REG(hw, PFINT_OICR_ENA, 0);
        ICE_READ_REG(hw, PFINT_OICR);

#ifdef ICE_LSE_SPT
        /* Link Status Event support: enable all causes except link-status
         * change (LSE is delivered through the AdminQ path instead).
         */
        ICE_WRITE_REG(hw, PFINT_OICR_ENA,
                      (uint32_t)(PFINT_OICR_ENA_INT_ENA_M &
                                 (~PFINT_OICR_LINK_STAT_CHANGE_M)));

        /* route the OICR cause to MSI-X vector 0, ITR index 0 */
        ICE_WRITE_REG(hw, PFINT_OICR_CTL,
                      (0 & PFINT_OICR_CTL_MSIX_INDX_M) |
                      ((0 << PFINT_OICR_CTL_ITR_INDX_S) &
                       PFINT_OICR_CTL_ITR_INDX_M) |
                      PFINT_OICR_CTL_CAUSE_ENA_M);

        /* route firmware (AdminQ) events to MSI-X vector 0, ITR index 0 */
        ICE_WRITE_REG(hw, PFINT_FW_CTL,
                      (0 & PFINT_FW_CTL_MSIX_INDX_M) |
                      ((0 << PFINT_FW_CTL_ITR_INDX_S) &
                       PFINT_FW_CTL_ITR_INDX_M) |
                      PFINT_FW_CTL_CAUSE_ENA_M);
#else
        /* enable every "other interrupt" cause */
        ICE_WRITE_REG(hw, PFINT_OICR_ENA, PFINT_OICR_ENA_INT_ENA_M);
#endif

        /* arm vector 0: enable the interrupt and clear its PBA bit */
        ICE_WRITE_REG(hw, GLINT_DYN_CTL(0),
                      GLINT_DYN_CTL_INTENA_M |
                      GLINT_DYN_CTL_CLEARPBA_M |
                      GLINT_DYN_CTL_ITR_INDX_M);

        /* flush posted register writes before returning */
        ice_flush(hw);
}
829
/* Disable IRQ0: write GLINT_DYN_CTL(0) with only the write-back-on-ITR
 * bit set (interrupt-enable cleared) and flush posted writes.
 */
static void
ice_pf_disable_irq0(struct ice_hw *hw)
{
        /* Disable all interrupt types */
        ICE_WRITE_REG(hw, GLINT_DYN_CTL(0), GLINT_DYN_CTL_WB_ON_ITR_M);
        ice_flush(hw);
}
838
#ifdef ICE_LSE_SPT
/* Drain pending AdminQ receive events and dispatch them by opcode.
 * Currently only get_link_status is handled: it refreshes the cached link
 * state and fires an LSC callback to the application. Compiled only with
 * link-status-event support (ICE_LSE_SPT).
 */
static void
ice_handle_aq_msg(struct rte_eth_dev *dev)
{
        struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct ice_ctl_q_info *cq = &hw->adminq;
        struct ice_rq_event_info event;
        uint16_t pending, opcode;
        int ret;

        /* scratch buffer for the event payload; freed before returning */
        event.buf_len = ICE_AQ_MAX_BUF_LEN;
        event.msg_buf = rte_zmalloc(NULL, event.buf_len, 0);
        if (!event.msg_buf) {
                PMD_DRV_LOG(ERR, "Failed to allocate mem");
                return;
        }

        /* loop until the queue reports no more pending events */
        pending = 1;
        while (pending) {
                ret = ice_clean_rq_elem(hw, cq, &event, &pending);

                if (ret != ICE_SUCCESS) {
                        PMD_DRV_LOG(INFO,
                                    "Failed to read msg from AdminQ, "
                                    "adminq_err: %u",
                                    hw->adminq.sq_last_status);
                        break;
                }
                opcode = rte_le_to_cpu_16(event.desc.opcode);

                switch (opcode) {
                case ice_aqc_opc_get_link_status:
                        /* refresh link state, then notify the application */
                        ret = ice_link_update(dev, 0);
                        if (!ret)
                                _rte_eth_dev_callback_process
                                        (dev, RTE_ETH_EVENT_INTR_LSC, NULL);
                        break;
                default:
                        PMD_DRV_LOG(DEBUG, "Request %u is not supported yet",
                                    opcode);
                        break;
                }
        }
        rte_free(event.msg_buf);
}
#endif
885
/**
 * Interrupt handler triggered by NIC for handling
 * specific interrupt.
 *
 * Handles link-state change (directly or via the AdminQ when
 * ICE_LSE_SPT is set) and logs Malicious Driver Detection events.
 * Interrupts are masked while the causes are processed and re-armed
 * before returning.
 *
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
ice_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t oicr;
	uint32_t reg;
	uint8_t pf_num;
	uint8_t event;
	uint16_t queue;
#ifdef ICE_LSE_SPT
	uint32_t int_fw_ctl;
#endif

	/* Disable interrupt */
	ice_pf_disable_irq0(hw);

	/* read out interrupt causes */
	oicr = ICE_READ_REG(hw, PFINT_OICR);
#ifdef ICE_LSE_SPT
	int_fw_ctl = ICE_READ_REG(hw, PFINT_FW_CTL);
#endif

	/* No interrupt event indicated */
	if (!(oicr & PFINT_OICR_INTEVENT_M)) {
		PMD_DRV_LOG(INFO, "No interrupt event");
		goto done;
	}

#ifdef ICE_LSE_SPT
	/* Link events arrive as FW AdminQ messages when LSE is enabled */
	if (int_fw_ctl & PFINT_FW_CTL_INTEVENT_M) {
		PMD_DRV_LOG(INFO, "FW_CTL: link state change event");
		ice_handle_aq_msg(dev);
	}
#else
	if (oicr & PFINT_OICR_LINK_STAT_CHANGE_M) {
		PMD_DRV_LOG(INFO, "OICR: link state change event");
		ice_link_update(dev, 0);
	}
#endif

	/* Malicious Driver Detection: decode and log the offending PF,
	 * event type and Tx queue from the PQM and TCLAN registers.
	 */
	if (oicr & PFINT_OICR_MAL_DETECT_M) {
		PMD_DRV_LOG(WARNING, "OICR: MDD event");
		reg = ICE_READ_REG(hw, GL_MDET_TX_PQM);
		if (reg & GL_MDET_TX_PQM_VALID_M) {
			pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >>
				 GL_MDET_TX_PQM_PF_NUM_S;
			event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >>
				GL_MDET_TX_PQM_MAL_TYPE_S;
			queue = (reg & GL_MDET_TX_PQM_QNUM_M) >>
				GL_MDET_TX_PQM_QNUM_S;

			PMD_DRV_LOG(WARNING, "Malicious Driver Detection event "
				    "%d by PQM on TX queue %d PF# %d",
				    event, queue, pf_num);
		}

		reg = ICE_READ_REG(hw, GL_MDET_TX_TCLAN);
		if (reg & GL_MDET_TX_TCLAN_VALID_M) {
			pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >>
				 GL_MDET_TX_TCLAN_PF_NUM_S;
			event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >>
				GL_MDET_TX_TCLAN_MAL_TYPE_S;
			queue = (reg & GL_MDET_TX_TCLAN_QNUM_M) >>
				GL_MDET_TX_TCLAN_QNUM_S;

			PMD_DRV_LOG(WARNING, "Malicious Driver Detection event "
				    "%d by TCLAN on TX queue %d PF# %d",
				    event, queue, pf_num);
		}
	}
done:
	/* Enable interrupt */
	ice_pf_enable_irq0(hw);
	rte_intr_enable(dev->intr_handle);
}
974
975 /*  Initialize SW parameters of PF */
976 static int
977 ice_pf_sw_init(struct rte_eth_dev *dev)
978 {
979         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
980         struct ice_hw *hw = ICE_PF_TO_HW(pf);
981
982         if (ice_config_max_queue_pair_num(dev->device->devargs) > 0)
983                 pf->lan_nb_qp_max =
984                         ice_config_max_queue_pair_num(dev->device->devargs);
985         else
986                 pf->lan_nb_qp_max =
987                         (uint16_t)RTE_MIN(hw->func_caps.common_cap.num_txq,
988                                           hw->func_caps.common_cap.num_rxq);
989
990         pf->lan_nb_qps = pf->lan_nb_qp_max;
991
992         return 0;
993 }
994
/* Create and configure a VSI of the given type on this PF.
 * Only ICE_VSI_PF is supported for now. On success the VSI is added via
 * AdminQ, default/broadcast MAC filters are installed and the Tx
 * scheduler is configured; returns the new VSI. On failure the reserved
 * SW VSI index is rolled back and NULL is returned.
 */
static struct ice_vsi *
ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_vsi *vsi = NULL;
	struct ice_vsi_ctx vsi_ctx;
	int ret;
	struct ether_addr broadcast = {
		.addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff} };
	struct ether_addr mac_addr;
	uint16_t max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
	uint8_t tc_bitmap = 0x1;	/* TC0 only at this stage */

	/* hw->num_lports = 1 in NIC mode */
	vsi = rte_zmalloc(NULL, sizeof(struct ice_vsi), 0);
	if (!vsi)
		return NULL;

	/* Reserve the next free SW VSI slot (undone at fail_mem) */
	vsi->idx = pf->next_vsi_idx;
	pf->next_vsi_idx++;
	vsi->type = type;
	vsi->adapter = ICE_PF_TO_ADAPTER(pf);
	vsi->max_macaddrs = ICE_NUM_MACADDR_MAX;
	vsi->vlan_anti_spoof_on = 0;
	vsi->vlan_filter_on = 1;
	TAILQ_INIT(&vsi->mac_list);
	TAILQ_INIT(&vsi->vlan_list);

	memset(&vsi_ctx, 0, sizeof(vsi_ctx));
	/* base_queue is used in the queue mapping of the VSI add/update
	 * command. Suppose vsi->base_queue is 0 now; don't consider SRIOV
	 * or VMDQ cases in the first stage. Only the main VSI.
	 */
	vsi->base_queue = 0;
	switch (type) {
	case ICE_VSI_PF:
		vsi->nb_qps = pf->lan_nb_qps;
		ice_vsi_config_default_rss(&vsi_ctx.info);
		vsi_ctx.alloc_from_pool = true;
		vsi_ctx.flags = ICE_AQ_VSI_TYPE_PF;
		/* switch_id is queried by get_switch_config aq, which is done
		 * by ice_init_hw
		 */
		vsi_ctx.info.sw_id = hw->port_info->sw_id;
		vsi_ctx.info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
		/* Allow all untagged or tagged packets */
		vsi_ctx.info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL;
		vsi_ctx.info.vlan_flags |= ICE_AQ_VSI_VLAN_EMOD_NOTHING;
		/* PF-owned RSS LUT, Toeplitz hash function */
		vsi_ctx.info.q_opt_rss = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF |
					 ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
		/* Enable VLAN/UP trip */
		ret = ice_vsi_config_tc_queue_mapping(vsi,
						      &vsi_ctx.info,
						      ICE_DEFAULT_TCMAP);
		if (ret) {
			PMD_INIT_LOG(ERR,
				     "tc queue mapping with vsi failed, "
				     "err = %d",
				     ret);
			goto fail_mem;
		}

		break;
	default:
		/* for other types of VSI */
		PMD_INIT_LOG(ERR, "other types of VSI not supported");
		goto fail_mem;
	}

	/* VF has MSIX interrupt in VF range, don't allocate here */
	if (type == ICE_VSI_PF) {
		ret = ice_res_pool_alloc(&pf->msix_pool,
					 RTE_MIN(vsi->nb_qps,
						 RTE_MAX_RXTX_INTR_VEC_ID));
		if (ret < 0) {
			PMD_INIT_LOG(ERR, "VSI MAIN %d get heap failed %d",
				     vsi->vsi_id, ret);
		}
		/* NOTE(review): on pool-alloc failure the negative ret is
		 * still stored in msix_intr below -- verify downstream
		 * users tolerate it or convert this into a hard failure.
		 */
		vsi->msix_intr = ret;
		vsi->nb_msix = RTE_MIN(vsi->nb_qps, RTE_MAX_RXTX_INTR_VEC_ID);
	} else {
		vsi->msix_intr = 0;
		vsi->nb_msix = 0;
	}
	ret = ice_add_vsi(hw, vsi->idx, &vsi_ctx, NULL);
	if (ret != ICE_SUCCESS) {
		PMD_INIT_LOG(ERR, "add vsi failed, err = %d", ret);
		goto fail_mem;
	}
	/* store VSI information into the SW structure */
	vsi->vsi_id = vsi_ctx.vsi_num;
	vsi->info = vsi_ctx.info;
	pf->vsis_allocated = vsi_ctx.vsis_allocd;
	pf->vsis_unallocated = vsi_ctx.vsis_unallocated;

	/* MAC configuration: install the port's permanent MAC as the
	 * default unicast filter, plus a broadcast filter.
	 */
	rte_memcpy(pf->dev_addr.addr_bytes,
		   hw->port_info->mac.perm_addr,
		   ETH_ADDR_LEN);

	rte_memcpy(&mac_addr, &pf->dev_addr, ETHER_ADDR_LEN);
	ret = ice_add_mac_filter(vsi, &mac_addr);
	if (ret != ICE_SUCCESS)
		PMD_INIT_LOG(ERR, "Failed to add dflt MAC filter");

	rte_memcpy(&mac_addr, &broadcast, ETHER_ADDR_LEN);
	ret = ice_add_mac_filter(vsi, &mac_addr);
	if (ret != ICE_SUCCESS)
		PMD_INIT_LOG(ERR, "Failed to add MAC filter");

	/* At the beginning, only TC0. */
	/* What we need here is the maximum number of the TX queues.
	 * Currently vsi->nb_qps means it.
	 * Correct it if any change.
	 */
	max_txqs[0] = vsi->nb_qps;
	ret = ice_cfg_vsi_lan(hw->port_info, vsi->idx,
			      tc_bitmap, max_txqs);
	if (ret != ICE_SUCCESS)
		PMD_INIT_LOG(ERR, "Failed to config vsi sched");

	return vsi;
fail_mem:
	rte_free(vsi);
	pf->next_vsi_idx--;
	return NULL;
}
1122
1123 static int
1124 ice_pf_setup(struct ice_pf *pf)
1125 {
1126         struct ice_vsi *vsi;
1127
1128         /* Clear all stats counters */
1129         pf->offset_loaded = FALSE;
1130         memset(&pf->stats, 0, sizeof(struct ice_hw_port_stats));
1131         memset(&pf->stats_offset, 0, sizeof(struct ice_hw_port_stats));
1132         memset(&pf->internal_stats, 0, sizeof(struct ice_eth_stats));
1133         memset(&pf->internal_stats_offset, 0, sizeof(struct ice_eth_stats));
1134
1135         vsi = ice_setup_vsi(pf, ICE_VSI_PF);
1136         if (!vsi) {
1137                 PMD_INIT_LOG(ERR, "Failed to add vsi for PF");
1138                 return -EINVAL;
1139         }
1140
1141         pf->main_vsi = vsi;
1142
1143         return 0;
1144 }
1145
/* Device init hook: wire up dev ops and burst functions, map BAR0 into
 * the shared HW struct, initialize the shared code (control queues),
 * set up the SW PF, MAC address, MSI-X pool and main VSI, and register
 * the misc interrupt handler. Cleans up in reverse order on failure.
 */
static int
ice_dev_init(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev;
	struct rte_intr_handle *intr_handle;
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_vsi *vsi;
	int ret;

	dev->dev_ops = &ice_eth_dev_ops;
	dev->rx_pkt_burst = ice_recv_pkts;
	dev->tx_pkt_burst = ice_xmit_pkts;
	dev->tx_pkt_prepare = ice_prep_pkts;

	ice_set_default_ptype_table(dev);
	pci_dev = RTE_DEV_TO_PCI(dev->device);
	intr_handle = &pci_dev->intr_handle;

	/* Cross-link the adapter/PF/HW structures and mirror the PCI
	 * identity into the shared HW struct.
	 */
	pf->adapter = ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	pf->adapter->eth_dev = dev;
	pf->dev_data = dev->data;
	hw->back = pf->adapter;
	hw->hw_addr = (uint8_t *)pci_dev->mem_resource[0].addr;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->device_id = pci_dev->id.device_id;
	hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
	hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
	hw->bus.device = pci_dev->addr.devid;
	hw->bus.func = pci_dev->addr.function;

	ice_init_controlq_parameter(hw);

	ret = ice_init_hw(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to initialize HW");
		return -EINVAL;
	}

	PMD_INIT_LOG(INFO, "FW %d.%d.%05d API %d.%d",
		     hw->fw_maj_ver, hw->fw_min_ver, hw->fw_build,
		     hw->api_maj_ver, hw->api_min_ver);

	/* Return value deliberately ignored: ice_pf_sw_init() always
	 * returns 0 in the current implementation.
	 */
	ice_pf_sw_init(dev);
	ret = ice_init_mac_address(dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to initialize mac address");
		goto err_init_mac;
	}

	/* MSI-X vector 0 is reserved for the misc/admin interrupt */
	ret = ice_res_pool_init(&pf->msix_pool, 1,
				hw->func_caps.common_cap.num_msix_vectors - 1);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to init MSIX pool");
		goto err_msix_pool_init;
	}

	ret = ice_pf_setup(pf);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to setup PF");
		goto err_pf_setup;
	}

	vsi = pf->main_vsi;

	/* Disable double vlan by default */
	ice_vsi_config_double_vlan(vsi, FALSE);

	/* register callback func to eal lib */
	rte_intr_callback_register(intr_handle,
				   ice_interrupt_handler, dev);

	ice_pf_enable_irq0(hw);

	/* enable uio intr after callback register */
	rte_intr_enable(intr_handle);

	return 0;

err_pf_setup:
	ice_res_pool_destroy(&pf->msix_pool);
err_msix_pool_init:
	rte_free(dev->data->mac_addrs);
err_init_mac:
	ice_sched_cleanup_all(hw);
	rte_free(hw->port_info);
	ice_shutdown_all_ctrlq(hw);

	return ret;
}
1236
1237 static int
1238 ice_release_vsi(struct ice_vsi *vsi)
1239 {
1240         struct ice_hw *hw;
1241         struct ice_vsi_ctx vsi_ctx;
1242         enum ice_status ret;
1243
1244         if (!vsi)
1245                 return 0;
1246
1247         hw = ICE_VSI_TO_HW(vsi);
1248
1249         ice_remove_all_mac_vlan_filters(vsi);
1250
1251         memset(&vsi_ctx, 0, sizeof(vsi_ctx));
1252
1253         vsi_ctx.vsi_num = vsi->vsi_id;
1254         vsi_ctx.info = vsi->info;
1255         ret = ice_free_vsi(hw, vsi->idx, &vsi_ctx, false, NULL);
1256         if (ret != ICE_SUCCESS) {
1257                 PMD_INIT_LOG(ERR, "Failed to free vsi by aq, %u", vsi->vsi_id);
1258                 rte_free(vsi);
1259                 return -1;
1260         }
1261
1262         rte_free(vsi);
1263         return 0;
1264 }
1265
1266 static void
1267 ice_dev_stop(struct rte_eth_dev *dev)
1268 {
1269         struct rte_eth_dev_data *data = dev->data;
1270         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1271         struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
1272         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1273         uint16_t i;
1274
1275         /* avoid stopping again */
1276         if (pf->adapter_stopped)
1277                 return;
1278
1279         /* stop and clear all Rx queues */
1280         for (i = 0; i < data->nb_rx_queues; i++)
1281                 ice_rx_queue_stop(dev, i);
1282
1283         /* stop and clear all Tx queues */
1284         for (i = 0; i < data->nb_tx_queues; i++)
1285                 ice_tx_queue_stop(dev, i);
1286
1287         /* Clear all queues and release mbufs */
1288         ice_clear_queues(dev);
1289
1290         /* Clean datapath event and queue/vec mapping */
1291         rte_intr_efd_disable(intr_handle);
1292         if (intr_handle->intr_vec) {
1293                 rte_free(intr_handle->intr_vec);
1294                 intr_handle->intr_vec = NULL;
1295         }
1296
1297         pf->adapter_stopped = true;
1298 }
1299
1300 static void
1301 ice_dev_close(struct rte_eth_dev *dev)
1302 {
1303         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1304         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1305
1306         ice_dev_stop(dev);
1307
1308         /* release all queue resource */
1309         ice_free_queues(dev);
1310
1311         ice_res_pool_destroy(&pf->msix_pool);
1312         ice_release_vsi(pf->main_vsi);
1313
1314         ice_shutdown_all_ctrlq(hw);
1315 }
1316
1317 static int
1318 ice_dev_uninit(struct rte_eth_dev *dev)
1319 {
1320         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1321         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1322         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1323         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1324
1325         ice_dev_close(dev);
1326
1327         dev->dev_ops = NULL;
1328         dev->rx_pkt_burst = NULL;
1329         dev->tx_pkt_burst = NULL;
1330
1331         rte_free(dev->data->mac_addrs);
1332         dev->data->mac_addrs = NULL;
1333
1334         /* disable uio intr before callback unregister */
1335         rte_intr_disable(intr_handle);
1336
1337         /* register callback func to eal lib */
1338         rte_intr_callback_unregister(intr_handle,
1339                                      ice_interrupt_handler, dev);
1340
1341         ice_release_vsi(pf->main_vsi);
1342         ice_sched_cleanup_all(hw);
1343         rte_free(hw->port_info);
1344         ice_shutdown_all_ctrlq(hw);
1345
1346         return 0;
1347 }
1348
1349 static int
1350 ice_dev_configure(__rte_unused struct rte_eth_dev *dev)
1351 {
1352         struct ice_adapter *ad =
1353                 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1354
1355         /* Initialize to TRUE. If any of Rx queues doesn't meet the
1356          * bulk allocation or vector Rx preconditions we will reset it.
1357          */
1358         ad->rx_bulk_alloc_allowed = true;
1359         ad->tx_simple_allowed = true;
1360
1361         return 0;
1362 }
1363
1364 static int ice_init_rss(struct ice_pf *pf)
1365 {
1366         struct ice_hw *hw = ICE_PF_TO_HW(pf);
1367         struct ice_vsi *vsi = pf->main_vsi;
1368         struct rte_eth_dev *dev = pf->adapter->eth_dev;
1369         struct rte_eth_rss_conf *rss_conf;
1370         struct ice_aqc_get_set_rss_keys key;
1371         uint16_t i, nb_q;
1372         int ret = 0;
1373
1374         rss_conf = &dev->data->dev_conf.rx_adv_conf.rss_conf;
1375         nb_q = dev->data->nb_rx_queues;
1376         vsi->rss_key_size = ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE;
1377         vsi->rss_lut_size = hw->func_caps.common_cap.rss_table_size;
1378
1379         if (!vsi->rss_key)
1380                 vsi->rss_key = rte_zmalloc(NULL,
1381                                            vsi->rss_key_size, 0);
1382         if (!vsi->rss_lut)
1383                 vsi->rss_lut = rte_zmalloc(NULL,
1384                                            vsi->rss_lut_size, 0);
1385
1386         /* configure RSS key */
1387         if (!rss_conf->rss_key) {
1388                 /* Calculate the default hash key */
1389                 for (i = 0; i <= vsi->rss_key_size; i++)
1390                         vsi->rss_key[i] = (uint8_t)rte_rand();
1391         } else {
1392                 rte_memcpy(vsi->rss_key, rss_conf->rss_key,
1393                            RTE_MIN(rss_conf->rss_key_len,
1394                                    vsi->rss_key_size));
1395         }
1396         rte_memcpy(key.standard_rss_key, vsi->rss_key, vsi->rss_key_size);
1397         ret = ice_aq_set_rss_key(hw, vsi->idx, &key);
1398         if (ret)
1399                 return -EINVAL;
1400
1401         /* init RSS LUT table */
1402         for (i = 0; i < vsi->rss_lut_size; i++)
1403                 vsi->rss_lut[i] = i % nb_q;
1404
1405         ret = ice_aq_set_rss_lut(hw, vsi->idx,
1406                                  ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF,
1407                                  vsi->rss_lut, vsi->rss_lut_size);
1408         if (ret)
1409                 return -EINVAL;
1410
1411         return 0;
1412 }
1413
/* Start the port: program every Tx and Rx queue context, initialize
 * RSS, choose the Rx burst function, subscribe to link events and
 * refresh the link status. On failure, every queue started so far is
 * stopped again and -EIO is returned.
 */
static int
ice_dev_start(struct rte_eth_dev *dev)
{
	struct rte_eth_dev_data *data = dev->data;
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	uint16_t nb_rxq = 0;
	uint16_t nb_txq, i;
	int ret;

	/* program Tx queues' context in hardware */
	for (nb_txq = 0; nb_txq < data->nb_tx_queues; nb_txq++) {
		ret = ice_tx_queue_start(dev, nb_txq);
		if (ret) {
			PMD_DRV_LOG(ERR, "fail to start Tx queue %u", nb_txq);
			goto tx_err;
		}
	}

	/* program Rx queues' context in hardware*/
	for (nb_rxq = 0; nb_rxq < data->nb_rx_queues; nb_rxq++) {
		ret = ice_rx_queue_start(dev, nb_rxq);
		if (ret) {
			PMD_DRV_LOG(ERR, "fail to start Rx queue %u", nb_rxq);
			goto rx_err;
		}
	}

	ret = ice_init_rss(pf);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to enable rss for PF");
		goto rx_err;
	}

	ice_set_rx_function(dev);

	/* Subscribe to the link events this driver cares about */
	ret = ice_aq_set_event_mask(hw, hw->port_info->lport,
				    ((u16)(ICE_AQ_LINK_EVENT_LINK_FAULT |
				     ICE_AQ_LINK_EVENT_PHY_TEMP_ALARM |
				     ICE_AQ_LINK_EVENT_EXCESSIVE_ERRORS |
				     ICE_AQ_LINK_EVENT_SIGNAL_DETECT |
				     ICE_AQ_LINK_EVENT_AN_COMPLETED |
				     ICE_AQ_LINK_EVENT_PORT_TX_SUSPENDED)),
				     NULL);
	if (ret != ICE_SUCCESS)
		PMD_DRV_LOG(WARNING, "Fail to set phy mask");

	/* Call get_link_info aq command to enable/disable LSE */
	ice_link_update(dev, 0);

	pf->adapter_stopped = false;

	return 0;

	/* stop the started queues if failed to start all queues */
rx_err:
	for (i = 0; i < nb_rxq; i++)
		ice_rx_queue_stop(dev, i);
tx_err:
	for (i = 0; i < nb_txq; i++)
		ice_tx_queue_stop(dev, i);

	return -EIO;
}
1478
1479 static int
1480 ice_dev_reset(struct rte_eth_dev *dev)
1481 {
1482         int ret;
1483
1484         if (dev->data->sriov.active)
1485                 return -ENOTSUP;
1486
1487         ret = ice_dev_uninit(dev);
1488         if (ret) {
1489                 PMD_INIT_LOG(ERR, "failed to uninit device, status = %d", ret);
1490                 return -ENXIO;
1491         }
1492
1493         ret = ice_dev_init(dev);
1494         if (ret) {
1495                 PMD_INIT_LOG(ERR, "failed to init device, status = %d", ret);
1496                 return -ENXIO;
1497         }
1498
1499         return 0;
1500 }
1501
/* Report device capabilities and defaults to the application:
 * queue/MAC limits, Rx/Tx offload capabilities, RSS parameters,
 * default queue thresholds, descriptor limits and supported speeds.
 */
static void
ice_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_vsi *vsi = pf->main_vsi;
	struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);

	dev_info->min_rx_bufsize = ICE_BUF_SIZE_MIN;
	dev_info->max_rx_pktlen = ICE_FRAME_SIZE_MAX;
	dev_info->max_rx_queues = vsi->nb_qps;
	dev_info->max_tx_queues = vsi->nb_qps;
	dev_info->max_mac_addrs = vsi->max_macaddrs;
	dev_info->max_vfs = pci_dev->max_vfs;

	/* Per-port offload capabilities; no per-queue offloads yet */
	dev_info->rx_offload_capa =
		DEV_RX_OFFLOAD_VLAN_STRIP |
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM |
		DEV_RX_OFFLOAD_QINQ_STRIP |
		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
		DEV_RX_OFFLOAD_VLAN_EXTEND |
		DEV_RX_OFFLOAD_JUMBO_FRAME |
		DEV_RX_OFFLOAD_KEEP_CRC |
		DEV_RX_OFFLOAD_VLAN_FILTER;
	dev_info->tx_offload_capa =
		DEV_TX_OFFLOAD_VLAN_INSERT |
		DEV_TX_OFFLOAD_QINQ_INSERT |
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM |
		DEV_TX_OFFLOAD_SCTP_CKSUM |
		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
		DEV_TX_OFFLOAD_TCP_TSO |
		DEV_TX_OFFLOAD_MULTI_SEGS;
	dev_info->rx_queue_offload_capa = 0;
	dev_info->tx_queue_offload_capa = 0;

	dev_info->reta_size = hw->func_caps.common_cap.rss_table_size;
	dev_info->hash_key_size = (VSIQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
	dev_info->flow_type_rss_offloads = ICE_RSS_OFFLOAD_ALL;

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_thresh = {
			.pthresh = ICE_DEFAULT_RX_PTHRESH,
			.hthresh = ICE_DEFAULT_RX_HTHRESH,
			.wthresh = ICE_DEFAULT_RX_WTHRESH,
		},
		.rx_free_thresh = ICE_DEFAULT_RX_FREE_THRESH,
		.rx_drop_en = 0,
		.offloads = 0,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_thresh = {
			.pthresh = ICE_DEFAULT_TX_PTHRESH,
			.hthresh = ICE_DEFAULT_TX_HTHRESH,
			.wthresh = ICE_DEFAULT_TX_WTHRESH,
		},
		.tx_free_thresh = ICE_DEFAULT_TX_FREE_THRESH,
		.tx_rs_thresh = ICE_DEFAULT_TX_RSBIT_THRESH,
		.offloads = 0,
	};

	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = ICE_MAX_RING_DESC,
		.nb_min = ICE_MIN_RING_DESC,
		.nb_align = ICE_ALIGN_RING_DESC,
	};

	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = ICE_MAX_RING_DESC,
		.nb_min = ICE_MIN_RING_DESC,
		.nb_align = ICE_ALIGN_RING_DESC,
	};

	dev_info->speed_capa = ETH_LINK_SPEED_10M |
			       ETH_LINK_SPEED_100M |
			       ETH_LINK_SPEED_1G |
			       ETH_LINK_SPEED_2_5G |
			       ETH_LINK_SPEED_5G |
			       ETH_LINK_SPEED_10G |
			       ETH_LINK_SPEED_20G |
			       ETH_LINK_SPEED_25G |
			       ETH_LINK_SPEED_40G;

	dev_info->nb_rx_queues = dev->data->nb_rx_queues;
	dev_info->nb_tx_queues = dev->data->nb_tx_queues;

	dev_info->default_rxportconf.burst_size = ICE_RX_MAX_BURST;
	dev_info->default_txportconf.burst_size = ICE_TX_MAX_BURST;
	dev_info->default_rxportconf.nb_queues = 1;
	dev_info->default_txportconf.nb_queues = 1;
	/* NOTE(review): ICE_BUF_SIZE_MIN is also used as min_rx_bufsize
	 * above -- confirm it is the intended default ring size here.
	 */
	dev_info->default_rxportconf.ring_size = ICE_BUF_SIZE_MIN;
	dev_info->default_txportconf.ring_size = ICE_BUF_SIZE_MIN;
}
1599
1600 static inline int
1601 ice_atomic_read_link_status(struct rte_eth_dev *dev,
1602                             struct rte_eth_link *link)
1603 {
1604         struct rte_eth_link *dst = link;
1605         struct rte_eth_link *src = &dev->data->dev_link;
1606
1607         if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
1608                                 *(uint64_t *)src) == 0)
1609                 return -1;
1610
1611         return 0;
1612 }
1613
1614 static inline int
1615 ice_atomic_write_link_status(struct rte_eth_dev *dev,
1616                              struct rte_eth_link *link)
1617 {
1618         struct rte_eth_link *dst = &dev->data->dev_link;
1619         struct rte_eth_link *src = link;
1620
1621         if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
1622                                 *(uint64_t *)src) == 0)
1623                 return -1;
1624
1625         return 0;
1626 }
1627
/* Query link status from firmware and publish it to dev->data->dev_link.
 *
 * When @wait_to_complete is set, polls up to MAX_REPEAT_TIME times at
 * CHECK_INTERVAL ms intervals until the link reports up.
 *
 * Returns 0 if the stored link status changed, -1 if it is unchanged
 * (the ethdev layer uses this to decide whether to notify applications).
 */
static int
ice_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
{
#define CHECK_INTERVAL 100  /* 100ms */
#define MAX_REPEAT_TIME 10  /* 1s (10 * 100ms) in total */
        struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct ice_link_status link_status;
        struct rte_eth_link link, old;
        int status;
        unsigned int rep_cnt = MAX_REPEAT_TIME;
        /* Enable link-status-event reporting only when LSC interrupts are on */
        bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false;

        memset(&link, 0, sizeof(link));
        memset(&old, 0, sizeof(old));
        memset(&link_status, 0, sizeof(link_status));
        ice_atomic_read_link_status(dev, &old);

        do {
                /* Get link status information from hardware */
                status = ice_aq_get_link_info(hw->port_info, enable_lse,
                                              &link_status, NULL);
                if (status != ICE_SUCCESS) {
                        /* AQ failure: publish a conservative 100M/FD link */
                        link.link_speed = ETH_SPEED_NUM_100M;
                        link.link_duplex = ETH_LINK_FULL_DUPLEX;
                        PMD_DRV_LOG(ERR, "Failed to get link info");
                        goto out;
                }

                link.link_status = link_status.link_info & ICE_AQ_LINK_UP;
                if (!wait_to_complete || link.link_status)
                        break;

                rte_delay_ms(CHECK_INTERVAL);
        } while (--rep_cnt);

        if (!link.link_status)
                goto out;

        /* Full-duplex operation at all supported speeds */
        link.link_duplex = ETH_LINK_FULL_DUPLEX;

        /* Parse the link status */
        switch (link_status.link_speed) {
        case ICE_AQ_LINK_SPEED_10MB:
                link.link_speed = ETH_SPEED_NUM_10M;
                break;
        case ICE_AQ_LINK_SPEED_100MB:
                link.link_speed = ETH_SPEED_NUM_100M;
                break;
        case ICE_AQ_LINK_SPEED_1000MB:
                link.link_speed = ETH_SPEED_NUM_1G;
                break;
        case ICE_AQ_LINK_SPEED_2500MB:
                link.link_speed = ETH_SPEED_NUM_2_5G;
                break;
        case ICE_AQ_LINK_SPEED_5GB:
                link.link_speed = ETH_SPEED_NUM_5G;
                break;
        case ICE_AQ_LINK_SPEED_10GB:
                link.link_speed = ETH_SPEED_NUM_10G;
                break;
        case ICE_AQ_LINK_SPEED_20GB:
                link.link_speed = ETH_SPEED_NUM_20G;
                break;
        case ICE_AQ_LINK_SPEED_25GB:
                link.link_speed = ETH_SPEED_NUM_25G;
                break;
        case ICE_AQ_LINK_SPEED_40GB:
                link.link_speed = ETH_SPEED_NUM_40G;
                break;
        case ICE_AQ_LINK_SPEED_UNKNOWN:
        default:
                PMD_DRV_LOG(ERR, "Unknown link speed");
                link.link_speed = ETH_SPEED_NUM_NONE;
                break;
        }

        /* Report autoneg unless the application requested a fixed speed */
        link.link_autoneg = !(dev->data->dev_conf.link_speeds &
                              ETH_LINK_SPEED_FIXED);

out:
        ice_atomic_write_link_status(dev, &link);
        if (link.link_status == old.link_status)
                return -1;

        return 0;
}
1715
1716 static int
1717 ice_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
1718 {
1719         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1720         struct rte_eth_dev_data *dev_data = pf->dev_data;
1721         uint32_t frame_size = mtu + ETHER_HDR_LEN
1722                               + ETHER_CRC_LEN + ICE_VLAN_TAG_SIZE;
1723
1724         /* check if mtu is within the allowed range */
1725         if (mtu < ETHER_MIN_MTU || frame_size > ICE_FRAME_SIZE_MAX)
1726                 return -EINVAL;
1727
1728         /* mtu setting is forbidden if port is start */
1729         if (dev_data->dev_started) {
1730                 PMD_DRV_LOG(ERR,
1731                             "port %d must be stopped before configuration",
1732                             dev_data->port_id);
1733                 return -EBUSY;
1734         }
1735
1736         if (frame_size > ETHER_MAX_LEN)
1737                 dev_data->dev_conf.rxmode.offloads |=
1738                         DEV_RX_OFFLOAD_JUMBO_FRAME;
1739         else
1740                 dev_data->dev_conf.rxmode.offloads &=
1741                         ~DEV_RX_OFFLOAD_JUMBO_FRAME;
1742
1743         dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
1744
1745         return 0;
1746 }
1747
1748 static int ice_macaddr_set(struct rte_eth_dev *dev,
1749                            struct ether_addr *mac_addr)
1750 {
1751         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1752         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1753         struct ice_vsi *vsi = pf->main_vsi;
1754         struct ice_mac_filter *f;
1755         uint8_t flags = 0;
1756         int ret;
1757
1758         if (!is_valid_assigned_ether_addr(mac_addr)) {
1759                 PMD_DRV_LOG(ERR, "Tried to set invalid MAC address.");
1760                 return -EINVAL;
1761         }
1762
1763         TAILQ_FOREACH(f, &vsi->mac_list, next) {
1764                 if (is_same_ether_addr(&pf->dev_addr, &f->mac_info.mac_addr))
1765                         break;
1766         }
1767
1768         if (!f) {
1769                 PMD_DRV_LOG(ERR, "Failed to find filter for default mac");
1770                 return -EIO;
1771         }
1772
1773         ret = ice_remove_mac_filter(vsi, &f->mac_info.mac_addr);
1774         if (ret != ICE_SUCCESS) {
1775                 PMD_DRV_LOG(ERR, "Failed to delete mac filter");
1776                 return -EIO;
1777         }
1778         ret = ice_add_mac_filter(vsi, mac_addr);
1779         if (ret != ICE_SUCCESS) {
1780                 PMD_DRV_LOG(ERR, "Failed to add mac filter");
1781                 return -EIO;
1782         }
1783         memcpy(&pf->dev_addr, mac_addr, ETH_ADDR_LEN);
1784
1785         flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL;
1786         ret = ice_aq_manage_mac_write(hw, mac_addr->addr_bytes, flags, NULL);
1787         if (ret != ICE_SUCCESS)
1788                 PMD_DRV_LOG(ERR, "Failed to set manage mac");
1789
1790         return 0;
1791 }
1792
1793 /* Add a MAC address, and update filters */
1794 static int
1795 ice_macaddr_add(struct rte_eth_dev *dev,
1796                 struct ether_addr *mac_addr,
1797                 __rte_unused uint32_t index,
1798                 __rte_unused uint32_t pool)
1799 {
1800         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1801         struct ice_vsi *vsi = pf->main_vsi;
1802         int ret;
1803
1804         ret = ice_add_mac_filter(vsi, mac_addr);
1805         if (ret != ICE_SUCCESS) {
1806                 PMD_DRV_LOG(ERR, "Failed to add MAC filter");
1807                 return -EINVAL;
1808         }
1809
1810         return ICE_SUCCESS;
1811 }
1812
1813 /* Remove a MAC address, and update filters */
1814 static void
1815 ice_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
1816 {
1817         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1818         struct ice_vsi *vsi = pf->main_vsi;
1819         struct rte_eth_dev_data *data = dev->data;
1820         struct ether_addr *macaddr;
1821         int ret;
1822
1823         macaddr = &data->mac_addrs[index];
1824         ret = ice_remove_mac_filter(vsi, macaddr);
1825         if (ret) {
1826                 PMD_DRV_LOG(ERR, "Failed to remove MAC filter");
1827                 return;
1828         }
1829 }
1830
1831 static int
1832 ice_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1833 {
1834         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1835         struct ice_vsi *vsi = pf->main_vsi;
1836         int ret;
1837
1838         PMD_INIT_FUNC_TRACE();
1839
1840         if (on) {
1841                 ret = ice_add_vlan_filter(vsi, vlan_id);
1842                 if (ret < 0) {
1843                         PMD_DRV_LOG(ERR, "Failed to add vlan filter");
1844                         return -EINVAL;
1845                 }
1846         } else {
1847                 ret = ice_remove_vlan_filter(vsi, vlan_id);
1848                 if (ret < 0) {
1849                         PMD_DRV_LOG(ERR, "Failed to remove vlan filter");
1850                         return -EINVAL;
1851                 }
1852         }
1853
1854         return 0;
1855 }
1856
1857 /* Configure vlan filter on or off */
1858 static int
1859 ice_vsi_config_vlan_filter(struct ice_vsi *vsi, bool on)
1860 {
1861         struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
1862         struct ice_vsi_ctx ctxt;
1863         uint8_t sec_flags, sw_flags2;
1864         int ret = 0;
1865
1866         sec_flags = ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
1867                     ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S;
1868         sw_flags2 = ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
1869
1870         if (on) {
1871                 vsi->info.sec_flags |= sec_flags;
1872                 vsi->info.sw_flags2 |= sw_flags2;
1873         } else {
1874                 vsi->info.sec_flags &= ~sec_flags;
1875                 vsi->info.sw_flags2 &= ~sw_flags2;
1876         }
1877         vsi->info.sw_id = hw->port_info->sw_id;
1878         (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
1879         ctxt.info.valid_sections =
1880                 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_SW_VALID |
1881                                  ICE_AQ_VSI_PROP_SECURITY_VALID);
1882         ctxt.vsi_num = vsi->vsi_id;
1883
1884         ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
1885         if (ret) {
1886                 PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan rx pruning",
1887                             on ? "enable" : "disable");
1888                 ret = -EINVAL;
1889         } else {
1890                 vsi->info.valid_sections |=
1891                         rte_cpu_to_le_16(ICE_AQ_VSI_PROP_SW_VALID |
1892                                          ICE_AQ_VSI_PROP_SECURITY_VALID);
1893         }
1894
1895         return ret;
1896 }
1897
1898 static int
1899 ice_vsi_config_vlan_stripping(struct ice_vsi *vsi, bool on)
1900 {
1901         struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
1902         struct ice_vsi_ctx ctxt;
1903         uint8_t vlan_flags;
1904         int ret = 0;
1905
1906         /* Check if it has been already on or off */
1907         if (vsi->info.valid_sections &
1908                 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID)) {
1909                 if (on) {
1910                         if ((vsi->info.vlan_flags &
1911                              ICE_AQ_VSI_VLAN_EMOD_M) ==
1912                             ICE_AQ_VSI_VLAN_EMOD_STR_BOTH)
1913                                 return 0; /* already on */
1914                 } else {
1915                         if ((vsi->info.vlan_flags &
1916                              ICE_AQ_VSI_VLAN_EMOD_M) ==
1917                             ICE_AQ_VSI_VLAN_EMOD_NOTHING)
1918                                 return 0; /* already off */
1919                 }
1920         }
1921
1922         if (on)
1923                 vlan_flags = ICE_AQ_VSI_VLAN_EMOD_STR_BOTH;
1924         else
1925                 vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING;
1926         vsi->info.vlan_flags &= ~(ICE_AQ_VSI_VLAN_EMOD_M);
1927         vsi->info.vlan_flags |= vlan_flags;
1928         (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
1929         ctxt.info.valid_sections =
1930                 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID);
1931         ctxt.vsi_num = vsi->vsi_id;
1932         ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
1933         if (ret) {
1934                 PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan stripping",
1935                             on ? "enable" : "disable");
1936                 return -EINVAL;
1937         }
1938
1939         vsi->info.valid_sections |=
1940                 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID);
1941
1942         return ret;
1943 }
1944
1945 static int
1946 ice_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1947 {
1948         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1949         struct ice_vsi *vsi = pf->main_vsi;
1950         struct rte_eth_rxmode *rxmode;
1951
1952         rxmode = &dev->data->dev_conf.rxmode;
1953         if (mask & ETH_VLAN_FILTER_MASK) {
1954                 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
1955                         ice_vsi_config_vlan_filter(vsi, TRUE);
1956                 else
1957                         ice_vsi_config_vlan_filter(vsi, FALSE);
1958         }
1959
1960         if (mask & ETH_VLAN_STRIP_MASK) {
1961                 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
1962                         ice_vsi_config_vlan_stripping(vsi, TRUE);
1963                 else
1964                         ice_vsi_config_vlan_stripping(vsi, FALSE);
1965         }
1966
1967         if (mask & ETH_VLAN_EXTEND_MASK) {
1968                 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
1969                         ice_vsi_config_double_vlan(vsi, TRUE);
1970                 else
1971                         ice_vsi_config_double_vlan(vsi, FALSE);
1972         }
1973
1974         return 0;
1975 }
1976
/* Set the TPID (ethertype) used for outer/inner VLAN tag matching.
 *
 * Writes the GL_SWT_L2TAGCTRL register selected by @vlan_type and the
 * current QinQ configuration.  Register indices: 3 = outer tag in QinQ
 * mode, 5 = single/inner tag.  Inner TPID is only meaningful when
 * DEV_RX_OFFLOAD_VLAN_EXTEND (QinQ) is enabled.
 *
 * Returns 0 on success (including the no-change case), -EINVAL for an
 * unsupported @vlan_type combination.
 */
static int
ice_vlan_tpid_set(struct rte_eth_dev *dev,
                  enum rte_vlan_type vlan_type,
                  uint16_t tpid)
{
        struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint64_t reg_r = 0, reg_w = 0;
        uint16_t reg_id = 0;
        int ret = 0;
        int qinq = dev->data->dev_conf.rxmode.offloads &
                   DEV_RX_OFFLOAD_VLAN_EXTEND;

        switch (vlan_type) {
        case ETH_VLAN_TYPE_OUTER:
                if (qinq)
                        reg_id = 3;
                else
                        reg_id = 5;
        break;
        case ETH_VLAN_TYPE_INNER:
                if (qinq) {
                        reg_id = 5;
                } else {
                        PMD_DRV_LOG(ERR,
                                    "Unsupported vlan type in single vlan.");
                        return -EINVAL;
                }
                break;
        default:
                PMD_DRV_LOG(ERR, "Unsupported vlan type %d", vlan_type);
                return -EINVAL;
        }
        reg_r = ICE_READ_REG(hw, GL_SWT_L2TAGCTRL(reg_id));
        PMD_DRV_LOG(DEBUG, "Debug read from ICE GL_SWT_L2TAGCTRL[%d]: "
                    "0x%08"PRIx64"", reg_id, reg_r);

        /* Replace only the ethertype field, preserving the other bits. */
        reg_w = reg_r & (~(GL_SWT_L2TAGCTRL_ETHERTYPE_M));
        reg_w |= ((uint64_t)tpid << GL_SWT_L2TAGCTRL_ETHERTYPE_S);
        if (reg_r == reg_w) {
                /* Skip the register write when the TPID is unchanged. */
                PMD_DRV_LOG(DEBUG, "No need to write");
                return 0;
        }

        ICE_WRITE_REG(hw, GL_SWT_L2TAGCTRL(reg_id), reg_w);
        PMD_DRV_LOG(DEBUG, "Debug write 0x%08"PRIx64" to "
                    "ICE GL_SWT_L2TAGCTRL[%d]", reg_w, reg_id);

        return ret;
}
2026
2027 static int
2028 ice_get_rss_lut(struct ice_vsi *vsi, uint8_t *lut, uint16_t lut_size)
2029 {
2030         struct ice_pf *pf = ICE_VSI_TO_PF(vsi);
2031         struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
2032         int ret;
2033
2034         if (!lut)
2035                 return -EINVAL;
2036
2037         if (pf->flags & ICE_FLAG_RSS_AQ_CAPABLE) {
2038                 ret = ice_aq_get_rss_lut(hw, vsi->idx, TRUE,
2039                                          lut, lut_size);
2040                 if (ret) {
2041                         PMD_DRV_LOG(ERR, "Failed to get RSS lookup table");
2042                         return -EINVAL;
2043                 }
2044         } else {
2045                 uint64_t *lut_dw = (uint64_t *)lut;
2046                 uint16_t i, lut_size_dw = lut_size / 4;
2047
2048                 for (i = 0; i < lut_size_dw; i++)
2049                         lut_dw[i] = ICE_READ_REG(hw, PFQF_HLUT(i));
2050         }
2051
2052         return 0;
2053 }
2054
2055 static int
2056 ice_set_rss_lut(struct ice_vsi *vsi, uint8_t *lut, uint16_t lut_size)
2057 {
2058         struct ice_pf *pf = ICE_VSI_TO_PF(vsi);
2059         struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
2060         int ret;
2061
2062         if (!vsi || !lut)
2063                 return -EINVAL;
2064
2065         if (pf->flags & ICE_FLAG_RSS_AQ_CAPABLE) {
2066                 ret = ice_aq_set_rss_lut(hw, vsi->idx, TRUE,
2067                                          lut, lut_size);
2068                 if (ret) {
2069                         PMD_DRV_LOG(ERR, "Failed to set RSS lookup table");
2070                         return -EINVAL;
2071                 }
2072         } else {
2073                 uint64_t *lut_dw = (uint64_t *)lut;
2074                 uint16_t i, lut_size_dw = lut_size / 4;
2075
2076                 for (i = 0; i < lut_size_dw; i++)
2077                         ICE_WRITE_REG(hw, PFQF_HLUT(i), lut_dw[i]);
2078
2079                 ice_flush(hw);
2080         }
2081
2082         return 0;
2083 }
2084
2085 static int
2086 ice_rss_reta_update(struct rte_eth_dev *dev,
2087                     struct rte_eth_rss_reta_entry64 *reta_conf,
2088                     uint16_t reta_size)
2089 {
2090         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2091         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2092         uint16_t i, lut_size = hw->func_caps.common_cap.rss_table_size;
2093         uint16_t idx, shift;
2094         uint8_t *lut;
2095         int ret;
2096
2097         if (reta_size != lut_size ||
2098             reta_size > ETH_RSS_RETA_SIZE_512) {
2099                 PMD_DRV_LOG(ERR,
2100                             "The size of hash lookup table configured (%d)"
2101                             "doesn't match the number hardware can "
2102                             "supported (%d)",
2103                             reta_size, lut_size);
2104                 return -EINVAL;
2105         }
2106
2107         lut = rte_zmalloc(NULL, reta_size, 0);
2108         if (!lut) {
2109                 PMD_DRV_LOG(ERR, "No memory can be allocated");
2110                 return -ENOMEM;
2111         }
2112         ret = ice_get_rss_lut(pf->main_vsi, lut, reta_size);
2113         if (ret)
2114                 goto out;
2115
2116         for (i = 0; i < reta_size; i++) {
2117                 idx = i / RTE_RETA_GROUP_SIZE;
2118                 shift = i % RTE_RETA_GROUP_SIZE;
2119                 if (reta_conf[idx].mask & (1ULL << shift))
2120                         lut[i] = reta_conf[idx].reta[shift];
2121         }
2122         ret = ice_set_rss_lut(pf->main_vsi, lut, reta_size);
2123
2124 out:
2125         rte_free(lut);
2126
2127         return ret;
2128 }
2129
2130 static int
2131 ice_rss_reta_query(struct rte_eth_dev *dev,
2132                    struct rte_eth_rss_reta_entry64 *reta_conf,
2133                    uint16_t reta_size)
2134 {
2135         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2136         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2137         uint16_t i, lut_size = hw->func_caps.common_cap.rss_table_size;
2138         uint16_t idx, shift;
2139         uint8_t *lut;
2140         int ret;
2141
2142         if (reta_size != lut_size ||
2143             reta_size > ETH_RSS_RETA_SIZE_512) {
2144                 PMD_DRV_LOG(ERR,
2145                             "The size of hash lookup table configured (%d)"
2146                             "doesn't match the number hardware can "
2147                             "supported (%d)",
2148                             reta_size, lut_size);
2149                 return -EINVAL;
2150         }
2151
2152         lut = rte_zmalloc(NULL, reta_size, 0);
2153         if (!lut) {
2154                 PMD_DRV_LOG(ERR, "No memory can be allocated");
2155                 return -ENOMEM;
2156         }
2157
2158         ret = ice_get_rss_lut(pf->main_vsi, lut, reta_size);
2159         if (ret)
2160                 goto out;
2161
2162         for (i = 0; i < reta_size; i++) {
2163                 idx = i / RTE_RETA_GROUP_SIZE;
2164                 shift = i % RTE_RETA_GROUP_SIZE;
2165                 if (reta_conf[idx].mask & (1ULL << shift))
2166                         reta_conf[idx].reta[shift] = lut[i];
2167         }
2168
2169 out:
2170         rte_free(lut);
2171
2172         return ret;
2173 }
2174
2175 static int
2176 ice_set_rss_key(struct ice_vsi *vsi, uint8_t *key, uint8_t key_len)
2177 {
2178         struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
2179         int ret = 0;
2180
2181         if (!key || key_len == 0) {
2182                 PMD_DRV_LOG(DEBUG, "No key to be configured");
2183                 return 0;
2184         } else if (key_len != (VSIQF_HKEY_MAX_INDEX + 1) *
2185                    sizeof(uint32_t)) {
2186                 PMD_DRV_LOG(ERR, "Invalid key length %u", key_len);
2187                 return -EINVAL;
2188         }
2189
2190         struct ice_aqc_get_set_rss_keys *key_dw =
2191                 (struct ice_aqc_get_set_rss_keys *)key;
2192
2193         ret = ice_aq_set_rss_key(hw, vsi->idx, key_dw);
2194         if (ret) {
2195                 PMD_DRV_LOG(ERR, "Failed to configure RSS key via AQ");
2196                 ret = -EINVAL;
2197         }
2198
2199         return ret;
2200 }
2201
2202 static int
2203 ice_get_rss_key(struct ice_vsi *vsi, uint8_t *key, uint8_t *key_len)
2204 {
2205         struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
2206         int ret;
2207
2208         if (!key || !key_len)
2209                 return -EINVAL;
2210
2211         ret = ice_aq_get_rss_key
2212                 (hw, vsi->idx,
2213                  (struct ice_aqc_get_set_rss_keys *)key);
2214         if (ret) {
2215                 PMD_DRV_LOG(ERR, "Failed to get RSS key via AQ");
2216                 return -EINVAL;
2217         }
2218         *key_len = (VSIQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
2219
2220         return 0;
2221 }
2222
2223 static int
2224 ice_rss_hash_update(struct rte_eth_dev *dev,
2225                     struct rte_eth_rss_conf *rss_conf)
2226 {
2227         enum ice_status status = ICE_SUCCESS;
2228         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2229         struct ice_vsi *vsi = pf->main_vsi;
2230
2231         /* set hash key */
2232         status = ice_set_rss_key(vsi, rss_conf->rss_key, rss_conf->rss_key_len);
2233         if (status)
2234                 return status;
2235
2236         /* TODO: hash enable config, ice_add_rss_cfg */
2237         return 0;
2238 }
2239
2240 static int
2241 ice_rss_hash_conf_get(struct rte_eth_dev *dev,
2242                       struct rte_eth_rss_conf *rss_conf)
2243 {
2244         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2245         struct ice_vsi *vsi = pf->main_vsi;
2246
2247         ice_get_rss_key(vsi, rss_conf->rss_key,
2248                         &rss_conf->rss_key_len);
2249
2250         /* TODO: default set to 0 as hf config is not supported now */
2251         rss_conf->rss_hf = 0;
2252         return 0;
2253 }
2254
/* Configure port-based VLAN (PVID) insertion on a VSI.
 *
 * When @info->on is set, the PVID from @info->config.pvid is recorded
 * and PVID insertion is enabled with untagged mode (only tagged packets
 * may be sent).  When cleared, PVID is reset and the accept-tagged /
 * accept-untagged mode bits are derived from @info->config.reject.
 * The updated VLAN section is pushed to firmware via ice_update_vsi().
 *
 * Returns 0 on success, -EINVAL on bad arguments or VSI update failure.
 */
static int
ice_vsi_vlan_pvid_set(struct ice_vsi *vsi, struct ice_vsi_vlan_pvid_info *info)
{
        struct ice_hw *hw;
        struct ice_vsi_ctx ctxt;
        uint8_t vlan_flags = 0;
        int ret;

        if (!vsi || !info) {
                PMD_DRV_LOG(ERR, "invalid parameters");
                return -EINVAL;
        }

        if (info->on) {
                vsi->info.pvid = info->config.pvid;
                /**
                 * If insert pvid is enabled, only tagged pkts are
                 * allowed to be sent out.
                 */
                vlan_flags = ICE_AQ_VSI_PVLAN_INSERT_PVID |
                             ICE_AQ_VSI_VLAN_MODE_UNTAGGED;
        } else {
                vsi->info.pvid = 0;
                /* Allow tagged traffic unless explicitly rejected */
                if (info->config.reject.tagged == 0)
                        vlan_flags |= ICE_AQ_VSI_VLAN_MODE_TAGGED;

                /* Allow untagged traffic unless explicitly rejected */
                if (info->config.reject.untagged == 0)
                        vlan_flags |= ICE_AQ_VSI_VLAN_MODE_UNTAGGED;
        }
        /* Clear the PVID-insert and mode bits, then apply the new ones */
        vsi->info.vlan_flags &= ~(ICE_AQ_VSI_PVLAN_INSERT_PVID |
                                  ICE_AQ_VSI_VLAN_MODE_M);
        vsi->info.vlan_flags |= vlan_flags;
        memset(&ctxt, 0, sizeof(ctxt));
        rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
        /* Only the VLAN section of the context is being updated */
        ctxt.info.valid_sections =
                rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID);
        ctxt.vsi_num = vsi->vsi_id;

        hw = ICE_VSI_TO_HW(vsi);
        ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
        if (ret != ICE_SUCCESS) {
                PMD_DRV_LOG(ERR,
                            "update VSI for VLAN insert failed, err %d",
                            ret);
                return -EINVAL;
        }

        vsi->info.valid_sections |=
                rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID);

        return ret;
}
2307
2308 static int
2309 ice_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
2310 {
2311         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2312         struct ice_vsi *vsi = pf->main_vsi;
2313         struct rte_eth_dev_data *data = pf->dev_data;
2314         struct ice_vsi_vlan_pvid_info info;
2315         int ret;
2316
2317         memset(&info, 0, sizeof(info));
2318         info.on = on;
2319         if (info.on) {
2320                 info.config.pvid = pvid;
2321         } else {
2322                 info.config.reject.tagged =
2323                         data->dev_conf.txmode.hw_vlan_reject_tagged;
2324                 info.config.reject.untagged =
2325                         data->dev_conf.txmode.hw_vlan_reject_untagged;
2326         }
2327
2328         ret = ice_vsi_vlan_pvid_set(vsi, &info);
2329         if (ret < 0) {
2330                 PMD_DRV_LOG(ERR, "Failed to set pvid.");
2331                 return -EINVAL;
2332         }
2333
2334         return 0;
2335 }
2336
2337 static int
2338 ice_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
2339               struct rte_pci_device *pci_dev)
2340 {
2341         return rte_eth_dev_pci_generic_probe(pci_dev,
2342                                              sizeof(struct ice_adapter),
2343                                              ice_dev_init);
2344 }
2345
2346 static int
2347 ice_pci_remove(struct rte_pci_device *pci_dev)
2348 {
2349         return rte_eth_dev_pci_generic_remove(pci_dev, ice_dev_uninit);
2350 }
2351
/* PCI driver descriptor: device ID table, driver capability flags
 * (needs BAR mapping, supports link-status-change interrupts, can use
 * IOVA as VA) and the probe/remove entry points.
 */
static struct rte_pci_driver rte_ice_pmd = {
        .id_table = pci_id_ice_map,
        .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
                     RTE_PCI_DRV_IOVA_AS_VA,
        .probe = ice_pci_probe,
        .remove = ice_pci_remove,
};
2359
/**
 * Driver initialization routine.
 * Invoked once at EAL init time.
 * Register itself as the [Poll Mode] Driver of PCI devices.
 */
RTE_PMD_REGISTER_PCI(net_ice, rte_ice_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_ice, pci_id_ice_map);
/* Kernel drivers a device may be bound to for this PMD to attach */
RTE_PMD_REGISTER_KMOD_DEP(net_ice, "* igb_uio | uio_pci_generic | vfio-pci");
/* Supported devargs: max_queue_pair_num=<int> */
RTE_PMD_REGISTER_PARAM_STRING(net_ice,
                              ICE_MAX_QP_NUM "=<int>");
2370
2371 RTE_INIT(ice_init_log)
2372 {
2373         ice_logtype_init = rte_log_register("pmd.net.ice.init");
2374         if (ice_logtype_init >= 0)
2375                 rte_log_set_level(ice_logtype_init, RTE_LOG_NOTICE);
2376         ice_logtype_driver = rte_log_register("pmd.net.ice.driver");
2377         if (ice_logtype_driver >= 0)
2378                 rte_log_set_level(ice_logtype_driver, RTE_LOG_NOTICE);
2379 }