net/ice: support link update
[dpdk.git] / drivers / net / ice / ice_ethdev.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018 Intel Corporation
3  */
4
5 #include <rte_ethdev_pci.h>
6
7 #include "base/ice_sched.h"
8 #include "ice_ethdev.h"
9 #include "ice_rxtx.h"
10
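/* Devargs key used to cap the number of queue pairs per port. A hypothetical
 * example invocation: testpmd -w 0000:18:00.0,max_queue_pair_num=4 -- -i
 * (the PCI address here is only illustrative).
 */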
11 #define ICE_MAX_QP_NUM "max_queue_pair_num"
12 #define ICE_DFLT_OUTER_TAG_TYPE ICE_AQ_VSI_OUTER_TAG_VLAN_9100
13
14 int ice_logtype_init;
15 int ice_logtype_driver;
16
17 static int ice_dev_configure(struct rte_eth_dev *dev);
18 static int ice_dev_start(struct rte_eth_dev *dev);
19 static void ice_dev_stop(struct rte_eth_dev *dev);
20 static void ice_dev_close(struct rte_eth_dev *dev);
21 static int ice_dev_reset(struct rte_eth_dev *dev);
22 static void ice_dev_info_get(struct rte_eth_dev *dev,
23                              struct rte_eth_dev_info *dev_info);
24 static int ice_link_update(struct rte_eth_dev *dev,
25                            int wait_to_complete);
26
27 static const struct rte_pci_id pci_id_ice_map[] = {
28         { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_BACKPLANE) },
29         { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP) },
30         { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP) },
31         { .vendor_id = 0, /* sentinel */ },
32 };
33
34 static const struct eth_dev_ops ice_eth_dev_ops = {
35         .dev_configure                = ice_dev_configure,
36         .dev_start                    = ice_dev_start,
37         .dev_stop                     = ice_dev_stop,
38         .dev_close                    = ice_dev_close,
39         .dev_reset                    = ice_dev_reset,
40         .rx_queue_start               = ice_rx_queue_start,
41         .rx_queue_stop                = ice_rx_queue_stop,
42         .tx_queue_start               = ice_tx_queue_start,
43         .tx_queue_stop                = ice_tx_queue_stop,
44         .rx_queue_setup               = ice_rx_queue_setup,
45         .rx_queue_release             = ice_rx_queue_release,
46         .tx_queue_setup               = ice_tx_queue_setup,
47         .tx_queue_release             = ice_tx_queue_release,
48         .dev_infos_get                = ice_dev_info_get,
49         .link_update                  = ice_link_update,
50 };
51
52 static void
53 ice_init_controlq_parameter(struct ice_hw *hw)
54 {
55         /* fields for adminq */
56         hw->adminq.num_rq_entries = ICE_ADMINQ_LEN;
57         hw->adminq.num_sq_entries = ICE_ADMINQ_LEN;
58         hw->adminq.rq_buf_size = ICE_ADMINQ_BUF_SZ;
59         hw->adminq.sq_buf_size = ICE_ADMINQ_BUF_SZ;
60
61         /* fields for mailboxq, used when DPDK acts as the PF host */
62         hw->mailboxq.num_rq_entries = ICE_MAILBOXQ_LEN;
63         hw->mailboxq.num_sq_entries = ICE_MAILBOXQ_LEN;
64         hw->mailboxq.rq_buf_size = ICE_MAILBOXQ_BUF_SZ;
65         hw->mailboxq.sq_buf_size = ICE_MAILBOXQ_BUF_SZ;
66 }
67
68 static int
69 ice_check_qp_num(const char *key, const char *qp_value,
70                  __rte_unused void *opaque)
71 {
72         char *end = NULL;
73         int num = 0;
74
75         while (isblank(*qp_value))
76                 qp_value++;
77
78         num = strtoul(qp_value, &end, 10);
79
80         if (!num || (*end == '-') || errno) {
81                 PMD_DRV_LOG(WARNING, "invalid value:\"%s\" for key:\"%s\", "
82                             "value must be > 0",
83                             qp_value, key);
84                 return -1;
85         }
86
87         return num;
88 }
89
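/* Parse the max_queue_pair_num devargs value. Returns the parsed number on
 * success, or 0 when no devargs are given, the key is absent or parsing fails.
 */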
90 static int
91 ice_config_max_queue_pair_num(struct rte_devargs *devargs)
92 {
93         struct rte_kvargs *kvlist;
94         const char *queue_num_key = ICE_MAX_QP_NUM;
95         int ret;
96
97         if (!devargs)
98                 return 0;
99
100         kvlist = rte_kvargs_parse(devargs->args, NULL);
101         if (!kvlist)
102                 return 0;
103
104         if (!rte_kvargs_count(kvlist, queue_num_key)) {
105                 rte_kvargs_free(kvlist);
106                 return 0;
107         }
108
109         if (rte_kvargs_process(kvlist, queue_num_key,
110                                ice_check_qp_num, NULL) < 0) {
111                 rte_kvargs_free(kvlist);
112                 return 0;
113         }
114         ret = rte_kvargs_process(kvlist, queue_num_key,
115                                  ice_check_qp_num, NULL);
116         rte_kvargs_free(kvlist);
117
118         return ret;
119 }
120
121 static int
122 ice_res_pool_init(struct ice_res_pool_info *pool, uint32_t base,
123                   uint32_t num)
124 {
125         struct pool_entry *entry;
126
127         if (!pool || !num)
128                 return -EINVAL;
129
130         entry = rte_zmalloc(NULL, sizeof(*entry), 0);
131         if (!entry) {
132                 PMD_INIT_LOG(ERR,
133                              "Failed to allocate memory for resource pool");
134                 return -ENOMEM;
135         }
136
137         /* initialize the queue heap */
138         pool->num_free = num;
139         pool->num_alloc = 0;
140         pool->base = base;
141         LIST_INIT(&pool->alloc_list);
142         LIST_INIT(&pool->free_list);
143
144         /* Initialize the element */
145         entry->base = 0;
146         entry->len = num;
147
148         LIST_INSERT_HEAD(&pool->free_list, entry, next);
149         return 0;
150 }
151
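/* Best-fit allocation from the resource pool: pick the smallest free entry
 * able to hold 'num' queues, splitting it when it is larger than requested.
 * Returns the absolute base (entry base + pool base) or a negative errno.
 */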
152 static int
153 ice_res_pool_alloc(struct ice_res_pool_info *pool,
154                    uint16_t num)
155 {
156         struct pool_entry *entry, *valid_entry;
157
158         if (!pool || !num) {
159                 PMD_INIT_LOG(ERR, "Invalid parameter");
160                 return -EINVAL;
161         }
162
163         if (pool->num_free < num) {
164                 PMD_INIT_LOG(ERR, "No resource. ask:%u, available:%u",
165                              num, pool->num_free);
166                 return -ENOMEM;
167         }
168
169         valid_entry = NULL;
170         /* Look up the free list and find the best-fit entry */
171         LIST_FOREACH(entry, &pool->free_list, next) {
172                 if (entry->len >= num) {
173                         /* Find best one */
174                         if (entry->len == num) {
175                                 valid_entry = entry;
176                                 break;
177                         }
178                         if (!valid_entry ||
179                             valid_entry->len > entry->len)
180                                 valid_entry = entry;
181                 }
182         }
183
184         /* No entry found to satisfy the request, return */
185         if (!valid_entry) {
186                 PMD_INIT_LOG(ERR, "No valid entry found");
187                 return -ENOMEM;
188         }
189         /**
190          * The entry has exactly the requested number of queues,
191          * remove it from the free list.
192          */
193         if (valid_entry->len == num) {
194                 LIST_REMOVE(valid_entry, next);
195         } else {
196                 /**
197                  * The entry has more queues than requested, so create
198                  * a new entry for the alloc list and shrink the base and
199                  * length of the remaining entry in the free list.
200                  */
201                 entry = rte_zmalloc(NULL, sizeof(*entry), 0);
202                 if (!entry) {
203                         PMD_INIT_LOG(ERR,
204                                      "Failed to allocate memory for "
205                                      "resource pool");
206                         return -ENOMEM;
207                 }
208                 entry->base = valid_entry->base;
209                 entry->len = num;
210                 valid_entry->base += num;
211                 valid_entry->len -= num;
212                 valid_entry = entry;
213         }
214
215         /* Insert it into alloc list, not sorted */
216         LIST_INSERT_HEAD(&pool->alloc_list, valid_entry, next);
217
218         pool->num_free -= valid_entry->len;
219         pool->num_alloc += valid_entry->len;
220
221         return valid_entry->base + pool->base;
222 }
223
224 static void
225 ice_res_pool_destroy(struct ice_res_pool_info *pool)
226 {
227         struct pool_entry *entry, *next_entry;
228
229         if (!pool)
230                 return;
231
232         for (entry = LIST_FIRST(&pool->alloc_list);
233              entry && (next_entry = LIST_NEXT(entry, next), 1);
234              entry = next_entry) {
235                 LIST_REMOVE(entry, next);
236                 rte_free(entry);
237         }
238
239         for (entry = LIST_FIRST(&pool->free_list);
240              entry && (next_entry = LIST_NEXT(entry, next), 1);
241              entry = next_entry) {
242                 LIST_REMOVE(entry, next);
243                 rte_free(entry);
244         }
245
246         pool->num_free = 0;
247         pool->num_alloc = 0;
248         pool->base = 0;
249         LIST_INIT(&pool->alloc_list);
250         LIST_INIT(&pool->free_list);
251 }
252
253 static void
254 ice_vsi_config_default_rss(struct ice_aqc_vsi_props *info)
255 {
256         /* Set VSI LUT selection */
257         info->q_opt_rss = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI &
258                           ICE_AQ_VSI_Q_OPT_RSS_LUT_M;
259         /* Set Hash scheme */
260         info->q_opt_rss |= ICE_AQ_VSI_Q_OPT_RSS_TPLZ &
261                            ICE_AQ_VSI_Q_OPT_RSS_HASH_M;
262         /* enable TC */
263         info->q_opt_tc = ICE_AQ_VSI_Q_OPT_TC_OVR_M;
264 }
265
266 static enum ice_status
267 ice_vsi_config_tc_queue_mapping(struct ice_vsi *vsi,
268                                 struct ice_aqc_vsi_props *info,
269                                 uint8_t enabled_tcmap)
270 {
271         uint16_t bsf, qp_idx;
272
273         /* Only TC0 is supported for now. Multi-TC support needs to be added later.
274          * Configure TC and queue mapping parameters: for each enabled TC,
275          * allocate qpnum_per_tc queues to that traffic class.
276          */
277         if (enabled_tcmap != 0x01) {
278                 PMD_INIT_LOG(ERR, "only TC0 is supported");
279                 return -ENOTSUP;
280         }
281
282         vsi->nb_qps = RTE_MIN(vsi->nb_qps, ICE_MAX_Q_PER_TC);
283         bsf = rte_bsf32(vsi->nb_qps);
284         /* Adjust the queue number to actual queues that can be applied */
285         vsi->nb_qps = 0x1 << bsf;
286
287         qp_idx = 0;
288         /* Set tc and queue mapping with VSI */
289         info->tc_mapping[0] = rte_cpu_to_le_16((qp_idx <<
290                                                 ICE_AQ_VSI_TC_Q_OFFSET_S) |
291                                                (bsf << ICE_AQ_VSI_TC_Q_NUM_S));
292
293         /* Associate queue number with VSI */
294         info->mapping_flags |= rte_cpu_to_le_16(ICE_AQ_VSI_Q_MAP_CONTIG);
295         info->q_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
296         info->q_mapping[1] = rte_cpu_to_le_16(vsi->nb_qps);
297         info->valid_sections |=
298                 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
299         /* Set the info.ingress_table and info.egress_table
300          * for the UP translate table. For now just set it to a 1:1 map by default
301          * -- 0b 111 110 101 100 011 010 001 000 == 0xFAC688
302          */
303 #define ICE_TC_QUEUE_TABLE_DFLT 0x00FAC688
304         info->ingress_table  = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT);
305         info->egress_table   = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT);
306         info->outer_up_table = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT);
307         return 0;
308 }
309
310 static int
311 ice_init_mac_address(struct rte_eth_dev *dev)
312 {
313         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
314
315         if (!is_unicast_ether_addr
316                 ((struct ether_addr *)hw->port_info[0].mac.lan_addr)) {
317                 PMD_INIT_LOG(ERR, "Invalid MAC address");
318                 return -EINVAL;
319         }
320
321         ether_addr_copy((struct ether_addr *)hw->port_info[0].mac.lan_addr,
322                         (struct ether_addr *)hw->port_info[0].mac.perm_addr);
323
324         dev->data->mac_addrs = rte_zmalloc(NULL, sizeof(struct ether_addr), 0);
325         if (!dev->data->mac_addrs) {
326                 PMD_INIT_LOG(ERR,
327                              "Failed to allocate memory to store mac address");
328                 return -ENOMEM;
329         }
330         /* store it to dev data */
331         ether_addr_copy((struct ether_addr *)hw->port_info[0].mac.perm_addr,
332                         &dev->data->mac_addrs[0]);
333         return 0;
334 }
335
336 /* Enable IRQ0 */
337 static void
338 ice_pf_enable_irq0(struct ice_hw *hw)
339 {
340         /* reset the registers */
341         ICE_WRITE_REG(hw, PFINT_OICR_ENA, 0);
342         ICE_READ_REG(hw, PFINT_OICR);
343
344 #ifdef ICE_LSE_SPT
345         ICE_WRITE_REG(hw, PFINT_OICR_ENA,
346                       (uint32_t)(PFINT_OICR_ENA_INT_ENA_M &
347                                  (~PFINT_OICR_LINK_STAT_CHANGE_M)));
348
349         ICE_WRITE_REG(hw, PFINT_OICR_CTL,
350                       (0 & PFINT_OICR_CTL_MSIX_INDX_M) |
351                       ((0 << PFINT_OICR_CTL_ITR_INDX_S) &
352                        PFINT_OICR_CTL_ITR_INDX_M) |
353                       PFINT_OICR_CTL_CAUSE_ENA_M);
354
355         ICE_WRITE_REG(hw, PFINT_FW_CTL,
356                       (0 & PFINT_FW_CTL_MSIX_INDX_M) |
357                       ((0 << PFINT_FW_CTL_ITR_INDX_S) &
358                        PFINT_FW_CTL_ITR_INDX_M) |
359                       PFINT_FW_CTL_CAUSE_ENA_M);
360 #else
361         ICE_WRITE_REG(hw, PFINT_OICR_ENA, PFINT_OICR_ENA_INT_ENA_M);
362 #endif
363
364         ICE_WRITE_REG(hw, GLINT_DYN_CTL(0),
365                       GLINT_DYN_CTL_INTENA_M |
366                       GLINT_DYN_CTL_CLEARPBA_M |
367                       GLINT_DYN_CTL_ITR_INDX_M);
368
369         ice_flush(hw);
370 }
371
372 /* Disable IRQ0 */
373 static void
374 ice_pf_disable_irq0(struct ice_hw *hw)
375 {
376         /* Disable all interrupt types */
377         ICE_WRITE_REG(hw, GLINT_DYN_CTL(0), GLINT_DYN_CTL_WB_ON_ITR_M);
378         ice_flush(hw);
379 }
380
381 #ifdef ICE_LSE_SPT
382 static void
383 ice_handle_aq_msg(struct rte_eth_dev *dev)
384 {
385         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
386         struct ice_ctl_q_info *cq = &hw->adminq;
387         struct ice_rq_event_info event;
388         uint16_t pending, opcode;
389         int ret;
390
391         event.buf_len = ICE_AQ_MAX_BUF_LEN;
392         event.msg_buf = rte_zmalloc(NULL, event.buf_len, 0);
393         if (!event.msg_buf) {
394                 PMD_DRV_LOG(ERR, "Failed to allocate mem");
395                 return;
396         }
397
398         pending = 1;
399         while (pending) {
400                 ret = ice_clean_rq_elem(hw, cq, &event, &pending);
401
402                 if (ret != ICE_SUCCESS) {
403                         PMD_DRV_LOG(INFO,
404                                     "Failed to read msg from AdminQ, "
405                                     "adminq_err: %u",
406                                     hw->adminq.sq_last_status);
407                         break;
408                 }
409                 opcode = rte_le_to_cpu_16(event.desc.opcode);
410
411                 switch (opcode) {
412                 case ice_aqc_opc_get_link_status:
413                         ret = ice_link_update(dev, 0);
414                         if (!ret)
415                                 _rte_eth_dev_callback_process
416                                         (dev, RTE_ETH_EVENT_INTR_LSC, NULL);
417                         break;
418                 default:
419                         PMD_DRV_LOG(DEBUG, "Request %u is not supported yet",
420                                     opcode);
421                         break;
422                 }
423         }
424         rte_free(event.msg_buf);
425 }
426 #endif
427
428 /**
429  * Interrupt handler triggered by NIC for handling
430  * specific interrupt.
431  *
432  * @param handle
433  *  Pointer to interrupt handle.
434  * @param param
435  *  The address of parameter (struct rte_eth_dev *) registered before.
436  *
437  * @return
438  *  void
439  */
440 static void
441 ice_interrupt_handler(void *param)
442 {
443         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
444         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
445         uint32_t oicr;
446         uint32_t reg;
447         uint8_t pf_num;
448         uint8_t event;
449         uint16_t queue;
450 #ifdef ICE_LSE_SPT
451         uint32_t int_fw_ctl;
452 #endif
453
454         /* Disable interrupt */
455         ice_pf_disable_irq0(hw);
456
457         /* read out interrupt causes */
458         oicr = ICE_READ_REG(hw, PFINT_OICR);
459 #ifdef ICE_LSE_SPT
460         int_fw_ctl = ICE_READ_REG(hw, PFINT_FW_CTL);
461 #endif
462
463         /* No interrupt event indicated */
464         if (!(oicr & PFINT_OICR_INTEVENT_M)) {
465                 PMD_DRV_LOG(INFO, "No interrupt event");
466                 goto done;
467         }
468
469 #ifdef ICE_LSE_SPT
470         if (int_fw_ctl & PFINT_FW_CTL_INTEVENT_M) {
471                 PMD_DRV_LOG(INFO, "FW_CTL: link state change event");
472                 ice_handle_aq_msg(dev);
473         }
474 #else
475         if (oicr & PFINT_OICR_LINK_STAT_CHANGE_M) {
476                 PMD_DRV_LOG(INFO, "OICR: link state change event");
477                 ice_link_update(dev, 0);
478         }
479 #endif
480
481         if (oicr & PFINT_OICR_MAL_DETECT_M) {
482                 PMD_DRV_LOG(WARNING, "OICR: MDD event");
483                 reg = ICE_READ_REG(hw, GL_MDET_TX_PQM);
484                 if (reg & GL_MDET_TX_PQM_VALID_M) {
485                         pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >>
486                                  GL_MDET_TX_PQM_PF_NUM_S;
487                         event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >>
488                                 GL_MDET_TX_PQM_MAL_TYPE_S;
489                         queue = (reg & GL_MDET_TX_PQM_QNUM_M) >>
490                                 GL_MDET_TX_PQM_QNUM_S;
491
492                         PMD_DRV_LOG(WARNING, "Malicious Driver Detection event "
493                                     "%d by PQM on TX queue %d PF# %d",
494                                     event, queue, pf_num);
495                 }
496
497                 reg = ICE_READ_REG(hw, GL_MDET_TX_TCLAN);
498                 if (reg & GL_MDET_TX_TCLAN_VALID_M) {
499                         pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >>
500                                  GL_MDET_TX_TCLAN_PF_NUM_S;
501                         event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >>
502                                 GL_MDET_TX_TCLAN_MAL_TYPE_S;
503                         queue = (reg & GL_MDET_TX_TCLAN_QNUM_M) >>
504                                 GL_MDET_TX_TCLAN_QNUM_S;
505
506                         PMD_DRV_LOG(WARNING, "Malicious Driver Detection event "
507                                     "%d by TCLAN on TX queue %d PF# %d",
508                                     event, queue, pf_num);
509                 }
510         }
511 done:
512         /* Enable interrupt */
513         ice_pf_enable_irq0(hw);
514         rte_intr_enable(dev->intr_handle);
515 }
516
517 /* Initialize SW parameters of PF */
518 static int
519 ice_pf_sw_init(struct rte_eth_dev *dev)
520 {
521         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
522         struct ice_hw *hw = ICE_PF_TO_HW(pf);
523
524         if (ice_config_max_queue_pair_num(dev->device->devargs) > 0)
525                 pf->lan_nb_qp_max =
526                         ice_config_max_queue_pair_num(dev->device->devargs);
527         else
528                 pf->lan_nb_qp_max =
529                         (uint16_t)RTE_MIN(hw->func_caps.common_cap.num_txq,
530                                           hw->func_caps.common_cap.num_rxq);
531
532         pf->lan_nb_qps = pf->lan_nb_qp_max;
533
534         return 0;
535 }
536
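/* Create and configure a VSI of the given type. Only ICE_VSI_PF is handled
 * for now: it takes MSI-X vectors from the PF pool, adds the VSI through the
 * AdminQ and configures its Tx scheduler nodes via ice_cfg_vsi_lan().
 */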
537 static struct ice_vsi *
538 ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type)
539 {
540         struct ice_hw *hw = ICE_PF_TO_HW(pf);
541         struct ice_vsi *vsi = NULL;
542         struct ice_vsi_ctx vsi_ctx;
543         int ret;
544         uint16_t max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
545         uint8_t tc_bitmap = 0x1;
546
547         /* hw->num_lports = 1 in NIC mode */
548         vsi = rte_zmalloc(NULL, sizeof(struct ice_vsi), 0);
549         if (!vsi)
550                 return NULL;
551
552         vsi->idx = pf->next_vsi_idx;
553         pf->next_vsi_idx++;
554         vsi->type = type;
555         vsi->adapter = ICE_PF_TO_ADAPTER(pf);
556         vsi->max_macaddrs = ICE_NUM_MACADDR_MAX;
557         vsi->vlan_anti_spoof_on = 0;
558         vsi->vlan_filter_on = 1;
559         TAILQ_INIT(&vsi->mac_list);
560         TAILQ_INIT(&vsi->vlan_list);
561
562         memset(&vsi_ctx, 0, sizeof(vsi_ctx));
563         /* base_queue is used in the queue mapping of the VSI add/update command.
564          * Assume vsi->base_queue is 0 for now; SRIOV and VMDQ cases are not
565          * considered in the first stage. Only the main VSI is handled.
566          */
567         vsi->base_queue = 0;
568         switch (type) {
569         case ICE_VSI_PF:
570                 vsi->nb_qps = pf->lan_nb_qps;
571                 ice_vsi_config_default_rss(&vsi_ctx.info);
572                 vsi_ctx.alloc_from_pool = true;
573                 vsi_ctx.flags = ICE_AQ_VSI_TYPE_PF;
574                 /* switch_id is queried by the get_switch_config AQ command,
575                  * which is done in ice_init_hw
576                  */
577                 vsi_ctx.info.sw_id = hw->port_info->sw_id;
578                 vsi_ctx.info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
579                 /* Allow all untagged or tagged packets */
580                 vsi_ctx.info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL;
581                 vsi_ctx.info.vlan_flags |= ICE_AQ_VSI_VLAN_EMOD_NOTHING;
582                 vsi_ctx.info.q_opt_rss = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF |
583                                          ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
584                 /* Enable VLAN/UP trip */
585                 ret = ice_vsi_config_tc_queue_mapping(vsi,
586                                                       &vsi_ctx.info,
587                                                       ICE_DEFAULT_TCMAP);
588                 if (ret) {
589                         PMD_INIT_LOG(ERR,
590                                      "tc queue mapping with vsi failed, "
591                                      "err = %d",
592                                      ret);
593                         goto fail_mem;
594                 }
595
596                 break;
597         default:
598                 /* for other types of VSI */
599                 PMD_INIT_LOG(ERR, "other types of VSI not supported");
600                 goto fail_mem;
601         }
602
603         /* VF has MSIX interrupt in VF range, don't allocate here */
604         if (type == ICE_VSI_PF) {
605                 ret = ice_res_pool_alloc(&pf->msix_pool,
606                                          RTE_MIN(vsi->nb_qps,
607                                                  RTE_MAX_RXTX_INTR_VEC_ID));
608                 if (ret < 0) {
609                         PMD_INIT_LOG(ERR, "VSI MAIN %d get heap failed %d",
610                                      vsi->vsi_id, ret);
611                 }
612                 vsi->msix_intr = ret;
613                 vsi->nb_msix = RTE_MIN(vsi->nb_qps, RTE_MAX_RXTX_INTR_VEC_ID);
614         } else {
615                 vsi->msix_intr = 0;
616                 vsi->nb_msix = 0;
617         }
618         ret = ice_add_vsi(hw, vsi->idx, &vsi_ctx, NULL);
619         if (ret != ICE_SUCCESS) {
620                 PMD_INIT_LOG(ERR, "add vsi failed, err = %d", ret);
621                 goto fail_mem;
622         }
623         /* store VSI information in the SW structure */
624         vsi->vsi_id = vsi_ctx.vsi_num;
625         vsi->info = vsi_ctx.info;
626         pf->vsis_allocated = vsi_ctx.vsis_allocd;
627         pf->vsis_unallocated = vsi_ctx.vsis_unallocated;
628
629         /* At the beginning, only TC0. */
630         /* What we need here is the maximum number of Tx queues.
631          * Currently vsi->nb_qps holds it.
632          * Correct this if that ever changes.
633          */
634         max_txqs[0] = vsi->nb_qps;
635         ret = ice_cfg_vsi_lan(hw->port_info, vsi->idx,
636                               tc_bitmap, max_txqs);
637         if (ret != ICE_SUCCESS)
638                 PMD_INIT_LOG(ERR, "Failed to config vsi sched");
639
640         return vsi;
641 fail_mem:
642         rte_free(vsi);
643         pf->next_vsi_idx--;
644         return NULL;
645 }
646
647 static int
648 ice_pf_setup(struct ice_pf *pf)
649 {
650         struct ice_vsi *vsi;
651
652         /* Clear all stats counters */
653         pf->offset_loaded = FALSE;
654         memset(&pf->stats, 0, sizeof(struct ice_hw_port_stats));
655         memset(&pf->stats_offset, 0, sizeof(struct ice_hw_port_stats));
656         memset(&pf->internal_stats, 0, sizeof(struct ice_eth_stats));
657         memset(&pf->internal_stats_offset, 0, sizeof(struct ice_eth_stats));
658
659         vsi = ice_setup_vsi(pf, ICE_VSI_PF);
660         if (!vsi) {
661                 PMD_INIT_LOG(ERR, "Failed to add vsi for PF");
662                 return -EINVAL;
663         }
664
665         pf->main_vsi = vsi;
666
667         return 0;
668 }
669
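/* Main init path of the PF ethdev: map device registers, initialize the
 * shared HW code and control queues, read the permanent MAC, create the
 * MSI-X resource pool and the main VSI, then hook the misc interrupt handler.
 */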
670 static int
671 ice_dev_init(struct rte_eth_dev *dev)
672 {
673         struct rte_pci_device *pci_dev;
674         struct rte_intr_handle *intr_handle;
675         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
676         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
677         int ret;
678
679         dev->dev_ops = &ice_eth_dev_ops;
680
681         pci_dev = RTE_DEV_TO_PCI(dev->device);
682         intr_handle = &pci_dev->intr_handle;
683
684         pf->adapter = ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
685         pf->adapter->eth_dev = dev;
686         pf->dev_data = dev->data;
687         hw->back = pf->adapter;
688         hw->hw_addr = (uint8_t *)pci_dev->mem_resource[0].addr;
689         hw->vendor_id = pci_dev->id.vendor_id;
690         hw->device_id = pci_dev->id.device_id;
691         hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
692         hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
693         hw->bus.device = pci_dev->addr.devid;
694         hw->bus.func = pci_dev->addr.function;
695
696         ice_init_controlq_parameter(hw);
697
698         ret = ice_init_hw(hw);
699         if (ret) {
700                 PMD_INIT_LOG(ERR, "Failed to initialize HW");
701                 return -EINVAL;
702         }
703
704         PMD_INIT_LOG(INFO, "FW %d.%d.%05d API %d.%d",
705                      hw->fw_maj_ver, hw->fw_min_ver, hw->fw_build,
706                      hw->api_maj_ver, hw->api_min_ver);
707
708         ice_pf_sw_init(dev);
709         ret = ice_init_mac_address(dev);
710         if (ret) {
711                 PMD_INIT_LOG(ERR, "Failed to initialize mac address");
712                 goto err_init_mac;
713         }
714
715         ret = ice_res_pool_init(&pf->msix_pool, 1,
716                                 hw->func_caps.common_cap.num_msix_vectors - 1);
717         if (ret) {
718                 PMD_INIT_LOG(ERR, "Failed to init MSIX pool");
719                 goto err_msix_pool_init;
720         }
721
722         ret = ice_pf_setup(pf);
723         if (ret) {
724                 PMD_INIT_LOG(ERR, "Failed to setup PF");
725                 goto err_pf_setup;
726         }
727
728         /* register callback func to eal lib */
729         rte_intr_callback_register(intr_handle,
730                                    ice_interrupt_handler, dev);
731
732         ice_pf_enable_irq0(hw);
733
734         /* enable uio intr after callback register */
735         rte_intr_enable(intr_handle);
736
737         return 0;
738
739 err_pf_setup:
740         ice_res_pool_destroy(&pf->msix_pool);
741 err_msix_pool_init:
742         rte_free(dev->data->mac_addrs);
743 err_init_mac:
744         ice_sched_cleanup_all(hw);
745         rte_free(hw->port_info);
746         ice_shutdown_all_ctrlq(hw);
747
748         return ret;
749 }
750
751 static int
752 ice_release_vsi(struct ice_vsi *vsi)
753 {
754         struct ice_hw *hw;
755         struct ice_vsi_ctx vsi_ctx;
756         enum ice_status ret;
757
758         if (!vsi)
759                 return 0;
760
761         hw = ICE_VSI_TO_HW(vsi);
762
763         memset(&vsi_ctx, 0, sizeof(vsi_ctx));
764
765         vsi_ctx.vsi_num = vsi->vsi_id;
766         vsi_ctx.info = vsi->info;
767         ret = ice_free_vsi(hw, vsi->idx, &vsi_ctx, false, NULL);
768         if (ret != ICE_SUCCESS) {
769                 PMD_INIT_LOG(ERR, "Failed to free vsi by aq, %u", vsi->vsi_id);
770                 rte_free(vsi);
771                 return -1;
772         }
773
774         rte_free(vsi);
775         return 0;
776 }
777
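/* Stop all Rx/Tx queues, release their mbufs and tear down the datapath
 * interrupt/vector mapping. The adapter_stopped flag makes repeated calls
 * a no-op.
 */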
778 static void
779 ice_dev_stop(struct rte_eth_dev *dev)
780 {
781         struct rte_eth_dev_data *data = dev->data;
782         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
783         struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
784         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
785         uint16_t i;
786
787         /* avoid stopping again */
788         if (pf->adapter_stopped)
789                 return;
790
791         /* stop and clear all Rx queues */
792         for (i = 0; i < data->nb_rx_queues; i++)
793                 ice_rx_queue_stop(dev, i);
794
795         /* stop and clear all Tx queues */
796         for (i = 0; i < data->nb_tx_queues; i++)
797                 ice_tx_queue_stop(dev, i);
798
799         /* Clear all queues and release mbufs */
800         ice_clear_queues(dev);
801
802         /* Clean datapath event and queue/vec mapping */
803         rte_intr_efd_disable(intr_handle);
804         if (intr_handle->intr_vec) {
805                 rte_free(intr_handle->intr_vec);
806                 intr_handle->intr_vec = NULL;
807         }
808
809         pf->adapter_stopped = true;
810 }
811
812 static void
813 ice_dev_close(struct rte_eth_dev *dev)
814 {
815         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
816         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
817
818         ice_dev_stop(dev);
819
820         /* release all queue resources */
821         ice_free_queues(dev);
822
823         ice_res_pool_destroy(&pf->msix_pool);
824         ice_release_vsi(pf->main_vsi);
825
826         ice_shutdown_all_ctrlq(hw);
827 }
828
829 static int
830 ice_dev_uninit(struct rte_eth_dev *dev)
831 {
832         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
833         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
834         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
835         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
836
837         ice_dev_close(dev);
838
839         dev->dev_ops = NULL;
840         dev->rx_pkt_burst = NULL;
841         dev->tx_pkt_burst = NULL;
842
843         rte_free(dev->data->mac_addrs);
844         dev->data->mac_addrs = NULL;
845
846         /* disable uio intr before callback unregister */
847         rte_intr_disable(intr_handle);
848
849         /* unregister callback func from eal lib */
850         rte_intr_callback_unregister(intr_handle,
851                                      ice_interrupt_handler, dev);
852
853         ice_release_vsi(pf->main_vsi);
854         ice_sched_cleanup_all(hw);
855         rte_free(hw->port_info);
856         ice_shutdown_all_ctrlq(hw);
857
858         return 0;
859 }
860
861 static int
862 ice_dev_configure(struct rte_eth_dev *dev)
863 {
864         struct ice_adapter *ad =
865                 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
866
867         /* Initialize to TRUE. If any of the Rx queues doesn't meet the
868          * bulk allocation or vector Rx preconditions, it will be reset.
869          */
870         ad->rx_bulk_alloc_allowed = true;
871         ad->tx_simple_allowed = true;
872
873         return 0;
874 }
875
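/* Program the RSS hash key (user supplied or random) and a round-robin
 * lookup table (lut[i] = i % nb_rx_queues) for the main VSI through the
 * AdminQ set RSS key/LUT commands.
 */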
876 static int ice_init_rss(struct ice_pf *pf)
877 {
878         struct ice_hw *hw = ICE_PF_TO_HW(pf);
879         struct ice_vsi *vsi = pf->main_vsi;
880         struct rte_eth_dev *dev = pf->adapter->eth_dev;
881         struct rte_eth_rss_conf *rss_conf;
882         struct ice_aqc_get_set_rss_keys key;
883         uint16_t i, nb_q;
884         int ret = 0;
885
886         rss_conf = &dev->data->dev_conf.rx_adv_conf.rss_conf;
887         nb_q = dev->data->nb_rx_queues;
888         vsi->rss_key_size = ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE;
889         vsi->rss_lut_size = hw->func_caps.common_cap.rss_table_size;
890
891         if (!vsi->rss_key)
892                 vsi->rss_key = rte_zmalloc(NULL,
893                                            vsi->rss_key_size, 0);
894         if (!vsi->rss_lut)
895                 vsi->rss_lut = rte_zmalloc(NULL,
896                                            vsi->rss_lut_size, 0);
897
898         /* configure RSS key */
899         if (!rss_conf->rss_key) {
900                 /* Calculate the default hash key */
901                 for (i = 0; i < vsi->rss_key_size; i++)
902                         vsi->rss_key[i] = (uint8_t)rte_rand();
903         } else {
904                 rte_memcpy(vsi->rss_key, rss_conf->rss_key,
905                            RTE_MIN(rss_conf->rss_key_len,
906                                    vsi->rss_key_size));
907         }
908         rte_memcpy(key.standard_rss_key, vsi->rss_key, vsi->rss_key_size);
909         ret = ice_aq_set_rss_key(hw, vsi->idx, &key);
910         if (ret)
911                 return -EINVAL;
912
913         /* init RSS LUT table */
914         for (i = 0; i < vsi->rss_lut_size; i++)
915                 vsi->rss_lut[i] = i % nb_q;
916
917         ret = ice_aq_set_rss_lut(hw, vsi->idx,
918                                  ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF,
919                                  vsi->rss_lut, vsi->rss_lut_size);
920         if (ret)
921                 return -EINVAL;
922
923         return 0;
924 }
925
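/* Start all Tx and Rx queues, set up RSS, subscribe to the link events of
 * interest with ice_aq_set_event_mask() and refresh the initial link status.
 */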
926 static int
927 ice_dev_start(struct rte_eth_dev *dev)
928 {
929         struct rte_eth_dev_data *data = dev->data;
930         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
931         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
932         uint16_t nb_rxq = 0;
933         uint16_t nb_txq, i;
934         int ret;
935
936         /* program Tx queues' context in hardware */
937         for (nb_txq = 0; nb_txq < data->nb_tx_queues; nb_txq++) {
938                 ret = ice_tx_queue_start(dev, nb_txq);
939                 if (ret) {
940                         PMD_DRV_LOG(ERR, "fail to start Tx queue %u", nb_txq);
941                         goto tx_err;
942                 }
943         }
944
945         /* program Rx queues' context in hardware */
946         for (nb_rxq = 0; nb_rxq < data->nb_rx_queues; nb_rxq++) {
947                 ret = ice_rx_queue_start(dev, nb_rxq);
948                 if (ret) {
949                         PMD_DRV_LOG(ERR, "fail to start Rx queue %u", nb_rxq);
950                         goto rx_err;
951                 }
952         }
953
954         ret = ice_init_rss(pf);
955         if (ret) {
956                 PMD_DRV_LOG(ERR, "Failed to enable rss for PF");
957                 goto rx_err;
958         }
959
960         ret = ice_aq_set_event_mask(hw, hw->port_info->lport,
961                                     ((u16)(ICE_AQ_LINK_EVENT_LINK_FAULT |
962                                      ICE_AQ_LINK_EVENT_PHY_TEMP_ALARM |
963                                      ICE_AQ_LINK_EVENT_EXCESSIVE_ERRORS |
964                                      ICE_AQ_LINK_EVENT_SIGNAL_DETECT |
965                                      ICE_AQ_LINK_EVENT_AN_COMPLETED |
966                                      ICE_AQ_LINK_EVENT_PORT_TX_SUSPENDED)),
967                                      NULL);
968         if (ret != ICE_SUCCESS)
969                 PMD_DRV_LOG(WARNING, "Fail to set phy mask");
970
971         /* Call the get_link_info AQ command to enable/disable LSE */
972         ice_link_update(dev, 0);
973
974         pf->adapter_stopped = false;
975
976         return 0;
977
978         /* stop the started queues if we failed to start all of them */
979 rx_err:
980         for (i = 0; i < nb_rxq; i++)
981                 ice_rx_queue_stop(dev, i);
982 tx_err:
983         for (i = 0; i < nb_txq; i++)
984                 ice_tx_queue_stop(dev, i);
985
986         return -EIO;
987 }
988
989 static int
990 ice_dev_reset(struct rte_eth_dev *dev)
991 {
992         int ret;
993
994         if (dev->data->sriov.active)
995                 return -ENOTSUP;
996
997         ret = ice_dev_uninit(dev);
998         if (ret) {
999                 PMD_INIT_LOG(ERR, "failed to uninit device, status = %d", ret);
1000                 return -ENXIO;
1001         }
1002
1003         ret = ice_dev_init(dev);
1004         if (ret) {
1005                 PMD_INIT_LOG(ERR, "failed to init device, status = %d", ret);
1006                 return -ENXIO;
1007         }
1008
1009         return 0;
1010 }
1011
1012 static void
1013 ice_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1014 {
1015         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1016         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1017         struct ice_vsi *vsi = pf->main_vsi;
1018         struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
1019
1020         dev_info->min_rx_bufsize = ICE_BUF_SIZE_MIN;
1021         dev_info->max_rx_pktlen = ICE_FRAME_SIZE_MAX;
1022         dev_info->max_rx_queues = vsi->nb_qps;
1023         dev_info->max_tx_queues = vsi->nb_qps;
1024         dev_info->max_mac_addrs = vsi->max_macaddrs;
1025         dev_info->max_vfs = pci_dev->max_vfs;
1026
1027         dev_info->rx_offload_capa =
1028                 DEV_RX_OFFLOAD_VLAN_STRIP |
1029                 DEV_RX_OFFLOAD_IPV4_CKSUM |
1030                 DEV_RX_OFFLOAD_UDP_CKSUM |
1031                 DEV_RX_OFFLOAD_TCP_CKSUM |
1032                 DEV_RX_OFFLOAD_QINQ_STRIP |
1033                 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
1034                 DEV_RX_OFFLOAD_VLAN_EXTEND |
1035                 DEV_RX_OFFLOAD_JUMBO_FRAME |
1036                 DEV_RX_OFFLOAD_KEEP_CRC |
1037                 DEV_RX_OFFLOAD_SCATTER |
1038                 DEV_RX_OFFLOAD_VLAN_FILTER;
1039         dev_info->tx_offload_capa =
1040                 DEV_TX_OFFLOAD_VLAN_INSERT |
1041                 DEV_TX_OFFLOAD_QINQ_INSERT |
1042                 DEV_TX_OFFLOAD_IPV4_CKSUM |
1043                 DEV_TX_OFFLOAD_UDP_CKSUM |
1044                 DEV_TX_OFFLOAD_TCP_CKSUM |
1045                 DEV_TX_OFFLOAD_SCTP_CKSUM |
1046                 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
1047                 DEV_TX_OFFLOAD_TCP_TSO |
1048                 DEV_TX_OFFLOAD_MULTI_SEGS |
1049                 DEV_TX_OFFLOAD_MBUF_FAST_FREE;
1050         dev_info->rx_queue_offload_capa = 0;
1051         dev_info->tx_queue_offload_capa = 0;
1052
1053         dev_info->reta_size = hw->func_caps.common_cap.rss_table_size;
1054         dev_info->hash_key_size = (VSIQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
1055         dev_info->flow_type_rss_offloads = ICE_RSS_OFFLOAD_ALL;
1056
1057         dev_info->default_rxconf = (struct rte_eth_rxconf) {
1058                 .rx_thresh = {
1059                         .pthresh = ICE_DEFAULT_RX_PTHRESH,
1060                         .hthresh = ICE_DEFAULT_RX_HTHRESH,
1061                         .wthresh = ICE_DEFAULT_RX_WTHRESH,
1062                 },
1063                 .rx_free_thresh = ICE_DEFAULT_RX_FREE_THRESH,
1064                 .rx_drop_en = 0,
1065                 .offloads = 0,
1066         };
1067
1068         dev_info->default_txconf = (struct rte_eth_txconf) {
1069                 .tx_thresh = {
1070                         .pthresh = ICE_DEFAULT_TX_PTHRESH,
1071                         .hthresh = ICE_DEFAULT_TX_HTHRESH,
1072                         .wthresh = ICE_DEFAULT_TX_WTHRESH,
1073                 },
1074                 .tx_free_thresh = ICE_DEFAULT_TX_FREE_THRESH,
1075                 .tx_rs_thresh = ICE_DEFAULT_TX_RSBIT_THRESH,
1076                 .offloads = 0,
1077         };
1078
1079         dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
1080                 .nb_max = ICE_MAX_RING_DESC,
1081                 .nb_min = ICE_MIN_RING_DESC,
1082                 .nb_align = ICE_ALIGN_RING_DESC,
1083         };
1084
1085         dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
1086                 .nb_max = ICE_MAX_RING_DESC,
1087                 .nb_min = ICE_MIN_RING_DESC,
1088                 .nb_align = ICE_ALIGN_RING_DESC,
1089         };
1090
1091         dev_info->speed_capa = ETH_LINK_SPEED_10M |
1092                                ETH_LINK_SPEED_100M |
1093                                ETH_LINK_SPEED_1G |
1094                                ETH_LINK_SPEED_2_5G |
1095                                ETH_LINK_SPEED_5G |
1096                                ETH_LINK_SPEED_10G |
1097                                ETH_LINK_SPEED_20G |
1098                                ETH_LINK_SPEED_25G |
1099                                ETH_LINK_SPEED_40G;
1100
1101         dev_info->nb_rx_queues = dev->data->nb_rx_queues;
1102         dev_info->nb_tx_queues = dev->data->nb_tx_queues;
1103
1104         dev_info->default_rxportconf.burst_size = ICE_RX_MAX_BURST;
1105         dev_info->default_txportconf.burst_size = ICE_TX_MAX_BURST;
1106         dev_info->default_rxportconf.nb_queues = 1;
1107         dev_info->default_txportconf.nb_queues = 1;
1108         dev_info->default_rxportconf.ring_size = ICE_BUF_SIZE_MIN;
1109         dev_info->default_txportconf.ring_size = ICE_BUF_SIZE_MIN;
1110 }
1111
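/* dev->data->dev_link may also be updated from the interrupt handler, so
 * read and write it with a 64-bit compare-and-set to keep the snapshot
 * consistent.
 */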
1112 static inline int
1113 ice_atomic_read_link_status(struct rte_eth_dev *dev,
1114                             struct rte_eth_link *link)
1115 {
1116         struct rte_eth_link *dst = link;
1117         struct rte_eth_link *src = &dev->data->dev_link;
1118
1119         if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
1120                                 *(uint64_t *)src) == 0)
1121                 return -1;
1122
1123         return 0;
1124 }
1125
1126 static inline int
1127 ice_atomic_write_link_status(struct rte_eth_dev *dev,
1128                              struct rte_eth_link *link)
1129 {
1130         struct rte_eth_link *dst = &dev->data->dev_link;
1131         struct rte_eth_link *src = link;
1132
1133         if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
1134                                 *(uint64_t *)src) == 0)
1135                 return -1;
1136
1137         return 0;
1138 }
1139
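/* Query the link status from firmware. When wait_to_complete is set, poll up
 * to MAX_REPEAT_TIME * CHECK_INTERVAL ms for the link to come up. Enabling
 * LSE follows dev_conf.intr_conf.lsc. Returns 0 when the reported link status
 * changed, -1 otherwise.
 */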
1140 static int
1141 ice_link_update(struct rte_eth_dev *dev, int wait_to_complete)
1142 {
1143 #define CHECK_INTERVAL 100  /* 100ms */
1144 #define MAX_REPEAT_TIME 10  /* 1s (10 * 100ms) in total */
1145         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1146         struct ice_link_status link_status;
1147         struct rte_eth_link link, old;
1148         int status;
1149         unsigned int rep_cnt = MAX_REPEAT_TIME;
1150         bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false;
1151
1152         memset(&link, 0, sizeof(link));
1153         memset(&old, 0, sizeof(old));
1154         memset(&link_status, 0, sizeof(link_status));
1155         ice_atomic_read_link_status(dev, &old);
1156
1157         do {
1158                 /* Get link status information from hardware */
1159                 status = ice_aq_get_link_info(hw->port_info, enable_lse,
1160                                               &link_status, NULL);
1161                 if (status != ICE_SUCCESS) {
1162                         link.link_speed = ETH_SPEED_NUM_100M;
1163                         link.link_duplex = ETH_LINK_FULL_DUPLEX;
1164                         PMD_DRV_LOG(ERR, "Failed to get link info");
1165                         goto out;
1166                 }
1167
1168                 link.link_status = link_status.link_info & ICE_AQ_LINK_UP;
1169                 if (!wait_to_complete || link.link_status)
1170                         break;
1171
1172                 rte_delay_ms(CHECK_INTERVAL);
1173         } while (--rep_cnt);
1174
1175         if (!link.link_status)
1176                 goto out;
1177
1178         /* Full-duplex operation at all supported speeds */
1179         link.link_duplex = ETH_LINK_FULL_DUPLEX;
1180
1181         /* Parse the link status */
1182         switch (link_status.link_speed) {
1183         case ICE_AQ_LINK_SPEED_10MB:
1184                 link.link_speed = ETH_SPEED_NUM_10M;
1185                 break;
1186         case ICE_AQ_LINK_SPEED_100MB:
1187                 link.link_speed = ETH_SPEED_NUM_100M;
1188                 break;
1189         case ICE_AQ_LINK_SPEED_1000MB:
1190                 link.link_speed = ETH_SPEED_NUM_1G;
1191                 break;
1192         case ICE_AQ_LINK_SPEED_2500MB:
1193                 link.link_speed = ETH_SPEED_NUM_2_5G;
1194                 break;
1195         case ICE_AQ_LINK_SPEED_5GB:
1196                 link.link_speed = ETH_SPEED_NUM_5G;
1197                 break;
1198         case ICE_AQ_LINK_SPEED_10GB:
1199                 link.link_speed = ETH_SPEED_NUM_10G;
1200                 break;
1201         case ICE_AQ_LINK_SPEED_20GB:
1202                 link.link_speed = ETH_SPEED_NUM_20G;
1203                 break;
1204         case ICE_AQ_LINK_SPEED_25GB:
1205                 link.link_speed = ETH_SPEED_NUM_25G;
1206                 break;
1207         case ICE_AQ_LINK_SPEED_40GB:
1208                 link.link_speed = ETH_SPEED_NUM_40G;
1209                 break;
1210         case ICE_AQ_LINK_SPEED_UNKNOWN:
1211         default:
1212                 PMD_DRV_LOG(ERR, "Unknown link speed");
1213                 link.link_speed = ETH_SPEED_NUM_NONE;
1214                 break;
1215         }
1216
1217         link.link_autoneg = !(dev->data->dev_conf.link_speeds &
1218                               ETH_LINK_SPEED_FIXED);
1219
1220 out:
1221         ice_atomic_write_link_status(dev, &link);
1222         if (link.link_status == old.link_status)
1223                 return -1;
1224
1225         return 0;
1226 }
1227
1228 static int
1229 ice_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
1230               struct rte_pci_device *pci_dev)
1231 {
1232         return rte_eth_dev_pci_generic_probe(pci_dev,
1233                                              sizeof(struct ice_adapter),
1234                                              ice_dev_init);
1235 }
1236
1237 static int
1238 ice_pci_remove(struct rte_pci_device *pci_dev)
1239 {
1240         return rte_eth_dev_pci_generic_remove(pci_dev, ice_dev_uninit);
1241 }
1242
1243 static struct rte_pci_driver rte_ice_pmd = {
1244         .id_table = pci_id_ice_map,
1245         .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
1246                      RTE_PCI_DRV_IOVA_AS_VA,
1247         .probe = ice_pci_probe,
1248         .remove = ice_pci_remove,
1249 };
1250
1251 /**
1252  * Driver initialization routine.
1253  * Invoked once at EAL init time.
1254  * Register itself as the [Poll Mode] Driver of PCI devices.
1255  */
1256 RTE_PMD_REGISTER_PCI(net_ice, rte_ice_pmd);
1257 RTE_PMD_REGISTER_PCI_TABLE(net_ice, pci_id_ice_map);
1258 RTE_PMD_REGISTER_KMOD_DEP(net_ice, "* igb_uio | uio_pci_generic | vfio-pci");
1259 RTE_PMD_REGISTER_PARAM_STRING(net_ice,
1260                               ICE_MAX_QP_NUM "=<int>");
1261
1262 RTE_INIT(ice_init_log)
1263 {
1264         ice_logtype_init = rte_log_register("pmd.net.ice.init");
1265         if (ice_logtype_init >= 0)
1266                 rte_log_set_level(ice_logtype_init, RTE_LOG_NOTICE);
1267         ice_logtype_driver = rte_log_register("pmd.net.ice.driver");
1268         if (ice_logtype_driver >= 0)
1269                 rte_log_set_level(ice_logtype_driver, RTE_LOG_NOTICE);
1270 }