net/ice: support basic Rx/Tx
drivers/net/ice/ice_ethdev.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation
 */

#include <rte_ethdev_pci.h>

#include "base/ice_sched.h"
#include "ice_ethdev.h"
#include "ice_rxtx.h"

#define ICE_MAX_QP_NUM "max_queue_pair_num"
#define ICE_DFLT_OUTER_TAG_TYPE ICE_AQ_VSI_OUTER_TAG_VLAN_9100

int ice_logtype_init;
int ice_logtype_driver;

static int ice_dev_configure(struct rte_eth_dev *dev);
static int ice_dev_start(struct rte_eth_dev *dev);
static void ice_dev_stop(struct rte_eth_dev *dev);
static void ice_dev_close(struct rte_eth_dev *dev);
static int ice_dev_reset(struct rte_eth_dev *dev);
static void ice_dev_info_get(struct rte_eth_dev *dev,
			     struct rte_eth_dev_info *dev_info);
static int ice_link_update(struct rte_eth_dev *dev,
			   int wait_to_complete);

static const struct rte_pci_id pci_id_ice_map[] = {
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_BACKPLANE) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP) },
	{ .vendor_id = 0, /* sentinel */ },
};

static const struct eth_dev_ops ice_eth_dev_ops = {
	.dev_configure                = ice_dev_configure,
	.dev_start                    = ice_dev_start,
	.dev_stop                     = ice_dev_stop,
	.dev_close                    = ice_dev_close,
	.dev_reset                    = ice_dev_reset,
	.rx_queue_start               = ice_rx_queue_start,
	.rx_queue_stop                = ice_rx_queue_stop,
	.tx_queue_start               = ice_tx_queue_start,
	.tx_queue_stop                = ice_tx_queue_stop,
	.rx_queue_setup               = ice_rx_queue_setup,
	.rx_queue_release             = ice_rx_queue_release,
	.tx_queue_setup               = ice_tx_queue_setup,
	.tx_queue_release             = ice_tx_queue_release,
	.dev_infos_get                = ice_dev_info_get,
	.dev_supported_ptypes_get     = ice_dev_supported_ptypes_get,
	.link_update                  = ice_link_update,
	.rxq_info_get                 = ice_rxq_info_get,
	.txq_info_get                 = ice_txq_info_get,
	.rx_queue_count               = ice_rx_queue_count,
};

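/* Size the admin queue and the mailbox queue before ice_init_hw()
 * creates them.
 */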
static void
ice_init_controlq_parameter(struct ice_hw *hw)
{
	/* fields for adminq */
	hw->adminq.num_rq_entries = ICE_ADMINQ_LEN;
	hw->adminq.num_sq_entries = ICE_ADMINQ_LEN;
	hw->adminq.rq_buf_size = ICE_ADMINQ_BUF_SZ;
	hw->adminq.sq_buf_size = ICE_ADMINQ_BUF_SZ;

	/* fields for mailboxq, DPDK acts as the PF host */
	hw->mailboxq.num_rq_entries = ICE_MAILBOXQ_LEN;
	hw->mailboxq.num_sq_entries = ICE_MAILBOXQ_LEN;
	hw->mailboxq.rq_buf_size = ICE_MAILBOXQ_BUF_SZ;
	hw->mailboxq.sq_buf_size = ICE_MAILBOXQ_BUF_SZ;
}

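/* kvargs handler: check that the queue-pair value is a positive decimal
 * integer; returns the parsed number, or -1 on an invalid value.
 */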
static int
ice_check_qp_num(const char *key, const char *qp_value,
		 __rte_unused void *opaque)
{
	char *end = NULL;
	int num = 0;

	while (isblank(*qp_value))
		qp_value++;

	errno = 0; /* clear errno so the post-strtoul check is meaningful */
	num = strtoul(qp_value, &end, 10);

	if (!num || (*end == '-') || errno) {
		PMD_DRV_LOG(WARNING, "invalid value:\"%s\" for key:\"%s\", "
			    "value must be > 0",
			    qp_value, key);
		return -1;
	}

	return num;
}

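/* Parse the "max_queue_pair_num" device argument. The value can be
 * supplied on the EAL command line, e.g. (illustrative PCI address):
 *   -w 0000:01:00.0,max_queue_pair_num=4
 * Returns the parsed value, or 0 when the argument is absent or invalid.
 */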
static int
ice_config_max_queue_pair_num(struct rte_devargs *devargs)
{
	struct rte_kvargs *kvlist;
	const char *queue_num_key = ICE_MAX_QP_NUM;
	int ret;

	if (!devargs)
		return 0;

	kvlist = rte_kvargs_parse(devargs->args, NULL);
	if (!kvlist)
		return 0;

	if (!rte_kvargs_count(kvlist, queue_num_key)) {
		rte_kvargs_free(kvlist);
		return 0;
	}

	/* process the key once; a second pass would only re-parse it */
	ret = rte_kvargs_process(kvlist, queue_num_key,
				 ice_check_qp_num, NULL);
	rte_kvargs_free(kvlist);
	if (ret < 0)
		return 0;

	return ret;
}

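/* Initialize a resource pool covering [base, base + num) with a single
 * free entry spanning the whole range.
 */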
static int
ice_res_pool_init(struct ice_res_pool_info *pool, uint32_t base,
		  uint32_t num)
{
	struct pool_entry *entry;

	if (!pool || !num)
		return -EINVAL;

	entry = rte_zmalloc(NULL, sizeof(*entry), 0);
	if (!entry) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory for resource pool");
		return -ENOMEM;
	}

	/* queue heap initialize */
	pool->num_free = num;
	pool->num_alloc = 0;
	pool->base = base;
	LIST_INIT(&pool->alloc_list);
	LIST_INIT(&pool->free_list);

	/* Initialize the first element to cover the whole range */
	entry->base = 0;
	entry->len = num;

	LIST_INSERT_HEAD(&pool->free_list, entry, next);
	return 0;
}

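/* Allocate 'num' contiguous resources from the pool with a best-fit
 * search of the free list. Returns the absolute base index on success,
 * or a negative errno on failure.
 */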
static int
ice_res_pool_alloc(struct ice_res_pool_info *pool,
		   uint16_t num)
{
	struct pool_entry *entry, *valid_entry;

	if (!pool || !num) {
		PMD_INIT_LOG(ERR, "Invalid parameter");
		return -EINVAL;
	}

	if (pool->num_free < num) {
		PMD_INIT_LOG(ERR, "No resource. ask:%u, available:%u",
			     num, pool->num_free);
		return -ENOMEM;
	}

	valid_entry = NULL;
	/* Look up the free list and find the best-fit entry */
	LIST_FOREACH(entry, &pool->free_list, next) {
		if (entry->len >= num) {
			/* An exact fit is the best one */
			if (entry->len == num) {
				valid_entry = entry;
				break;
			}
			if (!valid_entry ||
			    valid_entry->len > entry->len)
				valid_entry = entry;
		}
	}

	/* No entry found to satisfy the request, return */
	if (!valid_entry) {
		PMD_INIT_LOG(ERR, "No valid entry found");
		return -ENOMEM;
	}
	/**
	 * The entry has exactly the requested number of queues,
	 * remove it from the free list.
	 */
	if (valid_entry->len == num) {
		LIST_REMOVE(valid_entry, next);
	} else {
		/**
		 * The entry has more queues than requested: create a new
		 * entry for alloc_list and shrink the base and length of
		 * the free_list entry accordingly.
		 */
		entry = rte_zmalloc(NULL, sizeof(*entry), 0);
		if (!entry) {
			PMD_INIT_LOG(ERR,
				     "Failed to allocate memory for "
				     "resource pool");
			return -ENOMEM;
		}
		entry->base = valid_entry->base;
		entry->len = num;
		valid_entry->base += num;
		valid_entry->len -= num;
		valid_entry = entry;
	}

	/* Insert it into alloc list, not sorted */
	LIST_INSERT_HEAD(&pool->alloc_list, valid_entry, next);

	pool->num_free -= valid_entry->len;
	pool->num_alloc += valid_entry->len;

	return valid_entry->base + pool->base;
}

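/* Release every entry on both lists and reset the pool. The comma
 * expression in the loop condition caches the next pointer before the
 * current entry is removed and freed, keeping the traversal safe.
 */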
static void
ice_res_pool_destroy(struct ice_res_pool_info *pool)
{
	struct pool_entry *entry, *next_entry;

	if (!pool)
		return;

	for (entry = LIST_FIRST(&pool->alloc_list);
	     entry && (next_entry = LIST_NEXT(entry, next), 1);
	     entry = next_entry) {
		LIST_REMOVE(entry, next);
		rte_free(entry);
	}

	for (entry = LIST_FIRST(&pool->free_list);
	     entry && (next_entry = LIST_NEXT(entry, next), 1);
	     entry = next_entry) {
		LIST_REMOVE(entry, next);
		rte_free(entry);
	}

	pool->num_free = 0;
	pool->num_alloc = 0;
	pool->base = 0;
	LIST_INIT(&pool->alloc_list);
	LIST_INIT(&pool->free_list);
}

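/* Fill the RSS fields of the VSI context: per-VSI LUT, Toeplitz hash
 * scheme and TC override.
 */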
static void
ice_vsi_config_default_rss(struct ice_aqc_vsi_props *info)
{
	/* Set VSI LUT selection */
	info->q_opt_rss = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI &
			  ICE_AQ_VSI_Q_OPT_RSS_LUT_M;
	/* Set Hash scheme */
	info->q_opt_rss |= ICE_AQ_VSI_Q_OPT_RSS_TPLZ &
			   ICE_AQ_VSI_Q_OPT_RSS_HASH_M;
	/* enable TC */
	info->q_opt_tc = ICE_AQ_VSI_Q_OPT_TC_OVR_M;
}

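/* Program the TC-to-queue mapping in the VSI context. The hardware
 * encodes the per-TC queue count as a power-of-two exponent (bsf), so
 * nb_qps is reduced to a power of two before being mapped.
 */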
static enum ice_status
ice_vsi_config_tc_queue_mapping(struct ice_vsi *vsi,
				struct ice_aqc_vsi_props *info,
				uint8_t enabled_tcmap)
{
	uint16_t bsf, qp_idx;

	/* Default to TC 0 for now; multi-TC support will be added later.
	 * Configure TC and queue mapping parameters: for each enabled TC,
	 * allocate qpnum_per_tc queues to this traffic class.
	 */
	if (enabled_tcmap != 0x01) {
		PMD_INIT_LOG(ERR, "only TC0 is supported");
		return -ENOTSUP;
	}

	vsi->nb_qps = RTE_MIN(vsi->nb_qps, ICE_MAX_Q_PER_TC);
	bsf = rte_bsf32(vsi->nb_qps);
	/* Adjust the queue number to the actual queues that can be applied */
	vsi->nb_qps = 0x1 << bsf;

	qp_idx = 0;
	/* Set tc and queue mapping with VSI */
	info->tc_mapping[0] = rte_cpu_to_le_16((qp_idx <<
						ICE_AQ_VSI_TC_Q_OFFSET_S) |
					       (bsf << ICE_AQ_VSI_TC_Q_NUM_S));

	/* Associate queue number with VSI */
	info->mapping_flags |= rte_cpu_to_le_16(ICE_AQ_VSI_Q_MAP_CONTIG);
	info->q_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
	info->q_mapping[1] = rte_cpu_to_le_16(vsi->nb_qps);
	info->valid_sections |=
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
	/* Set the info.ingress_table and info.egress_table
	 * for the UP translate table. For now just set a 1:1 map by default
	 * -- 0b 111 110 101 100 011 010 001 000 == 0xFAC688
	 */
#define ICE_TC_QUEUE_TABLE_DFLT 0x00FAC688
	info->ingress_table  = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT);
	info->egress_table   = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT);
	info->outer_up_table = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT);
	return 0;
}

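/* Validate the LAN MAC address reported by firmware, mirror it into
 * perm_addr and publish it through dev->data->mac_addrs.
 */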
static int
ice_init_mac_address(struct rte_eth_dev *dev)
{
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (!is_unicast_ether_addr
		((struct ether_addr *)hw->port_info[0].mac.lan_addr)) {
		PMD_INIT_LOG(ERR, "Invalid MAC address");
		return -EINVAL;
	}

	ether_addr_copy((struct ether_addr *)hw->port_info[0].mac.lan_addr,
			(struct ether_addr *)hw->port_info[0].mac.perm_addr);

	dev->data->mac_addrs = rte_zmalloc(NULL, sizeof(struct ether_addr), 0);
	if (!dev->data->mac_addrs) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory to store mac address");
		return -ENOMEM;
	}
	/* store it in dev data */
	ether_addr_copy((struct ether_addr *)hw->port_info[0].mac.perm_addr,
			&dev->data->mac_addrs[0]);
	return 0;
}

/* Enable IRQ0 */
static void
ice_pf_enable_irq0(struct ice_hw *hw)
{
	/* reset the registers */
	ICE_WRITE_REG(hw, PFINT_OICR_ENA, 0);
	ICE_READ_REG(hw, PFINT_OICR);

#ifdef ICE_LSE_SPT
	ICE_WRITE_REG(hw, PFINT_OICR_ENA,
		      (uint32_t)(PFINT_OICR_ENA_INT_ENA_M &
				 (~PFINT_OICR_LINK_STAT_CHANGE_M)));

	ICE_WRITE_REG(hw, PFINT_OICR_CTL,
		      (0 & PFINT_OICR_CTL_MSIX_INDX_M) |
		      ((0 << PFINT_OICR_CTL_ITR_INDX_S) &
		       PFINT_OICR_CTL_ITR_INDX_M) |
		      PFINT_OICR_CTL_CAUSE_ENA_M);

	ICE_WRITE_REG(hw, PFINT_FW_CTL,
		      (0 & PFINT_FW_CTL_MSIX_INDX_M) |
		      ((0 << PFINT_FW_CTL_ITR_INDX_S) &
		       PFINT_FW_CTL_ITR_INDX_M) |
		      PFINT_FW_CTL_CAUSE_ENA_M);
#else
	ICE_WRITE_REG(hw, PFINT_OICR_ENA, PFINT_OICR_ENA_INT_ENA_M);
#endif

	ICE_WRITE_REG(hw, GLINT_DYN_CTL(0),
		      GLINT_DYN_CTL_INTENA_M |
		      GLINT_DYN_CTL_CLEARPBA_M |
		      GLINT_DYN_CTL_ITR_INDX_M);

	ice_flush(hw);
}

/* Disable IRQ0 */
static void
ice_pf_disable_irq0(struct ice_hw *hw)
{
	/* Disable all interrupt types */
	ICE_WRITE_REG(hw, GLINT_DYN_CTL(0), GLINT_DYN_CTL_WB_ON_ITR_M);
	ice_flush(hw);
}

#ifdef ICE_LSE_SPT
static void
ice_handle_aq_msg(struct rte_eth_dev *dev)
{
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_ctl_q_info *cq = &hw->adminq;
	struct ice_rq_event_info event;
	uint16_t pending, opcode;
	int ret;

	event.buf_len = ICE_AQ_MAX_BUF_LEN;
	event.msg_buf = rte_zmalloc(NULL, event.buf_len, 0);
	if (!event.msg_buf) {
		PMD_DRV_LOG(ERR, "Failed to allocate mem");
		return;
	}

	pending = 1;
	while (pending) {
		ret = ice_clean_rq_elem(hw, cq, &event, &pending);

		if (ret != ICE_SUCCESS) {
			PMD_DRV_LOG(INFO,
				    "Failed to read msg from AdminQ, "
				    "adminq_err: %u",
				    hw->adminq.sq_last_status);
			break;
		}
		opcode = rte_le_to_cpu_16(event.desc.opcode);

		switch (opcode) {
		case ice_aqc_opc_get_link_status:
			ret = ice_link_update(dev, 0);
			if (!ret)
				_rte_eth_dev_callback_process
					(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
			break;
		default:
			PMD_DRV_LOG(DEBUG, "Request %u is not supported yet",
				    opcode);
			break;
		}
	}
	rte_free(event.msg_buf);
}
#endif

/**
 * Interrupt handler triggered by NIC for handling
 * specific interrupt.
 *
 * @param handle
 *  Pointer to interrupt handle.
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
ice_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t oicr;
	uint32_t reg;
	uint8_t pf_num;
	uint8_t event;
	uint16_t queue;
#ifdef ICE_LSE_SPT
	uint32_t int_fw_ctl;
#endif

	/* Disable interrupt */
	ice_pf_disable_irq0(hw);

	/* read out interrupt causes */
	oicr = ICE_READ_REG(hw, PFINT_OICR);
#ifdef ICE_LSE_SPT
	int_fw_ctl = ICE_READ_REG(hw, PFINT_FW_CTL);
#endif

	/* No interrupt event indicated */
	if (!(oicr & PFINT_OICR_INTEVENT_M)) {
		PMD_DRV_LOG(INFO, "No interrupt event");
		goto done;
	}

#ifdef ICE_LSE_SPT
	if (int_fw_ctl & PFINT_FW_CTL_INTEVENT_M) {
		PMD_DRV_LOG(INFO, "FW_CTL: link state change event");
		ice_handle_aq_msg(dev);
	}
#else
	if (oicr & PFINT_OICR_LINK_STAT_CHANGE_M) {
		PMD_DRV_LOG(INFO, "OICR: link state change event");
		ice_link_update(dev, 0);
	}
#endif

	if (oicr & PFINT_OICR_MAL_DETECT_M) {
		PMD_DRV_LOG(WARNING, "OICR: MDD event");
		reg = ICE_READ_REG(hw, GL_MDET_TX_PQM);
		if (reg & GL_MDET_TX_PQM_VALID_M) {
			pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >>
				 GL_MDET_TX_PQM_PF_NUM_S;
			event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >>
				GL_MDET_TX_PQM_MAL_TYPE_S;
			queue = (reg & GL_MDET_TX_PQM_QNUM_M) >>
				GL_MDET_TX_PQM_QNUM_S;

			PMD_DRV_LOG(WARNING, "Malicious Driver Detection event "
				    "%d by PQM on TX queue %d PF# %d",
				    event, queue, pf_num);
		}

		reg = ICE_READ_REG(hw, GL_MDET_TX_TCLAN);
		if (reg & GL_MDET_TX_TCLAN_VALID_M) {
			pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >>
				 GL_MDET_TX_TCLAN_PF_NUM_S;
			event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >>
				GL_MDET_TX_TCLAN_MAL_TYPE_S;
			queue = (reg & GL_MDET_TX_TCLAN_QNUM_M) >>
				GL_MDET_TX_TCLAN_QNUM_S;

			PMD_DRV_LOG(WARNING, "Malicious Driver Detection event "
				    "%d by TCLAN on TX queue %d PF# %d",
				    event, queue, pf_num);
		}
	}
done:
	/* Enable interrupt */
	ice_pf_enable_irq0(hw);
	rte_intr_enable(dev->intr_handle);
}

/* Initialize SW parameters of PF */
static int
ice_pf_sw_init(struct rte_eth_dev *dev)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	int nb_qp;

	/* parse the devargs only once instead of once per comparison */
	nb_qp = ice_config_max_queue_pair_num(dev->device->devargs);
	if (nb_qp > 0)
		pf->lan_nb_qp_max = (uint16_t)nb_qp;
	else
		pf->lan_nb_qp_max =
			(uint16_t)RTE_MIN(hw->func_caps.common_cap.num_txq,
					  hw->func_caps.common_cap.num_rxq);

	pf->lan_nb_qps = pf->lan_nb_qp_max;

	return 0;
}

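/* Create and configure a VSI of the given type. Only the PF (main) VSI
 * is supported at this stage: it gets default RSS settings, a TC0-only
 * queue mapping and an MSI-X vector range from the PF pool, and is then
 * added to hardware and hooked into the Tx scheduler.
 */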
static struct ice_vsi *
ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_vsi *vsi = NULL;
	struct ice_vsi_ctx vsi_ctx;
	int ret;
	uint16_t max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
	uint8_t tc_bitmap = 0x1;

	/* hw->num_lports = 1 in NIC mode */
	vsi = rte_zmalloc(NULL, sizeof(struct ice_vsi), 0);
	if (!vsi)
		return NULL;

	vsi->idx = pf->next_vsi_idx;
	pf->next_vsi_idx++;
	vsi->type = type;
	vsi->adapter = ICE_PF_TO_ADAPTER(pf);
	vsi->max_macaddrs = ICE_NUM_MACADDR_MAX;
	vsi->vlan_anti_spoof_on = 0;
	vsi->vlan_filter_on = 1;
	TAILQ_INIT(&vsi->mac_list);
	TAILQ_INIT(&vsi->vlan_list);

	memset(&vsi_ctx, 0, sizeof(vsi_ctx));
	/* base_queue is used in the queue mapping of the VSI add/update
	 * command. Assume vsi->base_queue is 0 for now; SRIOV and VMDQ
	 * cases are not considered in this first stage. Only the main VSI.
	 */
	vsi->base_queue = 0;
	switch (type) {
	case ICE_VSI_PF:
		vsi->nb_qps = pf->lan_nb_qps;
		ice_vsi_config_default_rss(&vsi_ctx.info);
		vsi_ctx.alloc_from_pool = true;
		vsi_ctx.flags = ICE_AQ_VSI_TYPE_PF;
		/* switch_id is queried by get_switch_config aq, which is done
		 * by ice_init_hw
		 */
		vsi_ctx.info.sw_id = hw->port_info->sw_id;
		vsi_ctx.info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
		/* Allow all untagged or tagged packets */
		vsi_ctx.info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL;
		vsi_ctx.info.vlan_flags |= ICE_AQ_VSI_VLAN_EMOD_NOTHING;
		vsi_ctx.info.q_opt_rss = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF |
					 ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
		/* Enable VLAN/UP trip */
		ret = ice_vsi_config_tc_queue_mapping(vsi,
						      &vsi_ctx.info,
						      ICE_DEFAULT_TCMAP);
		if (ret) {
			PMD_INIT_LOG(ERR,
				     "tc queue mapping with vsi failed, "
				     "err = %d",
				     ret);
			goto fail_mem;
		}

		break;
	default:
		/* for other types of VSI */
		PMD_INIT_LOG(ERR, "other types of VSI not supported");
		goto fail_mem;
	}

	/* VF has MSIX interrupt in VF range, don't allocate here */
	if (type == ICE_VSI_PF) {
		ret = ice_res_pool_alloc(&pf->msix_pool,
					 RTE_MIN(vsi->nb_qps,
						 RTE_MAX_RXTX_INTR_VEC_ID));
		if (ret < 0) {
			PMD_INIT_LOG(ERR, "VSI MAIN %d get heap failed %d",
				     vsi->vsi_id, ret);
		}
		vsi->msix_intr = ret;
		vsi->nb_msix = RTE_MIN(vsi->nb_qps, RTE_MAX_RXTX_INTR_VEC_ID);
	} else {
		vsi->msix_intr = 0;
		vsi->nb_msix = 0;
	}
	ret = ice_add_vsi(hw, vsi->idx, &vsi_ctx, NULL);
	if (ret != ICE_SUCCESS) {
		PMD_INIT_LOG(ERR, "add vsi failed, err = %d", ret);
		goto fail_mem;
	}
	/* store VSI information in the SW structure */
	vsi->vsi_id = vsi_ctx.vsi_num;
	vsi->info = vsi_ctx.info;
	pf->vsis_allocated = vsi_ctx.vsis_allocd;
	pf->vsis_unallocated = vsi_ctx.vsis_unallocated;

	/* At the beginning, only TC0. */
	/* What we need here is the maximum number of the Tx queues.
	 * Currently vsi->nb_qps holds it.
	 * Correct it if that changes.
	 */
	max_txqs[0] = vsi->nb_qps;
	ret = ice_cfg_vsi_lan(hw->port_info, vsi->idx,
			      tc_bitmap, max_txqs);
	if (ret != ICE_SUCCESS)
		PMD_INIT_LOG(ERR, "Failed to config vsi sched");

	return vsi;
fail_mem:
	rte_free(vsi);
	pf->next_vsi_idx--;
	return NULL;
}

static int
ice_pf_setup(struct ice_pf *pf)
{
	struct ice_vsi *vsi;

	/* Clear all stats counters */
	pf->offset_loaded = FALSE;
	memset(&pf->stats, 0, sizeof(struct ice_hw_port_stats));
	memset(&pf->stats_offset, 0, sizeof(struct ice_hw_port_stats));
	memset(&pf->internal_stats, 0, sizeof(struct ice_eth_stats));
	memset(&pf->internal_stats_offset, 0, sizeof(struct ice_eth_stats));

	vsi = ice_setup_vsi(pf, ICE_VSI_PF);
	if (!vsi) {
		PMD_INIT_LOG(ERR, "Failed to add vsi for PF");
		return -EINVAL;
	}

	pf->main_vsi = vsi;

	return 0;
}

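/* Device initialization: set up the ops and burst functions, map BAR0,
 * bring up the HW (control queues, capabilities), then the SW context
 * (MAC address, MSI-X pool, main VSI), and finally the interrupt
 * callback for the misc/admin vector.
 */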
static int
ice_dev_init(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev;
	struct rte_intr_handle *intr_handle;
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	int ret;

	dev->dev_ops = &ice_eth_dev_ops;
	dev->rx_pkt_burst = ice_recv_pkts;
	dev->tx_pkt_burst = ice_xmit_pkts;
	dev->tx_pkt_prepare = ice_prep_pkts;

	ice_set_default_ptype_table(dev);
	pci_dev = RTE_DEV_TO_PCI(dev->device);
	intr_handle = &pci_dev->intr_handle;

	pf->adapter = ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	pf->adapter->eth_dev = dev;
	pf->dev_data = dev->data;
	hw->back = pf->adapter;
	hw->hw_addr = (uint8_t *)pci_dev->mem_resource[0].addr;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->device_id = pci_dev->id.device_id;
	hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
	hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
	hw->bus.device = pci_dev->addr.devid;
	hw->bus.func = pci_dev->addr.function;

	ice_init_controlq_parameter(hw);

	ret = ice_init_hw(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to initialize HW");
		return -EINVAL;
	}

	PMD_INIT_LOG(INFO, "FW %d.%d.%05d API %d.%d",
		     hw->fw_maj_ver, hw->fw_min_ver, hw->fw_build,
		     hw->api_maj_ver, hw->api_min_ver);

	ice_pf_sw_init(dev);
	ret = ice_init_mac_address(dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to initialize mac address");
		goto err_init_mac;
	}

	ret = ice_res_pool_init(&pf->msix_pool, 1,
				hw->func_caps.common_cap.num_msix_vectors - 1);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to init MSIX pool");
		goto err_msix_pool_init;
	}

	ret = ice_pf_setup(pf);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to setup PF");
		goto err_pf_setup;
	}

	/* register callback func to eal lib */
	rte_intr_callback_register(intr_handle,
				   ice_interrupt_handler, dev);

	ice_pf_enable_irq0(hw);

	/* enable uio intr after callback register */
	rte_intr_enable(intr_handle);

	return 0;

err_pf_setup:
	ice_res_pool_destroy(&pf->msix_pool);
err_msix_pool_init:
	rte_free(dev->data->mac_addrs);
err_init_mac:
	ice_sched_cleanup_all(hw);
	rte_free(hw->port_info);
	ice_shutdown_all_ctrlq(hw);

	return ret;
}

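/* Free a VSI in hardware via the admin queue and release its SW
 * representation.
 */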
static int
ice_release_vsi(struct ice_vsi *vsi)
{
	struct ice_hw *hw;
	struct ice_vsi_ctx vsi_ctx;
	enum ice_status ret;

	if (!vsi)
		return 0;

	hw = ICE_VSI_TO_HW(vsi);

	memset(&vsi_ctx, 0, sizeof(vsi_ctx));

	vsi_ctx.vsi_num = vsi->vsi_id;
	vsi_ctx.info = vsi->info;
	ret = ice_free_vsi(hw, vsi->idx, &vsi_ctx, false, NULL);
	if (ret != ICE_SUCCESS) {
		PMD_INIT_LOG(ERR, "Failed to free vsi by aq, %u", vsi->vsi_id);
		rte_free(vsi);
		return -1;
	}

	rte_free(vsi);
	return 0;
}

static void
ice_dev_stop(struct rte_eth_dev *dev)
{
	struct rte_eth_dev_data *data = dev->data;
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	uint16_t i;

	/* avoid stopping again */
	if (pf->adapter_stopped)
		return;

	/* stop and clear all Rx queues */
	for (i = 0; i < data->nb_rx_queues; i++)
		ice_rx_queue_stop(dev, i);

	/* stop and clear all Tx queues */
	for (i = 0; i < data->nb_tx_queues; i++)
		ice_tx_queue_stop(dev, i);

	/* Clear all queues and release mbufs */
	ice_clear_queues(dev);

	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
	if (intr_handle->intr_vec) {
		rte_free(intr_handle->intr_vec);
		intr_handle->intr_vec = NULL;
	}

	pf->adapter_stopped = true;
}

static void
ice_dev_close(struct rte_eth_dev *dev)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	ice_dev_stop(dev);

	/* release all queue resource */
	ice_free_queues(dev);

	ice_res_pool_destroy(&pf->msix_pool);
	ice_release_vsi(pf->main_vsi);

	ice_shutdown_all_ctrlq(hw);
}

static int
ice_dev_uninit(struct rte_eth_dev *dev)
{
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	ice_dev_close(dev);

	dev->dev_ops = NULL;
	dev->rx_pkt_burst = NULL;
	dev->tx_pkt_burst = NULL;

	rte_free(dev->data->mac_addrs);
	dev->data->mac_addrs = NULL;

	/* disable uio intr before callback unregister */
	rte_intr_disable(intr_handle);

	/* unregister callback func from eal lib */
	rte_intr_callback_unregister(intr_handle,
				     ice_interrupt_handler, dev);

	ice_release_vsi(pf->main_vsi);
	ice_sched_cleanup_all(hw);
	rte_free(hw->port_info);
	ice_shutdown_all_ctrlq(hw);

	return 0;
}

static int
ice_dev_configure(struct rte_eth_dev *dev)
{
	struct ice_adapter *ad =
		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);

	/* Initialize to TRUE. If any Rx queue fails to meet the bulk
	 * allocation or vector Rx preconditions, it will be reset.
	 */
	ad->rx_bulk_alloc_allowed = true;
	ad->tx_simple_allowed = true;

	return 0;
}

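/* Apply the RSS configuration: program the hash key (user-provided or
 * random) and spread the lookup table across the configured Rx queues
 * round-robin.
 */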
static int ice_init_rss(struct ice_pf *pf)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_vsi *vsi = pf->main_vsi;
	struct rte_eth_dev *dev = pf->adapter->eth_dev;
	struct rte_eth_rss_conf *rss_conf;
	struct ice_aqc_get_set_rss_keys key;
	uint16_t i, nb_q;
	int ret = 0;

	rss_conf = &dev->data->dev_conf.rx_adv_conf.rss_conf;
	nb_q = dev->data->nb_rx_queues;
	vsi->rss_key_size = ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE;
	vsi->rss_lut_size = hw->func_caps.common_cap.rss_table_size;

	if (!vsi->rss_key)
		vsi->rss_key = rte_zmalloc(NULL,
					   vsi->rss_key_size, 0);
	if (!vsi->rss_lut)
		vsi->rss_lut = rte_zmalloc(NULL,
					   vsi->rss_lut_size, 0);
	/* an allocation may have failed; don't dereference NULL below */
	if (!vsi->rss_key || !vsi->rss_lut)
		return -ENOMEM;

	/* configure RSS key */
	if (!rss_conf->rss_key) {
		/* Generate a random default hash key */
		for (i = 0; i < vsi->rss_key_size; i++)
			vsi->rss_key[i] = (uint8_t)rte_rand();
	} else {
		rte_memcpy(vsi->rss_key, rss_conf->rss_key,
			   RTE_MIN(rss_conf->rss_key_len,
				   vsi->rss_key_size));
	}
	rte_memcpy(key.standard_rss_key, vsi->rss_key, vsi->rss_key_size);
	ret = ice_aq_set_rss_key(hw, vsi->idx, &key);
	if (ret)
		return -EINVAL;

	/* init RSS LUT table */
	for (i = 0; i < vsi->rss_lut_size; i++)
		vsi->rss_lut[i] = i % nb_q;

	ret = ice_aq_set_rss_lut(hw, vsi->idx,
				 ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF,
				 vsi->rss_lut, vsi->rss_lut_size);
	if (ret)
		return -EINVAL;

	return 0;
}

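/* Start the device: program and enable all Tx then Rx queue contexts,
 * set up RSS, select the Rx burst function, set the link event mask
 * and refresh the link status.
 */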
static int
ice_dev_start(struct rte_eth_dev *dev)
{
	struct rte_eth_dev_data *data = dev->data;
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	uint16_t nb_rxq = 0;
	uint16_t nb_txq, i;
	int ret;

	/* program Tx queues' context in hardware */
	for (nb_txq = 0; nb_txq < data->nb_tx_queues; nb_txq++) {
		ret = ice_tx_queue_start(dev, nb_txq);
		if (ret) {
			PMD_DRV_LOG(ERR, "fail to start Tx queue %u", nb_txq);
			goto tx_err;
		}
	}

	/* program Rx queues' context in hardware */
	for (nb_rxq = 0; nb_rxq < data->nb_rx_queues; nb_rxq++) {
		ret = ice_rx_queue_start(dev, nb_rxq);
		if (ret) {
			PMD_DRV_LOG(ERR, "fail to start Rx queue %u", nb_rxq);
			goto rx_err;
		}
	}

	ret = ice_init_rss(pf);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to enable rss for PF");
		goto rx_err;
	}

	ice_set_rx_function(dev);

	ret = ice_aq_set_event_mask(hw, hw->port_info->lport,
				    ((u16)(ICE_AQ_LINK_EVENT_LINK_FAULT |
				     ICE_AQ_LINK_EVENT_PHY_TEMP_ALARM |
				     ICE_AQ_LINK_EVENT_EXCESSIVE_ERRORS |
				     ICE_AQ_LINK_EVENT_SIGNAL_DETECT |
				     ICE_AQ_LINK_EVENT_AN_COMPLETED |
				     ICE_AQ_LINK_EVENT_PORT_TX_SUSPENDED)),
				     NULL);
	if (ret != ICE_SUCCESS)
		PMD_DRV_LOG(WARNING, "Fail to set phy mask");

	/* Call the get_link_info aq command to enable/disable LSE */
	ice_link_update(dev, 0);

	pf->adapter_stopped = false;

	return 0;

	/* stop the started queues if we failed to start them all */
rx_err:
	for (i = 0; i < nb_rxq; i++)
		ice_rx_queue_stop(dev, i);
tx_err:
	for (i = 0; i < nb_txq; i++)
		ice_tx_queue_stop(dev, i);

	return -EIO;
}

static int
ice_dev_reset(struct rte_eth_dev *dev)
{
	int ret;

	if (dev->data->sriov.active)
		return -ENOTSUP;

	ret = ice_dev_uninit(dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "failed to uninit device, status = %d", ret);
		return -ENXIO;
	}

	ret = ice_dev_init(dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "failed to init device, status = %d", ret);
		return -ENXIO;
	}

	return 0;
}

static void
ice_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_vsi *vsi = pf->main_vsi;
	struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);

	dev_info->min_rx_bufsize = ICE_BUF_SIZE_MIN;
	dev_info->max_rx_pktlen = ICE_FRAME_SIZE_MAX;
	dev_info->max_rx_queues = vsi->nb_qps;
	dev_info->max_tx_queues = vsi->nb_qps;
	dev_info->max_mac_addrs = vsi->max_macaddrs;
	dev_info->max_vfs = pci_dev->max_vfs;

	dev_info->rx_offload_capa =
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM |
		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
		DEV_RX_OFFLOAD_KEEP_CRC;
	dev_info->tx_offload_capa =
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM |
		DEV_TX_OFFLOAD_SCTP_CKSUM |
		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
		DEV_TX_OFFLOAD_TCP_TSO |
		DEV_TX_OFFLOAD_MULTI_SEGS;
	dev_info->rx_queue_offload_capa = 0;
	dev_info->tx_queue_offload_capa = 0;

	dev_info->reta_size = hw->func_caps.common_cap.rss_table_size;
	dev_info->hash_key_size = (VSIQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_thresh = {
			.pthresh = ICE_DEFAULT_RX_PTHRESH,
			.hthresh = ICE_DEFAULT_RX_HTHRESH,
			.wthresh = ICE_DEFAULT_RX_WTHRESH,
		},
		.rx_free_thresh = ICE_DEFAULT_RX_FREE_THRESH,
		.rx_drop_en = 0,
		.offloads = 0,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_thresh = {
			.pthresh = ICE_DEFAULT_TX_PTHRESH,
			.hthresh = ICE_DEFAULT_TX_HTHRESH,
			.wthresh = ICE_DEFAULT_TX_WTHRESH,
		},
		.tx_free_thresh = ICE_DEFAULT_TX_FREE_THRESH,
		.tx_rs_thresh = ICE_DEFAULT_TX_RSBIT_THRESH,
		.offloads = 0,
	};

	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = ICE_MAX_RING_DESC,
		.nb_min = ICE_MIN_RING_DESC,
		.nb_align = ICE_ALIGN_RING_DESC,
	};

	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = ICE_MAX_RING_DESC,
		.nb_min = ICE_MIN_RING_DESC,
		.nb_align = ICE_ALIGN_RING_DESC,
	};

	dev_info->speed_capa = ETH_LINK_SPEED_10M |
			       ETH_LINK_SPEED_100M |
			       ETH_LINK_SPEED_1G |
			       ETH_LINK_SPEED_2_5G |
			       ETH_LINK_SPEED_5G |
			       ETH_LINK_SPEED_10G |
			       ETH_LINK_SPEED_20G |
			       ETH_LINK_SPEED_25G |
			       ETH_LINK_SPEED_40G;

	dev_info->nb_rx_queues = dev->data->nb_rx_queues;
	dev_info->nb_tx_queues = dev->data->nb_tx_queues;

	dev_info->default_rxportconf.burst_size = ICE_RX_MAX_BURST;
	dev_info->default_txportconf.burst_size = ICE_TX_MAX_BURST;
	dev_info->default_rxportconf.nb_queues = 1;
	dev_info->default_txportconf.nb_queues = 1;
	dev_info->default_rxportconf.ring_size = ICE_BUF_SIZE_MIN;
	dev_info->default_txportconf.ring_size = ICE_BUF_SIZE_MIN;
}

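/* Read/write dev->data->dev_link atomically. struct rte_eth_link fits
 * in 64 bits, so a single rte_atomic64_cmpset() swaps the whole struct;
 * a return of -1 means the value changed under us.
 */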
static inline int
ice_atomic_read_link_status(struct rte_eth_dev *dev,
			    struct rte_eth_link *link)
{
	struct rte_eth_link *dst = link;
	struct rte_eth_link *src = &dev->data->dev_link;

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
				*(uint64_t *)src) == 0)
		return -1;

	return 0;
}

static inline int
ice_atomic_write_link_status(struct rte_eth_dev *dev,
			     struct rte_eth_link *link)
{
	struct rte_eth_link *dst = &dev->data->dev_link;
	struct rte_eth_link *src = link;

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
				*(uint64_t *)src) == 0)
		return -1;

	return 0;
}

static int
ice_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
#define CHECK_INTERVAL 100  /* 100ms */
#define MAX_REPEAT_TIME 10  /* 1s (10 * 100ms) in total */
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_link_status link_status;
	struct rte_eth_link link, old;
	int status;
	unsigned int rep_cnt = MAX_REPEAT_TIME;
	bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false;

	memset(&link, 0, sizeof(link));
	memset(&old, 0, sizeof(old));
	memset(&link_status, 0, sizeof(link_status));
	ice_atomic_read_link_status(dev, &old);

	do {
		/* Get link status information from hardware */
		status = ice_aq_get_link_info(hw->port_info, enable_lse,
					      &link_status, NULL);
		if (status != ICE_SUCCESS) {
			link.link_speed = ETH_SPEED_NUM_100M;
			link.link_duplex = ETH_LINK_FULL_DUPLEX;
			PMD_DRV_LOG(ERR, "Failed to get link info");
			goto out;
		}

		link.link_status = link_status.link_info & ICE_AQ_LINK_UP;
		if (!wait_to_complete || link.link_status)
			break;

		rte_delay_ms(CHECK_INTERVAL);
	} while (--rep_cnt);

	if (!link.link_status)
		goto out;

	/* Full-duplex operation at all supported speeds */
	link.link_duplex = ETH_LINK_FULL_DUPLEX;

	/* Parse the link status */
	switch (link_status.link_speed) {
	case ICE_AQ_LINK_SPEED_10MB:
		link.link_speed = ETH_SPEED_NUM_10M;
		break;
	case ICE_AQ_LINK_SPEED_100MB:
		link.link_speed = ETH_SPEED_NUM_100M;
		break;
	case ICE_AQ_LINK_SPEED_1000MB:
		link.link_speed = ETH_SPEED_NUM_1G;
		break;
	case ICE_AQ_LINK_SPEED_2500MB:
		link.link_speed = ETH_SPEED_NUM_2_5G;
		break;
	case ICE_AQ_LINK_SPEED_5GB:
		link.link_speed = ETH_SPEED_NUM_5G;
		break;
	case ICE_AQ_LINK_SPEED_10GB:
		link.link_speed = ETH_SPEED_NUM_10G;
		break;
	case ICE_AQ_LINK_SPEED_20GB:
		link.link_speed = ETH_SPEED_NUM_20G;
		break;
	case ICE_AQ_LINK_SPEED_25GB:
		link.link_speed = ETH_SPEED_NUM_25G;
		break;
	case ICE_AQ_LINK_SPEED_40GB:
		link.link_speed = ETH_SPEED_NUM_40G;
		break;
	case ICE_AQ_LINK_SPEED_UNKNOWN:
	default:
		PMD_DRV_LOG(ERR, "Unknown link speed");
		link.link_speed = ETH_SPEED_NUM_NONE;
		break;
	}

	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
			      ETH_LINK_SPEED_FIXED);

out:
	ice_atomic_write_link_status(dev, &link);
	if (link.link_status == old.link_status)
		return -1;

	return 0;
}

static int
ice_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	      struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
					     sizeof(struct ice_adapter),
					     ice_dev_init);
}

static int
ice_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, ice_dev_uninit);
}

static struct rte_pci_driver rte_ice_pmd = {
	.id_table = pci_id_ice_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
		     RTE_PCI_DRV_IOVA_AS_VA,
	.probe = ice_pci_probe,
	.remove = ice_pci_remove,
};

/**
 * Driver initialization routine.
 * Invoked once at EAL init time.
 * Register itself as the [Poll Mode] Driver of PCI devices.
 */
RTE_PMD_REGISTER_PCI(net_ice, rte_ice_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_ice, pci_id_ice_map);
RTE_PMD_REGISTER_KMOD_DEP(net_ice, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(net_ice,
			      ICE_MAX_QP_NUM "=<int>");

RTE_INIT(ice_init_log)
{
	ice_logtype_init = rte_log_register("pmd.net.ice.init");
	if (ice_logtype_init >= 0)
		rte_log_set_level(ice_logtype_init, RTE_LOG_NOTICE);
	ice_logtype_driver = rte_log_register("pmd.net.ice.driver");
	if (ice_logtype_driver >= 0)
		rte_log_set_level(ice_logtype_driver, RTE_LOG_NOTICE);
}