net/ice: support MTU setting
[dpdk.git] drivers/net/ice/ice_ethdev.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018 Intel Corporation
3  */
4
5 #include <rte_ethdev_pci.h>
6
7 #include "base/ice_sched.h"
8 #include "ice_ethdev.h"
9 #include "ice_rxtx.h"
10
11 #define ICE_MAX_QP_NUM "max_queue_pair_num"
12 #define ICE_DFLT_OUTER_TAG_TYPE ICE_AQ_VSI_OUTER_TAG_VLAN_9100
13
14 int ice_logtype_init;
15 int ice_logtype_driver;
16
17 static int ice_dev_configure(struct rte_eth_dev *dev);
18 static int ice_dev_start(struct rte_eth_dev *dev);
19 static void ice_dev_stop(struct rte_eth_dev *dev);
20 static void ice_dev_close(struct rte_eth_dev *dev);
21 static int ice_dev_reset(struct rte_eth_dev *dev);
22 static void ice_dev_info_get(struct rte_eth_dev *dev,
23                              struct rte_eth_dev_info *dev_info);
24 static int ice_link_update(struct rte_eth_dev *dev,
25                            int wait_to_complete);
26 static int ice_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
27
28 static const struct rte_pci_id pci_id_ice_map[] = {
29         { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_BACKPLANE) },
30         { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP) },
31         { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP) },
32         { .vendor_id = 0, /* sentinel */ },
33 };
34
35 static const struct eth_dev_ops ice_eth_dev_ops = {
36         .dev_configure                = ice_dev_configure,
37         .dev_start                    = ice_dev_start,
38         .dev_stop                     = ice_dev_stop,
39         .dev_close                    = ice_dev_close,
40         .dev_reset                    = ice_dev_reset,
41         .rx_queue_start               = ice_rx_queue_start,
42         .rx_queue_stop                = ice_rx_queue_stop,
43         .tx_queue_start               = ice_tx_queue_start,
44         .tx_queue_stop                = ice_tx_queue_stop,
45         .rx_queue_setup               = ice_rx_queue_setup,
46         .rx_queue_release             = ice_rx_queue_release,
47         .tx_queue_setup               = ice_tx_queue_setup,
48         .tx_queue_release             = ice_tx_queue_release,
49         .dev_infos_get                = ice_dev_info_get,
50         .dev_supported_ptypes_get     = ice_dev_supported_ptypes_get,
51         .link_update                  = ice_link_update,
52         .mtu_set                      = ice_mtu_set,
53         .rxq_info_get                 = ice_rxq_info_get,
54         .txq_info_get                 = ice_txq_info_get,
55         .rx_queue_count               = ice_rx_queue_count,
56 };
57
58 static void
59 ice_init_controlq_parameter(struct ice_hw *hw)
60 {
61         /* fields for adminq */
62         hw->adminq.num_rq_entries = ICE_ADMINQ_LEN;
63         hw->adminq.num_sq_entries = ICE_ADMINQ_LEN;
64         hw->adminq.rq_buf_size = ICE_ADMINQ_BUF_SZ;
65         hw->adminq.sq_buf_size = ICE_ADMINQ_BUF_SZ;
66
67         /* fields for mailboxq; DPDK is used as the PF host */
68         hw->mailboxq.num_rq_entries = ICE_MAILBOXQ_LEN;
69         hw->mailboxq.num_sq_entries = ICE_MAILBOXQ_LEN;
70         hw->mailboxq.rq_buf_size = ICE_MAILBOXQ_BUF_SZ;
71         hw->mailboxq.sq_buf_size = ICE_MAILBOXQ_BUF_SZ;
72 }
73
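/* Validate the "max_queue_pair_num" devargs value: skip leading blanks and
 * parse a decimal number, rejecting zero and malformed input.
 * Returns the parsed value on success, -1 on error.
 */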
74 static int
75 ice_check_qp_num(const char *key, const char *qp_value,
76                  __rte_unused void *opaque)
77 {
78         char *end = NULL;
79         int num = 0;
80
81         while (isblank(*qp_value))
82                 qp_value++;
83
84         num = strtoul(qp_value, &end, 10);
85
86         if (!num || (*end == '-') || errno) {
87                 PMD_DRV_LOG(WARNING, "invalid value:\"%s\" for key:\"%s\", "
88                             "value must be > 0",
89                             qp_value, key);
90                 return -1;
91         }
92
93         return num;
94 }
95
96 static int
97 ice_config_max_queue_pair_num(struct rte_devargs *devargs)
98 {
99         struct rte_kvargs *kvlist;
100         const char *queue_num_key = ICE_MAX_QP_NUM;
101         int ret;
102
103         if (!devargs)
104                 return 0;
105
106         kvlist = rte_kvargs_parse(devargs->args, NULL);
107         if (!kvlist)
108                 return 0;
109
110         if (!rte_kvargs_count(kvlist, queue_num_key)) {
111                 rte_kvargs_free(kvlist);
112                 return 0;
113         }
114
115         if (rte_kvargs_process(kvlist, queue_num_key,
116                                ice_check_qp_num, NULL) < 0) {
117                 rte_kvargs_free(kvlist);
118                 return 0;
119         }
120         ret = rte_kvargs_process(kvlist, queue_num_key,
121                                  ice_check_qp_num, NULL);
122         rte_kvargs_free(kvlist);
123
124         return ret;
125 }
126
127 static int
128 ice_res_pool_init(struct ice_res_pool_info *pool, uint32_t base,
129                   uint32_t num)
130 {
131         struct pool_entry *entry;
132
133         if (!pool || !num)
134                 return -EINVAL;
135
136         entry = rte_zmalloc(NULL, sizeof(*entry), 0);
137         if (!entry) {
138                 PMD_INIT_LOG(ERR,
139                              "Failed to allocate memory for resource pool");
140                 return -ENOMEM;
141         }
142
143         /* Initialize the queue heap */
144         pool->num_free = num;
145         pool->num_alloc = 0;
146         pool->base = base;
147         LIST_INIT(&pool->alloc_list);
148         LIST_INIT(&pool->free_list);
149
150         /* Initialize the first element covering the whole range */
151         entry->base = 0;
152         entry->len = num;
153
154         LIST_INSERT_HEAD(&pool->free_list, entry, next);
155         return 0;
156 }
157
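/* Allocate 'num' contiguous entries from the resource pool using a best-fit
 * search of the free list. Returns the absolute base index
 * (pool->base + entry base) on success, or a negative errno on failure.
 */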
158 static int
159 ice_res_pool_alloc(struct ice_res_pool_info *pool,
160                    uint16_t num)
161 {
162         struct pool_entry *entry, *valid_entry;
163
164         if (!pool || !num) {
165                 PMD_INIT_LOG(ERR, "Invalid parameter");
166                 return -EINVAL;
167         }
168
169         if (pool->num_free < num) {
170                 PMD_INIT_LOG(ERR, "No resource. ask:%u, available:%u",
171                              num, pool->num_free);
172                 return -ENOMEM;
173         }
174
175         valid_entry = NULL;
176         /* Look up the free list and find the best-fit entry */
177         LIST_FOREACH(entry, &pool->free_list, next) {
178                 if (entry->len >= num) {
179                         /* Find best one */
180                         if (entry->len == num) {
181                                 valid_entry = entry;
182                                 break;
183                         }
184                         if (!valid_entry ||
185                             valid_entry->len > entry->len)
186                                 valid_entry = entry;
187                 }
188         }
189
190         /* No entry found to satisfy the request, return */
191         if (!valid_entry) {
192                 PMD_INIT_LOG(ERR, "No valid entry found");
193                 return -ENOMEM;
194         }
195         /**
196          * The entry has exactly the requested number of queues,
197          * remove it from the free_list.
198          */
199         if (valid_entry->len == num) {
200                 LIST_REMOVE(valid_entry, next);
201         } else {
202                 /**
203                  * The entry has more queues than requested,
204                  * create a new entry for the alloc_list and reduce the
205                  * queue base and number of the entry left in the free_list.
206                  */
207                 entry = rte_zmalloc(NULL, sizeof(*entry), 0);
208                 if (!entry) {
209                         PMD_INIT_LOG(ERR,
210                                      "Failed to allocate memory for "
211                                      "resource pool");
212                         return -ENOMEM;
213                 }
214                 entry->base = valid_entry->base;
215                 entry->len = num;
216                 valid_entry->base += num;
217                 valid_entry->len -= num;
218                 valid_entry = entry;
219         }
220
221         /* Insert it into alloc list, not sorted */
222         LIST_INSERT_HEAD(&pool->alloc_list, valid_entry, next);
223
224         pool->num_free -= valid_entry->len;
225         pool->num_alloc += valid_entry->len;
226
227         return valid_entry->base + pool->base;
228 }
229
230 static void
231 ice_res_pool_destroy(struct ice_res_pool_info *pool)
232 {
233         struct pool_entry *entry, *next_entry;
234
235         if (!pool)
236                 return;
237
238         for (entry = LIST_FIRST(&pool->alloc_list);
239              entry && (next_entry = LIST_NEXT(entry, next), 1);
240              entry = next_entry) {
241                 LIST_REMOVE(entry, next);
242                 rte_free(entry);
243         }
244
245         for (entry = LIST_FIRST(&pool->free_list);
246              entry && (next_entry = LIST_NEXT(entry, next), 1);
247              entry = next_entry) {
248                 LIST_REMOVE(entry, next);
249                 rte_free(entry);
250         }
251
252         pool->num_free = 0;
253         pool->num_alloc = 0;
254         pool->base = 0;
255         LIST_INIT(&pool->alloc_list);
256         LIST_INIT(&pool->free_list);
257 }
258
259 static void
260 ice_vsi_config_default_rss(struct ice_aqc_vsi_props *info)
261 {
262         /* Set VSI LUT selection */
263         info->q_opt_rss = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI &
264                           ICE_AQ_VSI_Q_OPT_RSS_LUT_M;
265         /* Set Hash scheme */
266         info->q_opt_rss |= ICE_AQ_VSI_Q_OPT_RSS_TPLZ &
267                            ICE_AQ_VSI_Q_OPT_RSS_HASH_M;
268         /* enable TC */
269         info->q_opt_tc = ICE_AQ_VSI_Q_OPT_TC_OVR_M;
270 }
271
272 static enum ice_status
273 ice_vsi_config_tc_queue_mapping(struct ice_vsi *vsi,
274                                 struct ice_aqc_vsi_props *info,
275                                 uint8_t enabled_tcmap)
276 {
277         uint16_t bsf, qp_idx;
278
279         /* Default is TC0 only for now; multi-TC support needs to be added
280          * later. Configure TC and queue mapping parameters: for each
281          * enabled TC, allocate qpnum_per_tc queues to that traffic class.
282          */
283         if (enabled_tcmap != 0x01) {
284                 PMD_INIT_LOG(ERR, "only TC0 is supported");
285                 return -ENOTSUP;
286         }
287
288         vsi->nb_qps = RTE_MIN(vsi->nb_qps, ICE_MAX_Q_PER_TC);
289         bsf = rte_bsf32(vsi->nb_qps);
290         /* Adjust the queue number to the actual number of queues that can be applied */
291         vsi->nb_qps = 0x1 << bsf;
292
293         qp_idx = 0;
294         /* Set tc and queue mapping with VSI */
295         info->tc_mapping[0] = rte_cpu_to_le_16((qp_idx <<
296                                                 ICE_AQ_VSI_TC_Q_OFFSET_S) |
297                                                (bsf << ICE_AQ_VSI_TC_Q_NUM_S));
298
299         /* Associate queue number with VSI */
300         info->mapping_flags |= rte_cpu_to_le_16(ICE_AQ_VSI_Q_MAP_CONTIG);
301         info->q_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
302         info->q_mapping[1] = rte_cpu_to_le_16(vsi->nb_qps);
303         info->valid_sections |=
304                 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
305         /* Set info.ingress_table and info.egress_table
306          * for the UP translate table. For now just use a 1:1 map by default
307          * -- 0b 111 110 101 100 011 010 001 000 == 0xFAC688
308          */
309 #define ICE_TC_QUEUE_TABLE_DFLT 0x00FAC688
310         info->ingress_table  = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT);
311         info->egress_table   = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT);
312         info->outer_up_table = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT);
313         return 0;
314 }
315
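/* Validate the MAC address reported by the hardware, save it as the
 * permanent address and store a copy in dev->data->mac_addrs[0].
 */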
316 static int
317 ice_init_mac_address(struct rte_eth_dev *dev)
318 {
319         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
320
321         if (!is_unicast_ether_addr
322                 ((struct ether_addr *)hw->port_info[0].mac.lan_addr)) {
323                 PMD_INIT_LOG(ERR, "Invalid MAC address");
324                 return -EINVAL;
325         }
326
327         ether_addr_copy((struct ether_addr *)hw->port_info[0].mac.lan_addr,
328                         (struct ether_addr *)hw->port_info[0].mac.perm_addr);
329
330         dev->data->mac_addrs = rte_zmalloc(NULL, sizeof(struct ether_addr), 0);
331         if (!dev->data->mac_addrs) {
332                 PMD_INIT_LOG(ERR,
333                              "Failed to allocate memory to store mac address");
334                 return -ENOMEM;
335         }
336         /* Store it in the device data */
337         ether_addr_copy((struct ether_addr *)hw->port_info[0].mac.perm_addr,
338                         &dev->data->mac_addrs[0]);
339         return 0;
340 }
341
342 /* Enable IRQ0 */
343 static void
344 ice_pf_enable_irq0(struct ice_hw *hw)
345 {
346         /* reset the registers */
347         ICE_WRITE_REG(hw, PFINT_OICR_ENA, 0);
348         ICE_READ_REG(hw, PFINT_OICR);
349
350 #ifdef ICE_LSE_SPT
351         ICE_WRITE_REG(hw, PFINT_OICR_ENA,
352                       (uint32_t)(PFINT_OICR_ENA_INT_ENA_M &
353                                  (~PFINT_OICR_LINK_STAT_CHANGE_M)));
354
355         ICE_WRITE_REG(hw, PFINT_OICR_CTL,
356                       (0 & PFINT_OICR_CTL_MSIX_INDX_M) |
357                       ((0 << PFINT_OICR_CTL_ITR_INDX_S) &
358                        PFINT_OICR_CTL_ITR_INDX_M) |
359                       PFINT_OICR_CTL_CAUSE_ENA_M);
360
361         ICE_WRITE_REG(hw, PFINT_FW_CTL,
362                       (0 & PFINT_FW_CTL_MSIX_INDX_M) |
363                       ((0 << PFINT_FW_CTL_ITR_INDX_S) &
364                        PFINT_FW_CTL_ITR_INDX_M) |
365                       PFINT_FW_CTL_CAUSE_ENA_M);
366 #else
367         ICE_WRITE_REG(hw, PFINT_OICR_ENA, PFINT_OICR_ENA_INT_ENA_M);
368 #endif
369
370         ICE_WRITE_REG(hw, GLINT_DYN_CTL(0),
371                       GLINT_DYN_CTL_INTENA_M |
372                       GLINT_DYN_CTL_CLEARPBA_M |
373                       GLINT_DYN_CTL_ITR_INDX_M);
374
375         ice_flush(hw);
376 }
377
378 /* Disable IRQ0 */
379 static void
380 ice_pf_disable_irq0(struct ice_hw *hw)
381 {
382         /* Disable all interrupt types */
383         ICE_WRITE_REG(hw, GLINT_DYN_CTL(0), GLINT_DYN_CTL_WB_ON_ITR_M);
384         ice_flush(hw);
385 }
386
387 #ifdef ICE_LSE_SPT
388 static void
389 ice_handle_aq_msg(struct rte_eth_dev *dev)
390 {
391         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
392         struct ice_ctl_q_info *cq = &hw->adminq;
393         struct ice_rq_event_info event;
394         uint16_t pending, opcode;
395         int ret;
396
397         event.buf_len = ICE_AQ_MAX_BUF_LEN;
398         event.msg_buf = rte_zmalloc(NULL, event.buf_len, 0);
399         if (!event.msg_buf) {
400                 PMD_DRV_LOG(ERR, "Failed to allocate mem");
401                 return;
402         }
403
404         pending = 1;
405         while (pending) {
406                 ret = ice_clean_rq_elem(hw, cq, &event, &pending);
407
408                 if (ret != ICE_SUCCESS) {
409                         PMD_DRV_LOG(INFO,
410                                     "Failed to read msg from AdminQ, "
411                                     "adminq_err: %u",
412                                     hw->adminq.sq_last_status);
413                         break;
414                 }
415                 opcode = rte_le_to_cpu_16(event.desc.opcode);
416
417                 switch (opcode) {
418                 case ice_aqc_opc_get_link_status:
419                         ret = ice_link_update(dev, 0);
420                         if (!ret)
421                                 _rte_eth_dev_callback_process
422                                         (dev, RTE_ETH_EVENT_INTR_LSC, NULL);
423                         break;
424                 default:
425                         PMD_DRV_LOG(DEBUG, "Request %u is not supported yet",
426                                     opcode);
427                         break;
428                 }
429         }
430         rte_free(event.msg_buf);
431 }
432 #endif
433
434 /**
435  * Interrupt handler triggered by NIC for handling
436  * specific interrupt.
437  *
438  * @param handle
439  *  Pointer to interrupt handle.
440  * @param param
441  *  The address of parameter (struct rte_eth_dev *) registered before.
442  *
443  * @return
444  *  void
445  */
446 static void
447 ice_interrupt_handler(void *param)
448 {
449         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
450         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
451         uint32_t oicr;
452         uint32_t reg;
453         uint8_t pf_num;
454         uint8_t event;
455         uint16_t queue;
456 #ifdef ICE_LSE_SPT
457         uint32_t int_fw_ctl;
458 #endif
459
460         /* Disable interrupt */
461         ice_pf_disable_irq0(hw);
462
463         /* read out interrupt causes */
464         oicr = ICE_READ_REG(hw, PFINT_OICR);
465 #ifdef ICE_LSE_SPT
466         int_fw_ctl = ICE_READ_REG(hw, PFINT_FW_CTL);
467 #endif
468
469         /* No interrupt event indicated */
470         if (!(oicr & PFINT_OICR_INTEVENT_M)) {
471                 PMD_DRV_LOG(INFO, "No interrupt event");
472                 goto done;
473         }
474
475 #ifdef ICE_LSE_SPT
476         if (int_fw_ctl & PFINT_FW_CTL_INTEVENT_M) {
477                 PMD_DRV_LOG(INFO, "FW_CTL: link state change event");
478                 ice_handle_aq_msg(dev);
479         }
480 #else
481         if (oicr & PFINT_OICR_LINK_STAT_CHANGE_M) {
482                 PMD_DRV_LOG(INFO, "OICR: link state change event");
483                 ice_link_update(dev, 0);
484         }
485 #endif
486
487         if (oicr & PFINT_OICR_MAL_DETECT_M) {
488                 PMD_DRV_LOG(WARNING, "OICR: MDD event");
489                 reg = ICE_READ_REG(hw, GL_MDET_TX_PQM);
490                 if (reg & GL_MDET_TX_PQM_VALID_M) {
491                         pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >>
492                                  GL_MDET_TX_PQM_PF_NUM_S;
493                         event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >>
494                                 GL_MDET_TX_PQM_MAL_TYPE_S;
495                         queue = (reg & GL_MDET_TX_PQM_QNUM_M) >>
496                                 GL_MDET_TX_PQM_QNUM_S;
497
498                         PMD_DRV_LOG(WARNING, "Malicious Driver Detection event "
499                                     "%d by PQM on TX queue %d PF# %d",
500                                     event, queue, pf_num);
501                 }
502
503                 reg = ICE_READ_REG(hw, GL_MDET_TX_TCLAN);
504                 if (reg & GL_MDET_TX_TCLAN_VALID_M) {
505                         pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >>
506                                  GL_MDET_TX_TCLAN_PF_NUM_S;
507                         event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >>
508                                 GL_MDET_TX_TCLAN_MAL_TYPE_S;
509                         queue = (reg & GL_MDET_TX_TCLAN_QNUM_M) >>
510                                 GL_MDET_TX_TCLAN_QNUM_S;
511
512                         PMD_DRV_LOG(WARNING, "Malicious Driver Detection event "
513                                     "%d by TCLAN on TX queue %d PF# %d",
514                                     event, queue, pf_num);
515                 }
516         }
517 done:
518         /* Enable interrupt */
519         ice_pf_enable_irq0(hw);
520         rte_intr_enable(dev->intr_handle);
521 }
522
523 /* Initialize SW parameters of the PF */
524 static int
525 ice_pf_sw_init(struct rte_eth_dev *dev)
526 {
527         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
528         struct ice_hw *hw = ICE_PF_TO_HW(pf);
529
530         if (ice_config_max_queue_pair_num(dev->device->devargs) > 0)
531                 pf->lan_nb_qp_max =
532                         ice_config_max_queue_pair_num(dev->device->devargs);
533         else
534                 pf->lan_nb_qp_max =
535                         (uint16_t)RTE_MIN(hw->func_caps.common_cap.num_txq,
536                                           hw->func_caps.common_cap.num_rxq);
537
538         pf->lan_nb_qps = pf->lan_nb_qp_max;
539
540         return 0;
541 }
542
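/* Create a VSI of the given type (only ICE_VSI_PF is supported for now):
 * fill the VSI context, reserve MSI-X vectors from the PF pool, add the
 * VSI through the admin queue and configure its Tx scheduler nodes.
 */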
543 static struct ice_vsi *
544 ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type)
545 {
546         struct ice_hw *hw = ICE_PF_TO_HW(pf);
547         struct ice_vsi *vsi = NULL;
548         struct ice_vsi_ctx vsi_ctx;
549         int ret;
550         uint16_t max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
551         uint8_t tc_bitmap = 0x1;
552
553         /* hw->num_lports = 1 in NIC mode */
554         vsi = rte_zmalloc(NULL, sizeof(struct ice_vsi), 0);
555         if (!vsi)
556                 return NULL;
557
558         vsi->idx = pf->next_vsi_idx;
559         pf->next_vsi_idx++;
560         vsi->type = type;
561         vsi->adapter = ICE_PF_TO_ADAPTER(pf);
562         vsi->max_macaddrs = ICE_NUM_MACADDR_MAX;
563         vsi->vlan_anti_spoof_on = 0;
564         vsi->vlan_filter_on = 1;
565         TAILQ_INIT(&vsi->mac_list);
566         TAILQ_INIT(&vsi->vlan_list);
567
568         memset(&vsi_ctx, 0, sizeof(vsi_ctx));
569         /* base_queue is used in the queue mapping of the VSI add/update
570          * command. Assume vsi->base_queue is 0 for now; SRIOV and VMDQ cases
571          * are not considered in this first stage, only the main VSI.
572          */
573         vsi->base_queue = 0;
574         switch (type) {
575         case ICE_VSI_PF:
576                 vsi->nb_qps = pf->lan_nb_qps;
577                 ice_vsi_config_default_rss(&vsi_ctx.info);
578                 vsi_ctx.alloc_from_pool = true;
579                 vsi_ctx.flags = ICE_AQ_VSI_TYPE_PF;
580                 /* switch_id is queried by get_switch_config aq, which is done
581                  * by ice_init_hw
582                  */
583                 vsi_ctx.info.sw_id = hw->port_info->sw_id;
584                 vsi_ctx.info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
585                 /* Allow all untagged or tagged packets */
586                 vsi_ctx.info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL;
587                 vsi_ctx.info.vlan_flags |= ICE_AQ_VSI_VLAN_EMOD_NOTHING;
588                 vsi_ctx.info.q_opt_rss = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF |
589                                          ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
590                 /* Enable VLAN/UP trip */
591                 ret = ice_vsi_config_tc_queue_mapping(vsi,
592                                                       &vsi_ctx.info,
593                                                       ICE_DEFAULT_TCMAP);
594                 if (ret) {
595                         PMD_INIT_LOG(ERR,
596                                      "tc queue mapping with vsi failed, "
597                                      "err = %d",
598                                      ret);
599                         goto fail_mem;
600                 }
601
602                 break;
603         default:
604                 /* for other types of VSI */
605                 PMD_INIT_LOG(ERR, "other types of VSI not supported");
606                 goto fail_mem;
607         }
608
609         /* VF has MSIX interrupt in VF range, don't allocate here */
610         if (type == ICE_VSI_PF) {
611                 ret = ice_res_pool_alloc(&pf->msix_pool,
612                                          RTE_MIN(vsi->nb_qps,
613                                                  RTE_MAX_RXTX_INTR_VEC_ID));
614                 if (ret < 0) {
615                         PMD_INIT_LOG(ERR, "VSI MAIN %d get heap failed %d",
616                                      vsi->vsi_id, ret);
617                 }
618                 vsi->msix_intr = ret;
619                 vsi->nb_msix = RTE_MIN(vsi->nb_qps, RTE_MAX_RXTX_INTR_VEC_ID);
620         } else {
621                 vsi->msix_intr = 0;
622                 vsi->nb_msix = 0;
623         }
624         ret = ice_add_vsi(hw, vsi->idx, &vsi_ctx, NULL);
625         if (ret != ICE_SUCCESS) {
626                 PMD_INIT_LOG(ERR, "add vsi failed, err = %d", ret);
627                 goto fail_mem;
628         }
629         /* Store the VSI information in the SW structure */
630         vsi->vsi_id = vsi_ctx.vsi_num;
631         vsi->info = vsi_ctx.info;
632         pf->vsis_allocated = vsi_ctx.vsis_allocd;
633         pf->vsis_unallocated = vsi_ctx.vsis_unallocated;
634
635         /* At the beginning, only TC0. */
636         /* What we need here is the maximum number of the Tx queues.
637          * Currently vsi->nb_qps holds that value.
638          * Correct it if that ever changes.
639          */
640         max_txqs[0] = vsi->nb_qps;
641         ret = ice_cfg_vsi_lan(hw->port_info, vsi->idx,
642                               tc_bitmap, max_txqs);
643         if (ret != ICE_SUCCESS)
644                 PMD_INIT_LOG(ERR, "Failed to config vsi sched");
645
646         return vsi;
647 fail_mem:
648         rte_free(vsi);
649         pf->next_vsi_idx--;
650         return NULL;
651 }
652
653 static int
654 ice_pf_setup(struct ice_pf *pf)
655 {
656         struct ice_vsi *vsi;
657
658         /* Clear all stats counters */
659         pf->offset_loaded = FALSE;
660         memset(&pf->stats, 0, sizeof(struct ice_hw_port_stats));
661         memset(&pf->stats_offset, 0, sizeof(struct ice_hw_port_stats));
662         memset(&pf->internal_stats, 0, sizeof(struct ice_eth_stats));
663         memset(&pf->internal_stats_offset, 0, sizeof(struct ice_eth_stats));
664
665         vsi = ice_setup_vsi(pf, ICE_VSI_PF);
666         if (!vsi) {
667                 PMD_INIT_LOG(ERR, "Failed to add vsi for PF");
668                 return -EINVAL;
669         }
670
671         pf->main_vsi = vsi;
672
673         return 0;
674 }
675
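/* Main PF initialization: set up the ethdev ops and burst functions,
 * initialize the shared HW code (ice_init_hw), read the MAC address,
 * create the MSI-X resource pool, set up the main PF VSI and register
 * and enable the interrupt handler.
 */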
676 static int
677 ice_dev_init(struct rte_eth_dev *dev)
678 {
679         struct rte_pci_device *pci_dev;
680         struct rte_intr_handle *intr_handle;
681         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
682         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
683         int ret;
684
685         dev->dev_ops = &ice_eth_dev_ops;
686         dev->rx_pkt_burst = ice_recv_pkts;
687         dev->tx_pkt_burst = ice_xmit_pkts;
688         dev->tx_pkt_prepare = ice_prep_pkts;
689
690         ice_set_default_ptype_table(dev);
691         pci_dev = RTE_DEV_TO_PCI(dev->device);
692         intr_handle = &pci_dev->intr_handle;
693
694         pf->adapter = ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
695         pf->adapter->eth_dev = dev;
696         pf->dev_data = dev->data;
697         hw->back = pf->adapter;
698         hw->hw_addr = (uint8_t *)pci_dev->mem_resource[0].addr;
699         hw->vendor_id = pci_dev->id.vendor_id;
700         hw->device_id = pci_dev->id.device_id;
701         hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
702         hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
703         hw->bus.device = pci_dev->addr.devid;
704         hw->bus.func = pci_dev->addr.function;
705
706         ice_init_controlq_parameter(hw);
707
708         ret = ice_init_hw(hw);
709         if (ret) {
710                 PMD_INIT_LOG(ERR, "Failed to initialize HW");
711                 return -EINVAL;
712         }
713
714         PMD_INIT_LOG(INFO, "FW %d.%d.%05d API %d.%d",
715                      hw->fw_maj_ver, hw->fw_min_ver, hw->fw_build,
716                      hw->api_maj_ver, hw->api_min_ver);
717
718         ice_pf_sw_init(dev);
719         ret = ice_init_mac_address(dev);
720         if (ret) {
721                 PMD_INIT_LOG(ERR, "Failed to initialize mac address");
722                 goto err_init_mac;
723         }
724
725         ret = ice_res_pool_init(&pf->msix_pool, 1,
726                                 hw->func_caps.common_cap.num_msix_vectors - 1);
727         if (ret) {
728                 PMD_INIT_LOG(ERR, "Failed to init MSIX pool");
729                 goto err_msix_pool_init;
730         }
731
732         ret = ice_pf_setup(pf);
733         if (ret) {
734                 PMD_INIT_LOG(ERR, "Failed to setup PF");
735                 goto err_pf_setup;
736         }
737
738         /* register callback func to eal lib */
739         rte_intr_callback_register(intr_handle,
740                                    ice_interrupt_handler, dev);
741
742         ice_pf_enable_irq0(hw);
743
744         /* enable uio intr after callback register */
745         rte_intr_enable(intr_handle);
746
747         return 0;
748
749 err_pf_setup:
750         ice_res_pool_destroy(&pf->msix_pool);
751 err_msix_pool_init:
752         rte_free(dev->data->mac_addrs);
753 err_init_mac:
754         ice_sched_cleanup_all(hw);
755         rte_free(hw->port_info);
756         ice_shutdown_all_ctrlq(hw);
757
758         return ret;
759 }
760
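/* Free the VSI in hardware via the admin queue and release its SW memory. */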
761 static int
762 ice_release_vsi(struct ice_vsi *vsi)
763 {
764         struct ice_hw *hw;
765         struct ice_vsi_ctx vsi_ctx;
766         enum ice_status ret;
767
768         if (!vsi)
769                 return 0;
770
771         hw = ICE_VSI_TO_HW(vsi);
772
773         memset(&vsi_ctx, 0, sizeof(vsi_ctx));
774
775         vsi_ctx.vsi_num = vsi->vsi_id;
776         vsi_ctx.info = vsi->info;
777         ret = ice_free_vsi(hw, vsi->idx, &vsi_ctx, false, NULL);
778         if (ret != ICE_SUCCESS) {
779                 PMD_INIT_LOG(ERR, "Failed to free vsi by aq, %u", vsi->vsi_id);
780                 rte_free(vsi);
781                 return -1;
782         }
783
784         rte_free(vsi);
785         return 0;
786 }
787
788 static void
789 ice_dev_stop(struct rte_eth_dev *dev)
790 {
791         struct rte_eth_dev_data *data = dev->data;
792         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
793         struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
794         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
795         uint16_t i;
796
797         /* avoid stopping again */
798         if (pf->adapter_stopped)
799                 return;
800
801         /* stop and clear all Rx queues */
802         for (i = 0; i < data->nb_rx_queues; i++)
803                 ice_rx_queue_stop(dev, i);
804
805         /* stop and clear all Tx queues */
806         for (i = 0; i < data->nb_tx_queues; i++)
807                 ice_tx_queue_stop(dev, i);
808
809         /* Clear all queues and release mbufs */
810         ice_clear_queues(dev);
811
812         /* Clean datapath event and queue/vec mapping */
813         rte_intr_efd_disable(intr_handle);
814         if (intr_handle->intr_vec) {
815                 rte_free(intr_handle->intr_vec);
816                 intr_handle->intr_vec = NULL;
817         }
818
819         pf->adapter_stopped = true;
820 }
821
822 static void
823 ice_dev_close(struct rte_eth_dev *dev)
824 {
825         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
826         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
827
828         ice_dev_stop(dev);
829
830         /* release all queue resource */
831         ice_free_queues(dev);
832
833         ice_res_pool_destroy(&pf->msix_pool);
834         ice_release_vsi(pf->main_vsi);
835
836         ice_shutdown_all_ctrlq(hw);
837 }
838
839 static int
840 ice_dev_uninit(struct rte_eth_dev *dev)
841 {
842         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
843         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
844         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
845         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
846
847         ice_dev_close(dev);
848
849         dev->dev_ops = NULL;
850         dev->rx_pkt_burst = NULL;
851         dev->tx_pkt_burst = NULL;
852
853         rte_free(dev->data->mac_addrs);
854         dev->data->mac_addrs = NULL;
855
856         /* disable uio intr before callback unregister */
857         rte_intr_disable(intr_handle);
858
859         /* unregister callback func from eal lib */
860         rte_intr_callback_unregister(intr_handle,
861                                      ice_interrupt_handler, dev);
862
863         ice_release_vsi(pf->main_vsi);
864         ice_sched_cleanup_all(hw);
865         rte_free(hw->port_info);
866         ice_shutdown_all_ctrlq(hw);
867
868         return 0;
869 }
870
871 static int
872 ice_dev_configure(struct rte_eth_dev *dev)
873 {
874         struct ice_adapter *ad =
875                 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
876
877         /* Initialize to TRUE. If any Rx queue doesn't meet the
878          * bulk allocation or vector Rx preconditions, we will reset it.
879          */
880         ad->rx_bulk_alloc_allowed = true;
881         ad->tx_simple_allowed = true;
882
883         return 0;
884 }
885
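/* Configure the RSS key (from the configured rss_conf or a random default)
 * and initialize the RSS lookup table so traffic is spread across all
 * configured Rx queues.
 */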
886 static int ice_init_rss(struct ice_pf *pf)
887 {
888         struct ice_hw *hw = ICE_PF_TO_HW(pf);
889         struct ice_vsi *vsi = pf->main_vsi;
890         struct rte_eth_dev *dev = pf->adapter->eth_dev;
891         struct rte_eth_rss_conf *rss_conf;
892         struct ice_aqc_get_set_rss_keys key;
893         uint16_t i, nb_q;
894         int ret = 0;
895
896         rss_conf = &dev->data->dev_conf.rx_adv_conf.rss_conf;
897         nb_q = dev->data->nb_rx_queues;
898         vsi->rss_key_size = ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE;
899         vsi->rss_lut_size = hw->func_caps.common_cap.rss_table_size;
900
901         if (!vsi->rss_key)
902                 vsi->rss_key = rte_zmalloc(NULL,
903                                            vsi->rss_key_size, 0);
904         if (!vsi->rss_lut)
905                 vsi->rss_lut = rte_zmalloc(NULL,
906                                            vsi->rss_lut_size, 0);
907
908         /* configure RSS key */
909         if (!rss_conf->rss_key) {
910                 /* Calculate the default hash key */
911                 for (i = 0; i < vsi->rss_key_size; i++)
912                         vsi->rss_key[i] = (uint8_t)rte_rand();
913         } else {
914                 rte_memcpy(vsi->rss_key, rss_conf->rss_key,
915                            RTE_MIN(rss_conf->rss_key_len,
916                                    vsi->rss_key_size));
917         }
918         rte_memcpy(key.standard_rss_key, vsi->rss_key, vsi->rss_key_size);
919         ret = ice_aq_set_rss_key(hw, vsi->idx, &key);
920         if (ret)
921                 return -EINVAL;
922
923         /* init RSS LUT table */
924         for (i = 0; i < vsi->rss_lut_size; i++)
925                 vsi->rss_lut[i] = i % nb_q;
926
927         ret = ice_aq_set_rss_lut(hw, vsi->idx,
928                                  ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF,
929                                  vsi->rss_lut, vsi->rss_lut_size);
930         if (ret)
931                 return -EINVAL;
932
933         return 0;
934 }
935
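/* Start the device: program and start all Tx and Rx queues, initialize RSS,
 * select the Rx burst function, set the link event mask and refresh the
 * link status. Already-started queues are rolled back on failure.
 */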
936 static int
937 ice_dev_start(struct rte_eth_dev *dev)
938 {
939         struct rte_eth_dev_data *data = dev->data;
940         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
941         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
942         uint16_t nb_rxq = 0;
943         uint16_t nb_txq, i;
944         int ret;
945
946         /* program Tx queues' context in hardware */
947         for (nb_txq = 0; nb_txq < data->nb_tx_queues; nb_txq++) {
948                 ret = ice_tx_queue_start(dev, nb_txq);
949                 if (ret) {
950                         PMD_DRV_LOG(ERR, "fail to start Tx queue %u", nb_txq);
951                         goto tx_err;
952                 }
953         }
954
955         /* program Rx queues' context in hardware*/
956         for (nb_rxq = 0; nb_rxq < data->nb_rx_queues; nb_rxq++) {
957                 ret = ice_rx_queue_start(dev, nb_rxq);
958                 if (ret) {
959                         PMD_DRV_LOG(ERR, "fail to start Rx queue %u", nb_rxq);
960                         goto rx_err;
961                 }
962         }
963
964         ret = ice_init_rss(pf);
965         if (ret) {
966                 PMD_DRV_LOG(ERR, "Failed to enable rss for PF");
967                 goto rx_err;
968         }
969
970         ice_set_rx_function(dev);
971
972         ret = ice_aq_set_event_mask(hw, hw->port_info->lport,
973                                     ((u16)(ICE_AQ_LINK_EVENT_LINK_FAULT |
974                                      ICE_AQ_LINK_EVENT_PHY_TEMP_ALARM |
975                                      ICE_AQ_LINK_EVENT_EXCESSIVE_ERRORS |
976                                      ICE_AQ_LINK_EVENT_SIGNAL_DETECT |
977                                      ICE_AQ_LINK_EVENT_AN_COMPLETED |
978                                      ICE_AQ_LINK_EVENT_PORT_TX_SUSPENDED)),
979                                      NULL);
980         if (ret != ICE_SUCCESS)
981                 PMD_DRV_LOG(WARNING, "Fail to set phy mask");
982
983         /* Call the get_link_info AQ command to enable/disable LSE */
984         ice_link_update(dev, 0);
985
986         pf->adapter_stopped = false;
987
988         return 0;
989
990         /* Stop the started queues if we failed to start all of them */
991 rx_err:
992         for (i = 0; i < nb_rxq; i++)
993                 ice_rx_queue_stop(dev, i);
994 tx_err:
995         for (i = 0; i < nb_txq; i++)
996                 ice_tx_queue_stop(dev, i);
997
998         return -EIO;
999 }
1000
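/* Reset the port by uninitializing and re-initializing the device.
 * Not supported while SR-IOV is active.
 */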
1001 static int
1002 ice_dev_reset(struct rte_eth_dev *dev)
1003 {
1004         int ret;
1005
1006         if (dev->data->sriov.active)
1007                 return -ENOTSUP;
1008
1009         ret = ice_dev_uninit(dev);
1010         if (ret) {
1011                 PMD_INIT_LOG(ERR, "failed to uninit device, status = %d", ret);
1012                 return -ENXIO;
1013         }
1014
1015         ret = ice_dev_init(dev);
1016         if (ret) {
1017                 PMD_INIT_LOG(ERR, "failed to init device, status = %d", ret);
1018                 return -ENXIO;
1019         }
1020
1021         return 0;
1022 }
1023
1024 static void
1025 ice_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1026 {
1027         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1028         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1029         struct ice_vsi *vsi = pf->main_vsi;
1030         struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
1031
1032         dev_info->min_rx_bufsize = ICE_BUF_SIZE_MIN;
1033         dev_info->max_rx_pktlen = ICE_FRAME_SIZE_MAX;
1034         dev_info->max_rx_queues = vsi->nb_qps;
1035         dev_info->max_tx_queues = vsi->nb_qps;
1036         dev_info->max_mac_addrs = vsi->max_macaddrs;
1037         dev_info->max_vfs = pci_dev->max_vfs;
1038
1039         dev_info->rx_offload_capa =
1040                 DEV_RX_OFFLOAD_IPV4_CKSUM |
1041                 DEV_RX_OFFLOAD_UDP_CKSUM |
1042                 DEV_RX_OFFLOAD_TCP_CKSUM |
1043                 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
1044                 DEV_RX_OFFLOAD_JUMBO_FRAME |
1045                 DEV_RX_OFFLOAD_KEEP_CRC;
1046         dev_info->tx_offload_capa =
1047                 DEV_TX_OFFLOAD_IPV4_CKSUM |
1048                 DEV_TX_OFFLOAD_UDP_CKSUM |
1049                 DEV_TX_OFFLOAD_TCP_CKSUM |
1050                 DEV_TX_OFFLOAD_SCTP_CKSUM |
1051                 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
1052                 DEV_TX_OFFLOAD_TCP_TSO |
1053                 DEV_TX_OFFLOAD_MULTI_SEGS;
1054         dev_info->rx_queue_offload_capa = 0;
1055         dev_info->tx_queue_offload_capa = 0;
1056
1057         dev_info->reta_size = hw->func_caps.common_cap.rss_table_size;
1058         dev_info->hash_key_size = (VSIQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
1059
1060         dev_info->default_rxconf = (struct rte_eth_rxconf) {
1061                 .rx_thresh = {
1062                         .pthresh = ICE_DEFAULT_RX_PTHRESH,
1063                         .hthresh = ICE_DEFAULT_RX_HTHRESH,
1064                         .wthresh = ICE_DEFAULT_RX_WTHRESH,
1065                 },
1066                 .rx_free_thresh = ICE_DEFAULT_RX_FREE_THRESH,
1067                 .rx_drop_en = 0,
1068                 .offloads = 0,
1069         };
1070
1071         dev_info->default_txconf = (struct rte_eth_txconf) {
1072                 .tx_thresh = {
1073                         .pthresh = ICE_DEFAULT_TX_PTHRESH,
1074                         .hthresh = ICE_DEFAULT_TX_HTHRESH,
1075                         .wthresh = ICE_DEFAULT_TX_WTHRESH,
1076                 },
1077                 .tx_free_thresh = ICE_DEFAULT_TX_FREE_THRESH,
1078                 .tx_rs_thresh = ICE_DEFAULT_TX_RSBIT_THRESH,
1079                 .offloads = 0,
1080         };
1081
1082         dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
1083                 .nb_max = ICE_MAX_RING_DESC,
1084                 .nb_min = ICE_MIN_RING_DESC,
1085                 .nb_align = ICE_ALIGN_RING_DESC,
1086         };
1087
1088         dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
1089                 .nb_max = ICE_MAX_RING_DESC,
1090                 .nb_min = ICE_MIN_RING_DESC,
1091                 .nb_align = ICE_ALIGN_RING_DESC,
1092         };
1093
1094         dev_info->speed_capa = ETH_LINK_SPEED_10M |
1095                                ETH_LINK_SPEED_100M |
1096                                ETH_LINK_SPEED_1G |
1097                                ETH_LINK_SPEED_2_5G |
1098                                ETH_LINK_SPEED_5G |
1099                                ETH_LINK_SPEED_10G |
1100                                ETH_LINK_SPEED_20G |
1101                                ETH_LINK_SPEED_25G |
1102                                ETH_LINK_SPEED_40G;
1103
1104         dev_info->nb_rx_queues = dev->data->nb_rx_queues;
1105         dev_info->nb_tx_queues = dev->data->nb_tx_queues;
1106
1107         dev_info->default_rxportconf.burst_size = ICE_RX_MAX_BURST;
1108         dev_info->default_txportconf.burst_size = ICE_TX_MAX_BURST;
1109         dev_info->default_rxportconf.nb_queues = 1;
1110         dev_info->default_txportconf.nb_queues = 1;
1111         dev_info->default_rxportconf.ring_size = ICE_BUF_SIZE_MIN;
1112         dev_info->default_txportconf.ring_size = ICE_BUF_SIZE_MIN;
1113 }
1114
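/* Atomically snapshot dev->data->dev_link into 'link' using a 64-bit
 * compare-and-set; the write counterpart below updates it the same way.
 */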
1115 static inline int
1116 ice_atomic_read_link_status(struct rte_eth_dev *dev,
1117                             struct rte_eth_link *link)
1118 {
1119         struct rte_eth_link *dst = link;
1120         struct rte_eth_link *src = &dev->data->dev_link;
1121
1122         if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
1123                                 *(uint64_t *)src) == 0)
1124                 return -1;
1125
1126         return 0;
1127 }
1128
1129 static inline int
1130 ice_atomic_write_link_status(struct rte_eth_dev *dev,
1131                              struct rte_eth_link *link)
1132 {
1133         struct rte_eth_link *dst = &dev->data->dev_link;
1134         struct rte_eth_link *src = link;
1135
1136         if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
1137                                 *(uint64_t *)src) == 0)
1138                 return -1;
1139
1140         return 0;
1141 }
1142
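/* Query the link status from firmware (polling until the link comes up when
 * wait_to_complete is set), translate the AQ link speed into an rte_eth_link
 * and publish it to dev->data. Returns 0 if the status changed, -1 otherwise.
 */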
1143 static int
1144 ice_link_update(struct rte_eth_dev *dev, int wait_to_complete)
1145 {
1146 #define CHECK_INTERVAL 100  /* 100ms */
1147 #define MAX_REPEAT_TIME 10  /* 1s (10 * 100ms) in total */
1148         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1149         struct ice_link_status link_status;
1150         struct rte_eth_link link, old;
1151         int status;
1152         unsigned int rep_cnt = MAX_REPEAT_TIME;
1153         bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false;
1154
1155         memset(&link, 0, sizeof(link));
1156         memset(&old, 0, sizeof(old));
1157         memset(&link_status, 0, sizeof(link_status));
1158         ice_atomic_read_link_status(dev, &old);
1159
1160         do {
1161                 /* Get link status information from hardware */
1162                 status = ice_aq_get_link_info(hw->port_info, enable_lse,
1163                                               &link_status, NULL);
1164                 if (status != ICE_SUCCESS) {
1165                         link.link_speed = ETH_SPEED_NUM_100M;
1166                         link.link_duplex = ETH_LINK_FULL_DUPLEX;
1167                         PMD_DRV_LOG(ERR, "Failed to get link info");
1168                         goto out;
1169                 }
1170
1171                 link.link_status = link_status.link_info & ICE_AQ_LINK_UP;
1172                 if (!wait_to_complete || link.link_status)
1173                         break;
1174
1175                 rte_delay_ms(CHECK_INTERVAL);
1176         } while (--rep_cnt);
1177
1178         if (!link.link_status)
1179                 goto out;
1180
1181         /* Full-duplex operation at all supported speeds */
1182         link.link_duplex = ETH_LINK_FULL_DUPLEX;
1183
1184         /* Parse the link status */
1185         switch (link_status.link_speed) {
1186         case ICE_AQ_LINK_SPEED_10MB:
1187                 link.link_speed = ETH_SPEED_NUM_10M;
1188                 break;
1189         case ICE_AQ_LINK_SPEED_100MB:
1190                 link.link_speed = ETH_SPEED_NUM_100M;
1191                 break;
1192         case ICE_AQ_LINK_SPEED_1000MB:
1193                 link.link_speed = ETH_SPEED_NUM_1G;
1194                 break;
1195         case ICE_AQ_LINK_SPEED_2500MB:
1196                 link.link_speed = ETH_SPEED_NUM_2_5G;
1197                 break;
1198         case ICE_AQ_LINK_SPEED_5GB:
1199                 link.link_speed = ETH_SPEED_NUM_5G;
1200                 break;
1201         case ICE_AQ_LINK_SPEED_10GB:
1202                 link.link_speed = ETH_SPEED_NUM_10G;
1203                 break;
1204         case ICE_AQ_LINK_SPEED_20GB:
1205                 link.link_speed = ETH_SPEED_NUM_20G;
1206                 break;
1207         case ICE_AQ_LINK_SPEED_25GB:
1208                 link.link_speed = ETH_SPEED_NUM_25G;
1209                 break;
1210         case ICE_AQ_LINK_SPEED_40GB:
1211                 link.link_speed = ETH_SPEED_NUM_40G;
1212                 break;
1213         case ICE_AQ_LINK_SPEED_UNKNOWN:
1214         default:
1215                 PMD_DRV_LOG(ERR, "Unknown link speed");
1216                 link.link_speed = ETH_SPEED_NUM_NONE;
1217                 break;
1218         }
1219
1220         link.link_autoneg = !(dev->data->dev_conf.link_speeds &
1221                               ETH_LINK_SPEED_FIXED);
1222
1223 out:
1224         ice_atomic_write_link_status(dev, &link);
1225         if (link.link_status == old.link_status)
1226                 return -1;
1227
1228         return 0;
1229 }
1230
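/* mtu_set ethdev op added by this patch: validate the resulting frame size,
 * refuse the change while the port is running, toggle the jumbo-frame
 * offload flag and update max_rx_pkt_len accordingly.
 */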
1231 static int
1232 ice_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
1233 {
1234         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1235         struct rte_eth_dev_data *dev_data = pf->dev_data;
1236         uint32_t frame_size = mtu + ETHER_HDR_LEN
1237                               + ETHER_CRC_LEN + ICE_VLAN_TAG_SIZE;
1238
1239         /* check if mtu is within the allowed range */
1240         if (mtu < ETHER_MIN_MTU || frame_size > ICE_FRAME_SIZE_MAX)
1241                 return -EINVAL;
1242
1243         /* MTU setting is forbidden while the port is started */
1244         if (dev_data->dev_started) {
1245                 PMD_DRV_LOG(ERR,
1246                             "port %d must be stopped before configuration",
1247                             dev_data->port_id);
1248                 return -EBUSY;
1249         }
1250
1251         if (frame_size > ETHER_MAX_LEN)
1252                 dev_data->dev_conf.rxmode.offloads |=
1253                         DEV_RX_OFFLOAD_JUMBO_FRAME;
1254         else
1255                 dev_data->dev_conf.rxmode.offloads &=
1256                         ~DEV_RX_OFFLOAD_JUMBO_FRAME;
1257
1258         dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
1259
1260         return 0;
1261 }
1262
1263 static int
1264 ice_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
1265               struct rte_pci_device *pci_dev)
1266 {
1267         return rte_eth_dev_pci_generic_probe(pci_dev,
1268                                              sizeof(struct ice_adapter),
1269                                              ice_dev_init);
1270 }
1271
1272 static int
1273 ice_pci_remove(struct rte_pci_device *pci_dev)
1274 {
1275         return rte_eth_dev_pci_generic_remove(pci_dev, ice_dev_uninit);
1276 }
1277
1278 static struct rte_pci_driver rte_ice_pmd = {
1279         .id_table = pci_id_ice_map,
1280         .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
1281                      RTE_PCI_DRV_IOVA_AS_VA,
1282         .probe = ice_pci_probe,
1283         .remove = ice_pci_remove,
1284 };
1285
1286 /**
1287  * Driver initialization routine.
1288  * Invoked once at EAL init time.
1289  * Register itself as the [Poll Mode] Driver of PCI devices.
1290  */
1291 RTE_PMD_REGISTER_PCI(net_ice, rte_ice_pmd);
1292 RTE_PMD_REGISTER_PCI_TABLE(net_ice, pci_id_ice_map);
1293 RTE_PMD_REGISTER_KMOD_DEP(net_ice, "* igb_uio | uio_pci_generic | vfio-pci");
1294 RTE_PMD_REGISTER_PARAM_STRING(net_ice,
1295                               ICE_MAX_QP_NUM "=<int>");
1296
1297 RTE_INIT(ice_init_log)
1298 {
1299         ice_logtype_init = rte_log_register("pmd.net.ice.init");
1300         if (ice_logtype_init >= 0)
1301                 rte_log_set_level(ice_logtype_init, RTE_LOG_NOTICE);
1302         ice_logtype_driver = rte_log_register("pmd.net.ice.driver");
1303         if (ice_logtype_driver >= 0)
1304                 rte_log_set_level(ice_logtype_driver, RTE_LOG_NOTICE);
1305 }