/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation
 */

#include <rte_ethdev_pci.h>

#include "base/ice_sched.h"
#include "ice_ethdev.h"
#include "ice_rxtx.h"

#define ICE_MAX_QP_NUM "max_queue_pair_num"
#define ICE_DFLT_OUTER_TAG_TYPE ICE_AQ_VSI_OUTER_TAG_VLAN_9100
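
/* The "max_queue_pair_num" devarg is parsed by
 * ice_config_max_queue_pair_num() below. A usage sketch (the PCI address
 * is hypothetical):
 *   -w 0000:18:00.0,max_queue_pair_num=4
 */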

int ice_logtype_init;
int ice_logtype_driver;

static int ice_dev_configure(struct rte_eth_dev *dev);
static int ice_dev_start(struct rte_eth_dev *dev);
static void ice_dev_stop(struct rte_eth_dev *dev);
static void ice_dev_close(struct rte_eth_dev *dev);
static int ice_dev_reset(struct rte_eth_dev *dev);
static void ice_dev_info_get(struct rte_eth_dev *dev,
			     struct rte_eth_dev_info *dev_info);
static int ice_link_update(struct rte_eth_dev *dev,
			   int wait_to_complete);

static const struct rte_pci_id pci_id_ice_map[] = {
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_BACKPLANE) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP) },
	{ .vendor_id = 0, /* sentinel */ },
};

static const struct eth_dev_ops ice_eth_dev_ops = {
	.dev_configure                = ice_dev_configure,
	.dev_start                    = ice_dev_start,
	.dev_stop                     = ice_dev_stop,
	.dev_close                    = ice_dev_close,
	.dev_reset                    = ice_dev_reset,
	.rx_queue_start               = ice_rx_queue_start,
	.rx_queue_stop                = ice_rx_queue_stop,
	.tx_queue_start               = ice_tx_queue_start,
	.tx_queue_stop                = ice_tx_queue_stop,
	.rx_queue_setup               = ice_rx_queue_setup,
	.rx_queue_release             = ice_rx_queue_release,
	.tx_queue_setup               = ice_tx_queue_setup,
	.tx_queue_release             = ice_tx_queue_release,
	.dev_infos_get                = ice_dev_info_get,
	.dev_supported_ptypes_get     = ice_dev_supported_ptypes_get,
	.link_update                  = ice_link_update,
	.rxq_info_get                 = ice_rxq_info_get,
	.txq_info_get                 = ice_txq_info_get,
	.rx_queue_count               = ice_rx_queue_count,
};

static void
ice_init_controlq_parameter(struct ice_hw *hw)
{
	/* fields for the admin queue */
	hw->adminq.num_rq_entries = ICE_ADMINQ_LEN;
	hw->adminq.num_sq_entries = ICE_ADMINQ_LEN;
	hw->adminq.rq_buf_size = ICE_ADMINQ_BUF_SZ;
	hw->adminq.sq_buf_size = ICE_ADMINQ_BUF_SZ;

	/* fields for the mailbox queue; DPDK acts as the PF host */
	hw->mailboxq.num_rq_entries = ICE_MAILBOXQ_LEN;
	hw->mailboxq.num_sq_entries = ICE_MAILBOXQ_LEN;
	hw->mailboxq.rq_buf_size = ICE_MAILBOXQ_BUF_SZ;
	hw->mailboxq.sq_buf_size = ICE_MAILBOXQ_BUF_SZ;
}

static int
ice_check_qp_num(const char *key, const char *qp_value,
		 void *opaque)
{
	int *parsed = opaque;
	char *end = NULL;
	int num = 0;

	while (isblank(*qp_value))
		qp_value++;

	errno = 0; /* clear any stale errno before strtoul() */
	num = strtoul(qp_value, &end, 10);

	if (!num || (*end == '-') || errno) {
		PMD_DRV_LOG(WARNING, "invalid value:\"%s\" for key:\"%s\", "
			    "value must be > 0",
			    qp_value, key);
		return -1;
	}

	/* rte_kvargs_process() does not forward a positive handler return
	 * value, so hand the parsed number back through the opaque pointer.
	 */
	if (parsed)
		*parsed = num;

	return num;
}

static int
ice_config_max_queue_pair_num(struct rte_devargs *devargs)
{
	struct rte_kvargs *kvlist;
	const char *queue_num_key = ICE_MAX_QP_NUM;
	int qp_num = 0;

	if (!devargs)
		return 0;

	kvlist = rte_kvargs_parse(devargs->args, NULL);
	if (!kvlist)
		return 0;

	if (!rte_kvargs_count(kvlist, queue_num_key)) {
		rte_kvargs_free(kvlist);
		return 0;
	}

	/* A single pass is enough; the handler stores the parsed value
	 * in qp_num via the opaque argument.
	 */
	if (rte_kvargs_process(kvlist, queue_num_key,
			       ice_check_qp_num, &qp_num) < 0)
		qp_num = 0;
	rte_kvargs_free(kvlist);

	return qp_num;
}

static int
ice_res_pool_init(struct ice_res_pool_info *pool, uint32_t base,
		  uint32_t num)
{
	struct pool_entry *entry;

	if (!pool || !num)
		return -EINVAL;

	entry = rte_zmalloc(NULL, sizeof(*entry), 0);
	if (!entry) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory for resource pool");
		return -ENOMEM;
	}

	/* initialize the queue heap */
	pool->num_free = num;
	pool->num_alloc = 0;
	pool->base = base;
	LIST_INIT(&pool->alloc_list);
	LIST_INIT(&pool->free_list);

	/* initialize the single element covering the whole range */
	entry->base = 0;
	entry->len = num;

	LIST_INSERT_HEAD(&pool->free_list, entry, next);
	return 0;
}

static int
ice_res_pool_alloc(struct ice_res_pool_info *pool,
		   uint16_t num)
{
	struct pool_entry *entry, *valid_entry;

	if (!pool || !num) {
		PMD_INIT_LOG(ERR, "Invalid parameter");
		return -EINVAL;
	}

	if (pool->num_free < num) {
		PMD_INIT_LOG(ERR, "No resource. ask:%u, available:%u",
			     num, pool->num_free);
		return -ENOMEM;
	}

	valid_entry = NULL;
	/* Look up the free list and find the best-fit entry */
	LIST_FOREACH(entry, &pool->free_list, next) {
		if (entry->len >= num) {
			/* An exact fit, stop searching */
			if (entry->len == num) {
				valid_entry = entry;
				break;
			}
			if (!valid_entry ||
			    valid_entry->len > entry->len)
				valid_entry = entry;
		}
	}

	/* No entry found to satisfy the request, return */
	if (!valid_entry) {
		PMD_INIT_LOG(ERR, "No valid entry found");
		return -ENOMEM;
	}
	/**
	 * The entry has exactly the requested number of queues,
	 * so remove it from the free_list.
	 */
	if (valid_entry->len == num) {
		LIST_REMOVE(valid_entry, next);
	} else {
		/**
		 * The entry has more queues than requested, so create a
		 * new entry for the alloc_list and shrink the base and
		 * length of the entry left on the free_list.
		 */
		entry = rte_zmalloc(NULL, sizeof(*entry), 0);
		if (!entry) {
			PMD_INIT_LOG(ERR,
				     "Failed to allocate memory for "
				     "resource pool");
			return -ENOMEM;
		}
		entry->base = valid_entry->base;
		entry->len = num;
		valid_entry->base += num;
		valid_entry->len -= num;
		valid_entry = entry;
	}

	/* Insert it into the alloc list (not sorted) */
	LIST_INSERT_HEAD(&pool->alloc_list, valid_entry, next);

	pool->num_free -= valid_entry->len;
	pool->num_alloc += valid_entry->len;

	return valid_entry->base + pool->base;
}
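
/* A worked example of the allocator above (the numbers are hypothetical):
 * after ice_res_pool_init(&pool, 1, 15) the free list holds the single
 * entry { base = 0, len = 15 }. A subsequent ice_res_pool_alloc(&pool, 4)
 * finds that entry as best fit, moves { base = 0, len = 4 } to the alloc
 * list, shrinks the free entry to { base = 4, len = 11 } and returns
 * 0 + pool.base = 1, the first absolute index of the allocated range.
 */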

static void
ice_res_pool_destroy(struct ice_res_pool_info *pool)
{
	struct pool_entry *entry, *next_entry;

	if (!pool)
		return;

	for (entry = LIST_FIRST(&pool->alloc_list);
	     entry && (next_entry = LIST_NEXT(entry, next), 1);
	     entry = next_entry) {
		LIST_REMOVE(entry, next);
		rte_free(entry);
	}

	for (entry = LIST_FIRST(&pool->free_list);
	     entry && (next_entry = LIST_NEXT(entry, next), 1);
	     entry = next_entry) {
		LIST_REMOVE(entry, next);
		rte_free(entry);
	}

	pool->num_free = 0;
	pool->num_alloc = 0;
	pool->base = 0;
	LIST_INIT(&pool->alloc_list);
	LIST_INIT(&pool->free_list);
}

static void
ice_vsi_config_default_rss(struct ice_aqc_vsi_props *info)
{
	/* Set VSI LUT selection */
	info->q_opt_rss = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI &
			  ICE_AQ_VSI_Q_OPT_RSS_LUT_M;
	/* Set Hash scheme */
	info->q_opt_rss |= ICE_AQ_VSI_Q_OPT_RSS_TPLZ &
			   ICE_AQ_VSI_Q_OPT_RSS_HASH_M;
	/* enable TC */
	info->q_opt_tc = ICE_AQ_VSI_Q_OPT_TC_OVR_M;
}

static enum ice_status
ice_vsi_config_tc_queue_mapping(struct ice_vsi *vsi,
				struct ice_aqc_vsi_props *info,
				uint8_t enabled_tcmap)
{
	uint16_t bsf, qp_idx;

	/* Default to TC0 for now; multi-TC support needs to be added later.
	 * Configure TC and queue mapping parameters; for each enabled TC,
	 * allocate qpnum_per_tc queues to this traffic class.
	 */
	if (enabled_tcmap != 0x01) {
		PMD_INIT_LOG(ERR, "only TC0 is supported");
		return -ENOTSUP;
	}

	vsi->nb_qps = RTE_MIN(vsi->nb_qps, ICE_MAX_Q_PER_TC);
	bsf = rte_bsf32(vsi->nb_qps);
	/* Adjust the queue number to the actual queues that can be applied */
	vsi->nb_qps = 0x1 << bsf;

	qp_idx = 0;
	/* Set tc and queue mapping with VSI */
	info->tc_mapping[0] = rte_cpu_to_le_16((qp_idx <<
						ICE_AQ_VSI_TC_Q_OFFSET_S) |
					       (bsf << ICE_AQ_VSI_TC_Q_NUM_S));

	/* Associate queue number with VSI */
	info->mapping_flags |= rte_cpu_to_le_16(ICE_AQ_VSI_Q_MAP_CONTIG);
	info->q_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
	info->q_mapping[1] = rte_cpu_to_le_16(vsi->nb_qps);
	info->valid_sections |=
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
	/* Set the info.ingress_table and info.egress_table
	 * for UP translate table. Now just set it to 1:1 map by default
	 * -- 0b 111 110 101 100 011 010 001 000 == 0xFAC688
	 */
#define ICE_TC_QUEUE_TABLE_DFLT 0x00FAC688
	info->ingress_table  = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT);
	info->egress_table   = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT);
	info->outer_up_table = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT);
	return 0;
}
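
/* A worked example of the mapping encoding above (numbers hypothetical):
 * with vsi->nb_qps = 16, bsf = rte_bsf32(16) = 4, so tc_mapping[0] holds
 * (0 << ICE_AQ_VSI_TC_Q_OFFSET_S) | (4 << ICE_AQ_VSI_TC_Q_NUM_S), i.e.
 * queue offset 0 with the queue count stored as the power-of-two
 * exponent 4. That exponent encoding is also why nb_qps is first reduced
 * to 1 << rte_bsf32(nb_qps), the power of two given by its lowest set bit.
 */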

static int
ice_init_mac_address(struct rte_eth_dev *dev)
{
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (!is_unicast_ether_addr
		((struct ether_addr *)hw->port_info[0].mac.lan_addr)) {
		PMD_INIT_LOG(ERR, "Invalid MAC address");
		return -EINVAL;
	}

	ether_addr_copy((struct ether_addr *)hw->port_info[0].mac.lan_addr,
			(struct ether_addr *)hw->port_info[0].mac.perm_addr);

	dev->data->mac_addrs = rte_zmalloc(NULL, sizeof(struct ether_addr), 0);
	if (!dev->data->mac_addrs) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory to store mac address");
		return -ENOMEM;
	}
	/* store it in dev data */
	ether_addr_copy((struct ether_addr *)hw->port_info[0].mac.perm_addr,
			&dev->data->mac_addrs[0]);
	return 0;
}

/* Enable IRQ0 */
static void
ice_pf_enable_irq0(struct ice_hw *hw)
{
	/* reset the registers */
	ICE_WRITE_REG(hw, PFINT_OICR_ENA, 0);
	ICE_READ_REG(hw, PFINT_OICR);

#ifdef ICE_LSE_SPT
	ICE_WRITE_REG(hw, PFINT_OICR_ENA,
		      (uint32_t)(PFINT_OICR_ENA_INT_ENA_M &
				 (~PFINT_OICR_LINK_STAT_CHANGE_M)));

	ICE_WRITE_REG(hw, PFINT_OICR_CTL,
		      (0 & PFINT_OICR_CTL_MSIX_INDX_M) |
		      ((0 << PFINT_OICR_CTL_ITR_INDX_S) &
		       PFINT_OICR_CTL_ITR_INDX_M) |
		      PFINT_OICR_CTL_CAUSE_ENA_M);

	ICE_WRITE_REG(hw, PFINT_FW_CTL,
		      (0 & PFINT_FW_CTL_MSIX_INDX_M) |
		      ((0 << PFINT_FW_CTL_ITR_INDX_S) &
		       PFINT_FW_CTL_ITR_INDX_M) |
		      PFINT_FW_CTL_CAUSE_ENA_M);
#else
	ICE_WRITE_REG(hw, PFINT_OICR_ENA, PFINT_OICR_ENA_INT_ENA_M);
#endif

	ICE_WRITE_REG(hw, GLINT_DYN_CTL(0),
		      GLINT_DYN_CTL_INTENA_M |
		      GLINT_DYN_CTL_CLEARPBA_M |
		      GLINT_DYN_CTL_ITR_INDX_M);

	ice_flush(hw);
}

/* Disable IRQ0 */
static void
ice_pf_disable_irq0(struct ice_hw *hw)
{
	/* Disable all interrupt types */
	ICE_WRITE_REG(hw, GLINT_DYN_CTL(0), GLINT_DYN_CTL_WB_ON_ITR_M);
	ice_flush(hw);
}

#ifdef ICE_LSE_SPT
static void
ice_handle_aq_msg(struct rte_eth_dev *dev)
{
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_ctl_q_info *cq = &hw->adminq;
	struct ice_rq_event_info event;
	uint16_t pending, opcode;
	int ret;

	event.buf_len = ICE_AQ_MAX_BUF_LEN;
	event.msg_buf = rte_zmalloc(NULL, event.buf_len, 0);
	if (!event.msg_buf) {
		PMD_DRV_LOG(ERR, "Failed to allocate mem");
		return;
	}

	pending = 1;
	while (pending) {
		ret = ice_clean_rq_elem(hw, cq, &event, &pending);

		if (ret != ICE_SUCCESS) {
			PMD_DRV_LOG(INFO,
				    "Failed to read msg from AdminQ, "
				    "adminq_err: %u",
				    hw->adminq.sq_last_status);
			break;
		}
		opcode = rte_le_to_cpu_16(event.desc.opcode);

		switch (opcode) {
		case ice_aqc_opc_get_link_status:
			ret = ice_link_update(dev, 0);
			if (!ret)
				_rte_eth_dev_callback_process
					(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
			break;
		default:
			PMD_DRV_LOG(DEBUG, "Request %u is not supported yet",
				    opcode);
			break;
		}
	}
	rte_free(event.msg_buf);
}
#endif

/**
 * Interrupt handler triggered by NIC for handling
 * specific interrupt.
 *
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
ice_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t oicr;
	uint32_t reg;
	uint8_t pf_num;
	uint8_t event;
	uint16_t queue;
#ifdef ICE_LSE_SPT
	uint32_t int_fw_ctl;
#endif

	/* Disable interrupt */
	ice_pf_disable_irq0(hw);

	/* read out interrupt causes */
	oicr = ICE_READ_REG(hw, PFINT_OICR);
#ifdef ICE_LSE_SPT
	int_fw_ctl = ICE_READ_REG(hw, PFINT_FW_CTL);
#endif

	/* No interrupt event indicated */
	if (!(oicr & PFINT_OICR_INTEVENT_M)) {
		PMD_DRV_LOG(INFO, "No interrupt event");
		goto done;
	}

#ifdef ICE_LSE_SPT
	if (int_fw_ctl & PFINT_FW_CTL_INTEVENT_M) {
		PMD_DRV_LOG(INFO, "FW_CTL: link state change event");
		ice_handle_aq_msg(dev);
	}
#else
	if (oicr & PFINT_OICR_LINK_STAT_CHANGE_M) {
		PMD_DRV_LOG(INFO, "OICR: link state change event");
		ice_link_update(dev, 0);
	}
#endif

	if (oicr & PFINT_OICR_MAL_DETECT_M) {
		PMD_DRV_LOG(WARNING, "OICR: MDD event");
		reg = ICE_READ_REG(hw, GL_MDET_TX_PQM);
		if (reg & GL_MDET_TX_PQM_VALID_M) {
			pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >>
				 GL_MDET_TX_PQM_PF_NUM_S;
			event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >>
				GL_MDET_TX_PQM_MAL_TYPE_S;
			queue = (reg & GL_MDET_TX_PQM_QNUM_M) >>
				GL_MDET_TX_PQM_QNUM_S;

			PMD_DRV_LOG(WARNING, "Malicious Driver Detection event "
				    "%d by PQM on TX queue %d PF# %d",
				    event, queue, pf_num);
		}

		reg = ICE_READ_REG(hw, GL_MDET_TX_TCLAN);
		if (reg & GL_MDET_TX_TCLAN_VALID_M) {
			pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >>
				 GL_MDET_TX_TCLAN_PF_NUM_S;
			event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >>
				GL_MDET_TX_TCLAN_MAL_TYPE_S;
			queue = (reg & GL_MDET_TX_TCLAN_QNUM_M) >>
				GL_MDET_TX_TCLAN_QNUM_S;

			PMD_DRV_LOG(WARNING, "Malicious Driver Detection event "
				    "%d by TCLAN on TX queue %d PF# %d",
				    event, queue, pf_num);
		}
	}
done:
	/* Enable interrupt */
	ice_pf_enable_irq0(hw);
	rte_intr_enable(dev->intr_handle);
}

/*  Initialize SW parameters of PF */
static int
ice_pf_sw_init(struct rte_eth_dev *dev)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	int qp_num;

	/* parse the devargs once and cache the result */
	qp_num = ice_config_max_queue_pair_num(dev->device->devargs);
	if (qp_num > 0)
		pf->lan_nb_qp_max = (uint16_t)qp_num;
	else
		pf->lan_nb_qp_max =
			(uint16_t)RTE_MIN(hw->func_caps.common_cap.num_txq,
					  hw->func_caps.common_cap.num_rxq);

	pf->lan_nb_qps = pf->lan_nb_qp_max;

	return 0;
}

static struct ice_vsi *
ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_vsi *vsi = NULL;
	struct ice_vsi_ctx vsi_ctx;
	int ret;
	uint16_t max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
	uint8_t tc_bitmap = 0x1;

	/* hw->num_lports = 1 in NIC mode */
	vsi = rte_zmalloc(NULL, sizeof(struct ice_vsi), 0);
	if (!vsi)
		return NULL;

	vsi->idx = pf->next_vsi_idx;
	pf->next_vsi_idx++;
	vsi->type = type;
	vsi->adapter = ICE_PF_TO_ADAPTER(pf);
	vsi->max_macaddrs = ICE_NUM_MACADDR_MAX;
	vsi->vlan_anti_spoof_on = 0;
	vsi->vlan_filter_on = 1;
	TAILQ_INIT(&vsi->mac_list);
	TAILQ_INIT(&vsi->vlan_list);

	memset(&vsi_ctx, 0, sizeof(vsi_ctx));
	/* base_queue is used in the queue mapping of the VSI add/update
	 * command. Suppose vsi->base_queue is 0 now; don't consider SRIOV
	 * or VMDQ cases in the first stage. Only the main VSI.
	 */
	vsi->base_queue = 0;
	switch (type) {
	case ICE_VSI_PF:
		vsi->nb_qps = pf->lan_nb_qps;
		ice_vsi_config_default_rss(&vsi_ctx.info);
		vsi_ctx.alloc_from_pool = true;
		vsi_ctx.flags = ICE_AQ_VSI_TYPE_PF;
		/* switch_id is queried by get_switch_config aq, which is done
		 * by ice_init_hw
		 */
		vsi_ctx.info.sw_id = hw->port_info->sw_id;
		vsi_ctx.info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
		/* Allow all untagged or tagged packets */
		vsi_ctx.info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL;
		vsi_ctx.info.vlan_flags |= ICE_AQ_VSI_VLAN_EMOD_NOTHING;
		vsi_ctx.info.q_opt_rss = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF |
					 ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
		/* Enable VLAN/UP trip */
		ret = ice_vsi_config_tc_queue_mapping(vsi,
						      &vsi_ctx.info,
						      ICE_DEFAULT_TCMAP);
		if (ret) {
			PMD_INIT_LOG(ERR,
				     "tc queue mapping with vsi failed, "
				     "err = %d",
				     ret);
			goto fail_mem;
		}

		break;
	default:
		/* for other types of VSI */
		PMD_INIT_LOG(ERR, "other types of VSI not supported");
		goto fail_mem;
	}

	/* VF has MSIX interrupt in VF range, don't allocate here */
	if (type == ICE_VSI_PF) {
		ret = ice_res_pool_alloc(&pf->msix_pool,
					 RTE_MIN(vsi->nb_qps,
						 RTE_MAX_RXTX_INTR_VEC_ID));
		if (ret < 0) {
			PMD_INIT_LOG(ERR, "VSI MAIN %d get heap failed %d",
				     vsi->vsi_id, ret);
		}
		vsi->msix_intr = ret;
		vsi->nb_msix = RTE_MIN(vsi->nb_qps, RTE_MAX_RXTX_INTR_VEC_ID);
	} else {
		vsi->msix_intr = 0;
		vsi->nb_msix = 0;
	}
	ret = ice_add_vsi(hw, vsi->idx, &vsi_ctx, NULL);
	if (ret != ICE_SUCCESS) {
		PMD_INIT_LOG(ERR, "add vsi failed, err = %d", ret);
		goto fail_mem;
	}
	/* store VSI information in the SW structure */
	vsi->vsi_id = vsi_ctx.vsi_num;
	vsi->info = vsi_ctx.info;
	pf->vsis_allocated = vsi_ctx.vsis_allocd;
	pf->vsis_unallocated = vsi_ctx.vsis_unallocated;

	/* At the beginning, only TC0. */
	/* What we need here is the maximum number of Tx queues.
	 * Currently vsi->nb_qps means that.
	 * Correct it if there is any change.
	 */
	max_txqs[0] = vsi->nb_qps;
	ret = ice_cfg_vsi_lan(hw->port_info, vsi->idx,
			      tc_bitmap, max_txqs);
	if (ret != ICE_SUCCESS)
		PMD_INIT_LOG(ERR, "Failed to config vsi sched");

	return vsi;
fail_mem:
	rte_free(vsi);
	pf->next_vsi_idx--;
	return NULL;
}

static int
ice_pf_setup(struct ice_pf *pf)
{
	struct ice_vsi *vsi;

	/* Clear all stats counters */
	pf->offset_loaded = FALSE;
	memset(&pf->stats, 0, sizeof(struct ice_hw_port_stats));
	memset(&pf->stats_offset, 0, sizeof(struct ice_hw_port_stats));
	memset(&pf->internal_stats, 0, sizeof(struct ice_eth_stats));
	memset(&pf->internal_stats_offset, 0, sizeof(struct ice_eth_stats));

	vsi = ice_setup_vsi(pf, ICE_VSI_PF);
	if (!vsi) {
		PMD_INIT_LOG(ERR, "Failed to add vsi for PF");
		return -EINVAL;
	}

	pf->main_vsi = vsi;

	return 0;
}

static int
ice_dev_init(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev;
	struct rte_intr_handle *intr_handle;
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	int ret;

	dev->dev_ops = &ice_eth_dev_ops;

	ice_set_default_ptype_table(dev);
	pci_dev = RTE_DEV_TO_PCI(dev->device);
	intr_handle = &pci_dev->intr_handle;

	pf->adapter = ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	pf->adapter->eth_dev = dev;
	pf->dev_data = dev->data;
	hw->back = pf->adapter;
	hw->hw_addr = (uint8_t *)pci_dev->mem_resource[0].addr;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->device_id = pci_dev->id.device_id;
	hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
	hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
	hw->bus.device = pci_dev->addr.devid;
	hw->bus.func = pci_dev->addr.function;

	ice_init_controlq_parameter(hw);

	ret = ice_init_hw(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to initialize HW");
		return -EINVAL;
	}

	PMD_INIT_LOG(INFO, "FW %d.%d.%05d API %d.%d",
		     hw->fw_maj_ver, hw->fw_min_ver, hw->fw_build,
		     hw->api_maj_ver, hw->api_min_ver);

	ice_pf_sw_init(dev);
	ret = ice_init_mac_address(dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to initialize mac address");
		goto err_init_mac;
	}

	ret = ice_res_pool_init(&pf->msix_pool, 1,
				hw->func_caps.common_cap.num_msix_vectors - 1);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to init MSIX pool");
		goto err_msix_pool_init;
	}

	ret = ice_pf_setup(pf);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to setup PF");
		goto err_pf_setup;
	}

	/* register callback func to eal lib */
	rte_intr_callback_register(intr_handle,
				   ice_interrupt_handler, dev);

	ice_pf_enable_irq0(hw);

	/* enable uio intr after callback register */
	rte_intr_enable(intr_handle);

	return 0;

err_pf_setup:
	ice_res_pool_destroy(&pf->msix_pool);
err_msix_pool_init:
	rte_free(dev->data->mac_addrs);
err_init_mac:
	ice_sched_cleanup_all(hw);
	rte_free(hw->port_info);
	ice_shutdown_all_ctrlq(hw);

	return ret;
}

static int
ice_release_vsi(struct ice_vsi *vsi)
{
	struct ice_hw *hw;
	struct ice_vsi_ctx vsi_ctx;
	enum ice_status ret;

	if (!vsi)
		return 0;

	hw = ICE_VSI_TO_HW(vsi);

	memset(&vsi_ctx, 0, sizeof(vsi_ctx));

	vsi_ctx.vsi_num = vsi->vsi_id;
	vsi_ctx.info = vsi->info;
	ret = ice_free_vsi(hw, vsi->idx, &vsi_ctx, false, NULL);
	if (ret != ICE_SUCCESS) {
		PMD_INIT_LOG(ERR, "Failed to free vsi by aq, %u", vsi->vsi_id);
		rte_free(vsi);
		return -1;
	}

	rte_free(vsi);
	return 0;
}

static void
ice_dev_stop(struct rte_eth_dev *dev)
{
	struct rte_eth_dev_data *data = dev->data;
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	uint16_t i;

	/* avoid stopping again */
	if (pf->adapter_stopped)
		return;

	/* stop and clear all Rx queues */
	for (i = 0; i < data->nb_rx_queues; i++)
		ice_rx_queue_stop(dev, i);

	/* stop and clear all Tx queues */
	for (i = 0; i < data->nb_tx_queues; i++)
		ice_tx_queue_stop(dev, i);

	/* Clear all queues and release mbufs */
	ice_clear_queues(dev);

	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
	if (intr_handle->intr_vec) {
		rte_free(intr_handle->intr_vec);
		intr_handle->intr_vec = NULL;
	}

	pf->adapter_stopped = true;
}

static void
ice_dev_close(struct rte_eth_dev *dev)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	ice_dev_stop(dev);

	/* release all queue resource */
	ice_free_queues(dev);

	ice_res_pool_destroy(&pf->msix_pool);
	ice_release_vsi(pf->main_vsi);
	/* clear the pointer so the later release in ice_dev_uninit()
	 * doesn't free the VSI a second time
	 */
	pf->main_vsi = NULL;

	ice_shutdown_all_ctrlq(hw);
}

static int
ice_dev_uninit(struct rte_eth_dev *dev)
{
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	ice_dev_close(dev);

	dev->dev_ops = NULL;
	dev->rx_pkt_burst = NULL;
	dev->tx_pkt_burst = NULL;

	rte_free(dev->data->mac_addrs);
	dev->data->mac_addrs = NULL;

	/* disable uio intr before callback unregister */
	rte_intr_disable(intr_handle);

	/* unregister callback func from eal lib */
	rte_intr_callback_unregister(intr_handle,
				     ice_interrupt_handler, dev);

	ice_release_vsi(pf->main_vsi);
	ice_sched_cleanup_all(hw);
	rte_free(hw->port_info);
	ice_shutdown_all_ctrlq(hw);

	return 0;
}

static int
ice_dev_configure(struct rte_eth_dev *dev)
{
	struct ice_adapter *ad =
		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);

	/* Initialize to TRUE. If any Rx queue doesn't meet the
	 * bulk allocation or vector Rx preconditions, it will be reset.
	 */
	ad->rx_bulk_alloc_allowed = true;
	ad->tx_simple_allowed = true;

	return 0;
}

static int ice_init_rss(struct ice_pf *pf)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_vsi *vsi = pf->main_vsi;
	struct rte_eth_dev *dev = pf->adapter->eth_dev;
	struct rte_eth_rss_conf *rss_conf;
	struct ice_aqc_get_set_rss_keys key;
	uint16_t i, nb_q;
	int ret = 0;

	rss_conf = &dev->data->dev_conf.rx_adv_conf.rss_conf;
	nb_q = dev->data->nb_rx_queues;
	vsi->rss_key_size = ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE;
	vsi->rss_lut_size = hw->func_caps.common_cap.rss_table_size;

	if (!vsi->rss_key)
		vsi->rss_key = rte_zmalloc(NULL,
					   vsi->rss_key_size, 0);
	if (!vsi->rss_lut)
		vsi->rss_lut = rte_zmalloc(NULL,
					   vsi->rss_lut_size, 0);
	if (!vsi->rss_key || !vsi->rss_lut)
		return -ENOMEM;

	/* configure RSS key */
	if (!rss_conf->rss_key) {
		/* Calculate the default hash key */
		for (i = 0; i < vsi->rss_key_size; i++)
			vsi->rss_key[i] = (uint8_t)rte_rand();
	} else {
		rte_memcpy(vsi->rss_key, rss_conf->rss_key,
			   RTE_MIN(rss_conf->rss_key_len,
				   vsi->rss_key_size));
	}
	rte_memcpy(key.standard_rss_key, vsi->rss_key, vsi->rss_key_size);
	ret = ice_aq_set_rss_key(hw, vsi->idx, &key);
	if (ret)
		return -EINVAL;

	/* init RSS LUT table */
	for (i = 0; i < vsi->rss_lut_size; i++)
		vsi->rss_lut[i] = i % nb_q;

	ret = ice_aq_set_rss_lut(hw, vsi->idx,
				 ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF,
				 vsi->rss_lut, vsi->rss_lut_size);
	if (ret)
		return -EINVAL;

	return 0;
}
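
/* A concrete picture of the LUT fill above (sizes hypothetical): with
 * nb_q = 4 and rss_lut_size = 512, rss_lut[] becomes 0,1,2,3,0,1,2,3,...
 * so RSS hash buckets are spread round-robin across the four Rx queues.
 */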

static int
ice_dev_start(struct rte_eth_dev *dev)
{
	struct rte_eth_dev_data *data = dev->data;
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	uint16_t nb_rxq = 0;
	uint16_t nb_txq, i;
	int ret;

	/* program Tx queues' context in hardware */
	for (nb_txq = 0; nb_txq < data->nb_tx_queues; nb_txq++) {
		ret = ice_tx_queue_start(dev, nb_txq);
		if (ret) {
			PMD_DRV_LOG(ERR, "fail to start Tx queue %u", nb_txq);
			goto tx_err;
		}
	}

	/* program Rx queues' context in hardware */
	for (nb_rxq = 0; nb_rxq < data->nb_rx_queues; nb_rxq++) {
		ret = ice_rx_queue_start(dev, nb_rxq);
		if (ret) {
			PMD_DRV_LOG(ERR, "fail to start Rx queue %u", nb_rxq);
			goto rx_err;
		}
	}

	ret = ice_init_rss(pf);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to enable rss for PF");
		goto rx_err;
	}

	ret = ice_aq_set_event_mask(hw, hw->port_info->lport,
				    ((u16)(ICE_AQ_LINK_EVENT_LINK_FAULT |
				     ICE_AQ_LINK_EVENT_PHY_TEMP_ALARM |
				     ICE_AQ_LINK_EVENT_EXCESSIVE_ERRORS |
				     ICE_AQ_LINK_EVENT_SIGNAL_DETECT |
				     ICE_AQ_LINK_EVENT_AN_COMPLETED |
				     ICE_AQ_LINK_EVENT_PORT_TX_SUSPENDED)),
				     NULL);
	if (ret != ICE_SUCCESS)
		PMD_DRV_LOG(WARNING, "Fail to set phy mask");

	/* Call get_link_info aq command to enable/disable LSE */
	ice_link_update(dev, 0);

	pf->adapter_stopped = false;

	return 0;

	/* stop the started queues if failed to start all queues */
rx_err:
	for (i = 0; i < nb_rxq; i++)
		ice_rx_queue_stop(dev, i);
tx_err:
	for (i = 0; i < nb_txq; i++)
		ice_tx_queue_stop(dev, i);

	return -EIO;
}

static int
ice_dev_reset(struct rte_eth_dev *dev)
{
	int ret;

	if (dev->data->sriov.active)
		return -ENOTSUP;

	ret = ice_dev_uninit(dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "failed to uninit device, status = %d", ret);
		return -ENXIO;
	}

	ret = ice_dev_init(dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "failed to init device, status = %d", ret);
		return -ENXIO;
	}

	return 0;
}

static void
ice_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_vsi *vsi = pf->main_vsi;
	struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);

	dev_info->min_rx_bufsize = ICE_BUF_SIZE_MIN;
	dev_info->max_rx_pktlen = ICE_FRAME_SIZE_MAX;
	dev_info->max_rx_queues = vsi->nb_qps;
	dev_info->max_tx_queues = vsi->nb_qps;
	dev_info->max_mac_addrs = vsi->max_macaddrs;
	dev_info->max_vfs = pci_dev->max_vfs;

	dev_info->rx_offload_capa = 0;
	dev_info->tx_offload_capa = 0;
	dev_info->rx_queue_offload_capa = 0;
	dev_info->tx_queue_offload_capa = 0;

	dev_info->reta_size = hw->func_caps.common_cap.rss_table_size;
	dev_info->hash_key_size = (VSIQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);

	dev_info->speed_capa = ETH_LINK_SPEED_10M |
			       ETH_LINK_SPEED_100M |
			       ETH_LINK_SPEED_1G |
			       ETH_LINK_SPEED_2_5G |
			       ETH_LINK_SPEED_5G |
			       ETH_LINK_SPEED_10G |
			       ETH_LINK_SPEED_20G |
			       ETH_LINK_SPEED_25G |
			       ETH_LINK_SPEED_40G;

	dev_info->nb_rx_queues = dev->data->nb_rx_queues;
	dev_info->nb_tx_queues = dev->data->nb_tx_queues;

	dev_info->default_rxportconf.burst_size = ICE_RX_MAX_BURST;
	dev_info->default_txportconf.burst_size = ICE_TX_MAX_BURST;
	dev_info->default_rxportconf.nb_queues = 1;
	dev_info->default_txportconf.nb_queues = 1;
	dev_info->default_rxportconf.ring_size = ICE_BUF_SIZE_MIN;
	dev_info->default_txportconf.ring_size = ICE_BUF_SIZE_MIN;
}

static inline int
ice_atomic_read_link_status(struct rte_eth_dev *dev,
			    struct rte_eth_link *link)
{
	struct rte_eth_link *dst = link;
	struct rte_eth_link *src = &dev->data->dev_link;

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
				*(uint64_t *)src) == 0)
		return -1;

	return 0;
}

static inline int
ice_atomic_write_link_status(struct rte_eth_dev *dev,
			     struct rte_eth_link *link)
{
	struct rte_eth_link *dst = &dev->data->dev_link;
	struct rte_eth_link *src = link;

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
				*(uint64_t *)src) == 0)
		return -1;

	return 0;
}
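
/* A note on the two helpers above: they update the link word with a
 * 64-bit rte_atomic64_cmpset(), which relies on struct rte_eth_link
 * being packed into (and aligned to) 8 bytes; rte_ethdev.h declares the
 * struct with 8-byte alignment for exactly this kind of atomic access.
 */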

static int
ice_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
#define CHECK_INTERVAL 100  /* 100ms */
#define MAX_REPEAT_TIME 10  /* 1s (10 * 100ms) in total */
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_link_status link_status;
	struct rte_eth_link link, old;
	int status;
	unsigned int rep_cnt = MAX_REPEAT_TIME;
	bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false;

	memset(&link, 0, sizeof(link));
	memset(&old, 0, sizeof(old));
	memset(&link_status, 0, sizeof(link_status));
	ice_atomic_read_link_status(dev, &old);

	do {
		/* Get link status information from hardware */
		status = ice_aq_get_link_info(hw->port_info, enable_lse,
					      &link_status, NULL);
		if (status != ICE_SUCCESS) {
			link.link_speed = ETH_SPEED_NUM_100M;
			link.link_duplex = ETH_LINK_FULL_DUPLEX;
			PMD_DRV_LOG(ERR, "Failed to get link info");
			goto out;
		}

		link.link_status = link_status.link_info & ICE_AQ_LINK_UP;
		if (!wait_to_complete || link.link_status)
			break;

		rte_delay_ms(CHECK_INTERVAL);
	} while (--rep_cnt);

	if (!link.link_status)
		goto out;

	/* Full-duplex operation at all supported speeds */
	link.link_duplex = ETH_LINK_FULL_DUPLEX;

	/* Parse the link status */
	switch (link_status.link_speed) {
	case ICE_AQ_LINK_SPEED_10MB:
		link.link_speed = ETH_SPEED_NUM_10M;
		break;
	case ICE_AQ_LINK_SPEED_100MB:
		link.link_speed = ETH_SPEED_NUM_100M;
		break;
	case ICE_AQ_LINK_SPEED_1000MB:
		link.link_speed = ETH_SPEED_NUM_1G;
		break;
	case ICE_AQ_LINK_SPEED_2500MB:
		link.link_speed = ETH_SPEED_NUM_2_5G;
		break;
	case ICE_AQ_LINK_SPEED_5GB:
		link.link_speed = ETH_SPEED_NUM_5G;
		break;
	case ICE_AQ_LINK_SPEED_10GB:
		link.link_speed = ETH_SPEED_NUM_10G;
		break;
	case ICE_AQ_LINK_SPEED_20GB:
		link.link_speed = ETH_SPEED_NUM_20G;
		break;
	case ICE_AQ_LINK_SPEED_25GB:
		link.link_speed = ETH_SPEED_NUM_25G;
		break;
	case ICE_AQ_LINK_SPEED_40GB:
		link.link_speed = ETH_SPEED_NUM_40G;
		break;
	case ICE_AQ_LINK_SPEED_UNKNOWN:
	default:
		PMD_DRV_LOG(ERR, "Unknown link speed");
		link.link_speed = ETH_SPEED_NUM_NONE;
		break;
	}

	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
			      ETH_LINK_SPEED_FIXED);

out:
	ice_atomic_write_link_status(dev, &link);
	if (link.link_status == old.link_status)
		return -1;

	return 0;
}

static int
ice_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	      struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
					     sizeof(struct ice_adapter),
					     ice_dev_init);
}

static int
ice_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, ice_dev_uninit);
}

static struct rte_pci_driver rte_ice_pmd = {
	.id_table = pci_id_ice_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
		     RTE_PCI_DRV_IOVA_AS_VA,
	.probe = ice_pci_probe,
	.remove = ice_pci_remove,
};

/**
 * Driver initialization routine.
 * Invoked once at EAL init time.
 * Register itself as the [Poll Mode] Driver of PCI devices.
 */
RTE_PMD_REGISTER_PCI(net_ice, rte_ice_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_ice, pci_id_ice_map);
RTE_PMD_REGISTER_KMOD_DEP(net_ice, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(net_ice,
			      ICE_MAX_QP_NUM "=<int>");
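
/* A usage sketch for the registrations above (the PCI address and app are
 * hypothetical): bind the port to one of the kernel modules named in the
 * KMOD_DEP string, then pass the devarg on the EAL command line, e.g.
 *   testpmd -w 0000:18:00.0,max_queue_pair_num=8 -- -i
 */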

RTE_INIT(ice_init_log)
{
	ice_logtype_init = rte_log_register("pmd.net.ice.init");
	if (ice_logtype_init >= 0)
		rte_log_set_level(ice_logtype_init, RTE_LOG_NOTICE);
	ice_logtype_driver = rte_log_register("pmd.net.ice.driver");
	if (ice_logtype_driver >= 0)
		rte_log_set_level(ice_logtype_driver, RTE_LOG_NOTICE);
}