net/ice: support queue information getting
drivers/net/ice/ice_ethdev.c (dpdk.git)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation
 */

#include <rte_ethdev_pci.h>

#include "base/ice_sched.h"
#include "ice_ethdev.h"
#include "ice_rxtx.h"

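/* Devargs key understood by this PMD (registered at the bottom of this
 * file via RTE_PMD_REGISTER_PARAM_STRING). It caps the number of queue
 * pairs the PF will use, e.g. (assuming the usual EAL device whitelist
 * syntax; the PCI address below is just an example):
 *   -w 0000:18:00.0,max_queue_pair_num=4
 */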
#define ICE_MAX_QP_NUM "max_queue_pair_num"
#define ICE_DFLT_OUTER_TAG_TYPE ICE_AQ_VSI_OUTER_TAG_VLAN_9100

int ice_logtype_init;
int ice_logtype_driver;

static int ice_dev_configure(struct rte_eth_dev *dev);
static int ice_dev_start(struct rte_eth_dev *dev);
static void ice_dev_stop(struct rte_eth_dev *dev);
static void ice_dev_close(struct rte_eth_dev *dev);
static int ice_dev_reset(struct rte_eth_dev *dev);
static void ice_dev_info_get(struct rte_eth_dev *dev,
                             struct rte_eth_dev_info *dev_info);
static int ice_link_update(struct rte_eth_dev *dev,
                           int wait_to_complete);

static const struct rte_pci_id pci_id_ice_map[] = {
        { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_BACKPLANE) },
        { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP) },
        { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP) },
        { .vendor_id = 0, /* sentinel */ },
};

static const struct eth_dev_ops ice_eth_dev_ops = {
        .dev_configure                = ice_dev_configure,
        .dev_start                    = ice_dev_start,
        .dev_stop                     = ice_dev_stop,
        .dev_close                    = ice_dev_close,
        .dev_reset                    = ice_dev_reset,
        .rx_queue_start               = ice_rx_queue_start,
        .rx_queue_stop                = ice_rx_queue_stop,
        .tx_queue_start               = ice_tx_queue_start,
        .tx_queue_stop                = ice_tx_queue_stop,
        .rx_queue_setup               = ice_rx_queue_setup,
        .rx_queue_release             = ice_rx_queue_release,
        .tx_queue_setup               = ice_tx_queue_setup,
        .tx_queue_release             = ice_tx_queue_release,
        .dev_infos_get                = ice_dev_info_get,
        .link_update                  = ice_link_update,
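        /* per-queue information queries (see ice_rxtx.h) */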
        .rxq_info_get                 = ice_rxq_info_get,
        .txq_info_get                 = ice_txq_info_get,
        .rx_queue_count               = ice_rx_queue_count,
};

static void
ice_init_controlq_parameter(struct ice_hw *hw)
{
        /* fields for adminq */
        hw->adminq.num_rq_entries = ICE_ADMINQ_LEN;
        hw->adminq.num_sq_entries = ICE_ADMINQ_LEN;
        hw->adminq.rq_buf_size = ICE_ADMINQ_BUF_SZ;
        hw->adminq.sq_buf_size = ICE_ADMINQ_BUF_SZ;

        /* fields for mailboxq, DPDK is used as the PF host */
        hw->mailboxq.num_rq_entries = ICE_MAILBOXQ_LEN;
        hw->mailboxq.num_sq_entries = ICE_MAILBOXQ_LEN;
        hw->mailboxq.rq_buf_size = ICE_MAILBOXQ_BUF_SZ;
        hw->mailboxq.sq_buf_size = ICE_MAILBOXQ_BUF_SZ;
}

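/* Devargs handler for ICE_MAX_QP_NUM: parse the value as a decimal
 * queue-pair count and return it, or -1 if it is not a positive number.
 */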
static int
ice_check_qp_num(const char *key, const char *qp_value,
                 __rte_unused void *opaque)
{
        char *end = NULL;
        int num = 0;

        while (isblank(*qp_value))
                qp_value++;

        /* clear any stale error before strtoul */
        errno = 0;
        num = strtoul(qp_value, &end, 10);

        if (!num || (*end == '-') || errno) {
                PMD_DRV_LOG(WARNING, "invalid value:\"%s\" for key:\"%s\", "
                            "value must be > 0",
                            qp_value, key);
                return -1;
        }

        return num;
}

static int
ice_config_max_queue_pair_num(struct rte_devargs *devargs)
{
        struct rte_kvargs *kvlist;
        const char *queue_num_key = ICE_MAX_QP_NUM;
        int ret;

        if (!devargs)
                return 0;

        kvlist = rte_kvargs_parse(devargs->args, NULL);
        if (!kvlist)
                return 0;

        if (!rte_kvargs_count(kvlist, queue_num_key)) {
                rte_kvargs_free(kvlist);
                return 0;
        }

        ret = rte_kvargs_process(kvlist, queue_num_key,
                                 ice_check_qp_num, NULL);
        rte_kvargs_free(kvlist);

        /* treat a malformed value the same as an absent one */
        return ret < 0 ? 0 : ret;
}

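/* Set up a resource pool covering [base, base + num): the whole range
 * starts out as a single entry on the free list.
 */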
static int
ice_res_pool_init(struct ice_res_pool_info *pool, uint32_t base,
                  uint32_t num)
{
        struct pool_entry *entry;

        if (!pool || !num)
                return -EINVAL;

        entry = rte_zmalloc(NULL, sizeof(*entry), 0);
        if (!entry) {
                PMD_INIT_LOG(ERR,
                             "Failed to allocate memory for resource pool");
                return -ENOMEM;
        }

        /* queue heap initialize */
        pool->num_free = num;
        pool->num_alloc = 0;
        pool->base = base;
        LIST_INIT(&pool->alloc_list);
        LIST_INIT(&pool->free_list);

        /* Initialize element */
        entry->base = 0;
        entry->len = num;

        LIST_INSERT_HEAD(&pool->free_list, entry, next);
        return 0;
}

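/* Best-fit allocator: walk the free list for the smallest entry that
 * still holds 'num' resources; an exact match is moved to the alloc
 * list as-is, a larger entry is split. Returns the absolute base of
 * the allocated range (pool->base + entry base), or a negative errno.
 *
 * For example, with one free entry {base = 0, len = 8}, a request for
 * num = 3 splits it into an allocated {0, 3} and a free {3, 5}.
 */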
static int
ice_res_pool_alloc(struct ice_res_pool_info *pool,
                   uint16_t num)
{
        struct pool_entry *entry, *valid_entry;

        if (!pool || !num) {
                PMD_INIT_LOG(ERR, "Invalid parameter");
                return -EINVAL;
        }

        if (pool->num_free < num) {
                PMD_INIT_LOG(ERR, "No resource. ask:%u, available:%u",
                             num, pool->num_free);
                return -ENOMEM;
        }

        valid_entry = NULL;
        /* Look up the free list and find the best-fit entry */
        LIST_FOREACH(entry, &pool->free_list, next) {
                if (entry->len >= num) {
                        /* Found an exact match */
                        if (entry->len == num) {
                                valid_entry = entry;
                                break;
                        }
                        if (!valid_entry ||
                            valid_entry->len > entry->len)
                                valid_entry = entry;
                }
        }

        /* No entry can satisfy the request, return */
        if (!valid_entry) {
                PMD_INIT_LOG(ERR, "No valid entry found");
                return -ENOMEM;
        }
        /**
         * The entry has exactly as many queues as requested,
         * remove it from the free list.
         */
        if (valid_entry->len == num) {
                LIST_REMOVE(valid_entry, next);
        } else {
                /**
                 * The entry has more queues than requested,
                 * create a new entry for the alloc list and shrink
                 * the base and length of the entry left in the
                 * free list.
                 */
                entry = rte_zmalloc(NULL, sizeof(*entry), 0);
                if (!entry) {
                        PMD_INIT_LOG(ERR,
                                     "Failed to allocate memory for "
                                     "resource pool");
                        return -ENOMEM;
                }
                entry->base = valid_entry->base;
                entry->len = num;
                valid_entry->base += num;
                valid_entry->len -= num;
                valid_entry = entry;
        }

        /* Insert it into alloc list, not sorted */
        LIST_INSERT_HEAD(&pool->alloc_list, valid_entry, next);

        pool->num_free -= valid_entry->len;
        pool->num_alloc += valid_entry->len;

        return valid_entry->base + pool->base;
}

static void
ice_res_pool_destroy(struct ice_res_pool_info *pool)
{
        struct pool_entry *entry, *next_entry;

        if (!pool)
                return;

        for (entry = LIST_FIRST(&pool->alloc_list);
             entry && (next_entry = LIST_NEXT(entry, next), 1);
             entry = next_entry) {
                LIST_REMOVE(entry, next);
                rte_free(entry);
        }

        for (entry = LIST_FIRST(&pool->free_list);
             entry && (next_entry = LIST_NEXT(entry, next), 1);
             entry = next_entry) {
                LIST_REMOVE(entry, next);
                rte_free(entry);
        }

        pool->num_free = 0;
        pool->num_alloc = 0;
        pool->base = 0;
        LIST_INIT(&pool->alloc_list);
        LIST_INIT(&pool->free_list);
}

static void
ice_vsi_config_default_rss(struct ice_aqc_vsi_props *info)
{
        /* Set VSI LUT selection */
        info->q_opt_rss = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI &
                          ICE_AQ_VSI_Q_OPT_RSS_LUT_M;
        /* Set Hash scheme */
        info->q_opt_rss |= ICE_AQ_VSI_Q_OPT_RSS_TPLZ &
                           ICE_AQ_VSI_Q_OPT_RSS_HASH_M;
        /* enable TC */
        info->q_opt_tc = ICE_AQ_VSI_Q_OPT_TC_OVR_M;
}

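/* Map TC0 to the VSI's queues. The admin queue encodes the per-TC queue
 * count as a power of two, so nb_qps is reduced to the power of two
 * given by the lowest set bit of the requested count (rte_bsf32 returns
 * that bit index). E.g. nb_qps = 8 gives bsf = 3 and a contiguous
 * mapping of 2^3 queues starting at vsi->base_queue.
 */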
static enum ice_status
ice_vsi_config_tc_queue_mapping(struct ice_vsi *vsi,
                                struct ice_aqc_vsi_props *info,
                                uint8_t enabled_tcmap)
{
        uint16_t bsf, qp_idx;

        /* Default is TC0 only for now; multi-TC support will be added
         * later. Configure TC and queue mapping parameters: for each
         * enabled TC, allocate qpnum_per_tc queues to its traffic class.
         */
        if (enabled_tcmap != 0x01) {
                PMD_INIT_LOG(ERR, "only TC0 is supported");
                return -ENOTSUP;
        }

        vsi->nb_qps = RTE_MIN(vsi->nb_qps, ICE_MAX_Q_PER_TC);
        bsf = rte_bsf32(vsi->nb_qps);
        /* Adjust the queue number to actual queues that can be applied */
        vsi->nb_qps = 0x1 << bsf;

        qp_idx = 0;
        /* Set tc and queue mapping with VSI */
        info->tc_mapping[0] = rte_cpu_to_le_16((qp_idx <<
                                                ICE_AQ_VSI_TC_Q_OFFSET_S) |
                                               (bsf << ICE_AQ_VSI_TC_Q_NUM_S));

        /* Associate queue number with VSI */
        info->mapping_flags |= rte_cpu_to_le_16(ICE_AQ_VSI_Q_MAP_CONTIG);
        info->q_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
        info->q_mapping[1] = rte_cpu_to_le_16(vsi->nb_qps);
        info->valid_sections |=
                rte_cpu_to_le_16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
        /* Set the info.ingress_table and info.egress_table
         * for UP translate table. Now just set it to 1:1 map by default
         * -- 0b 111 110 101 100 011 010 001 000 == 0xFAC688
         */
#define ICE_TC_QUEUE_TABLE_DFLT 0x00FAC688
        info->ingress_table  = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT);
        info->egress_table   = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT);
        info->outer_up_table = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT);
        return 0;
}

static int
ice_init_mac_address(struct rte_eth_dev *dev)
{
        struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        if (!is_unicast_ether_addr
                ((struct ether_addr *)hw->port_info[0].mac.lan_addr)) {
                PMD_INIT_LOG(ERR, "Invalid MAC address");
                return -EINVAL;
        }

        ether_addr_copy((struct ether_addr *)hw->port_info[0].mac.lan_addr,
                        (struct ether_addr *)hw->port_info[0].mac.perm_addr);

        dev->data->mac_addrs = rte_zmalloc(NULL, sizeof(struct ether_addr), 0);
        if (!dev->data->mac_addrs) {
                PMD_INIT_LOG(ERR,
                             "Failed to allocate memory to store mac address");
                return -ENOMEM;
        }
        /* store it in dev data */
        ether_addr_copy((struct ether_addr *)hw->port_info[0].mac.perm_addr,
                        &dev->data->mac_addrs[0]);
        return 0;
}

/* Enable IRQ0 */
static void
ice_pf_enable_irq0(struct ice_hw *hw)
{
        /* reset the registers */
        ICE_WRITE_REG(hw, PFINT_OICR_ENA, 0);
        ICE_READ_REG(hw, PFINT_OICR);

#ifdef ICE_LSE_SPT
        ICE_WRITE_REG(hw, PFINT_OICR_ENA,
                      (uint32_t)(PFINT_OICR_ENA_INT_ENA_M &
                                 (~PFINT_OICR_LINK_STAT_CHANGE_M)));

        ICE_WRITE_REG(hw, PFINT_OICR_CTL,
                      (0 & PFINT_OICR_CTL_MSIX_INDX_M) |
                      ((0 << PFINT_OICR_CTL_ITR_INDX_S) &
                       PFINT_OICR_CTL_ITR_INDX_M) |
                      PFINT_OICR_CTL_CAUSE_ENA_M);

        ICE_WRITE_REG(hw, PFINT_FW_CTL,
                      (0 & PFINT_FW_CTL_MSIX_INDX_M) |
                      ((0 << PFINT_FW_CTL_ITR_INDX_S) &
                       PFINT_FW_CTL_ITR_INDX_M) |
                      PFINT_FW_CTL_CAUSE_ENA_M);
#else
        ICE_WRITE_REG(hw, PFINT_OICR_ENA, PFINT_OICR_ENA_INT_ENA_M);
#endif

        ICE_WRITE_REG(hw, GLINT_DYN_CTL(0),
                      GLINT_DYN_CTL_INTENA_M |
                      GLINT_DYN_CTL_CLEARPBA_M |
                      GLINT_DYN_CTL_ITR_INDX_M);

        ice_flush(hw);
}

/* Disable IRQ0 */
static void
ice_pf_disable_irq0(struct ice_hw *hw)
{
        /* Disable all interrupt types */
        ICE_WRITE_REG(hw, GLINT_DYN_CTL(0), GLINT_DYN_CTL_WB_ON_ITR_M);
        ice_flush(hw);
}

#ifdef ICE_LSE_SPT
static void
ice_handle_aq_msg(struct rte_eth_dev *dev)
{
        struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct ice_ctl_q_info *cq = &hw->adminq;
        struct ice_rq_event_info event;
        uint16_t pending, opcode;
        int ret;

        event.buf_len = ICE_AQ_MAX_BUF_LEN;
        event.msg_buf = rte_zmalloc(NULL, event.buf_len, 0);
        if (!event.msg_buf) {
                PMD_DRV_LOG(ERR, "Failed to allocate mem");
                return;
        }

        pending = 1;
        while (pending) {
                ret = ice_clean_rq_elem(hw, cq, &event, &pending);

                if (ret != ICE_SUCCESS) {
                        PMD_DRV_LOG(INFO,
                                    "Failed to read msg from AdminQ, "
                                    "adminq_err: %u",
                                    hw->adminq.sq_last_status);
                        break;
                }
                opcode = rte_le_to_cpu_16(event.desc.opcode);

                switch (opcode) {
                case ice_aqc_opc_get_link_status:
                        ret = ice_link_update(dev, 0);
                        if (!ret)
                                _rte_eth_dev_callback_process
                                        (dev, RTE_ETH_EVENT_INTR_LSC, NULL);
                        break;
                default:
                        PMD_DRV_LOG(DEBUG, "Request %u is not supported yet",
                                    opcode);
                        break;
                }
        }
        rte_free(event.msg_buf);
}
#endif

/**
 * Interrupt handler triggered by NIC for handling
 * specific interrupt.
 *
 * @param handle
 *  Pointer to interrupt handle.
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
ice_interrupt_handler(void *param)
{
        struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
        struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t oicr;
        uint32_t reg;
        uint8_t pf_num;
        uint8_t event;
        uint16_t queue;
#ifdef ICE_LSE_SPT
        uint32_t int_fw_ctl;
#endif

        /* Disable interrupt */
        ice_pf_disable_irq0(hw);

        /* read out interrupt causes */
        oicr = ICE_READ_REG(hw, PFINT_OICR);
#ifdef ICE_LSE_SPT
        int_fw_ctl = ICE_READ_REG(hw, PFINT_FW_CTL);
#endif

        /* No interrupt event indicated */
        if (!(oicr & PFINT_OICR_INTEVENT_M)) {
                PMD_DRV_LOG(INFO, "No interrupt event");
                goto done;
        }

#ifdef ICE_LSE_SPT
        if (int_fw_ctl & PFINT_FW_CTL_INTEVENT_M) {
                PMD_DRV_LOG(INFO, "FW_CTL: link state change event");
                ice_handle_aq_msg(dev);
        }
#else
        if (oicr & PFINT_OICR_LINK_STAT_CHANGE_M) {
                PMD_DRV_LOG(INFO, "OICR: link state change event");
                ice_link_update(dev, 0);
        }
#endif

        if (oicr & PFINT_OICR_MAL_DETECT_M) {
                PMD_DRV_LOG(WARNING, "OICR: MDD event");
                reg = ICE_READ_REG(hw, GL_MDET_TX_PQM);
                if (reg & GL_MDET_TX_PQM_VALID_M) {
                        pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >>
                                 GL_MDET_TX_PQM_PF_NUM_S;
                        event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >>
                                GL_MDET_TX_PQM_MAL_TYPE_S;
                        queue = (reg & GL_MDET_TX_PQM_QNUM_M) >>
                                GL_MDET_TX_PQM_QNUM_S;

                        PMD_DRV_LOG(WARNING, "Malicious Driver Detection event "
                                    "%d by PQM on TX queue %d PF# %d",
                                    event, queue, pf_num);
                }

                reg = ICE_READ_REG(hw, GL_MDET_TX_TCLAN);
                if (reg & GL_MDET_TX_TCLAN_VALID_M) {
                        pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >>
                                 GL_MDET_TX_TCLAN_PF_NUM_S;
                        event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >>
                                GL_MDET_TX_TCLAN_MAL_TYPE_S;
                        queue = (reg & GL_MDET_TX_TCLAN_QNUM_M) >>
                                GL_MDET_TX_TCLAN_QNUM_S;

                        PMD_DRV_LOG(WARNING, "Malicious Driver Detection event "
                                    "%d by TCLAN on TX queue %d PF# %d",
                                    event, queue, pf_num);
                }
        }
done:
        /* Enable interrupt */
        ice_pf_enable_irq0(hw);
        rte_intr_enable(dev->intr_handle);
}

/* Initialize SW parameters of PF */
static int
ice_pf_sw_init(struct rte_eth_dev *dev)
{
        struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        int max_qp_num;

        max_qp_num = ice_config_max_queue_pair_num(dev->device->devargs);
        if (max_qp_num > 0)
                pf->lan_nb_qp_max = (uint16_t)max_qp_num;
        else
                pf->lan_nb_qp_max =
                        (uint16_t)RTE_MIN(hw->func_caps.common_cap.num_txq,
                                          hw->func_caps.common_cap.num_rxq);

        pf->lan_nb_qps = pf->lan_nb_qp_max;

        return 0;
}

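/* Create and configure a VSI of the given type (only ICE_VSI_PF is
 * supported so far): fill the VSI context (RSS, VLAN, TC/queue mapping),
 * reserve MSI-X vectors from the PF pool, add the VSI through the admin
 * queue and hook it into the Tx scheduler via ice_cfg_vsi_lan().
 */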
static struct ice_vsi *
ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type)
{
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        struct ice_vsi *vsi = NULL;
        struct ice_vsi_ctx vsi_ctx;
        int ret;
        uint16_t max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
        uint8_t tc_bitmap = 0x1;

        /* hw->num_lports = 1 in NIC mode */
        vsi = rte_zmalloc(NULL, sizeof(struct ice_vsi), 0);
        if (!vsi)
                return NULL;

        vsi->idx = pf->next_vsi_idx;
        pf->next_vsi_idx++;
        vsi->type = type;
        vsi->adapter = ICE_PF_TO_ADAPTER(pf);
        vsi->max_macaddrs = ICE_NUM_MACADDR_MAX;
        vsi->vlan_anti_spoof_on = 0;
        vsi->vlan_filter_on = 1;
        TAILQ_INIT(&vsi->mac_list);
        TAILQ_INIT(&vsi->vlan_list);

        memset(&vsi_ctx, 0, sizeof(vsi_ctx));
        /* base_queue is used in the queue mapping of the VSI add/update
         * command. Assume vsi->base_queue is 0 for now; SRIOV and VMDQ
         * cases are not considered in this first stage. Main VSI only.
         */
        vsi->base_queue = 0;
        switch (type) {
        case ICE_VSI_PF:
                vsi->nb_qps = pf->lan_nb_qps;
                ice_vsi_config_default_rss(&vsi_ctx.info);
                vsi_ctx.alloc_from_pool = true;
                vsi_ctx.flags = ICE_AQ_VSI_TYPE_PF;
                /* switch_id is queried by get_switch_config aq, which is done
                 * by ice_init_hw
                 */
                vsi_ctx.info.sw_id = hw->port_info->sw_id;
                vsi_ctx.info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
                /* Allow all untagged or tagged packets */
                vsi_ctx.info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL;
                vsi_ctx.info.vlan_flags |= ICE_AQ_VSI_VLAN_EMOD_NOTHING;
                vsi_ctx.info.q_opt_rss = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF |
                                         ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
                /* Enable VLAN/UP trip */
                ret = ice_vsi_config_tc_queue_mapping(vsi,
                                                      &vsi_ctx.info,
                                                      ICE_DEFAULT_TCMAP);
                if (ret) {
                        PMD_INIT_LOG(ERR,
                                     "tc queue mapping with vsi failed, "
                                     "err = %d",
                                     ret);
                        goto fail_mem;
                }

                break;
        default:
                /* for other types of VSI */
                PMD_INIT_LOG(ERR, "other types of VSI not supported");
                goto fail_mem;
        }

        /* VF has MSIX interrupt in VF range, don't allocate here */
        if (type == ICE_VSI_PF) {
                ret = ice_res_pool_alloc(&pf->msix_pool,
                                         RTE_MIN(vsi->nb_qps,
                                                 RTE_MAX_RXTX_INTR_VEC_ID));
                if (ret < 0) {
                        PMD_INIT_LOG(ERR, "VSI MAIN %d get heap failed %d",
                                     vsi->vsi_id, ret);
                }
                vsi->msix_intr = ret;
                vsi->nb_msix = RTE_MIN(vsi->nb_qps, RTE_MAX_RXTX_INTR_VEC_ID);
        } else {
                vsi->msix_intr = 0;
                vsi->nb_msix = 0;
        }
        ret = ice_add_vsi(hw, vsi->idx, &vsi_ctx, NULL);
        if (ret != ICE_SUCCESS) {
                PMD_INIT_LOG(ERR, "add vsi failed, err = %d", ret);
                goto fail_mem;
        }
        /* store VSI information in the SW structure */
        vsi->vsi_id = vsi_ctx.vsi_num;
        vsi->info = vsi_ctx.info;
        pf->vsis_allocated = vsi_ctx.vsis_allocd;
        pf->vsis_unallocated = vsi_ctx.vsis_unallocated;

        /* At the beginning, only TC0. */
        /* What we need here is the maximum number of Tx queues.
         * Currently vsi->nb_qps means that.
         * Correct it if there is any change.
         */
        max_txqs[0] = vsi->nb_qps;
        ret = ice_cfg_vsi_lan(hw->port_info, vsi->idx,
                              tc_bitmap, max_txqs);
        if (ret != ICE_SUCCESS)
                PMD_INIT_LOG(ERR, "Failed to config vsi sched");

        return vsi;
fail_mem:
        rte_free(vsi);
        pf->next_vsi_idx--;
        return NULL;
}

static int
ice_pf_setup(struct ice_pf *pf)
{
        struct ice_vsi *vsi;

        /* Clear all stats counters */
        pf->offset_loaded = FALSE;
        memset(&pf->stats, 0, sizeof(struct ice_hw_port_stats));
        memset(&pf->stats_offset, 0, sizeof(struct ice_hw_port_stats));
        memset(&pf->internal_stats, 0, sizeof(struct ice_eth_stats));
        memset(&pf->internal_stats_offset, 0, sizeof(struct ice_eth_stats));

        vsi = ice_setup_vsi(pf, ICE_VSI_PF);
        if (!vsi) {
                PMD_INIT_LOG(ERR, "Failed to add vsi for PF");
                return -EINVAL;
        }

        pf->main_vsi = vsi;

        return 0;
}

static int
ice_dev_init(struct rte_eth_dev *dev)
{
        struct rte_pci_device *pci_dev;
        struct rte_intr_handle *intr_handle;
        struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        int ret;

        dev->dev_ops = &ice_eth_dev_ops;

        pci_dev = RTE_DEV_TO_PCI(dev->device);
        intr_handle = &pci_dev->intr_handle;

        pf->adapter = ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
        pf->adapter->eth_dev = dev;
        pf->dev_data = dev->data;
        hw->back = pf->adapter;
        hw->hw_addr = (uint8_t *)pci_dev->mem_resource[0].addr;
        hw->vendor_id = pci_dev->id.vendor_id;
        hw->device_id = pci_dev->id.device_id;
        hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
        hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
        hw->bus.device = pci_dev->addr.devid;
        hw->bus.func = pci_dev->addr.function;

        ice_init_controlq_parameter(hw);

        ret = ice_init_hw(hw);
        if (ret) {
                PMD_INIT_LOG(ERR, "Failed to initialize HW");
                return -EINVAL;
        }

        PMD_INIT_LOG(INFO, "FW %d.%d.%05d API %d.%d",
                     hw->fw_maj_ver, hw->fw_min_ver, hw->fw_build,
                     hw->api_maj_ver, hw->api_min_ver);

        ice_pf_sw_init(dev);
        ret = ice_init_mac_address(dev);
        if (ret) {
                PMD_INIT_LOG(ERR, "Failed to initialize mac address");
                goto err_init_mac;
        }

        ret = ice_res_pool_init(&pf->msix_pool, 1,
                                hw->func_caps.common_cap.num_msix_vectors - 1);
        if (ret) {
                PMD_INIT_LOG(ERR, "Failed to init MSIX pool");
                goto err_msix_pool_init;
        }

        ret = ice_pf_setup(pf);
        if (ret) {
                PMD_INIT_LOG(ERR, "Failed to setup PF");
                goto err_pf_setup;
        }

        /* register callback func to eal lib */
        rte_intr_callback_register(intr_handle,
                                   ice_interrupt_handler, dev);

        ice_pf_enable_irq0(hw);

        /* enable uio intr after callback register */
        rte_intr_enable(intr_handle);

        return 0;

err_pf_setup:
        ice_res_pool_destroy(&pf->msix_pool);
err_msix_pool_init:
        rte_free(dev->data->mac_addrs);
err_init_mac:
        ice_sched_cleanup_all(hw);
        rte_free(hw->port_info);
        ice_shutdown_all_ctrlq(hw);

        return ret;
}

static int
ice_release_vsi(struct ice_vsi *vsi)
{
        struct ice_hw *hw;
        struct ice_vsi_ctx vsi_ctx;
        enum ice_status ret;

        if (!vsi)
                return 0;

        hw = ICE_VSI_TO_HW(vsi);

        memset(&vsi_ctx, 0, sizeof(vsi_ctx));

        vsi_ctx.vsi_num = vsi->vsi_id;
        vsi_ctx.info = vsi->info;
        ret = ice_free_vsi(hw, vsi->idx, &vsi_ctx, false, NULL);
        if (ret != ICE_SUCCESS) {
                PMD_INIT_LOG(ERR, "Failed to free vsi by aq, %u", vsi->vsi_id);
                rte_free(vsi);
                return -1;
        }

        rte_free(vsi);
        return 0;
}

static void
ice_dev_stop(struct rte_eth_dev *dev)
{
        struct rte_eth_dev_data *data = dev->data;
        struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
        struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
        uint16_t i;

        /* avoid stopping again */
        if (pf->adapter_stopped)
                return;

        /* stop and clear all Rx queues */
        for (i = 0; i < data->nb_rx_queues; i++)
                ice_rx_queue_stop(dev, i);

        /* stop and clear all Tx queues */
        for (i = 0; i < data->nb_tx_queues; i++)
                ice_tx_queue_stop(dev, i);

        /* Clear all queues and release mbufs */
        ice_clear_queues(dev);

        /* Clean datapath event and queue/vec mapping */
        rte_intr_efd_disable(intr_handle);
        if (intr_handle->intr_vec) {
                rte_free(intr_handle->intr_vec);
                intr_handle->intr_vec = NULL;
        }

        pf->adapter_stopped = true;
}

static void
ice_dev_close(struct rte_eth_dev *dev)
{
        struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        ice_dev_stop(dev);

        /* release all queue resources */
        ice_free_queues(dev);

        ice_res_pool_destroy(&pf->msix_pool);
        ice_release_vsi(pf->main_vsi);
        /* ice_dev_uninit() also releases main_vsi; clear the pointer so
         * the VSI is not freed twice.
         */
        pf->main_vsi = NULL;

        ice_shutdown_all_ctrlq(hw);
}

static int
ice_dev_uninit(struct rte_eth_dev *dev)
{
        struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

        ice_dev_close(dev);

        dev->dev_ops = NULL;
        dev->rx_pkt_burst = NULL;
        dev->tx_pkt_burst = NULL;

        rte_free(dev->data->mac_addrs);
        dev->data->mac_addrs = NULL;

        /* disable uio intr before callback unregister */
        rte_intr_disable(intr_handle);

        /* unregister callback func from eal lib */
        rte_intr_callback_unregister(intr_handle,
                                     ice_interrupt_handler, dev);

        ice_release_vsi(pf->main_vsi);
        ice_sched_cleanup_all(hw);
        rte_free(hw->port_info);
        ice_shutdown_all_ctrlq(hw);

        return 0;
}

static int
ice_dev_configure(struct rte_eth_dev *dev)
{
        struct ice_adapter *ad =
                ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);

        /* Initialize to TRUE. If any Rx queue doesn't meet the
         * bulk allocation or vector Rx preconditions we will reset it.
         */
        ad->rx_bulk_alloc_allowed = true;
        ad->tx_simple_allowed = true;

        return 0;
}

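/* Program the RSS hash key and lookup table for the main VSI. The key
 * comes from rte_eth_rss_conf or is randomly generated; the LUT spreads
 * traffic round-robin across the configured Rx queues, e.g. with 4 Rx
 * queues the table reads 0, 1, 2, 3, 0, 1, ...
 */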
static int ice_init_rss(struct ice_pf *pf)
{
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        struct ice_vsi *vsi = pf->main_vsi;
        struct rte_eth_dev *dev = pf->adapter->eth_dev;
        struct rte_eth_rss_conf *rss_conf;
        struct ice_aqc_get_set_rss_keys key;
        uint16_t i, nb_q;
        int ret = 0;

        rss_conf = &dev->data->dev_conf.rx_adv_conf.rss_conf;
        nb_q = dev->data->nb_rx_queues;
        vsi->rss_key_size = ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE;
        vsi->rss_lut_size = hw->func_caps.common_cap.rss_table_size;

        if (!vsi->rss_key)
                vsi->rss_key = rte_zmalloc(NULL,
                                           vsi->rss_key_size, 0);
        if (!vsi->rss_key)
                return -ENOMEM;
        if (!vsi->rss_lut)
                vsi->rss_lut = rte_zmalloc(NULL,
                                           vsi->rss_lut_size, 0);
        if (!vsi->rss_lut)
                return -ENOMEM;

        /* configure RSS key */
        if (!rss_conf->rss_key) {
                /* Calculate the default hash key */
                for (i = 0; i < vsi->rss_key_size; i++)
                        vsi->rss_key[i] = (uint8_t)rte_rand();
        } else {
                rte_memcpy(vsi->rss_key, rss_conf->rss_key,
                           RTE_MIN(rss_conf->rss_key_len,
                                   vsi->rss_key_size));
        }
        rte_memcpy(key.standard_rss_key, vsi->rss_key, vsi->rss_key_size);
        ret = ice_aq_set_rss_key(hw, vsi->idx, &key);
        if (ret)
                return -EINVAL;

        /* init RSS LUT table */
        for (i = 0; i < vsi->rss_lut_size; i++)
                vsi->rss_lut[i] = i % nb_q;

        ret = ice_aq_set_rss_lut(hw, vsi->idx,
                                 ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF,
                                 vsi->rss_lut, vsi->rss_lut_size);
        if (ret)
                return -EINVAL;

        return 0;
}

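/* Start the device: program all Tx then Rx queue contexts in hardware,
 * initialize RSS, mask in the link events we care about and refresh the
 * link status. Any queue that fails to start rolls back the ones
 * already started.
 */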
static int
ice_dev_start(struct rte_eth_dev *dev)
{
        struct rte_eth_dev_data *data = dev->data;
        struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        uint16_t nb_rxq = 0;
        uint16_t nb_txq, i;
        int ret;

        /* program Tx queues' context in hardware */
        for (nb_txq = 0; nb_txq < data->nb_tx_queues; nb_txq++) {
                ret = ice_tx_queue_start(dev, nb_txq);
                if (ret) {
                        PMD_DRV_LOG(ERR, "fail to start Tx queue %u", nb_txq);
                        goto tx_err;
                }
        }

        /* program Rx queues' context in hardware */
        for (nb_rxq = 0; nb_rxq < data->nb_rx_queues; nb_rxq++) {
                ret = ice_rx_queue_start(dev, nb_rxq);
                if (ret) {
                        PMD_DRV_LOG(ERR, "fail to start Rx queue %u", nb_rxq);
                        goto rx_err;
                }
        }

        ret = ice_init_rss(pf);
        if (ret) {
                PMD_DRV_LOG(ERR, "Failed to enable rss for PF");
                goto rx_err;
        }

        ret = ice_aq_set_event_mask(hw, hw->port_info->lport,
                                    ((u16)(ICE_AQ_LINK_EVENT_LINK_FAULT |
                                     ICE_AQ_LINK_EVENT_PHY_TEMP_ALARM |
                                     ICE_AQ_LINK_EVENT_EXCESSIVE_ERRORS |
                                     ICE_AQ_LINK_EVENT_SIGNAL_DETECT |
                                     ICE_AQ_LINK_EVENT_AN_COMPLETED |
                                     ICE_AQ_LINK_EVENT_PORT_TX_SUSPENDED)),
                                     NULL);
        if (ret != ICE_SUCCESS)
                PMD_DRV_LOG(WARNING, "Fail to set phy mask");

        /* Call the get_link_info aq command to enable/disable LSE */
        ice_link_update(dev, 0);

        pf->adapter_stopped = false;

        return 0;

        /* stop the started queues if we failed to start all queues */
rx_err:
        for (i = 0; i < nb_rxq; i++)
                ice_rx_queue_stop(dev, i);
tx_err:
        for (i = 0; i < nb_txq; i++)
                ice_tx_queue_stop(dev, i);

        return -EIO;
}

static int
ice_dev_reset(struct rte_eth_dev *dev)
{
        int ret;

        if (dev->data->sriov.active)
                return -ENOTSUP;

        ret = ice_dev_uninit(dev);
        if (ret) {
                PMD_INIT_LOG(ERR, "failed to uninit device, status = %d", ret);
                return -ENXIO;
        }

        ret = ice_dev_init(dev);
        if (ret) {
                PMD_INIT_LOG(ERR, "failed to init device, status = %d", ret);
                return -ENXIO;
        }

        return 0;
}

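/* Report device capabilities. Offload capabilities are all zero at this
 * stage of the driver; queue limits come from the main VSI.
 */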
static void
ice_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
        struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct ice_vsi *vsi = pf->main_vsi;
        struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);

        dev_info->min_rx_bufsize = ICE_BUF_SIZE_MIN;
        dev_info->max_rx_pktlen = ICE_FRAME_SIZE_MAX;
        dev_info->max_rx_queues = vsi->nb_qps;
        dev_info->max_tx_queues = vsi->nb_qps;
        dev_info->max_mac_addrs = vsi->max_macaddrs;
        dev_info->max_vfs = pci_dev->max_vfs;

        dev_info->rx_offload_capa = 0;
        dev_info->tx_offload_capa = 0;
        dev_info->rx_queue_offload_capa = 0;
        dev_info->tx_queue_offload_capa = 0;

        dev_info->reta_size = hw->func_caps.common_cap.rss_table_size;
        dev_info->hash_key_size = (VSIQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);

        dev_info->speed_capa = ETH_LINK_SPEED_10M |
                               ETH_LINK_SPEED_100M |
                               ETH_LINK_SPEED_1G |
                               ETH_LINK_SPEED_2_5G |
                               ETH_LINK_SPEED_5G |
                               ETH_LINK_SPEED_10G |
                               ETH_LINK_SPEED_20G |
                               ETH_LINK_SPEED_25G |
                               ETH_LINK_SPEED_40G;

        dev_info->nb_rx_queues = dev->data->nb_rx_queues;
        dev_info->nb_tx_queues = dev->data->nb_tx_queues;

        dev_info->default_rxportconf.burst_size = ICE_RX_MAX_BURST;
        dev_info->default_txportconf.burst_size = ICE_TX_MAX_BURST;
        dev_info->default_rxportconf.nb_queues = 1;
        dev_info->default_txportconf.nb_queues = 1;
        dev_info->default_rxportconf.ring_size = ICE_BUF_SIZE_MIN;
        dev_info->default_txportconf.ring_size = ICE_BUF_SIZE_MIN;
}

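/* Link status readers/writers race with the interrupt handler, so the
 * 8-byte rte_eth_link is copied with a 64-bit compare-and-set. This
 * relies on struct rte_eth_link fitting in a single uint64_t.
 */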
static inline int
ice_atomic_read_link_status(struct rte_eth_dev *dev,
                            struct rte_eth_link *link)
{
        struct rte_eth_link *dst = link;
        struct rte_eth_link *src = &dev->data->dev_link;

        if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
                                *(uint64_t *)src) == 0)
                return -1;

        return 0;
}

static inline int
ice_atomic_write_link_status(struct rte_eth_dev *dev,
                             struct rte_eth_link *link)
{
        struct rte_eth_link *dst = &dev->data->dev_link;
        struct rte_eth_link *src = link;

        if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
                                *(uint64_t *)src) == 0)
                return -1;

        return 0;
}

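/* Query link status through the get_link_info admin queue command,
 * optionally polling up to MAX_REPEAT_TIME * CHECK_INTERVAL ms for the
 * link to come up, then translate the AQ speed into an ETH_SPEED_NUM_*
 * value. Returns 0 if the status changed, -1 otherwise.
 */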
static int
ice_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
#define CHECK_INTERVAL 100  /* 100ms */
#define MAX_REPEAT_TIME 10  /* 1s (10 * 100ms) in total */
        struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct ice_link_status link_status;
        struct rte_eth_link link, old;
        int status;
        unsigned int rep_cnt = MAX_REPEAT_TIME;
        bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false;

        memset(&link, 0, sizeof(link));
        memset(&old, 0, sizeof(old));
        memset(&link_status, 0, sizeof(link_status));
        ice_atomic_read_link_status(dev, &old);

        do {
                /* Get link status information from hardware */
                status = ice_aq_get_link_info(hw->port_info, enable_lse,
                                              &link_status, NULL);
                if (status != ICE_SUCCESS) {
                        link.link_speed = ETH_SPEED_NUM_100M;
                        link.link_duplex = ETH_LINK_FULL_DUPLEX;
                        PMD_DRV_LOG(ERR, "Failed to get link info");
                        goto out;
                }

                link.link_status = link_status.link_info & ICE_AQ_LINK_UP;
                if (!wait_to_complete || link.link_status)
                        break;

                rte_delay_ms(CHECK_INTERVAL);
        } while (--rep_cnt);

        if (!link.link_status)
                goto out;

        /* Full-duplex operation at all supported speeds */
        link.link_duplex = ETH_LINK_FULL_DUPLEX;

        /* Parse the link status */
        switch (link_status.link_speed) {
        case ICE_AQ_LINK_SPEED_10MB:
                link.link_speed = ETH_SPEED_NUM_10M;
                break;
        case ICE_AQ_LINK_SPEED_100MB:
                link.link_speed = ETH_SPEED_NUM_100M;
                break;
        case ICE_AQ_LINK_SPEED_1000MB:
                link.link_speed = ETH_SPEED_NUM_1G;
                break;
        case ICE_AQ_LINK_SPEED_2500MB:
                link.link_speed = ETH_SPEED_NUM_2_5G;
                break;
        case ICE_AQ_LINK_SPEED_5GB:
                link.link_speed = ETH_SPEED_NUM_5G;
                break;
        case ICE_AQ_LINK_SPEED_10GB:
                link.link_speed = ETH_SPEED_NUM_10G;
                break;
        case ICE_AQ_LINK_SPEED_20GB:
                link.link_speed = ETH_SPEED_NUM_20G;
                break;
        case ICE_AQ_LINK_SPEED_25GB:
                link.link_speed = ETH_SPEED_NUM_25G;
                break;
        case ICE_AQ_LINK_SPEED_40GB:
                link.link_speed = ETH_SPEED_NUM_40G;
                break;
        case ICE_AQ_LINK_SPEED_UNKNOWN:
        default:
                PMD_DRV_LOG(ERR, "Unknown link speed");
                link.link_speed = ETH_SPEED_NUM_NONE;
                break;
        }

        link.link_autoneg = !(dev->data->dev_conf.link_speeds &
                              ETH_LINK_SPEED_FIXED);

out:
        ice_atomic_write_link_status(dev, &link);
        if (link.link_status == old.link_status)
                return -1;

        return 0;
}

static int
ice_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
              struct rte_pci_device *pci_dev)
{
        return rte_eth_dev_pci_generic_probe(pci_dev,
                                             sizeof(struct ice_adapter),
                                             ice_dev_init);
}

static int
ice_pci_remove(struct rte_pci_device *pci_dev)
{
        return rte_eth_dev_pci_generic_remove(pci_dev, ice_dev_uninit);
}

static struct rte_pci_driver rte_ice_pmd = {
        .id_table = pci_id_ice_map,
        .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
                     RTE_PCI_DRV_IOVA_AS_VA,
        .probe = ice_pci_probe,
        .remove = ice_pci_remove,
};

/**
 * Driver initialization routine.
 * Invoked once at EAL init time.
 * Register itself as the [Poll Mode] Driver of PCI devices.
 */
RTE_PMD_REGISTER_PCI(net_ice, rte_ice_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_ice, pci_id_ice_map);
RTE_PMD_REGISTER_KMOD_DEP(net_ice, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(net_ice,
                              ICE_MAX_QP_NUM "=<int>");

RTE_INIT(ice_init_log)
{
        ice_logtype_init = rte_log_register("pmd.net.ice.init");
        if (ice_logtype_init >= 0)
                rte_log_set_level(ice_logtype_init, RTE_LOG_NOTICE);
        ice_logtype_driver = rte_log_register("pmd.net.ice.driver");
        if (ice_logtype_driver >= 0)
                rte_log_set_level(ice_logtype_driver, RTE_LOG_NOTICE);
}