net/ice: add queue config in DCF
drivers/net/ice/ice_dcf_ethdev.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020 Intel Corporation
 */

#include <errno.h>
#include <stdbool.h>
#include <sys/types.h>
#include <sys/ioctl.h>
#include <unistd.h>

#include <rte_interrupts.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_atomic.h>
#include <rte_eal.h>
#include <rte_ether.h>
#include <rte_ethdev_pci.h>
#include <rte_kvargs.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_dev.h>

#include <iavf_devids.h>

#include "ice_generic_flow.h"
#include "ice_dcf_ethdev.h"
#include "ice_rxtx.h"

static uint16_t
ice_dcf_recv_pkts(__rte_unused void *rx_queue,
                  __rte_unused struct rte_mbuf **bufs,
                  __rte_unused uint16_t nb_pkts)
{
        return 0;
}

static uint16_t
ice_dcf_xmit_pkts(__rte_unused void *tx_queue,
                  __rte_unused struct rte_mbuf **bufs,
                  __rte_unused uint16_t nb_pkts)
{
        return 0;
}

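/*
 * Derive the Rx buffer size and maximum packet length for one queue,
 * validate them against the jumbo-frame configuration, flag scattered Rx
 * when needed, then program the queue tail register so the hardware sees
 * the full descriptor ring.
 */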
static int
ice_dcf_init_rxq(struct rte_eth_dev *dev, struct ice_rx_queue *rxq)
{
        struct ice_dcf_adapter *dcf_ad = dev->data->dev_private;
        struct rte_eth_dev_data *dev_data = dev->data;
        struct iavf_hw *hw = &dcf_ad->real_hw.avf;
        uint16_t buf_size, max_pkt_len, len;

        buf_size = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;
        rxq->rx_hdr_len = 0;
        rxq->rx_buf_len = RTE_ALIGN(buf_size, (1 << ICE_RLAN_CTX_DBUF_S));
        len = ICE_SUPPORT_CHAIN_NUM * rxq->rx_buf_len;
        max_pkt_len = RTE_MIN(len, dev->data->dev_conf.rxmode.max_rx_pkt_len);

        /* Check if the jumbo frame and maximum packet length are set
         * correctly.
         */
        if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
                if (max_pkt_len <= RTE_ETHER_MAX_LEN ||
                    max_pkt_len > ICE_FRAME_SIZE_MAX) {
                        PMD_DRV_LOG(ERR, "maximum packet length must be "
                                    "larger than %u and smaller than %u, "
                                    "as jumbo frame is enabled",
                                    (uint32_t)RTE_ETHER_MAX_LEN,
                                    (uint32_t)ICE_FRAME_SIZE_MAX);
                        return -EINVAL;
                }
        } else {
                if (max_pkt_len < RTE_ETHER_MIN_LEN ||
                    max_pkt_len > RTE_ETHER_MAX_LEN) {
                        PMD_DRV_LOG(ERR, "maximum packet length must be "
                                    "larger than %u and smaller than %u, "
                                    "as jumbo frame is disabled",
                                    (uint32_t)RTE_ETHER_MIN_LEN,
                                    (uint32_t)RTE_ETHER_MAX_LEN);
                        return -EINVAL;
                }
        }

        rxq->max_pkt_len = max_pkt_len;
        if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
            (rxq->max_pkt_len + 2 * ICE_VLAN_TAG_SIZE) > buf_size) {
                dev_data->scattered_rx = 1;
        }
        rxq->qrx_tail = hw->hw_addr + IAVF_QRX_TAIL1(rxq->queue_id);
        IAVF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
        IAVF_WRITE_FLUSH(hw);

        return 0;
}

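/*
 * Walk every configured Rx queue, program it through ice_dcf_init_rxq(),
 * then let the common ice code select the Rx/Tx burst functions.
 */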
static int
ice_dcf_init_rx_queues(struct rte_eth_dev *dev)
{
        struct ice_rx_queue **rxq =
                (struct ice_rx_queue **)dev->data->rx_queues;
        int i, ret;

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                if (!rxq[i] || !rxq[i]->q_set)
                        continue;
                ret = ice_dcf_init_rxq(dev, rxq[i]);
                if (ret)
                        return ret;
        }

        ice_set_rx_function(dev);
        ice_set_tx_function(dev);

        return 0;
}

#define IAVF_MISC_VEC_ID                RTE_INTR_VEC_ZERO_OFFSET
#define IAVF_RX_VEC_START               RTE_INTR_VEC_RXTX_OFFSET

#define IAVF_ITR_INDEX_DEFAULT          0
#define IAVF_QUEUE_ITR_INTERVAL_DEFAULT 32 /* 32 us */
#define IAVF_QUEUE_ITR_INTERVAL_MAX     8160 /* 8160 us */

static inline uint16_t
iavf_calc_itr_interval(int16_t interval)
{
        if (interval < 0 || interval > IAVF_QUEUE_ITR_INTERVAL_MAX)
                interval = IAVF_QUEUE_ITR_INTERVAL_DEFAULT;

        /* Convert to a hardware count; each unit written represents 2 us */
        return interval / 2;
}

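/*
 * Map Rx queues to MSI-X vectors.  When Rx interrupts are not requested,
 * every queue shares a single vector (relying on WB_ON_ITR when the VF
 * capability is offered); otherwise the queues are spread round-robin over
 * the available vectors.  The resulting mapping is applied through
 * ice_dcf_config_irq_map().
 */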
static int
ice_dcf_config_rx_queues_irqs(struct rte_eth_dev *dev,
                                     struct rte_intr_handle *intr_handle)
{
        struct ice_dcf_adapter *adapter = dev->data->dev_private;
        struct ice_dcf_hw *hw = &adapter->real_hw;
        uint16_t interval, i;
        int vec;

        if (rte_intr_cap_multiple(intr_handle) &&
            dev->data->dev_conf.intr_conf.rxq) {
                if (rte_intr_efd_enable(intr_handle, dev->data->nb_rx_queues))
                        return -1;
        }

        if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
                intr_handle->intr_vec =
                        rte_zmalloc("intr_vec",
                                    dev->data->nb_rx_queues * sizeof(int), 0);
                if (!intr_handle->intr_vec) {
                        PMD_DRV_LOG(ERR, "Failed to allocate %d rx intr_vec",
                                    dev->data->nb_rx_queues);
                        return -1;
                }
        }

        if (!dev->data->dev_conf.intr_conf.rxq ||
            !rte_intr_dp_is_en(intr_handle)) {
                /* Rx interrupt disabled, map interrupt only for writeback */
                hw->nb_msix = 1;
                if (hw->vf_res->vf_cap_flags &
                    VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) {
                        /* If WB_ON_ITR is supported, enable it */
                        hw->msix_base = IAVF_RX_VEC_START;
                        IAVF_WRITE_REG(&hw->avf,
                                       IAVF_VFINT_DYN_CTLN1(hw->msix_base - 1),
                                       IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK |
                                       IAVF_VFINT_DYN_CTLN1_WB_ON_ITR_MASK);
                } else {
                        /* Without the WB_ON_ITR offload flag, an interrupt
                         * is needed for descriptor write-back.
                         */
                        hw->msix_base = IAVF_MISC_VEC_ID;

                        /* set ITR to max */
                        interval =
                        iavf_calc_itr_interval(IAVF_QUEUE_ITR_INTERVAL_MAX);
                        IAVF_WRITE_REG(&hw->avf, IAVF_VFINT_DYN_CTL01,
                                       IAVF_VFINT_DYN_CTL01_INTENA_MASK |
                                       (IAVF_ITR_INDEX_DEFAULT <<
                                        IAVF_VFINT_DYN_CTL01_ITR_INDX_SHIFT) |
                                       (interval <<
                                        IAVF_VFINT_DYN_CTL01_INTERVAL_SHIFT));
                }
                IAVF_WRITE_FLUSH(&hw->avf);
                /* map all queues to the same interrupt */
                for (i = 0; i < dev->data->nb_rx_queues; i++)
                        hw->rxq_map[hw->msix_base] |= 1 << i;
        } else {
                if (!rte_intr_allow_others(intr_handle)) {
                        hw->nb_msix = 1;
                        hw->msix_base = IAVF_MISC_VEC_ID;
                        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                                hw->rxq_map[hw->msix_base] |= 1 << i;
                                intr_handle->intr_vec[i] = IAVF_MISC_VEC_ID;
                        }
                        PMD_DRV_LOG(DEBUG,
                                    "vector %u is mapped to all Rx queues",
                                    hw->msix_base);
                } else {
                        /* If Rx interrupt is required, and we can use
                         * multiple interrupts, then the vectors start from 1
                         */
                        hw->nb_msix = RTE_MIN(hw->vf_res->max_vectors,
                                              intr_handle->nb_efd);
                        hw->msix_base = IAVF_MISC_VEC_ID;
                        vec = IAVF_MISC_VEC_ID;
                        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                                hw->rxq_map[vec] |= 1 << i;
                                intr_handle->intr_vec[i] = vec++;
                                if (vec >= hw->nb_msix)
                                        vec = IAVF_RX_VEC_START;
                        }
                        PMD_DRV_LOG(DEBUG,
                                    "%u vectors are mapped to %u Rx queues",
                                    hw->nb_msix, dev->data->nb_rx_queues);
                }
        }

        if (ice_dcf_config_irq_map(hw)) {
                PMD_DRV_LOG(ERR, "config interrupt mapping failed");
                return -1;
        }
        return 0;
}

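/*
 * Device start: size the queue pairs from the configured Rx/Tx queue counts,
 * initialize the Rx queues, set up RSS when the PF offers it, configure the
 * queues and their interrupt mapping, and report the link as up.
 */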
static int
ice_dcf_dev_start(struct rte_eth_dev *dev)
{
        struct ice_dcf_adapter *dcf_ad = dev->data->dev_private;
        struct rte_intr_handle *intr_handle = dev->intr_handle;
        struct ice_adapter *ad = &dcf_ad->parent;
        struct ice_dcf_hw *hw = &dcf_ad->real_hw;
        int ret;

        ad->pf.adapter_stopped = 0;

        hw->num_queue_pairs = RTE_MAX(dev->data->nb_rx_queues,
                                      dev->data->nb_tx_queues);

        ret = ice_dcf_init_rx_queues(dev);
        if (ret) {
                PMD_DRV_LOG(ERR, "Failed to init queues");
                return ret;
        }

        if (hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
                ret = ice_dcf_init_rss(hw);
                if (ret) {
                        PMD_DRV_LOG(ERR, "Failed to configure RSS");
                        return ret;
                }
        }

        ret = ice_dcf_configure_queues(hw);
        if (ret) {
                PMD_DRV_LOG(ERR, "Failed to config queues");
                return ret;
        }

        ret = ice_dcf_config_rx_queues_irqs(dev, intr_handle);
        if (ret) {
                PMD_DRV_LOG(ERR, "Failed to config Rx queues' IRQs");
                return ret;
        }

        dev->data->dev_link.link_status = ETH_LINK_UP;

        return 0;
}

static void
ice_dcf_dev_stop(struct rte_eth_dev *dev)
{
        struct ice_dcf_adapter *dcf_ad = dev->data->dev_private;
        struct ice_adapter *ad = &dcf_ad->parent;

        if (ad->pf.adapter_stopped == 1) {
                PMD_DRV_LOG(DEBUG, "Port is already stopped");
                return;
        }

        dev->data->dev_link.link_status = ETH_LINK_DOWN;
        ad->pf.adapter_stopped = 1;
}

static int
ice_dcf_dev_configure(struct rte_eth_dev *dev)
{
        struct ice_dcf_adapter *dcf_ad = dev->data->dev_private;
        struct ice_adapter *ad = &dcf_ad->parent;

        ad->rx_bulk_alloc_allowed = true;
        ad->tx_simple_allowed = true;

        if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
                dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;

        return 0;
}

static int
ice_dcf_dev_info_get(struct rte_eth_dev *dev,
                     struct rte_eth_dev_info *dev_info)
{
        struct ice_dcf_adapter *adapter = dev->data->dev_private;
        struct ice_dcf_hw *hw = &adapter->real_hw;

        dev_info->max_mac_addrs = 1;
        dev_info->max_rx_queues = hw->vsi_res->num_queue_pairs;
        dev_info->max_tx_queues = hw->vsi_res->num_queue_pairs;
        dev_info->min_rx_bufsize = ICE_BUF_SIZE_MIN;
        dev_info->max_rx_pktlen = ICE_FRAME_SIZE_MAX;
        dev_info->hash_key_size = hw->vf_res->rss_key_size;
        dev_info->reta_size = hw->vf_res->rss_lut_size;
        dev_info->flow_type_rss_offloads = ICE_RSS_OFFLOAD_ALL;

        dev_info->rx_offload_capa =
                DEV_RX_OFFLOAD_VLAN_STRIP |
                DEV_RX_OFFLOAD_IPV4_CKSUM |
                DEV_RX_OFFLOAD_UDP_CKSUM |
                DEV_RX_OFFLOAD_TCP_CKSUM |
                DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
                DEV_RX_OFFLOAD_SCATTER |
                DEV_RX_OFFLOAD_JUMBO_FRAME |
                DEV_RX_OFFLOAD_VLAN_FILTER |
                DEV_RX_OFFLOAD_RSS_HASH;
        dev_info->tx_offload_capa =
                DEV_TX_OFFLOAD_VLAN_INSERT |
                DEV_TX_OFFLOAD_IPV4_CKSUM |
                DEV_TX_OFFLOAD_UDP_CKSUM |
                DEV_TX_OFFLOAD_TCP_CKSUM |
                DEV_TX_OFFLOAD_SCTP_CKSUM |
                DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
                DEV_TX_OFFLOAD_TCP_TSO |
                DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
                DEV_TX_OFFLOAD_GRE_TNL_TSO |
                DEV_TX_OFFLOAD_IPIP_TNL_TSO |
                DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
                DEV_TX_OFFLOAD_MULTI_SEGS;

        dev_info->default_rxconf = (struct rte_eth_rxconf) {
                .rx_thresh = {
                        .pthresh = ICE_DEFAULT_RX_PTHRESH,
                        .hthresh = ICE_DEFAULT_RX_HTHRESH,
                        .wthresh = ICE_DEFAULT_RX_WTHRESH,
                },
                .rx_free_thresh = ICE_DEFAULT_RX_FREE_THRESH,
                .rx_drop_en = 0,
                .offloads = 0,
        };

        dev_info->default_txconf = (struct rte_eth_txconf) {
                .tx_thresh = {
                        .pthresh = ICE_DEFAULT_TX_PTHRESH,
                        .hthresh = ICE_DEFAULT_TX_HTHRESH,
                        .wthresh = ICE_DEFAULT_TX_WTHRESH,
                },
                .tx_free_thresh = ICE_DEFAULT_TX_FREE_THRESH,
                .tx_rs_thresh = ICE_DEFAULT_TX_RSBIT_THRESH,
                .offloads = 0,
        };

        dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
                .nb_max = ICE_MAX_RING_DESC,
                .nb_min = ICE_MIN_RING_DESC,
                .nb_align = ICE_ALIGN_RING_DESC,
        };

        dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
                .nb_max = ICE_MAX_RING_DESC,
                .nb_min = ICE_MIN_RING_DESC,
                .nb_align = ICE_ALIGN_RING_DESC,
        };

        return 0;
}

static int
ice_dcf_stats_get(__rte_unused struct rte_eth_dev *dev,
                  __rte_unused struct rte_eth_stats *igb_stats)
{
        return 0;
}

static int
ice_dcf_stats_reset(__rte_unused struct rte_eth_dev *dev)
{
        return 0;
}

static int
ice_dcf_dev_promiscuous_enable(__rte_unused struct rte_eth_dev *dev)
{
        return 0;
}

static int
ice_dcf_dev_promiscuous_disable(__rte_unused struct rte_eth_dev *dev)
{
        return 0;
}

static int
ice_dcf_dev_allmulticast_enable(__rte_unused struct rte_eth_dev *dev)
{
        return 0;
}

static int
ice_dcf_dev_allmulticast_disable(__rte_unused struct rte_eth_dev *dev)
{
        return 0;
}

static int
ice_dcf_dev_filter_ctrl(struct rte_eth_dev *dev,
                        enum rte_filter_type filter_type,
                        enum rte_filter_op filter_op,
                        void *arg)
{
        int ret = 0;

        if (!dev)
                return -EINVAL;

        switch (filter_type) {
        case RTE_ETH_FILTER_GENERIC:
                if (filter_op != RTE_ETH_FILTER_GET)
                        return -EINVAL;
                *(const void **)arg = &ice_flow_ops;
                break;

        default:
                PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
                            filter_type);
                ret = -EINVAL;
                break;
        }

        return ret;
}

static void
ice_dcf_dev_close(struct rte_eth_dev *dev)
{
        struct ice_dcf_adapter *adapter = dev->data->dev_private;

        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return;

        dev->dev_ops = NULL;
        dev->rx_pkt_burst = NULL;
        dev->tx_pkt_burst = NULL;

        ice_dcf_uninit_parent_adapter(dev);
        ice_dcf_uninit_hw(dev, &adapter->real_hw);
}

static int
ice_dcf_link_update(__rte_unused struct rte_eth_dev *dev,
                    __rte_unused int wait_to_complete)
{
        return 0;
}

static const struct eth_dev_ops ice_dcf_eth_dev_ops = {
        .dev_start               = ice_dcf_dev_start,
        .dev_stop                = ice_dcf_dev_stop,
        .dev_close               = ice_dcf_dev_close,
        .dev_configure           = ice_dcf_dev_configure,
        .dev_infos_get           = ice_dcf_dev_info_get,
        .rx_queue_setup          = ice_rx_queue_setup,
        .tx_queue_setup          = ice_tx_queue_setup,
        .rx_queue_release        = ice_rx_queue_release,
        .tx_queue_release        = ice_tx_queue_release,
        .link_update             = ice_dcf_link_update,
        .stats_get               = ice_dcf_stats_get,
        .stats_reset             = ice_dcf_stats_reset,
        .promiscuous_enable      = ice_dcf_dev_promiscuous_enable,
        .promiscuous_disable     = ice_dcf_dev_promiscuous_disable,
        .allmulticast_enable     = ice_dcf_dev_allmulticast_enable,
        .allmulticast_disable    = ice_dcf_dev_allmulticast_disable,
        .filter_ctrl             = ice_dcf_dev_filter_ctrl,
};

static int
ice_dcf_dev_init(struct rte_eth_dev *eth_dev)
{
        struct ice_dcf_adapter *adapter = eth_dev->data->dev_private;

        eth_dev->dev_ops = &ice_dcf_eth_dev_ops;
        eth_dev->rx_pkt_burst = ice_dcf_recv_pkts;
        eth_dev->tx_pkt_burst = ice_dcf_xmit_pkts;

        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;

        eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;

        adapter->real_hw.vc_event_msg_cb = ice_dcf_handle_pf_event_msg;
        if (ice_dcf_init_hw(eth_dev, &adapter->real_hw) != 0) {
                PMD_INIT_LOG(ERR, "Failed to init DCF hardware");
                return -1;
        }

        if (ice_dcf_init_parent_adapter(eth_dev) != 0) {
                PMD_INIT_LOG(ERR, "Failed to init DCF parent adapter");
                ice_dcf_uninit_hw(eth_dev, &adapter->real_hw);
                return -1;
        }

        return 0;
}

static int
ice_dcf_dev_uninit(struct rte_eth_dev *eth_dev)
{
        ice_dcf_dev_close(eth_dev);

        return 0;
}

static int
ice_dcf_cap_check_handler(__rte_unused const char *key,
                          const char *value, __rte_unused void *opaque)
{
        if (strcmp(value, "dcf"))
                return -1;

        return 0;
}

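/*
 * Return 1 only when the devargs contain the key/value pair "cap=dcf";
 * eth_ice_dcf_pci_probe() uses this to skip devices that did not ask for
 * DCF mode.
 */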
static int
ice_dcf_cap_selected(struct rte_devargs *devargs)
{
        struct rte_kvargs *kvlist;
        const char *key = "cap";
        int ret = 0;

        if (devargs == NULL)
                return 0;

        kvlist = rte_kvargs_parse(devargs->args, NULL);
        if (kvlist == NULL)
                return 0;

        if (!rte_kvargs_count(kvlist, key))
                goto exit;

        /* dcf capability selected when there's a key-value pair: cap=dcf */
        if (rte_kvargs_process(kvlist, key,
                               ice_dcf_cap_check_handler, NULL) < 0)
                goto exit;

        ret = 1;

exit:
        rte_kvargs_free(kvlist);
        return ret;
}

static int eth_ice_dcf_pci_probe(__rte_unused struct rte_pci_driver *pci_drv,
                             struct rte_pci_device *pci_dev)
{
        if (!ice_dcf_cap_selected(pci_dev->device.devargs))
                return 1;

        return rte_eth_dev_pci_generic_probe(pci_dev,
                                             sizeof(struct ice_dcf_adapter),
                                             ice_dcf_dev_init);
}

static int eth_ice_dcf_pci_remove(struct rte_pci_device *pci_dev)
{
        return rte_eth_dev_pci_generic_remove(pci_dev, ice_dcf_dev_uninit);
}

static const struct rte_pci_id pci_id_ice_dcf_map[] = {
        { RTE_PCI_DEVICE(IAVF_INTEL_VENDOR_ID, IAVF_DEV_ID_ADAPTIVE_VF) },
        { .vendor_id = 0, /* sentinel */ },
};

static struct rte_pci_driver rte_ice_dcf_pmd = {
        .id_table = pci_id_ice_dcf_map,
        .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
        .probe = eth_ice_dcf_pci_probe,
        .remove = eth_ice_dcf_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_ice_dcf, rte_ice_dcf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_ice_dcf, pci_id_ice_dcf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_ice_dcf, "* igb_uio | vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(net_ice_dcf, "cap=dcf");
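
/*
 * Usage sketch (illustrative): the DCF PMD registers the same adaptive VF
 * device ID as the default VF driver, so it only claims a device whose
 * devargs include "cap=dcf", e.g. an EAL device argument such as
 * "<DBDF>,cap=dcf".  Devices probed without that devarg are declined here
 * and left for the default VF probe path.
 */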