net/ice: support QoS config VF bandwidth in DCF
drivers/net/ice/ice_dcf_ethdev.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2020 Intel Corporation
3  */
4
5 #include <errno.h>
6 #include <stdbool.h>
7 #include <sys/types.h>
8 #include <unistd.h>
9
10 #include <rte_interrupts.h>
11 #include <rte_debug.h>
12 #include <rte_pci.h>
13 #include <rte_atomic.h>
14 #include <rte_eal.h>
15 #include <rte_ether.h>
16 #include <ethdev_pci.h>
17 #include <rte_kvargs.h>
18 #include <rte_malloc.h>
19 #include <rte_memzone.h>
20 #include <rte_dev.h>
21
22 #include <iavf_devids.h>
23
24 #include "ice_generic_flow.h"
25 #include "ice_dcf_ethdev.h"
26 #include "ice_rxtx.h"
27
28 static int
29 ice_dcf_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
30                                 struct rte_eth_udp_tunnel *udp_tunnel);
31 static int
32 ice_dcf_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
33                                 struct rte_eth_udp_tunnel *udp_tunnel);
34
35 static uint16_t
36 ice_dcf_recv_pkts(__rte_unused void *rx_queue,
37                   __rte_unused struct rte_mbuf **bufs,
38                   __rte_unused uint16_t nb_pkts)
39 {
40         return 0;
41 }
42
43 static uint16_t
44 ice_dcf_xmit_pkts(__rte_unused void *tx_queue,
45                   __rte_unused struct rte_mbuf **bufs,
46                   __rte_unused uint16_t nb_pkts)
47 {
48         return 0;
49 }
50
51 static int
52 ice_dcf_init_rxq(struct rte_eth_dev *dev, struct ice_rx_queue *rxq)
53 {
54         struct ice_dcf_adapter *dcf_ad = dev->data->dev_private;
55         struct rte_eth_dev_data *dev_data = dev->data;
56         struct iavf_hw *hw = &dcf_ad->real_hw.avf;
57         uint16_t buf_size, max_pkt_len;
58
59         buf_size = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;
60         rxq->rx_hdr_len = 0;
61         rxq->rx_buf_len = RTE_ALIGN(buf_size, (1 << ICE_RLAN_CTX_DBUF_S));
62         max_pkt_len = RTE_MIN((uint32_t)
63                               ICE_SUPPORT_CHAIN_NUM * rxq->rx_buf_len,
64                               dev->data->dev_conf.rxmode.max_rx_pkt_len);
65
66         /* Check if the jumbo frame and maximum packet length are set
67          * correctly.
68          */
69         if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
70                 if (max_pkt_len <= ICE_ETH_MAX_LEN ||
71                     max_pkt_len > ICE_FRAME_SIZE_MAX) {
72                         PMD_DRV_LOG(ERR, "maximum packet length must be "
73                                     "larger than %u and smaller than %u, "
74                                     "as jumbo frame is enabled",
75                                     (uint32_t)ICE_ETH_MAX_LEN,
76                                     (uint32_t)ICE_FRAME_SIZE_MAX);
77                         return -EINVAL;
78                 }
79         } else {
80                 if (max_pkt_len < RTE_ETHER_MIN_LEN ||
81                     max_pkt_len > ICE_ETH_MAX_LEN) {
82                         PMD_DRV_LOG(ERR, "maximum packet length must be "
83                                     "larger than %u and smaller than %u, "
84                                     "as jumbo frame is disabled",
85                                     (uint32_t)RTE_ETHER_MIN_LEN,
86                                     (uint32_t)ICE_ETH_MAX_LEN);
87                         return -EINVAL;
88                 }
89         }
90
91         rxq->max_pkt_len = max_pkt_len;
92         if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
93             (rxq->max_pkt_len + 2 * ICE_VLAN_TAG_SIZE) > buf_size) {
94                 dev_data->scattered_rx = 1;
95         }
96         rxq->qrx_tail = hw->hw_addr + IAVF_QRX_TAIL1(rxq->queue_id);
97         IAVF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
98         IAVF_WRITE_FLUSH(hw);
99
100         return 0;
101 }
102
103 static int
104 ice_dcf_init_rx_queues(struct rte_eth_dev *dev)
105 {
106         struct ice_rx_queue **rxq =
107                 (struct ice_rx_queue **)dev->data->rx_queues;
108         int i, ret;
109
110         for (i = 0; i < dev->data->nb_rx_queues; i++) {
111                 if (!rxq[i] || !rxq[i]->q_set)
112                         continue;
113                 ret = ice_dcf_init_rxq(dev, rxq[i]);
114                 if (ret)
115                         return ret;
116         }
117
118         ice_set_rx_function(dev);
119         ice_set_tx_function(dev);
120
121         return 0;
122 }
123
124 #define IAVF_MISC_VEC_ID                RTE_INTR_VEC_ZERO_OFFSET
125 #define IAVF_RX_VEC_START               RTE_INTR_VEC_RXTX_OFFSET
126
127 #define IAVF_ITR_INDEX_DEFAULT          0
128 #define IAVF_QUEUE_ITR_INTERVAL_DEFAULT 32 /* 32 us */
129 #define IAVF_QUEUE_ITR_INTERVAL_MAX     8160 /* 8160 us */
130
131 static inline uint16_t
132 iavf_calc_itr_interval(int16_t interval)
133 {
134         if (interval < 0 || interval > IAVF_QUEUE_ITR_INTERVAL_MAX)
135                 interval = IAVF_QUEUE_ITR_INTERVAL_DEFAULT;
136
137         /* Convert to hardware count, as writing each 1 represents 2 us */
138         return interval / 2;
139 }
140
141 static int
142 ice_dcf_config_rx_queues_irqs(struct rte_eth_dev *dev,
143                                      struct rte_intr_handle *intr_handle)
144 {
145         struct ice_dcf_adapter *adapter = dev->data->dev_private;
146         struct ice_dcf_hw *hw = &adapter->real_hw;
147         uint16_t interval, i;
148         int vec;
149
150         if (rte_intr_cap_multiple(intr_handle) &&
151             dev->data->dev_conf.intr_conf.rxq) {
152                 if (rte_intr_efd_enable(intr_handle, dev->data->nb_rx_queues))
153                         return -1;
154         }
155
156         if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
157                 intr_handle->intr_vec =
158                         rte_zmalloc("intr_vec",
159                                     dev->data->nb_rx_queues * sizeof(int), 0);
160                 if (!intr_handle->intr_vec) {
161                         PMD_DRV_LOG(ERR, "Failed to allocate %d rx intr_vec",
162                                     dev->data->nb_rx_queues);
163                         return -1;
164                 }
165         }
166
167         if (!dev->data->dev_conf.intr_conf.rxq ||
168             !rte_intr_dp_is_en(intr_handle)) {
169                 /* Rx interrupt is disabled; map an interrupt only for write-back */
170                 hw->nb_msix = 1;
171                 if (hw->vf_res->vf_cap_flags &
172                     VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) {
173                         /* If WB_ON_ITR is supported, enable it */
174                         hw->msix_base = IAVF_RX_VEC_START;
175                         IAVF_WRITE_REG(&hw->avf,
176                                        IAVF_VFINT_DYN_CTLN1(hw->msix_base - 1),
177                                        IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK |
178                                        IAVF_VFINT_DYN_CTLN1_WB_ON_ITR_MASK);
179                 } else {
180                         /* If no WB_ON_ITR offload flags, need to set
181                          * interrupt for descriptor write back.
182                          */
183                         hw->msix_base = IAVF_MISC_VEC_ID;
184
185                         /* set ITR to max */
186                         interval =
187                         iavf_calc_itr_interval(IAVF_QUEUE_ITR_INTERVAL_MAX);
188                         IAVF_WRITE_REG(&hw->avf, IAVF_VFINT_DYN_CTL01,
189                                        IAVF_VFINT_DYN_CTL01_INTENA_MASK |
190                                        (IAVF_ITR_INDEX_DEFAULT <<
191                                         IAVF_VFINT_DYN_CTL01_ITR_INDX_SHIFT) |
192                                        (interval <<
193                                         IAVF_VFINT_DYN_CTL01_INTERVAL_SHIFT));
194                 }
195                 IAVF_WRITE_FLUSH(&hw->avf);
196                 /* map all queues to the same interrupt */
197                 for (i = 0; i < dev->data->nb_rx_queues; i++)
198                         hw->rxq_map[hw->msix_base] |= 1 << i;
199         } else {
200                 if (!rte_intr_allow_others(intr_handle)) {
201                         hw->nb_msix = 1;
202                         hw->msix_base = IAVF_MISC_VEC_ID;
203                         for (i = 0; i < dev->data->nb_rx_queues; i++) {
204                                 hw->rxq_map[hw->msix_base] |= 1 << i;
205                                 intr_handle->intr_vec[i] = IAVF_MISC_VEC_ID;
206                         }
207                         PMD_DRV_LOG(DEBUG,
208                                     "vector %u is mapped to all Rx queues",
209                                     hw->msix_base);
210                 } else {
211                         /* If Rx interrupt is required and multiple
212                          * interrupts can be used, the vectors start from 1
213                          */
214                         hw->nb_msix = RTE_MIN(hw->vf_res->max_vectors,
215                                               intr_handle->nb_efd);
216                         hw->msix_base = IAVF_MISC_VEC_ID;
217                         vec = IAVF_MISC_VEC_ID;
218                         for (i = 0; i < dev->data->nb_rx_queues; i++) {
219                                 hw->rxq_map[vec] |= 1 << i;
220                                 intr_handle->intr_vec[i] = vec++;
221                                 if (vec >= hw->nb_msix)
222                                         vec = IAVF_RX_VEC_START;
223                         }
224                         PMD_DRV_LOG(DEBUG,
225                                     "%u vectors are mapped to %u Rx queues",
226                                     hw->nb_msix, dev->data->nb_rx_queues);
227                 }
228         }
229
230         if (ice_dcf_config_irq_map(hw)) {
231                 PMD_DRV_LOG(ERR, "config interrupt mapping failed");
232                 return -1;
233         }
234         return 0;
235 }
236
237 static int
238 alloc_rxq_mbufs(struct ice_rx_queue *rxq)
239 {
240         volatile union ice_rx_flex_desc *rxd;
241         struct rte_mbuf *mbuf = NULL;
242         uint64_t dma_addr;
243         uint16_t i;
244
245         for (i = 0; i < rxq->nb_rx_desc; i++) {
246                 mbuf = rte_mbuf_raw_alloc(rxq->mp);
247                 if (unlikely(!mbuf)) {
248                         PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
249                         return -ENOMEM;
250                 }
251
252                 rte_mbuf_refcnt_set(mbuf, 1);
253                 mbuf->next = NULL;
254                 mbuf->data_off = RTE_PKTMBUF_HEADROOM;
255                 mbuf->nb_segs = 1;
256                 mbuf->port = rxq->port_id;
257
258                 dma_addr =
259                         rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
260
261                 rxd = &rxq->rx_ring[i];
262                 rxd->read.pkt_addr = dma_addr;
263                 rxd->read.hdr_addr = 0;
264 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
265                 rxd->read.rsvd1 = 0;
266                 rxd->read.rsvd2 = 0;
267 #endif
268
269                 rxq->sw_ring[i].mbuf = (void *)mbuf;
270         }
271
272         return 0;
273 }
274
275 static int
276 ice_dcf_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
277 {
278         struct ice_dcf_adapter *ad = dev->data->dev_private;
279         struct iavf_hw *hw = &ad->real_hw.avf;
280         struct ice_rx_queue *rxq;
281         int err = 0;
282
283         if (rx_queue_id >= dev->data->nb_rx_queues)
284                 return -EINVAL;
285
286         rxq = dev->data->rx_queues[rx_queue_id];
287
288         err = alloc_rxq_mbufs(rxq);
289         if (err) {
290                 PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
291                 return err;
292         }
293
294         rte_wmb();
295
296         /* Init the RX tail register. */
297         IAVF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
298         IAVF_WRITE_FLUSH(hw);
299
300         /* Ready to switch the queue on */
301         err = ice_dcf_switch_queue(&ad->real_hw, rx_queue_id, true, true);
302         if (err) {
303                 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
304                             rx_queue_id);
305                 return err;
306         }
307
308         dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
309
310         return 0;
311 }
312
313 static inline void
314 reset_rx_queue(struct ice_rx_queue *rxq)
315 {
316         uint16_t len;
317         uint32_t i;
318
319         if (!rxq)
320                 return;
321
322         len = rxq->nb_rx_desc + ICE_RX_MAX_BURST;
323
324         for (i = 0; i < len * sizeof(union ice_rx_flex_desc); i++)
325                 ((volatile char *)rxq->rx_ring)[i] = 0;
326
327         memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
328
329         for (i = 0; i < ICE_RX_MAX_BURST; i++)
330                 rxq->sw_ring[rxq->nb_rx_desc + i].mbuf = &rxq->fake_mbuf;
331
332         /* for rx bulk */
333         rxq->rx_nb_avail = 0;
334         rxq->rx_next_avail = 0;
335         rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
336
337         rxq->rx_tail = 0;
338         rxq->nb_rx_hold = 0;
339         rxq->pkt_first_seg = NULL;
340         rxq->pkt_last_seg = NULL;
341 }
342
343 static inline void
344 reset_tx_queue(struct ice_tx_queue *txq)
345 {
346         struct ice_tx_entry *txe;
347         uint32_t i, size;
348         uint16_t prev;
349
350         if (!txq) {
351                 PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL");
352                 return;
353         }
354
355         txe = txq->sw_ring;
356         size = sizeof(struct ice_tx_desc) * txq->nb_tx_desc;
357         for (i = 0; i < size; i++)
358                 ((volatile char *)txq->tx_ring)[i] = 0;
359
360         prev = (uint16_t)(txq->nb_tx_desc - 1);
361         for (i = 0; i < txq->nb_tx_desc; i++) {
362                 txq->tx_ring[i].cmd_type_offset_bsz =
363                         rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DESC_DONE);
364                 txe[i].mbuf = NULL;
365                 txe[i].last_id = i;
366                 txe[prev].next_id = i;
367                 prev = i;
368         }
369
370         txq->tx_tail = 0;
371         txq->nb_tx_used = 0;
372
373         txq->last_desc_cleaned = txq->nb_tx_desc - 1;
374         txq->nb_tx_free = txq->nb_tx_desc - 1;
375
376         txq->tx_next_dd = txq->tx_rs_thresh - 1;
377         txq->tx_next_rs = txq->tx_rs_thresh - 1;
378 }
379
380 static int
381 ice_dcf_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
382 {
383         struct ice_dcf_adapter *ad = dev->data->dev_private;
384         struct ice_dcf_hw *hw = &ad->real_hw;
385         struct ice_rx_queue *rxq;
386         int err;
387
388         if (rx_queue_id >= dev->data->nb_rx_queues)
389                 return -EINVAL;
390
391         err = ice_dcf_switch_queue(hw, rx_queue_id, true, false);
392         if (err) {
393                 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
394                             rx_queue_id);
395                 return err;
396         }
397
398         rxq = dev->data->rx_queues[rx_queue_id];
399         rxq->rx_rel_mbufs(rxq);
400         reset_rx_queue(rxq);
401         dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
402
403         return 0;
404 }
405
406 static int
407 ice_dcf_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
408 {
409         struct ice_dcf_adapter *ad = dev->data->dev_private;
410         struct iavf_hw *hw = &ad->real_hw.avf;
411         struct ice_tx_queue *txq;
412         int err = 0;
413
414         if (tx_queue_id >= dev->data->nb_tx_queues)
415                 return -EINVAL;
416
417         txq = dev->data->tx_queues[tx_queue_id];
418
419         /* Init the TX tail register. */
420         txq->qtx_tail = hw->hw_addr + IAVF_QTX_TAIL1(tx_queue_id);
421         IAVF_PCI_REG_WRITE(txq->qtx_tail, 0);
422         IAVF_WRITE_FLUSH(hw);
423
424         /* Ready to switch the queue on */
425         err = ice_dcf_switch_queue(&ad->real_hw, tx_queue_id, false, true);
426
427         if (err) {
428                 PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
429                             tx_queue_id);
430                 return err;
431         }
432
433         dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
434
435         return 0;
436 }
437
438 static int
439 ice_dcf_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
440 {
441         struct ice_dcf_adapter *ad = dev->data->dev_private;
442         struct ice_dcf_hw *hw = &ad->real_hw;
443         struct ice_tx_queue *txq;
444         int err;
445
446         if (tx_queue_id >= dev->data->nb_tx_queues)
447                 return -EINVAL;
448
449         err = ice_dcf_switch_queue(hw, tx_queue_id, false, false);
450         if (err) {
451                 PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
452                             tx_queue_id);
453                 return err;
454         }
455
456         txq = dev->data->tx_queues[tx_queue_id];
457         txq->tx_rel_mbufs(txq);
458         reset_tx_queue(txq);
459         dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
460
461         return 0;
462 }
463
464 static int
465 ice_dcf_start_queues(struct rte_eth_dev *dev)
466 {
467         struct ice_rx_queue *rxq;
468         struct ice_tx_queue *txq;
469         int nb_rxq = 0;
470         int nb_txq, i;
471
472         for (nb_txq = 0; nb_txq < dev->data->nb_tx_queues; nb_txq++) {
473                 txq = dev->data->tx_queues[nb_txq];
474                 if (txq->tx_deferred_start)
475                         continue;
476                 if (ice_dcf_tx_queue_start(dev, nb_txq) != 0) {
477                         PMD_DRV_LOG(ERR, "Failed to start TX queue %u", nb_txq);
478                         goto tx_err;
479                 }
480         }
481
482         for (nb_rxq = 0; nb_rxq < dev->data->nb_rx_queues; nb_rxq++) {
483                 rxq = dev->data->rx_queues[nb_rxq];
484                 if (rxq->rx_deferred_start)
485                         continue;
486                 if (ice_dcf_rx_queue_start(dev, nb_rxq) != 0) {
487                         PMD_DRV_LOG(ERR, "Failed to start RX queue %u", nb_rxq);
488                         goto rx_err;
489                 }
490         }
491
492         return 0;
493
494         /* stop the already-started queues if we failed to start them all */
495 rx_err:
496         for (i = 0; i < nb_rxq; i++)
497                 ice_dcf_rx_queue_stop(dev, i);
498 tx_err:
499         for (i = 0; i < nb_txq; i++)
500                 ice_dcf_tx_queue_stop(dev, i);
501
502         return -1;
503 }
504
505 static int
506 ice_dcf_dev_start(struct rte_eth_dev *dev)
507 {
508         struct ice_dcf_adapter *dcf_ad = dev->data->dev_private;
509         struct rte_intr_handle *intr_handle = dev->intr_handle;
510         struct ice_adapter *ad = &dcf_ad->parent;
511         struct ice_dcf_hw *hw = &dcf_ad->real_hw;
512         int ret;
513
514         ad->pf.adapter_stopped = 0;
515
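            /* A queue pair covers one Rx and one Tx ring, so request enough
             * pairs to cover the larger of the two configured queue counts.
             */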
516         hw->num_queue_pairs = RTE_MAX(dev->data->nb_rx_queues,
517                                       dev->data->nb_tx_queues);
518
519         ret = ice_dcf_init_rx_queues(dev);
520         if (ret) {
521                 PMD_DRV_LOG(ERR, "Fail to init queues");
522                 return ret;
523         }
524
525         if (hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
526                 ret = ice_dcf_init_rss(hw);
527                 if (ret) {
528                         PMD_DRV_LOG(ERR, "Failed to configure RSS");
529                         return ret;
530                 }
531         }
532
533         ret = ice_dcf_configure_queues(hw);
534         if (ret) {
535                 PMD_DRV_LOG(ERR, "Fail to config queues");
536                 return ret;
537         }
538
539         ret = ice_dcf_config_rx_queues_irqs(dev, intr_handle);
540         if (ret) {
541                 PMD_DRV_LOG(ERR, "Fail to config rx queues' irqs");
542                 return ret;
543         }
544
545         if (dev->data->dev_conf.intr_conf.rxq != 0) {
546                 rte_intr_disable(intr_handle);
547                 rte_intr_enable(intr_handle);
548         }
549
550         ret = ice_dcf_start_queues(dev);
551         if (ret) {
552                 PMD_DRV_LOG(ERR, "Failed to enable queues");
553                 return ret;
554         }
555
556         ret = ice_dcf_add_del_all_mac_addr(hw, true);
557         if (ret) {
558                 PMD_DRV_LOG(ERR, "Failed to add mac addr");
559                 return ret;
560         }
561
562         dev->data->dev_link.link_status = ETH_LINK_UP;
563
564         return 0;
565 }
566
567 static void
568 ice_dcf_stop_queues(struct rte_eth_dev *dev)
569 {
570         struct ice_dcf_adapter *ad = dev->data->dev_private;
571         struct ice_dcf_hw *hw = &ad->real_hw;
572         struct ice_rx_queue *rxq;
573         struct ice_tx_queue *txq;
574         int ret, i;
575
576         /* Stop all queues */
577         ret = ice_dcf_disable_queues(hw);
578         if (ret)
579                 PMD_DRV_LOG(WARNING, "Fail to stop queues");
580
581         for (i = 0; i < dev->data->nb_tx_queues; i++) {
582                 txq = dev->data->tx_queues[i];
583                 if (!txq)
584                         continue;
585                 txq->tx_rel_mbufs(txq);
586                 reset_tx_queue(txq);
587                 dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
588         }
589         for (i = 0; i < dev->data->nb_rx_queues; i++) {
590                 rxq = dev->data->rx_queues[i];
591                 if (!rxq)
592                         continue;
593                 rxq->rx_rel_mbufs(rxq);
594                 reset_rx_queue(rxq);
595                 dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
596         }
597 }
598
599 static int
600 ice_dcf_dev_stop(struct rte_eth_dev *dev)
601 {
602         struct ice_dcf_adapter *dcf_ad = dev->data->dev_private;
603         struct rte_intr_handle *intr_handle = dev->intr_handle;
604         struct ice_adapter *ad = &dcf_ad->parent;
605
606         if (ad->pf.adapter_stopped == 1) {
607                 PMD_DRV_LOG(DEBUG, "Port is already stopped");
608                 return 0;
609         }
610
611         /* Stop the VF representors for this device */
612         ice_dcf_vf_repr_stop_all(dcf_ad);
613
614         ice_dcf_stop_queues(dev);
615
616         rte_intr_efd_disable(intr_handle);
617         if (intr_handle->intr_vec) {
618                 rte_free(intr_handle->intr_vec);
619                 intr_handle->intr_vec = NULL;
620         }
621
622         ice_dcf_add_del_all_mac_addr(&dcf_ad->real_hw, false);
623         dev->data->dev_link.link_status = ETH_LINK_DOWN;
624         ad->pf.adapter_stopped = 1;
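            /* The QoS (TM) bandwidth configuration is no longer treated as
             * committed once the port has been stopped.
             */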
625         dcf_ad->real_hw.tm_conf.committed = false;
626
627         return 0;
628 }
629
630 static int
631 ice_dcf_dev_configure(struct rte_eth_dev *dev)
632 {
633         struct ice_dcf_adapter *dcf_ad = dev->data->dev_private;
634         struct ice_adapter *ad = &dcf_ad->parent;
635
636         ad->rx_bulk_alloc_allowed = true;
637         ad->tx_simple_allowed = true;
638
639         if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
640                 dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
641
642         return 0;
643 }
644
645 static int
646 ice_dcf_dev_info_get(struct rte_eth_dev *dev,
647                      struct rte_eth_dev_info *dev_info)
648 {
649         struct ice_dcf_adapter *adapter = dev->data->dev_private;
650         struct ice_dcf_hw *hw = &adapter->real_hw;
651
652         dev_info->max_mac_addrs = 1;
653         dev_info->max_rx_queues = hw->vsi_res->num_queue_pairs;
654         dev_info->max_tx_queues = hw->vsi_res->num_queue_pairs;
655         dev_info->min_rx_bufsize = ICE_BUF_SIZE_MIN;
656         dev_info->max_rx_pktlen = ICE_FRAME_SIZE_MAX;
657         dev_info->hash_key_size = hw->vf_res->rss_key_size;
658         dev_info->reta_size = hw->vf_res->rss_lut_size;
659         dev_info->flow_type_rss_offloads = ICE_RSS_OFFLOAD_ALL;
660
661         dev_info->rx_offload_capa =
662                 DEV_RX_OFFLOAD_VLAN_STRIP |
663                 DEV_RX_OFFLOAD_IPV4_CKSUM |
664                 DEV_RX_OFFLOAD_UDP_CKSUM |
665                 DEV_RX_OFFLOAD_TCP_CKSUM |
666                 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
667                 DEV_RX_OFFLOAD_SCATTER |
668                 DEV_RX_OFFLOAD_JUMBO_FRAME |
669                 DEV_RX_OFFLOAD_VLAN_FILTER |
670                 DEV_RX_OFFLOAD_RSS_HASH;
671         dev_info->tx_offload_capa =
672                 DEV_TX_OFFLOAD_VLAN_INSERT |
673                 DEV_TX_OFFLOAD_IPV4_CKSUM |
674                 DEV_TX_OFFLOAD_UDP_CKSUM |
675                 DEV_TX_OFFLOAD_TCP_CKSUM |
676                 DEV_TX_OFFLOAD_SCTP_CKSUM |
677                 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
678                 DEV_TX_OFFLOAD_TCP_TSO |
679                 DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
680                 DEV_TX_OFFLOAD_GRE_TNL_TSO |
681                 DEV_TX_OFFLOAD_IPIP_TNL_TSO |
682                 DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
683                 DEV_TX_OFFLOAD_MULTI_SEGS;
684
685         dev_info->default_rxconf = (struct rte_eth_rxconf) {
686                 .rx_thresh = {
687                         .pthresh = ICE_DEFAULT_RX_PTHRESH,
688                         .hthresh = ICE_DEFAULT_RX_HTHRESH,
689                         .wthresh = ICE_DEFAULT_RX_WTHRESH,
690                 },
691                 .rx_free_thresh = ICE_DEFAULT_RX_FREE_THRESH,
692                 .rx_drop_en = 0,
693                 .offloads = 0,
694         };
695
696         dev_info->default_txconf = (struct rte_eth_txconf) {
697                 .tx_thresh = {
698                         .pthresh = ICE_DEFAULT_TX_PTHRESH,
699                         .hthresh = ICE_DEFAULT_TX_HTHRESH,
700                         .wthresh = ICE_DEFAULT_TX_WTHRESH,
701                 },
702                 .tx_free_thresh = ICE_DEFAULT_TX_FREE_THRESH,
703                 .tx_rs_thresh = ICE_DEFAULT_TX_RSBIT_THRESH,
704                 .offloads = 0,
705         };
706
707         dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
708                 .nb_max = ICE_MAX_RING_DESC,
709                 .nb_min = ICE_MIN_RING_DESC,
710                 .nb_align = ICE_ALIGN_RING_DESC,
711         };
712
713         dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
714                 .nb_max = ICE_MAX_RING_DESC,
715                 .nb_min = ICE_MIN_RING_DESC,
716                 .nb_align = ICE_ALIGN_RING_DESC,
717         };
718
719         return 0;
720 }
721
722 static int
723 ice_dcf_dev_promiscuous_enable(__rte_unused struct rte_eth_dev *dev)
724 {
725         return 0;
726 }
727
728 static int
729 ice_dcf_dev_promiscuous_disable(__rte_unused struct rte_eth_dev *dev)
730 {
731         return 0;
732 }
733
734 static int
735 ice_dcf_dev_allmulticast_enable(__rte_unused struct rte_eth_dev *dev)
736 {
737         return 0;
738 }
739
740 static int
741 ice_dcf_dev_allmulticast_disable(__rte_unused struct rte_eth_dev *dev)
742 {
743         return 0;
744 }
745
746 static int
747 ice_dcf_dev_flow_ops_get(struct rte_eth_dev *dev,
748                          const struct rte_flow_ops **ops)
749 {
750         if (!dev)
751                 return -EINVAL;
752
753         *ops = &ice_flow_ops;
754         return 0;
755 }
756
757 #define ICE_DCF_32_BIT_WIDTH (CHAR_BIT * 4)
758 #define ICE_DCF_48_BIT_WIDTH (CHAR_BIT * 6)
759 #define ICE_DCF_48_BIT_MASK  RTE_LEN2MASK(ICE_DCF_48_BIT_WIDTH, uint64_t)
760
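    /* The PF reports 48-bit counters; compute the value relative to the
     * stored offset and handle wrap-around of the 48-bit counter.
     */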
761 static void
762 ice_dcf_stat_update_48(uint64_t *offset, uint64_t *stat)
763 {
764         if (*stat >= *offset)
765                 *stat = *stat - *offset;
766         else
767                 *stat = (uint64_t)((*stat +
768                         ((uint64_t)1 << ICE_DCF_48_BIT_WIDTH)) - *offset);
769
770         *stat &= ICE_DCF_48_BIT_MASK;
771 }
772
773 static void
774 ice_dcf_stat_update_32(uint64_t *offset, uint64_t *stat)
775 {
776         if (*stat >= *offset)
777                 *stat = (uint64_t)(*stat - *offset);
778         else
779                 *stat = (uint64_t)((*stat +
780                         ((uint64_t)1 << ICE_DCF_32_BIT_WIDTH)) - *offset);
781 }
782
783 static void
784 ice_dcf_update_stats(struct virtchnl_eth_stats *oes,
785                      struct virtchnl_eth_stats *nes)
786 {
787         ice_dcf_stat_update_48(&oes->rx_bytes, &nes->rx_bytes);
788         ice_dcf_stat_update_48(&oes->rx_unicast, &nes->rx_unicast);
789         ice_dcf_stat_update_48(&oes->rx_multicast, &nes->rx_multicast);
790         ice_dcf_stat_update_48(&oes->rx_broadcast, &nes->rx_broadcast);
791         ice_dcf_stat_update_32(&oes->rx_discards, &nes->rx_discards);
792         ice_dcf_stat_update_48(&oes->tx_bytes, &nes->tx_bytes);
793         ice_dcf_stat_update_48(&oes->tx_unicast, &nes->tx_unicast);
794         ice_dcf_stat_update_48(&oes->tx_multicast, &nes->tx_multicast);
795         ice_dcf_stat_update_48(&oes->tx_broadcast, &nes->tx_broadcast);
796         ice_dcf_stat_update_32(&oes->tx_errors, &nes->tx_errors);
797         ice_dcf_stat_update_32(&oes->tx_discards, &nes->tx_discards);
798 }
799
800
801 static int
802 ice_dcf_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
803 {
804         struct ice_dcf_adapter *ad = dev->data->dev_private;
805         struct ice_dcf_hw *hw = &ad->real_hw;
806         struct virtchnl_eth_stats pstats;
807         int ret;
808
809         ret = ice_dcf_query_stats(hw, &pstats);
810         if (ret == 0) {
811                 ice_dcf_update_stats(&hw->eth_stats_offset, &pstats);
812                 stats->ipackets = pstats.rx_unicast + pstats.rx_multicast +
813                                 pstats.rx_broadcast - pstats.rx_discards;
814                 stats->opackets = pstats.tx_broadcast + pstats.tx_multicast +
815                                                 pstats.tx_unicast;
816                 stats->imissed = pstats.rx_discards;
817                 stats->oerrors = pstats.tx_errors + pstats.tx_discards;
818                 stats->ibytes = pstats.rx_bytes;
819                 stats->ibytes -= stats->ipackets * RTE_ETHER_CRC_LEN;
820                 stats->obytes = pstats.tx_bytes;
821         } else {
822                 PMD_DRV_LOG(ERR, "Get statistics failed");
823         }
824         return ret;
825 }
826
827 static int
828 ice_dcf_stats_reset(struct rte_eth_dev *dev)
829 {
830         struct ice_dcf_adapter *ad = dev->data->dev_private;
831         struct ice_dcf_hw *hw = &ad->real_hw;
832         struct virtchnl_eth_stats pstats;
833         int ret;
834
835         /* read stat values to clear hardware registers */
836         ret = ice_dcf_query_stats(hw, &pstats);
837         if (ret != 0)
838                 return ret;
839
840         /* set the stats offset based on the current values */
841         hw->eth_stats_offset = pstats;
842
843         return 0;
844 }
845
846 static void
847 ice_dcf_free_repr_info(struct ice_dcf_adapter *dcf_adapter)
848 {
849         if (dcf_adapter->repr_infos) {
850                 rte_free(dcf_adapter->repr_infos);
851                 dcf_adapter->repr_infos = NULL;
852         }
853 }
854
855 static int
856 ice_dcf_init_repr_info(struct ice_dcf_adapter *dcf_adapter)
857 {
858         dcf_adapter->repr_infos =
859                         rte_calloc("ice_dcf_rep_info",
860                                    dcf_adapter->real_hw.num_vfs,
861                                    sizeof(dcf_adapter->repr_infos[0]), 0);
862         if (!dcf_adapter->repr_infos) {
863                 PMD_DRV_LOG(ERR, "Failed to alloc memory for VF representors");
864                 return -ENOMEM;
865         }
866
867         return 0;
868 }
869
870 static int
871 ice_dcf_dev_close(struct rte_eth_dev *dev)
872 {
873         struct ice_dcf_adapter *adapter = dev->data->dev_private;
874
875         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
876                 return 0;
877
878         ice_dcf_free_repr_info(adapter);
879         ice_dcf_uninit_parent_adapter(dev);
880         ice_dcf_uninit_hw(dev, &adapter->real_hw);
881
882         return 0;
883 }
884
885 int
886 ice_dcf_link_update(struct rte_eth_dev *dev,
887                     __rte_unused int wait_to_complete)
888 {
889         struct ice_dcf_adapter *ad = dev->data->dev_private;
890         struct ice_dcf_hw *hw = &ad->real_hw;
891         struct rte_eth_link new_link;
892
893         memset(&new_link, 0, sizeof(new_link));
894
895         /* Only read the link status info stored in the VF; it is updated
896          * when a LINK_CHANGE event is received from the PF via virtchnl.
897          */
898         switch (hw->link_speed) {
899         case 10:
900                 new_link.link_speed = ETH_SPEED_NUM_10M;
901                 break;
902         case 100:
903                 new_link.link_speed = ETH_SPEED_NUM_100M;
904                 break;
905         case 1000:
906                 new_link.link_speed = ETH_SPEED_NUM_1G;
907                 break;
908         case 10000:
909                 new_link.link_speed = ETH_SPEED_NUM_10G;
910                 break;
911         case 20000:
912                 new_link.link_speed = ETH_SPEED_NUM_20G;
913                 break;
914         case 25000:
915                 new_link.link_speed = ETH_SPEED_NUM_25G;
916                 break;
917         case 40000:
918                 new_link.link_speed = ETH_SPEED_NUM_40G;
919                 break;
920         case 50000:
921                 new_link.link_speed = ETH_SPEED_NUM_50G;
922                 break;
923         case 100000:
924                 new_link.link_speed = ETH_SPEED_NUM_100G;
925                 break;
926         default:
927                 new_link.link_speed = ETH_SPEED_NUM_NONE;
928                 break;
929         }
930
931         new_link.link_duplex = ETH_LINK_FULL_DUPLEX;
932         new_link.link_status = hw->link_up ? ETH_LINK_UP :
933                                              ETH_LINK_DOWN;
934         new_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
935                                 ETH_LINK_SPEED_FIXED);
936
937         return rte_eth_linkstatus_set(dev, &new_link);
938 }
939
940 /* Add UDP tunneling port */
941 static int
942 ice_dcf_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
943                                 struct rte_eth_udp_tunnel *udp_tunnel)
944 {
945         struct ice_dcf_adapter *adapter = dev->data->dev_private;
946         struct ice_adapter *parent_adapter = &adapter->parent;
947         struct ice_hw *parent_hw = &parent_adapter->hw;
948         int ret = 0;
949
950         if (!udp_tunnel)
951                 return -EINVAL;
952
953         switch (udp_tunnel->prot_type) {
954         case RTE_TUNNEL_TYPE_VXLAN:
955                 ret = ice_create_tunnel(parent_hw, TNL_VXLAN,
956                                         udp_tunnel->udp_port);
957                 break;
958         case RTE_TUNNEL_TYPE_ECPRI:
959                 ret = ice_create_tunnel(parent_hw, TNL_ECPRI,
960                                         udp_tunnel->udp_port);
961                 break;
962         default:
963                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
964                 ret = -EINVAL;
965                 break;
966         }
967
968         return ret;
969 }
970
971 /* Delete UDP tunneling port */
972 static int
973 ice_dcf_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
974                                 struct rte_eth_udp_tunnel *udp_tunnel)
975 {
976         struct ice_dcf_adapter *adapter = dev->data->dev_private;
977         struct ice_adapter *parent_adapter = &adapter->parent;
978         struct ice_hw *parent_hw = &parent_adapter->hw;
979         int ret = 0;
980
981         if (!udp_tunnel)
982                 return -EINVAL;
983
984         switch (udp_tunnel->prot_type) {
985         case RTE_TUNNEL_TYPE_VXLAN:
986         case RTE_TUNNEL_TYPE_ECPRI:
987                 ret = ice_destroy_tunnel(parent_hw, udp_tunnel->udp_port, 0);
988                 break;
989         default:
990                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
991                 ret = -EINVAL;
992                 break;
993         }
994
995         return ret;
996 }
997
998 static int
999 ice_dcf_tm_ops_get(struct rte_eth_dev *dev __rte_unused,
1000                 void *arg)
1001 {
1002         if (!arg)
1003                 return -EINVAL;
1004
1005         *(const void **)arg = &ice_dcf_tm_ops;
1006
1007         return 0;
1008 }
1009
1010 static const struct eth_dev_ops ice_dcf_eth_dev_ops = {
1011         .dev_start               = ice_dcf_dev_start,
1012         .dev_stop                = ice_dcf_dev_stop,
1013         .dev_close               = ice_dcf_dev_close,
1014         .dev_configure           = ice_dcf_dev_configure,
1015         .dev_infos_get           = ice_dcf_dev_info_get,
1016         .rx_queue_setup          = ice_rx_queue_setup,
1017         .tx_queue_setup          = ice_tx_queue_setup,
1018         .rx_queue_release        = ice_rx_queue_release,
1019         .tx_queue_release        = ice_tx_queue_release,
1020         .rx_queue_start          = ice_dcf_rx_queue_start,
1021         .tx_queue_start          = ice_dcf_tx_queue_start,
1022         .rx_queue_stop           = ice_dcf_rx_queue_stop,
1023         .tx_queue_stop           = ice_dcf_tx_queue_stop,
1024         .link_update             = ice_dcf_link_update,
1025         .stats_get               = ice_dcf_stats_get,
1026         .stats_reset             = ice_dcf_stats_reset,
1027         .promiscuous_enable      = ice_dcf_dev_promiscuous_enable,
1028         .promiscuous_disable     = ice_dcf_dev_promiscuous_disable,
1029         .allmulticast_enable     = ice_dcf_dev_allmulticast_enable,
1030         .allmulticast_disable    = ice_dcf_dev_allmulticast_disable,
1031         .flow_ops_get            = ice_dcf_dev_flow_ops_get,
1032         .udp_tunnel_port_add     = ice_dcf_dev_udp_tunnel_port_add,
1033         .udp_tunnel_port_del     = ice_dcf_dev_udp_tunnel_port_del,
1034         .tm_ops_get              = ice_dcf_tm_ops_get,
1035 };
1036
1037 static int
1038 ice_dcf_dev_init(struct rte_eth_dev *eth_dev)
1039 {
1040         struct ice_dcf_adapter *adapter = eth_dev->data->dev_private;
1041
1042         eth_dev->dev_ops = &ice_dcf_eth_dev_ops;
1043         eth_dev->rx_pkt_burst = ice_dcf_recv_pkts;
1044         eth_dev->tx_pkt_burst = ice_dcf_xmit_pkts;
1045
1046         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1047                 return 0;
1048
1049         eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
1050
1051         adapter->real_hw.vc_event_msg_cb = ice_dcf_handle_pf_event_msg;
1052         if (ice_dcf_init_hw(eth_dev, &adapter->real_hw) != 0) {
1053                 PMD_INIT_LOG(ERR, "Failed to init DCF hardware");
1054                 return -1;
1055         }
1056
1057         if (ice_dcf_init_parent_adapter(eth_dev) != 0) {
1058                 PMD_INIT_LOG(ERR, "Failed to init DCF parent adapter");
1059                 ice_dcf_uninit_hw(eth_dev, &adapter->real_hw);
1060                 return -1;
1061         }
1062
1063         return 0;
1064 }
1065
1066 static int
1067 ice_dcf_dev_uninit(struct rte_eth_dev *eth_dev)
1068 {
1069         ice_dcf_dev_close(eth_dev);
1070
1071         return 0;
1072 }
1073
1074 static int
1075 ice_dcf_cap_check_handler(__rte_unused const char *key,
1076                           const char *value, __rte_unused void *opaque)
1077 {
1078         if (strcmp(value, "dcf"))
1079                 return -1;
1080
1081         return 0;
1082 }
1083
1084 static int
1085 ice_dcf_cap_selected(struct rte_devargs *devargs)
1086 {
1087         struct rte_kvargs *kvlist;
1088         const char *key = "cap";
1089         int ret = 0;
1090
1091         if (devargs == NULL)
1092                 return 0;
1093
1094         kvlist = rte_kvargs_parse(devargs->args, NULL);
1095         if (kvlist == NULL)
1096                 return 0;
1097
1098         if (!rte_kvargs_count(kvlist, key))
1099                 goto exit;
1100
1101         /* dcf capability selected when there's a key-value pair: cap=dcf */
1102         if (rte_kvargs_process(kvlist, key,
1103                                ice_dcf_cap_check_handler, NULL) < 0)
1104                 goto exit;
1105
1106         ret = 1;
1107
1108 exit:
1109         rte_kvargs_free(kvlist);
1110         return ret;
1111 }
1112
1113 static int
1114 eth_ice_dcf_pci_probe(__rte_unused struct rte_pci_driver *pci_drv,
1115                       struct rte_pci_device *pci_dev)
1116 {
1117         struct rte_eth_devargs eth_da = { .nb_representor_ports = 0 };
1118         struct ice_dcf_vf_repr_param repr_param;
1119         char repr_name[RTE_ETH_NAME_MAX_LEN];
1120         struct ice_dcf_adapter *dcf_adapter;
1121         struct rte_eth_dev *dcf_ethdev;
1122         uint16_t dcf_vsi_id;
1123         int i, ret;
1124
1125         if (!ice_dcf_cap_selected(pci_dev->device.devargs))
1126                 return 1;
1127
1128         ret = rte_eth_devargs_parse(pci_dev->device.devargs->args, &eth_da);
1129         if (ret)
1130                 return ret;
1131
1132         ret = rte_eth_dev_pci_generic_probe(pci_dev,
1133                                             sizeof(struct ice_dcf_adapter),
1134                                             ice_dcf_dev_init);
1135         if (ret || !eth_da.nb_representor_ports)
1136                 return ret;
1137         if (eth_da.type != RTE_ETH_REPRESENTOR_VF)
1138                 return -ENOTSUP;
1139
1140         dcf_ethdev = rte_eth_dev_allocated(pci_dev->device.name);
1141         if (dcf_ethdev == NULL)
1142                 return -ENODEV;
1143
1144         dcf_adapter = dcf_ethdev->data->dev_private;
1145         ret = ice_dcf_init_repr_info(dcf_adapter);
1146         if (ret)
1147                 return ret;
1148
1149         if (eth_da.nb_representor_ports > dcf_adapter->real_hw.num_vfs ||
1150             eth_da.nb_representor_ports >= RTE_MAX_ETHPORTS) {
1151                 PMD_DRV_LOG(ERR, "the number of port representors is too large: %u",
1152                             eth_da.nb_representor_ports);
1153                 ice_dcf_free_repr_info(dcf_adapter);
1154                 return -EINVAL;
1155         }
1156
1157         dcf_vsi_id = dcf_adapter->real_hw.vsi_id | VIRTCHNL_DCF_VF_VSI_VALID;
1158
1159         repr_param.dcf_eth_dev = dcf_ethdev;
1160         repr_param.switch_domain_id = 0;
1161
1162         for (i = 0; i < eth_da.nb_representor_ports; i++) {
1163                 uint16_t vf_id = eth_da.representor_ports[i];
1164                 struct rte_eth_dev *vf_rep_eth_dev;
1165
1166                 if (vf_id >= dcf_adapter->real_hw.num_vfs) {
1167                         PMD_DRV_LOG(ERR, "VF ID %u is out of range (0 ~ %u)",
1168                                     vf_id, dcf_adapter->real_hw.num_vfs - 1);
1169                         ret = -EINVAL;
1170                         break;
1171                 }
1172
1173                 if (dcf_adapter->real_hw.vf_vsi_map[vf_id] == dcf_vsi_id) {
1174                         PMD_DRV_LOG(ERR, "VF ID %u is the DCF's own ID", vf_id);
1175                         ret = -EINVAL;
1176                         break;
1177                 }
1178
1179                 repr_param.vf_id = vf_id;
1180                 snprintf(repr_name, sizeof(repr_name), "net_%s_representor_%u",
1181                          pci_dev->device.name, vf_id);
1182                 ret = rte_eth_dev_create(&pci_dev->device, repr_name,
1183                                          sizeof(struct ice_dcf_vf_repr),
1184                                          NULL, NULL, ice_dcf_vf_repr_init,
1185                                          &repr_param);
1186                 if (ret) {
1187                         PMD_DRV_LOG(ERR, "failed to create DCF VF representor %s",
1188                                     repr_name);
1189                         break;
1190                 }
1191
1192                 vf_rep_eth_dev = rte_eth_dev_allocated(repr_name);
1193                 if (!vf_rep_eth_dev) {
1194                         PMD_DRV_LOG(ERR,
1195                                     "Failed to find the ethdev for DCF VF representor: %s",
1196                                     repr_name);
1197                         ret = -ENODEV;
1198                         break;
1199                 }
1200
1201                 dcf_adapter->repr_infos[vf_id].vf_rep_eth_dev = vf_rep_eth_dev;
1202                 dcf_adapter->num_reprs++;
1203         }
1204
1205         return ret;
1206 }
1207
1208 static int
1209 eth_ice_dcf_pci_remove(struct rte_pci_device *pci_dev)
1210 {
1211         struct rte_eth_dev *eth_dev;
1212
1213         eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
1214         if (!eth_dev)
1215                 return 0;
1216
1217         if (eth_dev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR)
1218                 return rte_eth_dev_pci_generic_remove(pci_dev,
1219                                                       ice_dcf_vf_repr_uninit);
1220         else
1221                 return rte_eth_dev_pci_generic_remove(pci_dev,
1222                                                       ice_dcf_dev_uninit);
1223 }
1224
1225 static const struct rte_pci_id pci_id_ice_dcf_map[] = {
1226         { RTE_PCI_DEVICE(IAVF_INTEL_VENDOR_ID, IAVF_DEV_ID_ADAPTIVE_VF) },
1227         { .vendor_id = 0, /* sentinel */ },
1228 };
1229
1230 static struct rte_pci_driver rte_ice_dcf_pmd = {
1231         .id_table = pci_id_ice_dcf_map,
1232         .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
1233         .probe = eth_ice_dcf_pci_probe,
1234         .remove = eth_ice_dcf_pci_remove,
1235 };
1236
1237 RTE_PMD_REGISTER_PCI(net_ice_dcf, rte_ice_dcf_pmd);
1238 RTE_PMD_REGISTER_PCI_TABLE(net_ice_dcf, pci_id_ice_dcf_map);
1239 RTE_PMD_REGISTER_KMOD_DEP(net_ice_dcf, "* igb_uio | vfio-pci");
1240 RTE_PMD_REGISTER_PARAM_STRING(net_ice_dcf, "cap=dcf");