/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020 Intel Corporation
 */

#include <errno.h>
#include <stdbool.h>
#include <sys/queue.h>
#include <sys/types.h>
#include <unistd.h>

#include <rte_interrupts.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_atomic.h>
#include <rte_eal.h>
#include <rte_ether.h>
#include <ethdev_pci.h>
#include <rte_kvargs.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_dev.h>

#include <iavf_devids.h>

#include "ice_generic_flow.h"
#include "ice_dcf_ethdev.h"
#include "ice_rxtx.h"

static int
ice_dcf_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
				struct rte_eth_udp_tunnel *udp_tunnel);
static int
ice_dcf_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
				struct rte_eth_udp_tunnel *udp_tunnel);

static int
ice_dcf_dev_init(struct rte_eth_dev *eth_dev);

static int
ice_dcf_dev_uninit(struct rte_eth_dev *eth_dev);

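/* Placeholder burst callbacks: the DCF control port does not receive or
 * transmit packets through these; they simply return 0 packets. The real
 * Rx/Tx functions are selected later via ice_set_rx_function() and
 * ice_set_tx_function() when the Rx queues are initialized.
 */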
static uint16_t
ice_dcf_recv_pkts(__rte_unused void *rx_queue,
		  __rte_unused struct rte_mbuf **bufs,
		  __rte_unused uint16_t nb_pkts)
{
	return 0;
}

static uint16_t
ice_dcf_xmit_pkts(__rte_unused void *tx_queue,
		  __rte_unused struct rte_mbuf **bufs,
		  __rte_unused uint16_t nb_pkts)
{
	return 0;
}

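/* Program the Rx buffer size and maximum packet length for one queue.
 * The buffer length is derived from the mempool data room, the maximum
 * packet length from the configured MTU; both are validated against the
 * jumbo-frame limits before the queue tail register is armed.
 */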
static int
ice_dcf_init_rxq(struct rte_eth_dev *dev, struct ice_rx_queue *rxq)
{
	struct ice_dcf_adapter *dcf_ad = dev->data->dev_private;
	struct rte_eth_dev_data *dev_data = dev->data;
	struct iavf_hw *hw = &dcf_ad->real_hw.avf;
	uint16_t buf_size, max_pkt_len;

	buf_size = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;
	rxq->rx_hdr_len = 0;
	rxq->rx_buf_len = RTE_ALIGN(buf_size, (1 << ICE_RLAN_CTX_DBUF_S));
	max_pkt_len = RTE_MIN(ICE_SUPPORT_CHAIN_NUM * rxq->rx_buf_len,
			      dev->data->mtu + ICE_ETH_OVERHEAD);

	/* Check if the jumbo frame and maximum packet length are set
	 * correctly.
	 */
	if (dev_data->mtu > RTE_ETHER_MTU) {
		if (max_pkt_len <= ICE_ETH_MAX_LEN ||
		    max_pkt_len > ICE_FRAME_SIZE_MAX) {
			PMD_DRV_LOG(ERR, "maximum packet length must be "
				    "larger than %u and smaller than %u, "
				    "as jumbo frame is enabled",
				    (uint32_t)ICE_ETH_MAX_LEN,
				    (uint32_t)ICE_FRAME_SIZE_MAX);
			return -EINVAL;
		}
	} else {
		if (max_pkt_len < RTE_ETHER_MIN_LEN ||
		    max_pkt_len > ICE_ETH_MAX_LEN) {
			PMD_DRV_LOG(ERR, "maximum packet length must be "
				    "larger than %u and smaller than %u, "
				    "as jumbo frame is disabled",
				    (uint32_t)RTE_ETHER_MIN_LEN,
				    (uint32_t)ICE_ETH_MAX_LEN);
			return -EINVAL;
		}
	}

	rxq->max_pkt_len = max_pkt_len;
	if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
	    (rxq->max_pkt_len + 2 * ICE_VLAN_TAG_SIZE) > buf_size) {
		dev_data->scattered_rx = 1;
	}
	rxq->qrx_tail = hw->hw_addr + IAVF_QRX_TAIL1(rxq->queue_id);
	IAVF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
	IAVF_WRITE_FLUSH(hw);

	return 0;
}

static int
ice_dcf_init_rx_queues(struct rte_eth_dev *dev)
{
	struct ice_rx_queue **rxq =
		(struct ice_rx_queue **)dev->data->rx_queues;
	int i, ret;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		if (!rxq[i] || !rxq[i]->q_set)
			continue;
		ret = ice_dcf_init_rxq(dev, rxq[i]);
		if (ret)
			return ret;
	}

	ice_set_rx_function(dev);
	ice_set_tx_function(dev);

	return 0;
}

#define IAVF_MISC_VEC_ID                RTE_INTR_VEC_ZERO_OFFSET
#define IAVF_RX_VEC_START               RTE_INTR_VEC_RXTX_OFFSET

#define IAVF_ITR_INDEX_DEFAULT          0
#define IAVF_QUEUE_ITR_INTERVAL_DEFAULT 32 /* 32 us */
#define IAVF_QUEUE_ITR_INTERVAL_MAX     8160 /* 8160 us */

static inline uint16_t
iavf_calc_itr_interval(int16_t interval)
{
	if (interval < 0 || interval > IAVF_QUEUE_ITR_INTERVAL_MAX)
		interval = IAVF_QUEUE_ITR_INTERVAL_DEFAULT;

	/* Convert to hardware count, as writing each 1 represents 2 us */
	return interval / 2;
}

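/* Map Rx queues to MSI-X vectors. Three cases are handled below:
 * - Rx interrupts disabled: use a single vector only for descriptor
 *   write-back (WB_ON_ITR if the VF supports it, otherwise the misc vector
 *   with the ITR set to its maximum).
 * - Rx interrupts enabled but only one vector usable: map every queue to
 *   the misc vector.
 * - Rx interrupts enabled with multiple vectors: distribute the queues
 *   round-robin over the vectors starting from IAVF_RX_VEC_START.
 */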
static int
ice_dcf_config_rx_queues_irqs(struct rte_eth_dev *dev,
				     struct rte_intr_handle *intr_handle)
{
	struct ice_dcf_adapter *adapter = dev->data->dev_private;
	struct ice_dcf_hw *hw = &adapter->real_hw;
	uint16_t interval, i;
	int vec;

	if (rte_intr_cap_multiple(intr_handle) &&
	    dev->data->dev_conf.intr_conf.rxq) {
		if (rte_intr_efd_enable(intr_handle, dev->data->nb_rx_queues))
			return -1;
	}

	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
		intr_handle->intr_vec =
			rte_zmalloc("intr_vec",
				    dev->data->nb_rx_queues * sizeof(int), 0);
		if (!intr_handle->intr_vec) {
			PMD_DRV_LOG(ERR, "Failed to allocate %d rx intr_vec",
				    dev->data->nb_rx_queues);
			return -1;
		}
	}

	if (!dev->data->dev_conf.intr_conf.rxq ||
	    !rte_intr_dp_is_en(intr_handle)) {
		/* Rx interrupt disabled, map interrupt only for write-back */
		hw->nb_msix = 1;
		if (hw->vf_res->vf_cap_flags &
		    VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) {
			/* If WB_ON_ITR is supported, enable it */
			hw->msix_base = IAVF_RX_VEC_START;
			/* Set the ITR for index zero to 2us, to make sure
			 * that we leave time for aggregation to occur, but
			 * don't increase latency dramatically.
			 */
			IAVF_WRITE_REG(&hw->avf,
				       IAVF_VFINT_DYN_CTLN1(hw->msix_base - 1),
				       (0 << IAVF_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) |
				       IAVF_VFINT_DYN_CTLN1_WB_ON_ITR_MASK |
				       (2UL << IAVF_VFINT_DYN_CTLN1_INTERVAL_SHIFT));
		} else {
			/* If the WB_ON_ITR offload flag is not set, an
			 * interrupt is needed for descriptor write-back.
			 */
			hw->msix_base = IAVF_MISC_VEC_ID;

			/* set ITR to max */
			interval =
			iavf_calc_itr_interval(IAVF_QUEUE_ITR_INTERVAL_MAX);
			IAVF_WRITE_REG(&hw->avf, IAVF_VFINT_DYN_CTL01,
				       IAVF_VFINT_DYN_CTL01_INTENA_MASK |
				       (IAVF_ITR_INDEX_DEFAULT <<
					IAVF_VFINT_DYN_CTL01_ITR_INDX_SHIFT) |
				       (interval <<
					IAVF_VFINT_DYN_CTL01_INTERVAL_SHIFT));
		}
		IAVF_WRITE_FLUSH(&hw->avf);
		/* map all queues to the same interrupt */
		for (i = 0; i < dev->data->nb_rx_queues; i++)
			hw->rxq_map[hw->msix_base] |= 1 << i;
	} else {
		if (!rte_intr_allow_others(intr_handle)) {
			hw->nb_msix = 1;
			hw->msix_base = IAVF_MISC_VEC_ID;
			for (i = 0; i < dev->data->nb_rx_queues; i++) {
				hw->rxq_map[hw->msix_base] |= 1 << i;
				intr_handle->intr_vec[i] = IAVF_MISC_VEC_ID;
			}
			PMD_DRV_LOG(DEBUG,
				    "vector %u is mapped to all Rx queues",
				    hw->msix_base);
		} else {
			/* If Rx interrupts are required and multiple
			 * interrupt vectors can be used, the queue vectors
			 * start from 1.
			 */
			hw->nb_msix = RTE_MIN(hw->vf_res->max_vectors,
					      intr_handle->nb_efd);
			hw->msix_base = IAVF_MISC_VEC_ID;
			vec = IAVF_MISC_VEC_ID;
			for (i = 0; i < dev->data->nb_rx_queues; i++) {
				hw->rxq_map[vec] |= 1 << i;
				intr_handle->intr_vec[i] = vec++;
				if (vec >= hw->nb_msix)
					vec = IAVF_RX_VEC_START;
			}
			PMD_DRV_LOG(DEBUG,
				    "%u vectors are mapped to %u Rx queues",
				    hw->nb_msix, dev->data->nb_rx_queues);
		}
	}

	if (ice_dcf_config_irq_map(hw)) {
		PMD_DRV_LOG(ERR, "config interrupt mapping failed");
		return -1;
	}
	return 0;
}

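/* Allocate one mbuf per descriptor and post its I/O address into the Rx
 * ring so the hardware has buffers to write into before the queue starts.
 */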
static int
alloc_rxq_mbufs(struct ice_rx_queue *rxq)
{
	volatile union ice_rx_flex_desc *rxd;
	struct rte_mbuf *mbuf = NULL;
	uint64_t dma_addr;
	uint16_t i;

	for (i = 0; i < rxq->nb_rx_desc; i++) {
		mbuf = rte_mbuf_raw_alloc(rxq->mp);
		if (unlikely(!mbuf)) {
			PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
			return -ENOMEM;
		}

		rte_mbuf_refcnt_set(mbuf, 1);
		mbuf->next = NULL;
		mbuf->data_off = RTE_PKTMBUF_HEADROOM;
		mbuf->nb_segs = 1;
		mbuf->port = rxq->port_id;

		dma_addr =
			rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));

		rxd = &rxq->rx_ring[i];
		rxd->read.pkt_addr = dma_addr;
		rxd->read.hdr_addr = 0;
#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
		rxd->read.rsvd1 = 0;
		rxd->read.rsvd2 = 0;
#endif

		rxq->sw_ring[i].mbuf = (void *)mbuf;
	}

	return 0;
}

static int
ice_dcf_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct ice_dcf_adapter *ad = dev->data->dev_private;
	struct iavf_hw *hw = &ad->real_hw.avf;
	struct ice_rx_queue *rxq;
	int err = 0;

	if (rx_queue_id >= dev->data->nb_rx_queues)
		return -EINVAL;

	rxq = dev->data->rx_queues[rx_queue_id];

	err = alloc_rxq_mbufs(rxq);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
		return err;
	}

	rte_wmb();

	/* Init the RX tail register. */
	IAVF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
	IAVF_WRITE_FLUSH(hw);

	/* Ready to switch the queue on */
	err = ice_dcf_switch_queue(&ad->real_hw, rx_queue_id, true, true);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
			    rx_queue_id);
		return err;
	}

	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	return 0;
}

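/* Software reset helpers: zero the descriptor rings and restore the
 * software ring state so a stopped queue can be restarted cleanly.
 */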
static inline void
reset_rx_queue(struct ice_rx_queue *rxq)
{
	uint16_t len;
	uint32_t i;

	if (!rxq)
		return;

	len = rxq->nb_rx_desc + ICE_RX_MAX_BURST;

	for (i = 0; i < len * sizeof(union ice_rx_flex_desc); i++)
		((volatile char *)rxq->rx_ring)[i] = 0;

	memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));

	for (i = 0; i < ICE_RX_MAX_BURST; i++)
		rxq->sw_ring[rxq->nb_rx_desc + i].mbuf = &rxq->fake_mbuf;

	/* for rx bulk */
	rxq->rx_nb_avail = 0;
	rxq->rx_next_avail = 0;
	rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);

	rxq->rx_tail = 0;
	rxq->nb_rx_hold = 0;
	rxq->pkt_first_seg = NULL;
	rxq->pkt_last_seg = NULL;
}

static inline void
reset_tx_queue(struct ice_tx_queue *txq)
{
	struct ice_tx_entry *txe;
	uint32_t i, size;
	uint16_t prev;

	if (!txq) {
		PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL");
		return;
	}

	txe = txq->sw_ring;
	size = sizeof(struct ice_tx_desc) * txq->nb_tx_desc;
	for (i = 0; i < size; i++)
		((volatile char *)txq->tx_ring)[i] = 0;

	prev = (uint16_t)(txq->nb_tx_desc - 1);
	for (i = 0; i < txq->nb_tx_desc; i++) {
		txq->tx_ring[i].cmd_type_offset_bsz =
			rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DESC_DONE);
		txe[i].mbuf = NULL;
		txe[i].last_id = i;
		txe[prev].next_id = i;
		prev = i;
	}

	txq->tx_tail = 0;
	txq->nb_tx_used = 0;

	txq->last_desc_cleaned = txq->nb_tx_desc - 1;
	txq->nb_tx_free = txq->nb_tx_desc - 1;

	txq->tx_next_dd = txq->tx_rs_thresh - 1;
	txq->tx_next_rs = txq->tx_rs_thresh - 1;
}

static int
ice_dcf_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct ice_dcf_adapter *ad = dev->data->dev_private;
	struct ice_dcf_hw *hw = &ad->real_hw;
	struct ice_rx_queue *rxq;
	int err;

	if (rx_queue_id >= dev->data->nb_rx_queues)
		return -EINVAL;

	err = ice_dcf_switch_queue(hw, rx_queue_id, true, false);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
			    rx_queue_id);
		return err;
	}

	rxq = dev->data->rx_queues[rx_queue_id];
	rxq->rx_rel_mbufs(rxq);
	reset_rx_queue(rxq);
	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

	return 0;
}

static int
ice_dcf_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct ice_dcf_adapter *ad = dev->data->dev_private;
	struct iavf_hw *hw = &ad->real_hw.avf;
	struct ice_tx_queue *txq;
	int err = 0;

	if (tx_queue_id >= dev->data->nb_tx_queues)
		return -EINVAL;

	txq = dev->data->tx_queues[tx_queue_id];

	/* Init the TX tail register. */
	txq->qtx_tail = hw->hw_addr + IAVF_QTX_TAIL1(tx_queue_id);
	IAVF_PCI_REG_WRITE(txq->qtx_tail, 0);
	IAVF_WRITE_FLUSH(hw);

	/* Ready to switch the queue on */
	err = ice_dcf_switch_queue(&ad->real_hw, tx_queue_id, false, true);

	if (err) {
		PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
			    tx_queue_id);
		return err;
	}

	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	return 0;
}

static int
ice_dcf_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct ice_dcf_adapter *ad = dev->data->dev_private;
	struct ice_dcf_hw *hw = &ad->real_hw;
	struct ice_tx_queue *txq;
	int err;

	if (tx_queue_id >= dev->data->nb_tx_queues)
		return -EINVAL;

	err = ice_dcf_switch_queue(hw, tx_queue_id, false, false);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
			    tx_queue_id);
		return err;
	}

	txq = dev->data->tx_queues[tx_queue_id];
	txq->tx_rel_mbufs(txq);
	reset_tx_queue(txq);
	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

	return 0;
}

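/* Start all Tx queues first, then all Rx queues, skipping queues marked for
 * deferred start. If any queue fails to start, the queues that were already
 * started are stopped again before returning the error.
 */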
static int
ice_dcf_start_queues(struct rte_eth_dev *dev)
{
	struct ice_rx_queue *rxq;
	struct ice_tx_queue *txq;
	int nb_rxq = 0;
	int nb_txq, i;

	for (nb_txq = 0; nb_txq < dev->data->nb_tx_queues; nb_txq++) {
		txq = dev->data->tx_queues[nb_txq];
		if (txq->tx_deferred_start)
			continue;
		if (ice_dcf_tx_queue_start(dev, nb_txq) != 0) {
			PMD_DRV_LOG(ERR, "Failed to start TX queue %u", nb_txq);
			goto tx_err;
		}
	}

	for (nb_rxq = 0; nb_rxq < dev->data->nb_rx_queues; nb_rxq++) {
		rxq = dev->data->rx_queues[nb_rxq];
		if (rxq->rx_deferred_start)
			continue;
		if (ice_dcf_rx_queue_start(dev, nb_rxq) != 0) {
			PMD_DRV_LOG(ERR, "Failed to start RX queue %u", nb_rxq);
			goto rx_err;
		}
	}

	return 0;

	/* stop the started queues if we failed to start all queues */
rx_err:
	for (i = 0; i < nb_rxq; i++)
		ice_dcf_rx_queue_stop(dev, i);
tx_err:
	for (i = 0; i < nb_txq; i++)
		ice_dcf_tx_queue_stop(dev, i);

	return -1;
}

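/* Bring the port up: initialize the Rx queues, optionally configure RSS,
 * program the queue and IRQ mappings over virtchnl, start the queues and
 * re-add the MAC address before reporting the link as up.
 */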
static int
ice_dcf_dev_start(struct rte_eth_dev *dev)
{
	struct ice_dcf_adapter *dcf_ad = dev->data->dev_private;
	struct rte_intr_handle *intr_handle = dev->intr_handle;
	struct ice_adapter *ad = &dcf_ad->parent;
	struct ice_dcf_hw *hw = &dcf_ad->real_hw;
	int ret;

	if (hw->resetting) {
		PMD_DRV_LOG(ERR,
			    "The DCF has been reset by PF, please reinit first");
		return -EIO;
	}

	if (hw->tm_conf.root && !hw->tm_conf.committed) {
		PMD_DRV_LOG(ERR,
			"please call hierarchy_commit() before starting the port");
		return -EIO;
	}

	ad->pf.adapter_stopped = 0;

	hw->num_queue_pairs = RTE_MAX(dev->data->nb_rx_queues,
				      dev->data->nb_tx_queues);

	ret = ice_dcf_init_rx_queues(dev);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to init queues");
		return ret;
	}

	if (hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
		ret = ice_dcf_init_rss(hw);
		if (ret) {
			PMD_DRV_LOG(ERR, "Failed to configure RSS");
			return ret;
		}
	}

	ret = ice_dcf_configure_queues(hw);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to configure queues");
		return ret;
	}

	ret = ice_dcf_config_rx_queues_irqs(dev, intr_handle);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to configure RX queues' IRQs");
		return ret;
	}

	if (dev->data->dev_conf.intr_conf.rxq != 0) {
		rte_intr_disable(intr_handle);
		rte_intr_enable(intr_handle);
	}

	ret = ice_dcf_start_queues(dev);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to enable queues");
		return ret;
	}

	ret = ice_dcf_add_del_all_mac_addr(hw, true);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to add MAC address");
		return ret;
	}

	dev->data->dev_link.link_status = ETH_LINK_UP;

	return 0;
}

static void
ice_dcf_stop_queues(struct rte_eth_dev *dev)
{
	struct ice_dcf_adapter *ad = dev->data->dev_private;
	struct ice_dcf_hw *hw = &ad->real_hw;
	struct ice_rx_queue *rxq;
	struct ice_tx_queue *txq;
	int ret, i;

	/* Stop all queues */
	ret = ice_dcf_disable_queues(hw);
	if (ret)
		PMD_DRV_LOG(WARNING, "Failed to stop queues");

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		if (!txq)
			continue;
		txq->tx_rel_mbufs(txq);
		reset_tx_queue(txq);
		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
	}
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		if (!rxq)
			continue;
		rxq->rx_rel_mbufs(rxq);
		reset_rx_queue(rxq);
		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
	}
}

static int
ice_dcf_dev_stop(struct rte_eth_dev *dev)
{
	struct ice_dcf_adapter *dcf_ad = dev->data->dev_private;
	struct rte_intr_handle *intr_handle = dev->intr_handle;
	struct ice_adapter *ad = &dcf_ad->parent;
	struct ice_dcf_hw *hw = &dcf_ad->real_hw;

	if (ad->pf.adapter_stopped == 1) {
		PMD_DRV_LOG(DEBUG, "Port is already stopped");
		return 0;
	}

	/* Stop the VF representors for this device */
	ice_dcf_vf_repr_stop_all(dcf_ad);

	ice_dcf_stop_queues(dev);

	rte_intr_efd_disable(intr_handle);
	if (intr_handle->intr_vec) {
		rte_free(intr_handle->intr_vec);
		intr_handle->intr_vec = NULL;
	}

	ice_dcf_add_del_all_mac_addr(&dcf_ad->real_hw, false);
	dev->data->dev_link.link_status = ETH_LINK_DOWN;
	ad->pf.adapter_stopped = 1;
	hw->tm_conf.committed = false;

	return 0;
}

static int
ice_dcf_dev_configure(struct rte_eth_dev *dev)
{
	struct ice_dcf_adapter *dcf_ad = dev->data->dev_private;
	struct ice_adapter *ad = &dcf_ad->parent;

	ad->rx_bulk_alloc_allowed = true;
	ad->tx_simple_allowed = true;

	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;

	return 0;
}

static int
ice_dcf_dev_info_get(struct rte_eth_dev *dev,
		     struct rte_eth_dev_info *dev_info)
{
	struct ice_dcf_adapter *adapter = dev->data->dev_private;
	struct ice_dcf_hw *hw = &adapter->real_hw;

	dev_info->max_mac_addrs = 1;
	dev_info->max_rx_queues = hw->vsi_res->num_queue_pairs;
	dev_info->max_tx_queues = hw->vsi_res->num_queue_pairs;
	dev_info->min_rx_bufsize = ICE_BUF_SIZE_MIN;
	dev_info->max_rx_pktlen = ICE_FRAME_SIZE_MAX;
	dev_info->hash_key_size = hw->vf_res->rss_key_size;
	dev_info->reta_size = hw->vf_res->rss_lut_size;
	dev_info->flow_type_rss_offloads = ICE_RSS_OFFLOAD_ALL;

	dev_info->rx_offload_capa =
		DEV_RX_OFFLOAD_VLAN_STRIP |
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM |
		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
		DEV_RX_OFFLOAD_SCATTER |
		DEV_RX_OFFLOAD_VLAN_FILTER |
		DEV_RX_OFFLOAD_RSS_HASH;
	dev_info->tx_offload_capa =
		DEV_TX_OFFLOAD_VLAN_INSERT |
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM |
		DEV_TX_OFFLOAD_SCTP_CKSUM |
		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
		DEV_TX_OFFLOAD_TCP_TSO |
		DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
		DEV_TX_OFFLOAD_GRE_TNL_TSO |
		DEV_TX_OFFLOAD_IPIP_TNL_TSO |
		DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
		DEV_TX_OFFLOAD_MULTI_SEGS;

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_thresh = {
			.pthresh = ICE_DEFAULT_RX_PTHRESH,
			.hthresh = ICE_DEFAULT_RX_HTHRESH,
			.wthresh = ICE_DEFAULT_RX_WTHRESH,
		},
		.rx_free_thresh = ICE_DEFAULT_RX_FREE_THRESH,
		.rx_drop_en = 0,
		.offloads = 0,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_thresh = {
			.pthresh = ICE_DEFAULT_TX_PTHRESH,
			.hthresh = ICE_DEFAULT_TX_HTHRESH,
			.wthresh = ICE_DEFAULT_TX_WTHRESH,
		},
		.tx_free_thresh = ICE_DEFAULT_TX_FREE_THRESH,
		.tx_rs_thresh = ICE_DEFAULT_TX_RSBIT_THRESH,
		.offloads = 0,
	};

	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = ICE_MAX_RING_DESC,
		.nb_min = ICE_MIN_RING_DESC,
		.nb_align = ICE_ALIGN_RING_DESC,
	};

	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = ICE_MAX_RING_DESC,
		.nb_min = ICE_MIN_RING_DESC,
		.nb_align = ICE_ALIGN_RING_DESC,
	};

	return 0;
}

static int
ice_dcf_dev_promiscuous_enable(__rte_unused struct rte_eth_dev *dev)
{
	return 0;
}

static int
ice_dcf_dev_promiscuous_disable(__rte_unused struct rte_eth_dev *dev)
{
	return 0;
}

static int
ice_dcf_dev_allmulticast_enable(__rte_unused struct rte_eth_dev *dev)
{
	return 0;
}

static int
ice_dcf_dev_allmulticast_disable(__rte_unused struct rte_eth_dev *dev)
{
	return 0;
}

static int
ice_dcf_dev_flow_ops_get(struct rte_eth_dev *dev,
			 const struct rte_flow_ops **ops)
{
	if (!dev)
		return -EINVAL;

	*ops = &ice_flow_ops;
	return 0;
}

#define ICE_DCF_32_BIT_WIDTH (CHAR_BIT * 4)
#define ICE_DCF_48_BIT_WIDTH (CHAR_BIT * 6)
#define ICE_DCF_48_BIT_MASK  RTE_LEN2MASK(ICE_DCF_48_BIT_WIDTH, uint64_t)

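/* The statistics reported over virtchnl are free-running 48-bit or 32-bit
 * counters; these helpers compute the delta against the saved offset and
 * handle a single wrap-around of the counter width.
 */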
static void
ice_dcf_stat_update_48(uint64_t *offset, uint64_t *stat)
{
	if (*stat >= *offset)
		*stat = *stat - *offset;
	else
		*stat = (uint64_t)((*stat +
			((uint64_t)1 << ICE_DCF_48_BIT_WIDTH)) - *offset);

	*stat &= ICE_DCF_48_BIT_MASK;
}

static void
ice_dcf_stat_update_32(uint64_t *offset, uint64_t *stat)
{
	if (*stat >= *offset)
		*stat = (uint64_t)(*stat - *offset);
	else
		*stat = (uint64_t)((*stat +
			((uint64_t)1 << ICE_DCF_32_BIT_WIDTH)) - *offset);
}

static void
ice_dcf_update_stats(struct virtchnl_eth_stats *oes,
		     struct virtchnl_eth_stats *nes)
{
	ice_dcf_stat_update_48(&oes->rx_bytes, &nes->rx_bytes);
	ice_dcf_stat_update_48(&oes->rx_unicast, &nes->rx_unicast);
	ice_dcf_stat_update_48(&oes->rx_multicast, &nes->rx_multicast);
	ice_dcf_stat_update_48(&oes->rx_broadcast, &nes->rx_broadcast);
	ice_dcf_stat_update_32(&oes->rx_discards, &nes->rx_discards);
	ice_dcf_stat_update_48(&oes->tx_bytes, &nes->tx_bytes);
	ice_dcf_stat_update_48(&oes->tx_unicast, &nes->tx_unicast);
	ice_dcf_stat_update_48(&oes->tx_multicast, &nes->tx_multicast);
	ice_dcf_stat_update_48(&oes->tx_broadcast, &nes->tx_broadcast);
	ice_dcf_stat_update_32(&oes->tx_errors, &nes->tx_errors);
	ice_dcf_stat_update_32(&oes->tx_discards, &nes->tx_discards);
}

static int
ice_dcf_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	struct ice_dcf_adapter *ad = dev->data->dev_private;
	struct ice_dcf_hw *hw = &ad->real_hw;
	struct virtchnl_eth_stats pstats;
	int ret;

	if (hw->resetting) {
		PMD_DRV_LOG(ERR,
			    "The DCF has been reset by PF, please reinit first");
		return -EIO;
	}

	ret = ice_dcf_query_stats(hw, &pstats);
	if (ret == 0) {
		ice_dcf_update_stats(&hw->eth_stats_offset, &pstats);
		stats->ipackets = pstats.rx_unicast + pstats.rx_multicast +
				pstats.rx_broadcast - pstats.rx_discards;
		stats->opackets = pstats.tx_broadcast + pstats.tx_multicast +
						pstats.tx_unicast;
		stats->imissed = pstats.rx_discards;
		stats->oerrors = pstats.tx_errors + pstats.tx_discards;
		stats->ibytes = pstats.rx_bytes;
		stats->ibytes -= stats->ipackets * RTE_ETHER_CRC_LEN;
		stats->obytes = pstats.tx_bytes;
	} else {
		PMD_DRV_LOG(ERR, "Get statistics failed");
	}
	return ret;
}

static int
ice_dcf_stats_reset(struct rte_eth_dev *dev)
{
	struct ice_dcf_adapter *ad = dev->data->dev_private;
	struct ice_dcf_hw *hw = &ad->real_hw;
	struct virtchnl_eth_stats pstats;
	int ret;

	if (hw->resetting)
		return 0;

	/* read stat values to clear hardware registers */
	ret = ice_dcf_query_stats(hw, &pstats);
	if (ret != 0)
		return ret;

	/* set stats offset based on current values */
	hw->eth_stats_offset = pstats;

	return 0;
}

static void
ice_dcf_free_repr_info(struct ice_dcf_adapter *dcf_adapter)
{
	if (dcf_adapter->repr_infos) {
		rte_free(dcf_adapter->repr_infos);
		dcf_adapter->repr_infos = NULL;
	}
}

static int
ice_dcf_init_repr_info(struct ice_dcf_adapter *dcf_adapter)
{
	dcf_adapter->repr_infos =
			rte_calloc("ice_dcf_rep_info",
				   dcf_adapter->real_hw.num_vfs,
				   sizeof(dcf_adapter->repr_infos[0]), 0);
	if (!dcf_adapter->repr_infos) {
		PMD_DRV_LOG(ERR, "Failed to alloc memory for VF representors\n");
		return -ENOMEM;
	}

	return 0;
}

static int
ice_dcf_dev_close(struct rte_eth_dev *dev)
{
	struct ice_dcf_adapter *adapter = dev->data->dev_private;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	(void)ice_dcf_dev_stop(dev);

	ice_free_queues(dev);

	ice_dcf_free_repr_info(adapter);
	ice_dcf_uninit_parent_adapter(dev);
	ice_dcf_uninit_hw(dev, &adapter->real_hw);

	return 0;
}

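/* Link status is derived entirely from the values cached in the VF:
 * hw->link_up and hw->link_speed (in Mbps) are refreshed when a LINK_CHANGE
 * event arrives from the PF, and the speed is translated below into the
 * corresponding ETH_SPEED_NUM_* value.
 */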
int
ice_dcf_link_update(struct rte_eth_dev *dev,
		    __rte_unused int wait_to_complete)
{
	struct ice_dcf_adapter *ad = dev->data->dev_private;
	struct ice_dcf_hw *hw = &ad->real_hw;
	struct rte_eth_link new_link;

	memset(&new_link, 0, sizeof(new_link));

	/* Only read the status info stored in the VF; it is updated when a
	 * LINK_CHANGE event is received from the PF over virtchnl.
	 */
	switch (hw->link_speed) {
	case 10:
		new_link.link_speed = ETH_SPEED_NUM_10M;
		break;
	case 100:
		new_link.link_speed = ETH_SPEED_NUM_100M;
		break;
	case 1000:
		new_link.link_speed = ETH_SPEED_NUM_1G;
		break;
	case 10000:
		new_link.link_speed = ETH_SPEED_NUM_10G;
		break;
	case 20000:
		new_link.link_speed = ETH_SPEED_NUM_20G;
		break;
	case 25000:
		new_link.link_speed = ETH_SPEED_NUM_25G;
		break;
	case 40000:
		new_link.link_speed = ETH_SPEED_NUM_40G;
		break;
	case 50000:
		new_link.link_speed = ETH_SPEED_NUM_50G;
		break;
	case 100000:
		new_link.link_speed = ETH_SPEED_NUM_100G;
		break;
	default:
		new_link.link_speed = ETH_SPEED_NUM_NONE;
		break;
	}

	new_link.link_duplex = ETH_LINK_FULL_DUPLEX;
	new_link.link_status = hw->link_up ? ETH_LINK_UP :
					     ETH_LINK_DOWN;
	new_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
				ETH_LINK_SPEED_FIXED);

	return rte_eth_linkstatus_set(dev, &new_link);
}

/* Add UDP tunneling port */
static int
ice_dcf_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
				struct rte_eth_udp_tunnel *udp_tunnel)
{
	struct ice_dcf_adapter *adapter = dev->data->dev_private;
	struct ice_adapter *parent_adapter = &adapter->parent;
	struct ice_hw *parent_hw = &parent_adapter->hw;
	int ret = 0;

	if (!udp_tunnel)
		return -EINVAL;

	switch (udp_tunnel->prot_type) {
	case RTE_TUNNEL_TYPE_VXLAN:
		ret = ice_create_tunnel(parent_hw, TNL_VXLAN,
					udp_tunnel->udp_port);
		break;
	case RTE_TUNNEL_TYPE_ECPRI:
		ret = ice_create_tunnel(parent_hw, TNL_ECPRI,
					udp_tunnel->udp_port);
		break;
	default:
		PMD_DRV_LOG(ERR, "Invalid tunnel type");
		ret = -EINVAL;
		break;
	}

	return ret;
}

/* Delete UDP tunneling port */
static int
ice_dcf_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
				struct rte_eth_udp_tunnel *udp_tunnel)
{
	struct ice_dcf_adapter *adapter = dev->data->dev_private;
	struct ice_adapter *parent_adapter = &adapter->parent;
	struct ice_hw *parent_hw = &parent_adapter->hw;
	int ret = 0;

	if (!udp_tunnel)
		return -EINVAL;

	switch (udp_tunnel->prot_type) {
	case RTE_TUNNEL_TYPE_VXLAN:
	case RTE_TUNNEL_TYPE_ECPRI:
		ret = ice_destroy_tunnel(parent_hw, udp_tunnel->udp_port, 0);
		break;
	default:
		PMD_DRV_LOG(ERR, "Invalid tunnel type");
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int
ice_dcf_tm_ops_get(struct rte_eth_dev *dev __rte_unused,
		void *arg)
{
	if (!arg)
		return -EINVAL;

	*(const void **)arg = &ice_dcf_tm_ops;

	return 0;
}

static int
ice_dcf_dev_reset(struct rte_eth_dev *dev)
{
	int ret;

	ret = ice_dcf_dev_uninit(dev);
	if (ret)
		return ret;

	ret = ice_dcf_dev_init(dev);

	return ret;
}

static const struct eth_dev_ops ice_dcf_eth_dev_ops = {
	.dev_start               = ice_dcf_dev_start,
	.dev_stop                = ice_dcf_dev_stop,
	.dev_close               = ice_dcf_dev_close,
	.dev_reset               = ice_dcf_dev_reset,
	.dev_configure           = ice_dcf_dev_configure,
	.dev_infos_get           = ice_dcf_dev_info_get,
	.rx_queue_setup          = ice_rx_queue_setup,
	.tx_queue_setup          = ice_tx_queue_setup,
	.rx_queue_release        = ice_dev_rx_queue_release,
	.tx_queue_release        = ice_dev_tx_queue_release,
	.rx_queue_start          = ice_dcf_rx_queue_start,
	.tx_queue_start          = ice_dcf_tx_queue_start,
	.rx_queue_stop           = ice_dcf_rx_queue_stop,
	.tx_queue_stop           = ice_dcf_tx_queue_stop,
	.link_update             = ice_dcf_link_update,
	.stats_get               = ice_dcf_stats_get,
	.stats_reset             = ice_dcf_stats_reset,
	.promiscuous_enable      = ice_dcf_dev_promiscuous_enable,
	.promiscuous_disable     = ice_dcf_dev_promiscuous_disable,
	.allmulticast_enable     = ice_dcf_dev_allmulticast_enable,
	.allmulticast_disable    = ice_dcf_dev_allmulticast_disable,
	.flow_ops_get            = ice_dcf_dev_flow_ops_get,
	.udp_tunnel_port_add     = ice_dcf_dev_udp_tunnel_port_add,
	.udp_tunnel_port_del     = ice_dcf_dev_udp_tunnel_port_del,
	.tm_ops_get              = ice_dcf_tm_ops_get,
};

static int
ice_dcf_dev_init(struct rte_eth_dev *eth_dev)
{
	struct ice_dcf_adapter *adapter = eth_dev->data->dev_private;

	adapter->real_hw.resetting = false;
	eth_dev->dev_ops = &ice_dcf_eth_dev_ops;
	eth_dev->rx_pkt_burst = ice_dcf_recv_pkts;
	eth_dev->tx_pkt_burst = ice_dcf_xmit_pkts;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	adapter->real_hw.vc_event_msg_cb = ice_dcf_handle_pf_event_msg;
	if (ice_dcf_init_hw(eth_dev, &adapter->real_hw) != 0) {
		PMD_INIT_LOG(ERR, "Failed to init DCF hardware");
		return -1;
	}

	if (ice_dcf_init_parent_adapter(eth_dev) != 0) {
		PMD_INIT_LOG(ERR, "Failed to init DCF parent adapter");
		ice_dcf_uninit_hw(eth_dev, &adapter->real_hw);
		return -1;
	}

	return 0;
}

static int
ice_dcf_dev_uninit(struct rte_eth_dev *eth_dev)
{
	ice_dcf_dev_close(eth_dev);

	return 0;
}

static int
ice_dcf_cap_check_handler(__rte_unused const char *key,
			  const char *value, __rte_unused void *opaque)
{
	if (strcmp(value, "dcf"))
		return -1;

	return 0;
}

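/* A port is probed as a DCF only when its devargs carry the key-value pair
 * "cap=dcf" (e.g. a testpmd-style devarg such as "-a <BDF>,cap=dcf"; the
 * exact BDF here is only an illustration). Without it, probing is declined
 * so the VF can be claimed by the regular iavf driver instead.
 */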
static int
ice_dcf_cap_selected(struct rte_devargs *devargs)
{
	struct rte_kvargs *kvlist;
	const char *key = "cap";
	int ret = 0;

	if (devargs == NULL)
		return 0;

	kvlist = rte_kvargs_parse(devargs->args, NULL);
	if (kvlist == NULL)
		return 0;

	if (!rte_kvargs_count(kvlist, key))
		goto exit;

	/* dcf capability selected when there's a key-value pair: cap=dcf */
	if (rte_kvargs_process(kvlist, key,
			       ice_dcf_cap_check_handler, NULL) < 0)
		goto exit;

	ret = 1;

exit:
	rte_kvargs_free(kvlist);
	return ret;
}

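/* Probe the DCF port itself first, then, if "representor=..." devargs were
 * given, create one VF representor ethdev per requested VF ID. The VF that
 * owns the DCF VSI cannot be represented, and the number of representors is
 * bounded by the number of VFs and by RTE_MAX_ETHPORTS.
 */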
static int
eth_ice_dcf_pci_probe(__rte_unused struct rte_pci_driver *pci_drv,
		      struct rte_pci_device *pci_dev)
{
	struct rte_eth_devargs eth_da = { .nb_representor_ports = 0 };
	struct ice_dcf_vf_repr_param repr_param;
	char repr_name[RTE_ETH_NAME_MAX_LEN];
	struct ice_dcf_adapter *dcf_adapter;
	struct rte_eth_dev *dcf_ethdev;
	uint16_t dcf_vsi_id;
	int i, ret;

	if (!ice_dcf_cap_selected(pci_dev->device.devargs))
		return 1;

	ret = rte_eth_devargs_parse(pci_dev->device.devargs->args, &eth_da);
	if (ret)
		return ret;

	ret = rte_eth_dev_pci_generic_probe(pci_dev,
					    sizeof(struct ice_dcf_adapter),
					    ice_dcf_dev_init);
	if (ret || !eth_da.nb_representor_ports)
		return ret;
	if (eth_da.type != RTE_ETH_REPRESENTOR_VF)
		return -ENOTSUP;

	dcf_ethdev = rte_eth_dev_allocated(pci_dev->device.name);
	if (dcf_ethdev == NULL)
		return -ENODEV;

	dcf_adapter = dcf_ethdev->data->dev_private;
	ret = ice_dcf_init_repr_info(dcf_adapter);
	if (ret)
		return ret;

	if (eth_da.nb_representor_ports > dcf_adapter->real_hw.num_vfs ||
	    eth_da.nb_representor_ports >= RTE_MAX_ETHPORTS) {
		PMD_DRV_LOG(ERR, "the number of port representors is too large: %u",
			    eth_da.nb_representor_ports);
		ice_dcf_free_repr_info(dcf_adapter);
		return -EINVAL;
	}

	dcf_vsi_id = dcf_adapter->real_hw.vsi_id | VIRTCHNL_DCF_VF_VSI_VALID;

	repr_param.dcf_eth_dev = dcf_ethdev;
	repr_param.switch_domain_id = 0;

	for (i = 0; i < eth_da.nb_representor_ports; i++) {
		uint16_t vf_id = eth_da.representor_ports[i];
		struct rte_eth_dev *vf_rep_eth_dev;

		if (vf_id >= dcf_adapter->real_hw.num_vfs) {
			PMD_DRV_LOG(ERR, "VF ID %u is out of range (0 ~ %u)",
				    vf_id, dcf_adapter->real_hw.num_vfs - 1);
			ret = -EINVAL;
			break;
		}

		if (dcf_adapter->real_hw.vf_vsi_map[vf_id] == dcf_vsi_id) {
			PMD_DRV_LOG(ERR, "VF ID %u is DCF's ID.\n", vf_id);
			ret = -EINVAL;
			break;
		}

		repr_param.vf_id = vf_id;
		snprintf(repr_name, sizeof(repr_name), "net_%s_representor_%u",
			 pci_dev->device.name, vf_id);
		ret = rte_eth_dev_create(&pci_dev->device, repr_name,
					 sizeof(struct ice_dcf_vf_repr),
					 NULL, NULL, ice_dcf_vf_repr_init,
					 &repr_param);
		if (ret) {
			PMD_DRV_LOG(ERR, "failed to create DCF VF representor %s",
				    repr_name);
			break;
		}

		vf_rep_eth_dev = rte_eth_dev_allocated(repr_name);
		if (!vf_rep_eth_dev) {
			PMD_DRV_LOG(ERR,
				    "Failed to find the ethdev for DCF VF representor: %s",
				    repr_name);
			ret = -ENODEV;
			break;
		}

		dcf_adapter->repr_infos[vf_id].vf_rep_eth_dev = vf_rep_eth_dev;
		dcf_adapter->num_reprs++;
	}

	return ret;
}

static int
eth_ice_dcf_pci_remove(struct rte_pci_device *pci_dev)
{
	struct rte_eth_dev *eth_dev;

	eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
	if (!eth_dev)
		return 0;

	if (eth_dev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR)
		return rte_eth_dev_pci_generic_remove(pci_dev,
						      ice_dcf_vf_repr_uninit);
	else
		return rte_eth_dev_pci_generic_remove(pci_dev,
						      ice_dcf_dev_uninit);
}

static const struct rte_pci_id pci_id_ice_dcf_map[] = {
	{ RTE_PCI_DEVICE(IAVF_INTEL_VENDOR_ID, IAVF_DEV_ID_ADAPTIVE_VF) },
	{ .vendor_id = 0, /* sentinel */ },
};

static struct rte_pci_driver rte_ice_dcf_pmd = {
	.id_table = pci_id_ice_dcf_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = eth_ice_dcf_pci_probe,
	.remove = eth_ice_dcf_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_ice_dcf, rte_ice_dcf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_ice_dcf, pci_id_ice_dcf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_ice_dcf, "* igb_uio | vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(net_ice_dcf, "cap=dcf");