drivers/net/ice/ice_dcf_ethdev.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2020 Intel Corporation
3  */
4
5 #include <errno.h>
6 #include <stdbool.h>
7 #include <sys/types.h>
8 #include <unistd.h>
9
10 #include <rte_interrupts.h>
11 #include <rte_debug.h>
12 #include <rte_pci.h>
13 #include <rte_atomic.h>
14 #include <rte_eal.h>
15 #include <rte_ether.h>
16 #include <ethdev_pci.h>
17 #include <rte_kvargs.h>
18 #include <rte_malloc.h>
19 #include <rte_memzone.h>
20 #include <rte_dev.h>
21
22 #include <iavf_devids.h>
23
24 #include "ice_generic_flow.h"
25 #include "ice_dcf_ethdev.h"
26 #include "ice_rxtx.h"
27
28 static int
29 ice_dcf_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
30                                 struct rte_eth_udp_tunnel *udp_tunnel);
31 static int
32 ice_dcf_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
33                                 struct rte_eth_udp_tunnel *udp_tunnel);
34
35 static int
36 ice_dcf_dev_init(struct rte_eth_dev *eth_dev);
37
38 static int
39 ice_dcf_dev_uninit(struct rte_eth_dev *eth_dev);
40
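/* Rx/Tx burst stubs: the DCF port is mainly a control channel (flow
 * configuration through the PF), so these callbacks simply report that no
 * packets were received or transmitted.
 */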
41 static uint16_t
42 ice_dcf_recv_pkts(__rte_unused void *rx_queue,
43                   __rte_unused struct rte_mbuf **bufs,
44                   __rte_unused uint16_t nb_pkts)
45 {
46         return 0;
47 }
48
49 static uint16_t
50 ice_dcf_xmit_pkts(__rte_unused void *tx_queue,
51                   __rte_unused struct rte_mbuf **bufs,
52                   __rte_unused uint16_t nb_pkts)
53 {
54         return 0;
55 }
56
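/* Derive the Rx buffer length from the mempool data room, validate the
 * maximum packet length against the jumbo-frame setting, enable scattered
 * Rx when a packet cannot fit into a single buffer, and program the queue's
 * tail register.
 */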
57 static int
58 ice_dcf_init_rxq(struct rte_eth_dev *dev, struct ice_rx_queue *rxq)
59 {
60         struct ice_dcf_adapter *dcf_ad = dev->data->dev_private;
61         struct rte_eth_dev_data *dev_data = dev->data;
62         struct iavf_hw *hw = &dcf_ad->real_hw.avf;
63         uint16_t buf_size, max_pkt_len;
64
65         buf_size = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;
66         rxq->rx_hdr_len = 0;
67         rxq->rx_buf_len = RTE_ALIGN(buf_size, (1 << ICE_RLAN_CTX_DBUF_S));
68         max_pkt_len = RTE_MIN((uint32_t)
69                               ICE_SUPPORT_CHAIN_NUM * rxq->rx_buf_len,
70                               dev->data->dev_conf.rxmode.max_rx_pkt_len);
71
72         /* Check if the jumbo frame and maximum packet length are set
73          * correctly.
74          */
75         if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
76                 if (max_pkt_len <= ICE_ETH_MAX_LEN ||
77                     max_pkt_len > ICE_FRAME_SIZE_MAX) {
78                         PMD_DRV_LOG(ERR, "maximum packet length must be "
79                                     "larger than %u and smaller than %u, "
80                                     "as jumbo frame is enabled",
81                                     (uint32_t)ICE_ETH_MAX_LEN,
82                                     (uint32_t)ICE_FRAME_SIZE_MAX);
83                         return -EINVAL;
84                 }
85         } else {
86                 if (max_pkt_len < RTE_ETHER_MIN_LEN ||
87                     max_pkt_len > ICE_ETH_MAX_LEN) {
88                         PMD_DRV_LOG(ERR, "maximum packet length must be "
89                                     "larger than %u and smaller than %u, "
90                                     "as jumbo frame is disabled",
91                                     (uint32_t)RTE_ETHER_MIN_LEN,
92                                     (uint32_t)ICE_ETH_MAX_LEN);
93                         return -EINVAL;
94                 }
95         }
96
97         rxq->max_pkt_len = max_pkt_len;
98         if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
99             (rxq->max_pkt_len + 2 * ICE_VLAN_TAG_SIZE) > buf_size) {
100                 dev_data->scattered_rx = 1;
101         }
102         rxq->qrx_tail = hw->hw_addr + IAVF_QRX_TAIL1(rxq->queue_id);
103         IAVF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
104         IAVF_WRITE_FLUSH(hw);
105
106         return 0;
107 }
108
109 static int
110 ice_dcf_init_rx_queues(struct rte_eth_dev *dev)
111 {
112         struct ice_rx_queue **rxq =
113                 (struct ice_rx_queue **)dev->data->rx_queues;
114         int i, ret;
115
116         for (i = 0; i < dev->data->nb_rx_queues; i++) {
117                 if (!rxq[i] || !rxq[i]->q_set)
118                         continue;
119                 ret = ice_dcf_init_rxq(dev, rxq[i]);
120                 if (ret)
121                         return ret;
122         }
123
124         ice_set_rx_function(dev);
125         ice_set_tx_function(dev);
126
127         return 0;
128 }
129
130 #define IAVF_MISC_VEC_ID                RTE_INTR_VEC_ZERO_OFFSET
131 #define IAVF_RX_VEC_START               RTE_INTR_VEC_RXTX_OFFSET
132
133 #define IAVF_ITR_INDEX_DEFAULT          0
134 #define IAVF_QUEUE_ITR_INTERVAL_DEFAULT 32 /* 32 us */
135 #define IAVF_QUEUE_ITR_INTERVAL_MAX     8160 /* 8160 us */
136
137 static inline uint16_t
138 iavf_calc_itr_interval(int16_t interval)
139 {
140         if (interval < 0 || interval > IAVF_QUEUE_ITR_INTERVAL_MAX)
141                 interval = IAVF_QUEUE_ITR_INTERVAL_DEFAULT;
142
143         /* Convert to a hardware count: each unit written represents 2 us */
144         return interval / 2;
145 }
146
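/* Map Rx queues to MSI-X vectors. Without Rx interrupts, all queues share a
 * single vector used only for descriptor write-back (via WB_ON_ITR when the
 * VF supports it); otherwise queues are spread round-robin over the vectors
 * granted to the VF. The resulting map is sent to the PF with
 * ice_dcf_config_irq_map().
 */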
147 static int
148 ice_dcf_config_rx_queues_irqs(struct rte_eth_dev *dev,
149                                      struct rte_intr_handle *intr_handle)
150 {
151         struct ice_dcf_adapter *adapter = dev->data->dev_private;
152         struct ice_dcf_hw *hw = &adapter->real_hw;
153         uint16_t interval, i;
154         int vec;
155
156         if (rte_intr_cap_multiple(intr_handle) &&
157             dev->data->dev_conf.intr_conf.rxq) {
158                 if (rte_intr_efd_enable(intr_handle, dev->data->nb_rx_queues))
159                         return -1;
160         }
161
162         if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
163                 intr_handle->intr_vec =
164                         rte_zmalloc("intr_vec",
165                                     dev->data->nb_rx_queues * sizeof(int), 0);
166                 if (!intr_handle->intr_vec) {
167                         PMD_DRV_LOG(ERR, "Failed to allocate %d rx intr_vec",
168                                     dev->data->nb_rx_queues);
169                         return -1;
170                 }
171         }
172
173         if (!dev->data->dev_conf.intr_conf.rxq ||
174             !rte_intr_dp_is_en(intr_handle)) {
175                 /* Rx interrupt disabled; map the interrupt only for write-back */
176                 hw->nb_msix = 1;
177                 if (hw->vf_res->vf_cap_flags &
178                     VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) {
179                         /* If WB_ON_ITR is supported, enable it */
180                         hw->msix_base = IAVF_RX_VEC_START;
181                         IAVF_WRITE_REG(&hw->avf,
182                                        IAVF_VFINT_DYN_CTLN1(hw->msix_base - 1),
183                                        IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK |
184                                        IAVF_VFINT_DYN_CTLN1_WB_ON_ITR_MASK);
185                 } else {
186                         /* Without the WB_ON_ITR offload flag, an
187                          * interrupt must be set for descriptor write-back.
188                          */
189                         hw->msix_base = IAVF_MISC_VEC_ID;
190
191                         /* set ITR to max */
192                         interval =
193                         iavf_calc_itr_interval(IAVF_QUEUE_ITR_INTERVAL_MAX);
194                         IAVF_WRITE_REG(&hw->avf, IAVF_VFINT_DYN_CTL01,
195                                        IAVF_VFINT_DYN_CTL01_INTENA_MASK |
196                                        (IAVF_ITR_INDEX_DEFAULT <<
197                                         IAVF_VFINT_DYN_CTL01_ITR_INDX_SHIFT) |
198                                        (interval <<
199                                         IAVF_VFINT_DYN_CTL01_INTERVAL_SHIFT));
200                 }
201                 IAVF_WRITE_FLUSH(&hw->avf);
202                 /* map all queues to the same interrupt */
203                 for (i = 0; i < dev->data->nb_rx_queues; i++)
204                         hw->rxq_map[hw->msix_base] |= 1 << i;
205         } else {
206                 if (!rte_intr_allow_others(intr_handle)) {
207                         hw->nb_msix = 1;
208                         hw->msix_base = IAVF_MISC_VEC_ID;
209                         for (i = 0; i < dev->data->nb_rx_queues; i++) {
210                                 hw->rxq_map[hw->msix_base] |= 1 << i;
211                                 intr_handle->intr_vec[i] = IAVF_MISC_VEC_ID;
212                         }
213                         PMD_DRV_LOG(DEBUG,
214                                     "vector %u is mapped to all Rx queues",
215                                     hw->msix_base);
216                 } else {
217                         /* If Rx interrupts are required and multiple
218                          * interrupt vectors can be used, vectors start from 1
219                          */
220                         hw->nb_msix = RTE_MIN(hw->vf_res->max_vectors,
221                                               intr_handle->nb_efd);
222                         hw->msix_base = IAVF_MISC_VEC_ID;
223                         vec = IAVF_MISC_VEC_ID;
224                         for (i = 0; i < dev->data->nb_rx_queues; i++) {
225                                 hw->rxq_map[vec] |= 1 << i;
226                                 intr_handle->intr_vec[i] = vec++;
227                                 if (vec >= hw->nb_msix)
228                                         vec = IAVF_RX_VEC_START;
229                         }
230                         PMD_DRV_LOG(DEBUG,
231                                     "%u vectors are mapped to %u Rx queues",
232                                     hw->nb_msix, dev->data->nb_rx_queues);
233                 }
234         }
235
236         if (ice_dcf_config_irq_map(hw)) {
237                 PMD_DRV_LOG(ERR, "config interrupt mapping failed");
238                 return -1;
239         }
240         return 0;
241 }
242
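/* Allocate one mbuf per Rx descriptor and write its DMA address into the
 * descriptor ring so the hardware has buffers to receive into.
 */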
243 static int
244 alloc_rxq_mbufs(struct ice_rx_queue *rxq)
245 {
246         volatile union ice_rx_flex_desc *rxd;
247         struct rte_mbuf *mbuf = NULL;
248         uint64_t dma_addr;
249         uint16_t i;
250
251         for (i = 0; i < rxq->nb_rx_desc; i++) {
252                 mbuf = rte_mbuf_raw_alloc(rxq->mp);
253                 if (unlikely(!mbuf)) {
254                         PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
255                         return -ENOMEM;
256                 }
257
258                 rte_mbuf_refcnt_set(mbuf, 1);
259                 mbuf->next = NULL;
260                 mbuf->data_off = RTE_PKTMBUF_HEADROOM;
261                 mbuf->nb_segs = 1;
262                 mbuf->port = rxq->port_id;
263
264                 dma_addr =
265                         rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
266
267                 rxd = &rxq->rx_ring[i];
268                 rxd->read.pkt_addr = dma_addr;
269                 rxd->read.hdr_addr = 0;
270 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
271                 rxd->read.rsvd1 = 0;
272                 rxd->read.rsvd2 = 0;
273 #endif
274
275                 rxq->sw_ring[i].mbuf = (void *)mbuf;
276         }
277
278         return 0;
279 }
280
281 static int
282 ice_dcf_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
283 {
284         struct ice_dcf_adapter *ad = dev->data->dev_private;
285         struct iavf_hw *hw = &ad->real_hw.avf;
286         struct ice_rx_queue *rxq;
287         int err = 0;
288
289         if (rx_queue_id >= dev->data->nb_rx_queues)
290                 return -EINVAL;
291
292         rxq = dev->data->rx_queues[rx_queue_id];
293
294         err = alloc_rxq_mbufs(rxq);
295         if (err) {
296                 PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
297                 return err;
298         }
299
300         rte_wmb();
301
302         /* Init the RX tail register. */
303         IAVF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
304         IAVF_WRITE_FLUSH(hw);
305
306         /* Ready to switch the queue on */
307         err = ice_dcf_switch_queue(&ad->real_hw, rx_queue_id, true, true);
308         if (err) {
309                 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
310                             rx_queue_id);
311                 return err;
312         }
313
314         dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
315
316         return 0;
317 }
318
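/* Clear the Rx descriptor ring, point the spare sw_ring entries used by the
 * bulk-allocation path at the fake mbuf, and reset the queue bookkeeping.
 */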
319 static inline void
320 reset_rx_queue(struct ice_rx_queue *rxq)
321 {
322         uint16_t len;
323         uint32_t i;
324
325         if (!rxq)
326                 return;
327
328         len = rxq->nb_rx_desc + ICE_RX_MAX_BURST;
329
330         for (i = 0; i < len * sizeof(union ice_rx_flex_desc); i++)
331                 ((volatile char *)rxq->rx_ring)[i] = 0;
332
333         memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
334
335         for (i = 0; i < ICE_RX_MAX_BURST; i++)
336                 rxq->sw_ring[rxq->nb_rx_desc + i].mbuf = &rxq->fake_mbuf;
337
338         /* for rx bulk */
339         rxq->rx_nb_avail = 0;
340         rxq->rx_next_avail = 0;
341         rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
342
343         rxq->rx_tail = 0;
344         rxq->nb_rx_hold = 0;
345         rxq->pkt_first_seg = NULL;
346         rxq->pkt_last_seg = NULL;
347 }
348
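/* Zero the Tx descriptor ring, mark every descriptor as done, re-link the
 * sw_ring entries, and reset the tail/cleanup/RS tracking fields.
 */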
349 static inline void
350 reset_tx_queue(struct ice_tx_queue *txq)
351 {
352         struct ice_tx_entry *txe;
353         uint32_t i, size;
354         uint16_t prev;
355
356         if (!txq) {
357                 PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL");
358                 return;
359         }
360
361         txe = txq->sw_ring;
362         size = sizeof(struct ice_tx_desc) * txq->nb_tx_desc;
363         for (i = 0; i < size; i++)
364                 ((volatile char *)txq->tx_ring)[i] = 0;
365
366         prev = (uint16_t)(txq->nb_tx_desc - 1);
367         for (i = 0; i < txq->nb_tx_desc; i++) {
368                 txq->tx_ring[i].cmd_type_offset_bsz =
369                         rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DESC_DONE);
370                 txe[i].mbuf = NULL;
371                 txe[i].last_id = i;
372                 txe[prev].next_id = i;
373                 prev = i;
374         }
375
376         txq->tx_tail = 0;
377         txq->nb_tx_used = 0;
378
379         txq->last_desc_cleaned = txq->nb_tx_desc - 1;
380         txq->nb_tx_free = txq->nb_tx_desc - 1;
381
382         txq->tx_next_dd = txq->tx_rs_thresh - 1;
383         txq->tx_next_rs = txq->tx_rs_thresh - 1;
384 }
385
386 static int
387 ice_dcf_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
388 {
389         struct ice_dcf_adapter *ad = dev->data->dev_private;
390         struct ice_dcf_hw *hw = &ad->real_hw;
391         struct ice_rx_queue *rxq;
392         int err;
393
394         if (rx_queue_id >= dev->data->nb_rx_queues)
395                 return -EINVAL;
396
397         err = ice_dcf_switch_queue(hw, rx_queue_id, true, false);
398         if (err) {
399                 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
400                             rx_queue_id);
401                 return err;
402         }
403
404         rxq = dev->data->rx_queues[rx_queue_id];
405         rxq->rx_rel_mbufs(rxq);
406         reset_rx_queue(rxq);
407         dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
408
409         return 0;
410 }
411
412 static int
413 ice_dcf_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
414 {
415         struct ice_dcf_adapter *ad = dev->data->dev_private;
416         struct iavf_hw *hw = &ad->real_hw.avf;
417         struct ice_tx_queue *txq;
418         int err = 0;
419
420         if (tx_queue_id >= dev->data->nb_tx_queues)
421                 return -EINVAL;
422
423         txq = dev->data->tx_queues[tx_queue_id];
424
425         /* Init the TX tail register. */
426         txq->qtx_tail = hw->hw_addr + IAVF_QTX_TAIL1(tx_queue_id);
427         IAVF_PCI_REG_WRITE(txq->qtx_tail, 0);
428         IAVF_WRITE_FLUSH(hw);
429
430         /* Ready to switch the queue on */
431         err = ice_dcf_switch_queue(&ad->real_hw, tx_queue_id, false, true);
432
433         if (err) {
434                 PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
435                             tx_queue_id);
436                 return err;
437         }
438
439         dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
440
441         return 0;
442 }
443
444 static int
445 ice_dcf_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
446 {
447         struct ice_dcf_adapter *ad = dev->data->dev_private;
448         struct ice_dcf_hw *hw = &ad->real_hw;
449         struct ice_tx_queue *txq;
450         int err;
451
452         if (tx_queue_id >= dev->data->nb_tx_queues)
453                 return -EINVAL;
454
455         err = ice_dcf_switch_queue(hw, tx_queue_id, false, false);
456         if (err) {
457                 PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
458                             tx_queue_id);
459                 return err;
460         }
461
462         txq = dev->data->tx_queues[tx_queue_id];
463         txq->tx_rel_mbufs(txq);
464         reset_tx_queue(txq);
465         dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
466
467         return 0;
468 }
469
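/* Start all Tx queues and then all Rx queues, skipping queues marked for
 * deferred start; if any queue fails, stop whatever was already started.
 */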
470 static int
471 ice_dcf_start_queues(struct rte_eth_dev *dev)
472 {
473         struct ice_rx_queue *rxq;
474         struct ice_tx_queue *txq;
475         int nb_rxq = 0;
476         int nb_txq, i;
477
478         for (nb_txq = 0; nb_txq < dev->data->nb_tx_queues; nb_txq++) {
479                 txq = dev->data->tx_queues[nb_txq];
480                 if (txq->tx_deferred_start)
481                         continue;
482                 if (ice_dcf_tx_queue_start(dev, nb_txq) != 0) {
483                         PMD_DRV_LOG(ERR, "Failed to start TX queue %u", nb_txq);
484                         goto tx_err;
485                 }
486         }
487
488         for (nb_rxq = 0; nb_rxq < dev->data->nb_rx_queues; nb_rxq++) {
489                 rxq = dev->data->rx_queues[nb_rxq];
490                 if (rxq->rx_deferred_start)
491                         continue;
492                 if (ice_dcf_rx_queue_start(dev, nb_rxq) != 0) {
493                         PMD_DRV_LOG(ERR, "Failed to start RX queue %u", nb_rxq);
494                         goto rx_err;
495                 }
496         }
497
498         return 0;
499
500         /* Stop the queues already started if not all queues could be started */
501 rx_err:
502         for (i = 0; i < nb_rxq; i++)
503                 ice_dcf_rx_queue_stop(dev, i);
504 tx_err:
505         for (i = 0; i < nb_txq; i++)
506                 ice_dcf_tx_queue_stop(dev, i);
507
508         return -1;
509 }
510
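/* Device start sequence: initialize the Rx queues, set up RSS when the PF
 * offers it, configure the queues and their interrupt mapping over virtchnl,
 * switch the queues on and install the MAC addresses.
 */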
511 static int
512 ice_dcf_dev_start(struct rte_eth_dev *dev)
513 {
514         struct ice_dcf_adapter *dcf_ad = dev->data->dev_private;
515         struct rte_intr_handle *intr_handle = dev->intr_handle;
516         struct ice_adapter *ad = &dcf_ad->parent;
517         struct ice_dcf_hw *hw = &dcf_ad->real_hw;
518         int ret;
519
520         if (hw->resetting) {
521                 PMD_DRV_LOG(ERR,
522                             "The DCF has been reset by PF, please reinit first");
523                 return -EIO;
524         }
525
526         ad->pf.adapter_stopped = 0;
527
528         hw->num_queue_pairs = RTE_MAX(dev->data->nb_rx_queues,
529                                       dev->data->nb_tx_queues);
530
531         ret = ice_dcf_init_rx_queues(dev);
532         if (ret) {
533                 PMD_DRV_LOG(ERR, "Failed to init RX queues");
534                 return ret;
535         }
536
537         if (hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
538                 ret = ice_dcf_init_rss(hw);
539                 if (ret) {
540                         PMD_DRV_LOG(ERR, "Failed to configure RSS");
541                         return ret;
542                 }
543         }
544
545         ret = ice_dcf_configure_queues(hw);
546         if (ret) {
547                 PMD_DRV_LOG(ERR, "Failed to configure queues");
548                 return ret;
549         }
550
551         ret = ice_dcf_config_rx_queues_irqs(dev, intr_handle);
552         if (ret) {
553                 PMD_DRV_LOG(ERR, "Failed to configure RX queue interrupts");
554                 return ret;
555         }
556
557         if (dev->data->dev_conf.intr_conf.rxq != 0) {
558                 rte_intr_disable(intr_handle);
559                 rte_intr_enable(intr_handle);
560         }
561
562         ret = ice_dcf_start_queues(dev);
563         if (ret) {
564                 PMD_DRV_LOG(ERR, "Failed to enable queues");
565                 return ret;
566         }
567
568         ret = ice_dcf_add_del_all_mac_addr(hw, true);
569         if (ret) {
570                 PMD_DRV_LOG(ERR, "Failed to add MAC addresses");
571                 return ret;
572         }
573
574         dev->data->dev_link.link_status = ETH_LINK_UP;
575
576         return 0;
577 }
578
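/* Disable all queues through virtchnl, then release the mbufs and reset the
 * state of every Rx and Tx queue owned by the port.
 */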
579 static void
580 ice_dcf_stop_queues(struct rte_eth_dev *dev)
581 {
582         struct ice_dcf_adapter *ad = dev->data->dev_private;
583         struct ice_dcf_hw *hw = &ad->real_hw;
584         struct ice_rx_queue *rxq;
585         struct ice_tx_queue *txq;
586         int ret, i;
587
588         /* Stop all queues */
589         ret = ice_dcf_disable_queues(hw);
590         if (ret)
591                 PMD_DRV_LOG(WARNING, "Failed to stop queues");
592
593         for (i = 0; i < dev->data->nb_tx_queues; i++) {
594                 txq = dev->data->tx_queues[i];
595                 if (!txq)
596                         continue;
597                 txq->tx_rel_mbufs(txq);
598                 reset_tx_queue(txq);
599                 dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
600                 dev->data->tx_queues[i] = NULL;
601         }
602         for (i = 0; i < dev->data->nb_rx_queues; i++) {
603                 rxq = dev->data->rx_queues[i];
604                 if (!rxq)
605                         continue;
606                 rxq->rx_rel_mbufs(rxq);
607                 reset_rx_queue(rxq);
608                 dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
609                 dev->data->rx_queues[i] = NULL;
610         }
611 }
612
613 static int
614 ice_dcf_dev_stop(struct rte_eth_dev *dev)
615 {
616         struct ice_dcf_adapter *dcf_ad = dev->data->dev_private;
617         struct rte_intr_handle *intr_handle = dev->intr_handle;
618         struct ice_adapter *ad = &dcf_ad->parent;
619
620         if (ad->pf.adapter_stopped == 1) {
621                 PMD_DRV_LOG(DEBUG, "Port is already stopped");
622                 return 0;
623         }
624
625         /* Stop the VF representors for this device */
626         ice_dcf_vf_repr_stop_all(dcf_ad);
627
628         ice_dcf_stop_queues(dev);
629
630         rte_intr_efd_disable(intr_handle);
631         if (intr_handle->intr_vec) {
632                 rte_free(intr_handle->intr_vec);
633                 intr_handle->intr_vec = NULL;
634         }
635
636         ice_dcf_add_del_all_mac_addr(&dcf_ad->real_hw, false);
637         dev->data->dev_link.link_status = ETH_LINK_DOWN;
638         ad->pf.adapter_stopped = 1;
639
640         return 0;
641 }
642
643 static int
644 ice_dcf_dev_configure(struct rte_eth_dev *dev)
645 {
646         struct ice_dcf_adapter *dcf_ad = dev->data->dev_private;
647         struct ice_adapter *ad = &dcf_ad->parent;
648
649         ad->rx_bulk_alloc_allowed = true;
650         ad->tx_simple_allowed = true;
651
652         if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
653                 dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
654
655         return 0;
656 }
657
658 static int
659 ice_dcf_dev_info_get(struct rte_eth_dev *dev,
660                      struct rte_eth_dev_info *dev_info)
661 {
662         struct ice_dcf_adapter *adapter = dev->data->dev_private;
663         struct ice_dcf_hw *hw = &adapter->real_hw;
664
665         dev_info->max_mac_addrs = 1;
666         dev_info->max_rx_queues = hw->vsi_res->num_queue_pairs;
667         dev_info->max_tx_queues = hw->vsi_res->num_queue_pairs;
668         dev_info->min_rx_bufsize = ICE_BUF_SIZE_MIN;
669         dev_info->max_rx_pktlen = ICE_FRAME_SIZE_MAX;
670         dev_info->hash_key_size = hw->vf_res->rss_key_size;
671         dev_info->reta_size = hw->vf_res->rss_lut_size;
672         dev_info->flow_type_rss_offloads = ICE_RSS_OFFLOAD_ALL;
673
674         dev_info->rx_offload_capa =
675                 DEV_RX_OFFLOAD_VLAN_STRIP |
676                 DEV_RX_OFFLOAD_IPV4_CKSUM |
677                 DEV_RX_OFFLOAD_UDP_CKSUM |
678                 DEV_RX_OFFLOAD_TCP_CKSUM |
679                 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
680                 DEV_RX_OFFLOAD_SCATTER |
681                 DEV_RX_OFFLOAD_JUMBO_FRAME |
682                 DEV_RX_OFFLOAD_VLAN_FILTER |
683                 DEV_RX_OFFLOAD_RSS_HASH;
684         dev_info->tx_offload_capa =
685                 DEV_TX_OFFLOAD_VLAN_INSERT |
686                 DEV_TX_OFFLOAD_IPV4_CKSUM |
687                 DEV_TX_OFFLOAD_UDP_CKSUM |
688                 DEV_TX_OFFLOAD_TCP_CKSUM |
689                 DEV_TX_OFFLOAD_SCTP_CKSUM |
690                 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
691                 DEV_TX_OFFLOAD_TCP_TSO |
692                 DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
693                 DEV_TX_OFFLOAD_GRE_TNL_TSO |
694                 DEV_TX_OFFLOAD_IPIP_TNL_TSO |
695                 DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
696                 DEV_TX_OFFLOAD_MULTI_SEGS;
697
698         dev_info->default_rxconf = (struct rte_eth_rxconf) {
699                 .rx_thresh = {
700                         .pthresh = ICE_DEFAULT_RX_PTHRESH,
701                         .hthresh = ICE_DEFAULT_RX_HTHRESH,
702                         .wthresh = ICE_DEFAULT_RX_WTHRESH,
703                 },
704                 .rx_free_thresh = ICE_DEFAULT_RX_FREE_THRESH,
705                 .rx_drop_en = 0,
706                 .offloads = 0,
707         };
708
709         dev_info->default_txconf = (struct rte_eth_txconf) {
710                 .tx_thresh = {
711                         .pthresh = ICE_DEFAULT_TX_PTHRESH,
712                         .hthresh = ICE_DEFAULT_TX_HTHRESH,
713                         .wthresh = ICE_DEFAULT_TX_WTHRESH,
714                 },
715                 .tx_free_thresh = ICE_DEFAULT_TX_FREE_THRESH,
716                 .tx_rs_thresh = ICE_DEFAULT_TX_RSBIT_THRESH,
717                 .offloads = 0,
718         };
719
720         dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
721                 .nb_max = ICE_MAX_RING_DESC,
722                 .nb_min = ICE_MIN_RING_DESC,
723                 .nb_align = ICE_ALIGN_RING_DESC,
724         };
725
726         dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
727                 .nb_max = ICE_MAX_RING_DESC,
728                 .nb_min = ICE_MIN_RING_DESC,
729                 .nb_align = ICE_ALIGN_RING_DESC,
730         };
731
732         return 0;
733 }
734
735 static int
736 ice_dcf_dev_promiscuous_enable(__rte_unused struct rte_eth_dev *dev)
737 {
738         return 0;
739 }
740
741 static int
742 ice_dcf_dev_promiscuous_disable(__rte_unused struct rte_eth_dev *dev)
743 {
744         return 0;
745 }
746
747 static int
748 ice_dcf_dev_allmulticast_enable(__rte_unused struct rte_eth_dev *dev)
749 {
750         return 0;
751 }
752
753 static int
754 ice_dcf_dev_allmulticast_disable(__rte_unused struct rte_eth_dev *dev)
755 {
756         return 0;
757 }
758
759 static int
760 ice_dcf_dev_flow_ops_get(struct rte_eth_dev *dev,
761                          const struct rte_flow_ops **ops)
762 {
763         if (!dev)
764                 return -EINVAL;
765
766         *ops = &ice_flow_ops;
767         return 0;
768 }
769
770 #define ICE_DCF_32_BIT_WIDTH (CHAR_BIT * 4)
771 #define ICE_DCF_48_BIT_WIDTH (CHAR_BIT * 6)
772 #define ICE_DCF_48_BIT_MASK  RTE_LEN2MASK(ICE_DCF_48_BIT_WIDTH, uint64_t)
773
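/* The VF statistics counters are 48-bit (or 32-bit) and can wrap between
 * reads; the helpers below turn a raw reading into a delta relative to the
 * recorded offset while accounting for a single wrap-around.
 */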
774 static void
775 ice_dcf_stat_update_48(uint64_t *offset, uint64_t *stat)
776 {
777         if (*stat >= *offset)
778                 *stat = *stat - *offset;
779         else
780                 *stat = (uint64_t)((*stat +
781                         ((uint64_t)1 << ICE_DCF_48_BIT_WIDTH)) - *offset);
782
783         *stat &= ICE_DCF_48_BIT_MASK;
784 }
785
786 static void
787 ice_dcf_stat_update_32(uint64_t *offset, uint64_t *stat)
788 {
789         if (*stat >= *offset)
790                 *stat = (uint64_t)(*stat - *offset);
791         else
792                 *stat = (uint64_t)((*stat +
793                         ((uint64_t)1 << ICE_DCF_32_BIT_WIDTH)) - *offset);
794 }
795
796 static void
797 ice_dcf_update_stats(struct virtchnl_eth_stats *oes,
798                      struct virtchnl_eth_stats *nes)
799 {
800         ice_dcf_stat_update_48(&oes->rx_bytes, &nes->rx_bytes);
801         ice_dcf_stat_update_48(&oes->rx_unicast, &nes->rx_unicast);
802         ice_dcf_stat_update_48(&oes->rx_multicast, &nes->rx_multicast);
803         ice_dcf_stat_update_48(&oes->rx_broadcast, &nes->rx_broadcast);
804         ice_dcf_stat_update_32(&oes->rx_discards, &nes->rx_discards);
805         ice_dcf_stat_update_48(&oes->tx_bytes, &nes->tx_bytes);
806         ice_dcf_stat_update_48(&oes->tx_unicast, &nes->tx_unicast);
807         ice_dcf_stat_update_48(&oes->tx_multicast, &nes->tx_multicast);
808         ice_dcf_stat_update_48(&oes->tx_broadcast, &nes->tx_broadcast);
809         ice_dcf_stat_update_32(&oes->tx_errors, &nes->tx_errors);
810         ice_dcf_stat_update_32(&oes->tx_discards, &nes->tx_discards);
811 }
812
813
814 static int
815 ice_dcf_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
816 {
817         struct ice_dcf_adapter *ad = dev->data->dev_private;
818         struct ice_dcf_hw *hw = &ad->real_hw;
819         struct virtchnl_eth_stats pstats;
820         int ret;
821
822         if (hw->resetting) {
823                 PMD_DRV_LOG(ERR,
824                             "The DCF has been reset by PF, please reinit first");
825                 return -EIO;
826         }
827
828         ret = ice_dcf_query_stats(hw, &pstats);
829         if (ret == 0) {
830                 ice_dcf_update_stats(&hw->eth_stats_offset, &pstats);
831                 stats->ipackets = pstats.rx_unicast + pstats.rx_multicast +
832                                 pstats.rx_broadcast - pstats.rx_discards;
833                 stats->opackets = pstats.tx_broadcast + pstats.tx_multicast +
834                                                 pstats.tx_unicast;
835                 stats->imissed = pstats.rx_discards;
836                 stats->oerrors = pstats.tx_errors + pstats.tx_discards;
837                 stats->ibytes = pstats.rx_bytes;
838                 stats->ibytes -= stats->ipackets * RTE_ETHER_CRC_LEN;
839                 stats->obytes = pstats.tx_bytes;
840         } else {
841                 PMD_DRV_LOG(ERR, "Get statistics failed");
842         }
843         return ret;
844 }
845
846 static int
847 ice_dcf_stats_reset(struct rte_eth_dev *dev)
848 {
849         struct ice_dcf_adapter *ad = dev->data->dev_private;
850         struct ice_dcf_hw *hw = &ad->real_hw;
851         struct virtchnl_eth_stats pstats;
852         int ret;
853
854         if (hw->resetting)
855                 return 0;
856
857         /* read stat values to clear hardware registers */
858         ret = ice_dcf_query_stats(hw, &pstats);
859         if (ret != 0)
860                 return ret;
861
862         /* Set the stats offset based on the current values */
863         hw->eth_stats_offset = pstats;
864
865         return 0;
866 }
867
868 static void
869 ice_dcf_free_repr_info(struct ice_dcf_adapter *dcf_adapter)
870 {
871         if (dcf_adapter->repr_infos) {
872                 rte_free(dcf_adapter->repr_infos);
873                 dcf_adapter->repr_infos = NULL;
874         }
875 }
876
877 static int
878 ice_dcf_init_repr_info(struct ice_dcf_adapter *dcf_adapter)
879 {
880         dcf_adapter->repr_infos =
881                         rte_calloc("ice_dcf_rep_info",
882                                    dcf_adapter->real_hw.num_vfs,
883                                    sizeof(dcf_adapter->repr_infos[0]), 0);
884         if (!dcf_adapter->repr_infos) {
885                 PMD_DRV_LOG(ERR, "Failed to alloc memory for VF representors");
886                 return -ENOMEM;
887         }
888
889         return 0;
890 }
891
892 static int
893 ice_dcf_dev_close(struct rte_eth_dev *dev)
894 {
895         struct ice_dcf_adapter *adapter = dev->data->dev_private;
896
897         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
898                 return 0;
899
900         (void)ice_dcf_dev_stop(dev);
901
902         ice_dcf_free_repr_info(adapter);
903         ice_dcf_uninit_parent_adapter(dev);
904         ice_dcf_uninit_hw(dev, &adapter->real_hw);
905
906         return 0;
907 }
908
909 int
910 ice_dcf_link_update(struct rte_eth_dev *dev,
911                     __rte_unused int wait_to_complete)
912 {
913         struct ice_dcf_adapter *ad = dev->data->dev_private;
914         struct ice_dcf_hw *hw = &ad->real_hw;
915         struct rte_eth_link new_link;
916
917         memset(&new_link, 0, sizeof(new_link));
918
919         /* Only read the link status stored in the VF; it is updated
920          * when a LINK_CHANGE event is received from the PF via virtchnl.
921          */
922         switch (hw->link_speed) {
923         case 10:
924                 new_link.link_speed = ETH_SPEED_NUM_10M;
925                 break;
926         case 100:
927                 new_link.link_speed = ETH_SPEED_NUM_100M;
928                 break;
929         case 1000:
930                 new_link.link_speed = ETH_SPEED_NUM_1G;
931                 break;
932         case 10000:
933                 new_link.link_speed = ETH_SPEED_NUM_10G;
934                 break;
935         case 20000:
936                 new_link.link_speed = ETH_SPEED_NUM_20G;
937                 break;
938         case 25000:
939                 new_link.link_speed = ETH_SPEED_NUM_25G;
940                 break;
941         case 40000:
942                 new_link.link_speed = ETH_SPEED_NUM_40G;
943                 break;
944         case 50000:
945                 new_link.link_speed = ETH_SPEED_NUM_50G;
946                 break;
947         case 100000:
948                 new_link.link_speed = ETH_SPEED_NUM_100G;
949                 break;
950         default:
951                 new_link.link_speed = ETH_SPEED_NUM_NONE;
952                 break;
953         }
954
955         new_link.link_duplex = ETH_LINK_FULL_DUPLEX;
956         new_link.link_status = hw->link_up ? ETH_LINK_UP :
957                                              ETH_LINK_DOWN;
958         new_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
959                                 ETH_LINK_SPEED_FIXED);
960
961         return rte_eth_linkstatus_set(dev, &new_link);
962 }
963
964 /* Add UDP tunneling port */
965 static int
966 ice_dcf_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
967                                 struct rte_eth_udp_tunnel *udp_tunnel)
968 {
969         struct ice_dcf_adapter *adapter = dev->data->dev_private;
970         struct ice_adapter *parent_adapter = &adapter->parent;
971         struct ice_hw *parent_hw = &parent_adapter->hw;
972         int ret = 0;
973
974         if (!udp_tunnel)
975                 return -EINVAL;
976
977         switch (udp_tunnel->prot_type) {
978         case RTE_TUNNEL_TYPE_VXLAN:
979                 ret = ice_create_tunnel(parent_hw, TNL_VXLAN,
980                                         udp_tunnel->udp_port);
981                 break;
982         case RTE_TUNNEL_TYPE_ECPRI:
983                 ret = ice_create_tunnel(parent_hw, TNL_ECPRI,
984                                         udp_tunnel->udp_port);
985                 break;
986         default:
987                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
988                 ret = -EINVAL;
989                 break;
990         }
991
992         return ret;
993 }
994
995 /* Delete UDP tunneling port */
996 static int
997 ice_dcf_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
998                                 struct rte_eth_udp_tunnel *udp_tunnel)
999 {
1000         struct ice_dcf_adapter *adapter = dev->data->dev_private;
1001         struct ice_adapter *parent_adapter = &adapter->parent;
1002         struct ice_hw *parent_hw = &parent_adapter->hw;
1003         int ret = 0;
1004
1005         if (!udp_tunnel)
1006                 return -EINVAL;
1007
1008         switch (udp_tunnel->prot_type) {
1009         case RTE_TUNNEL_TYPE_VXLAN:
1010         case RTE_TUNNEL_TYPE_ECPRI:
1011                 ret = ice_destroy_tunnel(parent_hw, udp_tunnel->udp_port, 0);
1012                 break;
1013         default:
1014                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
1015                 ret = -EINVAL;
1016                 break;
1017         }
1018
1019         return ret;
1020 }
1021
1022 static int
1023 ice_dcf_tm_ops_get(struct rte_eth_dev *dev __rte_unused,
1024                 void *arg)
1025 {
1026         if (!arg)
1027                 return -EINVAL;
1028
1029         *(const void **)arg = &ice_dcf_tm_ops;
1030
1031         return 0;
1032 }
1033
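/* Reset is implemented as a full uninit followed by a re-init of the DCF
 * ethdev.
 */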
1034 static int
1035 ice_dcf_dev_reset(struct rte_eth_dev *dev)
1036 {
1037         int ret;
1038
1039         ret = ice_dcf_dev_uninit(dev);
1040         if (ret)
1041                 return ret;
1042
1043         ret = ice_dcf_dev_init(dev);
1044
1045         return ret;
1046 }
1047
1048 static const struct eth_dev_ops ice_dcf_eth_dev_ops = {
1049         .dev_start               = ice_dcf_dev_start,
1050         .dev_stop                = ice_dcf_dev_stop,
1051         .dev_close               = ice_dcf_dev_close,
1052         .dev_reset               = ice_dcf_dev_reset,
1053         .dev_configure           = ice_dcf_dev_configure,
1054         .dev_infos_get           = ice_dcf_dev_info_get,
1055         .rx_queue_setup          = ice_rx_queue_setup,
1056         .tx_queue_setup          = ice_tx_queue_setup,
1057         .rx_queue_release        = ice_rx_queue_release,
1058         .tx_queue_release        = ice_tx_queue_release,
1059         .rx_queue_start          = ice_dcf_rx_queue_start,
1060         .tx_queue_start          = ice_dcf_tx_queue_start,
1061         .rx_queue_stop           = ice_dcf_rx_queue_stop,
1062         .tx_queue_stop           = ice_dcf_tx_queue_stop,
1063         .link_update             = ice_dcf_link_update,
1064         .stats_get               = ice_dcf_stats_get,
1065         .stats_reset             = ice_dcf_stats_reset,
1066         .promiscuous_enable      = ice_dcf_dev_promiscuous_enable,
1067         .promiscuous_disable     = ice_dcf_dev_promiscuous_disable,
1068         .allmulticast_enable     = ice_dcf_dev_allmulticast_enable,
1069         .allmulticast_disable    = ice_dcf_dev_allmulticast_disable,
1070         .flow_ops_get            = ice_dcf_dev_flow_ops_get,
1071         .udp_tunnel_port_add     = ice_dcf_dev_udp_tunnel_port_add,
1072         .udp_tunnel_port_del     = ice_dcf_dev_udp_tunnel_port_del,
1073         .tm_ops_get              = ice_dcf_tm_ops_get,
1074 };
1075
1076 static int
1077 ice_dcf_dev_init(struct rte_eth_dev *eth_dev)
1078 {
1079         struct ice_dcf_adapter *adapter = eth_dev->data->dev_private;
1080
1081         adapter->real_hw.resetting = false;
1082         eth_dev->dev_ops = &ice_dcf_eth_dev_ops;
1083         eth_dev->rx_pkt_burst = ice_dcf_recv_pkts;
1084         eth_dev->tx_pkt_burst = ice_dcf_xmit_pkts;
1085
1086         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1087                 return 0;
1088
1089         eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
1090
1091         adapter->real_hw.vc_event_msg_cb = ice_dcf_handle_pf_event_msg;
1092         if (ice_dcf_init_hw(eth_dev, &adapter->real_hw) != 0) {
1093                 PMD_INIT_LOG(ERR, "Failed to init DCF hardware");
1094                 return -1;
1095         }
1096
1097         if (ice_dcf_init_parent_adapter(eth_dev) != 0) {
1098                 PMD_INIT_LOG(ERR, "Failed to init DCF parent adapter");
1099                 ice_dcf_uninit_hw(eth_dev, &adapter->real_hw);
1100                 return -1;
1101         }
1102
1103         return 0;
1104 }
1105
1106 static int
1107 ice_dcf_dev_uninit(struct rte_eth_dev *eth_dev)
1108 {
1109         ice_dcf_dev_close(eth_dev);
1110
1111         return 0;
1112 }
1113
1114 static int
1115 ice_dcf_cap_check_handler(__rte_unused const char *key,
1116                           const char *value, __rte_unused void *opaque)
1117 {
1118         if (strcmp(value, "dcf"))
1119                 return -1;
1120
1121         return 0;
1122 }
1123
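/* Return 1 only when the devargs contain the key-value pair cap=dcf;
 * otherwise the probe below declines the device so that it can be taken by
 * another driver.
 */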
1124 static int
1125 ice_dcf_cap_selected(struct rte_devargs *devargs)
1126 {
1127         struct rte_kvargs *kvlist;
1128         const char *key = "cap";
1129         int ret = 0;
1130
1131         if (devargs == NULL)
1132                 return 0;
1133
1134         kvlist = rte_kvargs_parse(devargs->args, NULL);
1135         if (kvlist == NULL)
1136                 return 0;
1137
1138         if (!rte_kvargs_count(kvlist, key))
1139                 goto exit;
1140
1141         /* The DCF capability is selected when the key-value pair cap=dcf is present */
1142         if (rte_kvargs_process(kvlist, key,
1143                                ice_dcf_cap_check_handler, NULL) < 0)
1144                 goto exit;
1145
1146         ret = 1;
1147
1148 exit:
1149         rte_kvargs_free(kvlist);
1150         return ret;
1151 }
1152
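/* Probe the DCF port itself, then, if "representor" devargs were given,
 * create one VF representor ethdev per requested VF ID bound to this DCF
 * port.
 */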
1153 static int
1154 eth_ice_dcf_pci_probe(__rte_unused struct rte_pci_driver *pci_drv,
1155                       struct rte_pci_device *pci_dev)
1156 {
1157         struct rte_eth_devargs eth_da = { .nb_representor_ports = 0 };
1158         struct ice_dcf_vf_repr_param repr_param;
1159         char repr_name[RTE_ETH_NAME_MAX_LEN];
1160         struct ice_dcf_adapter *dcf_adapter;
1161         struct rte_eth_dev *dcf_ethdev;
1162         uint16_t dcf_vsi_id;
1163         int i, ret;
1164
1165         if (!ice_dcf_cap_selected(pci_dev->device.devargs))
1166                 return 1;
1167
1168         ret = rte_eth_devargs_parse(pci_dev->device.devargs->args, &eth_da);
1169         if (ret)
1170                 return ret;
1171
1172         ret = rte_eth_dev_pci_generic_probe(pci_dev,
1173                                             sizeof(struct ice_dcf_adapter),
1174                                             ice_dcf_dev_init);
1175         if (ret || !eth_da.nb_representor_ports)
1176                 return ret;
1177         if (eth_da.type != RTE_ETH_REPRESENTOR_VF)
1178                 return -ENOTSUP;
1179
1180         dcf_ethdev = rte_eth_dev_allocated(pci_dev->device.name);
1181         if (dcf_ethdev == NULL)
1182                 return -ENODEV;
1183
1184         dcf_adapter = dcf_ethdev->data->dev_private;
1185         ret = ice_dcf_init_repr_info(dcf_adapter);
1186         if (ret)
1187                 return ret;
1188
1189         if (eth_da.nb_representor_ports > dcf_adapter->real_hw.num_vfs ||
1190             eth_da.nb_representor_ports >= RTE_MAX_ETHPORTS) {
1191                 PMD_DRV_LOG(ERR, "the number of port representors is too large: %u",
1192                             eth_da.nb_representor_ports);
1193                 ice_dcf_free_repr_info(dcf_adapter);
1194                 return -EINVAL;
1195         }
1196
1197         dcf_vsi_id = dcf_adapter->real_hw.vsi_id | VIRTCHNL_DCF_VF_VSI_VALID;
1198
1199         repr_param.dcf_eth_dev = dcf_ethdev;
1200         repr_param.switch_domain_id = 0;
1201
1202         for (i = 0; i < eth_da.nb_representor_ports; i++) {
1203                 uint16_t vf_id = eth_da.representor_ports[i];
1204                 struct rte_eth_dev *vf_rep_eth_dev;
1205
1206                 if (vf_id >= dcf_adapter->real_hw.num_vfs) {
1207                         PMD_DRV_LOG(ERR, "VF ID %u is out of range (0 ~ %u)",
1208                                     vf_id, dcf_adapter->real_hw.num_vfs - 1);
1209                         ret = -EINVAL;
1210                         break;
1211                 }
1212
1213                 if (dcf_adapter->real_hw.vf_vsi_map[vf_id] == dcf_vsi_id) {
1214                         PMD_DRV_LOG(ERR, "VF ID %u is the DCF's own ID", vf_id);
1215                         ret = -EINVAL;
1216                         break;
1217                 }
1218
1219                 repr_param.vf_id = vf_id;
1220                 snprintf(repr_name, sizeof(repr_name), "net_%s_representor_%u",
1221                          pci_dev->device.name, vf_id);
1222                 ret = rte_eth_dev_create(&pci_dev->device, repr_name,
1223                                          sizeof(struct ice_dcf_vf_repr),
1224                                          NULL, NULL, ice_dcf_vf_repr_init,
1225                                          &repr_param);
1226                 if (ret) {
1227                         PMD_DRV_LOG(ERR, "failed to create DCF VF representor %s",
1228                                     repr_name);
1229                         break;
1230                 }
1231
1232                 vf_rep_eth_dev = rte_eth_dev_allocated(repr_name);
1233                 if (!vf_rep_eth_dev) {
1234                         PMD_DRV_LOG(ERR,
1235                                     "Failed to find the ethdev for DCF VF representor: %s",
1236                                     repr_name);
1237                         ret = -ENODEV;
1238                         break;
1239                 }
1240
1241                 dcf_adapter->repr_infos[vf_id].vf_rep_eth_dev = vf_rep_eth_dev;
1242                 dcf_adapter->num_reprs++;
1243         }
1244
1245         return ret;
1246 }
1247
1248 static int
1249 eth_ice_dcf_pci_remove(struct rte_pci_device *pci_dev)
1250 {
1251         struct rte_eth_dev *eth_dev;
1252
1253         eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
1254         if (!eth_dev)
1255                 return 0;
1256
1257         if (eth_dev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR)
1258                 return rte_eth_dev_pci_generic_remove(pci_dev,
1259                                                       ice_dcf_vf_repr_uninit);
1260         else
1261                 return rte_eth_dev_pci_generic_remove(pci_dev,
1262                                                       ice_dcf_dev_uninit);
1263 }
1264
1265 static const struct rte_pci_id pci_id_ice_dcf_map[] = {
1266         { RTE_PCI_DEVICE(IAVF_INTEL_VENDOR_ID, IAVF_DEV_ID_ADAPTIVE_VF) },
1267         { .vendor_id = 0, /* sentinel */ },
1268 };
1269
1270 static struct rte_pci_driver rte_ice_dcf_pmd = {
1271         .id_table = pci_id_ice_dcf_map,
1272         .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
1273         .probe = eth_ice_dcf_pci_probe,
1274         .remove = eth_ice_dcf_pci_remove,
1275 };
1276
1277 RTE_PMD_REGISTER_PCI(net_ice_dcf, rte_ice_dcf_pmd);
1278 RTE_PMD_REGISTER_PCI_TABLE(net_ice_dcf, pci_id_ice_dcf_map);
1279 RTE_PMD_REGISTER_KMOD_DEP(net_ice_dcf, "* igb_uio | vfio-pci");
1280 RTE_PMD_REGISTER_PARAM_STRING(net_ice_dcf, "cap=dcf");