/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020 Intel Corporation
 */

#include <errno.h>
#include <stdbool.h>
#include <sys/queue.h>
#include <sys/types.h>
#include <unistd.h>

#include <rte_interrupts.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_atomic.h>
#include <rte_eal.h>
#include <rte_ether.h>
#include <ethdev_pci.h>
#include <rte_kvargs.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_dev.h>

#include <iavf_devids.h>

#include "ice_generic_flow.h"
#include "ice_dcf_ethdev.h"
#include "ice_rxtx.h"

static int
ice_dcf_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
                                struct rte_eth_udp_tunnel *udp_tunnel);
static int
ice_dcf_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
                                struct rte_eth_udp_tunnel *udp_tunnel);

static int
ice_dcf_dev_init(struct rte_eth_dev *eth_dev);

static int
ice_dcf_dev_uninit(struct rte_eth_dev *eth_dev);

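/* The DCF port itself carries no traffic: the Rx/Tx burst callbacks
 * are stubs that always report zero packets.
 */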
static uint16_t
ice_dcf_recv_pkts(__rte_unused void *rx_queue,
                  __rte_unused struct rte_mbuf **bufs,
                  __rte_unused uint16_t nb_pkts)
{
        return 0;
}

static uint16_t
ice_dcf_xmit_pkts(__rte_unused void *tx_queue,
                  __rte_unused struct rte_mbuf **bufs,
                  __rte_unused uint16_t nb_pkts)
{
        return 0;
}

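/* Derive the Rx buffer size and maximum packet length for one queue
 * from the mbuf pool and the configured max_rx_pkt_len, validate them,
 * and program the queue's tail register.
 */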
static int
ice_dcf_init_rxq(struct rte_eth_dev *dev, struct ice_rx_queue *rxq)
{
        struct ice_dcf_adapter *dcf_ad = dev->data->dev_private;
        struct rte_eth_dev_data *dev_data = dev->data;
        struct iavf_hw *hw = &dcf_ad->real_hw.avf;
        uint16_t buf_size, max_pkt_len;

        buf_size = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;
        rxq->rx_hdr_len = 0;
        rxq->rx_buf_len = RTE_ALIGN(buf_size, (1 << ICE_RLAN_CTX_DBUF_S));
        max_pkt_len = RTE_MIN((uint32_t)
                              ICE_SUPPORT_CHAIN_NUM * rxq->rx_buf_len,
                              dev->data->dev_conf.rxmode.max_rx_pkt_len);

        /* Check if the jumbo frame and maximum packet length are set
         * correctly.
         */
        if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
                if (max_pkt_len <= ICE_ETH_MAX_LEN ||
                    max_pkt_len > ICE_FRAME_SIZE_MAX) {
                        PMD_DRV_LOG(ERR, "maximum packet length must be "
                                    "larger than %u and smaller than %u, "
                                    "as jumbo frame is enabled",
                                    (uint32_t)ICE_ETH_MAX_LEN,
                                    (uint32_t)ICE_FRAME_SIZE_MAX);
                        return -EINVAL;
                }
        } else {
                if (max_pkt_len < RTE_ETHER_MIN_LEN ||
                    max_pkt_len > ICE_ETH_MAX_LEN) {
                        PMD_DRV_LOG(ERR, "maximum packet length must be "
                                    "larger than %u and smaller than %u, "
                                    "as jumbo frame is disabled",
                                    (uint32_t)RTE_ETHER_MIN_LEN,
                                    (uint32_t)ICE_ETH_MAX_LEN);
                        return -EINVAL;
                }
        }

        rxq->max_pkt_len = max_pkt_len;
        if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
            (rxq->max_pkt_len + 2 * ICE_VLAN_TAG_SIZE) > buf_size) {
                dev_data->scattered_rx = 1;
        }
        rxq->qrx_tail = hw->hw_addr + IAVF_QRX_TAIL1(rxq->queue_id);
        IAVF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
        IAVF_WRITE_FLUSH(hw);

        return 0;
}

static int
ice_dcf_init_rx_queues(struct rte_eth_dev *dev)
{
        struct ice_rx_queue **rxq =
                (struct ice_rx_queue **)dev->data->rx_queues;
        int i, ret;

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                if (!rxq[i] || !rxq[i]->q_set)
                        continue;
                ret = ice_dcf_init_rxq(dev, rxq[i]);
                if (ret)
                        return ret;
        }

        ice_set_rx_function(dev);
        ice_set_tx_function(dev);

        return 0;
}

#define IAVF_MISC_VEC_ID                RTE_INTR_VEC_ZERO_OFFSET
#define IAVF_RX_VEC_START               RTE_INTR_VEC_RXTX_OFFSET

#define IAVF_ITR_INDEX_DEFAULT          0
#define IAVF_QUEUE_ITR_INTERVAL_DEFAULT 32 /* 32 us */
#define IAVF_QUEUE_ITR_INTERVAL_MAX     8160 /* 8160 us */

static inline uint16_t
iavf_calc_itr_interval(int16_t interval)
{
        if (interval < 0 || interval > IAVF_QUEUE_ITR_INTERVAL_MAX)
                interval = IAVF_QUEUE_ITR_INTERVAL_DEFAULT;

        /* Convert to hardware count, as writing each 1 represents 2 us */
        return interval / 2;
}

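/* Set up the MSI-X vectors for the Rx queues: either a single vector
 * shared by all queues (with or without WB_ON_ITR) or one vector per
 * queue when Rx interrupts are enabled and multiple vectors exist.
 */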
static int
ice_dcf_config_rx_queues_irqs(struct rte_eth_dev *dev,
                              struct rte_intr_handle *intr_handle)
{
        struct ice_dcf_adapter *adapter = dev->data->dev_private;
        struct ice_dcf_hw *hw = &adapter->real_hw;
        uint16_t interval, i;
        int vec;

        if (rte_intr_cap_multiple(intr_handle) &&
            dev->data->dev_conf.intr_conf.rxq) {
                if (rte_intr_efd_enable(intr_handle, dev->data->nb_rx_queues))
                        return -1;
        }

        if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
                intr_handle->intr_vec =
                        rte_zmalloc("intr_vec",
                                    dev->data->nb_rx_queues * sizeof(int), 0);
                if (!intr_handle->intr_vec) {
                        PMD_DRV_LOG(ERR, "Failed to allocate %d rx intr_vec",
                                    dev->data->nb_rx_queues);
                        return -1;
                }
        }

        if (!dev->data->dev_conf.intr_conf.rxq ||
            !rte_intr_dp_is_en(intr_handle)) {
                /* Rx interrupt disabled, map interrupt only for writeback */
                hw->nb_msix = 1;
                if (hw->vf_res->vf_cap_flags &
                    VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) {
                        /* If WB_ON_ITR is supported, enable it */
                        hw->msix_base = IAVF_RX_VEC_START;
                        /* Set the ITR for index zero to 2us, to make sure
                         * that we leave time for aggregation to occur, but
                         * don't increase latency dramatically.
                         */
                        IAVF_WRITE_REG(&hw->avf,
                                       IAVF_VFINT_DYN_CTLN1(hw->msix_base - 1),
                                       (0 << IAVF_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) |
                                       IAVF_VFINT_DYN_CTLN1_WB_ON_ITR_MASK |
                                       (2UL << IAVF_VFINT_DYN_CTLN1_INTERVAL_SHIFT));
                } else {
                        /* Without the WB_ON_ITR offload flag, an interrupt
                         * is needed for descriptor write-back.
                         */
                        hw->msix_base = IAVF_MISC_VEC_ID;

                        /* set ITR to max */
                        interval =
                        iavf_calc_itr_interval(IAVF_QUEUE_ITR_INTERVAL_MAX);
                        IAVF_WRITE_REG(&hw->avf, IAVF_VFINT_DYN_CTL01,
                                       IAVF_VFINT_DYN_CTL01_INTENA_MASK |
                                       (IAVF_ITR_INDEX_DEFAULT <<
                                        IAVF_VFINT_DYN_CTL01_ITR_INDX_SHIFT) |
                                       (interval <<
                                        IAVF_VFINT_DYN_CTL01_INTERVAL_SHIFT));
                }
                IAVF_WRITE_FLUSH(&hw->avf);
                /* map all queues to the same interrupt */
                for (i = 0; i < dev->data->nb_rx_queues; i++)
                        hw->rxq_map[hw->msix_base] |= 1 << i;
        } else {
                if (!rte_intr_allow_others(intr_handle)) {
                        hw->nb_msix = 1;
                        hw->msix_base = IAVF_MISC_VEC_ID;
                        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                                hw->rxq_map[hw->msix_base] |= 1 << i;
                                intr_handle->intr_vec[i] = IAVF_MISC_VEC_ID;
                        }
                        PMD_DRV_LOG(DEBUG,
                                    "vector %u is mapped to all Rx queues",
                                    hw->msix_base);
                } else {
                        /* If Rx interrupts are required and multiple
                         * vectors can be used, queue vectors start from 1.
                         */
                        hw->nb_msix = RTE_MIN(hw->vf_res->max_vectors,
                                              intr_handle->nb_efd);
                        hw->msix_base = IAVF_MISC_VEC_ID;
                        vec = IAVF_MISC_VEC_ID;
                        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                                hw->rxq_map[vec] |= 1 << i;
                                intr_handle->intr_vec[i] = vec++;
                                if (vec >= hw->nb_msix)
                                        vec = IAVF_RX_VEC_START;
                        }
                        PMD_DRV_LOG(DEBUG,
                                    "%u vectors are mapped to %u Rx queues",
                                    hw->nb_msix, dev->data->nb_rx_queues);
                }
        }

        if (ice_dcf_config_irq_map(hw)) {
                PMD_DRV_LOG(ERR, "config interrupt mapping failed");
                return -1;
        }
        return 0;
}

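/* Allocate an mbuf for every descriptor in the Rx ring and fill in
 * its DMA address so the hardware can start receiving immediately.
 */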
static int
alloc_rxq_mbufs(struct ice_rx_queue *rxq)
{
        volatile union ice_rx_flex_desc *rxd;
        struct rte_mbuf *mbuf = NULL;
        uint64_t dma_addr;
        uint16_t i;

        for (i = 0; i < rxq->nb_rx_desc; i++) {
                mbuf = rte_mbuf_raw_alloc(rxq->mp);
                if (unlikely(!mbuf)) {
                        PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
                        return -ENOMEM;
                }

                rte_mbuf_refcnt_set(mbuf, 1);
                mbuf->next = NULL;
                mbuf->data_off = RTE_PKTMBUF_HEADROOM;
                mbuf->nb_segs = 1;
                mbuf->port = rxq->port_id;

                dma_addr =
                        rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));

                rxd = &rxq->rx_ring[i];
                rxd->read.pkt_addr = dma_addr;
                rxd->read.hdr_addr = 0;
#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
                rxd->read.rsvd1 = 0;
                rxd->read.rsvd2 = 0;
#endif

                rxq->sw_ring[i].mbuf = (void *)mbuf;
        }

        return 0;
}

static int
ice_dcf_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
        struct ice_dcf_adapter *ad = dev->data->dev_private;
        struct iavf_hw *hw = &ad->real_hw.avf;
        struct ice_rx_queue *rxq;
        int err = 0;

        if (rx_queue_id >= dev->data->nb_rx_queues)
                return -EINVAL;

        rxq = dev->data->rx_queues[rx_queue_id];

        err = alloc_rxq_mbufs(rxq);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
                return err;
        }

        rte_wmb();

        /* Init the RX tail register. */
        IAVF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
        IAVF_WRITE_FLUSH(hw);

        /* Ready to switch the queue on */
        err = ice_dcf_switch_queue(&ad->real_hw, rx_queue_id, true, true);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
                            rx_queue_id);
                return err;
        }

        dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

        return 0;
}

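/* Zero the descriptor ring and reset the software ring state so the
 * Rx queue can be safely restarted.
 */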
static inline void
reset_rx_queue(struct ice_rx_queue *rxq)
{
        uint16_t len;
        uint32_t i;

        if (!rxq)
                return;

        len = rxq->nb_rx_desc + ICE_RX_MAX_BURST;

        for (i = 0; i < len * sizeof(union ice_rx_flex_desc); i++)
                ((volatile char *)rxq->rx_ring)[i] = 0;

        memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));

        for (i = 0; i < ICE_RX_MAX_BURST; i++)
                rxq->sw_ring[rxq->nb_rx_desc + i].mbuf = &rxq->fake_mbuf;

        /* for rx bulk */
        rxq->rx_nb_avail = 0;
        rxq->rx_next_avail = 0;
        rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);

        rxq->rx_tail = 0;
        rxq->nb_rx_hold = 0;
        rxq->pkt_first_seg = NULL;
        rxq->pkt_last_seg = NULL;
}

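/* Mark every Tx descriptor as done and relink the software ring so
 * the Tx queue starts from a clean state.
 */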
static inline void
reset_tx_queue(struct ice_tx_queue *txq)
{
        struct ice_tx_entry *txe;
        uint32_t i, size;
        uint16_t prev;

        if (!txq) {
                PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL");
                return;
        }

        txe = txq->sw_ring;
        size = sizeof(struct ice_tx_desc) * txq->nb_tx_desc;
        for (i = 0; i < size; i++)
                ((volatile char *)txq->tx_ring)[i] = 0;

        prev = (uint16_t)(txq->nb_tx_desc - 1);
        for (i = 0; i < txq->nb_tx_desc; i++) {
                txq->tx_ring[i].cmd_type_offset_bsz =
                        rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DESC_DONE);
                txe[i].mbuf = NULL;
                txe[i].last_id = i;
                txe[prev].next_id = i;
                prev = i;
        }

        txq->tx_tail = 0;
        txq->nb_tx_used = 0;

        txq->last_desc_cleaned = txq->nb_tx_desc - 1;
        txq->nb_tx_free = txq->nb_tx_desc - 1;

        txq->tx_next_dd = txq->tx_rs_thresh - 1;
        txq->tx_next_rs = txq->tx_rs_thresh - 1;
}

static int
ice_dcf_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
        struct ice_dcf_adapter *ad = dev->data->dev_private;
        struct ice_dcf_hw *hw = &ad->real_hw;
        struct ice_rx_queue *rxq;
        int err;

        if (rx_queue_id >= dev->data->nb_rx_queues)
                return -EINVAL;

        err = ice_dcf_switch_queue(hw, rx_queue_id, true, false);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
                            rx_queue_id);
                return err;
        }

        rxq = dev->data->rx_queues[rx_queue_id];
        rxq->rx_rel_mbufs(rxq);
        reset_rx_queue(rxq);
        dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

        return 0;
}

static int
ice_dcf_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
        struct ice_dcf_adapter *ad = dev->data->dev_private;
        struct iavf_hw *hw = &ad->real_hw.avf;
        struct ice_tx_queue *txq;
        int err = 0;

        if (tx_queue_id >= dev->data->nb_tx_queues)
                return -EINVAL;

        txq = dev->data->tx_queues[tx_queue_id];

        /* Init the TX tail register. */
        txq->qtx_tail = hw->hw_addr + IAVF_QTX_TAIL1(tx_queue_id);
        IAVF_PCI_REG_WRITE(txq->qtx_tail, 0);
        IAVF_WRITE_FLUSH(hw);

        /* Ready to switch the queue on */
        err = ice_dcf_switch_queue(&ad->real_hw, tx_queue_id, false, true);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
                            tx_queue_id);
                return err;
        }

        dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

        return 0;
}

static int
ice_dcf_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
        struct ice_dcf_adapter *ad = dev->data->dev_private;
        struct ice_dcf_hw *hw = &ad->real_hw;
        struct ice_tx_queue *txq;
        int err;

        if (tx_queue_id >= dev->data->nb_tx_queues)
                return -EINVAL;

        err = ice_dcf_switch_queue(hw, tx_queue_id, false, false);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
                            tx_queue_id);
                return err;
        }

        txq = dev->data->tx_queues[tx_queue_id];
        txq->tx_rel_mbufs(txq);
        reset_tx_queue(txq);
        dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

        return 0;
}

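/* Start all Tx queues first, then all Rx queues, skipping any queue
 * marked as deferred; roll back the started queues on failure.
 */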
static int
ice_dcf_start_queues(struct rte_eth_dev *dev)
{
        struct ice_rx_queue *rxq;
        struct ice_tx_queue *txq;
        int nb_rxq = 0;
        int nb_txq, i;

        for (nb_txq = 0; nb_txq < dev->data->nb_tx_queues; nb_txq++) {
                txq = dev->data->tx_queues[nb_txq];
                if (txq->tx_deferred_start)
                        continue;
                if (ice_dcf_tx_queue_start(dev, nb_txq) != 0) {
                        PMD_DRV_LOG(ERR, "Fail to start queue %u", nb_txq);
                        goto tx_err;
                }
        }

        for (nb_rxq = 0; nb_rxq < dev->data->nb_rx_queues; nb_rxq++) {
                rxq = dev->data->rx_queues[nb_rxq];
                if (rxq->rx_deferred_start)
                        continue;
                if (ice_dcf_rx_queue_start(dev, nb_rxq) != 0) {
                        PMD_DRV_LOG(ERR, "Fail to start queue %u", nb_rxq);
                        goto rx_err;
                }
        }

        return 0;

        /* stop the started queues if we failed to start all queues */
rx_err:
        for (i = 0; i < nb_rxq; i++)
                ice_dcf_rx_queue_stop(dev, i);
tx_err:
        for (i = 0; i < nb_txq; i++)
                ice_dcf_tx_queue_stop(dev, i);

        return -1;
}

static int
ice_dcf_dev_start(struct rte_eth_dev *dev)
{
        struct ice_dcf_adapter *dcf_ad = dev->data->dev_private;
        struct rte_intr_handle *intr_handle = dev->intr_handle;
        struct ice_adapter *ad = &dcf_ad->parent;
        struct ice_dcf_hw *hw = &dcf_ad->real_hw;
        int ret;

        if (hw->resetting) {
                PMD_DRV_LOG(ERR,
                            "The DCF has been reset by PF, please reinit first");
                return -EIO;
        }

        ad->pf.adapter_stopped = 0;

        hw->num_queue_pairs = RTE_MAX(dev->data->nb_rx_queues,
                                      dev->data->nb_tx_queues);

        ret = ice_dcf_init_rx_queues(dev);
        if (ret) {
                PMD_DRV_LOG(ERR, "Fail to init queues");
                return ret;
        }

        if (hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
                ret = ice_dcf_init_rss(hw);
                if (ret) {
                        PMD_DRV_LOG(ERR, "Failed to configure RSS");
                        return ret;
                }
        }

        ret = ice_dcf_configure_queues(hw);
        if (ret) {
                PMD_DRV_LOG(ERR, "Fail to config queues");
                return ret;
        }

        ret = ice_dcf_config_rx_queues_irqs(dev, intr_handle);
        if (ret) {
                PMD_DRV_LOG(ERR, "Fail to config rx queues' irqs");
                return ret;
        }

        if (dev->data->dev_conf.intr_conf.rxq != 0) {
                rte_intr_disable(intr_handle);
                rte_intr_enable(intr_handle);
        }

        ret = ice_dcf_start_queues(dev);
        if (ret) {
                PMD_DRV_LOG(ERR, "Failed to enable queues");
                return ret;
        }

        ret = ice_dcf_add_del_all_mac_addr(hw, true);
        if (ret) {
                PMD_DRV_LOG(ERR, "Failed to add mac addr");
                return ret;
        }

        dev->data->dev_link.link_status = ETH_LINK_UP;

        return 0;
}

static void
ice_dcf_stop_queues(struct rte_eth_dev *dev)
{
        struct ice_dcf_adapter *ad = dev->data->dev_private;
        struct ice_dcf_hw *hw = &ad->real_hw;
        struct ice_rx_queue *rxq;
        struct ice_tx_queue *txq;
        int ret, i;

        /* Stop all queues */
        ret = ice_dcf_disable_queues(hw);
        if (ret)
                PMD_DRV_LOG(WARNING, "Fail to stop queues");

        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                txq = dev->data->tx_queues[i];
                if (!txq)
                        continue;
                txq->tx_rel_mbufs(txq);
                reset_tx_queue(txq);
                dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
                dev->data->tx_queues[i] = NULL;
        }
        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                rxq = dev->data->rx_queues[i];
                if (!rxq)
                        continue;
                rxq->rx_rel_mbufs(rxq);
                reset_rx_queue(rxq);
                dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
                dev->data->rx_queues[i] = NULL;
        }
}

static int
ice_dcf_dev_stop(struct rte_eth_dev *dev)
{
        struct ice_dcf_adapter *dcf_ad = dev->data->dev_private;
        struct rte_intr_handle *intr_handle = dev->intr_handle;
        struct ice_adapter *ad = &dcf_ad->parent;

        if (ad->pf.adapter_stopped == 1) {
                PMD_DRV_LOG(DEBUG, "Port is already stopped");
                return 0;
        }

        /* Stop the VF representors for this device */
        ice_dcf_vf_repr_stop_all(dcf_ad);

        ice_dcf_stop_queues(dev);

        rte_intr_efd_disable(intr_handle);
        if (intr_handle->intr_vec) {
                rte_free(intr_handle->intr_vec);
                intr_handle->intr_vec = NULL;
        }

        ice_dcf_add_del_all_mac_addr(&dcf_ad->real_hw, false);
        dev->data->dev_link.link_status = ETH_LINK_DOWN;
        ad->pf.adapter_stopped = 1;

        return 0;
}

static int
ice_dcf_dev_configure(struct rte_eth_dev *dev)
{
        struct ice_dcf_adapter *dcf_ad = dev->data->dev_private;
        struct ice_adapter *ad = &dcf_ad->parent;

        ad->rx_bulk_alloc_allowed = true;
        ad->tx_simple_allowed = true;

        if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
                dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;

        return 0;
}

static int
ice_dcf_dev_info_get(struct rte_eth_dev *dev,
                     struct rte_eth_dev_info *dev_info)
{
        struct ice_dcf_adapter *adapter = dev->data->dev_private;
        struct ice_dcf_hw *hw = &adapter->real_hw;

        dev_info->max_mac_addrs = 1;
        dev_info->max_rx_queues = hw->vsi_res->num_queue_pairs;
        dev_info->max_tx_queues = hw->vsi_res->num_queue_pairs;
        dev_info->min_rx_bufsize = ICE_BUF_SIZE_MIN;
        dev_info->max_rx_pktlen = ICE_FRAME_SIZE_MAX;
        dev_info->hash_key_size = hw->vf_res->rss_key_size;
        dev_info->reta_size = hw->vf_res->rss_lut_size;
        dev_info->flow_type_rss_offloads = ICE_RSS_OFFLOAD_ALL;

        dev_info->rx_offload_capa =
                DEV_RX_OFFLOAD_VLAN_STRIP |
                DEV_RX_OFFLOAD_IPV4_CKSUM |
                DEV_RX_OFFLOAD_UDP_CKSUM |
                DEV_RX_OFFLOAD_TCP_CKSUM |
                DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
                DEV_RX_OFFLOAD_SCATTER |
                DEV_RX_OFFLOAD_JUMBO_FRAME |
                DEV_RX_OFFLOAD_VLAN_FILTER |
                DEV_RX_OFFLOAD_RSS_HASH;
        dev_info->tx_offload_capa =
                DEV_TX_OFFLOAD_VLAN_INSERT |
                DEV_TX_OFFLOAD_IPV4_CKSUM |
                DEV_TX_OFFLOAD_UDP_CKSUM |
                DEV_TX_OFFLOAD_TCP_CKSUM |
                DEV_TX_OFFLOAD_SCTP_CKSUM |
                DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
                DEV_TX_OFFLOAD_TCP_TSO |
                DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
                DEV_TX_OFFLOAD_GRE_TNL_TSO |
                DEV_TX_OFFLOAD_IPIP_TNL_TSO |
                DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
                DEV_TX_OFFLOAD_MULTI_SEGS;

        dev_info->default_rxconf = (struct rte_eth_rxconf) {
                .rx_thresh = {
                        .pthresh = ICE_DEFAULT_RX_PTHRESH,
                        .hthresh = ICE_DEFAULT_RX_HTHRESH,
                        .wthresh = ICE_DEFAULT_RX_WTHRESH,
                },
                .rx_free_thresh = ICE_DEFAULT_RX_FREE_THRESH,
                .rx_drop_en = 0,
                .offloads = 0,
        };

        dev_info->default_txconf = (struct rte_eth_txconf) {
                .tx_thresh = {
                        .pthresh = ICE_DEFAULT_TX_PTHRESH,
                        .hthresh = ICE_DEFAULT_TX_HTHRESH,
                        .wthresh = ICE_DEFAULT_TX_WTHRESH,
                },
                .tx_free_thresh = ICE_DEFAULT_TX_FREE_THRESH,
                .tx_rs_thresh = ICE_DEFAULT_TX_RSBIT_THRESH,
                .offloads = 0,
        };

        dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
                .nb_max = ICE_MAX_RING_DESC,
                .nb_min = ICE_MIN_RING_DESC,
                .nb_align = ICE_ALIGN_RING_DESC,
        };

        dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
                .nb_max = ICE_MAX_RING_DESC,
                .nb_min = ICE_MIN_RING_DESC,
                .nb_align = ICE_ALIGN_RING_DESC,
        };

        return 0;
}

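/* Promiscuous and allmulticast changes are accepted as no-ops: the
 * callbacks below simply report success without touching the device.
 */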
static int
ice_dcf_dev_promiscuous_enable(__rte_unused struct rte_eth_dev *dev)
{
        return 0;
}

static int
ice_dcf_dev_promiscuous_disable(__rte_unused struct rte_eth_dev *dev)
{
        return 0;
}

static int
ice_dcf_dev_allmulticast_enable(__rte_unused struct rte_eth_dev *dev)
{
        return 0;
}

static int
ice_dcf_dev_allmulticast_disable(__rte_unused struct rte_eth_dev *dev)
{
        return 0;
}

static int
ice_dcf_dev_flow_ops_get(struct rte_eth_dev *dev,
                         const struct rte_flow_ops **ops)
{
        if (!dev)
                return -EINVAL;

        *ops = &ice_flow_ops;
        return 0;
}

#define ICE_DCF_32_BIT_WIDTH (CHAR_BIT * 4)
#define ICE_DCF_48_BIT_WIDTH (CHAR_BIT * 6)
#define ICE_DCF_48_BIT_MASK  RTE_LEN2MASK(ICE_DCF_48_BIT_WIDTH, uint64_t)

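/* Hardware statistics counters wrap at 48 (or 32) bits; compute the
 * delta from the stored offset, accounting for a single rollover.
 */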
static void
ice_dcf_stat_update_48(uint64_t *offset, uint64_t *stat)
{
        if (*stat >= *offset)
                *stat = *stat - *offset;
        else
                *stat = (uint64_t)((*stat +
                        ((uint64_t)1 << ICE_DCF_48_BIT_WIDTH)) - *offset);

        *stat &= ICE_DCF_48_BIT_MASK;
}

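/* Same rollover-aware delta for 32-bit counters. */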
static void
ice_dcf_stat_update_32(uint64_t *offset, uint64_t *stat)
{
        if (*stat >= *offset)
                *stat = (uint64_t)(*stat - *offset);
        else
                *stat = (uint64_t)((*stat +
                        ((uint64_t)1 << ICE_DCF_32_BIT_WIDTH)) - *offset);
}

static void
ice_dcf_update_stats(struct virtchnl_eth_stats *oes,
                     struct virtchnl_eth_stats *nes)
{
        ice_dcf_stat_update_48(&oes->rx_bytes, &nes->rx_bytes);
        ice_dcf_stat_update_48(&oes->rx_unicast, &nes->rx_unicast);
        ice_dcf_stat_update_48(&oes->rx_multicast, &nes->rx_multicast);
        ice_dcf_stat_update_48(&oes->rx_broadcast, &nes->rx_broadcast);
        ice_dcf_stat_update_32(&oes->rx_discards, &nes->rx_discards);
        ice_dcf_stat_update_48(&oes->tx_bytes, &nes->tx_bytes);
        ice_dcf_stat_update_48(&oes->tx_unicast, &nes->tx_unicast);
        ice_dcf_stat_update_48(&oes->tx_multicast, &nes->tx_multicast);
        ice_dcf_stat_update_48(&oes->tx_broadcast, &nes->tx_broadcast);
        ice_dcf_stat_update_32(&oes->tx_errors, &nes->tx_errors);
        ice_dcf_stat_update_32(&oes->tx_discards, &nes->tx_discards);
}

static int
ice_dcf_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
        struct ice_dcf_adapter *ad = dev->data->dev_private;
        struct ice_dcf_hw *hw = &ad->real_hw;
        struct virtchnl_eth_stats pstats;
        int ret;

        if (hw->resetting) {
                PMD_DRV_LOG(ERR,
                            "The DCF has been reset by PF, please reinit first");
                return -EIO;
        }

        ret = ice_dcf_query_stats(hw, &pstats);
        if (ret == 0) {
                ice_dcf_update_stats(&hw->eth_stats_offset, &pstats);
                stats->ipackets = pstats.rx_unicast + pstats.rx_multicast +
                                pstats.rx_broadcast - pstats.rx_discards;
                stats->opackets = pstats.tx_broadcast + pstats.tx_multicast +
                                                pstats.tx_unicast;
                stats->imissed = pstats.rx_discards;
                stats->oerrors = pstats.tx_errors + pstats.tx_discards;
                stats->ibytes = pstats.rx_bytes;
                stats->ibytes -= stats->ipackets * RTE_ETHER_CRC_LEN;
                stats->obytes = pstats.tx_bytes;
        } else {
                PMD_DRV_LOG(ERR, "Get statistics failed");
        }
        return ret;
}

static int
ice_dcf_stats_reset(struct rte_eth_dev *dev)
{
        struct ice_dcf_adapter *ad = dev->data->dev_private;
        struct ice_dcf_hw *hw = &ad->real_hw;
        struct virtchnl_eth_stats pstats;
        int ret;

        if (hw->resetting)
                return 0;

        /* query the current stat values to use as the reset baseline */
        ret = ice_dcf_query_stats(hw, &pstats);
        if (ret != 0)
                return ret;

        /* set stats offset based on current values */
        hw->eth_stats_offset = pstats;

        return 0;
}

static void
ice_dcf_free_repr_info(struct ice_dcf_adapter *dcf_adapter)
{
        if (dcf_adapter->repr_infos) {
                rte_free(dcf_adapter->repr_infos);
                dcf_adapter->repr_infos = NULL;
        }
}

static int
ice_dcf_init_repr_info(struct ice_dcf_adapter *dcf_adapter)
{
        dcf_adapter->repr_infos =
                        rte_calloc("ice_dcf_rep_info",
                                   dcf_adapter->real_hw.num_vfs,
                                   sizeof(dcf_adapter->repr_infos[0]), 0);
        if (!dcf_adapter->repr_infos) {
                PMD_DRV_LOG(ERR, "Failed to alloc memory for VF representors");
                return -ENOMEM;
        }

        return 0;
}

static int
ice_dcf_dev_close(struct rte_eth_dev *dev)
{
        struct ice_dcf_adapter *adapter = dev->data->dev_private;

        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;

        (void)ice_dcf_dev_stop(dev);

        ice_dcf_free_repr_info(adapter);
        ice_dcf_uninit_parent_adapter(dev);
        ice_dcf_uninit_hw(dev, &adapter->real_hw);

        return 0;
}

int
ice_dcf_link_update(struct rte_eth_dev *dev,
                    __rte_unused int wait_to_complete)
{
        struct ice_dcf_adapter *ad = dev->data->dev_private;
        struct ice_dcf_hw *hw = &ad->real_hw;
        struct rte_eth_link new_link;

        memset(&new_link, 0, sizeof(new_link));

        /* Only read the link status stored in the VF; it is updated when
         * a LINK_CHANGE event is received from the PF via virtchnl.
         */
        switch (hw->link_speed) {
        case 10:
                new_link.link_speed = ETH_SPEED_NUM_10M;
                break;
        case 100:
                new_link.link_speed = ETH_SPEED_NUM_100M;
                break;
        case 1000:
                new_link.link_speed = ETH_SPEED_NUM_1G;
                break;
        case 10000:
                new_link.link_speed = ETH_SPEED_NUM_10G;
                break;
        case 20000:
                new_link.link_speed = ETH_SPEED_NUM_20G;
                break;
        case 25000:
                new_link.link_speed = ETH_SPEED_NUM_25G;
                break;
        case 40000:
                new_link.link_speed = ETH_SPEED_NUM_40G;
                break;
        case 50000:
                new_link.link_speed = ETH_SPEED_NUM_50G;
                break;
        case 100000:
                new_link.link_speed = ETH_SPEED_NUM_100G;
                break;
        default:
                new_link.link_speed = ETH_SPEED_NUM_NONE;
                break;
        }

        new_link.link_duplex = ETH_LINK_FULL_DUPLEX;
        new_link.link_status = hw->link_up ? ETH_LINK_UP :
                                             ETH_LINK_DOWN;
        new_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
                                  ETH_LINK_SPEED_FIXED);

        return rte_eth_linkstatus_set(dev, &new_link);
}

/* Add UDP tunneling port */
static int
ice_dcf_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
                                struct rte_eth_udp_tunnel *udp_tunnel)
{
        struct ice_dcf_adapter *adapter = dev->data->dev_private;
        struct ice_adapter *parent_adapter = &adapter->parent;
        struct ice_hw *parent_hw = &parent_adapter->hw;
        int ret = 0;

        if (!udp_tunnel)
                return -EINVAL;

        switch (udp_tunnel->prot_type) {
        case RTE_TUNNEL_TYPE_VXLAN:
                ret = ice_create_tunnel(parent_hw, TNL_VXLAN,
                                        udp_tunnel->udp_port);
                break;
        case RTE_TUNNEL_TYPE_ECPRI:
                ret = ice_create_tunnel(parent_hw, TNL_ECPRI,
                                        udp_tunnel->udp_port);
                break;
        default:
                PMD_DRV_LOG(ERR, "Invalid tunnel type");
                ret = -EINVAL;
                break;
        }

        return ret;
}

/* Delete UDP tunneling port */
static int
ice_dcf_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
                                struct rte_eth_udp_tunnel *udp_tunnel)
{
        struct ice_dcf_adapter *adapter = dev->data->dev_private;
        struct ice_adapter *parent_adapter = &adapter->parent;
        struct ice_hw *parent_hw = &parent_adapter->hw;
        int ret = 0;

        if (!udp_tunnel)
                return -EINVAL;

        switch (udp_tunnel->prot_type) {
        case RTE_TUNNEL_TYPE_VXLAN:
        case RTE_TUNNEL_TYPE_ECPRI:
                ret = ice_destroy_tunnel(parent_hw, udp_tunnel->udp_port, 0);
                break;
        default:
                PMD_DRV_LOG(ERR, "Invalid tunnel type");
                ret = -EINVAL;
                break;
        }

        return ret;
}

static int
ice_dcf_tm_ops_get(struct rte_eth_dev *dev __rte_unused,
                void *arg)
{
        if (!arg)
                return -EINVAL;

        *(const void **)arg = &ice_dcf_tm_ops;

        return 0;
}

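/* Reset the port by tearing the DCF instance down and initializing it
 * again.
 */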
static int
ice_dcf_dev_reset(struct rte_eth_dev *dev)
{
        int ret;

        ret = ice_dcf_dev_uninit(dev);
        if (ret)
                return ret;

        ret = ice_dcf_dev_init(dev);

        return ret;
}

static const struct eth_dev_ops ice_dcf_eth_dev_ops = {
        .dev_start               = ice_dcf_dev_start,
        .dev_stop                = ice_dcf_dev_stop,
        .dev_close               = ice_dcf_dev_close,
        .dev_reset               = ice_dcf_dev_reset,
        .dev_configure           = ice_dcf_dev_configure,
        .dev_infos_get           = ice_dcf_dev_info_get,
        .rx_queue_setup          = ice_rx_queue_setup,
        .tx_queue_setup          = ice_tx_queue_setup,
        .rx_queue_release        = ice_dev_rx_queue_release,
        .tx_queue_release        = ice_dev_tx_queue_release,
        .rx_queue_start          = ice_dcf_rx_queue_start,
        .tx_queue_start          = ice_dcf_tx_queue_start,
        .rx_queue_stop           = ice_dcf_rx_queue_stop,
        .tx_queue_stop           = ice_dcf_tx_queue_stop,
        .link_update             = ice_dcf_link_update,
        .stats_get               = ice_dcf_stats_get,
        .stats_reset             = ice_dcf_stats_reset,
        .promiscuous_enable      = ice_dcf_dev_promiscuous_enable,
        .promiscuous_disable     = ice_dcf_dev_promiscuous_disable,
        .allmulticast_enable     = ice_dcf_dev_allmulticast_enable,
        .allmulticast_disable    = ice_dcf_dev_allmulticast_disable,
        .flow_ops_get            = ice_dcf_dev_flow_ops_get,
        .udp_tunnel_port_add     = ice_dcf_dev_udp_tunnel_port_add,
        .udp_tunnel_port_del     = ice_dcf_dev_udp_tunnel_port_del,
        .tm_ops_get              = ice_dcf_tm_ops_get,
};

static int
ice_dcf_dev_init(struct rte_eth_dev *eth_dev)
{
        struct ice_dcf_adapter *adapter = eth_dev->data->dev_private;

        adapter->real_hw.resetting = false;
        eth_dev->dev_ops = &ice_dcf_eth_dev_ops;
        eth_dev->rx_pkt_burst = ice_dcf_recv_pkts;
        eth_dev->tx_pkt_burst = ice_dcf_xmit_pkts;

        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;

        eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

        adapter->real_hw.vc_event_msg_cb = ice_dcf_handle_pf_event_msg;
        if (ice_dcf_init_hw(eth_dev, &adapter->real_hw) != 0) {
                PMD_INIT_LOG(ERR, "Failed to init DCF hardware");
                return -1;
        }

        if (ice_dcf_init_parent_adapter(eth_dev) != 0) {
                PMD_INIT_LOG(ERR, "Failed to init DCF parent adapter");
                ice_dcf_uninit_hw(eth_dev, &adapter->real_hw);
                return -1;
        }

        return 0;
}

static int
ice_dcf_dev_uninit(struct rte_eth_dev *eth_dev)
{
        ice_dcf_dev_close(eth_dev);

        return 0;
}

static int
ice_dcf_cap_check_handler(__rte_unused const char *key,
                          const char *value, __rte_unused void *opaque)
{
        if (strcmp(value, "dcf"))
                return -1;

        return 0;
}

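/* Return 1 when the devargs contain the key-value pair "cap=dcf",
 * i.e. the user asked for DCF mode on this device.
 */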
static int
ice_dcf_cap_selected(struct rte_devargs *devargs)
{
        struct rte_kvargs *kvlist;
        const char *key = "cap";
        int ret = 0;

        if (devargs == NULL)
                return 0;

        kvlist = rte_kvargs_parse(devargs->args, NULL);
        if (kvlist == NULL)
                return 0;

        if (!rte_kvargs_count(kvlist, key))
                goto exit;

        /* dcf capability selected when there's a key-value pair: cap=dcf */
        if (rte_kvargs_process(kvlist, key,
                               ice_dcf_cap_check_handler, NULL) < 0)
                goto exit;

        ret = 1;

exit:
        rte_kvargs_free(kvlist);
        return ret;
}

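/* Probe the DCF port itself and, if representor devargs are present,
 * create one ethdev representor per requested VF ID.
 */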
static int
eth_ice_dcf_pci_probe(__rte_unused struct rte_pci_driver *pci_drv,
                      struct rte_pci_device *pci_dev)
{
        struct rte_eth_devargs eth_da = { .nb_representor_ports = 0 };
        struct ice_dcf_vf_repr_param repr_param;
        char repr_name[RTE_ETH_NAME_MAX_LEN];
        struct ice_dcf_adapter *dcf_adapter;
        struct rte_eth_dev *dcf_ethdev;
        uint16_t dcf_vsi_id;
        int i, ret;

        if (!ice_dcf_cap_selected(pci_dev->device.devargs))
                return 1;

        ret = rte_eth_devargs_parse(pci_dev->device.devargs->args, &eth_da);
        if (ret)
                return ret;

        ret = rte_eth_dev_pci_generic_probe(pci_dev,
                                            sizeof(struct ice_dcf_adapter),
                                            ice_dcf_dev_init);
        if (ret || !eth_da.nb_representor_ports)
                return ret;
        if (eth_da.type != RTE_ETH_REPRESENTOR_VF)
                return -ENOTSUP;

        dcf_ethdev = rte_eth_dev_allocated(pci_dev->device.name);
        if (dcf_ethdev == NULL)
                return -ENODEV;

        dcf_adapter = dcf_ethdev->data->dev_private;
        ret = ice_dcf_init_repr_info(dcf_adapter);
        if (ret)
                return ret;

        if (eth_da.nb_representor_ports > dcf_adapter->real_hw.num_vfs ||
            eth_da.nb_representor_ports >= RTE_MAX_ETHPORTS) {
                PMD_DRV_LOG(ERR, "the number of port representors is too large: %u",
                            eth_da.nb_representor_ports);
                ice_dcf_free_repr_info(dcf_adapter);
                return -EINVAL;
        }

        dcf_vsi_id = dcf_adapter->real_hw.vsi_id | VIRTCHNL_DCF_VF_VSI_VALID;

        repr_param.dcf_eth_dev = dcf_ethdev;
        repr_param.switch_domain_id = 0;

        for (i = 0; i < eth_da.nb_representor_ports; i++) {
                uint16_t vf_id = eth_da.representor_ports[i];
                struct rte_eth_dev *vf_rep_eth_dev;

                if (vf_id >= dcf_adapter->real_hw.num_vfs) {
                        PMD_DRV_LOG(ERR, "VF ID %u is out of range (0 ~ %u)",
                                    vf_id, dcf_adapter->real_hw.num_vfs - 1);
                        ret = -EINVAL;
                        break;
                }

                if (dcf_adapter->real_hw.vf_vsi_map[vf_id] == dcf_vsi_id) {
                        PMD_DRV_LOG(ERR, "VF ID %u is DCF's ID.", vf_id);
                        ret = -EINVAL;
                        break;
                }

                repr_param.vf_id = vf_id;
                snprintf(repr_name, sizeof(repr_name), "net_%s_representor_%u",
                         pci_dev->device.name, vf_id);
                ret = rte_eth_dev_create(&pci_dev->device, repr_name,
                                         sizeof(struct ice_dcf_vf_repr),
                                         NULL, NULL, ice_dcf_vf_repr_init,
                                         &repr_param);
                if (ret) {
                        PMD_DRV_LOG(ERR, "failed to create DCF VF representor %s",
                                    repr_name);
                        break;
                }

                vf_rep_eth_dev = rte_eth_dev_allocated(repr_name);
                if (!vf_rep_eth_dev) {
                        PMD_DRV_LOG(ERR,
                                    "Failed to find the ethdev for DCF VF representor: %s",
                                    repr_name);
                        ret = -ENODEV;
                        break;
                }

                dcf_adapter->repr_infos[vf_id].vf_rep_eth_dev = vf_rep_eth_dev;
                dcf_adapter->num_reprs++;
        }

        return ret;
}

static int
eth_ice_dcf_pci_remove(struct rte_pci_device *pci_dev)
{
        struct rte_eth_dev *eth_dev;

        eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
        if (!eth_dev)
                return 0;

        if (eth_dev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR)
                return rte_eth_dev_pci_generic_remove(pci_dev,
                                                      ice_dcf_vf_repr_uninit);
        else
                return rte_eth_dev_pci_generic_remove(pci_dev,
                                                      ice_dcf_dev_uninit);
}

static const struct rte_pci_id pci_id_ice_dcf_map[] = {
        { RTE_PCI_DEVICE(IAVF_INTEL_VENDOR_ID, IAVF_DEV_ID_ADAPTIVE_VF) },
        { .vendor_id = 0, /* sentinel */ },
};

static struct rte_pci_driver rte_ice_dcf_pmd = {
        .id_table = pci_id_ice_dcf_map,
        .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
        .probe = eth_ice_dcf_pci_probe,
        .remove = eth_ice_dcf_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_ice_dcf, rte_ice_dcf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_ice_dcf, pci_id_ice_dcf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_ice_dcf, "* igb_uio | vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(net_ice_dcf, "cap=dcf");