net/ice/base: fix build with GCC 12
[dpdk.git] / drivers / net / ice / ice_dcf_ethdev.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2020 Intel Corporation
3  */
4
5 #include <errno.h>
6 #include <stdbool.h>
7 #include <sys/queue.h>
8 #include <sys/types.h>
9 #include <unistd.h>
10
11 #include <rte_interrupts.h>
12 #include <rte_debug.h>
13 #include <rte_pci.h>
14 #include <rte_atomic.h>
15 #include <rte_eal.h>
16 #include <rte_ether.h>
17 #include <ethdev_pci.h>
18 #include <rte_kvargs.h>
19 #include <rte_malloc.h>
20 #include <rte_memzone.h>
21 #include <rte_dev.h>
22
23 #include <iavf_devids.h>
24
25 #include "ice_generic_flow.h"
26 #include "ice_dcf_ethdev.h"
27 #include "ice_rxtx.h"
28
29 #define DCF_NUM_MACADDR_MAX      64
30
31 static int dcf_add_del_mc_addr_list(struct ice_dcf_hw *hw,
32                                                 struct rte_ether_addr *mc_addrs,
33                                                 uint32_t mc_addrs_num, bool add);
34
35 static int
36 ice_dcf_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
37                                 struct rte_eth_udp_tunnel *udp_tunnel);
38 static int
39 ice_dcf_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
40                                 struct rte_eth_udp_tunnel *udp_tunnel);
41
42 static int
43 ice_dcf_dev_init(struct rte_eth_dev *eth_dev);
44
45 static int
46 ice_dcf_dev_uninit(struct rte_eth_dev *eth_dev);
47
/* Maps an extended-statistic name to the byte offset of its counter
 * inside struct ice_dcf_eth_stats, so xstats can be read generically
 * via pointer arithmetic.
 */
struct rte_ice_dcf_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned int offset;
};

/* Table of all extended statistics exposed by the DCF PMD. */
static const struct rte_ice_dcf_xstats_name_off rte_ice_dcf_stats_strings[] = {
	{"rx_bytes", offsetof(struct ice_dcf_eth_stats, rx_bytes)},
	{"rx_unicast_packets", offsetof(struct ice_dcf_eth_stats, rx_unicast)},
	{"rx_multicast_packets", offsetof(struct ice_dcf_eth_stats, rx_multicast)},
	{"rx_broadcast_packets", offsetof(struct ice_dcf_eth_stats, rx_broadcast)},
	{"rx_dropped_packets", offsetof(struct ice_dcf_eth_stats, rx_discards)},
	{"rx_unknown_protocol_packets", offsetof(struct ice_dcf_eth_stats,
		rx_unknown_protocol)},
	{"tx_bytes", offsetof(struct ice_dcf_eth_stats, tx_bytes)},
	{"tx_unicast_packets", offsetof(struct ice_dcf_eth_stats, tx_unicast)},
	{"tx_multicast_packets", offsetof(struct ice_dcf_eth_stats, tx_multicast)},
	{"tx_broadcast_packets", offsetof(struct ice_dcf_eth_stats, tx_broadcast)},
	{"tx_dropped_packets", offsetof(struct ice_dcf_eth_stats, tx_discards)},
	{"tx_error_packets", offsetof(struct ice_dcf_eth_stats, tx_errors)},
};

/* Number of entries in the xstats table above. */
#define ICE_DCF_NB_XSTATS (sizeof(rte_ice_dcf_stats_strings) / \
		sizeof(rte_ice_dcf_stats_strings[0]))
71
/* Dummy Rx burst callback: the DCF control port does not carry data
 * traffic, so always report zero packets received.
 */
static uint16_t
ice_dcf_recv_pkts(__rte_unused void *rx_queue,
		  __rte_unused struct rte_mbuf **bufs,
		  __rte_unused uint16_t nb_pkts)
{
	return 0;
}
79
/* Dummy Tx burst callback: the DCF control port does not carry data
 * traffic, so always report zero packets transmitted.
 */
static uint16_t
ice_dcf_xmit_pkts(__rte_unused void *tx_queue,
		  __rte_unused struct rte_mbuf **bufs,
		  __rte_unused uint16_t nb_pkts)
{
	return 0;
}
87
/* Program one Rx queue's software parameters (buffer length, maximum
 * packet length, scattered-Rx flag) and initialize its hardware tail
 * register so all descriptors are available to the device.
 *
 * Returns 0 on success, -EINVAL when the computed maximum packet
 * length falls outside the supported range.
 */
static int
ice_dcf_init_rxq(struct rte_eth_dev *dev, struct ice_rx_queue *rxq)
{
	struct ice_dcf_adapter *dcf_ad = dev->data->dev_private;
	struct rte_eth_dev_data *dev_data = dev->data;
	struct iavf_hw *hw = &dcf_ad->real_hw.avf;
	uint16_t buf_size, max_pkt_len;

	/* Usable data room of one mbuf, headroom excluded */
	buf_size = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;
	rxq->rx_hdr_len = 0;
	rxq->rx_buf_len = RTE_ALIGN(buf_size, (1 << ICE_RLAN_CTX_DBUF_S));
	/* Capped both by what chained buffers can hold and by the MTU */
	max_pkt_len = RTE_MIN(ICE_SUPPORT_CHAIN_NUM * rxq->rx_buf_len,
			      dev->data->mtu + ICE_ETH_OVERHEAD);

	/* Check maximum packet length is set correctly.  */
	if (max_pkt_len <= RTE_ETHER_MIN_LEN ||
	    max_pkt_len > ICE_FRAME_SIZE_MAX) {
		PMD_DRV_LOG(ERR, "maximum packet length must be "
			    "larger than %u and smaller than %u",
			    (uint32_t)RTE_ETHER_MIN_LEN,
			    (uint32_t)ICE_FRAME_SIZE_MAX);
		return -EINVAL;
	}

	rxq->max_pkt_len = max_pkt_len;
	/* Scattered Rx is needed when requested explicitly or when a
	 * single buffer cannot hold a max-size frame plus two VLAN tags.
	 */
	if ((dev_data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER) ||
	    (rxq->max_pkt_len + 2 * RTE_VLAN_HLEN) > buf_size) {
		dev_data->scattered_rx = 1;
	}
	/* Hand all descriptors to hardware via the queue tail register */
	rxq->qrx_tail = hw->hw_addr + IAVF_QRX_TAIL1(rxq->queue_id);
	IAVF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
	IAVF_WRITE_FLUSH(hw);

	return 0;
}
123
124 static int
125 ice_dcf_init_rx_queues(struct rte_eth_dev *dev)
126 {
127         struct ice_rx_queue **rxq =
128                 (struct ice_rx_queue **)dev->data->rx_queues;
129         int i, ret;
130
131         for (i = 0; i < dev->data->nb_rx_queues; i++) {
132                 if (!rxq[i] || !rxq[i]->q_set)
133                         continue;
134                 ret = ice_dcf_init_rxq(dev, rxq[i]);
135                 if (ret)
136                         return ret;
137         }
138
139         ice_set_rx_function(dev);
140         ice_set_tx_function(dev);
141
142         return 0;
143 }
144
#define IAVF_MISC_VEC_ID                RTE_INTR_VEC_ZERO_OFFSET
#define IAVF_RX_VEC_START               RTE_INTR_VEC_RXTX_OFFSET

#define IAVF_ITR_INDEX_DEFAULT          0
#define IAVF_QUEUE_ITR_INTERVAL_DEFAULT 32 /* 32 us */
#define IAVF_QUEUE_ITR_INTERVAL_MAX     8160 /* 8160 us */

/* Convert an ITR interval in microseconds to the hardware register
 * count, where one count unit represents 2 us. Requests outside
 * [0, IAVF_QUEUE_ITR_INTERVAL_MAX] fall back to the default interval.
 */
static inline uint16_t
iavf_calc_itr_interval(int16_t interval)
{
	uint16_t us = IAVF_QUEUE_ITR_INTERVAL_DEFAULT;

	if (interval >= 0 && interval <= IAVF_QUEUE_ITR_INTERVAL_MAX)
		us = (uint16_t)interval;

	/* each written unit equals 2 us */
	return us / 2;
}
161
/* Build the Rx-queue to MSI-X vector mapping and program the related
 * interrupt throttling (ITR) registers, then push the map to hardware.
 *
 * Three configurations are handled:
 *  - Rx interrupts disabled: a single vector is used only for
 *    descriptor write-back (WB_ON_ITR when supported, otherwise the
 *    misc interrupt with the ITR set to its maximum);
 *  - Rx interrupts enabled but only one vector usable: every queue
 *    shares the misc vector;
 *  - Rx interrupts enabled with multiple vectors: queues are spread
 *    round-robin over the data vectors starting at IAVF_RX_VEC_START.
 *
 * Returns 0 on success, -1 on any failure.
 */
static int
ice_dcf_config_rx_queues_irqs(struct rte_eth_dev *dev,
				     struct rte_intr_handle *intr_handle)
{
	struct ice_dcf_adapter *adapter = dev->data->dev_private;
	struct ice_dcf_hw *hw = &adapter->real_hw;
	uint16_t interval, i;
	int vec;

	/* Create one event fd per Rx queue when the platform supports it */
	if (rte_intr_cap_multiple(intr_handle) &&
	    dev->data->dev_conf.intr_conf.rxq) {
		if (rte_intr_efd_enable(intr_handle, dev->data->nb_rx_queues))
			return -1;
	}

	if (rte_intr_dp_is_en(intr_handle)) {
		if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
						   dev->data->nb_rx_queues)) {
			PMD_DRV_LOG(ERR, "Failed to allocate %d rx intr_vec",
				    dev->data->nb_rx_queues);
			return -1;
		}
	}

	if (!dev->data->dev_conf.intr_conf.rxq ||
	    !rte_intr_dp_is_en(intr_handle)) {
		/* Rx interrupt disabled, Map interrupt only for writeback */
		hw->nb_msix = 1;
		if (hw->vf_res->vf_cap_flags &
		    VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) {
			/* If WB_ON_ITR supports, enable it */
			hw->msix_base = IAVF_RX_VEC_START;
			/* Set the ITR for index zero, to 2us to make sure that
			 * we leave time for aggregation to occur, but don't
			 * increase latency dramatically.
			 */
			IAVF_WRITE_REG(&hw->avf,
				       IAVF_VFINT_DYN_CTLN1(hw->msix_base - 1),
				       (0 << IAVF_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) |
				       IAVF_VFINT_DYN_CTLN1_WB_ON_ITR_MASK |
				       (2UL << IAVF_VFINT_DYN_CTLN1_INTERVAL_SHIFT));
		} else {
			/* If no WB_ON_ITR offload flags, need to set
			 * interrupt for descriptor write back.
			 */
			hw->msix_base = IAVF_MISC_VEC_ID;

			/* set ITR to max */
			interval =
			iavf_calc_itr_interval(IAVF_QUEUE_ITR_INTERVAL_MAX);
			IAVF_WRITE_REG(&hw->avf, IAVF_VFINT_DYN_CTL01,
				       IAVF_VFINT_DYN_CTL01_INTENA_MASK |
				       (IAVF_ITR_INDEX_DEFAULT <<
					IAVF_VFINT_DYN_CTL01_ITR_INDX_SHIFT) |
				       (interval <<
					IAVF_VFINT_DYN_CTL01_INTERVAL_SHIFT));
		}
		IAVF_WRITE_FLUSH(&hw->avf);
		/* map all queues to the same interrupt */
		for (i = 0; i < dev->data->nb_rx_queues; i++)
			hw->rxq_map[hw->msix_base] |= 1 << i;
	} else {
		if (!rte_intr_allow_others(intr_handle)) {
			hw->nb_msix = 1;
			hw->msix_base = IAVF_MISC_VEC_ID;
			for (i = 0; i < dev->data->nb_rx_queues; i++) {
				hw->rxq_map[hw->msix_base] |= 1 << i;
				rte_intr_vec_list_index_set(intr_handle,
							i, IAVF_MISC_VEC_ID);
			}
			PMD_DRV_LOG(DEBUG,
				    "vector %u are mapping to all Rx queues",
				    hw->msix_base);
		} else {
			/* If Rx interrupt is required, and we can use
			 * multi interrupts, then the vec is from 1
			 */
			hw->nb_msix = RTE_MIN(hw->vf_res->max_vectors,
				      rte_intr_nb_efd_get(intr_handle));
			hw->msix_base = IAVF_MISC_VEC_ID;
			vec = IAVF_MISC_VEC_ID;
			for (i = 0; i < dev->data->nb_rx_queues; i++) {
				hw->rxq_map[vec] |= 1 << i;
				rte_intr_vec_list_index_set(intr_handle,
								   i, vec++);
				if (vec >= hw->nb_msix)
					vec = IAVF_RX_VEC_START;
			}
			PMD_DRV_LOG(DEBUG,
				    "%u vectors are mapping to %u Rx queues",
				    hw->nb_msix, dev->data->nb_rx_queues);
		}
	}

	/* Push the queue/vector mapping to the PF via virtchnl */
	if (ice_dcf_config_irq_map(hw)) {
		PMD_DRV_LOG(ERR, "config interrupt mapping failed");
		return -1;
	}
	return 0;
}
262
/* Allocate one mbuf per Rx descriptor, record it in the queue's
 * software ring and write its buffer DMA address into the flex
 * descriptor ring.
 *
 * Returns 0 on success, -ENOMEM when an mbuf allocation fails;
 * mbufs already attached to earlier descriptors remain in sw_ring.
 */
static int
alloc_rxq_mbufs(struct ice_rx_queue *rxq)
{
	volatile union ice_rx_flex_desc *rxd;
	struct rte_mbuf *mbuf = NULL;
	uint64_t dma_addr;
	uint16_t i;

	for (i = 0; i < rxq->nb_rx_desc; i++) {
		mbuf = rte_mbuf_raw_alloc(rxq->mp);
		if (unlikely(!mbuf)) {
			PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
			return -ENOMEM;
		}

		/* Raw alloc skips initialization: set metadata by hand */
		rte_mbuf_refcnt_set(mbuf, 1);
		mbuf->next = NULL;
		mbuf->data_off = RTE_PKTMBUF_HEADROOM;
		mbuf->nb_segs = 1;
		mbuf->port = rxq->port_id;

		dma_addr =
			rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));

		rxd = &rxq->rx_ring[i];
		rxd->read.pkt_addr = dma_addr;
		rxd->read.hdr_addr = 0;
#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
		/* 32-byte descriptors carry extra reserved words */
		rxd->read.rsvd1 = 0;
		rxd->read.rsvd2 = 0;
#endif

		rxq->sw_ring[i].mbuf = (void *)mbuf;
	}

	return 0;
}
300
/* Start one Rx queue: fill it with mbufs, arm the hardware tail
 * register and ask the PF (via virtchnl) to switch the queue on.
 *
 * Returns 0 on success, -EINVAL for a bad queue id, or a negative
 * error from allocation / the queue-switch command.
 */
static int
ice_dcf_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct ice_dcf_adapter *ad = dev->data->dev_private;
	struct iavf_hw *hw = &ad->real_hw.avf;
	struct ice_rx_queue *rxq;
	int err = 0;

	if (rx_queue_id >= dev->data->nb_rx_queues)
		return -EINVAL;

	rxq = dev->data->rx_queues[rx_queue_id];

	err = alloc_rxq_mbufs(rxq);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
		return err;
	}

	/* Ensure descriptor writes land before the tail update */
	rte_wmb();

	/* Init the RX tail register. */
	IAVF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
	IAVF_WRITE_FLUSH(hw);

	/* Ready to switch the queue on */
	err = ice_dcf_switch_queue(&ad->real_hw, rx_queue_id, true, true);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
			    rx_queue_id);
		return err;
	}

	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	return 0;
}
338
/* Return an Rx queue's rings and bookkeeping to their pristine state:
 * zero all descriptors (including the ICE_RX_MAX_BURST trailer used by
 * the bulk-alloc path), point the trailing sw_ring entries at the
 * queue's fake mbuf, and reset all software counters. Safe to call
 * with rxq == NULL (no-op).
 */
static inline void
reset_rx_queue(struct ice_rx_queue *rxq)
{
	uint16_t len;
	uint32_t i;

	if (!rxq)
		return;

	/* descriptor count includes the bulk-alloc overrun region */
	len = rxq->nb_rx_desc + ICE_RX_MAX_BURST;

	for (i = 0; i < len * sizeof(union ice_rx_flex_desc); i++)
		((volatile char *)rxq->rx_ring)[i] = 0;

	memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));

	for (i = 0; i < ICE_RX_MAX_BURST; i++)
		rxq->sw_ring[rxq->nb_rx_desc + i].mbuf = &rxq->fake_mbuf;

	/* for rx bulk */
	rxq->rx_nb_avail = 0;
	rxq->rx_next_avail = 0;
	rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);

	rxq->rx_tail = 0;
	rxq->nb_rx_hold = 0;
	rxq->pkt_first_seg = NULL;
	rxq->pkt_last_seg = NULL;
}
368
/* Return a Tx queue's rings and bookkeeping to their pristine state:
 * zero the descriptor ring, mark every descriptor DONE, rebuild the
 * circular next_id/last_id linkage of the software ring, and reset
 * all tail/free/threshold counters. Logs and returns if txq is NULL.
 */
static inline void
reset_tx_queue(struct ice_tx_queue *txq)
{
	struct ice_tx_entry *txe;
	uint32_t i, size;
	uint16_t prev;

	if (!txq) {
		PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL");
		return;
	}

	txe = txq->sw_ring;
	size = sizeof(struct ice_tx_desc) * txq->nb_tx_desc;
	for (i = 0; i < size; i++)
		((volatile char *)txq->tx_ring)[i] = 0;

	/* link sw_ring entries into a ring: entry i follows entry i-1 */
	prev = (uint16_t)(txq->nb_tx_desc - 1);
	for (i = 0; i < txq->nb_tx_desc; i++) {
		/* DONE so the cleanup path treats them as reclaimable */
		txq->tx_ring[i].cmd_type_offset_bsz =
			rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DESC_DONE);
		txe[i].mbuf =  NULL;
		txe[i].last_id = i;
		txe[prev].next_id = i;
		prev = i;
	}

	txq->tx_tail = 0;
	txq->nb_tx_used = 0;

	txq->last_desc_cleaned = txq->nb_tx_desc - 1;
	txq->nb_tx_free = txq->nb_tx_desc - 1;

	txq->tx_next_dd = txq->tx_rs_thresh - 1;
	txq->tx_next_rs = txq->tx_rs_thresh - 1;
}
405
406 static int
407 ice_dcf_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
408 {
409         struct ice_dcf_adapter *ad = dev->data->dev_private;
410         struct ice_dcf_hw *hw = &ad->real_hw;
411         struct ice_rx_queue *rxq;
412         int err;
413
414         if (rx_queue_id >= dev->data->nb_rx_queues)
415                 return -EINVAL;
416
417         err = ice_dcf_switch_queue(hw, rx_queue_id, true, false);
418         if (err) {
419                 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
420                             rx_queue_id);
421                 return err;
422         }
423
424         rxq = dev->data->rx_queues[rx_queue_id];
425         rxq->rx_rel_mbufs(rxq);
426         reset_rx_queue(rxq);
427         dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
428
429         return 0;
430 }
431
/* Start one Tx queue: program its hardware tail register and ask the
 * PF (via virtchnl) to switch the queue on.
 *
 * Returns 0 on success, -EINVAL for a bad queue id, or the error from
 * the queue-switch command.
 */
static int
ice_dcf_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct ice_dcf_adapter *ad = dev->data->dev_private;
	struct iavf_hw *hw = &ad->real_hw.avf;
	struct ice_tx_queue *txq;
	int err = 0;

	if (tx_queue_id >= dev->data->nb_tx_queues)
		return -EINVAL;

	txq = dev->data->tx_queues[tx_queue_id];

	/* Init the TX tail register. */
	txq->qtx_tail = hw->hw_addr + IAVF_QTX_TAIL1(tx_queue_id);
	IAVF_PCI_REG_WRITE(txq->qtx_tail, 0);
	IAVF_WRITE_FLUSH(hw);

	/* Ready to switch the queue on */
	err = ice_dcf_switch_queue(&ad->real_hw, tx_queue_id, false, true);

	if (err) {
		PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
			    tx_queue_id);
		return err;
	}

	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	return 0;
}
463
464 static int
465 ice_dcf_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
466 {
467         struct ice_dcf_adapter *ad = dev->data->dev_private;
468         struct ice_dcf_hw *hw = &ad->real_hw;
469         struct ice_tx_queue *txq;
470         int err;
471
472         if (tx_queue_id >= dev->data->nb_tx_queues)
473                 return -EINVAL;
474
475         err = ice_dcf_switch_queue(hw, tx_queue_id, false, false);
476         if (err) {
477                 PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
478                             tx_queue_id);
479                 return err;
480         }
481
482         txq = dev->data->tx_queues[tx_queue_id];
483         txq->tx_rel_mbufs(txq);
484         reset_tx_queue(txq);
485         dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
486
487         return 0;
488 }
489
/* Start all non-deferred Tx queues first, then all non-deferred Rx
 * queues. On any failure, stop every queue started so far via the
 * goto cleanup ladder.
 *
 * Returns 0 on success, -1 if any queue failed to start.
 */
static int
ice_dcf_start_queues(struct rte_eth_dev *dev)
{
	struct ice_rx_queue *rxq;
	struct ice_tx_queue *txq;
	int nb_rxq = 0;
	int nb_txq, i;

	for (nb_txq = 0; nb_txq < dev->data->nb_tx_queues; nb_txq++) {
		txq = dev->data->tx_queues[nb_txq];
		/* deferred-start queues are started explicitly by the app */
		if (txq->tx_deferred_start)
			continue;
		if (ice_dcf_tx_queue_start(dev, nb_txq) != 0) {
			PMD_DRV_LOG(ERR, "Fail to start queue %u", nb_txq);
			goto tx_err;
		}
	}

	for (nb_rxq = 0; nb_rxq < dev->data->nb_rx_queues; nb_rxq++) {
		rxq = dev->data->rx_queues[nb_rxq];
		if (rxq->rx_deferred_start)
			continue;
		if (ice_dcf_rx_queue_start(dev, nb_rxq) != 0) {
			PMD_DRV_LOG(ERR, "Fail to start queue %u", nb_rxq);
			goto rx_err;
		}
	}

	return 0;

	/* stop the started queues if failed to start all queues */
rx_err:
	for (i = 0; i < nb_rxq; i++)
		ice_dcf_rx_queue_stop(dev, i);
tx_err:
	for (i = 0; i < nb_txq; i++)
		ice_dcf_tx_queue_stop(dev, i);

	return -1;
}
530
/* ethdev dev_start callback for the DCF port.
 *
 * Refuses to start while the DCF is being reset by the PF or when a
 * TM hierarchy exists but was never committed. Otherwise it performs,
 * in order: Rx queue init, optional RSS init, queue configuration via
 * virtchnl, Rx interrupt setup, queue start, (re)adding the primary
 * MAC address and any configured multicast addresses, and finally
 * marks the link up.
 *
 * Returns 0 on success or a negative error code from the first
 * failing step.
 */
static int
ice_dcf_dev_start(struct rte_eth_dev *dev)
{
	struct ice_dcf_adapter *dcf_ad = dev->data->dev_private;
	struct rte_intr_handle *intr_handle = dev->intr_handle;
	struct ice_adapter *ad = &dcf_ad->parent;
	struct ice_dcf_hw *hw = &dcf_ad->real_hw;
	int ret;

	if (hw->resetting) {
		PMD_DRV_LOG(ERR,
			    "The DCF has been reset by PF, please reinit first");
		return -EIO;
	}

	if (hw->tm_conf.root && !hw->tm_conf.committed) {
		PMD_DRV_LOG(ERR,
			"please call hierarchy_commit() before starting the port");
		return -EIO;
	}

	ad->pf.adapter_stopped = 0;

	hw->num_queue_pairs = RTE_MAX(dev->data->nb_rx_queues,
				      dev->data->nb_tx_queues);

	ret = ice_dcf_init_rx_queues(dev);
	if (ret) {
		PMD_DRV_LOG(ERR, "Fail to init queues");
		return ret;
	}

	if (hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
		ret = ice_dcf_init_rss(hw);
		if (ret) {
			PMD_DRV_LOG(ERR, "Failed to configure RSS");
			return ret;
		}
	}

	ret = ice_dcf_configure_queues(hw);
	if (ret) {
		PMD_DRV_LOG(ERR, "Fail to config queues");
		return ret;
	}

	ret = ice_dcf_config_rx_queues_irqs(dev, intr_handle);
	if (ret) {
		PMD_DRV_LOG(ERR, "Fail to config rx queues' irqs");
		return ret;
	}

	if (dev->data->dev_conf.intr_conf.rxq != 0) {
		/* re-enable so the new event fds take effect */
		rte_intr_disable(intr_handle);
		rte_intr_enable(intr_handle);
	}

	ret = ice_dcf_start_queues(dev);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to enable queues");
		return ret;
	}

	ret = ice_dcf_add_del_all_mac_addr(hw, hw->eth_dev->data->mac_addrs,
					   true, VIRTCHNL_ETHER_ADDR_PRIMARY);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to add mac addr");
		return ret;
	}

	if (dcf_ad->mc_addrs_num) {
		/* flush previous addresses */
		ret = dcf_add_del_mc_addr_list(hw, dcf_ad->mc_addrs,
						dcf_ad->mc_addrs_num, true);
		if (ret)
			return ret;
	}


	dev->data->dev_link.link_status = RTE_ETH_LINK_UP;

	return 0;
}
614
/* Disable all queues through one virtchnl request, then release the
 * mbufs of every configured Tx/Rx queue, reset the rings and mark the
 * queues stopped. A failure to disable is only logged; cleanup still
 * proceeds.
 */
static void
ice_dcf_stop_queues(struct rte_eth_dev *dev)
{
	struct ice_dcf_adapter *ad = dev->data->dev_private;
	struct ice_dcf_hw *hw = &ad->real_hw;
	struct ice_rx_queue *rxq;
	struct ice_tx_queue *txq;
	int ret, i;

	/* Stop All queues */
	ret = ice_dcf_disable_queues(hw);
	if (ret)
		PMD_DRV_LOG(WARNING, "Fail to stop queues");

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		if (!txq)
			continue;
		txq->tx_rel_mbufs(txq);
		reset_tx_queue(txq);
		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
	}
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		if (!rxq)
			continue;
		rxq->rx_rel_mbufs(rxq);
		reset_rx_queue(rxq);
		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
	}
}
646
647 static int
648 ice_dcf_dev_stop(struct rte_eth_dev *dev)
649 {
650         struct ice_dcf_adapter *dcf_ad = dev->data->dev_private;
651         struct rte_intr_handle *intr_handle = dev->intr_handle;
652         struct ice_adapter *ad = &dcf_ad->parent;
653         struct ice_dcf_hw *hw = &dcf_ad->real_hw;
654
655         if (ad->pf.adapter_stopped == 1) {
656                 PMD_DRV_LOG(DEBUG, "Port is already stopped");
657                 return 0;
658         }
659
660         /* Stop the VF representors for this device */
661         ice_dcf_vf_repr_stop_all(dcf_ad);
662
663         ice_dcf_stop_queues(dev);
664
665         rte_intr_efd_disable(intr_handle);
666         rte_intr_vec_list_free(intr_handle);
667
668         ice_dcf_add_del_all_mac_addr(&dcf_ad->real_hw,
669                                      dcf_ad->real_hw.eth_dev->data->mac_addrs,
670                                      false, VIRTCHNL_ETHER_ADDR_PRIMARY);
671
672         if (dcf_ad->mc_addrs_num)
673                 /* flush previous addresses */
674                 (void)dcf_add_del_mc_addr_list(&dcf_ad->real_hw,
675                                                                                 dcf_ad->mc_addrs,
676                                                         dcf_ad->mc_addrs_num, false);
677
678         dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
679         ad->pf.adapter_stopped = 1;
680         hw->tm_conf.committed = false;
681
682         return 0;
683 }
684
685 static int
686 ice_dcf_dev_configure(struct rte_eth_dev *dev)
687 {
688         struct ice_dcf_adapter *dcf_ad = dev->data->dev_private;
689         struct ice_adapter *ad = &dcf_ad->parent;
690
691         ad->rx_bulk_alloc_allowed = true;
692         ad->tx_simple_allowed = true;
693
694         if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
695                 dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
696
697         return 0;
698 }
699
/* ethdev dev_infos_get callback: report the DCF port's limits and
 * capabilities (queue counts from the VSI resources, RSS sizes from
 * the VF resources, offload masks, default queue configs and
 * descriptor limits). Always returns 0.
 */
static int
ice_dcf_dev_info_get(struct rte_eth_dev *dev,
		     struct rte_eth_dev_info *dev_info)
{
	struct ice_dcf_adapter *adapter = dev->data->dev_private;
	struct ice_dcf_hw *hw = &adapter->real_hw;

	dev_info->max_mac_addrs = DCF_NUM_MACADDR_MAX;
	dev_info->max_rx_queues = hw->vsi_res->num_queue_pairs;
	dev_info->max_tx_queues = hw->vsi_res->num_queue_pairs;
	dev_info->min_rx_bufsize = ICE_BUF_SIZE_MIN;
	dev_info->max_rx_pktlen = ICE_FRAME_SIZE_MAX;
	dev_info->hash_key_size = hw->vf_res->rss_key_size;
	dev_info->reta_size = hw->vf_res->rss_lut_size;
	dev_info->flow_type_rss_offloads = ICE_RSS_OFFLOAD_ALL;
	/* flow rules do not survive a device restart on the DCF */
	dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
	dev_info->max_mtu = dev_info->max_rx_pktlen - ICE_ETH_OVERHEAD;
	dev_info->min_mtu = RTE_ETHER_MIN_MTU;

	dev_info->rx_offload_capa =
		RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
		RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
		RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
		RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
		RTE_ETH_RX_OFFLOAD_SCATTER |
		RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
		RTE_ETH_RX_OFFLOAD_RSS_HASH;
	dev_info->tx_offload_capa =
		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
		RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
		RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM |
		RTE_ETH_TX_OFFLOAD_TCP_TSO |
		RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
		RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
		RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |
		RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
		RTE_ETH_TX_OFFLOAD_MULTI_SEGS;

	/* Defaults applied when the app passes NULL queue configs */
	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_thresh = {
			.pthresh = ICE_DEFAULT_RX_PTHRESH,
			.hthresh = ICE_DEFAULT_RX_HTHRESH,
			.wthresh = ICE_DEFAULT_RX_WTHRESH,
		},
		.rx_free_thresh = ICE_DEFAULT_RX_FREE_THRESH,
		.rx_drop_en = 0,
		.offloads = 0,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_thresh = {
			.pthresh = ICE_DEFAULT_TX_PTHRESH,
			.hthresh = ICE_DEFAULT_TX_HTHRESH,
			.wthresh = ICE_DEFAULT_TX_WTHRESH,
		},
		.tx_free_thresh = ICE_DEFAULT_TX_FREE_THRESH,
		.tx_rs_thresh = ICE_DEFAULT_TX_RSBIT_THRESH,
		.offloads = 0,
	};

	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = ICE_MAX_RING_DESC,
		.nb_min = ICE_MIN_RING_DESC,
		.nb_align = ICE_ALIGN_RING_DESC,
	};

	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = ICE_MAX_RING_DESC,
		.nb_min = ICE_MIN_RING_DESC,
		.nb_align = ICE_ALIGN_RING_DESC,
	};

	return 0;
}
779
780 static int
781 dcf_config_promisc(struct ice_dcf_adapter *adapter,
782                    bool enable_unicast,
783                    bool enable_multicast)
784 {
785         struct ice_dcf_hw *hw = &adapter->real_hw;
786         struct virtchnl_promisc_info promisc;
787         struct dcf_virtchnl_cmd args;
788         int err;
789
790         promisc.flags = 0;
791         promisc.vsi_id = hw->vsi_res->vsi_id;
792
793         if (enable_unicast)
794                 promisc.flags |= FLAG_VF_UNICAST_PROMISC;
795
796         if (enable_multicast)
797                 promisc.flags |= FLAG_VF_MULTICAST_PROMISC;
798
799         memset(&args, 0, sizeof(args));
800         args.v_op = VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE;
801         args.req_msg = (uint8_t *)&promisc;
802         args.req_msglen = sizeof(promisc);
803
804         err = ice_dcf_execute_virtchnl_cmd(hw, &args);
805         if (err) {
806                 PMD_DRV_LOG(ERR,
807                             "fail to execute command VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE");
808                 return err;
809         }
810
811         adapter->promisc_unicast_enabled = enable_unicast;
812         adapter->promisc_multicast_enabled = enable_multicast;
813         return 0;
814 }
815
816 static int
817 ice_dcf_dev_promiscuous_enable(__rte_unused struct rte_eth_dev *dev)
818 {
819         struct ice_dcf_adapter *adapter = dev->data->dev_private;
820
821         if (adapter->promisc_unicast_enabled) {
822                 PMD_DRV_LOG(INFO, "promiscuous has been enabled");
823                 return 0;
824         }
825
826         return dcf_config_promisc(adapter, true,
827                                   adapter->promisc_multicast_enabled);
828 }
829
830 static int
831 ice_dcf_dev_promiscuous_disable(__rte_unused struct rte_eth_dev *dev)
832 {
833         struct ice_dcf_adapter *adapter = dev->data->dev_private;
834
835         if (!adapter->promisc_unicast_enabled) {
836                 PMD_DRV_LOG(INFO, "promiscuous has been disabled");
837                 return 0;
838         }
839
840         return dcf_config_promisc(adapter, false,
841                                   adapter->promisc_multicast_enabled);
842 }
843
844 static int
845 ice_dcf_dev_allmulticast_enable(__rte_unused struct rte_eth_dev *dev)
846 {
847         struct ice_dcf_adapter *adapter = dev->data->dev_private;
848
849         if (adapter->promisc_multicast_enabled) {
850                 PMD_DRV_LOG(INFO, "allmulticast has been enabled");
851                 return 0;
852         }
853
854         return dcf_config_promisc(adapter, adapter->promisc_unicast_enabled,
855                                   true);
856 }
857
858 static int
859 ice_dcf_dev_allmulticast_disable(__rte_unused struct rte_eth_dev *dev)
860 {
861         struct ice_dcf_adapter *adapter = dev->data->dev_private;
862
863         if (!adapter->promisc_multicast_enabled) {
864                 PMD_DRV_LOG(INFO, "allmulticast has been disabled");
865                 return 0;
866         }
867
868         return dcf_config_promisc(adapter, adapter->promisc_unicast_enabled,
869                                   false);
870 }
871
872 static int
873 dcf_dev_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr,
874                      __rte_unused uint32_t index,
875                      __rte_unused uint32_t pool)
876 {
877         struct ice_dcf_adapter *adapter = dev->data->dev_private;
878         int err;
879
880         if (rte_is_zero_ether_addr(addr)) {
881                 PMD_DRV_LOG(ERR, "Invalid Ethernet Address");
882                 return -EINVAL;
883         }
884
885         err = ice_dcf_add_del_all_mac_addr(&adapter->real_hw, addr, true,
886                                            VIRTCHNL_ETHER_ADDR_EXTRA);
887         if (err) {
888                 PMD_DRV_LOG(ERR, "fail to add MAC address");
889                 return err;
890         }
891
892         return 0;
893 }
894
895 static void
896 dcf_dev_del_mac_addr(struct rte_eth_dev *dev, uint32_t index)
897 {
898         struct ice_dcf_adapter *adapter = dev->data->dev_private;
899         struct rte_ether_addr *addr = &dev->data->mac_addrs[index];
900         int err;
901
902         err = ice_dcf_add_del_all_mac_addr(&adapter->real_hw, addr, false,
903                                            VIRTCHNL_ETHER_ADDR_EXTRA);
904         if (err)
905                 PMD_DRV_LOG(ERR, "fail to remove MAC address");
906 }
907
908 static int
909 dcf_add_del_mc_addr_list(struct ice_dcf_hw *hw,
910                          struct rte_ether_addr *mc_addrs,
911                          uint32_t mc_addrs_num, bool add)
912 {
913         struct virtchnl_ether_addr_list *list;
914         struct dcf_virtchnl_cmd args;
915         uint32_t i;
916         int len, err = 0;
917
918         len = sizeof(struct virtchnl_ether_addr_list);
919         len += sizeof(struct virtchnl_ether_addr) * mc_addrs_num;
920
921         list = rte_zmalloc(NULL, len, 0);
922         if (!list) {
923                 PMD_DRV_LOG(ERR, "fail to allocate memory");
924                 return -ENOMEM;
925         }
926
927         for (i = 0; i < mc_addrs_num; i++) {
928                 memcpy(list->list[i].addr, mc_addrs[i].addr_bytes,
929                        sizeof(list->list[i].addr));
930                 list->list[i].type = VIRTCHNL_ETHER_ADDR_EXTRA;
931         }
932
933         list->vsi_id = hw->vsi_res->vsi_id;
934         list->num_elements = mc_addrs_num;
935
936         memset(&args, 0, sizeof(args));
937         args.v_op = add ? VIRTCHNL_OP_ADD_ETH_ADDR :
938                         VIRTCHNL_OP_DEL_ETH_ADDR;
939         args.req_msg = (uint8_t *)list;
940         args.req_msglen  = len;
941         err = ice_dcf_execute_virtchnl_cmd(hw, &args);
942         if (err)
943                 PMD_DRV_LOG(ERR, "fail to execute command %s",
944                             add ? "OP_ADD_ETHER_ADDRESS" :
945                             "OP_DEL_ETHER_ADDRESS");
946         rte_free(list);
947         return err;
948 }
949
/* .set_mc_addr_list callback: replace the whole multicast filter set.
 * Validates the new list, flushes the previously programmed addresses,
 * then programs the new ones; on failure it tries to restore the old
 * set and leaves the cached list untouched.
 */
static int
dcf_set_mc_addr_list(struct rte_eth_dev *dev,
		     struct rte_ether_addr *mc_addrs,
		     uint32_t mc_addrs_num)
{
	struct ice_dcf_adapter *adapter = dev->data->dev_private;
	struct ice_dcf_hw *hw = &adapter->real_hw;
	uint32_t i;
	int ret;


	/* The adapter caches at most DCF_NUM_MACADDR_MAX addresses. */
	if (mc_addrs_num > DCF_NUM_MACADDR_MAX) {
		PMD_DRV_LOG(ERR,
			    "can't add more than a limited number (%u) of addresses.",
			    (uint32_t)DCF_NUM_MACADDR_MAX);
		return -EINVAL;
	}

	/* Validate the whole list before touching hardware state. */
	for (i = 0; i < mc_addrs_num; i++) {
		if (!rte_is_multicast_ether_addr(&mc_addrs[i])) {
			const uint8_t *mac = mc_addrs[i].addr_bytes;

			PMD_DRV_LOG(ERR,
				    "Invalid mac: %02x:%02x:%02x:%02x:%02x:%02x",
				    mac[0], mac[1], mac[2], mac[3], mac[4],
				    mac[5]);
			return -EINVAL;
		}
	}

	if (adapter->mc_addrs_num) {
		/* flush previous addresses */
		ret = dcf_add_del_mc_addr_list(hw, adapter->mc_addrs,
							adapter->mc_addrs_num, false);
		if (ret)
			return ret;
	}
	/* An empty list just clears the cached set. */
	if (!mc_addrs_num) {
		adapter->mc_addrs_num = 0;
		return 0;
	}

	/* add new ones */
	ret = dcf_add_del_mc_addr_list(hw, mc_addrs, mc_addrs_num, true);
	if (ret) {
		/* if adding mac address list fails, should add the
		 * previous addresses back.
		 */
		if (adapter->mc_addrs_num)
			(void)dcf_add_del_mc_addr_list(hw, adapter->mc_addrs,
						       adapter->mc_addrs_num,
						       true);
		return ret;
	}
	/* Success: cache the new list for the next flush/restore. */
	adapter->mc_addrs_num = mc_addrs_num;
	memcpy(adapter->mc_addrs,
		    mc_addrs, mc_addrs_num * sizeof(*mc_addrs));

	return 0;
}
1010
/* .mac_addr_set callback: replace the primary (default) MAC address.
 * Deletes the old primary filter (best effort) and adds the new one;
 * only when the add succeeds is the cached address updated.
 */
static int
dcf_dev_set_default_mac_addr(struct rte_eth_dev *dev,
			     struct rte_ether_addr *mac_addr)
{
	struct ice_dcf_adapter *adapter = dev->data->dev_private;
	struct ice_dcf_hw *hw = &adapter->real_hw;
	struct rte_ether_addr *old_addr;
	int ret;

	/* The first entry of mac_addrs holds the current primary MAC. */
	old_addr = hw->eth_dev->data->mac_addrs;
	if (rte_is_same_ether_addr(old_addr, mac_addr))
		return 0;

	/* A failed delete is logged but does not abort the switch. */
	ret = ice_dcf_add_del_all_mac_addr(&adapter->real_hw, old_addr, false,
					   VIRTCHNL_ETHER_ADDR_PRIMARY);
	if (ret)
		PMD_DRV_LOG(ERR, "Fail to delete old MAC:"
			    " %02X:%02X:%02X:%02X:%02X:%02X",
			    old_addr->addr_bytes[0],
			    old_addr->addr_bytes[1],
			    old_addr->addr_bytes[2],
			    old_addr->addr_bytes[3],
			    old_addr->addr_bytes[4],
			    old_addr->addr_bytes[5]);

	ret = ice_dcf_add_del_all_mac_addr(&adapter->real_hw, mac_addr, true,
					   VIRTCHNL_ETHER_ADDR_PRIMARY);
	if (ret)
		PMD_DRV_LOG(ERR, "Fail to add new MAC:"
			    " %02X:%02X:%02X:%02X:%02X:%02X",
			    mac_addr->addr_bytes[0],
			    mac_addr->addr_bytes[1],
			    mac_addr->addr_bytes[2],
			    mac_addr->addr_bytes[3],
			    mac_addr->addr_bytes[4],
			    mac_addr->addr_bytes[5]);

	/* Only the add failure is fatal; ret holds the add result here. */
	if (ret)
		return -EIO;

	rte_ether_addr_copy(mac_addr, hw->eth_dev->data->mac_addrs);
	return 0;
}
1054
/* Add or delete a single VLAN filter via the VLAN V2 virtchnl API.
 * Returns 0 on success, -ENOTSUP when 0x8100 filtering is not offered
 * by the PF, otherwise the virtchnl command error.
 */
static int
dcf_add_del_vlan_v2(struct ice_dcf_hw *hw, uint16_t vlanid, bool add)
{
	struct virtchnl_vlan_supported_caps *supported_caps =
			&hw->vlan_v2_caps.filtering.filtering_support;
	struct virtchnl_vlan *vlan_setting;
	struct virtchnl_vlan_filter_list_v2 vlan_filter;
	struct dcf_virtchnl_cmd args;
	uint32_t filtering_caps;
	int err;

	/* Prefer the outer-VLAN capability, fall back to inner. Note that
	 * vlan_setting points INTO vlan_filter here; it is only written
	 * after the memset() below has cleared the whole structure.
	 */
	if (supported_caps->outer) {
		filtering_caps = supported_caps->outer;
		vlan_setting = &vlan_filter.filters[0].outer;
	} else {
		filtering_caps = supported_caps->inner;
		vlan_setting = &vlan_filter.filters[0].inner;
	}

	/* Only 802.1Q (0x8100) filtering is supported by this driver. */
	if (!(filtering_caps & VIRTCHNL_VLAN_ETHERTYPE_8100))
		return -ENOTSUP;

	memset(&vlan_filter, 0, sizeof(vlan_filter));
	vlan_filter.vport_id = hw->vsi_res->vsi_id;
	vlan_filter.num_elements = 1;
	vlan_setting->tpid = RTE_ETHER_TYPE_VLAN;
	vlan_setting->tci = vlanid;

	memset(&args, 0, sizeof(args));
	args.v_op = add ? VIRTCHNL_OP_ADD_VLAN_V2 : VIRTCHNL_OP_DEL_VLAN_V2;
	args.req_msg = (uint8_t *)&vlan_filter;
	args.req_msglen = sizeof(vlan_filter);
	err = ice_dcf_execute_virtchnl_cmd(hw, &args);
	if (err)
		PMD_DRV_LOG(ERR, "fail to execute command %s",
			    add ? "OP_ADD_VLAN_V2" :  "OP_DEL_VLAN_V2");

	return err;
}
1094
1095 static int
1096 dcf_add_del_vlan(struct ice_dcf_hw *hw, uint16_t vlanid, bool add)
1097 {
1098         struct virtchnl_vlan_filter_list *vlan_list;
1099         uint8_t cmd_buffer[sizeof(struct virtchnl_vlan_filter_list) +
1100                                                         sizeof(uint16_t)];
1101         struct dcf_virtchnl_cmd args;
1102         int err;
1103
1104         vlan_list = (struct virtchnl_vlan_filter_list *)cmd_buffer;
1105         vlan_list->vsi_id = hw->vsi_res->vsi_id;
1106         vlan_list->num_elements = 1;
1107         vlan_list->vlan_id[0] = vlanid;
1108
1109         memset(&args, 0, sizeof(args));
1110         args.v_op = add ? VIRTCHNL_OP_ADD_VLAN : VIRTCHNL_OP_DEL_VLAN;
1111         args.req_msg = cmd_buffer;
1112         args.req_msglen = sizeof(cmd_buffer);
1113         err = ice_dcf_execute_virtchnl_cmd(hw, &args);
1114         if (err)
1115                 PMD_DRV_LOG(ERR, "fail to execute command %s",
1116                             add ? "OP_ADD_VLAN" :  "OP_DEL_VLAN");
1117
1118         return err;
1119 }
1120
1121 static int
1122 dcf_dev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1123 {
1124         struct ice_dcf_adapter *adapter = dev->data->dev_private;
1125         struct ice_dcf_hw *hw = &adapter->real_hw;
1126         int err;
1127
1128         if (hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2) {
1129                 err = dcf_add_del_vlan_v2(hw, vlan_id, on);
1130                 if (err)
1131                         return -EIO;
1132                 return 0;
1133         }
1134
1135         if (!(hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN))
1136                 return -ENOTSUP;
1137
1138         err = dcf_add_del_vlan(hw, vlan_id, on);
1139         if (err)
1140                 return -EIO;
1141         return 0;
1142 }
1143
1144 static void
1145 dcf_iterate_vlan_filters_v2(struct rte_eth_dev *dev, bool enable)
1146 {
1147         struct rte_vlan_filter_conf *vfc = &dev->data->vlan_filter_conf;
1148         struct ice_dcf_adapter *adapter = dev->data->dev_private;
1149         struct ice_dcf_hw *hw = &adapter->real_hw;
1150         uint32_t i, j;
1151         uint64_t ids;
1152
1153         for (i = 0; i < RTE_DIM(vfc->ids); i++) {
1154                 if (vfc->ids[i] == 0)
1155                         continue;
1156
1157                 ids = vfc->ids[i];
1158                 for (j = 0; ids != 0 && j < 64; j++, ids >>= 1) {
1159                         if (ids & 1)
1160                                 dcf_add_del_vlan_v2(hw, 64 * i + j, enable);
1161                 }
1162         }
1163 }
1164
/* Enable or disable stripping of 0x8100 VLAN tags via the VLAN V2
 * virtchnl API. Returns -ENOTSUP when neither outer nor inner 0x8100
 * stripping can be toggled.
 */
static int
dcf_config_vlan_strip_v2(struct ice_dcf_hw *hw, bool enable)
{
	struct virtchnl_vlan_supported_caps *stripping_caps =
			&hw->vlan_v2_caps.offloads.stripping_support;
	struct virtchnl_vlan_setting vlan_strip;
	struct dcf_virtchnl_cmd args;
	uint32_t *ethertype;
	int ret;

	/* Pick outer stripping when togglable, else inner. 'ethertype'
	 * points INTO vlan_strip; it is only written after the memset()
	 * below has cleared the structure.
	 */
	if ((stripping_caps->outer & VIRTCHNL_VLAN_ETHERTYPE_8100) &&
	    (stripping_caps->outer & VIRTCHNL_VLAN_TOGGLE))
		ethertype = &vlan_strip.outer_ethertype_setting;
	else if ((stripping_caps->inner & VIRTCHNL_VLAN_ETHERTYPE_8100) &&
		 (stripping_caps->inner & VIRTCHNL_VLAN_TOGGLE))
		ethertype = &vlan_strip.inner_ethertype_setting;
	else
		return -ENOTSUP;

	memset(&vlan_strip, 0, sizeof(vlan_strip));
	vlan_strip.vport_id = hw->vsi_res->vsi_id;
	*ethertype = VIRTCHNL_VLAN_ETHERTYPE_8100;

	memset(&args, 0, sizeof(args));
	args.v_op = enable ? VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 :
			    VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2;
	args.req_msg = (uint8_t *)&vlan_strip;
	args.req_msglen = sizeof(vlan_strip);
	ret = ice_dcf_execute_virtchnl_cmd(hw, &args);
	if (ret)
		PMD_DRV_LOG(ERR, "fail to execute command %s",
			    enable ? "VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2" :
				     "VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2");

	return ret;
}
1201
1202 static int
1203 dcf_dev_vlan_offload_set_v2(struct rte_eth_dev *dev, int mask)
1204 {
1205         struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
1206         struct ice_dcf_adapter *adapter = dev->data->dev_private;
1207         struct ice_dcf_hw *hw = &adapter->real_hw;
1208         bool enable;
1209         int err;
1210
1211         if (mask & RTE_ETH_VLAN_FILTER_MASK) {
1212                 enable = !!(rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER);
1213
1214                 dcf_iterate_vlan_filters_v2(dev, enable);
1215         }
1216
1217         if (mask & RTE_ETH_VLAN_STRIP_MASK) {
1218                 enable = !!(rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
1219
1220                 err = dcf_config_vlan_strip_v2(hw, enable);
1221                 /* If not support, the stripping is already disabled by PF */
1222                 if (err == -ENOTSUP && !enable)
1223                         err = 0;
1224                 if (err)
1225                         return -EIO;
1226         }
1227
1228         return 0;
1229 }
1230
1231 static int
1232 dcf_enable_vlan_strip(struct ice_dcf_hw *hw)
1233 {
1234         struct dcf_virtchnl_cmd args;
1235         int ret;
1236
1237         memset(&args, 0, sizeof(args));
1238         args.v_op = VIRTCHNL_OP_ENABLE_VLAN_STRIPPING;
1239         ret = ice_dcf_execute_virtchnl_cmd(hw, &args);
1240         if (ret)
1241                 PMD_DRV_LOG(ERR,
1242                             "Failed to execute command of OP_ENABLE_VLAN_STRIPPING");
1243
1244         return ret;
1245 }
1246
1247 static int
1248 dcf_disable_vlan_strip(struct ice_dcf_hw *hw)
1249 {
1250         struct dcf_virtchnl_cmd args;
1251         int ret;
1252
1253         memset(&args, 0, sizeof(args));
1254         args.v_op = VIRTCHNL_OP_DISABLE_VLAN_STRIPPING;
1255         ret = ice_dcf_execute_virtchnl_cmd(hw, &args);
1256         if (ret)
1257                 PMD_DRV_LOG(ERR,
1258                             "Failed to execute command of OP_DISABLE_VLAN_STRIPPING");
1259
1260         return ret;
1261 }
1262
1263 static int
1264 dcf_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1265 {
1266         struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
1267         struct ice_dcf_adapter *adapter = dev->data->dev_private;
1268         struct ice_dcf_hw *hw = &adapter->real_hw;
1269         int err;
1270
1271         if (hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2)
1272                 return dcf_dev_vlan_offload_set_v2(dev, mask);
1273
1274         if (!(hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN))
1275                 return -ENOTSUP;
1276
1277         /* Vlan stripping setting */
1278         if (mask & RTE_ETH_VLAN_STRIP_MASK) {
1279                 /* Enable or disable VLAN stripping */
1280                 if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
1281                         err = dcf_enable_vlan_strip(hw);
1282                 else
1283                         err = dcf_disable_vlan_strip(hw);
1284
1285                 if (err)
1286                         return -EIO;
1287         }
1288         return 0;
1289 }
1290
1291 static int
1292 ice_dcf_dev_flow_ops_get(struct rte_eth_dev *dev,
1293                          const struct rte_flow_ops **ops)
1294 {
1295         if (!dev)
1296                 return -EINVAL;
1297
1298         *ops = &ice_flow_ops;
1299         return 0;
1300 }
1301
1302 static int
1303 ice_dcf_dev_rss_reta_update(struct rte_eth_dev *dev,
1304                         struct rte_eth_rss_reta_entry64 *reta_conf,
1305                         uint16_t reta_size)
1306 {
1307         struct ice_dcf_adapter *adapter = dev->data->dev_private;
1308         struct ice_dcf_hw *hw = &adapter->real_hw;
1309         uint8_t *lut;
1310         uint16_t i, idx, shift;
1311         int ret;
1312
1313         if (!(hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
1314                 return -ENOTSUP;
1315
1316         if (reta_size != hw->vf_res->rss_lut_size) {
1317                 PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
1318                         "(%d) doesn't match the number of hardware can "
1319                         "support (%d)", reta_size, hw->vf_res->rss_lut_size);
1320                 return -EINVAL;
1321         }
1322
1323         lut = rte_zmalloc("rss_lut", reta_size, 0);
1324         if (!lut) {
1325                 PMD_DRV_LOG(ERR, "No memory can be allocated");
1326                 return -ENOMEM;
1327         }
1328         /* store the old lut table temporarily */
1329         rte_memcpy(lut, hw->rss_lut, reta_size);
1330
1331         for (i = 0; i < reta_size; i++) {
1332                 idx = i / RTE_ETH_RETA_GROUP_SIZE;
1333                 shift = i % RTE_ETH_RETA_GROUP_SIZE;
1334                 if (reta_conf[idx].mask & (1ULL << shift))
1335                         lut[i] = reta_conf[idx].reta[shift];
1336         }
1337
1338         rte_memcpy(hw->rss_lut, lut, reta_size);
1339         /* send virtchnnl ops to configure rss*/
1340         ret = ice_dcf_configure_rss_lut(hw);
1341         if (ret) /* revert back */
1342                 rte_memcpy(hw->rss_lut, lut, reta_size);
1343         rte_free(lut);
1344
1345         return ret;
1346 }
1347
1348 static int
1349 ice_dcf_dev_rss_reta_query(struct rte_eth_dev *dev,
1350                        struct rte_eth_rss_reta_entry64 *reta_conf,
1351                        uint16_t reta_size)
1352 {
1353         struct ice_dcf_adapter *adapter = dev->data->dev_private;
1354         struct ice_dcf_hw *hw = &adapter->real_hw;
1355         uint16_t i, idx, shift;
1356
1357         if (!(hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
1358                 return -ENOTSUP;
1359
1360         if (reta_size != hw->vf_res->rss_lut_size) {
1361                 PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
1362                         "(%d) doesn't match the number of hardware can "
1363                         "support (%d)", reta_size, hw->vf_res->rss_lut_size);
1364                 return -EINVAL;
1365         }
1366
1367         for (i = 0; i < reta_size; i++) {
1368                 idx = i / RTE_ETH_RETA_GROUP_SIZE;
1369                 shift = i % RTE_ETH_RETA_GROUP_SIZE;
1370                 if (reta_conf[idx].mask & (1ULL << shift))
1371                         reta_conf[idx].reta[shift] = hw->rss_lut[i];
1372         }
1373
1374         return 0;
1375 }
1376
1377 static int
1378 ice_dcf_dev_rss_hash_update(struct rte_eth_dev *dev,
1379                         struct rte_eth_rss_conf *rss_conf)
1380 {
1381         struct ice_dcf_adapter *adapter = dev->data->dev_private;
1382         struct ice_dcf_hw *hw = &adapter->real_hw;
1383
1384         if (!(hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
1385                 return -ENOTSUP;
1386
1387         /* HENA setting, it is enabled by default, no change */
1388         if (!rss_conf->rss_key || rss_conf->rss_key_len == 0) {
1389                 PMD_DRV_LOG(DEBUG, "No key to be configured");
1390                 return 0;
1391         } else if (rss_conf->rss_key_len != hw->vf_res->rss_key_size) {
1392                 PMD_DRV_LOG(ERR, "The size of hash key configured "
1393                         "(%d) doesn't match the size of hardware can "
1394                         "support (%d)", rss_conf->rss_key_len,
1395                         hw->vf_res->rss_key_size);
1396                 return -EINVAL;
1397         }
1398
1399         rte_memcpy(hw->rss_key, rss_conf->rss_key, rss_conf->rss_key_len);
1400
1401         return ice_dcf_configure_rss_key(hw);
1402 }
1403
1404 static int
1405 ice_dcf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
1406                           struct rte_eth_rss_conf *rss_conf)
1407 {
1408         struct ice_dcf_adapter *adapter = dev->data->dev_private;
1409         struct ice_dcf_hw *hw = &adapter->real_hw;
1410
1411         if (!(hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
1412                 return -ENOTSUP;
1413
1414         /* Just set it to default value now. */
1415         rss_conf->rss_hf = ICE_RSS_OFFLOAD_ALL;
1416
1417         if (!rss_conf->rss_key)
1418                 return 0;
1419
1420         rss_conf->rss_key_len = hw->vf_res->rss_key_size;
1421         rte_memcpy(rss_conf->rss_key, hw->rss_key, rss_conf->rss_key_len);
1422
1423         return 0;
1424 }
1425
1426 #define ICE_DCF_32_BIT_WIDTH (CHAR_BIT * 4)
1427 #define ICE_DCF_48_BIT_WIDTH (CHAR_BIT * 6)
1428 #define ICE_DCF_48_BIT_MASK  RTE_LEN2MASK(ICE_DCF_48_BIT_WIDTH, uint64_t)
1429
1430 static void
1431 ice_dcf_stat_update_48(uint64_t *offset, uint64_t *stat)
1432 {
1433         if (*stat >= *offset)
1434                 *stat = *stat - *offset;
1435         else
1436                 *stat = (uint64_t)((*stat +
1437                         ((uint64_t)1 << ICE_DCF_48_BIT_WIDTH)) - *offset);
1438
1439         *stat &= ICE_DCF_48_BIT_MASK;
1440 }
1441
1442 static void
1443 ice_dcf_stat_update_32(uint64_t *offset, uint64_t *stat)
1444 {
1445         if (*stat >= *offset)
1446                 *stat = (uint64_t)(*stat - *offset);
1447         else
1448                 *stat = (uint64_t)((*stat +
1449                         ((uint64_t)1 << ICE_DCF_32_BIT_WIDTH)) - *offset);
1450 }
1451
/* Convert the raw counters in 'nes' into deltas relative to the
 * snapshot 'oes' taken at the last stats reset. Byte and packet
 * counters are treated as 48-bit values, error/discard counters as
 * 32-bit (see the stat_update helpers above).
 */
static void
ice_dcf_update_stats(struct virtchnl_eth_stats *oes,
		     struct virtchnl_eth_stats *nes)
{
	ice_dcf_stat_update_48(&oes->rx_bytes, &nes->rx_bytes);
	ice_dcf_stat_update_48(&oes->rx_unicast, &nes->rx_unicast);
	ice_dcf_stat_update_48(&oes->rx_multicast, &nes->rx_multicast);
	ice_dcf_stat_update_48(&oes->rx_broadcast, &nes->rx_broadcast);
	ice_dcf_stat_update_32(&oes->rx_discards, &nes->rx_discards);
	ice_dcf_stat_update_48(&oes->tx_bytes, &nes->tx_bytes);
	ice_dcf_stat_update_48(&oes->tx_unicast, &nes->tx_unicast);
	ice_dcf_stat_update_48(&oes->tx_multicast, &nes->tx_multicast);
	ice_dcf_stat_update_48(&oes->tx_broadcast, &nes->tx_broadcast);
	ice_dcf_stat_update_32(&oes->tx_errors, &nes->tx_errors);
	ice_dcf_stat_update_32(&oes->tx_discards, &nes->tx_discards);
}
1468
1469
1470 static int
1471 ice_dcf_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
1472 {
1473         struct ice_dcf_adapter *ad = dev->data->dev_private;
1474         struct ice_dcf_hw *hw = &ad->real_hw;
1475         struct virtchnl_eth_stats pstats;
1476         int ret;
1477
1478         if (hw->resetting) {
1479                 PMD_DRV_LOG(ERR,
1480                             "The DCF has been reset by PF, please reinit first");
1481                 return -EIO;
1482         }
1483
1484         ret = ice_dcf_query_stats(hw, &pstats);
1485         if (ret == 0) {
1486                 ice_dcf_update_stats(&hw->eth_stats_offset, &pstats);
1487                 stats->ipackets = pstats.rx_unicast + pstats.rx_multicast +
1488                                 pstats.rx_broadcast - pstats.rx_discards;
1489                 stats->opackets = pstats.tx_broadcast + pstats.tx_multicast +
1490                                                 pstats.tx_unicast;
1491                 stats->imissed = pstats.rx_discards;
1492                 stats->oerrors = pstats.tx_errors + pstats.tx_discards;
1493                 stats->ibytes = pstats.rx_bytes;
1494                 stats->ibytes -= stats->ipackets * RTE_ETHER_CRC_LEN;
1495                 stats->obytes = pstats.tx_bytes;
1496         } else {
1497                 PMD_DRV_LOG(ERR, "Get statistics failed");
1498         }
1499         return ret;
1500 }
1501
1502 static int
1503 ice_dcf_stats_reset(struct rte_eth_dev *dev)
1504 {
1505         struct ice_dcf_adapter *ad = dev->data->dev_private;
1506         struct ice_dcf_hw *hw = &ad->real_hw;
1507         struct virtchnl_eth_stats pstats;
1508         int ret;
1509
1510         if (hw->resetting)
1511                 return 0;
1512
1513         /* read stat values to clear hardware registers */
1514         ret = ice_dcf_query_stats(hw, &pstats);
1515         if (ret != 0)
1516                 return ret;
1517
1518         /* set stats offset base on current values */
1519         hw->eth_stats_offset = pstats;
1520
1521         return 0;
1522 }
1523
1524 static int ice_dcf_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
1525                                       struct rte_eth_xstat_name *xstats_names,
1526                                       __rte_unused unsigned int limit)
1527 {
1528         unsigned int i;
1529
1530         if (xstats_names != NULL)
1531                 for (i = 0; i < ICE_DCF_NB_XSTATS; i++) {
1532                         snprintf(xstats_names[i].name,
1533                                 sizeof(xstats_names[i].name),
1534                                 "%s", rte_ice_dcf_stats_strings[i].name);
1535                 }
1536         return ICE_DCF_NB_XSTATS;
1537 }
1538
1539 static int ice_dcf_xstats_get(struct rte_eth_dev *dev,
1540                                  struct rte_eth_xstat *xstats, unsigned int n)
1541 {
1542         int ret;
1543         unsigned int i;
1544         struct ice_dcf_adapter *adapter =
1545                 ICE_DCF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1546         struct ice_dcf_hw *hw = &adapter->real_hw;
1547         struct virtchnl_eth_stats *postats = &hw->eth_stats_offset;
1548         struct virtchnl_eth_stats pnstats;
1549
1550         if (n < ICE_DCF_NB_XSTATS)
1551                 return ICE_DCF_NB_XSTATS;
1552
1553         ret = ice_dcf_query_stats(hw, &pnstats);
1554         if (ret != 0)
1555                 return 0;
1556
1557         if (!xstats)
1558                 return 0;
1559
1560         ice_dcf_update_stats(postats, &pnstats);
1561
1562         /* loop over xstats array and values from pstats */
1563         for (i = 0; i < ICE_DCF_NB_XSTATS; i++) {
1564                 xstats[i].id = i;
1565                 xstats[i].value = *(uint64_t *)(((char *)&pnstats) +
1566                         rte_ice_dcf_stats_strings[i].offset);
1567         }
1568
1569         return ICE_DCF_NB_XSTATS;
1570 }
1571
1572 static void
1573 ice_dcf_free_repr_info(struct ice_dcf_adapter *dcf_adapter)
1574 {
1575         if (dcf_adapter->repr_infos) {
1576                 rte_free(dcf_adapter->repr_infos);
1577                 dcf_adapter->repr_infos = NULL;
1578         }
1579 }
1580
1581 static int
1582 ice_dcf_init_repr_info(struct ice_dcf_adapter *dcf_adapter)
1583 {
1584         dcf_adapter->repr_infos =
1585                         rte_calloc("ice_dcf_rep_info",
1586                                    dcf_adapter->real_hw.num_vfs,
1587                                    sizeof(dcf_adapter->repr_infos[0]), 0);
1588         if (!dcf_adapter->repr_infos) {
1589                 PMD_DRV_LOG(ERR, "Failed to alloc memory for VF representors\n");
1590                 return -ENOMEM;
1591         }
1592
1593         return 0;
1594 }
1595
/* .dev_close callback: stop the port and tear down DCF resources.
 * Teardown order matters: stop traffic, free queues, then release the
 * representors, the parent adapter and finally the DCF hardware.
 */
static int
ice_dcf_dev_close(struct rte_eth_dev *dev)
{
	struct ice_dcf_adapter *adapter = dev->data->dev_private;

	/* Shared resources are owned by the primary process only. */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	(void)ice_dcf_dev_stop(dev);

	ice_free_queues(dev);

	ice_dcf_free_repr_info(adapter);
	ice_dcf_uninit_parent_adapter(dev);
	ice_dcf_uninit_hw(dev, &adapter->real_hw);

	return 0;
}
1614
1615 int
1616 ice_dcf_link_update(struct rte_eth_dev *dev,
1617                     __rte_unused int wait_to_complete)
1618 {
1619         struct ice_dcf_adapter *ad = dev->data->dev_private;
1620         struct ice_dcf_hw *hw = &ad->real_hw;
1621         struct rte_eth_link new_link;
1622
1623         memset(&new_link, 0, sizeof(new_link));
1624
1625         /* Only read status info stored in VF, and the info is updated
1626          * when receive LINK_CHANGE event from PF by virtchnl.
1627          */
1628         switch (hw->link_speed) {
1629         case 10:
1630                 new_link.link_speed = RTE_ETH_SPEED_NUM_10M;
1631                 break;
1632         case 100:
1633                 new_link.link_speed = RTE_ETH_SPEED_NUM_100M;
1634                 break;
1635         case 1000:
1636                 new_link.link_speed = RTE_ETH_SPEED_NUM_1G;
1637                 break;
1638         case 10000:
1639                 new_link.link_speed = RTE_ETH_SPEED_NUM_10G;
1640                 break;
1641         case 20000:
1642                 new_link.link_speed = RTE_ETH_SPEED_NUM_20G;
1643                 break;
1644         case 25000:
1645                 new_link.link_speed = RTE_ETH_SPEED_NUM_25G;
1646                 break;
1647         case 40000:
1648                 new_link.link_speed = RTE_ETH_SPEED_NUM_40G;
1649                 break;
1650         case 50000:
1651                 new_link.link_speed = RTE_ETH_SPEED_NUM_50G;
1652                 break;
1653         case 100000:
1654                 new_link.link_speed = RTE_ETH_SPEED_NUM_100G;
1655                 break;
1656         default:
1657                 new_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
1658                 break;
1659         }
1660
1661         new_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
1662         new_link.link_status = hw->link_up ? RTE_ETH_LINK_UP :
1663                                              RTE_ETH_LINK_DOWN;
1664         new_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
1665                                 RTE_ETH_LINK_SPEED_FIXED);
1666
1667         return rte_eth_linkstatus_set(dev, &new_link);
1668 }
1669
1670 static int
1671 ice_dcf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu __rte_unused)
1672 {
1673         /* mtu setting is forbidden if port is start */
1674         if (dev->data->dev_started != 0) {
1675                 PMD_DRV_LOG(ERR, "port %d must be stopped before configuration",
1676                             dev->data->port_id);
1677                 return -EBUSY;
1678         }
1679
1680         return 0;
1681 }
1682
1683 bool
1684 ice_dcf_adminq_need_retry(struct ice_adapter *ad)
1685 {
1686         return ad->hw.dcf_enabled &&
1687                !__atomic_load_n(&ad->dcf_state_on, __ATOMIC_RELAXED);
1688 }
1689
1690 /* Add UDP tunneling port */
1691 static int
1692 ice_dcf_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
1693                                 struct rte_eth_udp_tunnel *udp_tunnel)
1694 {
1695         struct ice_dcf_adapter *adapter = dev->data->dev_private;
1696         struct ice_adapter *parent_adapter = &adapter->parent;
1697         struct ice_hw *parent_hw = &parent_adapter->hw;
1698         int ret = 0;
1699
1700         if (!udp_tunnel)
1701                 return -EINVAL;
1702
1703         switch (udp_tunnel->prot_type) {
1704         case RTE_ETH_TUNNEL_TYPE_VXLAN:
1705                 ret = ice_create_tunnel(parent_hw, TNL_VXLAN,
1706                                         udp_tunnel->udp_port);
1707                 break;
1708         case RTE_ETH_TUNNEL_TYPE_ECPRI:
1709                 ret = ice_create_tunnel(parent_hw, TNL_ECPRI,
1710                                         udp_tunnel->udp_port);
1711                 break;
1712         default:
1713                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
1714                 ret = -EINVAL;
1715                 break;
1716         }
1717
1718         return ret;
1719 }
1720
1721 /* Delete UDP tunneling port */
1722 static int
1723 ice_dcf_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
1724                                 struct rte_eth_udp_tunnel *udp_tunnel)
1725 {
1726         struct ice_dcf_adapter *adapter = dev->data->dev_private;
1727         struct ice_adapter *parent_adapter = &adapter->parent;
1728         struct ice_hw *parent_hw = &parent_adapter->hw;
1729         int ret = 0;
1730
1731         if (!udp_tunnel)
1732                 return -EINVAL;
1733
1734         switch (udp_tunnel->prot_type) {
1735         case RTE_ETH_TUNNEL_TYPE_VXLAN:
1736         case RTE_ETH_TUNNEL_TYPE_ECPRI:
1737                 ret = ice_destroy_tunnel(parent_hw, udp_tunnel->udp_port, 0);
1738                 break;
1739         default:
1740                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
1741                 ret = -EINVAL;
1742                 break;
1743         }
1744
1745         return ret;
1746 }
1747
1748 static int
1749 ice_dcf_tm_ops_get(struct rte_eth_dev *dev __rte_unused,
1750                 void *arg)
1751 {
1752         if (!arg)
1753                 return -EINVAL;
1754
1755         *(const void **)arg = &ice_dcf_tm_ops;
1756
1757         return 0;
1758 }
1759
/* Re-initialize the DCF hardware with a full uninit/init cycle.
 * Used after a PF-triggered reset to force an extra DCF disable/enable
 * round-trip (see the caller's workaround comment).
 */
static inline void
ice_dcf_reset_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw)
{
	ice_dcf_uninit_hw(eth_dev, hw);
	ice_dcf_init_hw(eth_dev, hw);
}
1766
1767 /* Check if reset has been triggered by PF */
1768 static inline bool
1769 ice_dcf_is_reset(struct rte_eth_dev *dev)
1770 {
1771         struct ice_dcf_adapter *ad = dev->data->dev_private;
1772         struct iavf_hw *hw = &ad->real_hw.avf;
1773
1774         return !(IAVF_READ_REG(hw, IAVF_VF_ARQLEN1) &
1775                  IAVF_VF_ARQLEN1_ARQENABLE_MASK);
1776 }
1777
1778 static int
1779 ice_dcf_dev_reset(struct rte_eth_dev *dev)
1780 {
1781         struct ice_dcf_adapter *ad = dev->data->dev_private;
1782         struct ice_dcf_hw *hw = &ad->real_hw;
1783         int ret;
1784
1785         if (ice_dcf_is_reset(dev)) {
1786                 if (!ad->real_hw.resetting)
1787                         ad->real_hw.resetting = true;
1788                 PMD_DRV_LOG(ERR, "The DCF has been reset by PF");
1789
1790                 /*
1791                  * Simply reset hw to trigger an additional DCF enable/disable
1792                  * cycle which help to workaround the issue that kernel driver
1793                  * may not clean up resource during previous reset.
1794                  */
1795                 ice_dcf_reset_hw(dev, hw);
1796         }
1797
1798         ret = ice_dcf_dev_uninit(dev);
1799         if (ret)
1800                 return ret;
1801
1802         ret = ice_dcf_dev_init(dev);
1803
1804         return ret;
1805 }
1806
/* Report the packet types the DCF Rx path can deliver.
 * The returned array is terminated by the RTE_PTYPE_UNKNOWN sentinel.
 */
static const uint32_t *
ice_dcf_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_L4_FRAG,
		RTE_PTYPE_L4_ICMP,
		RTE_PTYPE_L4_NONFRAG,
		RTE_PTYPE_L4_SCTP,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_UNKNOWN
	};
	return ptypes;
}
1823
/* ethdev callback table for the DCF port itself (representor ports use
 * a separate ops table defined in ice_dcf_vf_representor.c).
 */
static const struct eth_dev_ops ice_dcf_eth_dev_ops = {
	/* device lifecycle */
	.dev_start                = ice_dcf_dev_start,
	.dev_stop                 = ice_dcf_dev_stop,
	.dev_close                = ice_dcf_dev_close,
	.dev_reset                = ice_dcf_dev_reset,
	.dev_configure            = ice_dcf_dev_configure,
	.dev_infos_get            = ice_dcf_dev_info_get,
	.dev_supported_ptypes_get = ice_dcf_dev_supported_ptypes_get,
	/* queue setup and control */
	.rx_queue_setup           = ice_rx_queue_setup,
	.tx_queue_setup           = ice_tx_queue_setup,
	.rx_queue_release         = ice_dev_rx_queue_release,
	.tx_queue_release         = ice_dev_tx_queue_release,
	.rx_queue_start           = ice_dcf_rx_queue_start,
	.tx_queue_start           = ice_dcf_tx_queue_start,
	.rx_queue_stop            = ice_dcf_rx_queue_stop,
	.tx_queue_stop            = ice_dcf_tx_queue_stop,
	.rxq_info_get             = ice_rxq_info_get,
	.txq_info_get             = ice_txq_info_get,
	.get_monitor_addr         = ice_get_monitor_addr,
	/* link and statistics */
	.link_update              = ice_dcf_link_update,
	.stats_get                = ice_dcf_stats_get,
	.stats_reset              = ice_dcf_stats_reset,
	.xstats_get               = ice_dcf_xstats_get,
	.xstats_get_names         = ice_dcf_xstats_get_names,
	.xstats_reset             = ice_dcf_stats_reset,
	/* promiscuous / multicast / MAC / VLAN filtering */
	.promiscuous_enable       = ice_dcf_dev_promiscuous_enable,
	.promiscuous_disable      = ice_dcf_dev_promiscuous_disable,
	.allmulticast_enable      = ice_dcf_dev_allmulticast_enable,
	.allmulticast_disable     = ice_dcf_dev_allmulticast_disable,
	.mac_addr_add             = dcf_dev_add_mac_addr,
	.mac_addr_remove          = dcf_dev_del_mac_addr,
	.set_mc_addr_list         = dcf_set_mc_addr_list,
	.mac_addr_set             = dcf_dev_set_default_mac_addr,
	.vlan_filter_set          = dcf_dev_vlan_filter_set,
	.vlan_offload_set         = dcf_dev_vlan_offload_set,
	/* flow, tunnel, TM, RSS and misc */
	.flow_ops_get             = ice_dcf_dev_flow_ops_get,
	.udp_tunnel_port_add      = ice_dcf_dev_udp_tunnel_port_add,
	.udp_tunnel_port_del      = ice_dcf_dev_udp_tunnel_port_del,
	.tm_ops_get               = ice_dcf_tm_ops_get,
	.reta_update              = ice_dcf_dev_rss_reta_update,
	.reta_query               = ice_dcf_dev_rss_reta_query,
	.rss_hash_update          = ice_dcf_dev_rss_hash_update,
	.rss_hash_conf_get        = ice_dcf_dev_rss_hash_conf_get,
	.tx_done_cleanup          = ice_tx_done_cleanup,
	.mtu_set                  = ice_dcf_dev_mtu_set,
};
1870
1871 static int
1872 ice_dcf_dev_init(struct rte_eth_dev *eth_dev)
1873 {
1874         struct ice_dcf_adapter *adapter = eth_dev->data->dev_private;
1875         struct ice_adapter *parent_adapter = &adapter->parent;
1876
1877         eth_dev->dev_ops = &ice_dcf_eth_dev_ops;
1878         eth_dev->rx_pkt_burst = ice_dcf_recv_pkts;
1879         eth_dev->tx_pkt_burst = ice_dcf_xmit_pkts;
1880
1881         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1882                 return 0;
1883
1884         adapter->real_hw.vc_event_msg_cb = ice_dcf_handle_pf_event_msg;
1885         if (ice_dcf_init_hw(eth_dev, &adapter->real_hw) != 0) {
1886                 PMD_INIT_LOG(ERR, "Failed to init DCF hardware");
1887                 __atomic_store_n(&parent_adapter->dcf_state_on, false,
1888                                  __ATOMIC_RELAXED);
1889                 return -1;
1890         }
1891
1892         __atomic_store_n(&parent_adapter->dcf_state_on, true, __ATOMIC_RELAXED);
1893
1894         if (ice_dcf_init_parent_adapter(eth_dev) != 0) {
1895                 PMD_INIT_LOG(ERR, "Failed to init DCF parent adapter");
1896                 ice_dcf_uninit_hw(eth_dev, &adapter->real_hw);
1897                 return -1;
1898         }
1899
1900         dcf_config_promisc(adapter, false, false);
1901         return 0;
1902 }
1903
/* ethdev uninit callback: all teardown is delegated to dev_close. */
static int
ice_dcf_dev_uninit(struct rte_eth_dev *eth_dev)
{
	ice_dcf_dev_close(eth_dev);

	return 0;
}
1911
1912 static int
1913 ice_dcf_cap_check_handler(__rte_unused const char *key,
1914                           const char *value, __rte_unused void *opaque)
1915 {
1916         if (strcmp(value, "dcf"))
1917                 return -1;
1918
1919         return 0;
1920 }
1921
1922 static int
1923 ice_dcf_cap_selected(struct rte_devargs *devargs)
1924 {
1925         struct rte_kvargs *kvlist;
1926         const char *key = "cap";
1927         int ret = 0;
1928
1929         if (devargs == NULL)
1930                 return 0;
1931
1932         kvlist = rte_kvargs_parse(devargs->args, NULL);
1933         if (kvlist == NULL)
1934                 return 0;
1935
1936         if (!rte_kvargs_count(kvlist, key))
1937                 goto exit;
1938
1939         /* dcf capability selected when there's a key-value pair: cap=dcf */
1940         if (rte_kvargs_process(kvlist, key,
1941                                ice_dcf_cap_check_handler, NULL) < 0)
1942                 goto exit;
1943
1944         ret = 1;
1945
1946 exit:
1947         rte_kvargs_free(kvlist);
1948         return ret;
1949 }
1950
1951 static int
1952 eth_ice_dcf_pci_probe(__rte_unused struct rte_pci_driver *pci_drv,
1953                       struct rte_pci_device *pci_dev)
1954 {
1955         struct rte_eth_devargs eth_da = { .nb_representor_ports = 0 };
1956         struct ice_dcf_vf_repr_param repr_param;
1957         char repr_name[RTE_ETH_NAME_MAX_LEN];
1958         struct ice_dcf_adapter *dcf_adapter;
1959         struct rte_eth_dev *dcf_ethdev;
1960         uint16_t dcf_vsi_id;
1961         int i, ret;
1962
1963         if (!ice_dcf_cap_selected(pci_dev->device.devargs))
1964                 return 1;
1965
1966         ret = rte_eth_devargs_parse(pci_dev->device.devargs->args, &eth_da);
1967         if (ret)
1968                 return ret;
1969
1970         ret = rte_eth_dev_pci_generic_probe(pci_dev,
1971                                             sizeof(struct ice_dcf_adapter),
1972                                             ice_dcf_dev_init);
1973         if (ret || !eth_da.nb_representor_ports)
1974                 return ret;
1975         if (eth_da.type != RTE_ETH_REPRESENTOR_VF)
1976                 return -ENOTSUP;
1977
1978         dcf_ethdev = rte_eth_dev_allocated(pci_dev->device.name);
1979         if (dcf_ethdev == NULL)
1980                 return -ENODEV;
1981
1982         dcf_adapter = dcf_ethdev->data->dev_private;
1983         ret = ice_dcf_init_repr_info(dcf_adapter);
1984         if (ret)
1985                 return ret;
1986
1987         if (eth_da.nb_representor_ports > dcf_adapter->real_hw.num_vfs ||
1988             eth_da.nb_representor_ports >= RTE_MAX_ETHPORTS) {
1989                 PMD_DRV_LOG(ERR, "the number of port representors is too large: %u",
1990                             eth_da.nb_representor_ports);
1991                 ice_dcf_free_repr_info(dcf_adapter);
1992                 return -EINVAL;
1993         }
1994
1995         dcf_vsi_id = dcf_adapter->real_hw.vsi_id | VIRTCHNL_DCF_VF_VSI_VALID;
1996
1997         repr_param.dcf_eth_dev = dcf_ethdev;
1998         repr_param.switch_domain_id = 0;
1999
2000         for (i = 0; i < eth_da.nb_representor_ports; i++) {
2001                 uint16_t vf_id = eth_da.representor_ports[i];
2002                 struct rte_eth_dev *vf_rep_eth_dev;
2003
2004                 if (vf_id >= dcf_adapter->real_hw.num_vfs) {
2005                         PMD_DRV_LOG(ERR, "VF ID %u is out of range (0 ~ %u)",
2006                                     vf_id, dcf_adapter->real_hw.num_vfs - 1);
2007                         ret = -EINVAL;
2008                         break;
2009                 }
2010
2011                 if (dcf_adapter->real_hw.vf_vsi_map[vf_id] == dcf_vsi_id) {
2012                         PMD_DRV_LOG(ERR, "VF ID %u is DCF's ID.\n", vf_id);
2013                         ret = -EINVAL;
2014                         break;
2015                 }
2016
2017                 repr_param.vf_id = vf_id;
2018                 snprintf(repr_name, sizeof(repr_name), "net_%s_representor_%u",
2019                          pci_dev->device.name, vf_id);
2020                 ret = rte_eth_dev_create(&pci_dev->device, repr_name,
2021                                          sizeof(struct ice_dcf_vf_repr),
2022                                          NULL, NULL, ice_dcf_vf_repr_init,
2023                                          &repr_param);
2024                 if (ret) {
2025                         PMD_DRV_LOG(ERR, "failed to create DCF VF representor %s",
2026                                     repr_name);
2027                         break;
2028                 }
2029
2030                 vf_rep_eth_dev = rte_eth_dev_allocated(repr_name);
2031                 if (!vf_rep_eth_dev) {
2032                         PMD_DRV_LOG(ERR,
2033                                     "Failed to find the ethdev for DCF VF representor: %s",
2034                                     repr_name);
2035                         ret = -ENODEV;
2036                         break;
2037                 }
2038
2039                 dcf_adapter->repr_infos[vf_id].vf_rep_eth_dev = vf_rep_eth_dev;
2040                 dcf_adapter->num_reprs++;
2041         }
2042
2043         return ret;
2044 }
2045
2046 static int
2047 eth_ice_dcf_pci_remove(struct rte_pci_device *pci_dev)
2048 {
2049         struct rte_eth_dev *eth_dev;
2050
2051         eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
2052         if (!eth_dev)
2053                 return 0;
2054
2055         if (eth_dev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR)
2056                 return rte_eth_dev_pci_generic_remove(pci_dev,
2057                                                       ice_dcf_vf_repr_uninit);
2058         else
2059                 return rte_eth_dev_pci_generic_remove(pci_dev,
2060                                                       ice_dcf_dev_uninit);
2061 }
2062
/* PCI IDs handled by this PMD: the Intel adaptive VF device only. */
static const struct rte_pci_id pci_id_ice_dcf_map[] = {
	{ RTE_PCI_DEVICE(IAVF_INTEL_VENDOR_ID, IAVF_DEV_ID_ADAPTIVE_VF) },
	{ .vendor_id = 0, /* sentinel */ },
};
2067
/* PCI driver descriptor for the ice DCF PMD. */
static struct rte_pci_driver rte_ice_dcf_pmd = {
	.id_table = pci_id_ice_dcf_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = eth_ice_dcf_pci_probe,
	.remove = eth_ice_dcf_pci_remove,
};
2074
/* Register the driver, its PCI ID table, the kernel modules it can bind
 * through, and the devarg it understands ("cap=dcf").
 */
RTE_PMD_REGISTER_PCI(net_ice_dcf, rte_ice_dcf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_ice_dcf, pci_id_ice_dcf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_ice_dcf, "* igb_uio | vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(net_ice_dcf, "cap=dcf");