ethdev: replace bus specific struct with generic dev
[dpdk.git] drivers/net/thunderx/nicvf_ethdev.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2016 Cavium, Inc
3  */
4
5 #include <assert.h>
6 #include <stdio.h>
7 #include <stdbool.h>
8 #include <errno.h>
9 #include <stdint.h>
10 #include <string.h>
11 #include <unistd.h>
12 #include <stdarg.h>
13 #include <inttypes.h>
14 #include <netinet/in.h>
15 #include <sys/queue.h>
16
17 #include <rte_alarm.h>
18 #include <rte_branch_prediction.h>
19 #include <rte_byteorder.h>
20 #include <rte_common.h>
21 #include <rte_cycles.h>
22 #include <rte_debug.h>
23 #include <rte_dev.h>
24 #include <rte_eal.h>
25 #include <rte_ether.h>
26 #include <rte_ethdev_driver.h>
27 #include <rte_ethdev_pci.h>
28 #include <rte_interrupts.h>
29 #include <rte_log.h>
30 #include <rte_memory.h>
31 #include <rte_memzone.h>
32 #include <rte_malloc.h>
33 #include <rte_random.h>
34 #include <rte_pci.h>
35 #include <rte_bus_pci.h>
36 #include <rte_tailq.h>
37
38 #include "base/nicvf_plat.h"
39
40 #include "nicvf_ethdev.h"
41 #include "nicvf_rxtx.h"
42 #include "nicvf_svf.h"
43 #include "nicvf_logs.h"
44
45 int nicvf_logtype_mbox;
46 int nicvf_logtype_init;
47 int nicvf_logtype_driver;
48
49 static void nicvf_dev_stop(struct rte_eth_dev *dev);
50 static void nicvf_dev_stop_cleanup(struct rte_eth_dev *dev, bool cleanup);
51 static void nicvf_vf_stop(struct rte_eth_dev *dev, struct nicvf *nic,
52                           bool cleanup);
53
54 RTE_INIT(nicvf_init_log);
55 static void
56 nicvf_init_log(void)
57 {
58         nicvf_logtype_mbox = rte_log_register("pmd.net.thunderx.mbox");
59         if (nicvf_logtype_mbox >= 0)
60                 rte_log_set_level(nicvf_logtype_mbox, RTE_LOG_NOTICE);
61
62         nicvf_logtype_init = rte_log_register("pmd.net.thunderx.init");
63         if (nicvf_logtype_init >= 0)
64                 rte_log_set_level(nicvf_logtype_init, RTE_LOG_NOTICE);
65
66         nicvf_logtype_driver = rte_log_register("pmd.net.thunderx.driver");
67         if (nicvf_logtype_driver >= 0)
68                 rte_log_set_level(nicvf_logtype_driver, RTE_LOG_NOTICE);
69 }
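/*
 * These log types can be tuned at runtime without rebuilding the PMD.
 * For example, assuming the standard EAL log controls, an application can
 * raise the driver verbosity with either of:
 *
 *   rte_log_set_level(nicvf_logtype_driver, RTE_LOG_DEBUG);
 *   --log-level=pmd.net.thunderx.driver:debug   (EAL command line)
 */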
70
71 static void
72 nicvf_link_status_update(struct nicvf *nic,
73                          struct rte_eth_link *link)
74 {
75         memset(link, 0, sizeof(*link));
76
77         link->link_status = nic->link_up ? ETH_LINK_UP : ETH_LINK_DOWN;
78
79         if (nic->duplex == NICVF_HALF_DUPLEX)
80                 link->link_duplex = ETH_LINK_HALF_DUPLEX;
81         else if (nic->duplex == NICVF_FULL_DUPLEX)
82                 link->link_duplex = ETH_LINK_FULL_DUPLEX;
83         link->link_speed = nic->speed;
84         link->link_autoneg = ETH_LINK_AUTONEG;
85 }
86
87 static void
88 nicvf_interrupt(void *arg)
89 {
90         struct rte_eth_dev *dev = arg;
91         struct nicvf *nic = nicvf_pmd_priv(dev);
92         struct rte_eth_link link;
93
94         if (nicvf_reg_poll_interrupts(nic) == NIC_MBOX_MSG_BGX_LINK_CHANGE) {
95                 if (dev->data->dev_conf.intr_conf.lsc) {
96                         nicvf_link_status_update(nic, &link);
97                         rte_eth_linkstatus_set(dev, &link);
98
99                         _rte_eth_dev_callback_process(dev,
100                                                       RTE_ETH_EVENT_INTR_LSC,
101                                                       NULL);
102                 }
103         }
104
105         rte_eal_alarm_set(NICVF_INTR_POLL_INTERVAL_MS * 1000,
106                                 nicvf_interrupt, dev);
107 }
108
109 static void
110 nicvf_vf_interrupt(void *arg)
111 {
112         struct nicvf *nic = arg;
113
114         nicvf_reg_poll_interrupts(nic);
115
116         rte_eal_alarm_set(NICVF_INTR_POLL_INTERVAL_MS * 1000,
117                                 nicvf_vf_interrupt, nic);
118 }
119
120 static int
121 nicvf_periodic_alarm_start(void (fn)(void *), void *arg)
122 {
123         return rte_eal_alarm_set(NICVF_INTR_POLL_INTERVAL_MS * 1000, fn, arg);
124 }
125
126 static int
127 nicvf_periodic_alarm_stop(void (fn)(void *), void *arg)
128 {
129         return rte_eal_alarm_cancel(fn, arg);
130 }
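/*
 * The VF has no dedicated link-state interrupt, so link and mailbox events
 * are polled from the alarm callbacks above, re-armed every
 * NICVF_INTR_POLL_INTERVAL_MS. rte_eal_alarm_set() is single-shot, which is
 * why each callback re-registers itself as its last step.
 */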
131
132 /*
133  * Return 0 means link status changed, -1 means not changed
134  */
135 static int
136 nicvf_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
137 {
138 #define CHECK_INTERVAL 100  /* 100ms */
139 #define MAX_CHECK_TIME 90   /* 9s (90 * 100ms) in total */
140         struct rte_eth_link link;
141         struct nicvf *nic = nicvf_pmd_priv(dev);
142         int i;
143
144         PMD_INIT_FUNC_TRACE();
145
146         if (wait_to_complete) {
147                 /* rte_eth_link_get() might need to wait up to 9 seconds */
148                 for (i = 0; i < MAX_CHECK_TIME; i++) {
149                         nicvf_link_status_update(nic, &link);
150                         if (link.link_status == ETH_LINK_UP)
151                                 break;
152                         rte_delay_ms(CHECK_INTERVAL);
153                 }
154         } else {
155                 nicvf_link_status_update(nic, &link);
156         }
157
158         return rte_eth_linkstatus_set(dev, &link);
159 }
160
161 static int
162 nicvf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
163 {
164         struct nicvf *nic = nicvf_pmd_priv(dev);
165         uint32_t buffsz, frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
166         size_t i;
167         struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
168
169         PMD_INIT_FUNC_TRACE();
170
171         if (frame_size > NIC_HW_MAX_FRS)
172                 return -EINVAL;
173
174         if (frame_size < NIC_HW_MIN_FRS)
175                 return -EINVAL;
176
177         buffsz = dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM;
178
179         /*
180          * Refuse mtu that requires the support of scattered packets
181          * when this feature has not been enabled before.
182          */
183         if (!dev->data->scattered_rx &&
184                 (frame_size + 2 * VLAN_TAG_SIZE > buffsz))
185                 return -EINVAL;
186
187         /* Check <seg size> * <max_seg> >= max_frame */
188         if (dev->data->scattered_rx &&
189                 (frame_size + 2 * VLAN_TAG_SIZE > buffsz * NIC_HW_MAX_SEGS))
190                 return -EINVAL;
191
192         if (frame_size > ETHER_MAX_LEN)
193                 rxmode->offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
194         else
195                 rxmode->offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
196
197         if (nicvf_mbox_update_hw_max_frs(nic, frame_size))
198                 return -EINVAL;
199
200         /* Update max frame size */
201         rxmode->max_rx_pkt_len = (uint32_t)frame_size;
202         nic->mtu = mtu;
203
204         for (i = 0; i < nic->sqs_count; i++)
205                 nic->snicvf[i]->mtu = mtu;
206
207         return 0;
208 }
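/*
 * A worked example of the bounds above: with ETHER_HDR_LEN (14) and
 * ETHER_CRC_LEN (4), an MTU of 1500 gives a frame_size of 1518, which
 * equals ETHER_MAX_LEN and so leaves DEV_RX_OFFLOAD_JUMBO_FRAME cleared;
 * any larger MTU turns it on.
 */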
209
210 static int
211 nicvf_dev_get_regs(struct rte_eth_dev *dev, struct rte_dev_reg_info *regs)
212 {
213         uint64_t *data = regs->data;
214         struct nicvf *nic = nicvf_pmd_priv(dev);
215
216         if (data == NULL) {
217                 regs->length = nicvf_reg_get_count();
218                 regs->width = THUNDERX_REG_BYTES;
219                 return 0;
220         }
221
222         /* Support only full register dump */
223         if ((regs->length == 0) ||
224                 (regs->length == (uint32_t)nicvf_reg_get_count())) {
225                 regs->version = nic->vendor_id << 16 | nic->device_id;
226                 nicvf_reg_dump(nic, data);
227                 return 0;
228         }
229         return -ENOTSUP;
230 }
231
232 static int
233 nicvf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
234 {
235         uint16_t qidx;
236         struct nicvf_hw_rx_qstats rx_qstats;
237         struct nicvf_hw_tx_qstats tx_qstats;
238         struct nicvf_hw_stats port_stats;
239         struct nicvf *nic = nicvf_pmd_priv(dev);
240         uint16_t rx_start, rx_end;
241         uint16_t tx_start, tx_end;
242         size_t i;
243
244         /* RX queue indices for the first VF */
245         nicvf_rx_range(dev, nic, &rx_start, &rx_end);
246
247         /* Reading per RX ring stats */
248         for (qidx = rx_start; qidx <= rx_end; qidx++) {
249                 if (qidx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
250                         break;
251
252                 nicvf_hw_get_rx_qstats(nic, &rx_qstats, qidx);
253                 stats->q_ibytes[qidx] = rx_qstats.q_rx_bytes;
254                 stats->q_ipackets[qidx] = rx_qstats.q_rx_packets;
255         }
256
257         /* TX queue indices for the first VF */
258         nicvf_tx_range(dev, nic, &tx_start, &tx_end);
259
260         /* Reading per TX ring stats */
261         for (qidx = tx_start; qidx <= tx_end; qidx++) {
262                 if (qidx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
263                         break;
264
265                 nicvf_hw_get_tx_qstats(nic, &tx_qstats, qidx);
266                 stats->q_obytes[qidx] = tx_qstats.q_tx_bytes;
267                 stats->q_opackets[qidx] = tx_qstats.q_tx_packets;
268         }
269
270         for (i = 0; i < nic->sqs_count; i++) {
271                 struct nicvf *snic = nic->snicvf[i];
272
273                 if (snic == NULL)
274                         break;
275
276                 /* RX queue indices for a secondary VF */
277                 nicvf_rx_range(dev, snic, &rx_start, &rx_end);
278
279                 /* Reading per RX ring stats */
280                 for (qidx = rx_start; qidx <= rx_end; qidx++) {
281                         if (qidx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
282                                 break;
283
284                         nicvf_hw_get_rx_qstats(snic, &rx_qstats,
285                                                qidx % MAX_RCV_QUEUES_PER_QS);
286                         stats->q_ibytes[qidx] = rx_qstats.q_rx_bytes;
287                         stats->q_ipackets[qidx] = rx_qstats.q_rx_packets;
288                 }
289
290                 /* TX queue indices for a secondary VF */
291                 nicvf_tx_range(dev, snic, &tx_start, &tx_end);
292                 /* Reading per TX ring stats */
293                 for (qidx = tx_start; qidx <= tx_end; qidx++) {
294                         if (qidx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
295                                 break;
296
297                         nicvf_hw_get_tx_qstats(snic, &tx_qstats,
298                                                qidx % MAX_SND_QUEUES_PER_QS);
299                         stats->q_obytes[qidx] = tx_qstats.q_tx_bytes;
300                         stats->q_opackets[qidx] = tx_qstats.q_tx_packets;
301                 }
302         }
303
304         nicvf_hw_get_stats(nic, &port_stats);
305         stats->ibytes = port_stats.rx_bytes;
306         stats->ipackets = port_stats.rx_ucast_frames;
307         stats->ipackets += port_stats.rx_bcast_frames;
308         stats->ipackets += port_stats.rx_mcast_frames;
309         stats->ierrors = port_stats.rx_l2_errors;
310         stats->imissed = port_stats.rx_drop_red;
311         stats->imissed += port_stats.rx_drop_overrun;
312         stats->imissed += port_stats.rx_drop_bcast;
313         stats->imissed += port_stats.rx_drop_mcast;
314         stats->imissed += port_stats.rx_drop_l3_bcast;
315         stats->imissed += port_stats.rx_drop_l3_mcast;
316
317         stats->obytes = port_stats.tx_bytes_ok;
318         stats->opackets = port_stats.tx_ucast_frames_ok;
319         stats->opackets += port_stats.tx_bcast_frames_ok;
320         stats->opackets += port_stats.tx_mcast_frames_ok;
321         stats->oerrors = port_stats.tx_drops;
322
323         return 0;
324 }
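/*
 * Per-queue counters above are capped at RTE_ETHDEV_QUEUE_STAT_CNTRS
 * entries (16 unless overridden in the build config), so only the first 16
 * queues are reported individually; the aggregate port counters are not
 * affected by this cap.
 */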
325
326 static const uint32_t *
327 nicvf_dev_supported_ptypes_get(struct rte_eth_dev *dev)
328 {
329         size_t copied;
330         static uint32_t ptypes[32];
331         struct nicvf *nic = nicvf_pmd_priv(dev);
332         static const uint32_t ptypes_common[] = {
333                 RTE_PTYPE_L3_IPV4,
334                 RTE_PTYPE_L3_IPV4_EXT,
335                 RTE_PTYPE_L3_IPV6,
336                 RTE_PTYPE_L3_IPV6_EXT,
337                 RTE_PTYPE_L4_TCP,
338                 RTE_PTYPE_L4_UDP,
339                 RTE_PTYPE_L4_FRAG,
340         };
341         static const uint32_t ptypes_tunnel[] = {
342                 RTE_PTYPE_TUNNEL_GRE,
343                 RTE_PTYPE_TUNNEL_GENEVE,
344                 RTE_PTYPE_TUNNEL_VXLAN,
345                 RTE_PTYPE_TUNNEL_NVGRE,
346         };
347         static const uint32_t ptypes_end = RTE_PTYPE_UNKNOWN;
348
349         copied = sizeof(ptypes_common);
350         memcpy(ptypes, ptypes_common, copied);
351         if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING) {
352                 memcpy((char *)ptypes + copied, ptypes_tunnel,
353                         sizeof(ptypes_tunnel));
354                 copied += sizeof(ptypes_tunnel);
355         }
356
357         memcpy((char *)ptypes + copied, &ptypes_end, sizeof(ptypes_end));
358         if (dev->rx_pkt_burst == nicvf_recv_pkts ||
359                 dev->rx_pkt_burst == nicvf_recv_pkts_multiseg)
360                 return ptypes;
361
362         return NULL;
363 }
364
365 static void
366 nicvf_dev_stats_reset(struct rte_eth_dev *dev)
367 {
368         int i, j;
369         uint16_t rxqs = 0, txqs = 0;
370         struct nicvf *nic = nicvf_pmd_priv(dev);
371         uint16_t rx_start, rx_end;
372         uint16_t tx_start, tx_end;
373
374         /* Reset all primary nic counters */
375         nicvf_rx_range(dev, nic, &rx_start, &rx_end);
376         for (i = rx_start; i <= rx_end; i++)
377                 rxqs |= (0x3 << (i * 2));
378
379         nicvf_tx_range(dev, nic, &tx_start, &tx_end);
380         for (i = tx_start; i <= tx_end; i++)
381                 txqs |= (0x3 << (i * 2));
382
383         nicvf_mbox_reset_stat_counters(nic, 0x3FFF, 0x1F, rxqs, txqs);
384
385         /* Reset secondary nic queue counters */
386         for (i = 0; i < nic->sqs_count; i++) {
387                 struct nicvf *snic = nic->snicvf[i];
388                 if (snic == NULL)
389                         break;
390
391                 nicvf_rx_range(dev, snic, &rx_start, &rx_end);
392                 for (j = rx_start; j <= rx_end; j++)
393                         rxqs |= (0x3 << ((j % MAX_CMP_QUEUES_PER_QS) * 2));
394
395                 nicvf_tx_range(dev, snic, &tx_start, &tx_end);
396                 for (j = tx_start; j <= tx_end; j++)
397                         txqs |= (0x3 << ((j % MAX_SND_QUEUES_PER_QS) * 2));
398
399                 nicvf_mbox_reset_stat_counters(snic, 0, 0, rxqs, txqs);
400         }
401 }
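/*
 * The rxqs/txqs arguments encode two mask bits per queue (0x3 shifted by
 * qidx * 2), presumably covering the byte and packet counters of each
 * queue; resetting queues 0..3, for instance, yields a mask of 0xFF.
 */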
402
403 /* Promiscuous mode enabled by default in LMAC to VF 1:1 map configuration */
404 static void
405 nicvf_dev_promisc_enable(struct rte_eth_dev *dev __rte_unused)
406 {
407 }
408
409 static inline uint64_t
410 nicvf_rss_ethdev_to_nic(struct nicvf *nic, uint64_t ethdev_rss)
411 {
412         uint64_t nic_rss = 0;
413
414         if (ethdev_rss & ETH_RSS_IPV4)
415                 nic_rss |= RSS_IP_ENA;
416
417         if (ethdev_rss & ETH_RSS_IPV6)
418                 nic_rss |= RSS_IP_ENA;
419
420         if (ethdev_rss & ETH_RSS_NONFRAG_IPV4_UDP)
421                 nic_rss |= (RSS_IP_ENA | RSS_UDP_ENA);
422
423         if (ethdev_rss & ETH_RSS_NONFRAG_IPV4_TCP)
424                 nic_rss |= (RSS_IP_ENA | RSS_TCP_ENA);
425
426         if (ethdev_rss & ETH_RSS_NONFRAG_IPV6_UDP)
427                 nic_rss |= (RSS_IP_ENA | RSS_UDP_ENA);
428
429         if (ethdev_rss & ETH_RSS_NONFRAG_IPV6_TCP)
430                 nic_rss |= (RSS_IP_ENA | RSS_TCP_ENA);
431
432         if (ethdev_rss & ETH_RSS_PORT)
433                 nic_rss |= RSS_L2_EXTENDED_HASH_ENA;
434
435         if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING) {
436                 if (ethdev_rss & ETH_RSS_VXLAN)
437                         nic_rss |= RSS_TUN_VXLAN_ENA;
438
439                 if (ethdev_rss & ETH_RSS_GENEVE)
440                         nic_rss |= RSS_TUN_GENEVE_ENA;
441
442                 if (ethdev_rss & ETH_RSS_NVGRE)
443                         nic_rss |= RSS_TUN_NVGRE_ENA;
444         }
445
446         return nic_rss;
447 }
448
449 static inline uint64_t
450 nicvf_rss_nic_to_ethdev(struct nicvf *nic, uint64_t nic_rss)
451 {
452         uint64_t ethdev_rss = 0;
453
454         if (nic_rss & RSS_IP_ENA)
455                 ethdev_rss |= (ETH_RSS_IPV4 | ETH_RSS_IPV6);
456
457         if ((nic_rss & RSS_IP_ENA) && (nic_rss & RSS_TCP_ENA))
458                 ethdev_rss |= (ETH_RSS_NONFRAG_IPV4_TCP |
459                                 ETH_RSS_NONFRAG_IPV6_TCP);
460
461         if ((nic_rss & RSS_IP_ENA) && (nic_rss & RSS_UDP_ENA))
462                 ethdev_rss |= (ETH_RSS_NONFRAG_IPV4_UDP |
463                                 ETH_RSS_NONFRAG_IPV6_UDP);
464
465         if (nic_rss & RSS_L2_EXTENDED_HASH_ENA)
466                 ethdev_rss |= ETH_RSS_PORT;
467
468         if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING) {
469                 if (nic_rss & RSS_TUN_VXLAN_ENA)
470                         ethdev_rss |= ETH_RSS_VXLAN;
471
472                 if (nic_rss & RSS_TUN_GENEVE_ENA)
473                         ethdev_rss |= ETH_RSS_GENEVE;
474
475                 if (nic_rss & RSS_TUN_NVGRE_ENA)
476                         ethdev_rss |= ETH_RSS_NVGRE;
477         }
478         return ethdev_rss;
479 }
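/*
 * The two translations above are not exact inverses: the hardware keeps a
 * single RSS_IP_ENA bit for both IP versions, so a request for ETH_RSS_IPV4
 * alone maps to RSS_IP_ENA, which translates back as
 * (ETH_RSS_IPV4 | ETH_RSS_IPV6).
 */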
480
481 static int
482 nicvf_dev_reta_query(struct rte_eth_dev *dev,
483                      struct rte_eth_rss_reta_entry64 *reta_conf,
484                      uint16_t reta_size)
485 {
486         struct nicvf *nic = nicvf_pmd_priv(dev);
487         uint8_t tbl[NIC_MAX_RSS_IDR_TBL_SIZE];
488         int ret, i, j;
489
490         if (reta_size != NIC_MAX_RSS_IDR_TBL_SIZE) {
491                 RTE_LOG(ERR, PMD, "The size of hash lookup table configured "
492                         "(%d) doesn't match the number hardware can support "
493                         "(%d)", reta_size, NIC_MAX_RSS_IDR_TBL_SIZE);
494                 return -EINVAL;
495         }
496
497         ret = nicvf_rss_reta_query(nic, tbl, NIC_MAX_RSS_IDR_TBL_SIZE);
498         if (ret)
499                 return ret;
500
501         /* Copy RETA table */
502         for (i = 0; i < (NIC_MAX_RSS_IDR_TBL_SIZE / RTE_RETA_GROUP_SIZE); i++) {
503                 for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
504                         if ((reta_conf[i].mask >> j) & 0x01)
505                                 reta_conf[i].reta[j] = tbl[j];
506         }
507
508         return 0;
509 }
510
511 static int
512 nicvf_dev_reta_update(struct rte_eth_dev *dev,
513                       struct rte_eth_rss_reta_entry64 *reta_conf,
514                       uint16_t reta_size)
515 {
516         struct nicvf *nic = nicvf_pmd_priv(dev);
517         uint8_t tbl[NIC_MAX_RSS_IDR_TBL_SIZE];
518         int ret, i, j;
519
520         if (reta_size != NIC_MAX_RSS_IDR_TBL_SIZE) {
521                 RTE_LOG(ERR, PMD, "The size of hash lookup table configured "
522                         "(%d) doesn't match the number hardware can support "
523                         "(%d)", reta_size, NIC_MAX_RSS_IDR_TBL_SIZE);
524                 return -EINVAL;
525         }
526
527         ret = nicvf_rss_reta_query(nic, tbl, NIC_MAX_RSS_IDR_TBL_SIZE);
528         if (ret)
529                 return ret;
530
531         /* Copy RETA table */
532         for (i = 0; i < (NIC_MAX_RSS_IDR_TBL_SIZE / RTE_RETA_GROUP_SIZE); i++) {
533                 for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
534                         if ((reta_conf[i].mask >> j) & 0x01)
535                                 tbl[j] = reta_conf[i].reta[j];
536         }
537
538         return nicvf_rss_reta_update(nic, tbl, NIC_MAX_RSS_IDR_TBL_SIZE);
539 }
540
541 static int
542 nicvf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
543                             struct rte_eth_rss_conf *rss_conf)
544 {
545         struct nicvf *nic = nicvf_pmd_priv(dev);
546
547         if (rss_conf->rss_key)
548                 nicvf_rss_get_key(nic, rss_conf->rss_key);
549
550         rss_conf->rss_key_len = RSS_HASH_KEY_BYTE_SIZE;
551         rss_conf->rss_hf = nicvf_rss_nic_to_ethdev(nic, nicvf_rss_get_cfg(nic));
552         return 0;
553 }
554
555 static int
556 nicvf_dev_rss_hash_update(struct rte_eth_dev *dev,
557                           struct rte_eth_rss_conf *rss_conf)
558 {
559         struct nicvf *nic = nicvf_pmd_priv(dev);
560         uint64_t nic_rss;
561
562         if (rss_conf->rss_key &&
563                 rss_conf->rss_key_len != RSS_HASH_KEY_BYTE_SIZE) {
564                 RTE_LOG(ERR, PMD, "Hash key size mismatch %d",
565                                 rss_conf->rss_key_len);
566                 return -EINVAL;
567         }
568
569         if (rss_conf->rss_key)
570                 nicvf_rss_set_key(nic, rss_conf->rss_key);
571
572         nic_rss = nicvf_rss_ethdev_to_nic(nic, rss_conf->rss_hf);
573         nicvf_rss_set_cfg(nic, nic_rss);
574         return 0;
575 }
576
577 static int
578 nicvf_qset_cq_alloc(struct rte_eth_dev *dev, struct nicvf *nic,
579                     struct nicvf_rxq *rxq, uint16_t qidx, uint32_t desc_cnt)
580 {
581         const struct rte_memzone *rz;
582         uint32_t ring_size = CMP_QUEUE_SZ_MAX * sizeof(union cq_entry_t);
583
584         rz = rte_eth_dma_zone_reserve(dev, "cq_ring",
585                                       nicvf_netdev_qidx(nic, qidx), ring_size,
586                                       NICVF_CQ_BASE_ALIGN_BYTES, nic->node);
587         if (rz == NULL) {
588                 PMD_INIT_LOG(ERR, "Failed to allocate mem for cq hw ring");
589                 return -ENOMEM;
590         }
591
592         memset(rz->addr, 0, ring_size);
593
594         rxq->phys = rz->iova;
595         rxq->desc = rz->addr;
596         rxq->qlen_mask = desc_cnt - 1;
597
598         return 0;
599 }
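/*
 * The ring is sized for CMP_QUEUE_SZ_MAX rather than desc_cnt:
 * rte_eth_dma_zone_reserve() hands back the already-reserved zone when the
 * same queue name is requested again, so reserving the maximum up front
 * allows the queue to be re-configured with a larger ring later without
 * reallocation. The send queue and RBDR below follow the same pattern.
 */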
600
601 static int
602 nicvf_qset_sq_alloc(struct rte_eth_dev *dev, struct nicvf *nic,
603                     struct nicvf_txq *sq, uint16_t qidx, uint32_t desc_cnt)
604 {
605         const struct rte_memzone *rz;
606         uint32_t ring_size = SND_QUEUE_SZ_MAX * sizeof(union sq_entry_t);
607
608         rz = rte_eth_dma_zone_reserve(dev, "sq",
609                                       nicvf_netdev_qidx(nic, qidx), ring_size,
610                                       NICVF_SQ_BASE_ALIGN_BYTES, nic->node);
611         if (rz == NULL) {
612                 PMD_INIT_LOG(ERR, "Failed to allocate mem for sq hw ring");
613                 return -ENOMEM;
614         }
615
616         memset(rz->addr, 0, ring_size);
617
618         sq->phys = rz->iova;
619         sq->desc = rz->addr;
620         sq->qlen_mask = desc_cnt - 1;
621
622         return 0;
623 }
624
625 static int
626 nicvf_qset_rbdr_alloc(struct rte_eth_dev *dev, struct nicvf *nic,
627                       uint32_t desc_cnt, uint32_t buffsz)
628 {
629         struct nicvf_rbdr *rbdr;
630         const struct rte_memzone *rz;
631         uint32_t ring_size;
632
633         assert(nic->rbdr == NULL);
634         rbdr = rte_zmalloc_socket("rbdr", sizeof(struct nicvf_rbdr),
635                                   RTE_CACHE_LINE_SIZE, nic->node);
636         if (rbdr == NULL) {
637                 PMD_INIT_LOG(ERR, "Failed to allocate mem for rbdr");
638                 return -ENOMEM;
639         }
640
641         ring_size = sizeof(struct rbdr_entry_t) * RBDR_QUEUE_SZ_MAX;
642         rz = rte_eth_dma_zone_reserve(dev, "rbdr",
643                                       nicvf_netdev_qidx(nic, 0), ring_size,
644                                       NICVF_RBDR_BASE_ALIGN_BYTES, nic->node);
645         if (rz == NULL) {
646                 PMD_INIT_LOG(ERR, "Failed to allocate mem for rbdr desc ring");
647                 return -ENOMEM;
648         }
649
650         memset(rz->addr, 0, ring_size);
651
652         rbdr->phys = rz->iova;
653         rbdr->tail = 0;
654         rbdr->next_tail = 0;
655         rbdr->desc = rz->addr;
656         rbdr->buffsz = buffsz;
657         rbdr->qlen_mask = desc_cnt - 1;
658         rbdr->rbdr_status =
659                 nicvf_qset_base(nic, 0) + NIC_QSET_RBDR_0_1_STATUS0;
660         rbdr->rbdr_door =
661                 nicvf_qset_base(nic, 0) + NIC_QSET_RBDR_0_1_DOOR;
662
663         nic->rbdr = rbdr;
664         return 0;
665 }
666
667 static void
668 nicvf_rbdr_release_mbuf(struct rte_eth_dev *dev, struct nicvf *nic,
669                         nicvf_iova_addr_t phy)
670 {
671         uint16_t qidx;
672         void *obj;
673         struct nicvf_rxq *rxq;
674         uint16_t rx_start, rx_end;
675
676         /* Get queue ranges for this VF */
677         nicvf_rx_range(dev, nic, &rx_start, &rx_end);
678
679         for (qidx = rx_start; qidx <= rx_end; qidx++) {
680                 rxq = dev->data->rx_queues[qidx];
681                 if (rxq->precharge_cnt) {
682                         obj = (void *)nicvf_mbuff_phy2virt(phy,
683                                                            rxq->mbuf_phys_off);
684                         rte_mempool_put(rxq->pool, obj);
685                         rxq->precharge_cnt--;
686                         break;
687                 }
688         }
689 }
690
691 static inline void
692 nicvf_rbdr_release_mbufs(struct rte_eth_dev *dev, struct nicvf *nic)
693 {
694         uint32_t qlen_mask, head;
695         struct rbdr_entry_t *entry;
696         struct nicvf_rbdr *rbdr = nic->rbdr;
697
698         qlen_mask = rbdr->qlen_mask;
699         head = rbdr->head;
700         while (head != rbdr->tail) {
701                 entry = rbdr->desc + head;
702                 nicvf_rbdr_release_mbuf(dev, nic, entry->full_addr);
703                 head++;
704                 head = head & qlen_mask;
705         }
706 }
707
708 static inline void
709 nicvf_tx_queue_release_mbufs(struct nicvf_txq *txq)
710 {
711         uint32_t head;
712
713         head = txq->head;
714         while (head != txq->tail) {
715                 if (txq->txbuffs[head]) {
716                         rte_pktmbuf_free_seg(txq->txbuffs[head]);
717                         txq->txbuffs[head] = NULL;
718                 }
719                 head++;
720                 head = head & txq->qlen_mask;
721         }
722 }
723
724 static void
725 nicvf_tx_queue_reset(struct nicvf_txq *txq)
726 {
727         uint32_t txq_desc_cnt = txq->qlen_mask + 1;
728
729         memset(txq->desc, 0, sizeof(union sq_entry_t) * txq_desc_cnt);
730         memset(txq->txbuffs, 0, sizeof(struct rte_mbuf *) * txq_desc_cnt);
731         txq->tail = 0;
732         txq->head = 0;
733         txq->xmit_bufs = 0;
734 }
735
736 static inline int
737 nicvf_vf_start_tx_queue(struct rte_eth_dev *dev, struct nicvf *nic,
738                         uint16_t qidx)
739 {
740         struct nicvf_txq *txq;
741         int ret;
742
743         assert(qidx < MAX_SND_QUEUES_PER_QS);
744
745         if (dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] ==
746                 RTE_ETH_QUEUE_STATE_STARTED)
747                 return 0;
748
749         txq = dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)];
750         txq->pool = NULL;
751         ret = nicvf_qset_sq_config(nic, qidx, txq);
752         if (ret) {
753                 PMD_INIT_LOG(ERR, "Failed to configure sq VF%d %d %d",
754                              nic->vf_id, qidx, ret);
755                 goto config_sq_error;
756         }
757
758         dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
759                 RTE_ETH_QUEUE_STATE_STARTED;
760         return ret;
761
762 config_sq_error:
763         nicvf_qset_sq_reclaim(nic, qidx);
764         return ret;
765 }
766
767 static inline int
768 nicvf_vf_stop_tx_queue(struct rte_eth_dev *dev, struct nicvf *nic,
769                        uint16_t qidx)
770 {
771         struct nicvf_txq *txq;
772         int ret;
773
774         assert(qidx < MAX_SND_QUEUES_PER_QS);
775
776         if (dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] ==
777                 RTE_ETH_QUEUE_STATE_STOPPED)
778                 return 0;
779
780         ret = nicvf_qset_sq_reclaim(nic, qidx);
781         if (ret)
782                 PMD_INIT_LOG(ERR, "Failed to reclaim sq VF%d %d %d",
783                              nic->vf_id, qidx, ret);
784
785         txq = dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)];
786         nicvf_tx_queue_release_mbufs(txq);
787         nicvf_tx_queue_reset(txq);
788
789         dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
790                 RTE_ETH_QUEUE_STATE_STOPPED;
791         return ret;
792 }
793
794 static inline int
795 nicvf_configure_cpi(struct rte_eth_dev *dev)
796 {
797         struct nicvf *nic = nicvf_pmd_priv(dev);
798         uint16_t qidx, qcnt;
799         int ret;
800
801         /* Count started rx queues */
802         for (qidx = qcnt = 0; qidx < dev->data->nb_rx_queues; qidx++)
803                 if (dev->data->rx_queue_state[qidx] ==
804                     RTE_ETH_QUEUE_STATE_STARTED)
805                         qcnt++;
806
807         nic->cpi_alg = CPI_ALG_NONE;
808         ret = nicvf_mbox_config_cpi(nic, qcnt);
809         if (ret)
810                 PMD_INIT_LOG(ERR, "Failed to configure CPI %d", ret);
811
812         return ret;
813 }
814
815 static inline int
816 nicvf_configure_rss(struct rte_eth_dev *dev)
817 {
818         struct nicvf *nic = nicvf_pmd_priv(dev);
819         uint64_t rsshf;
820         int ret = -EINVAL;
821
822         rsshf = nicvf_rss_ethdev_to_nic(nic,
823                         dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf);
824         PMD_DRV_LOG(INFO, "mode=%d rx_queues=%d loopback=%d rsshf=0x%" PRIx64,
825                     dev->data->dev_conf.rxmode.mq_mode,
826                     dev->data->nb_rx_queues,
827                     dev->data->dev_conf.lpbk_mode, rsshf);
828
829         if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_NONE)
830                 ret = nicvf_rss_term(nic);
831         else if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
832                 ret = nicvf_rss_config(nic, dev->data->nb_rx_queues, rsshf);
833         if (ret)
834                 PMD_INIT_LOG(ERR, "Failed to configure RSS %d", ret);
835
836         return ret;
837 }
838
839 static int
840 nicvf_configure_rss_reta(struct rte_eth_dev *dev)
841 {
842         struct nicvf *nic = nicvf_pmd_priv(dev);
843         unsigned int idx, qmap_size;
844         uint8_t qmap[RTE_MAX_QUEUES_PER_PORT];
845         uint8_t default_reta[NIC_MAX_RSS_IDR_TBL_SIZE];
846
847         if (nic->cpi_alg != CPI_ALG_NONE)
848                 return -EINVAL;
849
850         /* Prepare queue map */
851         for (idx = 0, qmap_size = 0; idx < dev->data->nb_rx_queues; idx++) {
852                 if (dev->data->rx_queue_state[idx] ==
853                                 RTE_ETH_QUEUE_STATE_STARTED)
854                         qmap[qmap_size++] = idx;
855         }
856
857         /* Update default RSS RETA */
858         for (idx = 0; idx < NIC_MAX_RSS_IDR_TBL_SIZE; idx++)
859                 default_reta[idx] = qmap[idx % qmap_size];
860
861         return nicvf_rss_reta_update(nic, default_reta,
862                                      NIC_MAX_RSS_IDR_TBL_SIZE);
863 }
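/*
 * The default RETA spreads the started queues round-robin across all
 * NIC_MAX_RSS_IDR_TBL_SIZE indirection entries; with three started rx
 * queues the table becomes 0, 1, 2, 0, 1, 2, ...
 */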
864
865 static void
866 nicvf_dev_tx_queue_release(void *sq)
867 {
868         struct nicvf_txq *txq;
869
870         PMD_INIT_FUNC_TRACE();
871
872         txq = (struct nicvf_txq *)sq;
873         if (txq) {
874                 if (txq->txbuffs != NULL) {
875                         nicvf_tx_queue_release_mbufs(txq);
876                         rte_free(txq->txbuffs);
877                         txq->txbuffs = NULL;
878                 }
879                 rte_free(txq);
880         }
881 }
882
883 static void
884 nicvf_set_tx_function(struct rte_eth_dev *dev)
885 {
886         struct nicvf_txq *txq = NULL;
887         size_t i;
888         bool multiseg = false;
889
890         for (i = 0; i < dev->data->nb_tx_queues; i++) {
891                 txq = dev->data->tx_queues[i];
892                 if (txq->offloads & DEV_TX_OFFLOAD_MULTI_SEGS) {
893                         multiseg = true;
894                         break;
895                 }
896         }
897
898         /* Use a simple Tx queue (no offloads, no multi segs) if possible */
899         if (multiseg) {
900                 PMD_DRV_LOG(DEBUG, "Using multi-segment tx callback");
901                 dev->tx_pkt_burst = nicvf_xmit_pkts_multiseg;
902         } else {
903                 PMD_DRV_LOG(DEBUG, "Using single-segment tx callback");
904                 dev->tx_pkt_burst = nicvf_xmit_pkts;
905         }
906
907         if (txq && txq->pool_free == nicvf_single_pool_free_xmited_buffers)
908                 PMD_DRV_LOG(DEBUG, "Using single-mempool tx free method");
909         else
910                 PMD_DRV_LOG(DEBUG, "Using multi-mempool tx free method");
911 }
912
913 static void
914 nicvf_set_rx_function(struct rte_eth_dev *dev)
915 {
916         if (dev->data->scattered_rx) {
917                 PMD_DRV_LOG(DEBUG, "Using multi-segment rx callback");
918                 dev->rx_pkt_burst = nicvf_recv_pkts_multiseg;
919         } else {
920                 PMD_DRV_LOG(DEBUG, "Using single-segment rx callback");
921                 dev->rx_pkt_burst = nicvf_recv_pkts;
922         }
923 }
924
925 static int
926 nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
927                          uint16_t nb_desc, unsigned int socket_id,
928                          const struct rte_eth_txconf *tx_conf)
929 {
930         uint16_t tx_free_thresh;
931         bool is_single_pool;
932         struct nicvf_txq *txq;
933         struct nicvf *nic = nicvf_pmd_priv(dev);
934         uint64_t conf_offloads, offload_capa, unsupported_offloads;
935
936         PMD_INIT_FUNC_TRACE();
937
938         if (qidx >= MAX_SND_QUEUES_PER_QS)
939                 nic = nic->snicvf[qidx / MAX_SND_QUEUES_PER_QS - 1];
940
941         qidx = qidx % MAX_SND_QUEUES_PER_QS;
942
943         /* Socket id check */
944         if (socket_id != (unsigned int)SOCKET_ID_ANY && socket_id != nic->node)
945                 PMD_DRV_LOG(WARNING, "socket_id expected %d, configured %d",
946                 socket_id, nic->node);
947
948         conf_offloads = tx_conf->offloads;
949         offload_capa = NICVF_TX_OFFLOAD_CAPA;
950
951         unsupported_offloads = conf_offloads & ~offload_capa;
952         if (unsupported_offloads) {
953                 PMD_INIT_LOG(ERR, "Tx offloads 0x%" PRIx64 " are not supported. "
954                       "Requested 0x%" PRIx64 " supported 0x%" PRIx64 ".\n",
955                       unsupported_offloads, conf_offloads, offload_capa);
956                 return -ENOTSUP;
957         }
958
959         /* Tx deferred start is not supported */
960         if (tx_conf->tx_deferred_start) {
961                 PMD_INIT_LOG(ERR, "Tx deferred start not supported");
962                 return -EINVAL;
963         }
964
965         /* Round up nb_desc to available qsize and validate max number of desc */
966         nb_desc = nicvf_qsize_sq_roundup(nb_desc);
967         if (nb_desc == 0) {
968                 PMD_INIT_LOG(ERR, "Value of nb_desc beyond available sq qsize");
969                 return -EINVAL;
970         }
971
972         /* Validate tx_free_thresh */
973         tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
974                                 tx_conf->tx_free_thresh :
975                                 NICVF_DEFAULT_TX_FREE_THRESH);
976
977         if (tx_free_thresh > (nb_desc) ||
978                 tx_free_thresh > NICVF_MAX_TX_FREE_THRESH) {
979                 PMD_INIT_LOG(ERR,
980                         "tx_free_thresh must be less than the number of TX "
981                         "descriptors. (tx_free_thresh=%u port=%d "
982                         "queue=%d)", (unsigned int)tx_free_thresh,
983                         (int)dev->data->port_id, (int)qidx);
984                 return -EINVAL;
985         }
986
987         /* Free memory prior to re-allocation if needed. */
988         if (dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)] != NULL) {
989                 PMD_TX_LOG(DEBUG, "Freeing memory prior to re-allocation %d",
990                                 nicvf_netdev_qidx(nic, qidx));
991                 nicvf_dev_tx_queue_release(
992                         dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)]);
993                 dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)] = NULL;
994         }
995
996         /* Allocating tx queue data structure */
997         txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct nicvf_txq),
998                                         RTE_CACHE_LINE_SIZE, nic->node);
999         if (txq == NULL) {
1000                 PMD_INIT_LOG(ERR, "Failed to allocate txq=%d",
1001                              nicvf_netdev_qidx(nic, qidx));
1002                 return -ENOMEM;
1003         }
1004
1005         txq->nic = nic;
1006         txq->queue_id = qidx;
1007         txq->tx_free_thresh = tx_free_thresh;
1008         txq->sq_head = nicvf_qset_base(nic, qidx) + NIC_QSET_SQ_0_7_HEAD;
1009         txq->sq_door = nicvf_qset_base(nic, qidx) + NIC_QSET_SQ_0_7_DOOR;
1010         txq->offloads = conf_offloads;
1011
1012         is_single_pool = !!(conf_offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE);
1013
1014         /* Choose optimum free threshold value for multipool case */
1015         if (!is_single_pool) {
1016                 txq->tx_free_thresh = (uint16_t)
1017                 (tx_conf->tx_free_thresh == NICVF_DEFAULT_TX_FREE_THRESH ?
1018                                 NICVF_TX_FREE_MPOOL_THRESH :
1019                                 tx_conf->tx_free_thresh);
1020                 txq->pool_free = nicvf_multi_pool_free_xmited_buffers;
1021         } else {
1022                 txq->pool_free = nicvf_single_pool_free_xmited_buffers;
1023         }
1024
1025         /* Allocate software ring */
1026         txq->txbuffs = rte_zmalloc_socket("txq->txbuffs",
1027                                 nb_desc * sizeof(struct rte_mbuf *),
1028                                 RTE_CACHE_LINE_SIZE, nic->node);
1029
1030         if (txq->txbuffs == NULL) {
1031                 nicvf_dev_tx_queue_release(txq);
1032                 return -ENOMEM;
1033         }
1034
1035         if (nicvf_qset_sq_alloc(dev, nic, txq, qidx, nb_desc)) {
1036                 PMD_INIT_LOG(ERR, "Failed to allocate mem for sq %d", qidx);
1037                 nicvf_dev_tx_queue_release(txq);
1038                 return -ENOMEM;
1039         }
1040
1041         nicvf_tx_queue_reset(txq);
1042
1043         PMD_INIT_LOG(DEBUG, "[%d] txq=%p nb_desc=%d desc=%p"
1044                         " phys=0x%" PRIx64 " offloads=0x%" PRIx64,
1045                         nicvf_netdev_qidx(nic, qidx), txq, nb_desc, txq->desc,
1046                         txq->phys, txq->offloads);
1047
1048         dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)] = txq;
1049         dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
1050                 RTE_ETH_QUEUE_STATE_STOPPED;
1051         return 0;
1052 }
1053
1054 static inline void
1055 nicvf_rx_queue_release_mbufs(struct rte_eth_dev *dev, struct nicvf_rxq *rxq)
1056 {
1057         uint32_t rxq_cnt;
1058         uint32_t nb_pkts, released_pkts = 0;
1059         uint32_t refill_cnt = 0;
1060         struct rte_mbuf *rx_pkts[NICVF_MAX_RX_FREE_THRESH];
1061
1062         if (dev->rx_pkt_burst == NULL)
1063                 return;
1064
1065         while ((rxq_cnt = nicvf_dev_rx_queue_count(dev,
1066                                 nicvf_netdev_qidx(rxq->nic, rxq->queue_id)))) {
1067                 nb_pkts = dev->rx_pkt_burst(rxq, rx_pkts,
1068                                         NICVF_MAX_RX_FREE_THRESH);
1069                 PMD_DRV_LOG(INFO, "nb_pkts=%d  rxq_cnt=%d", nb_pkts, rxq_cnt);
1070                 while (nb_pkts) {
1071                         rte_pktmbuf_free_seg(rx_pkts[--nb_pkts]);
1072                         released_pkts++;
1073                 }
1074         }
1075
1076
1077         refill_cnt += nicvf_dev_rbdr_refill(dev,
1078                         nicvf_netdev_qidx(rxq->nic, rxq->queue_id));
1079
1080         PMD_DRV_LOG(INFO, "free_cnt=%d  refill_cnt=%d",
1081                     released_pkts, refill_cnt);
1082 }
1083
1084 static void
1085 nicvf_rx_queue_reset(struct nicvf_rxq *rxq)
1086 {
1087         rxq->head = 0;
1088         rxq->available_space = 0;
1089         rxq->recv_buffers = 0;
1090 }
1091
1092 static inline int
1093 nicvf_vf_start_rx_queue(struct rte_eth_dev *dev, struct nicvf *nic,
1094                         uint16_t qidx)
1095 {
1096         struct nicvf_rxq *rxq;
1097         int ret;
1098
1099         assert(qidx < MAX_RCV_QUEUES_PER_QS);
1100
1101         if (dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] ==
1102                 RTE_ETH_QUEUE_STATE_STARTED)
1103                 return 0;
1104
1105         /* Update rbdr pointer to all rxq */
1106         rxq = dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)];
1107         rxq->shared_rbdr = nic->rbdr;
1108
1109         ret = nicvf_qset_rq_config(nic, qidx, rxq);
1110         if (ret) {
1111                 PMD_INIT_LOG(ERR, "Failed to configure rq VF%d %d %d",
1112                              nic->vf_id, qidx, ret);
1113                 goto config_rq_error;
1114         }
1115         ret = nicvf_qset_cq_config(nic, qidx, rxq);
1116         if (ret) {
1117                 PMD_INIT_LOG(ERR, "Failed to configure cq VF%d %d %d",
1118                              nic->vf_id, qidx, ret);
1119                 goto config_cq_error;
1120         }
1121
1122         dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
1123                 RTE_ETH_QUEUE_STATE_STARTED;
1124         return 0;
1125
1126 config_cq_error:
1127         nicvf_qset_cq_reclaim(nic, qidx);
1128 config_rq_error:
1129         nicvf_qset_rq_reclaim(nic, qidx);
1130         return ret;
1131 }
1132
1133 static inline int
1134 nicvf_vf_stop_rx_queue(struct rte_eth_dev *dev, struct nicvf *nic,
1135                        uint16_t qidx)
1136 {
1137         struct nicvf_rxq *rxq;
1138         int ret, other_error;
1139
1140         if (dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] ==
1141                 RTE_ETH_QUEUE_STATE_STOPPED)
1142                 return 0;
1143
1144         ret = nicvf_qset_rq_reclaim(nic, qidx);
1145         if (ret)
1146                 PMD_INIT_LOG(ERR, "Failed to reclaim rq VF%d %d %d",
1147                              nic->vf_id, qidx, ret);
1148
1149         other_error = ret;
1150         rxq = dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)];
1151         nicvf_rx_queue_release_mbufs(dev, rxq);
1152         nicvf_rx_queue_reset(rxq);
1153
1154         ret = nicvf_qset_cq_reclaim(nic, qidx);
1155         if (ret)
1156                 PMD_INIT_LOG(ERR, "Failed to reclaim cq VF%d %d %d",
1157                              nic->vf_id, qidx, ret);
1158
1159         other_error |= ret;
1160         dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
1161                 RTE_ETH_QUEUE_STATE_STOPPED;
1162         return other_error;
1163 }
1164
1165 static void
1166 nicvf_dev_rx_queue_release(void *rx_queue)
1167 {
1168         PMD_INIT_FUNC_TRACE();
1169
1170         rte_free(rx_queue);
1171 }
1172
1173 static int
1174 nicvf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t qidx)
1175 {
1176         struct nicvf *nic = nicvf_pmd_priv(dev);
1177         int ret;
1178
1179         if (qidx >= MAX_RCV_QUEUES_PER_QS)
1180                 nic = nic->snicvf[(qidx / MAX_RCV_QUEUES_PER_QS - 1)];
1181
1182         qidx = qidx % MAX_RCV_QUEUES_PER_QS;
1183
1184         ret = nicvf_vf_start_rx_queue(dev, nic, qidx);
1185         if (ret)
1186                 return ret;
1187
1188         ret = nicvf_configure_cpi(dev);
1189         if (ret)
1190                 return ret;
1191
1192         return nicvf_configure_rss_reta(dev);
1193 }
1194
1195 static int
1196 nicvf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx)
1197 {
1198         int ret;
1199         struct nicvf *nic = nicvf_pmd_priv(dev);
1200
1201         if (qidx >= MAX_RCV_QUEUES_PER_QS)
1202                 nic = nic->snicvf[(qidx / MAX_RCV_QUEUES_PER_QS - 1)];
1203
1204         qidx = qidx % MAX_RCV_QUEUES_PER_QS;
1205
1206         ret = nicvf_vf_stop_rx_queue(dev, nic, qidx);
1207         ret |= nicvf_configure_cpi(dev);
1208         ret |= nicvf_configure_rss_reta(dev);
1209         return ret;
1210 }
1211
1212 static int
1213 nicvf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t qidx)
1214 {
1215         struct nicvf *nic = nicvf_pmd_priv(dev);
1216
1217         if (qidx >= MAX_SND_QUEUES_PER_QS)
1218                 nic = nic->snicvf[(qidx / MAX_SND_QUEUES_PER_QS - 1)];
1219
1220         qidx = qidx % MAX_SND_QUEUES_PER_QS;
1221
1222         return nicvf_vf_start_tx_queue(dev, nic, qidx);
1223 }
1224
1225 static int
1226 nicvf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx)
1227 {
1228         struct nicvf *nic = nicvf_pmd_priv(dev);
1229
1230         if (qidx >= MAX_SND_QUEUES_PER_QS)
1231                 nic = nic->snicvf[(qidx / MAX_SND_QUEUES_PER_QS - 1)];
1232
1233         qidx = qidx % MAX_SND_QUEUES_PER_QS;
1234
1235         return nicvf_vf_stop_tx_queue(dev, nic, qidx);
1236 }
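/*
 * Queue id translation used by the start/stop entry points above: ethdev
 * queue ids beyond one queue set map onto secondary VFs. Assuming
 * MAX_SND_QUEUES_PER_QS and MAX_RCV_QUEUES_PER_QS are both 8, ethdev
 * qidx 10 lands on snicvf[0] as local queue 2.
 */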
1237
1238 static inline void
1239 nicvf_rxq_mbuf_setup(struct nicvf_rxq *rxq)
1240 {
1241         uintptr_t p;
1242         struct rte_mbuf mb_def;
1243
1244         RTE_BUILD_BUG_ON(sizeof(union mbuf_initializer) != 8);
1245         RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_off) % 8 != 0);
1246         RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, refcnt) -
1247                                 offsetof(struct rte_mbuf, data_off) != 2);
1248         RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, nb_segs) -
1249                                 offsetof(struct rte_mbuf, data_off) != 4);
1250         RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, port) -
1251                                 offsetof(struct rte_mbuf, data_off) != 6);
1252         mb_def.nb_segs = 1;
1253         mb_def.data_off = RTE_PKTMBUF_HEADROOM;
1254         mb_def.port = rxq->port_id;
1255         rte_mbuf_refcnt_set(&mb_def, 1);
1256
1257         /* Prevent compiler reordering: rearm_data covers previous fields */
1258         rte_compiler_barrier();
1259         p = (uintptr_t)&mb_def.rearm_data;
1260         rxq->mbuf_initializer.value = *(uint64_t *)p;
1261 }
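/*
 * The RTE_BUILD_BUG_ON() checks above pin data_off, refcnt, nb_segs and
 * port into the single 64-bit rearm_data word so that the rx path can
 * re-arm a received mbuf with one 8-byte store instead of four field
 * writes, roughly:
 *
 *   *(uint64_t *)&mbuf->rearm_data = rxq->mbuf_initializer.value;
 */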
1262
1263 static int
1264 nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
1265                          uint16_t nb_desc, unsigned int socket_id,
1266                          const struct rte_eth_rxconf *rx_conf,
1267                          struct rte_mempool *mp)
1268 {
1269         uint16_t rx_free_thresh;
1270         struct nicvf_rxq *rxq;
1271         struct nicvf *nic = nicvf_pmd_priv(dev);
1272         uint64_t conf_offloads, offload_capa, unsupported_offloads;
1273
1274         PMD_INIT_FUNC_TRACE();
1275
1276         if (qidx >= MAX_RCV_QUEUES_PER_QS)
1277                 nic = nic->snicvf[qidx / MAX_RCV_QUEUES_PER_QS - 1];
1278
1279         qidx = qidx % MAX_RCV_QUEUES_PER_QS;
1280
1281         /* Socket id check */
1282         if (socket_id != (unsigned int)SOCKET_ID_ANY && socket_id != nic->node)
1283                 PMD_DRV_LOG(WARNING, "socket_id expected %d, configured %d",
1284                 socket_id, nic->node);
1285
1286
1287         conf_offloads = rx_conf->offloads;
1288
1289         if (conf_offloads & DEV_RX_OFFLOAD_CHECKSUM) {
1290                 PMD_INIT_LOG(NOTICE, "Rx checksum not supported");
1291                 conf_offloads &= ~DEV_RX_OFFLOAD_CHECKSUM;
1292         }
1293
1294         offload_capa = NICVF_RX_OFFLOAD_CAPA;
1295         unsupported_offloads = conf_offloads & ~offload_capa;
1296
1297         if (unsupported_offloads) {
1298                 PMD_INIT_LOG(ERR, "Rx offloads 0x%" PRIx64 " are not supported. "
1299                       "Requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
1300                       unsupported_offloads, conf_offloads, offload_capa);
1301                 return -ENOTSUP;
1302         }
1303
1304         /* Mempool memory must be contiguous, so it must be one memory segment */
1305         if (mp->nb_mem_chunks != 1) {
1306                 PMD_INIT_LOG(ERR, "Non-contiguous mempool, add more huge pages");
1307                 return -EINVAL;
1308         }
1309
1310         /* Mempool memory must be physically contiguous */
1311         if (mp->flags & MEMPOOL_F_NO_PHYS_CONTIG) {
1312                 PMD_INIT_LOG(ERR, "Mempool memory must be physically contiguous");
1313                 return -EINVAL;
1314         }
1315
1316         /* Rx deferred start is not supported */
1317         if (rx_conf->rx_deferred_start) {
1318                 PMD_INIT_LOG(ERR, "Rx deferred start not supported");
1319                 return -EINVAL;
1320         }
1321
1322         /* Round up nb_desc to available qsize and validate max number of desc */
1323         nb_desc = nicvf_qsize_cq_roundup(nb_desc);
1324         if (nb_desc == 0) {
1325                 PMD_INIT_LOG(ERR, "Value of nb_desc beyond available hw cq qsize");
1326                 return -EINVAL;
1327         }
1328
1329         /* Check rx_free_thresh upper bound */
1330         rx_free_thresh = (uint16_t)((rx_conf->rx_free_thresh) ?
1331                                 rx_conf->rx_free_thresh :
1332                                 NICVF_DEFAULT_RX_FREE_THRESH);
1333         if (rx_free_thresh > NICVF_MAX_RX_FREE_THRESH ||
1334                 rx_free_thresh >= nb_desc * .75) {
1335                 PMD_INIT_LOG(ERR, "rx_free_thresh greater than expected %d",
1336                                 rx_free_thresh);
1337                 return -EINVAL;
1338         }
1339
1340         /* Free memory prior to re-allocation if needed */
1341         if (dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)] != NULL) {
1342                 PMD_RX_LOG(DEBUG, "Freeing memory prior to re-allocation %d",
1343                                 nicvf_netdev_qidx(nic, qidx));
1344                 nicvf_dev_rx_queue_release(
1345                         dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)]);
1346                 dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)] = NULL;
1347         }
1348
1349         /* Allocate rxq memory */
1350         rxq = rte_zmalloc_socket("ethdev rx queue", sizeof(struct nicvf_rxq),
1351                                         RTE_CACHE_LINE_SIZE, nic->node);
1352         if (rxq == NULL) {
1353                 PMD_INIT_LOG(ERR, "Failed to allocate rxq=%d",
1354                              nicvf_netdev_qidx(nic, qidx));
1355                 return -ENOMEM;
1356         }
1357
1358         rxq->nic = nic;
1359         rxq->pool = mp;
1360         rxq->queue_id = qidx;
1361         rxq->port_id = dev->data->port_id;
1362         rxq->rx_free_thresh = rx_free_thresh;
1363         rxq->rx_drop_en = rx_conf->rx_drop_en;
1364         rxq->cq_status = nicvf_qset_base(nic, qidx) + NIC_QSET_CQ_0_7_STATUS;
1365         rxq->cq_door = nicvf_qset_base(nic, qidx) + NIC_QSET_CQ_0_7_DOOR;
1366         rxq->precharge_cnt = 0;
1367
1368         if (nicvf_hw_cap(nic) & NICVF_CAP_CQE_RX2)
1369                 rxq->rbptr_offset = NICVF_CQE_RX2_RBPTR_WORD;
1370         else
1371                 rxq->rbptr_offset = NICVF_CQE_RBPTR_WORD;
1372
1373         nicvf_rxq_mbuf_setup(rxq);
1374
1375         /* Alloc completion queue */
1376         if (nicvf_qset_cq_alloc(dev, nic, rxq, rxq->queue_id, nb_desc)) {
1377                 PMD_INIT_LOG(ERR, "failed to allocate cq %u", rxq->queue_id);
1378                 nicvf_dev_rx_queue_release(rxq);
1379                 return -ENOMEM;
1380         }
1381
1382         nicvf_rx_queue_reset(rxq);
1383
1384         PMD_INIT_LOG(DEBUG, "[%d] rxq=%p pool=%s nb_desc=(%d/%d)"
1385                         " phy=0x%" PRIx64 " offloads=0x%" PRIx64,
1386                         nicvf_netdev_qidx(nic, qidx), rxq, mp->name, nb_desc,
1387                         rte_mempool_avail_count(mp), rxq->phys, conf_offloads);
1388
1389         dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)] = rxq;
1390         dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
1391                 RTE_ETH_QUEUE_STATE_STOPPED;
1392         return 0;
1393 }
1394
1395 static void
1396 nicvf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1397 {
1398         struct nicvf *nic = nicvf_pmd_priv(dev);
1399         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1400
1401         PMD_INIT_FUNC_TRACE();
1402
1403         /* Autonegotiation may be disabled */
1404         dev_info->speed_capa = ETH_LINK_SPEED_FIXED;
1405         dev_info->speed_capa |= ETH_LINK_SPEED_10M | ETH_LINK_SPEED_100M |
1406                                  ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
1407         if (nicvf_hw_version(nic) != PCI_SUB_DEVICE_ID_CN81XX_NICVF)
1408                 dev_info->speed_capa |= ETH_LINK_SPEED_40G;
1409
1410         dev_info->min_rx_bufsize = ETHER_MIN_MTU;
1411         dev_info->max_rx_pktlen = NIC_HW_MAX_FRS;
1412         dev_info->max_rx_queues =
1413                         (uint16_t)MAX_RCV_QUEUES_PER_QS * (MAX_SQS_PER_VF + 1);
1414         dev_info->max_tx_queues =
1415                         (uint16_t)MAX_SND_QUEUES_PER_QS * (MAX_SQS_PER_VF + 1);
1416         dev_info->max_mac_addrs = 1;
1417         dev_info->max_vfs = pci_dev->max_vfs;
1418
1419         dev_info->rx_offload_capa = NICVF_RX_OFFLOAD_CAPA;
1420         dev_info->tx_offload_capa = NICVF_TX_OFFLOAD_CAPA;
1421         dev_info->rx_queue_offload_capa = NICVF_RX_OFFLOAD_CAPA;
1422         dev_info->tx_queue_offload_capa = NICVF_TX_OFFLOAD_CAPA;
1423
1424         dev_info->reta_size = nic->rss_info.rss_size;
1425         dev_info->hash_key_size = RSS_HASH_KEY_BYTE_SIZE;
1426         dev_info->flow_type_rss_offloads = NICVF_RSS_OFFLOAD_PASS1;
1427         if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING)
1428                 dev_info->flow_type_rss_offloads |= NICVF_RSS_OFFLOAD_TUNNEL;
1429
1430         dev_info->default_rxconf = (struct rte_eth_rxconf) {
1431                 .rx_free_thresh = NICVF_DEFAULT_RX_FREE_THRESH,
1432                 .rx_drop_en = 0,
1433                 .offloads = DEV_RX_OFFLOAD_CRC_STRIP,
1434         };
1435
1436         dev_info->default_txconf = (struct rte_eth_txconf) {
1437                 .tx_free_thresh = NICVF_DEFAULT_TX_FREE_THRESH,
1438                 .txq_flags =
1439                         ETH_TXQ_FLAGS_NOMULTSEGS  |
1440                         ETH_TXQ_FLAGS_NOREFCOUNT  |
1441                         ETH_TXQ_FLAGS_NOMULTMEMP  |
1442                         ETH_TXQ_FLAGS_NOVLANOFFL  |
1443                         ETH_TXQ_FLAGS_NOXSUMSCTP,
1444                 .offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE |
1445                         DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM   |
1446                         DEV_TX_OFFLOAD_UDP_CKSUM          |
1447                         DEV_TX_OFFLOAD_TCP_CKSUM,
1448         };
1449 }
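/*
 * default_txconf above advertises both the legacy ETH_TXQ_FLAGS_* bits and
 * the equivalent DEV_TX_OFFLOAD_* flags, since applications written before
 * the offload API rework may still inspect either representation.
 */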
1450
1451 static nicvf_iova_addr_t
1452 rbdr_rte_mempool_get(void *dev, void *opaque)
1453 {
1454         uint16_t qidx;
1455         uintptr_t mbuf;
1456         struct nicvf_rxq *rxq;
1457         struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)dev;
1458         struct nicvf *nic = (struct nicvf *)opaque;
1459         uint16_t rx_start, rx_end;
1460
1461         /* Get queue ranges for this VF */
1462         nicvf_rx_range(eth_dev, nic, &rx_start, &rx_end);
1463
1464         for (qidx = rx_start; qidx <= rx_end; qidx++) {
1465                 rxq = eth_dev->data->rx_queues[qidx];
1466                 /* Maintain equal buffer count across all pools */
1467                 if (rxq->precharge_cnt >= rxq->qlen_mask)
1468                         continue;
1469                 rxq->precharge_cnt++;
1470                 mbuf = (uintptr_t)rte_pktmbuf_alloc(rxq->pool);
1471                 if (mbuf)
1472                         return nicvf_mbuff_virt2phy(mbuf, rxq->mbuf_phys_off);
1473         }
1474         return 0;
1475 }
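/*
 * Callback used while pre-charging the RBDR with receive buffers: each call
 * returns the IOVA of one freshly allocated mbuf, drawing from the rx
 * queues in turn so that no single pool is drained (precharge_cnt keeps the
 * per-pool count balanced). Returning 0 signals allocation failure.
 */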
1476
1477 static int
1478 nicvf_vf_start(struct rte_eth_dev *dev, struct nicvf *nic, uint32_t rbdrsz)
1479 {
1480         int ret;
1481         uint16_t qidx, data_off;
1482         uint32_t total_rxq_desc, nb_rbdr_desc, exp_buffs;
1483         uint64_t mbuf_phys_off = 0;
1484         struct nicvf_rxq *rxq;
1485         struct rte_mbuf *mbuf;
1486         uint16_t rx_start, rx_end;
1487         uint16_t tx_start, tx_end;
1488         bool vlan_strip;
1489
1490         PMD_INIT_FUNC_TRACE();
1491
1492         /* Userspace process exited without proper shutdown in last run */
1493         if (nicvf_qset_rbdr_active(nic, 0))
1494                 nicvf_vf_stop(dev, nic, false);
1495
1496         /* Get queue ranges for this VF */
1497         nicvf_rx_range(dev, nic, &rx_start, &rx_end);
1498
1499         /*
1500          * The thunderx nicvf PMD can support more than one pool per port only when
1501          * 1) the data payload size is the same across all the pools in a given port
1502          * AND
1503          * 2) all mbufs in the pools are from the same hugepage
1504          * AND
1505          * 3) the mbuf metadata size is the same across all the pools in a given port.
1506          *
1507          * This is to support existing applications that use multiple pools per port;
1508          * using multiple pools for QoS, however, is not addressed. See the worked
1509          * example after the validation loop below.
1510          */
1511
1512         /* Validate mempool attributes */
1513         for (qidx = rx_start; qidx <= rx_end; qidx++) {
1514                 rxq = dev->data->rx_queues[qidx];
1515                 rxq->mbuf_phys_off = nicvf_mempool_phy_offset(rxq->pool);
1516                 mbuf = rte_pktmbuf_alloc(rxq->pool);
1517                 if (mbuf == NULL) {
1518                         PMD_INIT_LOG(ERR, "Failed to allocate mbuf VF%d qid=%d "
1519                                      "pool=%s",
1520                                      nic->vf_id, qidx, rxq->pool->name);
1521                         return -ENOMEM;
1522                 }
1523                 data_off = nicvf_mbuff_meta_length(mbuf);
1524                 data_off += RTE_PKTMBUF_HEADROOM;
1525                 rte_pktmbuf_free(mbuf);
1526
1527                 if (data_off % RTE_CACHE_LINE_SIZE) {
1528                         PMD_INIT_LOG(ERR, "%s: unaligned data_off=%d delta=%d",
1529                                 rxq->pool->name, data_off,
1530                                 data_off % RTE_CACHE_LINE_SIZE);
1531                         return -EINVAL;
1532                 }
1533                 rxq->mbuf_phys_off -= data_off;
1534
1535                 if (mbuf_phys_off == 0)
1536                         mbuf_phys_off = rxq->mbuf_phys_off;
1537                 if (mbuf_phys_off != rxq->mbuf_phys_off) {
1538                         PMD_INIT_LOG(ERR, "pool params not same, %s VF%d %"
1539                                      PRIx64, rxq->pool->name, nic->vf_id,
1540                                      mbuf_phys_off);
1541                         return -EINVAL;
1542                 }
1543         }
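        /*
         * Worked example for the validation above (numbers are illustrative
         * and assume the common 128-byte RTE_PKTMBUF_HEADROOM). For a pool
         * whose mbuf metadata (struct rte_mbuf plus any private area)
         * occupies 128 bytes:
         *
         *   data_off = 128 (metadata) + 128 (headroom) = 256
         *   256 % RTE_CACHE_LINE_SIZE == 0   -> pool accepted
         *
         * and rxq->mbuf_phys_off is adjusted by 256 so that later
         * virt-to-phys translation of an mbuf pointer lands on the packet
         * data rather than the mbuf header. Every other pool on the port
         * must produce the same mbuf_phys_off, otherwise the loop bails out
         * with -EINVAL.
         */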
1544
1545         /* Check the level of buffers in the pool */
1546         total_rxq_desc = 0;
1547         for (qidx = rx_start; qidx <= rx_end; qidx++) {
1548                 rxq = dev->data->rx_queues[qidx];
1549                 /* Count the total number of rxq descs */
1550                 total_rxq_desc += rxq->qlen_mask + 1;
1551                 exp_buffs = RTE_MEMPOOL_CACHE_MAX_SIZE + rxq->rx_free_thresh;
1552                 exp_buffs *= dev->data->nb_rx_queues;
1553                 if (rte_mempool_avail_count(rxq->pool) < exp_buffs) {
1554                         PMD_INIT_LOG(ERR, "Buffer shortage in pool=%s (%d/%d)",
1555                                      rxq->pool->name,
1556                                      rte_mempool_avail_count(rxq->pool),
1557                                      exp_buffs);
1558                         return -ENOENT;
1559                 }
1560         }
1561
1562         /* Check RBDR desc overflow */
1563         ret = nicvf_qsize_rbdr_roundup(total_rxq_desc);
1564         if (ret == 0) {
1565                 PMD_INIT_LOG(ERR, "Reached RBDR desc limit, reduce the number "
1566                              "of descs VF%d", nic->vf_id);
1567                 return -ENOMEM;
1568         }
1569
1570         /* Enable qset */
1571         ret = nicvf_qset_config(nic);
1572         if (ret) {
1573                 PMD_INIT_LOG(ERR, "Failed to enable qset %d VF%d", ret,
1574                              nic->vf_id);
1575                 return ret;
1576         }
1577
1578         /* Allocate RBDR and RBDR ring desc */
1579         nb_rbdr_desc = nicvf_qsize_rbdr_roundup(total_rxq_desc);
1580         ret = nicvf_qset_rbdr_alloc(dev, nic, nb_rbdr_desc, rbdrsz);
1581         if (ret) {
1582                 PMD_INIT_LOG(ERR, "Failed to allocate memory for RBDR "
1583                              "VF%d", nic->vf_id);
1584                 goto qset_reclaim;
1585         }
1586
1587         /* Enable and configure RBDR registers */
1588         ret = nicvf_qset_rbdr_config(nic, 0);
1589         if (ret) {
1590                 PMD_INIT_LOG(ERR, "Failed to configure rbdr %d VF%d", ret,
1591                              nic->vf_id);
1592                 goto qset_rbdr_free;
1593         }
1594
1595         /* Fill rte_mempool buffers in RBDR pool and precharge it */
1596         ret = nicvf_qset_rbdr_precharge(dev, nic, 0, rbdr_rte_mempool_get,
1597                                         total_rxq_desc);
1598         if (ret) {
1599                 PMD_INIT_LOG(ERR, "Failed to fill rbdr %d VF%d", ret,
1600                              nic->vf_id);
1601                 goto qset_rbdr_reclaim;
1602         }
1603
1604         PMD_DRV_LOG(INFO, "Filled %d out of %d entries in RBDR VF%d",
1605                      nic->rbdr->tail, nb_rbdr_desc, nic->vf_id);
1606
1607         /* Configure VLAN Strip */
1608         vlan_strip = !!(dev->data->dev_conf.rxmode.offloads &
1609                         DEV_RX_OFFLOAD_VLAN_STRIP);
1610         nicvf_vlan_hw_strip(nic, vlan_strip);
1611
1612         /* Based on the packet type (IPv4 or IPv6), the nicvf HW aligns L3 data
1613          * to a 64-bit memory address.
1614          * The alignment creates a hole in the mbuf (between the end of headroom
1615          * and the start of packet data). The new revision of the HW provides an
1616          * option to disable the L3 alignment feature and make the mbuf layout
1617          * look more like other NICs. For better application compatibility,
1618          * disable it on the hardware revisions that support this option.
1619          */
1620         nicvf_apad_config(nic, false);
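        /*
         * Illustrative layout (offsets and pad size are hypothetical): with
         * L3 alignment enabled the HW shifts the packet so the IP header
         * starts on an 8-byte boundary, leaving a small hole between the
         * headroom and the frame:
         *
         *   [ headroom ][ pad ][ 14B Ethernet header ][ IP header ... ]
         *
         * so the Ethernet header is no longer where applications expect it.
         * With nicvf_apad_config(nic, false) the pad is suppressed:
         *
         *   [ headroom ][ 14B Ethernet header ][ IP header ... ]
         *
         * matching the layout produced by most other NICs/PMDs.
         */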
1621
1622         /* Get queue ranges for this VF */
1623         nicvf_tx_range(dev, nic, &tx_start, &tx_end);
1624
1625         /* Configure TX queues */
1626         for (qidx = tx_start; qidx <= tx_end; qidx++) {
1627                 ret = nicvf_vf_start_tx_queue(dev, nic,
1628                         qidx % MAX_SND_QUEUES_PER_QS);
1629                 if (ret)
1630                         goto start_txq_error;
1631         }
1632
1633         /* Configure RX queues */
1634         for (qidx = rx_start; qidx <= rx_end; qidx++) {
1635                 ret = nicvf_vf_start_rx_queue(dev, nic,
1636                         qidx % MAX_RCV_QUEUES_PER_QS);
1637                 if (ret)
1638                         goto start_rxq_error;
1639         }
1640
1641         if (!nic->sqs_mode) {
1642                 /* Configure CPI algorithm */
1643                 ret = nicvf_configure_cpi(dev);
1644                 if (ret)
1645                         goto start_txq_error;
1646
1647                 ret = nicvf_mbox_get_rss_size(nic);
1648                 if (ret) {
1649                         PMD_INIT_LOG(ERR, "Failed to get rss table size");
1650                         goto qset_rss_error;
1651                 }
1652
1653                 /* Configure RSS */
1654                 ret = nicvf_configure_rss(dev);
1655                 if (ret)
1656                         goto qset_rss_error;
1657         }
1658
1659         /* Done; let the PF move the BGX's RX and TX switches to the ON position */
1660         nicvf_mbox_cfg_done(nic);
1661         return 0;
1662
1663 qset_rss_error:
1664         nicvf_rss_term(nic);
1665 start_rxq_error:
1666         for (qidx = rx_start; qidx <= rx_end; qidx++)
1667                 nicvf_vf_stop_rx_queue(dev, nic, qidx % MAX_RCV_QUEUES_PER_QS);
1668 start_txq_error:
1669         for (qidx = tx_start; qidx <= tx_end; qidx++)
1670                 nicvf_vf_stop_tx_queue(dev, nic, qidx % MAX_SND_QUEUES_PER_QS);
1671 qset_rbdr_reclaim:
1672         nicvf_qset_rbdr_reclaim(nic, 0);
1673         nicvf_rbdr_release_mbufs(dev, nic);
1674 qset_rbdr_free:
1675         if (nic->rbdr) {
1676                 rte_free(nic->rbdr);
1677                 nic->rbdr = NULL;
1678         }
1679 qset_reclaim:
1680         nicvf_qset_reclaim(nic);
1681         return ret;
1682 }
1683
1684 static int
1685 nicvf_dev_start(struct rte_eth_dev *dev)
1686 {
1687         uint16_t qidx;
1688         int ret;
1689         size_t i;
1690         struct nicvf *nic = nicvf_pmd_priv(dev);
1691         struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
1692         uint16_t mtu;
1693         uint32_t buffsz = 0, rbdrsz = 0;
1694         struct rte_pktmbuf_pool_private *mbp_priv;
1695         struct nicvf_rxq *rxq;
1696
1697         PMD_INIT_FUNC_TRACE();
1698
1699         /* This function must be called for a primary device */
1700         assert_primary(nic);
1701
1702         /* Validate RBDR buff size */
1703         for (qidx = 0; qidx < dev->data->nb_rx_queues; qidx++) {
1704                 rxq = dev->data->rx_queues[qidx];
1705                 mbp_priv = rte_mempool_get_priv(rxq->pool);
1706                 buffsz = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
1707                 if (buffsz % 128) {
1708                         PMD_INIT_LOG(ERR, "rxbuf size must be a multiple of 128");
1709                         return -EINVAL;
1710                 }
1711                 if (rbdrsz == 0)
1712                         rbdrsz = buffsz;
1713                 if (rbdrsz != buffsz) {
1714                         PMD_INIT_LOG(ERR, "buffsz not same, qidx=%d (%d/%d)",
1715                                      qidx, rbdrsz, buffsz);
1716                         return -EINVAL;
1717                 }
1718         }
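        /*
         * Worked example for the check above, assuming the usual mempool
         * defaults RTE_MBUF_DEFAULT_BUF_SIZE (2048 + RTE_PKTMBUF_HEADROOM)
         * and a 128-byte headroom:
         *
         *   buffsz = 2176 - 128 = 2048;  2048 % 128 == 0  -> accepted
         *
         * so a pool created with rte_pktmbuf_pool_create(...,
         * RTE_MBUF_DEFAULT_BUF_SIZE, socket_id) passes. All Rx queues must
         * report the same buffsz because a single RBDR, programmed with one
         * fixed buffer size, feeds every receive queue of the qset.
         */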
1719
1720         /* Configure loopback */
1721         ret = nicvf_loopback_config(nic, dev->data->dev_conf.lpbk_mode);
1722         if (ret) {
1723                 PMD_INIT_LOG(ERR, "Failed to configure loopback %d", ret);
1724                 return ret;
1725         }
1726
1727         /* Reset all statistics counters attached to this port */
1728         ret = nicvf_mbox_reset_stat_counters(nic, 0x3FFF, 0x1F, 0xFFFF, 0xFFFF);
1729         if (ret) {
1730                 PMD_INIT_LOG(ERR, "Failed to reset stat counters %d", ret);
1731                 return ret;
1732         }
1733
1734         /* Set up scatter mode if needed by jumbo frames */
1735         if (dev->data->dev_conf.rxmode.max_rx_pkt_len +
1736                                             2 * VLAN_TAG_SIZE > buffsz)
1737                 dev->data->scattered_rx = 1;
1738         if ((rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER) != 0)
1739                 dev->data->scattered_rx = 1;
1740
1741         /* Set up the MTU based on max_rx_pkt_len or the default */
1742         mtu = dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME ?
1743                 dev->data->dev_conf.rxmode.max_rx_pkt_len
1744                         - ETHER_HDR_LEN - ETHER_CRC_LEN
1745                 : ETHER_MTU;
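        /*
         * Worked example: with DEV_RX_OFFLOAD_JUMBO_FRAME set and
         * max_rx_pkt_len = 9216,
         *
         *   mtu = 9216 - ETHER_HDR_LEN (14) - ETHER_CRC_LEN (4) = 9198
         *
         * while without the jumbo offload the standard ETHER_MTU (1500)
         * is used.
         */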
1746
1747         if (nicvf_dev_set_mtu(dev, mtu)) {
1748                 PMD_INIT_LOG(ERR, "Failed to set default mtu size");
1749                 return -EBUSY;
1750         }
1751
1752         ret = nicvf_vf_start(dev, nic, rbdrsz);
1753         if (ret != 0)
1754                 return ret;
1755
1756         for (i = 0; i < nic->sqs_count; i++) {
1757                 assert(nic->snicvf[i]);
1758
1759                 ret = nicvf_vf_start(dev, nic->snicvf[i], rbdrsz);
1760                 if (ret != 0)
1761                         return ret;
1762         }
1763
1764         /* Configure callbacks based on scatter mode */
1765         nicvf_set_tx_function(dev);
1766         nicvf_set_rx_function(dev);
1767
1768         return 0;
1769 }
1770
1771 static void
1772 nicvf_dev_stop_cleanup(struct rte_eth_dev *dev, bool cleanup)
1773 {
1774         size_t i;
1775         int ret;
1776         struct nicvf *nic = nicvf_pmd_priv(dev);
1777
1778         PMD_INIT_FUNC_TRACE();
1779
1780         /* Tear down the secondary VFs first */
1781         for (i = 0; i < nic->sqs_count; i++) {
1782                 if (!nic->snicvf[i])
1783                         continue;
1784
1785                 nicvf_vf_stop(dev, nic->snicvf[i], cleanup);
1786         }
1787
1788         /* Stop the primary VF now */
1789         nicvf_vf_stop(dev, nic, cleanup);
1790
1791         /* Disable loopback */
1792         ret = nicvf_loopback_config(nic, 0);
1793         if (ret)
1794                 PMD_INIT_LOG(ERR, "Failed to disable loopback %d", ret);
1795
1796         /* Reclaim CPI configuration */
1797         ret = nicvf_mbox_config_cpi(nic, 0);
1798         if (ret)
1799                 PMD_INIT_LOG(ERR, "Failed to reclaim CPI config %d", ret);
1800 }
1801
1802 static void
1803 nicvf_dev_stop(struct rte_eth_dev *dev)
1804 {
1805         PMD_INIT_FUNC_TRACE();
1806
1807         nicvf_dev_stop_cleanup(dev, false);
1808 }
1809
1810 static void
1811 nicvf_vf_stop(struct rte_eth_dev *dev, struct nicvf *nic, bool cleanup)
1812 {
1813         int ret;
1814         uint16_t qidx;
1815         uint16_t tx_start, tx_end;
1816         uint16_t rx_start, rx_end;
1817
1818         PMD_INIT_FUNC_TRACE();
1819
1820         if (cleanup) {
1821                 /* Let the PF move the BGX's RX and TX switches to the OFF position */
1822                 nicvf_mbox_shutdown(nic);
1823         }
1824
1825         /* Disable VLAN Strip */
1826         nicvf_vlan_hw_strip(nic, 0);
1827
1828         /* Get queue ranges for this VF */
1829         nicvf_tx_range(dev, nic, &tx_start, &tx_end);
1830
1831         for (qidx = tx_start; qidx <= tx_end; qidx++)
1832                 nicvf_vf_stop_tx_queue(dev, nic, qidx % MAX_SND_QUEUES_PER_QS);
1833
1834         /* Get queue ranges for this VF */
1835         nicvf_rx_range(dev, nic, &rx_start, &rx_end);
1836
1837         /* Reclaim rq */
1838         for (qidx = rx_start; qidx <= rx_end; qidx++)
1839                 nicvf_vf_stop_rx_queue(dev, nic, qidx % MAX_RCV_QUEUES_PER_QS);
1840
1841         /* Reclaim RBDR */
1842         ret = nicvf_qset_rbdr_reclaim(nic, 0);
1843         if (ret)
1844                 PMD_INIT_LOG(ERR, "Failed to reclaim RBDR %d", ret);
1845
1846         /* Move all charged buffers in RBDR back to pool */
1847         if (nic->rbdr != NULL)
1848                 nicvf_rbdr_release_mbufs(dev, nic);
1849
1850         /* Disable qset */
1851         ret = nicvf_qset_reclaim(nic);
1852         if (ret)
1853                 PMD_INIT_LOG(ERR, "Failed to disable qset %d", ret);
1854
1855         /* Disable all interrupts */
1856         nicvf_disable_all_interrupts(nic);
1857
1858         /* Free RBDR SW structure */
1859         if (nic->rbdr) {
1860                 rte_free(nic->rbdr);
1861                 nic->rbdr = NULL;
1862         }
1863 }
1864
1865 static void
1866 nicvf_dev_close(struct rte_eth_dev *dev)
1867 {
1868         size_t i;
1869         struct nicvf *nic = nicvf_pmd_priv(dev);
1870
1871         PMD_INIT_FUNC_TRACE();
1872
1873         nicvf_dev_stop_cleanup(dev, true);
1874         nicvf_periodic_alarm_stop(nicvf_interrupt, dev);
1875
1876         for (i = 0; i < nic->sqs_count; i++) {
1877                 if (!nic->snicvf[i])
1878                         continue;
1879
1880                 nicvf_periodic_alarm_stop(nicvf_vf_interrupt, nic->snicvf[i]);
1881         }
1882 }
1883
1884 static int
1885 nicvf_request_sqs(struct nicvf *nic)
1886 {
1887         size_t i;
1888
1889         assert_primary(nic);
1890         assert(nic->sqs_count > 0);
1891         assert(nic->sqs_count <= MAX_SQS_PER_VF);
1892
1893         /* Set the number of Rx/Tx queues in each of the SQ sets */
1894         for (i = 0; i < nic->sqs_count; i++) {
1895                 if (nicvf_svf_empty())
1896                         rte_panic("Cannot assign sufficient number of "
1897                                   "secondary queues to primary VF%" PRIu8 "\n",
1898                                   nic->vf_id);
1899
1900                 nic->snicvf[i] = nicvf_svf_pop();
1901                 nic->snicvf[i]->sqs_id = i;
1902         }
1903
1904         return nicvf_mbox_request_sqs(nic);
1905 }
1906
1907 static int
1908 nicvf_dev_configure(struct rte_eth_dev *dev)
1909 {
1910         struct rte_eth_dev_data *data = dev->data;
1911         struct rte_eth_conf *conf = &data->dev_conf;
1912         struct rte_eth_rxmode *rxmode = &conf->rxmode;
1913         struct rte_eth_txmode *txmode = &conf->txmode;
1914         struct nicvf *nic = nicvf_pmd_priv(dev);
1915         uint8_t cqcount;
1916         uint64_t conf_rx_offloads, rx_offload_capa;
1917         uint64_t conf_tx_offloads, tx_offload_capa;
1918
1919         PMD_INIT_FUNC_TRACE();
1920
1921         if (!rte_eal_has_hugepages()) {
1922                 PMD_INIT_LOG(INFO, "Huge page is not configured");
1923                 return -EINVAL;
1924         }
1925
1926         conf_tx_offloads = dev->data->dev_conf.txmode.offloads;
1927         tx_offload_capa = NICVF_TX_OFFLOAD_CAPA;
1928
1929         if ((conf_tx_offloads & tx_offload_capa) != conf_tx_offloads) {
1930                 PMD_INIT_LOG(ERR, "Some Tx offloads are not supported "
1931                       "requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
1932                       conf_tx_offloads, tx_offload_capa);
1933                 return -ENOTSUP;
1934         }
1935
1936         if (rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM) {
1937                 PMD_INIT_LOG(NOTICE, "Rx checksum not supported");
1938                 rxmode->offloads &= ~DEV_RX_OFFLOAD_CHECKSUM;
1939         }
1940
1941         conf_rx_offloads = rxmode->offloads;
1942         rx_offload_capa = NICVF_RX_OFFLOAD_CAPA;
1943
1944         if ((conf_rx_offloads & rx_offload_capa) != conf_rx_offloads) {
1945                 PMD_INIT_LOG(ERR, "Some Rx offloads are not supported "
1946                       "requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
1947                       conf_rx_offloads, rx_offload_capa);
1948                 return -ENOTSUP;
1949         }
1950
1951         if ((conf_rx_offloads & DEV_RX_OFFLOAD_CRC_STRIP) == 0) {
1952                 PMD_INIT_LOG(NOTICE, "Can't disable hw crc strip");
1953                 rxmode->offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
1954         }
1955
1956         if (txmode->mq_mode) {
1957                 PMD_INIT_LOG(INFO, "Tx mq_mode DCB or VMDq not supported");
1958                 return -EINVAL;
1959         }
1960
1961         if (rxmode->mq_mode != ETH_MQ_RX_NONE &&
1962                 rxmode->mq_mode != ETH_MQ_RX_RSS) {
1963                 PMD_INIT_LOG(INFO, "Unsupported rx qmode %d", rxmode->mq_mode);
1964                 return -EINVAL;
1965         }
1966
1967         if (rxmode->split_hdr_size) {
1968                 PMD_INIT_LOG(INFO, "Rxmode does not support split header");
1969                 return -EINVAL;
1970         }
1971
1972         if (conf->link_speeds & ETH_LINK_SPEED_FIXED) {
1973                 PMD_INIT_LOG(INFO, "Setting link speed/duplex not supported");
1974                 return -EINVAL;
1975         }
1976
1977         if (conf->dcb_capability_en) {
1978                 PMD_INIT_LOG(INFO, "DCB enable not supported");
1979                 return -EINVAL;
1980         }
1981
1982         if (conf->fdir_conf.mode != RTE_FDIR_MODE_NONE) {
1983                 PMD_INIT_LOG(INFO, "Flow director not supported");
1984                 return -EINVAL;
1985         }
1986
1987         assert_primary(nic);
1988         NICVF_STATIC_ASSERT(MAX_RCV_QUEUES_PER_QS == MAX_SND_QUEUES_PER_QS);
1989         cqcount = RTE_MAX(data->nb_tx_queues, data->nb_rx_queues);
1990         if (cqcount > MAX_RCV_QUEUES_PER_QS) {
1991                 nic->sqs_count = RTE_ALIGN_CEIL(cqcount, MAX_RCV_QUEUES_PER_QS);
1992                 nic->sqs_count = (nic->sqs_count / MAX_RCV_QUEUES_PER_QS) - 1;
1993         } else {
1994                 nic->sqs_count = 0;
1995         }
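        /*
         * Worked example (assuming MAX_RCV_QUEUES_PER_QS == 8, the ThunderX
         * qset size): an application configuring 20 Rx/Tx queues yields
         *
         *   cqcount   = RTE_MAX(20, 20) = 20
         *   sqs_count = RTE_ALIGN_CEIL(20, 8) / 8 - 1 = 24 / 8 - 1 = 2
         *
         * i.e. the primary qset serves queues 0-7 and two secondary qsets
         * serve queues 8-15 and 16-19.
         */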
1996
1997         assert(nic->sqs_count <= MAX_SQS_PER_VF);
1998
1999         if (nic->sqs_count > 0) {
2000                 if (nicvf_request_sqs(nic)) {
2001                         rte_panic("Cannot assign sufficient number of "
2002                                   "secondary queues to PORT%d VF%" PRIu8 "\n",
2003                                   dev->data->port_id, nic->vf_id);
2004                 }
2005         }
2006
2007         PMD_INIT_LOG(DEBUG, "Configured ethdev port%d hwcap=0x%" PRIx64,
2008                 dev->data->port_id, nicvf_hw_cap(nic));
2009
2010         return 0;
2011 }
2012
2013 /* Initialize and register the driver with the DPDK application */
2014 static const struct eth_dev_ops nicvf_eth_dev_ops = {
2015         .dev_configure            = nicvf_dev_configure,
2016         .dev_start                = nicvf_dev_start,
2017         .dev_stop                 = nicvf_dev_stop,
2018         .link_update              = nicvf_dev_link_update,
2019         .dev_close                = nicvf_dev_close,
2020         .stats_get                = nicvf_dev_stats_get,
2021         .stats_reset              = nicvf_dev_stats_reset,
2022         .promiscuous_enable       = nicvf_dev_promisc_enable,
2023         .dev_infos_get            = nicvf_dev_info_get,
2024         .dev_supported_ptypes_get = nicvf_dev_supported_ptypes_get,
2025         .mtu_set                  = nicvf_dev_set_mtu,
2026         .reta_update              = nicvf_dev_reta_update,
2027         .reta_query               = nicvf_dev_reta_query,
2028         .rss_hash_update          = nicvf_dev_rss_hash_update,
2029         .rss_hash_conf_get        = nicvf_dev_rss_hash_conf_get,
2030         .rx_queue_start           = nicvf_dev_rx_queue_start,
2031         .rx_queue_stop            = nicvf_dev_rx_queue_stop,
2032         .tx_queue_start           = nicvf_dev_tx_queue_start,
2033         .tx_queue_stop            = nicvf_dev_tx_queue_stop,
2034         .rx_queue_setup           = nicvf_dev_rx_queue_setup,
2035         .rx_queue_release         = nicvf_dev_rx_queue_release,
2036         .rx_queue_count           = nicvf_dev_rx_queue_count,
2037         .tx_queue_setup           = nicvf_dev_tx_queue_setup,
2038         .tx_queue_release         = nicvf_dev_tx_queue_release,
2039         .get_reg                  = nicvf_dev_get_regs,
2040 };
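/*
 * Hedged usage sketch: a minimal application-side sequence that reaches the
 * ops above through the generic ethdev API ("port_id" and "pool" are
 * hypothetical, assumed to come from the application's own setup):
 *
 *   struct rte_eth_conf conf = {
 *           .rxmode = { .mq_mode = ETH_MQ_RX_NONE,
 *                       .offloads = DEV_RX_OFFLOAD_CRC_STRIP },
 *   };
 *   rte_eth_dev_configure(port_id, 1, 1, &conf);         -> nicvf_dev_configure
 *   rte_eth_rx_queue_setup(port_id, 0, 512,
 *           rte_eth_dev_socket_id(port_id), NULL, pool); -> nicvf_dev_rx_queue_setup
 *   rte_eth_tx_queue_setup(port_id, 0, 512,
 *           rte_eth_dev_socket_id(port_id), NULL);       -> nicvf_dev_tx_queue_setup
 *   rte_eth_dev_start(port_id);                          -> nicvf_dev_start
 */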
2041
2042 static int
2043 nicvf_eth_dev_init(struct rte_eth_dev *eth_dev)
2044 {
2045         int ret;
2046         struct rte_pci_device *pci_dev;
2047         struct nicvf *nic = nicvf_pmd_priv(eth_dev);
2048
2049         PMD_INIT_FUNC_TRACE();
2050
2051         eth_dev->dev_ops = &nicvf_eth_dev_ops;
2052
2053         /* For secondary processes, the primary has done all the work */
2054         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2055                 if (nic) {
2056                         /* Setup callbacks for secondary process */
2057                         nicvf_set_tx_function(eth_dev);
2058                         nicvf_set_rx_function(eth_dev);
2059                         return 0;
2060                 } else {
2061                         /* If nic == NULL then this is a secondary queue set
2062                          * function, so the ethdev must be released by the caller */
2063                         return ENOTSUP;
2064                 }
2065         }
2066
2067         pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
2068         rte_eth_copy_pci_info(eth_dev, pci_dev);
2069
2070         nic->device_id = pci_dev->id.device_id;
2071         nic->vendor_id = pci_dev->id.vendor_id;
2072         nic->subsystem_device_id = pci_dev->id.subsystem_device_id;
2073         nic->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
2074
2075         PMD_INIT_LOG(DEBUG, "nicvf: device (%x:%x) %u:%u:%u:%u",
2076                         pci_dev->id.vendor_id, pci_dev->id.device_id,
2077                         pci_dev->addr.domain, pci_dev->addr.bus,
2078                         pci_dev->addr.devid, pci_dev->addr.function);
2079
2080         nic->reg_base = (uintptr_t)pci_dev->mem_resource[0].addr;
2081         if (!nic->reg_base) {
2082                 PMD_INIT_LOG(ERR, "Failed to map BAR0");
2083                 ret = -ENODEV;
2084                 goto fail;
2085         }
2086
2087         nicvf_disable_all_interrupts(nic);
2088
2089         ret = nicvf_periodic_alarm_start(nicvf_interrupt, eth_dev);
2090         if (ret) {
2091                 PMD_INIT_LOG(ERR, "Failed to start periodic alarm");
2092                 goto fail;
2093         }
2094
2095         ret = nicvf_mbox_check_pf_ready(nic);
2096         if (ret) {
2097                 PMD_INIT_LOG(ERR, "Failed to get ready message from PF");
2098                 goto alarm_fail;
2099         } else {
2100                 PMD_INIT_LOG(INFO,
2101                         "node=%d vf=%d mode=%s sqs=%s loopback_supported=%s",
2102                         nic->node, nic->vf_id,
2103                         nic->tns_mode == NIC_TNS_MODE ? "tns" : "tns-bypass",
2104                         nic->sqs_mode ? "true" : "false",
2105                         nic->loopback_supported ? "true" : "false"
2106                         );
2107         }
2108
2109         ret = nicvf_base_init(nic);
2110         if (ret) {
2111                 PMD_INIT_LOG(ERR, "Failed to execute nicvf_base_init");
2112                 goto malloc_fail;
2113         }
2114
2115         if (nic->sqs_mode) {
2116                 /* Push the nic onto the stack of secondary VFs */
2117                 nicvf_svf_push(nic);
2118
2119                 /* Steal nic pointer from the device for further reuse */
2120                 eth_dev->data->dev_private = NULL;
2121
2122                 nicvf_periodic_alarm_stop(nicvf_interrupt, eth_dev);
2123                 ret = nicvf_periodic_alarm_start(nicvf_vf_interrupt, nic);
2124                 if (ret) {
2125                         PMD_INIT_LOG(ERR, "Failed to start periodic alarm");
2126                         goto fail;
2127                 }
2128
2129                 /* Detach port by returning positive error number */
2130                 return ENOTSUP;
2131         }
2132
2133         eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", ETHER_ADDR_LEN, 0);
2134         if (eth_dev->data->mac_addrs == NULL) {
2135                 PMD_INIT_LOG(ERR, "Failed to allocate memory for mac addr");
2136                 ret = -ENOMEM;
2137                 goto alarm_fail;
2138         }
2139         if (is_zero_ether_addr((struct ether_addr *)nic->mac_addr))
2140                 eth_random_addr(&nic->mac_addr[0]);
2141
2142         ether_addr_copy((struct ether_addr *)nic->mac_addr,
2143                         &eth_dev->data->mac_addrs[0]);
2144
2145         ret = nicvf_mbox_set_mac_addr(nic, nic->mac_addr);
2146         if (ret) {
2147                 PMD_INIT_LOG(ERR, "Failed to set mac addr");
2148                 goto malloc_fail;
2149         }
2150
2151         PMD_INIT_LOG(INFO, "Port %d (%x:%x) mac=%02x:%02x:%02x:%02x:%02x:%02x",
2152                 eth_dev->data->port_id, nic->vendor_id, nic->device_id,
2153                 nic->mac_addr[0], nic->mac_addr[1], nic->mac_addr[2],
2154                 nic->mac_addr[3], nic->mac_addr[4], nic->mac_addr[5]);
2155
2156         return 0;
2157
2158 malloc_fail:
2159         rte_free(eth_dev->data->mac_addrs);
2160 alarm_fail:
2161         nicvf_periodic_alarm_stop(nicvf_interrupt, eth_dev);
2162 fail:
2163         return ret;
2164 }
2165
2166 static const struct rte_pci_id pci_id_nicvf_map[] = {
2167         {
2168                 .class_id = RTE_CLASS_ANY_ID,
2169                 .vendor_id = PCI_VENDOR_ID_CAVIUM,
2170                 .device_id = PCI_DEVICE_ID_THUNDERX_CN88XX_PASS1_NICVF,
2171                 .subsystem_vendor_id = PCI_VENDOR_ID_CAVIUM,
2172                 .subsystem_device_id = PCI_SUB_DEVICE_ID_CN88XX_PASS1_NICVF,
2173         },
2174         {
2175                 .class_id = RTE_CLASS_ANY_ID,
2176                 .vendor_id = PCI_VENDOR_ID_CAVIUM,
2177                 .device_id = PCI_DEVICE_ID_THUNDERX_NICVF,
2178                 .subsystem_vendor_id = PCI_VENDOR_ID_CAVIUM,
2179                 .subsystem_device_id = PCI_SUB_DEVICE_ID_CN88XX_PASS2_NICVF,
2180         },
2181         {
2182                 .class_id = RTE_CLASS_ANY_ID,
2183                 .vendor_id = PCI_VENDOR_ID_CAVIUM,
2184                 .device_id = PCI_DEVICE_ID_THUNDERX_NICVF,
2185                 .subsystem_vendor_id = PCI_VENDOR_ID_CAVIUM,
2186                 .subsystem_device_id = PCI_SUB_DEVICE_ID_CN81XX_NICVF,
2187         },
2188         {
2189                 .class_id = RTE_CLASS_ANY_ID,
2190                 .vendor_id = PCI_VENDOR_ID_CAVIUM,
2191                 .device_id = PCI_DEVICE_ID_THUNDERX_NICVF,
2192                 .subsystem_vendor_id = PCI_VENDOR_ID_CAVIUM,
2193                 .subsystem_device_id = PCI_SUB_DEVICE_ID_CN83XX_NICVF,
2194         },
2195         {
2196                 .vendor_id = 0,
2197         },
2198 };
2199
2200 static int nicvf_eth_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
2201         struct rte_pci_device *pci_dev)
2202 {
2203         return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct nicvf),
2204                 nicvf_eth_dev_init);
2205 }
2206
2207 static int nicvf_eth_pci_remove(struct rte_pci_device *pci_dev)
2208 {
2209         return rte_eth_dev_pci_generic_remove(pci_dev, NULL);
2210 }
2211
2212 static struct rte_pci_driver rte_nicvf_pmd = {
2213         .id_table = pci_id_nicvf_map,
2214         .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_KEEP_MAPPED_RES |
2215                         RTE_PCI_DRV_INTR_LSC,
2216         .probe = nicvf_eth_pci_probe,
2217         .remove = nicvf_eth_pci_remove,
2218 };
2219
2220 RTE_PMD_REGISTER_PCI(net_thunderx, rte_nicvf_pmd);
2221 RTE_PMD_REGISTER_PCI_TABLE(net_thunderx, pci_id_nicvf_map);
2222 RTE_PMD_REGISTER_KMOD_DEP(net_thunderx, "* igb_uio | uio_pci_generic | vfio-pci");
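/*
 * Illustrative host setup (the PCI address is hypothetical): before this PMD
 * can probe a VF, the device has to be bound to one of the kernel modules
 * named in the RTE_PMD_REGISTER_KMOD_DEP() line above, e.g.:
 *
 *   usertools/dpdk-devbind.py --bind=vfio-pci 0002:01:00.2
 */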