/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */
#include "cn9k_ethdev.h"
#include "cn9k_flow.h"
#include "cn9k_rx.h"
#include "cn9k_tx.h"

static uint16_t
nix_rx_offload_flags(struct rte_eth_dev *eth_dev)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct rte_eth_dev_data *data = eth_dev->data;
	struct rte_eth_conf *conf = &data->dev_conf;
	struct rte_eth_rxmode *rxmode = &conf->rxmode;
	uint16_t flags = 0;

	if (rxmode->mq_mode == RTE_ETH_MQ_RX_RSS &&
	    (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH))
		flags |= NIX_RX_OFFLOAD_RSS_F;

	if (dev->rx_offloads &
	    (RTE_ETH_RX_OFFLOAD_TCP_CKSUM | RTE_ETH_RX_OFFLOAD_UDP_CKSUM))
		flags |= NIX_RX_OFFLOAD_CHECKSUM_F;

	if (dev->rx_offloads &
	    (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM))
		flags |= NIX_RX_OFFLOAD_CHECKSUM_F;
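
	/* Note: both the L4 (TCP/UDP) and the L3 (inner/outer IPv4) checksum
	 * requests above collapse into the single NIX_RX_OFFLOAD_CHECKSUM_F
	 * flag; enabling either family selects the checksum-aware Rx fast path.
	 */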

	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
		flags |= NIX_RX_MULTI_SEG_F;

	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
		flags |= NIX_RX_OFFLOAD_TSTAMP_F;

	if (!dev->ptype_disable)
		flags |= NIX_RX_OFFLOAD_PTYPE_F;

	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY)
		flags |= NIX_RX_OFFLOAD_SECURITY_F;

	if (dev->rx_mark_update)
		flags |= NIX_RX_OFFLOAD_MARK_UPDATE_F;

	return flags;
}

static uint16_t
nix_tx_offload_flags(struct rte_eth_dev *eth_dev)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	uint64_t conf = dev->tx_offloads;
	uint16_t flags = 0;

	/* Fastpath is dependent on these enums */
	RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_TCP_CKSUM != (1ULL << 52));
	RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_SCTP_CKSUM != (2ULL << 52));
	RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_UDP_CKSUM != (3ULL << 52));
	RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_IP_CKSUM != (1ULL << 54));
	RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_IPV4 != (1ULL << 55));
	RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_OUTER_IP_CKSUM != (1ULL << 58));
	RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_OUTER_IPV4 != (1ULL << 59));
	RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_OUTER_IPV6 != (1ULL << 60));
	RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_OUTER_UDP_CKSUM != (1ULL << 41));
	RTE_BUILD_BUG_ON(RTE_MBUF_L2_LEN_BITS != 7);
	RTE_BUILD_BUG_ON(RTE_MBUF_L3_LEN_BITS != 9);
	RTE_BUILD_BUG_ON(RTE_MBUF_OUTL2_LEN_BITS != 7);
	RTE_BUILD_BUG_ON(RTE_MBUF_OUTL3_LEN_BITS != 9);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_off) !=
			 offsetof(struct rte_mbuf, buf_iova) + 8);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) !=
			 offsetof(struct rte_mbuf, buf_iova) + 16);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
			 offsetof(struct rte_mbuf, ol_flags) + 12);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, tx_offload) !=
			 offsetof(struct rte_mbuf, pool) + 2 * sizeof(void *));
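
	/* The Tx fast path reads ol_flags and the tx_offload bit-fields with
	 * hard-coded shifts and fixed mbuf offsets, so any change to the
	 * rte_mbuf layout or to these flag positions is made to fail the
	 * build here rather than silently corrupt send descriptors at run
	 * time.
	 */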

	if (conf & RTE_ETH_TX_OFFLOAD_VLAN_INSERT ||
	    conf & RTE_ETH_TX_OFFLOAD_QINQ_INSERT)
		flags |= NIX_TX_OFFLOAD_VLAN_QINQ_F;

	if (conf & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
	    conf & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM)
		flags |= NIX_TX_OFFLOAD_OL3_OL4_CSUM_F;

	if (conf & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM ||
	    conf & RTE_ETH_TX_OFFLOAD_TCP_CKSUM ||
	    conf & RTE_ETH_TX_OFFLOAD_UDP_CKSUM ||
	    conf & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM)
		flags |= NIX_TX_OFFLOAD_L3_L4_CSUM_F;

	if (!(conf & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE))
		flags |= NIX_TX_OFFLOAD_MBUF_NOFF_F;

	if (conf & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
		flags |= NIX_TX_MULTI_SEG_F;

	/* Enable Inner checksum for TSO */
	if (conf & RTE_ETH_TX_OFFLOAD_TCP_TSO)
		flags |= (NIX_TX_OFFLOAD_TSO_F | NIX_TX_OFFLOAD_L3_L4_CSUM_F);

	/* Enable Inner and Outer checksum for Tunnel TSO */
	if (conf & (RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
		    RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
		    RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO))
		flags |= (NIX_TX_OFFLOAD_TSO_F | NIX_TX_OFFLOAD_OL3_OL4_CSUM_F |
			  NIX_TX_OFFLOAD_L3_L4_CSUM_F);

	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
		flags |= NIX_TX_OFFLOAD_TSTAMP_F;

	if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY)
		flags |= NIX_TX_OFFLOAD_SECURITY_F;

	return flags;
}

static int
cn9k_nix_ptypes_set(struct rte_eth_dev *eth_dev, uint32_t ptype_mask)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);

	if (ptype_mask) {
		dev->rx_offload_flags |= NIX_RX_OFFLOAD_PTYPE_F;
		dev->ptype_disable = 0;
	} else {
		dev->rx_offload_flags &= ~NIX_RX_OFFLOAD_PTYPE_F;
		dev->ptype_disable = 1;
	}

	cn9k_eth_set_rx_function(eth_dev);

	return 0;
}
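
/* Precompute word0 of the NIX send header for the common single-segment
 * case. sizem1 encodes the SQE size in 16 B (two 8 B word) units, minus
 * one: e.g. HDR(2) + EXT(2) + SG(1) + IOVA(1) = 6 words -> 6/2 - 1 = 2.
 * The cached value lets the Tx fast path reuse the header per packet and
 * only patch the per-packet fields.
 */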
static void
nix_form_default_desc(struct cnxk_eth_dev *dev, struct cn9k_eth_txq *txq,
		      uint16_t qid)
{
	union nix_send_hdr_w0_u send_hdr_w0;

	/* Initialize the fields based on basic single segment packet */
	send_hdr_w0.u = 0;
	if (dev->tx_offload_flags & NIX_TX_NEED_EXT_HDR) {
		/* 2(HDR) + 2(EXT_HDR) + 1(SG) + 1(IOVA) = 6/2 - 1 = 2 */
		send_hdr_w0.sizem1 = 2;
		if (dev->tx_offload_flags & NIX_TX_OFFLOAD_TSTAMP_F) {
			/* Default: one seg packet would have:
			 * 2(HDR) + 2(EXT) + 1(SG) + 1(IOVA) + 2(MEM)
			 * = 8/2 - 1 = 3
			 */
			send_hdr_w0.sizem1 = 3;

			/* The offset for send_mem is derived from
			 * send_hdr->w0.sizem1 * 2
			 */
			txq->ts_mem = dev->tstamp.tx_tstamp_iova;
		}
	} else {
		/* 2(HDR) + 1(SG) + 1(IOVA) = 4/2 - 1 = 1 */
		send_hdr_w0.sizem1 = 1;
	}
	send_hdr_w0.sq = qid;
	txq->send_hdr_w0 = send_hdr_w0.u;
}

static int
cn9k_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
			uint16_t nb_desc, unsigned int socket,
			const struct rte_eth_txconf *tx_conf)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct roc_cpt_lf *inl_lf;
	struct cn9k_eth_txq *txq;
	struct roc_nix_sq *sq;
	uint16_t crypto_qid;
	int rc;

	RTE_SET_USED(socket);

	/* Common Tx queue setup */
	rc = cnxk_nix_tx_queue_setup(eth_dev, qid, nb_desc,
				     sizeof(struct cn9k_eth_txq), tx_conf);
	if (rc)
		return rc;

	sq = &dev->sqs[qid];

	/* Update fast path queue */
	txq = eth_dev->data->tx_queues[qid];
	txq->fc_mem = sq->fc;
	txq->lmt_addr = sq->lmt_addr;
	txq->io_addr = sq->io_addr;
	txq->nb_sqb_bufs_adj = sq->nb_sqb_bufs_adj;
	txq->sqes_per_sqb_log2 = sq->sqes_per_sqb_log2;

	/* Fetch CPT LF info for outbound if present */
	if (dev->outb.lf_base) {
		crypto_qid = qid % dev->outb.nb_crypto_qs;
		inl_lf = dev->outb.lf_base + crypto_qid;

		txq->cpt_io_addr = inl_lf->io_addr;
		txq->cpt_fc = inl_lf->fc_addr;
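
		/* Leave headroom on the CPT queue: several Tx queues may map
		 * onto the same crypto LF (qid % nb_crypto_qs above), so only
		 * a ~0.7 share of its descriptors is advertised here.
		 */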
		txq->cpt_desc = inl_lf->nb_desc * 0.7;
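
		/* The SA base is 64 KB aligned (asserted below), so its low
		 * 16 bits are free to carry the port id alongside the
		 * pointer; the fast path is expected to mask the two apart.
		 */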
		txq->sa_base = (uint64_t)dev->outb.sa_base;
		txq->sa_base |= eth_dev->data->port_id;
		PLT_STATIC_ASSERT(BIT_ULL(16) == ROC_NIX_INL_SA_BASE_ALIGN);
	}

	nix_form_default_desc(dev, txq, qid);
	txq->lso_tun_fmt = dev->lso_tun_fmt;

	return 0;
}

static int
cn9k_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
			uint16_t nb_desc, unsigned int socket,
			const struct rte_eth_rxconf *rx_conf,
			struct rte_mempool *mp)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct cn9k_eth_rxq *rxq;
	struct roc_nix_rq *rq;
	struct roc_nix_cq *cq;
	int rc;

	RTE_SET_USED(socket);

	/* CQ Errata needs min 4K ring */
	if (dev->cq_min_4k && nb_desc < 4096)
		nb_desc = 4096;

	/* Common Rx queue setup */
	rc = cnxk_nix_rx_queue_setup(eth_dev, qid, nb_desc,
				     sizeof(struct cn9k_eth_rxq), rx_conf, mp);
	if (rc)
		return rc;

	rq = &dev->rqs[qid];
	cq = &dev->cqs[qid];

	/* Update fast path queue */
	rxq = eth_dev->data->rx_queues[qid];
	rxq->desc = (uintptr_t)cq->desc_base;
	rxq->cq_door = cq->door;
	rxq->cq_status = cq->status;
	rxq->wdata = cq->wdata;
	rxq->head = cq->head;
	rxq->qmask = cq->qmask;
	rxq->tstamp = &dev->tstamp;

	/* Data offset from data to start of mbuf is first_skip */
	rxq->data_off = rq->first_skip;
	rxq->mbuf_initializer = cnxk_nix_rxq_mbuf_setup(dev);
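
	/* Lookup mem is a per-process fast-path translation table (packet
	 * type and error-code lookup) shared by all cnxk ports; it is
	 * fetched once here so the Rx path avoids an extra indirection per
	 * burst.
	 */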
	rxq->lookup_mem = cnxk_nix_fastpath_lookup_mem_get();

	return 0;
}

static int
cn9k_nix_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qidx)
{
	struct cn9k_eth_txq *txq = eth_dev->data->tx_queues[qidx];
	int rc;

	rc = cnxk_nix_tx_queue_stop(eth_dev, qidx);
	if (rc)
		return rc;

	/* Clear fc cache pkts to trigger worker stop */
	txq->fc_cache_pkts = 0;

	return 0;
}

static int
cn9k_nix_configure(struct rte_eth_dev *eth_dev)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct rte_eth_conf *conf = &eth_dev->data->dev_conf;
	struct rte_eth_txmode *txmode = &conf->txmode;
	int rc;

	/* Platform specific checks */
	if ((roc_model_is_cn96_a0() || roc_model_is_cn95_a0()) &&
	    (txmode->offloads & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM) &&
	    ((txmode->offloads & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
	     (txmode->offloads & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM))) {
		plt_err("Outer IP and SCTP checksum unsupported");
		return -EINVAL;
	}

	/* Common nix configure */
	rc = cnxk_nix_configure(eth_dev);
	if (rc)
		return rc;

	/* Update offload flags */
	dev->rx_offload_flags = nix_rx_offload_flags(eth_dev);
	dev->tx_offload_flags = nix_tx_offload_flags(eth_dev);

	plt_nix_dbg("Configured port%d platform specific rx_offload_flags=0x%x"
		    " tx_offload_flags=0x%x",
		    eth_dev->data->port_id, dev->rx_offload_flags,
		    dev->tx_offload_flags);

	return 0;
}

/* Function to enable ptp config for VFs */
static void
nix_ptp_enable_vf(struct rte_eth_dev *eth_dev)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);

	if (nix_recalc_mtu(eth_dev))
		plt_err("Failed to set MTU size for ptp");

	dev->rx_offload_flags |= NIX_RX_OFFLOAD_TSTAMP_F;

	/* Setting up the function pointers as per new offload flags */
	cn9k_eth_set_rx_function(eth_dev);
	cn9k_eth_set_tx_function(eth_dev);
}
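
/* One-shot Rx burst stub installed by the PTP mbox callback below. PTP
 * setup on a VF needs its own mbox exchange (VF->PF) and so cannot run
 * inside the PF->VF mbox handler; deferring it to the next Rx call on the
 * datapath thread breaks that nesting. The stub enables PTP, which also
 * swaps the real Rx function back in, and reports zero packets received.
 */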
static uint16_t
nix_ptp_vf_burst(void *queue, struct rte_mbuf **mbufs, uint16_t pkts)
{
	struct cn9k_eth_rxq *rxq = queue;
	struct cnxk_eth_rxq_sp *rxq_sp;
	struct rte_eth_dev *eth_dev;

	RTE_SET_USED(mbufs);
	RTE_SET_USED(pkts);

	rxq_sp = cnxk_eth_rxq_to_sp(rxq);
	eth_dev = rxq_sp->dev->eth_dev;
	nix_ptp_enable_vf(eth_dev);

	return 0;
}

static int
cn9k_nix_ptp_info_update_cb(struct roc_nix *nix, bool ptp_en)
{
	struct cnxk_eth_dev *dev = (struct cnxk_eth_dev *)nix;
	struct rte_eth_dev *eth_dev;
	struct cn9k_eth_rxq *rxq;
	int i;

	if (!dev)
		return -EINVAL;

	eth_dev = dev->eth_dev;
	if (!eth_dev)
		return -EINVAL;

	dev->ptp_en = ptp_en;

	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
		rxq = eth_dev->data->rx_queues[i];
		rxq->mbuf_initializer = cnxk_nix_rxq_mbuf_setup(dev);
	}

	if (roc_nix_is_vf_or_sdp(nix) && !roc_nix_is_sdp(nix) &&
	    !roc_nix_is_lbk(nix)) {
		/* In case of VF, setting of MTU cannot be done directly in
		 * this function as this is running as part of MBOX request
		 * (PF->VF) and MTU setting also requires an MBOX message to
		 * be sent (VF->PF).
		 */
		eth_dev->rx_pkt_burst = nix_ptp_vf_burst;
	}

	return 0;
}

static int
cn9k_nix_timesync_enable(struct rte_eth_dev *eth_dev)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	int i, rc;

	rc = cnxk_nix_timesync_enable(eth_dev);
	if (rc)
		return rc;

	dev->rx_offload_flags |= NIX_RX_OFFLOAD_TSTAMP_F;
	dev->tx_offload_flags |= NIX_TX_OFFLOAD_TSTAMP_F;
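
	/* Timestamping changes the per-packet SQE layout (an extra SEND_MEM
	 * pair, see nix_form_default_desc()), so every Tx queue's cached
	 * default header must be rebuilt, not just the offload flags.
	 */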
	for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
		nix_form_default_desc(dev, eth_dev->data->tx_queues[i], i);

	/* Setting up the rx[tx]_offload_flags due to change
	 * in rx[tx]_offloads.
	 */
	cn9k_eth_set_rx_function(eth_dev);
	cn9k_eth_set_tx_function(eth_dev);

	return 0;
}

static int
cn9k_nix_timesync_disable(struct rte_eth_dev *eth_dev)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	int i, rc;

	rc = cnxk_nix_timesync_disable(eth_dev);
	if (rc)
		return rc;

	dev->rx_offload_flags &= ~NIX_RX_OFFLOAD_TSTAMP_F;
	dev->tx_offload_flags &= ~NIX_TX_OFFLOAD_TSTAMP_F;

	for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
		nix_form_default_desc(dev, eth_dev->data->tx_queues[i], i);

	/* Setting up the rx[tx]_offload_flags due to change
	 * in rx[tx]_offloads.
	 */
	cn9k_eth_set_rx_function(eth_dev);
	cn9k_eth_set_tx_function(eth_dev);

	return 0;
}

static int
cn9k_nix_dev_start(struct rte_eth_dev *eth_dev)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct roc_nix *nix = &dev->nix;
	int rc;

	/* Common eth dev start */
	rc = cnxk_nix_dev_start(eth_dev);
	if (rc)
		return rc;

	/* Update VF about data off shifted by 8 bytes if PTP already
	 * enabled in PF owning this VF
	 */
	if (dev->ptp_en && !roc_nix_is_pf(nix) && !roc_nix_is_sdp(nix))
		nix_ptp_enable_vf(eth_dev);
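
	/* OR (|=) rather than plain assignment: flags such as TSTAMP or
	 * MARK_UPDATE may already have been set after dev_configure (via
	 * timesync enable, the PTP callback, or Rx metadata negotiation)
	 * and must survive a stop/start cycle.
	 */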
	dev->rx_offload_flags |= nix_rx_offload_flags(eth_dev);
	dev->tx_offload_flags |= nix_tx_offload_flags(eth_dev);

	cn9k_eth_set_tx_function(eth_dev);
	cn9k_eth_set_rx_function(eth_dev);

	return 0;
}

static int
cn9k_nix_timesync_read_tx_timestamp(struct rte_eth_dev *eth_dev,
				    struct timespec *timestamp)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct cnxk_timesync_info *tstamp = &dev->tstamp;
	uint64_t ns;

	if (*tstamp->tx_tstamp == 0)
		return -EINVAL;

	ns = rte_timecounter_update(&dev->tx_tstamp_tc, *tstamp->tx_tstamp);
	*timestamp = rte_ns_to_timespec(ns);
	*tstamp->tx_tstamp = 0;

	return 0;
}
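
/* rte_eth_rx_metadata_negotiate() handler: the ethdev layer passes in the
 * feature mask the application requested; everything outside USER_FLAG and
 * USER_MARK is cleared below to signal it is unsupported, and the mark
 * update fast path is enabled only when something remains requested.
 */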
static int
cn9k_nix_rx_metadata_negotiate(struct rte_eth_dev *eth_dev, uint64_t *features)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);

	*features &=
		(RTE_ETH_RX_METADATA_USER_FLAG | RTE_ETH_RX_METADATA_USER_MARK);

	if (*features) {
		dev->rx_offload_flags |= NIX_RX_OFFLOAD_MARK_UPDATE_F;
		dev->rx_mark_update = true;
	} else {
		dev->rx_offload_flags &= ~NIX_RX_OFFLOAD_MARK_UPDATE_F;
		dev->rx_mark_update = false;
	}

	cn9k_eth_set_rx_function(eth_dev);

	return 0;
}

/* Update platform specific eth dev ops */
static void
nix_eth_dev_ops_override(void)
{
	static int init_once;

	if (init_once)
		return;
	init_once = 1;

	/* Update platform specific ops */
	cnxk_eth_dev_ops.dev_configure = cn9k_nix_configure;
	cnxk_eth_dev_ops.tx_queue_setup = cn9k_nix_tx_queue_setup;
	cnxk_eth_dev_ops.rx_queue_setup = cn9k_nix_rx_queue_setup;
	cnxk_eth_dev_ops.tx_queue_stop = cn9k_nix_tx_queue_stop;
	cnxk_eth_dev_ops.dev_start = cn9k_nix_dev_start;
	cnxk_eth_dev_ops.dev_ptypes_set = cn9k_nix_ptypes_set;
	cnxk_eth_dev_ops.timesync_enable = cn9k_nix_timesync_enable;
	cnxk_eth_dev_ops.timesync_disable = cn9k_nix_timesync_disable;
	cnxk_eth_dev_ops.mtr_ops_get = NULL;
	cnxk_eth_dev_ops.rx_metadata_negotiate = cn9k_nix_rx_metadata_negotiate;
	cnxk_eth_dev_ops.timesync_read_tx_timestamp =
		cn9k_nix_timesync_read_tx_timestamp;
}

static void
npc_flow_ops_override(void)
{
	static int init_once;

	if (init_once)
		return;
	init_once = 1;

	/* Update platform specific ops */
	cnxk_flow_ops.create = cn9k_flow_create;
	cnxk_flow_ops.destroy = cn9k_flow_destroy;
}

static int
cn9k_nix_remove(struct rte_pci_device *pci_dev)
{
	return cnxk_nix_remove(pci_dev);
}

static int
cn9k_nix_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
{
	struct rte_eth_dev *eth_dev;
	struct cnxk_eth_dev *dev;
	int rc;

	if (RTE_CACHE_LINE_SIZE != 128) {
		plt_err("Driver not compiled for CN9K");
		return -EFAULT;
	}

	rc = roc_plt_init();
	if (rc) {
		plt_err("Failed to initialize platform model, rc=%d", rc);
		return rc;
	}

	nix_eth_dev_ops_override();
	npc_flow_ops_override();

	cn9k_eth_sec_ops_override();

	/* Common probe */
	rc = cnxk_nix_probe(pci_drv, pci_dev);
	if (rc)
		return rc;

	/* Find eth dev allocated */
	eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
	if (!eth_dev)
		return -ENOENT;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		/* Setup callbacks for secondary process */
		cn9k_eth_set_tx_function(eth_dev);
		cn9k_eth_set_rx_function(eth_dev);
		return 0;
	}

	dev = cnxk_eth_pmd_priv(eth_dev);

	/* Update capabilities already set for TSO.
	 * TSO not supported for earlier chip revisions.
	 */
	if (roc_model_is_cn96_a0() || roc_model_is_cn95_a0())
		dev->tx_offload_capa &= ~(RTE_ETH_TX_OFFLOAD_TCP_TSO |
					  RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
					  RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
					  RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO);

	/* 50G and 100G to be supported for board version C0
	 * and above of CN9K.
	 */
	if (roc_model_is_cn96_a0() || roc_model_is_cn95_a0()) {
		dev->speed_capa &= ~(uint64_t)RTE_ETH_LINK_SPEED_50G;
		dev->speed_capa &= ~(uint64_t)RTE_ETH_LINK_SPEED_100G;
	}

	/* Register up msg callbacks for PTP information */
	roc_nix_ptp_info_cb_register(&dev->nix, cn9k_nix_ptp_info_update_cb);

	/* Update HW erratas */
	if (roc_model_is_cn96_a0() || roc_model_is_cn95_a0())
		dev->cq_min_4k = 1;

	return 0;
}

static const struct rte_pci_id cn9k_pci_nix_map[] = {
	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KA, PCI_DEVID_CNXK_RVU_PF),
	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KB, PCI_DEVID_CNXK_RVU_PF),
	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KC, PCI_DEVID_CNXK_RVU_PF),
	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KD, PCI_DEVID_CNXK_RVU_PF),
	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KE, PCI_DEVID_CNXK_RVU_PF),
	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KA, PCI_DEVID_CNXK_RVU_VF),
	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KB, PCI_DEVID_CNXK_RVU_VF),
	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KC, PCI_DEVID_CNXK_RVU_VF),
	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KD, PCI_DEVID_CNXK_RVU_VF),
	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KE, PCI_DEVID_CNXK_RVU_VF),
	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KA, PCI_DEVID_CNXK_RVU_AF_VF),
	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KB, PCI_DEVID_CNXK_RVU_AF_VF),
	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KC, PCI_DEVID_CNXK_RVU_AF_VF),
	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KD, PCI_DEVID_CNXK_RVU_AF_VF),
	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KE, PCI_DEVID_CNXK_RVU_AF_VF),
	{
		.vendor_id = 0,
	},
};

static struct rte_pci_driver cn9k_pci_nix = {
	.id_table = cn9k_pci_nix_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_NEED_IOVA_AS_VA |
		     RTE_PCI_DRV_INTR_LSC,
	.probe = cn9k_nix_probe,
	.remove = cn9k_nix_remove,
};

RTE_PMD_REGISTER_PCI(net_cn9k, cn9k_pci_nix);
RTE_PMD_REGISTER_PCI_TABLE(net_cn9k, cn9k_pci_nix_map);
RTE_PMD_REGISTER_KMOD_DEP(net_cn9k, "vfio-pci");