/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include "cn10k_ethdev.h"
#include "cn10k_rte_flow.h"
#include "cn10k_rx.h"
#include "cn10k_tx.h"
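
/* Translate the ethdev Rx offload configuration into the NIX_RX_* flag
 * bits consumed by the cn10k Rx burst functions.
 */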
static uint16_t
nix_rx_offload_flags(struct rte_eth_dev *eth_dev)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct rte_eth_dev_data *data = eth_dev->data;
	struct rte_eth_conf *conf = &data->dev_conf;
	struct rte_eth_rxmode *rxmode = &conf->rxmode;
	uint16_t flags = 0;

	if (rxmode->mq_mode == ETH_MQ_RX_RSS &&
	    (dev->rx_offloads & DEV_RX_OFFLOAD_RSS_HASH))
		flags |= NIX_RX_OFFLOAD_RSS_F;

	if (dev->rx_offloads &
	    (DEV_RX_OFFLOAD_TCP_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM))
		flags |= NIX_RX_OFFLOAD_CHECKSUM_F;

	if (dev->rx_offloads &
	    (DEV_RX_OFFLOAD_IPV4_CKSUM | DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM))
		flags |= NIX_RX_OFFLOAD_CHECKSUM_F;

	if (dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)
		flags |= NIX_RX_MULTI_SEG_F;

	if (dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP)
		flags |= NIX_RX_OFFLOAD_TSTAMP_F;

	if (!dev->ptype_disable)
		flags |= NIX_RX_OFFLOAD_PTYPE_F;

	return flags;
}
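
/* Translate the ethdev Tx offload configuration into the NIX_TX_* flag
 * bits consumed by the cn10k Tx burst functions.
 */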
static uint16_t
nix_tx_offload_flags(struct rte_eth_dev *eth_dev)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	uint64_t conf = dev->tx_offloads;
	uint16_t flags = 0;

	/* Fastpath is dependent on these enums */
	RTE_BUILD_BUG_ON(PKT_TX_TCP_CKSUM != (1ULL << 52));
	RTE_BUILD_BUG_ON(PKT_TX_SCTP_CKSUM != (2ULL << 52));
	RTE_BUILD_BUG_ON(PKT_TX_UDP_CKSUM != (3ULL << 52));
	RTE_BUILD_BUG_ON(PKT_TX_IP_CKSUM != (1ULL << 54));
	RTE_BUILD_BUG_ON(PKT_TX_IPV4 != (1ULL << 55));
	RTE_BUILD_BUG_ON(PKT_TX_OUTER_IP_CKSUM != (1ULL << 58));
	RTE_BUILD_BUG_ON(PKT_TX_OUTER_IPV4 != (1ULL << 59));
	RTE_BUILD_BUG_ON(PKT_TX_OUTER_IPV6 != (1ULL << 60));
	RTE_BUILD_BUG_ON(PKT_TX_OUTER_UDP_CKSUM != (1ULL << 41));
	RTE_BUILD_BUG_ON(RTE_MBUF_L2_LEN_BITS != 7);
	RTE_BUILD_BUG_ON(RTE_MBUF_L3_LEN_BITS != 9);
	RTE_BUILD_BUG_ON(RTE_MBUF_OUTL2_LEN_BITS != 7);
	RTE_BUILD_BUG_ON(RTE_MBUF_OUTL3_LEN_BITS != 9);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_off) !=
			 offsetof(struct rte_mbuf, buf_iova) + 8);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) !=
			 offsetof(struct rte_mbuf, buf_iova) + 16);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
			 offsetof(struct rte_mbuf, ol_flags) + 12);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, tx_offload) !=
			 offsetof(struct rte_mbuf, pool) + 2 * sizeof(void *));

	if (conf & DEV_TX_OFFLOAD_VLAN_INSERT ||
	    conf & DEV_TX_OFFLOAD_QINQ_INSERT)
		flags |= NIX_TX_OFFLOAD_VLAN_QINQ_F;

	if (conf & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
	    conf & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)
		flags |= NIX_TX_OFFLOAD_OL3_OL4_CSUM_F;

	if (conf & DEV_TX_OFFLOAD_IPV4_CKSUM ||
	    conf & DEV_TX_OFFLOAD_TCP_CKSUM ||
	    conf & DEV_TX_OFFLOAD_UDP_CKSUM || conf & DEV_TX_OFFLOAD_SCTP_CKSUM)
		flags |= NIX_TX_OFFLOAD_L3_L4_CSUM_F;

	if (!(conf & DEV_TX_OFFLOAD_MBUF_FAST_FREE))
		flags |= NIX_TX_OFFLOAD_MBUF_NOFF_F;

	if (conf & DEV_TX_OFFLOAD_MULTI_SEGS)
		flags |= NIX_TX_MULTI_SEG_F;

	/* Enable Inner checksum for TSO */
	if (conf & DEV_TX_OFFLOAD_TCP_TSO)
		flags |= (NIX_TX_OFFLOAD_TSO_F | NIX_TX_OFFLOAD_L3_L4_CSUM_F);

	/* Enable Inner and Outer checksum for Tunnel TSO */
	if (conf & (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
		    DEV_TX_OFFLOAD_GENEVE_TNL_TSO | DEV_TX_OFFLOAD_GRE_TNL_TSO))
		flags |= (NIX_TX_OFFLOAD_TSO_F | NIX_TX_OFFLOAD_OL3_OL4_CSUM_F |
			  NIX_TX_OFFLOAD_L3_L4_CSUM_F);

	if (dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP)
		flags |= NIX_TX_OFFLOAD_TSTAMP_F;

	return flags;
}
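
/* dev_ptypes_set() callback: toggle packet type parsing in the Rx
 * fastpath and re-select the matching Rx burst function.
 */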
static int
cn10k_nix_ptypes_set(struct rte_eth_dev *eth_dev, uint32_t ptype_mask)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);

	if (ptype_mask) {
		dev->rx_offload_flags |= NIX_RX_OFFLOAD_PTYPE_F;
		dev->ptype_disable = 0;
	} else {
		dev->rx_offload_flags &= ~NIX_RX_OFFLOAD_PTYPE_F;
		dev->ptype_disable = 1;
	}

	cn10k_eth_set_rx_function(eth_dev);

	return 0;
}
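
/* Precompute the constant words of the NIX send descriptor (SEND_HDR,
 * optional SEND_EXT/SEND_MEM, SEND_SG) for a queue so the Tx burst path
 * only has to patch the per-packet fields.
 */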
static void
nix_form_default_desc(struct cnxk_eth_dev *dev, struct cn10k_eth_txq *txq,
		      uint16_t qid)
{
	struct nix_send_ext_s *send_hdr_ext;
	union nix_send_hdr_w0_u send_hdr_w0;
	struct nix_send_mem_s *send_mem;
	union nix_send_sg_s sg_w0;

	/* Initialize the fields based on basic single segment packet */
	memset(&txq->cmd, 0, sizeof(txq->cmd));
	send_hdr_w0.u = 0;
	sg_w0.u = 0;

	if (dev->tx_offload_flags & NIX_TX_NEED_EXT_HDR) {
		/* 2(HDR) + 2(EXT_HDR) + 1(SG) + 1(IOVA) = 6/2 - 1 = 2 */
		send_hdr_w0.sizem1 = 2;

		send_hdr_ext = (struct nix_send_ext_s *)&txq->cmd[0];
		send_hdr_ext->w0.subdc = NIX_SUBDC_EXT;
		if (dev->tx_offload_flags & NIX_TX_OFFLOAD_TSTAMP_F) {
			/* Default: one seg packet would have:
			 * 2(HDR) + 2(EXT) + 1(SG) + 1(IOVA) + 2(MEM)
			 * => 8/2 - 1 = 3
			 */
			send_hdr_w0.sizem1 = 3;
			send_hdr_ext->w0.tstmp = 1;

			/* To calculate the offset for send_mem,
			 * send_hdr->w0.sizem1 * 2
			 */
			send_mem = (struct nix_send_mem_s *)(txq->cmd + 2);
			send_mem->w0.subdc = NIX_SUBDC_MEM;
			send_mem->w0.alg = NIX_SENDMEMALG_SETTSTMP;
			send_mem->addr = dev->tstamp.tx_tstamp_iova;
		}
	} else {
		/* 2(HDR) + 1(SG) + 1(IOVA) = 4/2 - 1 = 1 */
		send_hdr_w0.sizem1 = 1;
	}

	send_hdr_w0.sq = qid;
	sg_w0.subdc = NIX_SUBDC_SG;
	sg_w0.segs = 1;
	sg_w0.ld_type = NIX_SENDLDTYPE_LDD;

	txq->send_hdr_w0 = send_hdr_w0.u;
	txq->sg_w0 = sg_w0.u;

	rte_mb();
}
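
/* tx_queue_setup() callback: run the common cnxk setup, then cache the
 * SQ fastpath state (flow control memory, LMT base, doorbell address)
 * in the cn10k Tx queue structure.
 */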
static int
cn10k_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
			 uint16_t nb_desc, unsigned int socket,
			 const struct rte_eth_txconf *tx_conf)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct cn10k_eth_txq *txq;
	struct roc_nix_sq *sq;
	int rc;

	RTE_SET_USED(socket);

	/* Common Tx queue setup */
	rc = cnxk_nix_tx_queue_setup(eth_dev, qid, nb_desc,
				     sizeof(struct cn10k_eth_txq), tx_conf);
	if (rc)
		return rc;

	sq = &dev->sqs[qid];
	/* Update fast path queue */
	txq = eth_dev->data->tx_queues[qid];
	txq->fc_mem = sq->fc;
	/* Store lmt base in tx queue for easy access */
	txq->lmt_base = dev->nix.lmt_base;
	txq->io_addr = sq->io_addr;
	txq->nb_sqb_bufs_adj = sq->nb_sqb_bufs_adj;
	txq->sqes_per_sqb_log2 = sq->sqes_per_sqb_log2;

	nix_form_default_desc(dev, txq, qid);
	txq->lso_tun_fmt = dev->lso_tun_fmt;
	return 0;
}
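
/* rx_queue_setup() callback: run the common cnxk setup, then mirror the
 * CQ/RQ state needed by the Rx fastpath into the cn10k Rx queue structure.
 */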
static int
cn10k_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
			 uint16_t nb_desc, unsigned int socket,
			 const struct rte_eth_rxconf *rx_conf,
			 struct rte_mempool *mp)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct cn10k_eth_rxq *rxq;
	struct roc_nix_rq *rq;
	struct roc_nix_cq *cq;
	int rc;

	RTE_SET_USED(socket);

	/* CQ Errata needs min 4K ring */
	if (dev->cq_min_4k && nb_desc < 4096)
		nb_desc = 4096;

	/* Common Rx queue setup */
	rc = cnxk_nix_rx_queue_setup(eth_dev, qid, nb_desc,
				     sizeof(struct cn10k_eth_rxq), rx_conf, mp);
	if (rc)
		return rc;

	rq = &dev->rqs[qid];
	cq = &dev->cqs[qid];

	/* Update fast path queue */
	rxq = eth_dev->data->rx_queues[qid];
	rxq->rq = qid;
	rxq->desc = (uintptr_t)cq->desc_base;
	rxq->cq_door = cq->door;
	rxq->cq_status = cq->status;
	rxq->wdata = cq->wdata;
	rxq->head = cq->head;
	rxq->qmask = cq->qmask;
	rxq->tstamp = &dev->tstamp;

	/* Data offset from data to start of mbuf is first_skip */
	rxq->data_off = rq->first_skip;
	rxq->mbuf_initializer = cnxk_nix_rxq_mbuf_setup(dev);

	/* Lookup mem */
	rxq->lookup_mem = cnxk_nix_fastpath_lookup_mem_get();
	return 0;
}
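
/* tx_queue_stop() callback: stop the SQ via the common path, then zero
 * the cached flow control count so a polling worker core observes no
 * room and stops enqueuing to this queue.
 */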
static int
cn10k_nix_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qidx)
{
	struct cn10k_eth_txq *txq = eth_dev->data->tx_queues[qidx];
	int rc;

	rc = cnxk_nix_tx_queue_stop(eth_dev, qidx);
	if (rc)
		return rc;

	/* Clear fc cache pkts to trigger worker stop */
	txq->fc_cache_pkts = 0;
	return 0;
}
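
/* dev_configure() callback: run the common cnxk configure, then derive
 * the platform specific Rx/Tx offload flags used to select the burst
 * functions.
 */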
static int
cn10k_nix_configure(struct rte_eth_dev *eth_dev)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	int rc;

	/* Common nix configure */
	rc = cnxk_nix_configure(eth_dev);
	if (rc)
		return rc;

	/* Update offload flags */
	dev->rx_offload_flags = nix_rx_offload_flags(eth_dev);
	dev->tx_offload_flags = nix_tx_offload_flags(eth_dev);

	plt_nix_dbg("Configured port%d platform specific rx_offload_flags=0x%x"
		    " tx_offload_flags=0x%x",
		    eth_dev->data->port_id, dev->rx_offload_flags,
		    dev->tx_offload_flags);
	return 0;
}

/* Function to enable ptp config for VFs */
static void
nix_ptp_enable_vf(struct rte_eth_dev *eth_dev)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);

	if (nix_recalc_mtu(eth_dev))
		plt_err("Failed to set MTU size for ptp");

	dev->scalar_ena = true;
	dev->rx_offload_flags |= NIX_RX_OFFLOAD_TSTAMP_F;

	/* Setting up the function pointers as per new offload flags */
	cn10k_eth_set_rx_function(eth_dev);
	cn10k_eth_set_tx_function(eth_dev);
}
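
/* One-shot Rx burst stub installed on VF queues when PTP is enabled by
 * the PF: the first poll reconfigures the queue for timestamping via
 * nix_ptp_enable_vf(), which also restores the real burst functions.
 */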
static uint16_t
nix_ptp_vf_burst(void *queue, struct rte_mbuf **mbufs, uint16_t pkts)
{
	struct cn10k_eth_rxq *rxq = queue;
	struct cnxk_eth_rxq_sp *rxq_sp;
	struct rte_eth_dev *eth_dev;

	RTE_SET_USED(mbufs);
	RTE_SET_USED(pkts);

	rxq_sp = cnxk_eth_rxq_to_sp(rxq);
	eth_dev = rxq_sp->dev->eth_dev;
	nix_ptp_enable_vf(eth_dev);

	return 0;
}
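
/* ROC callback invoked from the PF->VF mbox path when PTP state changes:
 * refresh the per-queue mbuf initializer and, on VFs, defer the MTU
 * update to the next Rx poll via nix_ptp_vf_burst().
 */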
static int
cn10k_nix_ptp_info_update_cb(struct roc_nix *nix, bool ptp_en)
{
	struct cnxk_eth_dev *dev = (struct cnxk_eth_dev *)nix;
	struct rte_eth_dev *eth_dev;
	struct cn10k_eth_rxq *rxq;
	int i;

	if (!dev)
		return -EINVAL;

	eth_dev = dev->eth_dev;
	if (!eth_dev)
		return -EINVAL;

	dev->ptp_en = ptp_en;

	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
		rxq = eth_dev->data->rx_queues[i];
		rxq->mbuf_initializer = cnxk_nix_rxq_mbuf_setup(dev);
	}

	if (roc_nix_is_vf_or_sdp(nix) && !roc_nix_is_sdp(nix) &&
	    !roc_nix_is_lbk(nix)) {
		/* In case of VF, setting of MTU cannot be done directly in this
		 * function as this is running as part of MBOX request(PF->VF)
		 * and MTU setting also requires MBOX message to be
		 * sent(VF->PF)
		 */
		eth_dev->rx_pkt_burst = nix_ptp_vf_burst;
		rte_mb();
	}

	return 0;
}
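
/* dev_start() callback: run the common start, recompute the offload
 * flags (which may have changed since configure, e.g. for PTP), and
 * re-select the Rx/Tx burst functions.
 */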
static int
cn10k_nix_dev_start(struct rte_eth_dev *eth_dev)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct roc_nix *nix = &dev->nix;
	int rc;

	/* Common eth dev start */
	rc = cnxk_nix_dev_start(eth_dev);
	if (rc)
		return rc;

	/* Update VF about data off shifted by 8 bytes if PTP already
	 * enabled in PF owning this VF
	 */
	if (dev->ptp_en && !roc_nix_is_pf(nix) && !roc_nix_is_sdp(nix))
		nix_ptp_enable_vf(eth_dev);

	/* Setting up the rx[tx]_offload_flags due to change
	 * in rx[tx]_offloads.
	 */
	dev->rx_offload_flags |= nix_rx_offload_flags(eth_dev);
	dev->tx_offload_flags |= nix_tx_offload_flags(eth_dev);

	cn10k_eth_set_tx_function(eth_dev);
	cn10k_eth_set_rx_function(eth_dev);
	return 0;
}

/* Update platform specific eth dev ops */
static void
nix_eth_dev_ops_override(void)
{
	static int init_once;

	if (init_once)
		return;
	init_once = 1;

	/* Update platform specific ops */
	cnxk_eth_dev_ops.dev_configure = cn10k_nix_configure;
	cnxk_eth_dev_ops.tx_queue_setup = cn10k_nix_tx_queue_setup;
	cnxk_eth_dev_ops.rx_queue_setup = cn10k_nix_rx_queue_setup;
	cnxk_eth_dev_ops.tx_queue_stop = cn10k_nix_tx_queue_stop;
	cnxk_eth_dev_ops.dev_start = cn10k_nix_dev_start;
	cnxk_eth_dev_ops.dev_ptypes_set = cn10k_nix_ptypes_set;
}

static void
npc_flow_ops_override(void)
{
	static int init_once;

	if (init_once)
		return;
	init_once = 1;

	/* Update platform specific ops */
	cnxk_flow_ops.create = cn10k_flow_create;
	cnxk_flow_ops.destroy = cn10k_flow_destroy;
}

static int
cn10k_nix_remove(struct rte_pci_device *pci_dev)
{
	return cnxk_nix_remove(pci_dev);
}
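
/* PCI probe: install the cn10k specific ops, run the common cnxk probe,
 * and register the PTP info callback for the primary process.
 */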
static int
cn10k_nix_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
{
	struct rte_eth_dev *eth_dev;
	struct cnxk_eth_dev *dev;
	int rc;

	if (RTE_CACHE_LINE_SIZE != 64) {
		plt_err("Driver not compiled for CN10K");
		return -EFAULT;
	}

	rc = roc_plt_init();
	if (rc) {
		plt_err("Failed to initialize platform model, rc=%d", rc);
		return rc;
	}

	nix_eth_dev_ops_override();
	npc_flow_ops_override();

	/* Common probe */
	rc = cnxk_nix_probe(pci_drv, pci_dev);
	if (rc)
		return rc;

	/* Find eth dev allocated */
	eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
	if (!eth_dev)
		return -ENOENT;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		/* Setup callbacks for secondary process */
		cn10k_eth_set_tx_function(eth_dev);
		cn10k_eth_set_rx_function(eth_dev);
		return 0;
	}

	dev = cnxk_eth_pmd_priv(eth_dev);

	/* Register up msg callbacks for PTP information */
	roc_nix_ptp_info_cb_register(&dev->nix, cn10k_nix_ptp_info_update_cb);

	return 0;
}

static const struct rte_pci_id cn10k_pci_nix_map[] = {
	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KA, PCI_DEVID_CNXK_RVU_PF),
	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KAS, PCI_DEVID_CNXK_RVU_PF),
	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KA, PCI_DEVID_CNXK_RVU_VF),
	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KAS, PCI_DEVID_CNXK_RVU_VF),
	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KA, PCI_DEVID_CNXK_RVU_AF_VF),
	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KAS, PCI_DEVID_CNXK_RVU_AF_VF),
	{
		.vendor_id = 0,
	},
};

static struct rte_pci_driver cn10k_pci_nix = {
	.id_table = cn10k_pci_nix_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_NEED_IOVA_AS_VA |
		     RTE_PCI_DRV_INTR_LSC,
	.probe = cn10k_nix_probe,
	.remove = cn10k_nix_remove,
};

RTE_PMD_REGISTER_PCI(net_cn10k, cn10k_pci_nix);
RTE_PMD_REGISTER_PCI_TABLE(net_cn10k, cn10k_pci_nix_map);
RTE_PMD_REGISTER_KMOD_DEP(net_cn10k, "vfio-pci");