/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */
#include "cn9k_ethdev.h"
#include "cn9k_rte_flow.h"
#include "cn9k_rx.h"
#include "cn9k_tx.h"
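
/* CN9K platform layer of the common cnxk ethdev driver: derives the
 * fast-path Rx/Tx offload flag masks from the configured ethdev offloads
 * and overrides queue setup, configure, start and probe ops with CN9K
 * specifics.
 */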

static uint16_t
nix_rx_offload_flags(struct rte_eth_dev *eth_dev)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct rte_eth_dev_data *data = eth_dev->data;
	struct rte_eth_conf *conf = &data->dev_conf;
	struct rte_eth_rxmode *rxmode = &conf->rxmode;
	uint16_t flags = 0;

	if (rxmode->mq_mode == ETH_MQ_RX_RSS &&
	    (dev->rx_offloads & DEV_RX_OFFLOAD_RSS_HASH))
		flags |= NIX_RX_OFFLOAD_RSS_F;

	if (dev->rx_offloads &
	    (DEV_RX_OFFLOAD_TCP_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM))
		flags |= NIX_RX_OFFLOAD_CHECKSUM_F;

	if (dev->rx_offloads &
	    (DEV_RX_OFFLOAD_IPV4_CKSUM | DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM))
		flags |= NIX_RX_OFFLOAD_CHECKSUM_F;
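
	/* Note: the L4 (TCP/UDP) and L3 (inner/outer IPv4) checksum offloads
	 * above all map onto the single NIX_RX_OFFLOAD_CHECKSUM_F flag, which
	 * selects the checksum-validating variant of the Rx burst function.
	 */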

	if (dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)
		flags |= NIX_RX_MULTI_SEG_F;

	if (!dev->ptype_disable)
		flags |= NIX_RX_OFFLOAD_PTYPE_F;

	return flags;
}

static uint16_t
nix_tx_offload_flags(struct rte_eth_dev *eth_dev)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	uint64_t conf = dev->tx_offloads;
	uint16_t flags = 0;

	/* Fastpath is dependent on these enums */
	RTE_BUILD_BUG_ON(PKT_TX_TCP_CKSUM != (1ULL << 52));
	RTE_BUILD_BUG_ON(PKT_TX_SCTP_CKSUM != (2ULL << 52));
	RTE_BUILD_BUG_ON(PKT_TX_UDP_CKSUM != (3ULL << 52));
	RTE_BUILD_BUG_ON(PKT_TX_IP_CKSUM != (1ULL << 54));
	RTE_BUILD_BUG_ON(PKT_TX_IPV4 != (1ULL << 55));
	RTE_BUILD_BUG_ON(PKT_TX_OUTER_IP_CKSUM != (1ULL << 58));
	RTE_BUILD_BUG_ON(PKT_TX_OUTER_IPV4 != (1ULL << 59));
	RTE_BUILD_BUG_ON(PKT_TX_OUTER_IPV6 != (1ULL << 60));
	RTE_BUILD_BUG_ON(PKT_TX_OUTER_UDP_CKSUM != (1ULL << 41));
	RTE_BUILD_BUG_ON(RTE_MBUF_L2_LEN_BITS != 7);
	RTE_BUILD_BUG_ON(RTE_MBUF_L3_LEN_BITS != 9);
	RTE_BUILD_BUG_ON(RTE_MBUF_OUTL2_LEN_BITS != 7);
	RTE_BUILD_BUG_ON(RTE_MBUF_OUTL3_LEN_BITS != 9);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_off) !=
			 offsetof(struct rte_mbuf, buf_iova) + 8);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) !=
			 offsetof(struct rte_mbuf, buf_iova) + 16);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
			 offsetof(struct rte_mbuf, ol_flags) + 12);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, tx_offload) !=
			 offsetof(struct rte_mbuf, pool) + 2 * sizeof(void *));
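
	/* The assertions above pin the mbuf flag bit positions, header length
	 * field widths and mbuf field offsets that the Tx fast path hard-codes
	 * when building send descriptors with fixed shifts and fixed-offset
	 * loads.
	 */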

	if (conf & DEV_TX_OFFLOAD_VLAN_INSERT ||
	    conf & DEV_TX_OFFLOAD_QINQ_INSERT)
		flags |= NIX_TX_OFFLOAD_VLAN_QINQ_F;

	if (conf & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
	    conf & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)
		flags |= NIX_TX_OFFLOAD_OL3_OL4_CSUM_F;

	if (conf & DEV_TX_OFFLOAD_IPV4_CKSUM ||
	    conf & DEV_TX_OFFLOAD_TCP_CKSUM ||
	    conf & DEV_TX_OFFLOAD_UDP_CKSUM || conf & DEV_TX_OFFLOAD_SCTP_CKSUM)
		flags |= NIX_TX_OFFLOAD_L3_L4_CSUM_F;

	if (!(conf & DEV_TX_OFFLOAD_MBUF_FAST_FREE))
		flags |= NIX_TX_OFFLOAD_MBUF_NOFF_F;
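
	/* The check above is inverted on purpose: without MBUF_FAST_FREE the
	 * Tx completion path must honour reference counts and per-mbuf pools,
	 * which is what the NOFF (no-fast-free) variant implements.
	 */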

	if (conf & DEV_TX_OFFLOAD_MULTI_SEGS)
		flags |= NIX_TX_MULTI_SEG_F;

	/* Enable inner checksum for TSO: segmentation requires the inner
	 * L3/L4 checksums to be recomputed for every segment.
	 */
	if (conf & DEV_TX_OFFLOAD_TCP_TSO)
		flags |= (NIX_TX_OFFLOAD_TSO_F | NIX_TX_OFFLOAD_L3_L4_CSUM_F);

	/* Enable inner and outer checksum for tunnel TSO */
	if (conf & (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
		    DEV_TX_OFFLOAD_GENEVE_TNL_TSO | DEV_TX_OFFLOAD_GRE_TNL_TSO))
		flags |= (NIX_TX_OFFLOAD_TSO_F | NIX_TX_OFFLOAD_OL3_OL4_CSUM_F |
			  NIX_TX_OFFLOAD_L3_L4_CSUM_F);

	return flags;
}

static int
cn9k_nix_ptypes_set(struct rte_eth_dev *eth_dev, uint32_t ptype_mask)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);

	if (ptype_mask) {
		dev->rx_offload_flags |= NIX_RX_OFFLOAD_PTYPE_F;
		dev->ptype_disable = 0;
	} else {
		dev->rx_offload_flags &= ~NIX_RX_OFFLOAD_PTYPE_F;
		dev->ptype_disable = 1;
	}

	cn9k_eth_set_rx_function(eth_dev);

	return 0;
}
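
/* Prefill the constant part of the NIX send descriptor for this SQ; the Tx
 * burst functions start from this template and patch only the per-packet
 * fields, avoiding a full descriptor rebuild for every packet.
 */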
static void
nix_form_default_desc(struct cnxk_eth_dev *dev, struct cn9k_eth_txq *txq,
		      uint16_t qid)
{
	struct nix_send_ext_s *send_hdr_ext;
	struct nix_send_hdr_s *send_hdr;
	union nix_send_sg_s *sg;

	/* Initialize the fields based on a basic single-segment packet */
	memset(&txq->cmd, 0, sizeof(txq->cmd));

	if (dev->tx_offload_flags & NIX_TX_NEED_EXT_HDR) {
		send_hdr = (struct nix_send_hdr_s *)&txq->cmd[0];
		/* 2(HDR) + 2(EXT_HDR) + 1(SG) + 1(IOVA) = 6/2 - 1 = 2 */
		send_hdr->w0.sizem1 = 2;

		send_hdr_ext = (struct nix_send_ext_s *)&txq->cmd[2];
		send_hdr_ext->w0.subdc = NIX_SUBDC_EXT;
		sg = (union nix_send_sg_s *)&txq->cmd[4];
	} else {
		send_hdr = (struct nix_send_hdr_s *)&txq->cmd[0];
		/* 2(HDR) + 1(SG) + 1(IOVA) = 4/2 - 1 = 1 */
		send_hdr->w0.sizem1 = 1;
		sg = (union nix_send_sg_s *)&txq->cmd[2];
	}

	send_hdr->w0.sq = qid;
	sg->subdc = NIX_SUBDC_SG;
	sg->ld_type = NIX_SENDLDTYPE_LDD;
}
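
/* Platform Tx queue setup: run the common cnxk setup, then cache the SQ
 * doorbell, LMT and flow-control addresses in the fast-path txq so the Tx
 * burst functions never have to dereference the roc_nix structures.
 */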
static int
cn9k_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
			uint16_t nb_desc, unsigned int socket,
			const struct rte_eth_txconf *tx_conf)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct cn9k_eth_txq *txq;
	struct roc_nix_sq *sq;
	int rc;

	RTE_SET_USED(socket);

	/* Common Tx queue setup */
	rc = cnxk_nix_tx_queue_setup(eth_dev, qid, nb_desc,
				     sizeof(struct cn9k_eth_txq), tx_conf);
	if (rc)
		return rc;

	sq = &dev->sqs[qid];

	/* Update fast path queue */
	txq = eth_dev->data->tx_queues[qid];
	txq->fc_mem = sq->fc;
	txq->lmt_addr = sq->lmt_addr;
	txq->io_addr = sq->io_addr;
	txq->nb_sqb_bufs_adj = sq->nb_sqb_bufs_adj;
	txq->sqes_per_sqb_log2 = sq->sqes_per_sqb_log2;

	nix_form_default_desc(dev, txq, qid);
	txq->lso_tun_fmt = dev->lso_tun_fmt;
	return 0;
}
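
/* Platform Rx queue setup: run the common cnxk setup, then cache the CQ
 * ring base, doorbell and queue state needed by the Rx burst functions
 * directly in the fast-path rxq.
 */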
static int
cn9k_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
			uint16_t nb_desc, unsigned int socket,
			const struct rte_eth_rxconf *rx_conf,
			struct rte_mempool *mp)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct cn9k_eth_rxq *rxq;
	struct roc_nix_rq *rq;
	struct roc_nix_cq *cq;
	int rc;

	RTE_SET_USED(socket);

	/* CQ errata needs a minimum 4K ring; round up transparently */
	if (dev->cq_min_4k && nb_desc < 4096)
		nb_desc = 4096;

	/* Common Rx queue setup */
	rc = cnxk_nix_rx_queue_setup(eth_dev, qid, nb_desc,
				     sizeof(struct cn9k_eth_rxq), rx_conf, mp);
	if (rc)
		return rc;

	rq = &dev->rqs[qid];
	cq = &dev->cqs[qid];

	/* Update fast path queue */
	rxq = eth_dev->data->rx_queues[qid];
	rxq->rq = qid;
	rxq->desc = (uintptr_t)cq->desc_base;
	rxq->cq_door = cq->door;
	rxq->cq_status = cq->status;
	rxq->wdata = cq->wdata;
	rxq->head = cq->head;
	rxq->qmask = cq->qmask;

	/* Packet data starts first_skip bytes into the mbuf buffer */
	rxq->data_off = rq->first_skip;
	rxq->mbuf_initializer = cnxk_nix_rxq_mbuf_setup(dev);

	/* Shared fast-path lookup table (ptype translation etc.) */
	rxq->lookup_mem = cnxk_nix_fastpath_lookup_mem_get();
	return 0;
}

static int
cn9k_nix_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qidx)
{
	struct cn9k_eth_txq *txq = eth_dev->data->tx_queues[qidx];
	int rc;

	rc = cnxk_nix_tx_queue_stop(eth_dev, qidx);
	if (rc)
		return rc;

	/* Clear the cached flow control credits so workers re-read fc_mem
	 * and observe the stopped queue promptly.
	 */
	txq->fc_cache_pkts = 0;
	return 0;
}
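
/* CN9K-specific dev_configure: reject offload combinations that early
 * silicon revisions cannot support, run the common cnxk configure, then
 * derive the fast-path flag masks from the accepted offloads.
 */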
static int
cn9k_nix_configure(struct rte_eth_dev *eth_dev)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct rte_eth_conf *conf = &eth_dev->data->dev_conf;
	struct rte_eth_txmode *txmode = &conf->txmode;
	int rc;

	/* Platform specific checks */
	if ((roc_model_is_cn96_a0() || roc_model_is_cn95_a0()) &&
	    (txmode->offloads & DEV_TX_OFFLOAD_SCTP_CKSUM) &&
	    ((txmode->offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
	     (txmode->offloads & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM))) {
		plt_err("SCTP checksum cannot be combined with outer IPv4/UDP checksum");
		return -EINVAL;
	}

	/* Common nix configure */
	rc = cnxk_nix_configure(eth_dev);
	if (rc)
		return rc;

	/* Update offload flags */
	dev->rx_offload_flags = nix_rx_offload_flags(eth_dev);
	dev->tx_offload_flags = nix_tx_offload_flags(eth_dev);

	plt_nix_dbg("Configured port%d platform specific rx_offload_flags=0x%x"
		    " tx_offload_flags=0x%x",
		    eth_dev->data->port_id, dev->rx_offload_flags,
		    dev->tx_offload_flags);
	return 0;
}
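
/* PTP on a VF is enabled by a PF->VF MBOX notification. The reconfiguration
 * cannot finish inside the MBOX handler because it needs further VF->PF MBOX
 * messages, so it is deferred to the datapath thread via a one-shot Rx burst
 * stub (see nix_ptp_vf_burst below).
 */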

/* Function to enable PTP config for VFs */
static void
nix_ptp_enable_vf(struct rte_eth_dev *eth_dev)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);

	if (nix_recalc_mtu(eth_dev))
		plt_err("Failed to set MTU size for ptp");

	dev->scalar_ena = true;
	dev->rx_offload_flags |= NIX_RX_OFFLOAD_TSTAMP_F;

	/* Select burst functions matching the new offload flags */
	cn9k_eth_set_rx_function(eth_dev);
	cn9k_eth_set_tx_function(eth_dev);
}
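
/* One-shot Rx burst stub: runs once in the datapath thread, performs the
 * deferred PTP setup (which installs the real burst functions), and returns
 * no packets.
 */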
static uint16_t
nix_ptp_vf_burst(void *queue, struct rte_mbuf **mbufs, uint16_t pkts)
{
	struct cn9k_eth_rxq *rxq = queue;
	struct cnxk_eth_rxq_sp *rxq_sp;
	struct rte_eth_dev *eth_dev;

	RTE_SET_USED(mbufs);
	RTE_SET_USED(pkts);

	rxq_sp = cnxk_eth_rxq_to_sp(rxq);
	eth_dev = rxq_sp->dev->eth_dev;
	nix_ptp_enable_vf(eth_dev);

	return 0;
}

static int
cn9k_nix_ptp_info_update_cb(struct roc_nix *nix, bool ptp_en)
{
	struct cnxk_eth_dev *dev = (struct cnxk_eth_dev *)nix;
	struct rte_eth_dev *eth_dev;
	struct cn9k_eth_rxq *rxq;
	int i;

	if (!dev)
		return -EINVAL;

	eth_dev = dev->eth_dev;
	if (!eth_dev)
		return -EINVAL;

	dev->ptp_en = ptp_en;

	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
		rxq = eth_dev->data->rx_queues[i];
		rxq->mbuf_initializer = cnxk_nix_rxq_mbuf_setup(dev);
	}

	if (roc_nix_is_vf_or_sdp(nix) && !roc_nix_is_sdp(nix) &&
	    !roc_nix_is_lbk(nix)) {
		/* In the VF case, the MTU cannot be set directly here: this
		 * callback runs as part of a PF->VF MBOX request, and setting
		 * the MTU itself requires a VF->PF MBOX message. Defer the
		 * work to the datapath thread via the one-shot burst stub.
		 */
		eth_dev->rx_pkt_burst = nix_ptp_vf_burst;
		/* Make the burst function swap visible to datapath threads */
		rte_mb();
	}

	return 0;
}

static int
cn9k_nix_dev_start(struct rte_eth_dev *eth_dev)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	int rc;

	/* Common eth dev start */
	rc = cnxk_nix_dev_start(eth_dev);
	if (rc)
		return rc;

	/* Refresh the rx/tx offload flags in case rx/tx offloads changed
	 * since configure. OR them in so that flags set out of band
	 * (e.g. the PTP timestamp flag) are preserved.
	 */
	dev->rx_offload_flags |= nix_rx_offload_flags(eth_dev);
	dev->tx_offload_flags |= nix_tx_offload_flags(eth_dev);

	cn9k_eth_set_tx_function(eth_dev);
	cn9k_eth_set_rx_function(eth_dev);
	return 0;
}

/* Update platform specific eth dev ops */
static void
nix_eth_dev_ops_override(void)
{
	static int init_once;

	if (init_once)
		return;
	init_once = 1;

	/* Update platform specific ops */
	cnxk_eth_dev_ops.dev_configure = cn9k_nix_configure;
	cnxk_eth_dev_ops.tx_queue_setup = cn9k_nix_tx_queue_setup;
	cnxk_eth_dev_ops.rx_queue_setup = cn9k_nix_rx_queue_setup;
	cnxk_eth_dev_ops.tx_queue_stop = cn9k_nix_tx_queue_stop;
	cnxk_eth_dev_ops.dev_start = cn9k_nix_dev_start;
	cnxk_eth_dev_ops.dev_ptypes_set = cn9k_nix_ptypes_set;
}

static void
npc_flow_ops_override(void)
{
	static int init_once;

	if (init_once)
		return;
	init_once = 1;

	/* Update platform specific ops */
	cnxk_flow_ops.create = cn9k_flow_create;
	cnxk_flow_ops.destroy = cn9k_flow_destroy;
}

static int
cn9k_nix_remove(struct rte_pci_device *pci_dev)
{
	return cnxk_nix_remove(pci_dev);
}

static int
cn9k_nix_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
{
	struct rte_eth_dev *eth_dev;
	struct cnxk_eth_dev *dev;
	int rc;

	if (RTE_CACHE_LINE_SIZE != 128) {
		plt_err("Driver not compiled for CN9K");
		return -EFAULT;
	}

	rc = roc_plt_init();
	if (rc) {
		plt_err("Failed to initialize platform model, rc=%d", rc);
		return rc;
	}

	nix_eth_dev_ops_override();
	npc_flow_ops_override();

	/* Common probe */
	rc = cnxk_nix_probe(pci_drv, pci_dev);
	if (rc)
		return rc;

	/* Find eth dev allocated */
	eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
	if (!eth_dev)
		return -ENOENT;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		/* Setup callbacks for secondary process */
		cn9k_eth_set_tx_function(eth_dev);
		cn9k_eth_set_rx_function(eth_dev);
		return 0;
	}

	dev = cnxk_eth_pmd_priv(eth_dev);

	/* Update capabilities already set for TSO.
	 * TSO is not supported on earlier chip revisions.
	 */
	if (roc_model_is_cn96_a0() || roc_model_is_cn95_a0())
		dev->tx_offload_capa &= ~(DEV_TX_OFFLOAD_TCP_TSO |
					  DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
					  DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
					  DEV_TX_OFFLOAD_GRE_TNL_TSO);

	/* 50G and 100G are supported only on board version C0 and above
	 * of CN9K; mask them off on earlier revisions.
	 */
	if (roc_model_is_cn96_a0() || roc_model_is_cn95_a0()) {
		dev->speed_capa &= ~(uint64_t)ETH_LINK_SPEED_50G;
		dev->speed_capa &= ~(uint64_t)ETH_LINK_SPEED_100G;
	}

	/* Register up msg callbacks for PTP information */
	roc_nix_ptp_info_cb_register(&dev->nix, cn9k_nix_ptp_info_update_cb);

	/* Apply HW errata workarounds */
	if (roc_model_is_cn96_a0() || roc_model_is_cn95_a0())
		dev->cq_min_4k = 1;
	return 0;
}

static const struct rte_pci_id cn9k_pci_nix_map[] = {
	{
		.vendor_id = 0,
	},
};

static struct rte_pci_driver cn9k_pci_nix = {
	.id_table = cn9k_pci_nix_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_NEED_IOVA_AS_VA |
		     RTE_PCI_DRV_INTR_LSC,
	.probe = cn9k_nix_probe,
	.remove = cn9k_nix_remove,
};

RTE_PMD_REGISTER_PCI(net_cn9k, cn9k_pci_nix);
RTE_PMD_REGISTER_PCI_TABLE(net_cn9k, cn9k_pci_nix_map);
RTE_PMD_REGISTER_KMOD_DEP(net_cn9k, "vfio-pci");