/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */
#include "cn10k_ethdev.h"

static uint16_t
nix_rx_offload_flags(struct rte_eth_dev *eth_dev)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct rte_eth_dev_data *data = eth_dev->data;
	struct rte_eth_conf *conf = &data->dev_conf;
	struct rte_eth_rxmode *rxmode = &conf->rxmode;
	uint16_t flags = 0;

	if (rxmode->mq_mode == ETH_MQ_RX_RSS &&
	    (dev->rx_offloads & DEV_RX_OFFLOAD_RSS_HASH))
		flags |= NIX_RX_OFFLOAD_RSS_F;

	if (dev->rx_offloads &
	    (DEV_RX_OFFLOAD_TCP_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM))
		flags |= NIX_RX_OFFLOAD_CHECKSUM_F;

	if (dev->rx_offloads &
	    (DEV_RX_OFFLOAD_IPV4_CKSUM | DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM))
		flags |= NIX_RX_OFFLOAD_CHECKSUM_F;

	if (dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)
		flags |= NIX_RX_MULTI_SEG_F;

	if (!dev->ptype_disable)
		flags |= NIX_RX_OFFLOAD_PTYPE_F;

	return flags;
}

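/* Map ethdev Tx offloads to NIX fast path flags. The RTE_BUILD_BUG_ON
 * checks below pin the mbuf layout and offload bit positions that the
 * fast path Tx routines assume.
 */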
static uint16_t
nix_tx_offload_flags(struct rte_eth_dev *eth_dev)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	uint64_t conf = dev->tx_offloads;
	uint16_t flags = 0;

	/* Fastpath is dependent on these enums */
	RTE_BUILD_BUG_ON(PKT_TX_TCP_CKSUM != (1ULL << 52));
	RTE_BUILD_BUG_ON(PKT_TX_SCTP_CKSUM != (2ULL << 52));
	RTE_BUILD_BUG_ON(PKT_TX_UDP_CKSUM != (3ULL << 52));
	RTE_BUILD_BUG_ON(PKT_TX_IP_CKSUM != (1ULL << 54));
	RTE_BUILD_BUG_ON(PKT_TX_IPV4 != (1ULL << 55));
	RTE_BUILD_BUG_ON(PKT_TX_OUTER_IP_CKSUM != (1ULL << 58));
	RTE_BUILD_BUG_ON(PKT_TX_OUTER_IPV4 != (1ULL << 59));
	RTE_BUILD_BUG_ON(PKT_TX_OUTER_IPV6 != (1ULL << 60));
	RTE_BUILD_BUG_ON(PKT_TX_OUTER_UDP_CKSUM != (1ULL << 41));
	RTE_BUILD_BUG_ON(RTE_MBUF_L2_LEN_BITS != 7);
	RTE_BUILD_BUG_ON(RTE_MBUF_L3_LEN_BITS != 9);
	RTE_BUILD_BUG_ON(RTE_MBUF_OUTL2_LEN_BITS != 7);
	RTE_BUILD_BUG_ON(RTE_MBUF_OUTL3_LEN_BITS != 9);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_off) !=
			 offsetof(struct rte_mbuf, buf_iova) + 8);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) !=
			 offsetof(struct rte_mbuf, buf_iova) + 16);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
			 offsetof(struct rte_mbuf, ol_flags) + 12);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, tx_offload) !=
			 offsetof(struct rte_mbuf, pool) + 2 * sizeof(void *));

	if (conf & DEV_TX_OFFLOAD_VLAN_INSERT ||
	    conf & DEV_TX_OFFLOAD_QINQ_INSERT)
		flags |= NIX_TX_OFFLOAD_VLAN_QINQ_F;

	if (conf & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
	    conf & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)
		flags |= NIX_TX_OFFLOAD_OL3_OL4_CSUM_F;

	if (conf & DEV_TX_OFFLOAD_IPV4_CKSUM ||
	    conf & DEV_TX_OFFLOAD_TCP_CKSUM ||
	    conf & DEV_TX_OFFLOAD_UDP_CKSUM || conf & DEV_TX_OFFLOAD_SCTP_CKSUM)
		flags |= NIX_TX_OFFLOAD_L3_L4_CSUM_F;

	if (!(conf & DEV_TX_OFFLOAD_MBUF_FAST_FREE))
		flags |= NIX_TX_OFFLOAD_MBUF_NOFF_F;

	if (conf & DEV_TX_OFFLOAD_MULTI_SEGS)
		flags |= NIX_TX_MULTI_SEG_F;

	/* Enable Inner checksum for TSO */
	if (conf & DEV_TX_OFFLOAD_TCP_TSO)
		flags |= (NIX_TX_OFFLOAD_TSO_F | NIX_TX_OFFLOAD_L3_L4_CSUM_F);

	/* Enable Inner and Outer checksum for Tunnel TSO */
	if (conf & (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
		    DEV_TX_OFFLOAD_GENEVE_TNL_TSO | DEV_TX_OFFLOAD_GRE_TNL_TSO))
		flags |= (NIX_TX_OFFLOAD_TSO_F | NIX_TX_OFFLOAD_OL3_OL4_CSUM_F |
			  NIX_TX_OFFLOAD_L3_L4_CSUM_F);

	return flags;
}

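/* Runtime toggle for packet type parsing; the Rx burst function is
 * reselected so the change takes effect immediately.
 */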
static int
cn10k_nix_ptypes_set(struct rte_eth_dev *eth_dev, uint32_t ptype_mask)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);

	if (ptype_mask) {
		dev->rx_offload_flags |= NIX_RX_OFFLOAD_PTYPE_F;
		dev->ptype_disable = 0;
	} else {
		dev->rx_offload_flags &= ~NIX_RX_OFFLOAD_PTYPE_F;
		dev->ptype_disable = 1;
	}

	cn10k_eth_set_rx_function(eth_dev);
	return 0;
}

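/* Pre-form the static words of the NIX send descriptor once per queue
 * so the Tx burst path only fills in per-packet fields.
 */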
static void
nix_form_default_desc(struct cnxk_eth_dev *dev, struct cn10k_eth_txq *txq,
		      uint16_t qid)
{
	struct nix_send_ext_s *send_hdr_ext;
	union nix_send_hdr_w0_u send_hdr_w0;
	union nix_send_sg_s sg_w0;

	send_hdr_w0.u = 0;
	sg_w0.u = 0;

	/* Initialize the fields based on basic single segment packet */
	memset(&txq->cmd, 0, sizeof(txq->cmd));

	if (dev->tx_offload_flags & NIX_TX_NEED_EXT_HDR) {
		/* 2(HDR) + 2(EXT_HDR) + 1(SG) + 1(IOVA) = 6/2 - 1 = 2 */
		send_hdr_w0.sizem1 = 2;

		send_hdr_ext = (struct nix_send_ext_s *)&txq->cmd[0];
		send_hdr_ext->w0.subdc = NIX_SUBDC_EXT;
	} else {
		/* 2(HDR) + 1(SG) + 1(IOVA) = 4/2 - 1 = 1 */
		send_hdr_w0.sizem1 = 1;
	}

	send_hdr_w0.sq = qid;
	sg_w0.subdc = NIX_SUBDC_SG;
	sg_w0.segs = 1;
	sg_w0.ld_type = NIX_SENDLDTYPE_LDD;

	txq->send_hdr_w0 = send_hdr_w0.u;
	txq->sg_w0 = sg_w0.u;

	/* Make the pre-formed words visible before the queue is used */
	rte_wmb();
}

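/* Platform-specific Tx queue setup: run the common cnxk setup, then
 * stash SQ state (flow control memory, LMT base, doorbell address) in
 * the fast path queue structure.
 */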
static int
cn10k_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
			 uint16_t nb_desc, unsigned int socket,
			 const struct rte_eth_txconf *tx_conf)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct cn10k_eth_txq *txq;
	struct roc_nix_sq *sq;
	int rc;

	RTE_SET_USED(socket);

	/* Common Tx queue setup */
	rc = cnxk_nix_tx_queue_setup(eth_dev, qid, nb_desc,
				     sizeof(struct cn10k_eth_txq), tx_conf);
	if (rc)
		return rc;

	sq = &dev->sqs[qid];

	/* Update fast path queue */
	txq = eth_dev->data->tx_queues[qid];
	txq->fc_mem = sq->fc;
	/* Store lmt base in tx queue for easy access */
	txq->lmt_base = dev->nix.lmt_base;
	txq->io_addr = sq->io_addr;
	txq->nb_sqb_bufs_adj = sq->nb_sqb_bufs_adj;
	txq->sqes_per_sqb_log2 = sq->sqes_per_sqb_log2;

	nix_form_default_desc(dev, txq, qid);
	txq->lso_tun_fmt = dev->lso_tun_fmt;
	return 0;
}

static int
cn10k_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
			 uint16_t nb_desc, unsigned int socket,
			 const struct rte_eth_rxconf *rx_conf,
			 struct rte_mempool *mp)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct cn10k_eth_rxq *rxq;
	struct roc_nix_rq *rq;
	struct roc_nix_cq *cq;
	int rc;

	RTE_SET_USED(socket);

	/* CQ Errata needs min 4K ring */
	if (dev->cq_min_4k && nb_desc < 4096)
		nb_desc = 4096;

	/* Common Rx queue setup */
	rc = cnxk_nix_rx_queue_setup(eth_dev, qid, nb_desc,
				     sizeof(struct cn10k_eth_rxq), rx_conf, mp);
	if (rc)
		return rc;

	rq = &dev->rqs[qid];
	cq = &dev->cqs[qid];

	/* Update fast path queue */
	rxq = eth_dev->data->rx_queues[qid];
	rxq->rq = qid;
	rxq->desc = (uintptr_t)cq->desc_base;
	rxq->cq_door = cq->door;
	rxq->cq_status = cq->status;
	rxq->wdata = cq->wdata;
	rxq->head = cq->head;
	rxq->qmask = cq->qmask;

	/* Data offset from data to start of mbuf is first_skip */
	rxq->data_off = rq->first_skip;
	rxq->mbuf_initializer = cnxk_nix_rxq_mbuf_setup(dev);

	/* Lookup mem */
	rxq->lookup_mem = cnxk_nix_fastpath_lookup_mem_get();
	return 0;
}

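/* Tx queue stop: after the common stop, drop the cached flow-control
 * credit so the fast path re-reads fc_mem and stops enqueuing.
 */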
static int
cn10k_nix_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qidx)
{
	struct cn10k_eth_txq *txq = eth_dev->data->tx_queues[qidx];
	int rc;

	rc = cnxk_nix_tx_queue_stop(eth_dev, qidx);
	if (rc)
		return rc;

	/* Clear fc cache pkts to trigger worker stop */
	txq->fc_cache_pkts = 0;
	return 0;
}

static int
cn10k_nix_configure(struct rte_eth_dev *eth_dev)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	int rc;

	/* Common nix configure */
	rc = cnxk_nix_configure(eth_dev);
	if (rc)
		return rc;

	/* Update offload flags */
	dev->rx_offload_flags = nix_rx_offload_flags(eth_dev);
	dev->tx_offload_flags = nix_tx_offload_flags(eth_dev);

	plt_nix_dbg("Configured port%d platform specific rx_offload_flags=0x%x"
		    " tx_offload_flags=0x%x",
		    eth_dev->data->port_id, dev->rx_offload_flags,
		    dev->tx_offload_flags);
	return 0;
}

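/* dev_start re-derives the offload flags because rx/tx offloads may
 * have changed (e.g. per-queue offloads) since dev_configure.
 */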
static int
cn10k_nix_dev_start(struct rte_eth_dev *eth_dev)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	int rc;

	/* Common eth dev start */
	rc = cnxk_nix_dev_start(eth_dev);
	if (rc)
		return rc;

	/* Setting up the rx[tx]_offload_flags due to change
	 * in rx[tx]_offloads.
	 */
	dev->rx_offload_flags |= nix_rx_offload_flags(eth_dev);
	dev->tx_offload_flags |= nix_tx_offload_flags(eth_dev);

	cn10k_eth_set_tx_function(eth_dev);
	cn10k_eth_set_rx_function(eth_dev);
	return 0;
}

/* Update platform specific eth dev ops */
static void
nix_eth_dev_ops_override(void)
{
	static int init_once;

	if (init_once)
		return;
	init_once = 1;

	/* Update platform specific ops */
	cnxk_eth_dev_ops.dev_configure = cn10k_nix_configure;
	cnxk_eth_dev_ops.tx_queue_setup = cn10k_nix_tx_queue_setup;
	cnxk_eth_dev_ops.rx_queue_setup = cn10k_nix_rx_queue_setup;
	cnxk_eth_dev_ops.tx_queue_stop = cn10k_nix_tx_queue_stop;
	cnxk_eth_dev_ops.dev_start = cn10k_nix_dev_start;
	cnxk_eth_dev_ops.dev_ptypes_set = cn10k_nix_ptypes_set;
}

static int
cn10k_nix_remove(struct rte_pci_device *pci_dev)
{
	return cnxk_nix_remove(pci_dev);
}

static int
cn10k_nix_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
{
	struct rte_eth_dev *eth_dev;
	int rc;

	if (RTE_CACHE_LINE_SIZE != 64) {
		plt_err("Driver not compiled for CN10K");
		return -EFAULT;
	}

	rc = roc_plt_init();
	if (rc) {
		plt_err("Failed to initialize platform model, rc=%d", rc);
		return rc;
	}

	nix_eth_dev_ops_override();

	/* Common probe */
	rc = cnxk_nix_probe(pci_drv, pci_dev);
	if (rc)
		return rc;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
		if (!eth_dev)
			return -ENOENT;

		/* Setup callbacks for secondary process */
		cn10k_eth_set_tx_function(eth_dev);
		cn10k_eth_set_rx_function(eth_dev);
	}
	return 0;
}

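/* PCI IDs matched by this PMD: CN10KA/CN10KAS variants of the RVU PF,
 * VF and AF-VF functions.
 */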
static const struct rte_pci_id cn10k_pci_nix_map[] = {
	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KA, PCI_DEVID_CNXK_RVU_PF),
	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KAS, PCI_DEVID_CNXK_RVU_PF),
	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KA, PCI_DEVID_CNXK_RVU_VF),
	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KAS, PCI_DEVID_CNXK_RVU_VF),
	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KA, PCI_DEVID_CNXK_RVU_AF_VF),
	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KAS, PCI_DEVID_CNXK_RVU_AF_VF),
	{
		.vendor_id = 0,
	},
};

static struct rte_pci_driver cn10k_pci_nix = {
	.id_table = cn10k_pci_nix_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_NEED_IOVA_AS_VA |
		     RTE_PCI_DRV_INTR_LSC,
	.probe = cn10k_nix_probe,
	.remove = cn10k_nix_remove,
};

RTE_PMD_REGISTER_PCI(net_cn10k, cn10k_pci_nix);
RTE_PMD_REGISTER_PCI_TABLE(net_cn10k, cn10k_pci_nix_map);
RTE_PMD_REGISTER_KMOD_DEP(net_cn10k, "vfio-pci");