/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */
#include "cn10k_ethdev.h"
#include "cn10k_flow.h"
#include "cn10k_rx.h"
#include "cn10k_tx.h"

static uint16_t
nix_rx_offload_flags(struct rte_eth_dev *eth_dev)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct rte_eth_dev_data *data = eth_dev->data;
	struct rte_eth_conf *conf = &data->dev_conf;
	struct rte_eth_rxmode *rxmode = &conf->rxmode;
	uint16_t flags = 0;

	if (rxmode->mq_mode == RTE_ETH_MQ_RX_RSS &&
	    (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH))
		flags |= NIX_RX_OFFLOAD_RSS_F;

	if (dev->rx_offloads &
	    (RTE_ETH_RX_OFFLOAD_TCP_CKSUM | RTE_ETH_RX_OFFLOAD_UDP_CKSUM))
		flags |= NIX_RX_OFFLOAD_CHECKSUM_F;

	if (dev->rx_offloads &
	    (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM))
		flags |= NIX_RX_OFFLOAD_CHECKSUM_F;

	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
		flags |= NIX_RX_MULTI_SEG_F;

	if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP))
		flags |= NIX_RX_OFFLOAD_TSTAMP_F;

	if (!dev->ptype_disable)
		flags |= NIX_RX_OFFLOAD_PTYPE_F;

	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY)
		flags |= NIX_RX_OFFLOAD_SECURITY_F;

	if (dev->rx_mark_update)
		flags |= NIX_RX_OFFLOAD_MARK_UPDATE_F;

	return flags;
}

static uint16_t
nix_tx_offload_flags(struct rte_eth_dev *eth_dev)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	uint64_t conf = dev->tx_offloads;
	uint16_t flags = 0;

	/* Fastpath is dependent on these enums */
	RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_TCP_CKSUM != (1ULL << 52));
	RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_SCTP_CKSUM != (2ULL << 52));
	RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_UDP_CKSUM != (3ULL << 52));
	RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_IP_CKSUM != (1ULL << 54));
	RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_IPV4 != (1ULL << 55));
	RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_OUTER_IP_CKSUM != (1ULL << 58));
	RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_OUTER_IPV4 != (1ULL << 59));
	RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_OUTER_IPV6 != (1ULL << 60));
	RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_OUTER_UDP_CKSUM != (1ULL << 41));
	RTE_BUILD_BUG_ON(RTE_MBUF_L2_LEN_BITS != 7);
	RTE_BUILD_BUG_ON(RTE_MBUF_L3_LEN_BITS != 9);
	RTE_BUILD_BUG_ON(RTE_MBUF_OUTL2_LEN_BITS != 7);
	RTE_BUILD_BUG_ON(RTE_MBUF_OUTL3_LEN_BITS != 9);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_off) !=
			 offsetof(struct rte_mbuf, buf_iova) + 8);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) !=
			 offsetof(struct rte_mbuf, buf_iova) + 16);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
			 offsetof(struct rte_mbuf, ol_flags) + 12);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, tx_offload) !=
			 offsetof(struct rte_mbuf, pool) + 2 * sizeof(void *));
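
	/* The asserts above pin down the exact flag bit positions and mbuf
	 * field offsets the vectorized Tx path relies on: descriptor fields
	 * are filled by shifting ol_flags and by loading tx_offload/buf_iova
	 * at fixed byte offsets. Any change to the rte_mbuf layout or flag
	 * encoding therefore fails the build instead of silently producing
	 * corrupt descriptors.
	 */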

	if (conf & RTE_ETH_TX_OFFLOAD_VLAN_INSERT ||
	    conf & RTE_ETH_TX_OFFLOAD_QINQ_INSERT)
		flags |= NIX_TX_OFFLOAD_VLAN_QINQ_F;

	if (conf & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
	    conf & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM)
		flags |= NIX_TX_OFFLOAD_OL3_OL4_CSUM_F;

	if (conf & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM ||
	    conf & RTE_ETH_TX_OFFLOAD_TCP_CKSUM ||
	    conf & RTE_ETH_TX_OFFLOAD_UDP_CKSUM || conf & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM)
		flags |= NIX_TX_OFFLOAD_L3_L4_CSUM_F;

	if (!(conf & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE))
		flags |= NIX_TX_OFFLOAD_MBUF_NOFF_F;

	if (conf & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
		flags |= NIX_TX_MULTI_SEG_F;

	/* Enable Inner checksum for TSO */
	if (conf & RTE_ETH_TX_OFFLOAD_TCP_TSO)
		flags |= (NIX_TX_OFFLOAD_TSO_F | NIX_TX_OFFLOAD_L3_L4_CSUM_F);

	/* Enable Inner and Outer checksum for Tunnel TSO */
	if (conf & (RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
		    RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO | RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO))
		flags |= (NIX_TX_OFFLOAD_TSO_F | NIX_TX_OFFLOAD_OL3_OL4_CSUM_F |
			  NIX_TX_OFFLOAD_L3_L4_CSUM_F);

	if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP))
		flags |= NIX_TX_OFFLOAD_TSTAMP_F;

	if (conf & RTE_ETH_TX_OFFLOAD_SECURITY)
		flags |= NIX_TX_OFFLOAD_SECURITY_F;

	if (dev->tx_mark)
		flags |= NIX_TX_OFFLOAD_VLAN_QINQ_F;

	return flags;
}
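
/* Note: neither flag word is consulted per packet. cn10k_eth_set_rx_function()
 * and cn10k_eth_set_tx_function() use the Rx/Tx flag words computed above to
 * select a burst routine specialized for exactly the enabled offload set, so
 * offloads that are disabled cost nothing on the fast path.
 */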

static int
cn10k_nix_ptypes_set(struct rte_eth_dev *eth_dev, uint32_t ptype_mask)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);

	if (ptype_mask) {
		dev->rx_offload_flags |= NIX_RX_OFFLOAD_PTYPE_F;
		dev->ptype_disable = 0;
	} else {
		dev->rx_offload_flags &= ~NIX_RX_OFFLOAD_PTYPE_F;
		dev->ptype_disable = 1;
	}

	cn10k_eth_set_rx_function(eth_dev);

	return 0;
}
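
/* A note on the sizem1 arithmetic below: the send descriptor is built from
 * 8-byte words (HDR, optional EXT, SG, IOVA, optional MEM for timestamping),
 * and the hardware encodes the descriptor length as the number of 16-byte
 * chunks minus one, hence the "words / 2 - 1" comments.
 */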
static void
nix_form_default_desc(struct cnxk_eth_dev *dev, struct cn10k_eth_txq *txq,
		      uint16_t qid)
{
	union nix_send_hdr_w0_u send_hdr_w0;

	/* Initialize the fields based on basic single segment packet */
	send_hdr_w0.u = 0;
	if (dev->tx_offload_flags & NIX_TX_NEED_EXT_HDR) {
		/* 2(HDR) + 2(EXT_HDR) + 1(SG) + 1(IOVA) = 6/2 - 1 = 2 */
		send_hdr_w0.sizem1 = 2;
		if (dev->tx_offload_flags & NIX_TX_OFFLOAD_TSTAMP_F) {
			/* Default: one seg packet would have:
			 * 2(HDR) + 2(EXT) + 1(SG) + 1(IOVA) + 2(MEM)
			 * => 8/2 - 1 = 3
			 */
			send_hdr_w0.sizem1 = 3;

			/* To calculate the offset for send_mem,
			 * send_hdr->w0.sizem1 * 2
			 */
			txq->ts_mem = dev->tstamp.tx_tstamp_iova;
		}
	} else {
		/* 2(HDR) + 1(SG) + 1(IOVA) = 4/2 - 1 = 1 */
		send_hdr_w0.sizem1 = 1;
	}
	send_hdr_w0.sq = qid;
	txq->send_hdr_w0 = send_hdr_w0.u;
	rte_wmb();
}

static int
cn10k_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
			 uint16_t nb_desc, unsigned int socket,
			 const struct rte_eth_txconf *tx_conf)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct roc_nix *nix = &dev->nix;
	uint64_t mark_fmt, mark_flag;
	struct roc_cpt_lf *inl_lf;
	struct cn10k_eth_txq *txq;
	struct roc_nix_sq *sq;
	uint16_t crypto_qid;
	int rc;

	RTE_SET_USED(socket);

	/* Common Tx queue setup */
	rc = cnxk_nix_tx_queue_setup(eth_dev, qid, nb_desc,
				     sizeof(struct cn10k_eth_txq), tx_conf);
	if (rc)
		return rc;

	sq = &dev->sqs[qid];
	/* Update fast path queue */
	txq = eth_dev->data->tx_queues[qid];
	txq->fc_mem = sq->fc;
	/* Store lmt base in tx queue for easy access */
	txq->lmt_base = nix->lmt_base;
	txq->io_addr = sq->io_addr;
	txq->nb_sqb_bufs_adj = sq->nb_sqb_bufs_adj;
	txq->sqes_per_sqb_log2 = sq->sqes_per_sqb_log2;

	/* Fetch CPT LF info for outbound if present */
	if (dev->outb.lf_base) {
		crypto_qid = qid % dev->outb.nb_crypto_qs;
		inl_lf = dev->outb.lf_base + crypto_qid;

		txq->cpt_io_addr = inl_lf->io_addr;
		txq->cpt_fc = inl_lf->fc_addr;
		txq->cpt_desc = inl_lf->nb_desc * 0.7;
		txq->sa_base = (uint64_t)dev->outb.sa_base;
		txq->sa_base |= eth_dev->data->port_id;
		PLT_STATIC_ASSERT(ROC_NIX_INL_SA_BASE_ALIGN == BIT_ULL(16));
	}
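
	/* Two details worth noting above: cpt_desc is derated to 70% of the
	 * CPT LF ring to leave flow-control headroom, since one crypto LF may
	 * back several Tx queues (qid % nb_crypto_qs). The static assert
	 * documents why OR-ing the port id into sa_base is safe: the SA base
	 * is 64KB aligned, so its low 16 bits are free to carry the port id.
	 */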

	/* Restore marking flag from roc */
	mark_fmt = roc_nix_tm_mark_format_get(nix, &mark_flag);
	txq->mark_flag = mark_flag & CNXK_TM_MARK_MASK;
	txq->mark_fmt = mark_fmt & CNXK_TX_MARK_FMT_MASK;

	nix_form_default_desc(dev, txq, qid);
	txq->lso_tun_fmt = dev->lso_tun_fmt;
	return 0;
}

static int
cn10k_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
			 uint16_t nb_desc, unsigned int socket,
			 const struct rte_eth_rxconf *rx_conf,
			 struct rte_mempool *mp)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct cnxk_eth_rxq_sp *rxq_sp;
	struct cn10k_eth_rxq *rxq;
	struct roc_nix_rq *rq;
	struct roc_nix_cq *cq;
	int rc;

	RTE_SET_USED(socket);

	/* CQ Errata needs min 4K ring */
	if (dev->cq_min_4k && nb_desc < 4096)
		nb_desc = 4096;

	/* Common Rx queue setup */
	rc = cnxk_nix_rx_queue_setup(eth_dev, qid, nb_desc,
				     sizeof(struct cn10k_eth_rxq), rx_conf, mp);
	if (rc)
		return rc;

	rq = &dev->rqs[qid];
	cq = &dev->cqs[qid];

	/* Update fast path queue */
	rxq = eth_dev->data->rx_queues[qid];
	rxq->rq = qid;
	rxq->desc = (uintptr_t)cq->desc_base;
	rxq->cq_door = cq->door;
	rxq->cq_status = cq->status;
	rxq->wdata = cq->wdata;
	rxq->head = cq->head;
	rxq->qmask = cq->qmask;
	rxq->tstamp = &dev->tstamp;

	/* Data offset from data to start of mbuf is first_skip */
	rxq->data_off = rq->first_skip;
	rxq->mbuf_initializer = cnxk_nix_rxq_mbuf_setup(dev);

	/* Setup security related info */
	if (dev->rx_offload_flags & NIX_RX_OFFLOAD_SECURITY_F) {
		rxq->lmt_base = dev->nix.lmt_base;
		rxq->sa_base = roc_nix_inl_inb_sa_base_get(&dev->nix,
							   dev->inb.inl_dev);
	}
	rxq_sp = cnxk_eth_rxq_to_sp(rxq);
	rxq->aura_handle = rxq_sp->qconf.mp->pool_id;
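
	/* lookup_mem references the process-wide fast path lookup tables
	 * (packet type and Rx error code translation) built by the common
	 * cnxk layer; every queue shares the same table.
	 */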
	rxq->lookup_mem = cnxk_nix_fastpath_lookup_mem_get();
	return 0;
}

static int
cn10k_nix_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qidx)
{
	struct cn10k_eth_txq *txq = eth_dev->data->tx_queues[qidx];
	int rc;

	rc = cnxk_nix_tx_queue_stop(eth_dev, qidx);
	if (rc)
		return rc;

	/* Clear fc cache pkts to trigger worker stop */
	txq->fc_cache_pkts = 0;
	return 0;
}

static int
cn10k_nix_configure(struct rte_eth_dev *eth_dev)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	int rc;

	/* Common nix configure */
	rc = cnxk_nix_configure(eth_dev);
	if (rc)
		return rc;

	if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY ||
	    dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
		/* Register callback to handle security error work */
		roc_nix_inl_cb_register(cn10k_eth_sec_sso_work_cb, NULL);
	}

	/* Update offload flags */
	dev->rx_offload_flags = nix_rx_offload_flags(eth_dev);
	dev->tx_offload_flags = nix_tx_offload_flags(eth_dev);

	/* Reset reassembly dynfield/flag offset */
	dev->reass_dynfield_off = -1;
	dev->reass_dynflag_bit = -1;
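
	/* Both fields stay -1 (unset) until IP reassembly is configured and
	 * the dynamic mbuf field/flag carrying reassembly results is
	 * registered, at which point the real offsets are filled in.
	 */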

	plt_nix_dbg("Configured port%d platform specific rx_offload_flags=0x%x"
		    " tx_offload_flags=0x%x",
		    eth_dev->data->port_id, dev->rx_offload_flags,
		    dev->tx_offload_flags);
	return 0;
}

/* Function to enable ptp config for VFs */
static void
nix_ptp_enable_vf(struct rte_eth_dev *eth_dev)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);

	if (nix_recalc_mtu(eth_dev))
		plt_err("Failed to set MTU size for ptp");

	dev->rx_offload_flags |= NIX_RX_OFFLOAD_TSTAMP_F;

	/* Setting up the function pointers as per new offload flags */
	cn10k_eth_set_rx_function(eth_dev);
	cn10k_eth_set_tx_function(eth_dev);
}
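
/* nix_ptp_vf_burst() is a one-shot trampoline installed as the VF's Rx burst
 * handler when PTP state is toggled by the PF (see
 * cn10k_nix_ptp_info_update_cb()). On the first poll it applies the PTP
 * reconfiguration from datapath context and then restores the real burst
 * function via cn10k_eth_set_rx_function(); no packets are returned from
 * that call.
 */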
static uint16_t
nix_ptp_vf_burst(void *queue, struct rte_mbuf **mbufs, uint16_t pkts)
{
	struct cn10k_eth_rxq *rxq = queue;
	struct cnxk_eth_rxq_sp *rxq_sp;
	struct rte_eth_dev *eth_dev;

	RTE_SET_USED(mbufs);
	RTE_SET_USED(pkts);

	rxq_sp = cnxk_eth_rxq_to_sp(rxq);
	eth_dev = rxq_sp->dev->eth_dev;
	nix_ptp_enable_vf(eth_dev);

	return 0;
}

static int
cn10k_nix_ptp_info_update_cb(struct roc_nix *nix, bool ptp_en)
{
	struct cnxk_eth_dev *dev = (struct cnxk_eth_dev *)nix;
	struct rte_eth_dev *eth_dev;
	struct cn10k_eth_rxq *rxq;
	int i;

	if (!dev)
		return -EINVAL;

	eth_dev = dev->eth_dev;
	if (!eth_dev)
		return -EINVAL;

	dev->ptp_en = ptp_en;

	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
		rxq = eth_dev->data->rx_queues[i];
		rxq->mbuf_initializer = cnxk_nix_rxq_mbuf_setup(dev);
	}

	if (roc_nix_is_vf_or_sdp(nix) && !(roc_nix_is_sdp(nix)) &&
	    !(roc_nix_is_lbk(nix))) {
		/* In case of VF, setting of MTU cannot be done directly in
		 * this function as it runs as part of an MBOX request (PF->VF)
		 * and MTU setting also requires an MBOX message to be sent
		 * (VF->PF). Defer it to the Rx burst trampoline.
		 */
		eth_dev->rx_pkt_burst = nix_ptp_vf_burst;
		rte_mb();
	}

	return 0;
}

static int
cn10k_nix_timesync_enable(struct rte_eth_dev *eth_dev)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	int i, rc;

	rc = cnxk_nix_timesync_enable(eth_dev);
	if (rc)
		return rc;

	dev->rx_offload_flags |= NIX_RX_OFFLOAD_TSTAMP_F;
	dev->tx_offload_flags |= NIX_TX_OFFLOAD_TSTAMP_F;

	for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
		nix_form_default_desc(dev, eth_dev->data->tx_queues[i], i);

	/* Setting up the rx[tx]_offload_flags due to change
	 * in rx[tx]_offloads.
	 */
	cn10k_eth_set_rx_function(eth_dev);
	cn10k_eth_set_tx_function(eth_dev);
	return 0;
}

static int
cn10k_nix_timesync_disable(struct rte_eth_dev *eth_dev)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	int i, rc;

	rc = cnxk_nix_timesync_disable(eth_dev);
	if (rc)
		return rc;

	dev->rx_offload_flags &= ~NIX_RX_OFFLOAD_TSTAMP_F;
	dev->tx_offload_flags &= ~NIX_TX_OFFLOAD_TSTAMP_F;

	for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
		nix_form_default_desc(dev, eth_dev->data->tx_queues[i], i);

	/* Setting up the rx[tx]_offload_flags due to change
	 * in rx[tx]_offloads.
	 */
	cn10k_eth_set_rx_function(eth_dev);
	cn10k_eth_set_tx_function(eth_dev);
	return 0;
}

static int
cn10k_nix_timesync_read_tx_timestamp(struct rte_eth_dev *eth_dev,
				     struct timespec *timestamp)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct cnxk_timesync_info *tstamp = &dev->tstamp;
	uint64_t ns;

	if (*tstamp->tx_tstamp == 0)
		return -EINVAL;
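
	/* The captured timestamp carries seconds in the upper 32 bits and
	 * nanoseconds in the lower 32 bits; flatten it to a plain nanosecond
	 * count before feeding the timecounter.
	 */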
	*tstamp->tx_tstamp = ((*tstamp->tx_tstamp >> 32) * NSEC_PER_SEC) +
			     (*tstamp->tx_tstamp & 0xFFFFFFFFUL);
	ns = rte_timecounter_update(&dev->tx_tstamp_tc, *tstamp->tx_tstamp);
	*timestamp = rte_ns_to_timespec(ns);
	*tstamp->tx_tstamp = 0;
	rte_wmb();

	return 0;
}

static int
cn10k_nix_dev_start(struct rte_eth_dev *eth_dev)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct roc_nix *nix = &dev->nix;
	int rc;

	/* Common eth dev start */
	rc = cnxk_nix_dev_start(eth_dev);
	if (rc)
		return rc;

	/* Update VF about data off shifted by 8 bytes if PTP already
	 * enabled in PF owning this VF
	 */
	if (dev->ptp_en && (!roc_nix_is_pf(nix) && (!roc_nix_is_sdp(nix))))
		nix_ptp_enable_vf(eth_dev);

	/* Setting up the rx[tx]_offload_flags due to change
	 * in rx[tx]_offloads.
	 */
	dev->rx_offload_flags |= nix_rx_offload_flags(eth_dev);
	dev->tx_offload_flags |= nix_tx_offload_flags(eth_dev);

	cn10k_eth_set_tx_function(eth_dev);
	cn10k_eth_set_rx_function(eth_dev);
	return 0;
}

static int
cn10k_nix_rx_metadata_negotiate(struct rte_eth_dev *eth_dev, uint64_t *features)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);

	*features &=
		(RTE_ETH_RX_METADATA_USER_FLAG | RTE_ETH_RX_METADATA_USER_MARK);

	if (*features) {
		dev->rx_offload_flags |= NIX_RX_OFFLOAD_MARK_UPDATE_F;
		dev->rx_mark_update = true;
	} else {
		dev->rx_offload_flags &= ~NIX_RX_OFFLOAD_MARK_UPDATE_F;
		dev->rx_mark_update = false;
	}

	cn10k_eth_set_rx_function(eth_dev);

	return 0;
}

static int
cn10k_nix_reassembly_capability_get(struct rte_eth_dev *eth_dev,
		struct rte_eth_ip_reassembly_params *reassembly_capa)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	int rc = -ENOTSUP;

	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
		reassembly_capa->timeout_ms = 60 * 1000;
		reassembly_capa->max_frags = 4;
		reassembly_capa->flags = RTE_ETH_DEV_REASSEMBLY_F_IPV4 |
					 RTE_ETH_DEV_REASSEMBLY_F_IPV6;
		rc = 0;
	}

	return rc;
}

static int
cn10k_nix_reassembly_conf_get(struct rte_eth_dev *eth_dev,
		struct rte_eth_ip_reassembly_params *conf)
{
	RTE_SET_USED(eth_dev);
	RTE_SET_USED(conf);
	return -ENOTSUP;
}

static int
cn10k_nix_reassembly_conf_set(struct rte_eth_dev *eth_dev,
		const struct rte_eth_ip_reassembly_params *conf)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	int rc = 0;

	rc = roc_nix_reassembly_configure(conf->timeout_ms,
					  conf->max_frags);
	if (!rc && dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY)
		dev->rx_offload_flags |= NIX_RX_REAS_F;

	return rc;
}
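
/* The three traffic manager marking callbacks below share one pattern: apply
 * the change through the common cnxk layer, re-read the resulting mark format
 * from ROC, mirror mark_flag/mark_fmt into every Tx queue, and reselect the
 * Tx burst function, since packet marking rides on the VLAN/QinQ descriptor
 * path (NIX_TX_OFFLOAD_VLAN_QINQ_F).
 */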

static int
cn10k_nix_tm_mark_vlan_dei(struct rte_eth_dev *eth_dev, int mark_green,
			   int mark_yellow, int mark_red,
			   struct rte_tm_error *error)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct roc_nix *roc_nix = &dev->nix;
	uint64_t mark_fmt, mark_flag;
	int i, rc;

	rc = cnxk_nix_tm_mark_vlan_dei(eth_dev, mark_green, mark_yellow,
				       mark_red, error);
	if (rc)
		goto exit;

	mark_fmt = roc_nix_tm_mark_format_get(roc_nix, &mark_flag);
	if (mark_flag) {
		dev->tx_offload_flags |= NIX_TX_OFFLOAD_VLAN_QINQ_F;
		dev->tx_mark = true;
	} else {
		dev->tx_mark = false;
		if (!(dev->tx_offloads & RTE_ETH_TX_OFFLOAD_VLAN_INSERT ||
		      dev->tx_offloads & RTE_ETH_TX_OFFLOAD_QINQ_INSERT))
			dev->tx_offload_flags &= ~NIX_TX_OFFLOAD_VLAN_QINQ_F;
	}

	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
		struct cn10k_eth_txq *txq = eth_dev->data->tx_queues[i];

		txq->mark_flag = mark_flag & CNXK_TM_MARK_MASK;
		txq->mark_fmt = mark_fmt & CNXK_TX_MARK_FMT_MASK;
	}
	cn10k_eth_set_tx_function(eth_dev);
exit:
	return rc;
}

static int
cn10k_nix_tm_mark_ip_ecn(struct rte_eth_dev *eth_dev, int mark_green,
			 int mark_yellow, int mark_red,
			 struct rte_tm_error *error)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct roc_nix *roc_nix = &dev->nix;
	uint64_t mark_fmt, mark_flag;
	int i, rc;

	rc = cnxk_nix_tm_mark_ip_ecn(eth_dev, mark_green, mark_yellow, mark_red,
				     error);
	if (rc)
		goto exit;

	mark_fmt = roc_nix_tm_mark_format_get(roc_nix, &mark_flag);
	if (mark_flag) {
		dev->tx_offload_flags |= NIX_TX_OFFLOAD_VLAN_QINQ_F;
		dev->tx_mark = true;
	} else {
		dev->tx_mark = false;
		if (!(dev->tx_offloads & RTE_ETH_TX_OFFLOAD_VLAN_INSERT ||
		      dev->tx_offloads & RTE_ETH_TX_OFFLOAD_QINQ_INSERT))
			dev->tx_offload_flags &= ~NIX_TX_OFFLOAD_VLAN_QINQ_F;
	}

	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
		struct cn10k_eth_txq *txq = eth_dev->data->tx_queues[i];

		txq->mark_flag = mark_flag & CNXK_TM_MARK_MASK;
		txq->mark_fmt = mark_fmt & CNXK_TX_MARK_FMT_MASK;
	}
	cn10k_eth_set_tx_function(eth_dev);
exit:
	return rc;
}

static int
cn10k_nix_tm_mark_ip_dscp(struct rte_eth_dev *eth_dev, int mark_green,
			  int mark_yellow, int mark_red,
			  struct rte_tm_error *error)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct roc_nix *roc_nix = &dev->nix;
	uint64_t mark_fmt, mark_flag;
	int i, rc;

	rc = cnxk_nix_tm_mark_ip_dscp(eth_dev, mark_green, mark_yellow,
				      mark_red, error);
	if (rc)
		goto exit;

	mark_fmt = roc_nix_tm_mark_format_get(roc_nix, &mark_flag);
	if (mark_flag) {
		dev->tx_offload_flags |= NIX_TX_OFFLOAD_VLAN_QINQ_F;
		dev->tx_mark = true;
	} else {
		dev->tx_mark = false;
		if (!(dev->tx_offloads & RTE_ETH_TX_OFFLOAD_VLAN_INSERT ||
		      dev->tx_offloads & RTE_ETH_TX_OFFLOAD_QINQ_INSERT))
			dev->tx_offload_flags &= ~NIX_TX_OFFLOAD_VLAN_QINQ_F;
	}

	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
		struct cn10k_eth_txq *txq = eth_dev->data->tx_queues[i];

		txq->mark_flag = mark_flag & CNXK_TM_MARK_MASK;
		txq->mark_fmt = mark_fmt & CNXK_TX_MARK_FMT_MASK;
	}
	cn10k_eth_set_tx_function(eth_dev);
exit:
	return rc;
}

/* Update platform specific eth dev ops */
static void
nix_eth_dev_ops_override(void)
{
	static int init_once;

	if (init_once)
		return;
	init_once = 1;

	/* Update platform specific ops */
	cnxk_eth_dev_ops.dev_configure = cn10k_nix_configure;
	cnxk_eth_dev_ops.tx_queue_setup = cn10k_nix_tx_queue_setup;
	cnxk_eth_dev_ops.rx_queue_setup = cn10k_nix_rx_queue_setup;
	cnxk_eth_dev_ops.tx_queue_stop = cn10k_nix_tx_queue_stop;
	cnxk_eth_dev_ops.dev_start = cn10k_nix_dev_start;
	cnxk_eth_dev_ops.dev_ptypes_set = cn10k_nix_ptypes_set;
	cnxk_eth_dev_ops.timesync_enable = cn10k_nix_timesync_enable;
	cnxk_eth_dev_ops.timesync_disable = cn10k_nix_timesync_disable;
	cnxk_eth_dev_ops.rx_metadata_negotiate =
		cn10k_nix_rx_metadata_negotiate;
	cnxk_eth_dev_ops.timesync_read_tx_timestamp =
		cn10k_nix_timesync_read_tx_timestamp;
	cnxk_eth_dev_ops.ip_reassembly_capability_get =
		cn10k_nix_reassembly_capability_get;
	cnxk_eth_dev_ops.ip_reassembly_conf_get = cn10k_nix_reassembly_conf_get;
	cnxk_eth_dev_ops.ip_reassembly_conf_set = cn10k_nix_reassembly_conf_set;
}

/* Update platform specific tm ops */
static void
nix_tm_ops_override(void)
{
	static int init_once;

	if (init_once)
		return;
	init_once = 1;

	/* Update platform specific ops */
	cnxk_tm_ops.mark_vlan_dei = cn10k_nix_tm_mark_vlan_dei;
	cnxk_tm_ops.mark_ip_ecn = cn10k_nix_tm_mark_ip_ecn;
	cnxk_tm_ops.mark_ip_dscp = cn10k_nix_tm_mark_ip_dscp;
}

static void
npc_flow_ops_override(void)
{
	static int init_once;

	if (init_once)
		return;
	init_once = 1;

	/* Update platform specific ops */
	cnxk_flow_ops.create = cn10k_flow_create;
	cnxk_flow_ops.destroy = cn10k_flow_destroy;
}

static int
cn10k_nix_remove(struct rte_pci_device *pci_dev)
{
	return cnxk_nix_remove(pci_dev);
}

static int
cn10k_nix_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
{
	struct rte_eth_dev *eth_dev;
	struct cnxk_eth_dev *dev;
	int rc;

	if (RTE_CACHE_LINE_SIZE != 64) {
		plt_err("Driver not compiled for CN10K");
		return -EFAULT;
	}

	rc = roc_plt_init();
	if (rc) {
		plt_err("Failed to initialize platform model, rc=%d", rc);
		return rc;
	}

	nix_eth_dev_ops_override();
	nix_tm_ops_override();
	npc_flow_ops_override();

	cn10k_eth_sec_ops_override();

	/* Common probe */
	rc = cnxk_nix_probe(pci_drv, pci_dev);
	if (rc)
		return rc;

	/* Find eth dev allocated */
	eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
	if (!eth_dev)
		return -ENOENT;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		/* Setup callbacks for secondary process */
		cn10k_eth_set_tx_function(eth_dev);
		cn10k_eth_set_rx_function(eth_dev);
		return 0;
	}

	dev = cnxk_eth_pmd_priv(eth_dev);

	/* DROP_RE is not supported with inline IPSec for CN10K A0 and
	 * when vector mode is enabled.
	 */
	if ((roc_model_is_cn10ka_a0() || roc_model_is_cnf10ka_a0() ||
	     roc_model_is_cnf10kb_a0()) &&
	    !roc_env_is_asim()) {
		dev->ipsecd_drop_re_dis = 1;
		dev->vec_drop_re_dis = 1;
	}

	/* Register up msg callbacks for PTP information */
	roc_nix_ptp_info_cb_register(&dev->nix, cn10k_nix_ptp_info_update_cb);

	return 0;
}

static const struct rte_pci_id cn10k_pci_nix_map[] = {
	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KA, PCI_DEVID_CNXK_RVU_PF),
	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KAS, PCI_DEVID_CNXK_RVU_PF),
	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CNF10KA, PCI_DEVID_CNXK_RVU_PF),
	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KA, PCI_DEVID_CNXK_RVU_VF),
	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KAS, PCI_DEVID_CNXK_RVU_VF),
	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CNF10KA, PCI_DEVID_CNXK_RVU_VF),
	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KA, PCI_DEVID_CNXK_RVU_AF_VF),
	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KAS, PCI_DEVID_CNXK_RVU_AF_VF),
	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CNF10KA, PCI_DEVID_CNXK_RVU_AF_VF),
	{
		.vendor_id = 0,
	},
};

static struct rte_pci_driver cn10k_pci_nix = {
	.id_table = cn10k_pci_nix_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_NEED_IOVA_AS_VA |
		     RTE_PCI_DRV_INTR_LSC,
	.probe = cn10k_nix_probe,
	.remove = cn10k_nix_remove,
};

RTE_PMD_REGISTER_PCI(net_cn10k, cn10k_pci_nix);
RTE_PMD_REGISTER_PCI_TABLE(net_cn10k, cn10k_pci_nix_map);
RTE_PMD_REGISTER_KMOD_DEP(net_cn10k, "vfio-pci");