X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fcnxk%2Fcn10k_ethdev.c;h=f7eb0f437b779bd2f4fa31097570f7879983b935;hb=295968d1740760337e16b0d7914875c5cac52850;hp=b079edbd35a94dc029f73aea8dee14f4dae29fcc;hpb=5a6ce511b1f306eb915d75b69286d980d58e0e45;p=dpdk.git

diff --git a/drivers/net/cnxk/cn10k_ethdev.c b/drivers/net/cnxk/cn10k_ethdev.c
index b079edbd35..f7eb0f437b 100644
--- a/drivers/net/cnxk/cn10k_ethdev.c
+++ b/drivers/net/cnxk/cn10k_ethdev.c
@@ -15,27 +15,30 @@ nix_rx_offload_flags(struct rte_eth_dev *eth_dev)
 	struct rte_eth_rxmode *rxmode = &conf->rxmode;
 	uint16_t flags = 0;
 
-	if (rxmode->mq_mode == ETH_MQ_RX_RSS &&
-	    (dev->rx_offloads & DEV_RX_OFFLOAD_RSS_HASH))
+	if (rxmode->mq_mode == RTE_ETH_MQ_RX_RSS &&
+	    (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH))
 		flags |= NIX_RX_OFFLOAD_RSS_F;
 
 	if (dev->rx_offloads &
-	    (DEV_RX_OFFLOAD_TCP_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM))
+	    (RTE_ETH_RX_OFFLOAD_TCP_CKSUM | RTE_ETH_RX_OFFLOAD_UDP_CKSUM))
 		flags |= NIX_RX_OFFLOAD_CHECKSUM_F;
 
 	if (dev->rx_offloads &
-	    (DEV_RX_OFFLOAD_IPV4_CKSUM | DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM))
+	    (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM))
 		flags |= NIX_RX_OFFLOAD_CHECKSUM_F;
 
-	if (dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)
+	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
 		flags |= NIX_RX_MULTI_SEG_F;
 
-	if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP))
+	if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP))
 		flags |= NIX_RX_OFFLOAD_TSTAMP_F;
 
 	if (!dev->ptype_disable)
 		flags |= NIX_RX_OFFLOAD_PTYPE_F;
 
+	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY)
+		flags |= NIX_RX_OFFLOAD_SECURITY_F;
+
 	return flags;
 }
 
@@ -69,38 +72,41 @@ nix_tx_offload_flags(struct rte_eth_dev *eth_dev)
 	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, tx_offload) !=
 			 offsetof(struct rte_mbuf, pool) + 2 * sizeof(void *));
 
-	if (conf & DEV_TX_OFFLOAD_VLAN_INSERT ||
-	    conf & DEV_TX_OFFLOAD_QINQ_INSERT)
+	if (conf & RTE_ETH_TX_OFFLOAD_VLAN_INSERT ||
+	    conf & RTE_ETH_TX_OFFLOAD_QINQ_INSERT)
 		flags |= NIX_TX_OFFLOAD_VLAN_QINQ_F;
 
-	if (conf & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
-	    conf & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)
+	if (conf & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
+	    conf & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM)
 		flags |= NIX_TX_OFFLOAD_OL3_OL4_CSUM_F;
 
-	if (conf & DEV_TX_OFFLOAD_IPV4_CKSUM ||
-	    conf & DEV_TX_OFFLOAD_TCP_CKSUM ||
-	    conf & DEV_TX_OFFLOAD_UDP_CKSUM || conf & DEV_TX_OFFLOAD_SCTP_CKSUM)
+	if (conf & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM ||
+	    conf & RTE_ETH_TX_OFFLOAD_TCP_CKSUM ||
+	    conf & RTE_ETH_TX_OFFLOAD_UDP_CKSUM || conf & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM)
 		flags |= NIX_TX_OFFLOAD_L3_L4_CSUM_F;
 
-	if (!(conf & DEV_TX_OFFLOAD_MBUF_FAST_FREE))
+	if (!(conf & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE))
 		flags |= NIX_TX_OFFLOAD_MBUF_NOFF_F;
 
-	if (conf & DEV_TX_OFFLOAD_MULTI_SEGS)
+	if (conf & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 		flags |= NIX_TX_MULTI_SEG_F;
 
 	/* Enable Inner checksum for TSO */
-	if (conf & DEV_TX_OFFLOAD_TCP_TSO)
+	if (conf & RTE_ETH_TX_OFFLOAD_TCP_TSO)
 		flags |= (NIX_TX_OFFLOAD_TSO_F | NIX_TX_OFFLOAD_L3_L4_CSUM_F);
 
 	/* Enable Inner and Outer checksum for Tunnel TSO */
-	if (conf & (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-		    DEV_TX_OFFLOAD_GENEVE_TNL_TSO | DEV_TX_OFFLOAD_GRE_TNL_TSO))
+	if (conf & (RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+		    RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO | RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO))
 		flags |= (NIX_TX_OFFLOAD_TSO_F | NIX_TX_OFFLOAD_OL3_OL4_CSUM_F |
 			  NIX_TX_OFFLOAD_L3_L4_CSUM_F);
 
-	if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP))
+	if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP))
 		flags |= NIX_TX_OFFLOAD_TSTAMP_F;
 
+	if (conf & RTE_ETH_TX_OFFLOAD_SECURITY)
+		flags |= NIX_TX_OFFLOAD_SECURITY_F;
+
 	return flags;
 }
 
@@ -181,8 +187,11 @@ cn10k_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
 			 const struct rte_eth_txconf *tx_conf)
 {
 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+	struct roc_nix *nix = &dev->nix;
+	struct roc_cpt_lf *inl_lf;
 	struct cn10k_eth_txq *txq;
 	struct roc_nix_sq *sq;
+	uint16_t crypto_qid;
 	int rc;
 
 	RTE_SET_USED(socket);
@@ -198,11 +207,24 @@ cn10k_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
 	txq = eth_dev->data->tx_queues[qid];
 	txq->fc_mem = sq->fc;
 	/* Store lmt base in tx queue for easy access */
-	txq->lmt_base = dev->nix.lmt_base;
+	txq->lmt_base = nix->lmt_base;
 	txq->io_addr = sq->io_addr;
 	txq->nb_sqb_bufs_adj = sq->nb_sqb_bufs_adj;
 	txq->sqes_per_sqb_log2 = sq->sqes_per_sqb_log2;
 
+	/* Fetch CPT LF info for outbound if present */
+	if (dev->outb.lf_base) {
+		crypto_qid = qid % dev->outb.nb_crypto_qs;
+		inl_lf = dev->outb.lf_base + crypto_qid;
+
+		txq->cpt_io_addr = inl_lf->io_addr;
+		txq->cpt_fc = inl_lf->fc_addr;
+		txq->cpt_desc = inl_lf->nb_desc * 0.7;
+		txq->sa_base = (uint64_t)dev->outb.sa_base;
+		txq->sa_base |= eth_dev->data->port_id;
+		PLT_STATIC_ASSERT(ROC_NIX_INL_SA_BASE_ALIGN == BIT_ULL(16));
+	}
+
 	nix_form_default_desc(dev, txq, qid);
 	txq->lso_tun_fmt = dev->lso_tun_fmt;
 	return 0;
@@ -215,6 +237,7 @@ cn10k_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
 			 struct rte_mempool *mp)
 {
 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+	struct cnxk_eth_rxq_sp *rxq_sp;
 	struct cn10k_eth_rxq *rxq;
 	struct roc_nix_rq *rq;
 	struct roc_nix_cq *cq;
@@ -250,6 +273,15 @@ cn10k_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
 	rxq->data_off = rq->first_skip;
 	rxq->mbuf_initializer = cnxk_nix_rxq_mbuf_setup(dev);
 
+	/* Setup security related info */
+	if (dev->rx_offload_flags & NIX_RX_OFFLOAD_SECURITY_F) {
+		rxq->lmt_base = dev->nix.lmt_base;
+		rxq->sa_base = roc_nix_inl_inb_sa_base_get(&dev->nix,
+							   dev->inb.inl_dev);
+	}
+	rxq_sp = cnxk_eth_rxq_to_sp(rxq);
+	rxq->aura_handle = rxq_sp->qconf.mp->pool_id;
+
 	/* Lookup mem */
 	rxq->lookup_mem = cnxk_nix_fastpath_lookup_mem_get();
 	return 0;
@@ -301,7 +333,6 @@ nix_ptp_enable_vf(struct rte_eth_dev *eth_dev)
 	if (nix_recalc_mtu(eth_dev))
 		plt_err("Failed to set MTU size for ptp");
 
-	dev->scalar_ena = true;
 	dev->rx_offload_flags |= NIX_RX_OFFLOAD_TSTAMP_F;
 
 	/* Setting up the function pointers as per new offload flags */
@@ -501,6 +532,8 @@ cn10k_nix_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 	nix_eth_dev_ops_override();
 	npc_flow_ops_override();
 
+	cn10k_eth_sec_ops_override();
+
 	/* Common probe */
 	rc = cnxk_nix_probe(pci_drv, pci_dev);
 	if (rc)
@@ -520,6 +553,11 @@ cn10k_nix_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 
 	dev = cnxk_eth_pmd_priv(eth_dev);
 
+	/* DROP_RE is not supported with inline IPSec for CN10K A0 */
+	if (roc_model_is_cn10ka_a0() || roc_model_is_cnf10ka_a0() ||
+	    roc_model_is_cnf10kb_a0())
+		dev->ipsecd_drop_re_dis = 1;
+
 	/* Register up msg callbacks for PTP information */
 	roc_nix_ptp_info_cb_register(&dev->nix, cn10k_nix_ptp_info_update_cb);
 
@@ -529,10 +567,13 @@ cn10k_nix_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 static const struct rte_pci_id cn10k_pci_nix_map[] = {
 	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KA, PCI_DEVID_CNXK_RVU_PF),
 	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KAS, PCI_DEVID_CNXK_RVU_PF),
+	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CNF10KA, PCI_DEVID_CNXK_RVU_PF),
 	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KA, PCI_DEVID_CNXK_RVU_VF),
 	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KAS, PCI_DEVID_CNXK_RVU_VF),
+	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CNF10KA, PCI_DEVID_CNXK_RVU_VF),
 	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KA, PCI_DEVID_CNXK_RVU_AF_VF),
 	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KAS, PCI_DEVID_CNXK_RVU_AF_VF),
+	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CNF10KA, PCI_DEVID_CNXK_RVU_AF_VF),
 	{
 		.vendor_id = 0,
 	},