diff --git a/drivers/net/cnxk/cn9k_ethdev.c b/drivers/net/cnxk/cn9k_ethdev.c
index cf9f7c7fb0..17f8f6debb 100644
--- a/drivers/net/cnxk/cn9k_ethdev.c
+++ b/drivers/net/cnxk/cn9k_ethdev.c
@@ -15,24 +15,30 @@ nix_rx_offload_flags(struct rte_eth_dev *eth_dev)
 	struct rte_eth_rxmode *rxmode = &conf->rxmode;
 	uint16_t flags = 0;
 
-	if (rxmode->mq_mode == ETH_MQ_RX_RSS &&
-	    (dev->rx_offloads & DEV_RX_OFFLOAD_RSS_HASH))
+	if (rxmode->mq_mode == RTE_ETH_MQ_RX_RSS &&
+	    (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH))
 		flags |= NIX_RX_OFFLOAD_RSS_F;
 
 	if (dev->rx_offloads &
-	    (DEV_RX_OFFLOAD_TCP_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM))
+	    (RTE_ETH_RX_OFFLOAD_TCP_CKSUM | RTE_ETH_RX_OFFLOAD_UDP_CKSUM))
 		flags |= NIX_RX_OFFLOAD_CHECKSUM_F;
 
 	if (dev->rx_offloads &
-	    (DEV_RX_OFFLOAD_IPV4_CKSUM | DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM))
+	    (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM))
 		flags |= NIX_RX_OFFLOAD_CHECKSUM_F;
 
-	if (dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)
+	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
 		flags |= NIX_RX_MULTI_SEG_F;
 
+	if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP))
+		flags |= NIX_RX_OFFLOAD_TSTAMP_F;
+
 	if (!dev->ptype_disable)
 		flags |= NIX_RX_OFFLOAD_PTYPE_F;
 
+	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY)
+		flags |= NIX_RX_OFFLOAD_SECURITY_F;
+
 	return flags;
 }
 
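Note: the hunk above is mostly the mechanical ETH_/DEV_ to RTE_ETH_ macro rename, but the TIMESTAMP and SECURITY checks are new behaviour, mapping PTP timestamping and inline IPsec onto the NIX fast-path flags. A minimal sketch of an application-side configuration that would exercise the renamed flags (hypothetical helper, not part of this patch); the requested offloads end up in dev->rx_offloads, which is what nix_rx_offload_flags() inspects:

#include <rte_ethdev.h>

/* Hypothetical: configure one port such that nix_rx_offload_flags()
 * would set NIX_RX_OFFLOAD_RSS_F, NIX_RX_OFFLOAD_CHECKSUM_F and
 * NIX_RX_OFFLOAD_TSTAMP_F.
 */
static int
configure_port(uint16_t port_id)
{
	struct rte_eth_conf conf = {
		.rxmode = {
			.mq_mode = RTE_ETH_MQ_RX_RSS,
			.offloads = RTE_ETH_RX_OFFLOAD_RSS_HASH |
				    RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
				    RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
				    RTE_ETH_RX_OFFLOAD_TIMESTAMP,
		},
	};

	return rte_eth_dev_configure(port_id, 1, 1, &conf);
}
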
@@ -66,35 +72,41 @@ nix_tx_offload_flags(struct rte_eth_dev *eth_dev)
 	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, tx_offload) !=
 			 offsetof(struct rte_mbuf, pool) + 2 * sizeof(void *));
 
-	if (conf & DEV_TX_OFFLOAD_VLAN_INSERT ||
-	    conf & DEV_TX_OFFLOAD_QINQ_INSERT)
+	if (conf & RTE_ETH_TX_OFFLOAD_VLAN_INSERT ||
+	    conf & RTE_ETH_TX_OFFLOAD_QINQ_INSERT)
 		flags |= NIX_TX_OFFLOAD_VLAN_QINQ_F;
 
-	if (conf & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
-	    conf & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)
+	if (conf & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
+	    conf & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM)
 		flags |= NIX_TX_OFFLOAD_OL3_OL4_CSUM_F;
 
-	if (conf & DEV_TX_OFFLOAD_IPV4_CKSUM ||
-	    conf & DEV_TX_OFFLOAD_TCP_CKSUM ||
-	    conf & DEV_TX_OFFLOAD_UDP_CKSUM || conf & DEV_TX_OFFLOAD_SCTP_CKSUM)
+	if (conf & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM ||
+	    conf & RTE_ETH_TX_OFFLOAD_TCP_CKSUM ||
+	    conf & RTE_ETH_TX_OFFLOAD_UDP_CKSUM || conf & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM)
 		flags |= NIX_TX_OFFLOAD_L3_L4_CSUM_F;
 
-	if (!(conf & DEV_TX_OFFLOAD_MBUF_FAST_FREE))
+	if (!(conf & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE))
 		flags |= NIX_TX_OFFLOAD_MBUF_NOFF_F;
 
-	if (conf & DEV_TX_OFFLOAD_MULTI_SEGS)
+	if (conf & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 		flags |= NIX_TX_MULTI_SEG_F;
 
 	/* Enable Inner checksum for TSO */
-	if (conf & DEV_TX_OFFLOAD_TCP_TSO)
+	if (conf & RTE_ETH_TX_OFFLOAD_TCP_TSO)
 		flags |= (NIX_TX_OFFLOAD_TSO_F | NIX_TX_OFFLOAD_L3_L4_CSUM_F);
 
 	/* Enable Inner and Outer checksum for Tunnel TSO */
-	if (conf & (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-		    DEV_TX_OFFLOAD_GENEVE_TNL_TSO | DEV_TX_OFFLOAD_GRE_TNL_TSO))
+	if (conf & (RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+		    RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO | RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO))
 		flags |= (NIX_TX_OFFLOAD_TSO_F | NIX_TX_OFFLOAD_OL3_OL4_CSUM_F |
 			  NIX_TX_OFFLOAD_L3_L4_CSUM_F);
 
+	if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP))
+		flags |= NIX_TX_OFFLOAD_TSTAMP_F;
+
+	if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY)
+		flags |= NIX_TX_OFFLOAD_SECURITY_F;
+
 	return flags;
 }
 
@@ -121,10 +133,9 @@ nix_form_default_desc(struct cnxk_eth_dev *dev, struct cn9k_eth_txq *txq,
 {
 	struct nix_send_ext_s *send_hdr_ext;
 	struct nix_send_hdr_s *send_hdr;
+	struct nix_send_mem_s *send_mem;
 	union nix_send_sg_s *sg;
 
-	RTE_SET_USED(dev);
-
 	/* Initialize the fields based on basic single segment packet */
 	memset(&txq->cmd, 0, sizeof(txq->cmd));
 
@@ -135,6 +146,23 @@ nix_form_default_desc(struct cnxk_eth_dev *dev, struct cn9k_eth_txq *txq,
 
 		send_hdr_ext = (struct nix_send_ext_s *)&txq->cmd[2];
 		send_hdr_ext->w0.subdc = NIX_SUBDC_EXT;
+		if (dev->tx_offload_flags & NIX_TX_OFFLOAD_TSTAMP_F) {
+			/* Default: one seg packet would have:
+			 * 2(HDR) + 2(EXT) + 1(SG) + 1(IOVA) + 2(MEM)
+			 * => 8/2 - 1 = 3
+			 */
+			send_hdr->w0.sizem1 = 3;
+			send_hdr_ext->w0.tstmp = 1;
+
+			/* To calculate the offset for send_mem,
+			 * send_hdr->w0.sizem1 * 2
+			 */
+			send_mem = (struct nix_send_mem_s *)
+				(txq->cmd + (send_hdr->w0.sizem1 << 1));
+			send_mem->w0.cn9k.subdc = NIX_SUBDC_MEM;
+			send_mem->w0.cn9k.alg = NIX_SENDMEMALG_SETTSTMP;
+			send_mem->addr = dev->tstamp.tx_tstamp_iova;
+		}
 		sg = (union nix_send_sg_s *)&txq->cmd[4];
 	} else {
 		send_hdr = (struct nix_send_hdr_s *)&txq->cmd[0];
@@ -157,8 +185,10 @@ cn9k_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
 			const struct rte_eth_txconf *tx_conf)
 {
 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+	struct roc_cpt_lf *inl_lf;
 	struct cn9k_eth_txq *txq;
 	struct roc_nix_sq *sq;
+	uint16_t crypto_qid;
 	int rc;
 
 	RTE_SET_USED(socket);
@@ -178,6 +208,19 @@ cn9k_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
 	txq->nb_sqb_bufs_adj = sq->nb_sqb_bufs_adj;
 	txq->sqes_per_sqb_log2 = sq->sqes_per_sqb_log2;
 
+	/* Fetch CPT LF info for outbound if present */
+	if (dev->outb.lf_base) {
+		crypto_qid = qid % dev->outb.nb_crypto_qs;
+		inl_lf = dev->outb.lf_base + crypto_qid;
+
+		txq->cpt_io_addr = inl_lf->io_addr;
+		txq->cpt_fc = inl_lf->fc_addr;
+		txq->cpt_desc = inl_lf->nb_desc * 0.7;
+		txq->sa_base = (uint64_t)dev->outb.sa_base;
+		txq->sa_base |= eth_dev->data->port_id;
+		PLT_STATIC_ASSERT(BIT_ULL(16) == ROC_NIX_INL_SA_BASE_ALIGN);
+	}
+
 	nix_form_default_desc(dev, txq, qid);
 	txq->lso_tun_fmt = dev->lso_tun_fmt;
 	return 0;
@@ -219,6 +262,7 @@ cn9k_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
 	rxq->wdata = cq->wdata;
 	rxq->head = cq->head;
 	rxq->qmask = cq->qmask;
+	rxq->tstamp = &dev->tstamp;
 
 	/* Data offset from data to start of mbuf is first_skip */
 	rxq->data_off = rq->first_skip;
@@ -254,9 +298,9 @@ cn9k_nix_configure(struct rte_eth_dev *eth_dev)
 
 	/* Platform specific checks */
 	if ((roc_model_is_cn96_a0() || roc_model_is_cn95_a0()) &&
-	    (txmode->offloads & DEV_TX_OFFLOAD_SCTP_CKSUM) &&
-	    ((txmode->offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
-	     (txmode->offloads & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM))) {
+	    (txmode->offloads & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM) &&
+	    ((txmode->offloads & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
+	     (txmode->offloads & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM))) {
 		plt_err("Outer IP and SCTP checksum unsupported");
 		return -EINVAL;
 	}
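Note on the queue-setup hunks above: send_hdr->w0.sizem1 counts 16-byte (two 64-bit word) units minus one, so the timestamped single-segment command of 2(HDR) + 2(EXT) + 1(SG) + 1(IOVA) + 2(MEM) = 8 words gives 8/2 - 1 = 3, and sizem1 << 1 = 6 is exactly the word offset at which the MEM subdescriptor begins. Also worth noting: each TX queue is credited only 70% of its CPT LF's descriptors (cpt_desc = inl_lf->nb_desc * 0.7), presumably to leave headroom when several TX queues share one crypto LF. A standalone sketch of the sizing arithmetic (illustrative only, not driver code):

#include <assert.h>

int
main(void)
{
	/* 64-bit word counts for a single-segment command with timestamp */
	unsigned int hdr = 2, ext = 2, sg = 1, iova = 1, mem = 2;
	unsigned int words = hdr + ext + sg + iova + mem;	/* = 8 */
	unsigned int sizem1 = words / 2 - 1;			/* = 3 */

	assert(sizem1 == 3);
	/* The MEM subdescriptor lands right after HDR+EXT+SG+IOVA,
	 * i.e. at word 6 == sizem1 << 1, matching the send_mem offset
	 * computed in nix_form_default_desc().
	 */
	assert((sizem1 << 1) == hdr + ext + sg + iova);
	return 0;
}
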
@@ -277,10 +321,128 @@
 	return 0;
 }
 
+/* Function to enable ptp config for VFs */
+static void
+nix_ptp_enable_vf(struct rte_eth_dev *eth_dev)
+{
+	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+
+	if (nix_recalc_mtu(eth_dev))
+		plt_err("Failed to set MTU size for ptp");
+
+	dev->rx_offload_flags |= NIX_RX_OFFLOAD_TSTAMP_F;
+
+	/* Setting up the function pointers as per new offload flags */
+	cn9k_eth_set_rx_function(eth_dev);
+	cn9k_eth_set_tx_function(eth_dev);
+}
+
+static uint16_t
+nix_ptp_vf_burst(void *queue, struct rte_mbuf **mbufs, uint16_t pkts)
+{
+	struct cn9k_eth_rxq *rxq = queue;
+	struct cnxk_eth_rxq_sp *rxq_sp;
+	struct rte_eth_dev *eth_dev;
+
+	RTE_SET_USED(mbufs);
+	RTE_SET_USED(pkts);
+
+	rxq_sp = cnxk_eth_rxq_to_sp(rxq);
+	eth_dev = rxq_sp->dev->eth_dev;
+	nix_ptp_enable_vf(eth_dev);
+
+	return 0;
+}
+
+static int
+cn9k_nix_ptp_info_update_cb(struct roc_nix *nix, bool ptp_en)
+{
+	struct cnxk_eth_dev *dev = (struct cnxk_eth_dev *)nix;
+	struct rte_eth_dev *eth_dev;
+	struct cn9k_eth_rxq *rxq;
+	int i;
+
+	if (!dev)
+		return -EINVAL;
+
+	eth_dev = dev->eth_dev;
+	if (!eth_dev)
+		return -EINVAL;
+
+	dev->ptp_en = ptp_en;
+
+	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
+		rxq = eth_dev->data->rx_queues[i];
+		rxq->mbuf_initializer = cnxk_nix_rxq_mbuf_setup(dev);
+	}
+
+	if (roc_nix_is_vf_or_sdp(nix) && !(roc_nix_is_sdp(nix)) &&
+	    !(roc_nix_is_lbk(nix))) {
+		/* In case of VF, setting of MTU cannot be done directly in this
+		 * function as this is running as part of MBOX request(PF->VF)
+		 * and MTU setting also requires MBOX message to be
+		 * sent(VF->PF)
+		 */
+		eth_dev->rx_pkt_burst = nix_ptp_vf_burst;
+		rte_mb();
+	}
+
+	return 0;
+}
+
+static int
+cn9k_nix_timesync_enable(struct rte_eth_dev *eth_dev)
+{
+	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+	int i, rc;
+
+	rc = cnxk_nix_timesync_enable(eth_dev);
+	if (rc)
+		return rc;
+
+	dev->rx_offload_flags |= NIX_RX_OFFLOAD_TSTAMP_F;
+	dev->tx_offload_flags |= NIX_TX_OFFLOAD_TSTAMP_F;
+
+	for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
+		nix_form_default_desc(dev, eth_dev->data->tx_queues[i], i);
+
+	/* Setting up the rx[tx]_offload_flags due to change
+	 * in rx[tx]_offloads.
+	 */
+	cn9k_eth_set_rx_function(eth_dev);
+	cn9k_eth_set_tx_function(eth_dev);
+	return 0;
+}
+
+static int
+cn9k_nix_timesync_disable(struct rte_eth_dev *eth_dev)
+{
+	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+	int i, rc;
+
+	rc = cnxk_nix_timesync_disable(eth_dev);
+	if (rc)
+		return rc;
+
+	dev->rx_offload_flags &= ~NIX_RX_OFFLOAD_TSTAMP_F;
+	dev->tx_offload_flags &= ~NIX_TX_OFFLOAD_TSTAMP_F;
+
+	for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
+		nix_form_default_desc(dev, eth_dev->data->tx_queues[i], i);
+
+	/* Setting up the rx[tx]_offload_flags due to change
+	 * in rx[tx]_offloads.
+	 */
+	cn9k_eth_set_rx_function(eth_dev);
+	cn9k_eth_set_tx_function(eth_dev);
+	return 0;
+}
+
 static int
 cn9k_nix_dev_start(struct rte_eth_dev *eth_dev)
 {
 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+	struct roc_nix *nix = &dev->nix;
 	int rc;
 
 	/* Common eth dev start */
@@ -288,6 +450,12 @@ cn9k_nix_dev_start(struct rte_eth_dev *eth_dev)
 	if (rc)
 		return rc;
 
+	/* Update VF about data off shifted by 8 bytes if PTP already
+	 * enabled in PF owning this VF
+	 */
+	if (dev->ptp_en && (!roc_nix_is_pf(nix) && (!roc_nix_is_sdp(nix))))
+		nix_ptp_enable_vf(eth_dev);
+
 	/* Setting up the rx[tx]_offload_flags due to change
 	 * in rx[tx]_offloads.
 	 */
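Note: cn9k_nix_ptp_info_update_cb() runs while servicing a PF->VF mailbox request, and the MTU update that PTP needs would itself require a VF->PF mailbox message, so the patch parks nix_ptp_vf_burst in rx_pkt_burst and lets the next datapath poll finish the setup from thread context (nix_ptp_enable_vf() then restores the real burst functions via cn9k_eth_set_rx_function()). A generic sketch of this one-shot trampoline pattern, with hypothetical names (not driver code):

#include <stdint.h>

typedef uint16_t (*rx_burst_t)(void *queue, void **pkts, uint16_t n);

/* Hypothetical device: a hot-path function pointer plus deferred work */
struct dev {
	rx_burst_t rx_burst;
	void (*finish_setup)(struct dev *d);	/* restores the real burst fn */
};

static uint16_t
trampoline_rx_burst(void *queue, void **pkts, uint16_t n)
{
	struct dev *d = queue;	/* assume the queue leads back to the device */

	(void)pkts;
	(void)n;
	/* Safe here: we are in the polling thread, not in mailbox context */
	d->finish_setup(d);
	return 0;		/* deliver no packets on this one poll */
}

The rte_mb() after swapping in the trampoline ensures the datapath thread observes the new pointer before any subsequent state changes.
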
@@ -316,6 +484,8 @@ nix_eth_dev_ops_override(void)
 	cnxk_eth_dev_ops.tx_queue_stop = cn9k_nix_tx_queue_stop;
 	cnxk_eth_dev_ops.dev_start = cn9k_nix_dev_start;
 	cnxk_eth_dev_ops.dev_ptypes_set = cn9k_nix_ptypes_set;
+	cnxk_eth_dev_ops.timesync_enable = cn9k_nix_timesync_enable;
+	cnxk_eth_dev_ops.timesync_disable = cn9k_nix_timesync_disable;
 }
 
 static void
@@ -359,6 +529,8 @@ cn9k_nix_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 	nix_eth_dev_ops_override();
 	npc_flow_ops_override();
 
+	cn9k_eth_sec_ops_override();
+
 	/* Common probe */
 	rc = cnxk_nix_probe(pci_drv, pci_dev);
 	if (rc)
@@ -381,21 +553,24 @@ cn9k_nix_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 	 * TSO not supported for earlier chip revisions
 	 */
 	if (roc_model_is_cn96_a0() || roc_model_is_cn95_a0())
-		dev->tx_offload_capa &= ~(DEV_TX_OFFLOAD_TCP_TSO |
-					  DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-					  DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
-					  DEV_TX_OFFLOAD_GRE_TNL_TSO);
+		dev->tx_offload_capa &= ~(RTE_ETH_TX_OFFLOAD_TCP_TSO |
+					  RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+					  RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+					  RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO);
 
 	/* 50G and 100G to be supported for board version C0
 	 * and above of CN9K.
 	 */
 	if (roc_model_is_cn96_a0() || roc_model_is_cn95_a0()) {
-		dev->speed_capa &= ~(uint64_t)ETH_LINK_SPEED_50G;
-		dev->speed_capa &= ~(uint64_t)ETH_LINK_SPEED_100G;
+		dev->speed_capa &= ~(uint64_t)RTE_ETH_LINK_SPEED_50G;
+		dev->speed_capa &= ~(uint64_t)RTE_ETH_LINK_SPEED_100G;
 	}
 
 	dev->hwcap = 0;
 
+	/* Register up msg callbacks for PTP information */
+	roc_nix_ptp_info_cb_register(&dev->nix, cn9k_nix_ptp_info_update_cb);
+
 	/* Update HW erratas */
 	if (roc_model_is_cn96_a0() || roc_model_is_cn95_a0())
 		dev->cq_min_4k = 1;
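Note: the probe path keeps all stepping-specific gating in one place: A0 revisions of CN96/CN95 lose the TSO capabilities and the 50G/100G speed bits and pick up the cq_min_4k erratum, while the PTP info callback is registered unconditionally so the PF can later notify VFs. A compact sketch of the same capability-masking idea (hypothetical helper, not part of this patch):

#include <stdbool.h>
#include <stdint.h>
#include <rte_ethdev.h>

/* Hypothetical helper mirroring the A0-stepping gating in cn9k_nix_probe() */
static void
mask_a0_capabilities(bool is_a0, uint64_t *tx_offload_capa, uint64_t *speed_capa)
{
	if (!is_a0)
		return;

	/* TSO flavours are not usable on A0 silicon */
	*tx_offload_capa &= ~(RTE_ETH_TX_OFFLOAD_TCP_TSO |
			      RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
			      RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
			      RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO);

	/* 50G/100G links are supported only from board revision C0 on */
	*speed_capa &= ~(uint64_t)(RTE_ETH_LINK_SPEED_50G |
				   RTE_ETH_LINK_SPEED_100G);
}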