diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c
index 165c35477a..d2ee39d7ec 100644
--- a/drivers/net/cnxk/cnxk_ethdev.c
+++ b/drivers/net/cnxk/cnxk_ethdev.c
@@ -8,7 +8,8 @@ nix_get_rx_offload_capa(struct cnxk_eth_dev *dev)
 {
 	uint64_t capa = CNXK_NIX_RX_OFFLOAD_CAPA;
 
-	if (roc_nix_is_vf_or_sdp(&dev->nix))
+	if (roc_nix_is_vf_or_sdp(&dev->nix) ||
+	    dev->npc.switch_header_type == ROC_PRIV_FLAGS_HIGIG)
 		capa &= ~DEV_RX_OFFLOAD_TIMESTAMP;
 
 	return capa;
@@ -37,6 +38,162 @@ nix_get_speed_capa(struct cnxk_eth_dev *dev)
 	return speed_capa;
 }
 
+int
+cnxk_nix_inb_mode_set(struct cnxk_eth_dev *dev, bool use_inl_dev)
+{
+	struct roc_nix *nix = &dev->nix;
+
+	if (dev->inb.inl_dev == use_inl_dev)
+		return 0;
+
+	plt_nix_dbg("Security sessions(%u) still active, inl=%u!!!",
+		    dev->inb.nb_sess, !!dev->inb.inl_dev);
+
+	/* Change the mode */
+	dev->inb.inl_dev = use_inl_dev;
+
+	/* Update RoC for NPC rule insertion */
+	roc_nix_inb_mode_set(nix, use_inl_dev);
+
+	/* Setup lookup mem */
+	return cnxk_nix_lookup_mem_sa_base_set(dev);
+}
+
+static int
+nix_security_setup(struct cnxk_eth_dev *dev)
+{
+	struct roc_nix *nix = &dev->nix;
+	int i, rc = 0;
+
+	if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
+		/* Setup Inline Inbound */
+		rc = roc_nix_inl_inb_init(nix);
+		if (rc) {
+			plt_err("Failed to initialize nix inline inb, rc=%d",
+				rc);
+			return rc;
+		}
+
+		/* By default pick using inline device for poll mode.
+		 * Will be overridden when event mode RQs are set up.
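+		 * The switch is done via cnxk_nix_inb_mode_set(), which
+		 * also refreshes the SA base recorded in lookup mem.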
+ */ + cnxk_nix_inb_mode_set(dev, true); + } + + if (dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY || + dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY) { + struct plt_bitmap *bmap; + size_t bmap_sz; + void *mem; + + /* Setup enough descriptors for all tx queues */ + nix->outb_nb_desc = dev->outb.nb_desc; + nix->outb_nb_crypto_qs = dev->outb.nb_crypto_qs; + + /* Setup Inline Outbound */ + rc = roc_nix_inl_outb_init(nix); + if (rc) { + plt_err("Failed to initialize nix inline outb, rc=%d", + rc); + goto cleanup; + } + + dev->outb.lf_base = roc_nix_inl_outb_lf_base_get(nix); + + /* Skip the rest if DEV_TX_OFFLOAD_SECURITY is not enabled */ + if (!(dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY)) + goto done; + + rc = -ENOMEM; + /* Allocate a bitmap to alloc and free sa indexes */ + bmap_sz = plt_bitmap_get_memory_footprint(dev->outb.max_sa); + mem = plt_zmalloc(bmap_sz, PLT_CACHE_LINE_SIZE); + if (mem == NULL) { + plt_err("Outbound SA bmap alloc failed"); + + rc |= roc_nix_inl_outb_fini(nix); + goto cleanup; + } + + rc = -EIO; + bmap = plt_bitmap_init(dev->outb.max_sa, mem, bmap_sz); + if (!bmap) { + plt_err("Outbound SA bmap init failed"); + + rc |= roc_nix_inl_outb_fini(nix); + plt_free(mem); + goto cleanup; + } + + for (i = 0; i < dev->outb.max_sa; i++) + plt_bitmap_set(bmap, i); + + dev->outb.sa_base = roc_nix_inl_outb_sa_base_get(nix); + dev->outb.sa_bmap_mem = mem; + dev->outb.sa_bmap = bmap; + } + +done: + return 0; +cleanup: + if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY) + rc |= roc_nix_inl_inb_fini(nix); + return rc; +} + +static int +nix_security_release(struct cnxk_eth_dev *dev) +{ + struct rte_eth_dev *eth_dev = dev->eth_dev; + struct cnxk_eth_sec_sess *eth_sec, *tvar; + struct roc_nix *nix = &dev->nix; + int rc, ret = 0; + + /* Cleanup Inline inbound */ + if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY) { + /* Destroy inbound sessions */ + tvar = NULL; + RTE_TAILQ_FOREACH_SAFE(eth_sec, &dev->inb.list, entry, tvar) + cnxk_eth_sec_ops.session_destroy(eth_dev, + eth_sec->sess); + + /* Clear lookup mem */ + cnxk_nix_lookup_mem_sa_base_clear(dev); + + rc = roc_nix_inl_inb_fini(nix); + if (rc) + plt_err("Failed to cleanup nix inline inb, rc=%d", rc); + ret |= rc; + } + + /* Cleanup Inline outbound */ + if (dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY || + dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY) { + /* Destroy outbound sessions */ + tvar = NULL; + RTE_TAILQ_FOREACH_SAFE(eth_sec, &dev->outb.list, entry, tvar) + cnxk_eth_sec_ops.session_destroy(eth_dev, + eth_sec->sess); + + rc = roc_nix_inl_outb_fini(nix); + if (rc) + plt_err("Failed to cleanup nix inline outb, rc=%d", rc); + ret |= rc; + + plt_bitmap_free(dev->outb.sa_bmap); + plt_free(dev->outb.sa_bmap_mem); + dev->outb.sa_bmap = NULL; + dev->outb.sa_bmap_mem = NULL; + } + + dev->inb.inl_dev = false; + roc_nix_inb_mode_set(nix, false); + dev->nb_rxq_sso = 0; + dev->inb.nb_sess = 0; + dev->outb.nb_sess = 0; + return ret; +} + static void nix_enable_mseg_on_jumbo(struct cnxk_eth_rxq_sp *rxq) { @@ -52,29 +209,24 @@ nix_enable_mseg_on_jumbo(struct cnxk_eth_rxq_sp *rxq) mbp_priv = rte_mempool_get_priv(rxq->qconf.mp); buffsz = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM; - if (eth_dev->data->dev_conf.rxmode.max_rx_pkt_len > buffsz) { + if (eth_dev->data->mtu + (uint32_t)CNXK_NIX_L2_OVERHEAD > buffsz) { dev->rx_offloads |= DEV_RX_OFFLOAD_SCATTER; dev->tx_offloads |= DEV_TX_OFFLOAD_MULTI_SEGS; } } -static int +int nix_recalc_mtu(struct rte_eth_dev *eth_dev) { struct rte_eth_dev_data *data = eth_dev->data; struct cnxk_eth_rxq_sp *rxq; - 
uint16_t mtu;
 	int rc;
 
 	rxq = ((struct cnxk_eth_rxq_sp *)data->rx_queues[0]) - 1;
 
 	/* Setup scatter mode if needed by jumbo */
 	nix_enable_mseg_on_jumbo(rxq);
 
-	/* Setup MTU based on max_rx_pkt_len */
-	mtu = data->dev_conf.rxmode.max_rx_pkt_len - CNXK_NIX_L2_OVERHEAD +
-	      CNXK_NIX_MAX_VTAG_ACT_SIZE;
-
-	rc = cnxk_nix_mtu_set(eth_dev, mtu);
+	rc = cnxk_nix_mtu_set(eth_dev, data->mtu);
 	if (rc)
 		plt_err("Failed to set default MTU size, rc=%d", rc);
 
@@ -120,6 +272,7 @@ nix_update_flow_ctrl_config(struct rte_eth_dev *eth_dev)
 
 	/* To avoid Link credit deadlock on Ax, disable Tx FC if it's enabled */
 	if (roc_model_is_cn96_ax() &&
+	    dev->npc.switch_header_type != ROC_PRIV_FLAGS_HIGIG &&
 	    (fc_cfg.mode == RTE_FC_FULL || fc_cfg.mode == RTE_FC_RX_PAUSE)) {
 		fc_cfg.mode =
 				(fc_cfg.mode == RTE_FC_FULL ||
@@ -148,7 +301,8 @@ cnxk_nix_rxq_mbuf_setup(struct cnxk_eth_dev *dev)
 			 offsetof(struct rte_mbuf, data_off) != 6);
 	mb_def.nb_segs = 1;
-	mb_def.data_off = RTE_PKTMBUF_HEADROOM;
+	mb_def.data_off = RTE_PKTMBUF_HEADROOM +
+			  (dev->ptp_en * CNXK_NIX_TIMESYNC_RX_OFFSET);
 	mb_def.port = port_id;
 	rte_mbuf_refcnt_set(&mb_def, 1);
 
@@ -187,10 +341,16 @@ cnxk_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
 	/* Free memory prior to re-allocation if needed. */
 	if (eth_dev->data->tx_queues[qid] != NULL) {
 		plt_nix_dbg("Freeing memory prior to re-allocation %d", qid);
-		dev_ops->tx_queue_release(eth_dev->data->tx_queues[qid]);
+		dev_ops->tx_queue_release(eth_dev, qid);
 		eth_dev->data->tx_queues[qid] = NULL;
 	}
 
+	/* When Tx Security offload is enabled, increase tx desc count by
+	 * max possible outbound desc count.
+	 */
+	if (dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY)
+		nb_desc += dev->outb.nb_desc;
+
 	/* Setup ROC SQ */
 	sq = &dev->sqs[qid];
 	sq->qid = qid;
@@ -215,6 +375,8 @@ cnxk_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
 	txq_sp->dev = dev;
 	txq_sp->qid = qid;
 	txq_sp->qconf.conf.tx = *tx_conf;
+	/* Queue config should reflect global offloads */
+	txq_sp->qconf.conf.tx.offloads = dev->tx_offloads;
 	txq_sp->qconf.nb_desc = nb_desc;
 
 	plt_nix_dbg("sq=%d fc=%p offload=0x%" PRIx64 " lmt_addr=%p"
@@ -229,20 +391,20 @@ cnxk_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
 }
 
 static void
-cnxk_nix_tx_queue_release(void *txq)
+cnxk_nix_tx_queue_release(struct rte_eth_dev *eth_dev, uint16_t qid)
 {
+	void *txq = eth_dev->data->tx_queues[qid];
 	struct cnxk_eth_txq_sp *txq_sp;
 	struct cnxk_eth_dev *dev;
 	struct roc_nix_sq *sq;
-	uint16_t qid;
 	int rc;
 
 	if (!txq)
 		return;
 
 	txq_sp = cnxk_eth_txq_to_sp(txq);
+
 	dev = txq_sp->dev;
-	qid = txq_sp->qid;
 
 	plt_nix_dbg("Releasing txq %u", qid);
 
@@ -263,6 +425,7 @@ cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
 			struct rte_mempool *mp)
 {
 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+	struct roc_nix *nix = &dev->nix;
 	struct cnxk_eth_rxq_sp *rxq_sp;
 	struct rte_mempool_ops *ops;
 	const char *platform_ops;
@@ -296,10 +459,23 @@ cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
 		const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
 
 		plt_nix_dbg("Freeing memory prior to re-allocation %d", qid);
-		dev_ops->rx_queue_release(eth_dev->data->rx_queues[qid]);
+		dev_ops->rx_queue_release(eth_dev, qid);
 		eth_dev->data->rx_queues[qid] = NULL;
 	}
 
+	/* Clamp up cq limit to size of packet pool aura for LBK
+	 * to avoid meta packet drop as LBK does not currently support
+	 * backpressure.
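+	 * Raising nb_desc to the aura size leaves a CQE slot for every
+	 * buffer the pool can have in flight.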
+ */ + if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY && roc_nix_is_lbk(nix)) { + uint64_t pkt_pool_limit = roc_nix_inl_dev_rq_limit_get(); + + /* Use current RQ's aura limit if inl rq is not available */ + if (!pkt_pool_limit) + pkt_pool_limit = roc_npa_aura_op_limit_get(mp->pool_id); + nb_desc = RTE_MAX(nb_desc, pkt_pool_limit); + } + /* Setup ROC CQ */ cq = &dev->cqs[qid]; cq->qid = qid; @@ -325,6 +501,10 @@ cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid, rq->later_skip = sizeof(struct rte_mbuf); rq->lpb_size = mp->elt_size; + /* Enable Inline IPSec on RQ, will not be used for Poll mode */ + if (roc_nix_inl_inb_is_enabled(nix)) + rq->ipsech_ena = true; + rc = roc_nix_rq_init(&dev->nix, rq, !!eth_dev->data->dev_started); if (rc) { plt_err("Failed to init roc rq for rq=%d, rc=%d", qid, rc); @@ -344,9 +524,18 @@ cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid, rxq_sp->dev = dev; rxq_sp->qid = qid; rxq_sp->qconf.conf.rx = *rx_conf; + /* Queue config should reflect global offloads */ + rxq_sp->qconf.conf.rx.offloads = dev->rx_offloads; rxq_sp->qconf.nb_desc = nb_desc; rxq_sp->qconf.mp = mp; + if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY) { + /* Setup rq reference for inline dev if present */ + rc = roc_nix_inl_dev_rq_get(rq); + if (rc) + goto free_mem; + } + plt_nix_dbg("rq=%d pool=%s nb_desc=%d->%d", qid, mp->name, nb_desc, cq->nb_desc); @@ -354,7 +543,21 @@ cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid, eth_dev->data->rx_queues[qid] = rxq_sp + 1; eth_dev->data->rx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STOPPED; + /* Calculating delta and freq mult between PTP HI clock and tsc. + * These are needed in deriving raw clock value from tsc counter. + * read_clock eth op returns raw clock value. + */ + if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) || dev->ptp_en) { + rc = cnxk_nix_tsc_convert(dev); + if (rc) { + plt_err("Failed to calculate delta and freq mult"); + goto rq_fini; + } + } + return 0; +free_mem: + plt_free(rxq_sp); rq_fini: rc |= roc_nix_rq_fini(rq); cq_fini: @@ -364,13 +567,13 @@ fail: } static void -cnxk_nix_rx_queue_release(void *rxq) +cnxk_nix_rx_queue_release(struct rte_eth_dev *eth_dev, uint16_t qid) { + void *rxq = eth_dev->data->rx_queues[qid]; struct cnxk_eth_rxq_sp *rxq_sp; struct cnxk_eth_dev *dev; struct roc_nix_rq *rq; struct roc_nix_cq *cq; - uint16_t qid; int rc; if (!rxq) @@ -378,12 +581,15 @@ cnxk_nix_rx_queue_release(void *rxq) rxq_sp = cnxk_eth_rxq_to_sp(rxq); dev = rxq_sp->dev; - qid = rxq_sp->qid; + rq = &dev->rqs[qid]; plt_nix_dbg("Releasing rxq %u", qid); + /* Release rq reference for inline dev if present */ + if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY) + roc_nix_inl_dev_rq_put(rq); + /* Cleanup ROC RQ */ - rq = &dev->rqs[qid]; rc = roc_nix_rq_fini(rq); if (rc) plt_err("Failed to cleanup rq, rc=%d", rc); @@ -419,8 +625,10 @@ cnxk_rss_ethdev_to_nix(struct cnxk_eth_dev *dev, uint64_t ethdev_rss, dev->ethdev_rss_hf = ethdev_rss; - if (ethdev_rss & ETH_RSS_L2_PAYLOAD) + if (ethdev_rss & ETH_RSS_L2_PAYLOAD && + dev->npc.switch_header_type == ROC_PRIV_FLAGS_LEN_90B) { flowkey_cfg |= FLOW_KEY_TYPE_CH_LEN_90B; + } if (ethdev_rss & ETH_RSS_C_VLAN) flowkey_cfg |= FLOW_KEY_TYPE_VLAN; @@ -541,7 +749,7 @@ nix_store_queue_cfg_and_then_release(struct rte_eth_dev *eth_dev) txq_sp = cnxk_eth_txq_to_sp(txq[i]); memcpy(&tx_qconf[i], &txq_sp->qconf, sizeof(*tx_qconf)); tx_qconf[i].valid = true; - dev_ops->tx_queue_release(txq[i]); + dev_ops->tx_queue_release(eth_dev, i); eth_dev->data->tx_queues[i] = NULL; } @@ 
-555,7 +763,7 @@ nix_store_queue_cfg_and_then_release(struct rte_eth_dev *eth_dev) rxq_sp = cnxk_eth_rxq_to_sp(rxq[i]); memcpy(&rx_qconf[i], &rxq_sp->qconf, sizeof(*rx_qconf)); rx_qconf[i].valid = true; - dev_ops->rx_queue_release(rxq[i]); + dev_ops->rx_queue_release(eth_dev, i); eth_dev->data->rx_queues[i] = NULL; } @@ -577,7 +785,6 @@ nix_restore_queue_cfg(struct rte_eth_dev *eth_dev) struct cnxk_eth_qconf *tx_qconf = dev->tx_qconf; struct cnxk_eth_qconf *rx_qconf = dev->rx_qconf; int rc, i, nb_rxq, nb_txq; - void **txq, **rxq; nb_rxq = RTE_MIN(dev->nb_rxq, eth_dev->data->nb_rx_queues); nb_txq = RTE_MIN(dev->nb_txq, eth_dev->data->nb_tx_queues); @@ -612,9 +819,8 @@ nix_restore_queue_cfg(struct rte_eth_dev *eth_dev) &tx_qconf[i].conf.tx); if (rc) { plt_err("Failed to setup tx queue rc=%d", rc); - txq = eth_dev->data->tx_queues; for (i -= 1; i >= 0; i--) - dev_ops->tx_queue_release(txq[i]); + dev_ops->tx_queue_release(eth_dev, i); goto fail; } } @@ -630,9 +836,8 @@ nix_restore_queue_cfg(struct rte_eth_dev *eth_dev) rx_qconf[i].mp); if (rc) { plt_err("Failed to setup rx queue rc=%d", rc); - rxq = eth_dev->data->rx_queues; for (i -= 1; i >= 0; i--) - dev_ops->rx_queue_release(rxq[i]); + dev_ops->rx_queue_release(eth_dev, i); goto tx_queue_release; } } @@ -643,9 +848,8 @@ nix_restore_queue_cfg(struct rte_eth_dev *eth_dev) return 0; tx_queue_release: - txq = eth_dev->data->tx_queues; for (i = 0; i < eth_dev->data->nb_tx_queues; i++) - dev_ops->tx_queue_release(txq[i]); + dev_ops->tx_queue_release(eth_dev, i); fail: if (tx_qconf) free(tx_qconf); @@ -787,6 +991,12 @@ cnxk_nix_configure(struct rte_eth_dev *eth_dev) rc = nix_store_queue_cfg_and_then_release(eth_dev); if (rc) goto fail_configure; + + /* Cleanup security support */ + rc = nix_security_release(dev); + if (rc) + goto fail_configure; + roc_nix_tm_fini(nix); roc_nix_lf_free(nix); } @@ -805,6 +1015,15 @@ cnxk_nix_configure(struct rte_eth_dev *eth_dev) ROC_NIX_LF_RX_CFG_LEN_IL4 | ROC_NIX_LF_RX_CFG_LEN_IL3 | ROC_NIX_LF_RX_CFG_LEN_OL4 | ROC_NIX_LF_RX_CFG_LEN_OL3); + if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY) { + rx_cfg |= ROC_NIX_LF_RX_CFG_IP6_UDP_OPT; + /* Disable drop re if rx offload security is enabled and + * platform does not support it. + */ + if (dev->ipsecd_drop_re_dis) + rx_cfg &= ~(ROC_NIX_LF_RX_CFG_DROP_RE); + } + nb_rxq = RTE_MAX(data->nb_rx_queues, 1); nb_txq = RTE_MAX(data->nb_tx_queues, 1); @@ -815,6 +1034,8 @@ cnxk_nix_configure(struct rte_eth_dev *eth_dev) goto fail_configure; } + dev->npc.channel = roc_nix_get_base_chan(nix); + nb_rxq = data->nb_rx_queues; nb_txq = data->nb_tx_queues; rc = -ENOMEM; @@ -849,11 +1070,18 @@ cnxk_nix_configure(struct rte_eth_dev *eth_dev) roc_nix_err_intr_ena_dis(nix, true); roc_nix_ras_intr_ena_dis(nix, true); - if (nix->rx_ptp_ena) { + if (nix->rx_ptp_ena && + dev->npc.switch_header_type == ROC_PRIV_FLAGS_HIGIG) { plt_err("Both PTP and switch header enabled"); goto free_nix_lf; } + rc = roc_nix_switch_hdr_set(nix, dev->npc.switch_header_type); + if (rc) { + plt_err("Failed to enable switch type nix_lf rc=%d", rc); + goto free_nix_lf; + } + /* Setup LSO if needed */ rc = nix_lso_fmt_setup(dev); if (rc) { @@ -932,6 +1160,12 @@ cnxk_nix_configure(struct rte_eth_dev *eth_dev) plt_err("Failed to initialize flow control rc=%d", rc); goto cq_fini; } + + /* Setup Inline security support */ + rc = nix_security_setup(dev); + if (rc) + goto cq_fini; + /* * Restore queue config when reconfigure followed by * reconfigure and no queue configure invoked from application case. 
@@ -939,7 +1173,7 @@ cnxk_nix_configure(struct rte_eth_dev *eth_dev) if (dev->configured == 1) { rc = nix_restore_queue_cfg(eth_dev); if (rc) - goto cq_fini; + goto sec_release; } /* Update the mac address */ @@ -961,6 +1195,8 @@ cnxk_nix_configure(struct rte_eth_dev *eth_dev) dev->nb_txq = data->nb_tx_queues; return 0; +sec_release: + rc |= nix_security_release(dev); cq_fini: roc_nix_unregister_cq_irqs(nix); q_irq_fini: @@ -1070,6 +1306,7 @@ cnxk_nix_dev_stop(struct rte_eth_dev *eth_dev) struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev); const struct eth_dev_ops *dev_ops = eth_dev->dev_ops; struct rte_mbuf *rx_pkts[32]; + struct rte_eth_link link; int count, i, j, rc; void *rxq; @@ -1102,6 +1339,10 @@ cnxk_nix_dev_stop(struct rte_eth_dev *eth_dev) for (i = 0; i < eth_dev->data->nb_tx_queues; i++) dev_ops->tx_queue_stop(eth_dev, i); + /* Bring down link status internally */ + memset(&link, 0, sizeof(link)); + rte_eth_linkstatus_set(eth_dev, &link); + return 0; } @@ -1111,7 +1352,7 @@ cnxk_nix_dev_start(struct rte_eth_dev *eth_dev) struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev); int rc, i; - if (eth_dev->data->nb_rx_queues != 0) { + if (eth_dev->data->nb_rx_queues != 0 && !dev->ptp_en) { rc = nix_recalc_mtu(eth_dev); if (rc) return rc; @@ -1156,6 +1397,25 @@ cnxk_nix_dev_start(struct rte_eth_dev *eth_dev) } } + /* Enable PTP if it is requested by the user or already + * enabled on PF owning this VF + */ + memset(&dev->tstamp, 0, sizeof(struct cnxk_timesync_info)); + if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) || dev->ptp_en) + cnxk_eth_dev_ops.timesync_enable(eth_dev); + else + cnxk_eth_dev_ops.timesync_disable(eth_dev); + + if (dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) { + rc = rte_mbuf_dyn_rx_timestamp_register + (&dev->tstamp.tstamp_dynfield_offset, + &dev->tstamp.rx_tstamp_dynflag); + if (rc != 0) { + plt_err("Failed to register Rx timestamp field/flag"); + goto rx_disable; + } + } + cnxk_nix_toggle_flag_link_cfg(dev, false); return 0; @@ -1166,6 +1426,9 @@ rx_disable: return rc; } +static int cnxk_nix_dev_reset(struct rte_eth_dev *eth_dev); +static int cnxk_nix_dev_close(struct rte_eth_dev *eth_dev); + /* CNXK platform independent eth dev ops */ struct eth_dev_ops cnxk_eth_dev_ops = { .mtu_set = cnxk_nix_mtu_set, @@ -1177,6 +1440,8 @@ struct eth_dev_ops cnxk_eth_dev_ops = { .tx_queue_release = cnxk_nix_tx_queue_release, .rx_queue_release = cnxk_nix_rx_queue_release, .dev_stop = cnxk_nix_dev_stop, + .dev_close = cnxk_nix_dev_close, + .dev_reset = cnxk_nix_dev_reset, .tx_queue_start = cnxk_nix_tx_queue_start, .rx_queue_start = cnxk_nix_rx_queue_start, .rx_queue_stop = cnxk_nix_rx_queue_stop, @@ -1193,18 +1458,62 @@ struct eth_dev_ops cnxk_eth_dev_ops = { .dev_set_link_down = cnxk_nix_set_link_down, .get_module_info = cnxk_nix_get_module_info, .get_module_eeprom = cnxk_nix_get_module_eeprom, + .rx_queue_intr_enable = cnxk_nix_rx_queue_intr_enable, + .rx_queue_intr_disable = cnxk_nix_rx_queue_intr_disable, + .pool_ops_supported = cnxk_nix_pool_ops_supported, + .queue_stats_mapping_set = cnxk_nix_queue_stats_mapping, + .stats_get = cnxk_nix_stats_get, + .stats_reset = cnxk_nix_stats_reset, + .xstats_get = cnxk_nix_xstats_get, + .xstats_get_names = cnxk_nix_xstats_get_names, + .xstats_reset = cnxk_nix_xstats_reset, + .xstats_get_by_id = cnxk_nix_xstats_get_by_id, + .xstats_get_names_by_id = cnxk_nix_xstats_get_names_by_id, + .fw_version_get = cnxk_nix_fw_version_get, + .rxq_info_get = cnxk_nix_rxq_info_get, + .txq_info_get = cnxk_nix_txq_info_get, + .tx_done_cleanup = 
cnxk_nix_tx_done_cleanup, + .flow_ops_get = cnxk_nix_flow_ops_get, + .get_reg = cnxk_nix_dev_get_reg, + .timesync_read_rx_timestamp = cnxk_nix_timesync_read_rx_timestamp, + .timesync_read_tx_timestamp = cnxk_nix_timesync_read_tx_timestamp, + .timesync_read_time = cnxk_nix_timesync_read_time, + .timesync_write_time = cnxk_nix_timesync_write_time, + .timesync_adjust_time = cnxk_nix_timesync_adjust_time, + .read_clock = cnxk_nix_read_clock, + .reta_update = cnxk_nix_reta_update, + .reta_query = cnxk_nix_reta_query, + .rss_hash_update = cnxk_nix_rss_hash_update, + .rss_hash_conf_get = cnxk_nix_rss_hash_conf_get, + .set_mc_addr_list = cnxk_nix_mc_addr_list_configure, + .set_queue_rate_limit = cnxk_nix_tm_set_queue_rate_limit, + .tm_ops_get = cnxk_nix_tm_ops_get, + .mtr_ops_get = cnxk_nix_mtr_ops_get, }; static int cnxk_eth_dev_init(struct rte_eth_dev *eth_dev) { struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev); + struct rte_security_ctx *sec_ctx; struct roc_nix *nix = &dev->nix; struct rte_pci_device *pci_dev; int rc, max_entries; eth_dev->dev_ops = &cnxk_eth_dev_ops; + /* Alloc security context */ + sec_ctx = plt_zmalloc(sizeof(struct rte_security_ctx), 0); + if (!sec_ctx) + return -ENOMEM; + sec_ctx->device = eth_dev; + sec_ctx->ops = &cnxk_eth_sec_ops; + sec_ctx->flags = + (RTE_SEC_CTX_F_FAST_SET_MDATA | RTE_SEC_CTX_F_FAST_GET_UDATA); + eth_dev->security_ctx = sec_ctx; + TAILQ_INIT(&dev->inb.list); + TAILQ_INIT(&dev->outb.list); + /* For secondary processes, the primary has done all the work */ if (rte_eal_process_type() != RTE_PROC_PRIMARY) return 0; @@ -1221,6 +1530,7 @@ cnxk_eth_dev_init(struct rte_eth_dev *eth_dev) /* Initialize base roc nix */ nix->pci_dev = pci_dev; + nix->hw_vlan_ins = true; rc = roc_nix_dev_init(nix); if (rc) { plt_err("Failed to initialize roc nix rc=%d", rc); @@ -1230,6 +1540,10 @@ cnxk_eth_dev_init(struct rte_eth_dev *eth_dev) /* Register up msg callbacks */ roc_nix_mac_link_cb_register(nix, cnxk_eth_dev_link_status_cb); + /* Register up msg callbacks */ + roc_nix_mac_link_info_get_cb_register(nix, + cnxk_eth_dev_link_status_get_cb); + dev->eth_dev = eth_dev; dev->configured = 0; dev->ptype_disable = 0; @@ -1257,6 +1571,7 @@ cnxk_eth_dev_init(struct rte_eth_dev *eth_dev) } dev->max_mac_entries = max_entries; + dev->dmac_filter_count = 1; /* Get mac address */ rc = roc_nix_npc_mac_addr_get(nix, dev->mac_addr); @@ -1286,6 +1601,11 @@ cnxk_eth_dev_init(struct rte_eth_dev *eth_dev) dev->speed_capa = nix_get_speed_capa(dev); /* Initialize roc npc */ + dev->npc.roc_nix = nix; + rc = roc_npc_init(&dev->npc); + if (rc) + goto free_mac_addrs; + plt_nix_dbg("Port=%d pf=%d vf=%d ver=%s hwcap=0x%" PRIx64 " rxoffload_capa=0x%" PRIx64 " txoffload_capa=0x%" PRIx64, eth_dev->data->port_id, roc_nix_get_pf(nix), @@ -1303,13 +1623,16 @@ error: } static int -cnxk_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool mbox_close) +cnxk_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool reset) { struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev); const struct eth_dev_ops *dev_ops = eth_dev->dev_ops; struct roc_nix *nix = &dev->nix; int rc, i; + plt_free(eth_dev->security_ctx); + eth_dev->security_ctx = NULL; + /* Nothing to be done for secondary processes */ if (rte_eal_process_type() != RTE_PROC_PRIMARY) return 0; @@ -1319,23 +1642,34 @@ cnxk_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool mbox_close) roc_nix_npc_rx_ena_dis(nix, false); + /* Disable and free rte_flow entries */ + roc_npc_fini(&dev->npc); + /* Disable link status events */ roc_nix_mac_link_event_start_stop(nix, 
false); + /* Unregister the link update op, this is required to stop VFs from + * receiving link status updates on exit path. + */ + roc_nix_mac_link_cb_unregister(nix); + /* Free up SQs */ for (i = 0; i < eth_dev->data->nb_tx_queues; i++) { - dev_ops->tx_queue_release(eth_dev->data->tx_queues[i]); + dev_ops->tx_queue_release(eth_dev, i); eth_dev->data->tx_queues[i] = NULL; } eth_dev->data->nb_tx_queues = 0; /* Free up RQ's and CQ's */ for (i = 0; i < eth_dev->data->nb_rx_queues; i++) { - dev_ops->rx_queue_release(eth_dev->data->rx_queues[i]); + dev_ops->rx_queue_release(eth_dev, i); eth_dev->data->rx_queues[i] = NULL; } eth_dev->data->nb_rx_queues = 0; + /* Free security resources */ + nix_security_release(dev); + /* Free tm resources */ roc_nix_tm_fini(nix); @@ -1357,14 +1691,11 @@ cnxk_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool mbox_close) rte_free(eth_dev->data->mac_addrs); eth_dev->data->mac_addrs = NULL; - /* Check if mbox close is needed */ - if (!mbox_close) - return 0; - rc = roc_nix_dev_fini(nix); /* Can be freed later by PMD if NPA LF is in use */ if (rc == -EAGAIN) { - eth_dev->data->dev_private = NULL; + if (!reset) + eth_dev->data->dev_private = NULL; return 0; } else if (rc) { plt_err("Failed in nix dev fini, rc=%d", rc); @@ -1373,6 +1704,25 @@ cnxk_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool mbox_close) return rc; } +static int +cnxk_nix_dev_close(struct rte_eth_dev *eth_dev) +{ + cnxk_eth_dev_uninit(eth_dev, false); + return 0; +} + +static int +cnxk_nix_dev_reset(struct rte_eth_dev *eth_dev) +{ + int rc; + + rc = cnxk_eth_dev_uninit(eth_dev, true); + if (rc) + return rc; + + return cnxk_eth_dev_init(eth_dev); +} + int cnxk_nix_remove(struct rte_pci_device *pci_dev) { @@ -1383,7 +1733,7 @@ cnxk_nix_remove(struct rte_pci_device *pci_dev) eth_dev = rte_eth_dev_allocated(pci_dev->device.name); if (eth_dev) { /* Cleanup eth dev */ - rc = cnxk_eth_dev_uninit(eth_dev, true); + rc = cnxk_eth_dev_uninit(eth_dev, false); if (rc) return rc;
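
Usage sketch (not part of the patch): with the rte_security context that
cnxk_eth_dev_init() now registers on the ethdev, an application creates
inline IPsec sessions through the generic rte_security API. The minimal
example below is a hedged sketch against the DPDK 21.11-era API; the port,
SPI, key and the two session mempools are illustrative assumptions, not
values taken from this patch. DEV_RX_OFFLOAD_SECURITY and/or
DEV_TX_OFFLOAD_SECURITY must also be enabled at dev_configure() time so
that nix_security_setup() above initializes the inline paths.

	#include <rte_crypto.h>
	#include <rte_ethdev.h>
	#include <rte_security.h>

	/* Illustrative AES-GCM key; a real SA would use provisioned keys */
	static uint8_t gcm_key[16];

	static struct rte_security_session *
	create_inline_ipsec_rx_session(uint16_t port_id,
				       struct rte_mempool *sess_mp,
				       struct rte_mempool *sess_priv_mp)
	{
		/* Security context registered by cnxk_eth_dev_init() */
		struct rte_security_ctx *ctx = rte_eth_dev_get_sec_ctx(port_id);
		struct rte_crypto_sym_xform xform = {
			.type = RTE_CRYPTO_SYM_XFORM_AEAD,
			.aead = {
				.op = RTE_CRYPTO_AEAD_OP_DECRYPT,
				.algo = RTE_CRYPTO_AEAD_AES_GCM,
				.key = { .data = gcm_key,
					 .length = sizeof(gcm_key) },
				.iv = { .offset = 0, .length = 12 },
				.digest_length = 16,
			},
		};
		struct rte_security_session_conf conf = {
			.action_type = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
			.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
			.ipsec = {
				.spi = 1, /* illustrative SPI */
				.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
				.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
				.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
			},
			.crypto_xform = &xform,
		};

		if (ctx == NULL)
			return NULL;

		/* The PMD tracks inbound sessions in dev->inb.list, which
		 * nix_security_release() above drains on reconfigure/close.
		 */
		return rte_security_session_create(ctx, &conf, sess_mp,
						   sess_priv_mp);
	}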