diff --git a/drivers/net/octeontx2/otx2_ethdev.c b/drivers/net/octeontx2/otx2_ethdev.c
index 1a6afedf59..75d4cabf2e 100644
--- a/drivers/net/octeontx2/otx2_ethdev.c
+++ b/drivers/net/octeontx2/otx2_ethdev.c
@@ -4,7 +4,7 @@
 
 #include <inttypes.h>
 
-#include <rte_ethdev_pci.h>
+#include <ethdev_pci.h>
 #include <rte_io.h>
 #include <rte_malloc.h>
 #include <rte_mbuf.h>
@@ -12,13 +12,15 @@
 #include <rte_mempool.h>
 
 #include "otx2_ethdev.h"
+#include "otx2_ethdev_sec.h"
 
 static inline uint64_t
 nix_get_rx_offload_capa(struct otx2_eth_dev *dev)
 {
 	uint64_t capa = NIX_RX_OFFLOAD_CAPA;
 
-	if (otx2_dev_is_vf(dev))
+	if (otx2_dev_is_vf(dev) ||
+	    dev->npc_flow.switch_header_type == OTX2_PRIV_FLAGS_HIGIG)
 		capa &= ~DEV_RX_OFFLOAD_TIMESTAMP;
 
 	return capa;
@@ -33,13 +35,15 @@ nix_get_tx_offload_capa(struct otx2_eth_dev *dev)
 	if (otx2_dev_is_96xx_A0(dev) || otx2_dev_is_95xx_Ax(dev))
 		capa &= ~(DEV_TX_OFFLOAD_TCP_TSO |
 			  DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-			  DEV_TX_OFFLOAD_GENEVE_TNL_TSO);
+			  DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
+			  DEV_TX_OFFLOAD_GRE_TNL_TSO);
 	return capa;
 }
 
 static const struct otx2_dev_ops otx2_dev_ops = {
 	.link_status_update = otx2_eth_dev_link_status_update,
-	.ptp_info_update = otx2_eth_dev_ptp_info_update
+	.ptp_info_update = otx2_eth_dev_ptp_info_update,
+	.link_status_get = otx2_eth_dev_link_status_get,
 };
 
 static int
@@ -67,7 +71,15 @@ nix_lf_alloc(struct otx2_eth_dev *dev, uint32_t nb_rxq, uint32_t nb_txq)
 		req->rx_cfg |= BIT_ULL(37 /* CSUM_OL4 */);
 		req->rx_cfg |= BIT_ULL(36 /* CSUM_IL4 */);
 	}
-	req->rx_cfg |= BIT_ULL(32 /* DROP_RE */);
+	req->rx_cfg |= (BIT_ULL(32 /* DROP_RE */) |
+			BIT_ULL(33 /* Outer L2 Length */) |
+			BIT_ULL(38 /* Inner L4 UDP Length */) |
+			BIT_ULL(39 /* Inner L3 Length */) |
+			BIT_ULL(40 /* Outer L4 UDP Length */) |
+			BIT_ULL(41 /* Outer L3 Length */));
+
+	if (dev->rss_tag_as_xor == 0)
+		req->flags = NIX_LF_RSS_TAG_LSB_AS_ADDER;
 
 	rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
 	if (rc)
@@ -85,10 +97,60 @@ nix_lf_alloc(struct otx2_eth_dev *dev, uint32_t nb_rxq, uint32_t nb_txq)
 	dev->cints = rsp->cints;
 	dev->qints = rsp->qints;
 	dev->npc_flow.channel = dev->rx_chan_base;
+	dev->ptp_en = rsp->hw_rx_tstamp_en;
 
 	return 0;
 }
 
+static int
+nix_lf_switch_header_type_enable(struct otx2_eth_dev *dev, bool enable)
+{
+	struct otx2_mbox *mbox = dev->mbox;
+	struct npc_set_pkind *req;
+	struct msg_resp *rsp;
+	int rc;
+
+	if (dev->npc_flow.switch_header_type == 0)
+		return 0;
+
+	/* Notify AF about higig2 config */
+	req = otx2_mbox_alloc_msg_npc_set_pkind(mbox);
+	req->mode = dev->npc_flow.switch_header_type;
+	if (dev->npc_flow.switch_header_type == OTX2_PRIV_FLAGS_CH_LEN_90B) {
+		req->mode = OTX2_PRIV_FLAGS_CUSTOM;
+		req->pkind = NPC_RX_CHLEN90B_PKIND;
+	} else if (dev->npc_flow.switch_header_type ==
+		   OTX2_PRIV_FLAGS_CH_LEN_24B) {
+		req->mode = OTX2_PRIV_FLAGS_CUSTOM;
+		req->pkind = NPC_RX_CHLEN24B_PKIND;
+	} else if (dev->npc_flow.switch_header_type ==
+		   OTX2_PRIV_FLAGS_EXDSA) {
+		req->mode = OTX2_PRIV_FLAGS_CUSTOM;
+		req->pkind = NPC_RX_EXDSA_PKIND;
+	} else if (dev->npc_flow.switch_header_type ==
+		   OTX2_PRIV_FLAGS_VLAN_EXDSA) {
+		req->mode = OTX2_PRIV_FLAGS_CUSTOM;
+		req->pkind = NPC_RX_VLAN_EXDSA_PKIND;
+	}
+
+	if (enable == 0)
+		req->mode = OTX2_PRIV_FLAGS_DEFAULT;
+	req->dir = PKIND_RX;
+	rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+	if (rc)
+		return rc;
+	req = otx2_mbox_alloc_msg_npc_set_pkind(mbox);
+	
req->mode = dev->npc_flow.switch_header_type; + if (dev->npc_flow.switch_header_type == OTX2_PRIV_FLAGS_CH_LEN_90B || + dev->npc_flow.switch_header_type == OTX2_PRIV_FLAGS_CH_LEN_24B) + req->mode = OTX2_PRIV_FLAGS_DEFAULT; + + if (enable == 0) + req->mode = OTX2_PRIV_FLAGS_DEFAULT; + req->dir = PKIND_TX; + return otx2_mbox_process_msg(mbox, (void *)&rsp); +} + static int nix_lf_free(struct otx2_eth_dev *dev) { @@ -119,7 +181,7 @@ otx2_cgx_rxtx_start(struct otx2_eth_dev *dev) { struct otx2_mbox *mbox = dev->mbox; - if (otx2_dev_is_vf(dev)) + if (otx2_dev_is_vf_or_sdp(dev)) return 0; otx2_mbox_alloc_msg_cgx_start_rxtx(mbox); @@ -132,7 +194,7 @@ otx2_cgx_rxtx_stop(struct otx2_eth_dev *dev) { struct otx2_mbox *mbox = dev->mbox; - if (otx2_dev_is_vf(dev)) + if (otx2_dev_is_vf_or_sdp(dev)) return 0; otx2_mbox_alloc_msg_cgx_stop_rxtx(mbox); @@ -165,7 +227,7 @@ nix_cgx_start_link_event(struct otx2_eth_dev *dev) { struct otx2_mbox *mbox = dev->mbox; - if (otx2_dev_is_vf(dev)) + if (otx2_dev_is_vf_or_sdp(dev)) return 0; otx2_mbox_alloc_msg_cgx_start_linkevents(mbox); @@ -178,8 +240,8 @@ cgx_intlbk_enable(struct otx2_eth_dev *dev, bool en) { struct otx2_mbox *mbox = dev->mbox; - if (otx2_dev_is_vf(dev)) - return 0; + if (en && otx2_dev_is_vf_or_sdp(dev)) + return -ENOTSUP; if (en) otx2_mbox_alloc_msg_cgx_intlbk_enable(mbox); @@ -194,7 +256,7 @@ nix_cgx_stop_link_event(struct otx2_eth_dev *dev) { struct otx2_mbox *mbox = dev->mbox; - if (otx2_dev_is_vf(dev)) + if (otx2_dev_is_vf_or_sdp(dev)) return 0; otx2_mbox_alloc_msg_cgx_stop_linkevents(mbox); @@ -252,8 +314,7 @@ nix_cq_rq_init(struct rte_eth_dev *eth_dev, struct otx2_eth_dev *dev, NIX_CQ_ALIGN, dev->node); if (rz == NULL) { otx2_err("Failed to allocate mem for cq hw ring"); - rc = -ENOMEM; - goto fail; + return -ENOMEM; } memset(rz->addr, 0, rz->len); rxq->desc = (uintptr_t)rz->addr; @@ -302,7 +363,7 @@ nix_cq_rq_init(struct rte_eth_dev *eth_dev, struct otx2_eth_dev *dev, rc = otx2_mbox_process(mbox); if (rc) { otx2_err("Failed to init cq context"); - goto fail; + return rc; } aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox); @@ -311,6 +372,10 @@ nix_cq_rq_init(struct rte_eth_dev *eth_dev, struct otx2_eth_dev *dev, aq->op = NIX_AQ_INSTOP_INIT; aq->rq.sso_ena = 0; + + if (rxq->offloads & DEV_RX_OFFLOAD_SECURITY) + aq->rq.ipsech_ena = 1; + aq->rq.cq = qid; /* RQ to CQ 1:1 mapped */ aq->rq.spb_ena = 0; aq->rq.lpb_aura = npa_lf_aura_handle_to_aura(mp->pool_id); @@ -323,10 +388,7 @@ nix_cq_rq_init(struct rte_eth_dev *eth_dev, struct otx2_eth_dev *dev, aq->rq.first_skip = first_skip; aq->rq.later_skip = (sizeof(struct rte_mbuf) / 8); aq->rq.flow_tagw = 32; /* 32-bits */ - aq->rq.lpb_sizem1 = rte_pktmbuf_data_room_size(mp); - aq->rq.lpb_sizem1 += rte_pktmbuf_priv_size(mp); - aq->rq.lpb_sizem1 += sizeof(struct rte_mbuf); - aq->rq.lpb_sizem1 /= 8; + aq->rq.lpb_sizem1 = mp->elt_size / 8; aq->rq.lpb_sizem1 -= 1; /* Expressed in size minus one */ aq->rq.ena = 1; aq->rq.pb_caching = 0x2; /* First cache aligned block to LLC */ @@ -340,12 +402,44 @@ nix_cq_rq_init(struct rte_eth_dev *eth_dev, struct otx2_eth_dev *dev, rc = otx2_mbox_process(mbox); if (rc) { otx2_err("Failed to init rq context"); - goto fail; + return rc; + } + + if (dev->lock_rx_ctx) { + aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox); + aq->qidx = qid; + aq->ctype = NIX_AQ_CTYPE_CQ; + aq->op = NIX_AQ_INSTOP_LOCK; + + aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox); + if (!aq) { + /* The shared memory buffer can be full. 
+ * Flush it and retry + */ + otx2_mbox_msg_send(mbox, 0); + rc = otx2_mbox_wait_for_rsp(mbox, 0); + if (rc < 0) { + otx2_err("Failed to LOCK cq context"); + return rc; + } + + aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox); + if (!aq) { + otx2_err("Failed to LOCK rq context"); + return -ENOMEM; + } + } + aq->qidx = qid; + aq->ctype = NIX_AQ_CTYPE_RQ; + aq->op = NIX_AQ_INSTOP_LOCK; + rc = otx2_mbox_process(mbox); + if (rc < 0) { + otx2_err("Failed to LOCK rq context"); + return rc; + } } return 0; -fail: - return rc; } static int @@ -392,6 +486,40 @@ nix_cq_rq_uninit(struct rte_eth_dev *eth_dev, struct otx2_eth_rxq *rxq) return rc; } + if (dev->lock_rx_ctx) { + aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox); + aq->qidx = rxq->rq; + aq->ctype = NIX_AQ_CTYPE_CQ; + aq->op = NIX_AQ_INSTOP_UNLOCK; + + aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox); + if (!aq) { + /* The shared memory buffer can be full. + * Flush it and retry + */ + otx2_mbox_msg_send(mbox, 0); + rc = otx2_mbox_wait_for_rsp(mbox, 0); + if (rc < 0) { + otx2_err("Failed to UNLOCK cq context"); + return rc; + } + + aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox); + if (!aq) { + otx2_err("Failed to UNLOCK rq context"); + return -ENOMEM; + } + } + aq->qidx = rxq->rq; + aq->ctype = NIX_AQ_CTYPE_RQ; + aq->op = NIX_AQ_INSTOP_UNLOCK; + rc = otx2_mbox_process(mbox); + if (rc < 0) { + otx2_err("Failed to UNLOCK rq context"); + return rc; + } + } + return 0; } @@ -481,6 +609,7 @@ otx2_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t rq, if (eth_dev->data->rx_queues[rq] != NULL) { otx2_nix_dbg("Freeing memory prior to re-allocation %d", rq); otx2_nix_rx_queue_release(eth_dev->data->rx_queues[rq]); + rte_eth_dma_zone_free(eth_dev, "cq", rq); eth_dev->data->rx_queues[rq] = NULL; } @@ -544,6 +673,9 @@ otx2_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t rq, } } + /* Setup scatter mode if needed by jumbo */ + otx2_nix_enable_mseg_on_jumbo(rxq); + return 0; free_rxq: @@ -574,7 +706,8 @@ nix_rx_offload_flags(struct rte_eth_dev *eth_dev) struct rte_eth_rxmode *rxmode = &conf->rxmode; uint16_t flags = 0; - if (rxmode->mq_mode == ETH_MQ_RX_RSS) + if (rxmode->mq_mode == ETH_MQ_RX_RSS && + (dev->rx_offloads & DEV_RX_OFFLOAD_RSS_HASH)) flags |= NIX_RX_OFFLOAD_RSS_F; if (dev->rx_offloads & (DEV_RX_OFFLOAD_TCP_CKSUM | @@ -595,6 +728,12 @@ nix_rx_offload_flags(struct rte_eth_dev *eth_dev) if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP)) flags |= NIX_RX_OFFLOAD_TSTAMP_F; + if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY) + flags |= NIX_RX_OFFLOAD_SECURITY_F; + + if (!dev->ptype_disable) + flags |= NIX_RX_OFFLOAD_PTYPE_F; + return flags; } @@ -655,17 +794,136 @@ nix_tx_offload_flags(struct rte_eth_dev *eth_dev) /* Enable Inner and Outer checksum for Tunnel TSO */ if (conf & (DEV_TX_OFFLOAD_VXLAN_TNL_TSO | - DEV_TX_OFFLOAD_GENEVE_TNL_TSO)) + DEV_TX_OFFLOAD_GENEVE_TNL_TSO | + DEV_TX_OFFLOAD_GRE_TNL_TSO)) flags |= (NIX_TX_OFFLOAD_TSO_F | NIX_TX_OFFLOAD_OL3_OL4_CSUM_F | NIX_TX_OFFLOAD_L3_L4_CSUM_F); + if (conf & DEV_TX_OFFLOAD_SECURITY) + flags |= NIX_TX_OFFLOAD_SECURITY_F; + if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP)) flags |= NIX_TX_OFFLOAD_TSTAMP_F; return flags; } +static int +nix_sqb_lock(struct rte_mempool *mp) +{ + struct otx2_npa_lf *npa_lf = otx2_intra_dev_get_cfg()->npa_lf; + struct npa_aq_enq_req *req; + int rc; + + req = otx2_mbox_alloc_msg_npa_aq_enq(npa_lf->mbox); + req->aura_id = npa_lf_aura_handle_to_aura(mp->pool_id); + req->ctype = NPA_AQ_CTYPE_AURA; + req->op = NPA_AQ_INSTOP_LOCK; + + req = otx2_mbox_alloc_msg_npa_aq_enq(npa_lf->mbox); + 
if (!req) { + /* The shared memory buffer can be full. + * Flush it and retry + */ + otx2_mbox_msg_send(npa_lf->mbox, 0); + rc = otx2_mbox_wait_for_rsp(npa_lf->mbox, 0); + if (rc < 0) { + otx2_err("Failed to LOCK AURA context"); + return rc; + } + + req = otx2_mbox_alloc_msg_npa_aq_enq(npa_lf->mbox); + if (!req) { + otx2_err("Failed to LOCK POOL context"); + return -ENOMEM; + } + } + + req->aura_id = npa_lf_aura_handle_to_aura(mp->pool_id); + req->ctype = NPA_AQ_CTYPE_POOL; + req->op = NPA_AQ_INSTOP_LOCK; + + rc = otx2_mbox_process(npa_lf->mbox); + if (rc < 0) { + otx2_err("Unable to lock POOL in NDC"); + return rc; + } + + return 0; +} + +static int +nix_sqb_unlock(struct rte_mempool *mp) +{ + struct otx2_npa_lf *npa_lf = otx2_intra_dev_get_cfg()->npa_lf; + struct npa_aq_enq_req *req; + int rc; + + req = otx2_mbox_alloc_msg_npa_aq_enq(npa_lf->mbox); + req->aura_id = npa_lf_aura_handle_to_aura(mp->pool_id); + req->ctype = NPA_AQ_CTYPE_AURA; + req->op = NPA_AQ_INSTOP_UNLOCK; + + req = otx2_mbox_alloc_msg_npa_aq_enq(npa_lf->mbox); + if (!req) { + /* The shared memory buffer can be full. + * Flush it and retry + */ + otx2_mbox_msg_send(npa_lf->mbox, 0); + rc = otx2_mbox_wait_for_rsp(npa_lf->mbox, 0); + if (rc < 0) { + otx2_err("Failed to UNLOCK AURA context"); + return rc; + } + + req = otx2_mbox_alloc_msg_npa_aq_enq(npa_lf->mbox); + if (!req) { + otx2_err("Failed to UNLOCK POOL context"); + return -ENOMEM; + } + } + req = otx2_mbox_alloc_msg_npa_aq_enq(npa_lf->mbox); + req->aura_id = npa_lf_aura_handle_to_aura(mp->pool_id); + req->ctype = NPA_AQ_CTYPE_POOL; + req->op = NPA_AQ_INSTOP_UNLOCK; + + rc = otx2_mbox_process(npa_lf->mbox); + if (rc < 0) { + otx2_err("Unable to UNLOCK AURA in NDC"); + return rc; + } + + return 0; +} + +void +otx2_nix_enable_mseg_on_jumbo(struct otx2_eth_rxq *rxq) +{ + struct rte_pktmbuf_pool_private *mbp_priv; + struct rte_eth_dev *eth_dev; + struct otx2_eth_dev *dev; + uint32_t buffsz; + + eth_dev = rxq->eth_dev; + dev = otx2_eth_pmd_priv(eth_dev); + + /* Get rx buffer size */ + mbp_priv = rte_mempool_get_priv(rxq->pool); + buffsz = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM; + + if (eth_dev->data->dev_conf.rxmode.max_rx_pkt_len > buffsz) { + dev->rx_offloads |= DEV_RX_OFFLOAD_SCATTER; + dev->tx_offloads |= DEV_TX_OFFLOAD_MULTI_SEGS; + + /* Setting up the rx[tx]_offload_flags due to change + * in rx[tx]_offloads. 
+ */ + dev->rx_offload_flags |= nix_rx_offload_flags(eth_dev); + dev->tx_offload_flags |= nix_tx_offload_flags(eth_dev); + } +} + static int nix_sq_init(struct otx2_eth_txq *txq) { @@ -708,7 +966,20 @@ nix_sq_init(struct otx2_eth_txq *txq) /* Many to one reduction */ sq->sq.qint_idx = txq->sq % dev->qints; - return otx2_mbox_process(mbox); + rc = otx2_mbox_process(mbox); + if (rc < 0) + return rc; + + if (dev->lock_tx_ctx) { + sq = otx2_mbox_alloc_msg_nix_aq_enq(mbox); + sq->qidx = txq->sq; + sq->ctype = NIX_AQ_CTYPE_SQ; + sq->op = NIX_AQ_INSTOP_LOCK; + + rc = otx2_mbox_process(mbox); + } + + return rc; } static int @@ -751,6 +1022,20 @@ nix_sq_uninit(struct otx2_eth_txq *txq) if (rc) return rc; + if (dev->lock_tx_ctx) { + /* Unlock sq */ + aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox); + aq->qidx = txq->sq; + aq->ctype = NIX_AQ_CTYPE_SQ; + aq->op = NIX_AQ_INSTOP_UNLOCK; + + rc = otx2_mbox_process(mbox); + if (rc < 0) + return rc; + + nix_sqb_unlock(txq->sqb_pool); + } + /* Read SQ and free sqb's */ aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox); aq->qidx = txq->sq; @@ -872,6 +1157,8 @@ nix_alloc_sqb_pool(int port, struct otx2_eth_txq *txq, uint16_t nb_desc) } nix_sqb_aura_limit_cfg(txq->sqb_pool, txq->nb_sqb_bufs); + if (dev->lock_tx_ctx) + nix_sqb_lock(txq->sqb_pool); return 0; fail: @@ -943,7 +1230,7 @@ otx2_nix_tx_queue_release(void *_txq) otx2_nix_dbg("Releasing txq %u", txq->sq); /* Flush and disable tm */ - otx2_nix_tm_sw_xoff(txq, eth_dev->data->dev_started); + otx2_nix_sq_flush_pre(txq, eth_dev->data->dev_started); /* Free sqb's and disable sq */ nix_sq_uninit(txq); @@ -952,6 +1239,7 @@ otx2_nix_tx_queue_release(void *_txq) rte_mempool_free(txq->sqb_pool); txq->sqb_pool = NULL; } + otx2_nix_sq_flush_post(txq); rte_free(txq); } @@ -1039,6 +1327,7 @@ otx2_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t sq, txq->qconf.nb_desc = nb_desc; memcpy(&txq->qconf.conf.tx, tx_conf, sizeof(struct rte_eth_txconf)); + txq->lso_tun_fmt = dev->lso_tun_fmt; otx2_nix_form_default_desc(txq); otx2_nix_dbg("sq=%d fc=%p offload=0x%" PRIx64 " sqb=0x%" PRIx64 "" @@ -1083,10 +1372,12 @@ nix_store_queue_cfg_and_then_release(struct rte_eth_dev *eth_dev) txq = (struct otx2_eth_txq **)eth_dev->data->tx_queues; for (i = 0; i < nb_txq; i++) { if (txq[i] == NULL) { - otx2_err("txq[%d] is already released", i); - goto fail; + tx_qconf[i].valid = false; + otx2_info("txq[%d] is already released", i); + continue; } memcpy(&tx_qconf[i], &txq[i]->qconf, sizeof(*tx_qconf)); + tx_qconf[i].valid = true; otx2_nix_tx_queue_release(txq[i]); eth_dev->data->tx_queues[i] = NULL; } @@ -1094,10 +1385,12 @@ nix_store_queue_cfg_and_then_release(struct rte_eth_dev *eth_dev) rxq = (struct otx2_eth_rxq **)eth_dev->data->rx_queues; for (i = 0; i < nb_rxq; i++) { if (rxq[i] == NULL) { - otx2_err("rxq[%d] is already released", i); - goto fail; + rx_qconf[i].valid = false; + otx2_info("rxq[%d] is already released", i); + continue; } memcpy(&rx_qconf[i], &rxq[i]->qconf, sizeof(*rx_qconf)); + rx_qconf[i].valid = true; otx2_nix_rx_queue_release(rxq[i]); eth_dev->data->rx_queues[i] = NULL; } @@ -1107,10 +1400,8 @@ nix_store_queue_cfg_and_then_release(struct rte_eth_dev *eth_dev) return 0; fail: - if (tx_qconf) - free(tx_qconf); - if (rx_qconf) - free(rx_qconf); + free(tx_qconf); + free(rx_qconf); return -ENOMEM; } @@ -1152,6 +1443,8 @@ nix_restore_queue_cfg(struct rte_eth_dev *eth_dev) * queues are already setup in port_configure(). 
*/ for (i = 0; i < nb_txq; i++) { + if (!tx_qconf[i].valid) + continue; rc = otx2_nix_tx_queue_setup(eth_dev, i, tx_qconf[i].nb_desc, tx_qconf[i].socket_id, &tx_qconf[i].conf.tx); @@ -1167,6 +1460,8 @@ nix_restore_queue_cfg(struct rte_eth_dev *eth_dev) free(tx_qconf); tx_qconf = NULL; for (i = 0; i < nb_rxq; i++) { + if (!rx_qconf[i].valid) + continue; rc = otx2_nix_rx_queue_setup(eth_dev, i, rx_qconf[i].nb_desc, rx_qconf[i].socket_id, &rx_qconf[i].conf.rx, @@ -1322,13 +1617,68 @@ nix_lso_udp_tun_tcp(struct nix_lso_format_cfg *req, field++; } +static void +nix_lso_tun_tcp(struct nix_lso_format_cfg *req, + bool outer_v4, bool inner_v4) +{ + volatile struct nix_lso_format *field; + + field = (volatile struct nix_lso_format *)&req->fields[0]; + req->field_mask = NIX_LSO_FIELD_MASK; + /* Outer IPv4/IPv6 len */ + field->layer = NIX_TXLAYER_OL3; + field->offset = outer_v4 ? 2 : 4; + field->sizem1 = 1; /* 2B */ + field->alg = NIX_LSOALG_ADD_PAYLEN; + field++; + if (outer_v4) { + /* IPID */ + field->layer = NIX_TXLAYER_OL3; + field->offset = 4; + field->sizem1 = 1; + /* Incremented linearly per segment */ + field->alg = NIX_LSOALG_ADD_SEGNUM; + field++; + } + + /* Inner IPv4/IPv6 */ + field->layer = NIX_TXLAYER_IL3; + field->offset = inner_v4 ? 2 : 4; + field->sizem1 = 1; /* 2B */ + field->alg = NIX_LSOALG_ADD_PAYLEN; + field++; + if (inner_v4) { + /* IPID field */ + field->layer = NIX_TXLAYER_IL3; + field->offset = 4; + field->sizem1 = 1; + /* Incremented linearly per segment */ + field->alg = NIX_LSOALG_ADD_SEGNUM; + field++; + } + + /* TCP sequence number update */ + field->layer = NIX_TXLAYER_IL4; + field->offset = 4; + field->sizem1 = 3; /* 4 bytes */ + field->alg = NIX_LSOALG_ADD_OFFSET; + field++; + + /* TCP flags field */ + field->layer = NIX_TXLAYER_IL4; + field->offset = 12; + field->sizem1 = 1; + field->alg = NIX_LSOALG_TCP_FLAGS; + field++; +} + static int nix_setup_lso_formats(struct otx2_eth_dev *dev) { struct otx2_mbox *mbox = dev->mbox; struct nix_lso_format_cfg_rsp *rsp; struct nix_lso_format_cfg *req; - uint8_t base; + uint8_t *fmt; int rc; /* Skip if TSO was not requested */ @@ -1343,11 +1693,9 @@ nix_setup_lso_formats(struct otx2_eth_dev *dev) if (rc) return rc; - base = rsp->lso_format_idx; - if (base != NIX_LSO_FORMAT_IDX_TSOV4) + if (rsp->lso_format_idx != NIX_LSO_FORMAT_IDX_TSOV4) return -EFAULT; - dev->lso_base_idx = base; - otx2_nix_dbg("tcpv4 lso fmt=%u", base); + otx2_nix_dbg("tcpv4 lso fmt=%u", rsp->lso_format_idx); /* @@ -1359,9 +1707,9 @@ nix_setup_lso_formats(struct otx2_eth_dev *dev) if (rc) return rc; - if (rsp->lso_format_idx != base + 1) + if (rsp->lso_format_idx != NIX_LSO_FORMAT_IDX_TSOV6) return -EFAULT; - otx2_nix_dbg("tcpv6 lso fmt=%u\n", base + 1); + otx2_nix_dbg("tcpv6 lso fmt=%u\n", rsp->lso_format_idx); /* * IPv4/UDP/TUN HDR/IPv4/TCP LSO @@ -1372,9 +1720,8 @@ nix_setup_lso_formats(struct otx2_eth_dev *dev) if (rc) return rc; - if (rsp->lso_format_idx != base + 2) - return -EFAULT; - otx2_nix_dbg("udp tun v4v4 fmt=%u\n", base + 2); + dev->lso_udp_tun_idx[NIX_LSO_TUN_V4V4] = rsp->lso_format_idx; + otx2_nix_dbg("udp tun v4v4 fmt=%u\n", rsp->lso_format_idx); /* * IPv4/UDP/TUN HDR/IPv6/TCP LSO @@ -1385,9 +1732,8 @@ nix_setup_lso_formats(struct otx2_eth_dev *dev) if (rc) return rc; - if (rsp->lso_format_idx != base + 3) - return -EFAULT; - otx2_nix_dbg("udp tun v4v6 fmt=%u\n", base + 3); + dev->lso_udp_tun_idx[NIX_LSO_TUN_V4V6] = rsp->lso_format_idx; + otx2_nix_dbg("udp tun v4v6 fmt=%u\n", rsp->lso_format_idx); /* * IPv6/UDP/TUN HDR/IPv4/TCP LSO @@ -1398,9 
+1744,8 @@ nix_setup_lso_formats(struct otx2_eth_dev *dev) if (rc) return rc; - if (rsp->lso_format_idx != base + 4) - return -EFAULT; - otx2_nix_dbg("udp tun v6v4 fmt=%u\n", base + 4); + dev->lso_udp_tun_idx[NIX_LSO_TUN_V6V4] = rsp->lso_format_idx; + otx2_nix_dbg("udp tun v6v4 fmt=%u\n", rsp->lso_format_idx); /* * IPv6/UDP/TUN HDR/IPv6/TCP LSO @@ -1410,9 +1755,73 @@ nix_setup_lso_formats(struct otx2_eth_dev *dev) rc = otx2_mbox_process_msg(mbox, (void *)&rsp); if (rc) return rc; - if (rsp->lso_format_idx != base + 5) - return -EFAULT; - otx2_nix_dbg("udp tun v6v6 fmt=%u\n", base + 5); + + dev->lso_udp_tun_idx[NIX_LSO_TUN_V6V6] = rsp->lso_format_idx; + otx2_nix_dbg("udp tun v6v6 fmt=%u\n", rsp->lso_format_idx); + + /* + * IPv4/TUN HDR/IPv4/TCP LSO + */ + req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox); + nix_lso_tun_tcp(req, true, true); + rc = otx2_mbox_process_msg(mbox, (void *)&rsp); + if (rc) + return rc; + + dev->lso_tun_idx[NIX_LSO_TUN_V4V4] = rsp->lso_format_idx; + otx2_nix_dbg("tun v4v4 fmt=%u\n", rsp->lso_format_idx); + + /* + * IPv4/TUN HDR/IPv6/TCP LSO + */ + req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox); + nix_lso_tun_tcp(req, true, false); + rc = otx2_mbox_process_msg(mbox, (void *)&rsp); + if (rc) + return rc; + + dev->lso_tun_idx[NIX_LSO_TUN_V4V6] = rsp->lso_format_idx; + otx2_nix_dbg("tun v4v6 fmt=%u\n", rsp->lso_format_idx); + + /* + * IPv6/TUN HDR/IPv4/TCP LSO + */ + req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox); + nix_lso_tun_tcp(req, false, true); + rc = otx2_mbox_process_msg(mbox, (void *)&rsp); + if (rc) + return rc; + + dev->lso_tun_idx[NIX_LSO_TUN_V6V4] = rsp->lso_format_idx; + otx2_nix_dbg("tun v6v4 fmt=%u\n", rsp->lso_format_idx); + + /* + * IPv6/TUN HDR/IPv6/TCP LSO + */ + req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox); + nix_lso_tun_tcp(req, false, false); + rc = otx2_mbox_process_msg(mbox, (void *)&rsp); + if (rc) + return rc; + + dev->lso_tun_idx[NIX_LSO_TUN_V6V6] = rsp->lso_format_idx; + otx2_nix_dbg("tun v6v6 fmt=%u\n", rsp->lso_format_idx); + + /* Save all tun formats into u64 for fast path. + * Lower 32bit has non-udp tunnel formats. + * Upper 32bit has udp tunnel formats. 
+ */ + fmt = dev->lso_tun_idx; + dev->lso_tun_fmt = ((uint64_t)fmt[NIX_LSO_TUN_V4V4] | + (uint64_t)fmt[NIX_LSO_TUN_V4V6] << 8 | + (uint64_t)fmt[NIX_LSO_TUN_V6V4] << 16 | + (uint64_t)fmt[NIX_LSO_TUN_V6V6] << 24); + + fmt = dev->lso_udp_tun_idx; + dev->lso_tun_fmt |= ((uint64_t)fmt[NIX_LSO_TUN_V4V4] << 32 | + (uint64_t)fmt[NIX_LSO_TUN_V4V6] << 40 | + (uint64_t)fmt[NIX_LSO_TUN_V6V4] << 48 | + (uint64_t)fmt[NIX_LSO_TUN_V6V6] << 56); return 0; } @@ -1438,11 +1847,6 @@ otx2_nix_configure(struct rte_eth_dev *eth_dev) goto fail_configure; } - if (conf->link_speeds & ETH_LINK_SPEED_FIXED) { - otx2_err("Setting link speed/duplex not supported"); - goto fail_configure; - } - if (conf->dcb_capability_en == 1) { otx2_err("dcb enable is not supported"); goto fail_configure; @@ -1474,8 +1878,10 @@ otx2_nix_configure(struct rte_eth_dev *eth_dev) /* Free the resources allocated from the previous configure */ if (dev->configured == 1) { + otx2_eth_sec_fini(eth_dev); otx2_nix_rxchan_bpid_cfg(eth_dev, false); otx2_nix_vlan_fini(eth_dev); + otx2_nix_mc_addr_list_uninstall(eth_dev); otx2_flow_free_all_resources(dev); oxt2_nix_unregister_queue_irqs(eth_dev); if (eth_dev->data->dev_conf.intr_conf.rxq) @@ -1504,6 +1910,21 @@ otx2_nix_configure(struct rte_eth_dev *eth_dev) goto fail_offloads; } + otx2_nix_err_intr_enb_dis(eth_dev, true); + otx2_nix_ras_intr_enb_dis(eth_dev, true); + + if (dev->ptp_en && + dev->npc_flow.switch_header_type == OTX2_PRIV_FLAGS_HIGIG) { + otx2_err("Both PTP and switch header enabled"); + goto free_nix_lf; + } + + rc = nix_lf_switch_header_type_enable(dev, true); + if (rc) { + otx2_err("Failed to enable switch type nix_lf rc=%d", rc); + goto free_nix_lf; + } + rc = nix_setup_lso_formats(dev); if (rc) { otx2_err("failed to setup nix lso format fields, rc=%d", rc); @@ -1562,13 +1983,30 @@ otx2_nix_configure(struct rte_eth_dev *eth_dev) rc = cgx_intlbk_enable(dev, eth_dev->data->dev_conf.lpbk_mode); if (rc) { otx2_err("Failed to configure cgx loop back mode rc=%d", rc); - goto q_irq_fini; + goto cq_fini; } rc = otx2_nix_rxchan_bpid_cfg(eth_dev, true); if (rc) { otx2_err("Failed to configure nix rx chan bpid cfg rc=%d", rc); - goto q_irq_fini; + goto cq_fini; + } + + /* Enable security */ + rc = otx2_eth_sec_init(eth_dev); + if (rc) + goto cq_fini; + + rc = otx2_nix_flow_ctrl_init(eth_dev); + if (rc) { + otx2_err("Failed to init flow ctrl mode %d", rc); + goto cq_fini; + } + + rc = otx2_nix_mc_addr_list_install(eth_dev); + if (rc < 0) { + otx2_err("Failed to install mc address list rc=%d", rc); + goto sec_fini; } /* @@ -1578,7 +2016,7 @@ otx2_nix_configure(struct rte_eth_dev *eth_dev) if (dev->configured == 1) { rc = nix_restore_queue_cfg(eth_dev); if (rc) - goto cq_fini; + goto uninstall_mc_list; } /* Update the mac address */ @@ -1589,6 +2027,13 @@ otx2_nix_configure(struct rte_eth_dev *eth_dev) rte_ether_format_addr(ea_fmt, RTE_ETHER_ADDR_FMT_SIZE, ea); + /* Apply new link configurations if changed */ + rc = otx2_apply_link_speed(eth_dev); + if (rc) { + otx2_err("Failed to set link configuration"); + goto uninstall_mc_list; + } + otx2_nix_dbg("Configured port%d mac=%s nb_rxq=%d nb_txq=%d" " rx_offloads=0x%" PRIx64 " tx_offloads=0x%" PRIx64 "" " rx_flags=0x%x tx_flags=0x%x", @@ -1602,6 +2047,10 @@ otx2_nix_configure(struct rte_eth_dev *eth_dev) dev->configured_nb_tx_qs = data->nb_tx_queues; return 0; +uninstall_mc_list: + otx2_nix_mc_addr_list_uninstall(eth_dev); +sec_fini: + otx2_eth_sec_fini(eth_dev); cq_fini: oxt2_nix_unregister_cq_irqs(eth_dev); q_irq_fini: @@ -1716,14 +2165,16 @@ 
done: return rc; } -static void +static int otx2_nix_dev_stop(struct rte_eth_dev *eth_dev) { struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); struct rte_mbuf *rx_pkts[32]; struct otx2_eth_rxq *rxq; + struct rte_eth_link link; int count, i, j, rc; + nix_lf_switch_header_type_enable(dev, false); nix_cgx_stop_link_event(dev); npc_rx_disable(dev); @@ -1745,6 +2196,12 @@ otx2_nix_dev_stop(struct rte_eth_dev *eth_dev) /* Stop tx queues */ for (i = 0; i < eth_dev->data->nb_tx_queues; i++) otx2_nix_tx_queue_stop(eth_dev, i); + + /* Bring down link status internally */ + memset(&link, 0, sizeof(link)); + rte_eth_linkstatus_set(eth_dev, &link); + + return 0; } static int @@ -1753,7 +2210,11 @@ otx2_nix_dev_start(struct rte_eth_dev *eth_dev) struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); int rc, i; - if (eth_dev->data->nb_rx_queues != 0) { + /* MTU recalculate should be avoided here if PTP is enabled by PF, as + * otx2_nix_recalc_mtu would be invoked during otx2_nix_ptp_enable_vf + * call below. + */ + if (eth_dev->data->nb_rx_queues != 0 && !otx2_ethdev_is_ptp_en(dev)) { rc = otx2_nix_recalc_mtu(eth_dev); if (rc) return rc; @@ -1789,6 +2250,22 @@ otx2_nix_dev_start(struct rte_eth_dev *eth_dev) else otx2_nix_timesync_disable(eth_dev); + /* Update VF about data off shifted by 8 bytes if PTP already + * enabled in PF owning this VF + */ + if (otx2_ethdev_is_ptp_en(dev) && otx2_dev_is_vf(dev)) + otx2_nix_ptp_enable_vf(eth_dev); + + if (dev->rx_offload_flags & NIX_RX_OFFLOAD_TSTAMP_F) { + rc = rte_mbuf_dyn_rx_timestamp_register( + &dev->tstamp.tstamp_dynfield_offset, + &dev->tstamp.rx_tstamp_dynflag); + if (rc != 0) { + otx2_err("Failed to register Rx timestamp field/flag"); + return -rte_errno; + } + } + rc = npc_rx_enable(dev); if (rc) { otx2_err("Failed to enable NPC rx %d", rc); @@ -1816,7 +2293,7 @@ rx_disable: } static int otx2_nix_dev_reset(struct rte_eth_dev *eth_dev); -static void otx2_nix_dev_close(struct rte_eth_dev *eth_dev); +static int otx2_nix_dev_close(struct rte_eth_dev *eth_dev); /* Initialize and register driver with DPDK Application */ static const struct eth_dev_ops otx2_eth_dev_ops = { @@ -1825,6 +2302,7 @@ static const struct eth_dev_ops otx2_eth_dev_ops = { .link_update = otx2_nix_link_update, .tx_queue_setup = otx2_nix_tx_queue_setup, .tx_queue_release = otx2_nix_tx_queue_release, + .tm_ops_get = otx2_nix_tm_ops_get, .rx_queue_setup = otx2_nix_rx_queue_setup, .rx_queue_release = otx2_nix_rx_queue_release, .dev_start = otx2_nix_dev_start, @@ -1837,6 +2315,7 @@ static const struct eth_dev_ops otx2_eth_dev_ops = { .dev_set_link_up = otx2_nix_dev_set_link_up, .dev_set_link_down = otx2_nix_dev_set_link_down, .dev_supported_ptypes_get = otx2_nix_supported_ptypes_get, + .dev_ptypes_set = otx2_nix_ptypes_set, .dev_reset = otx2_nix_dev_reset, .stats_get = otx2_nix_dev_stats_get, .stats_reset = otx2_nix_dev_stats_reset, @@ -1845,6 +2324,7 @@ static const struct eth_dev_ops otx2_eth_dev_ops = { .mac_addr_add = otx2_nix_mac_addr_add, .mac_addr_remove = otx2_nix_mac_addr_del, .mac_addr_set = otx2_nix_mac_addr_set, + .set_mc_addr_list = otx2_nix_set_mc_addr_list, .promiscuous_enable = otx2_nix_promisc_enable, .promiscuous_disable = otx2_nix_promisc_disable, .allmulticast_enable = otx2_nix_allmulticast_enable, @@ -1861,13 +2341,12 @@ static const struct eth_dev_ops otx2_eth_dev_ops = { .xstats_get_names_by_id = otx2_nix_xstats_get_names_by_id, .rxq_info_get = otx2_nix_rxq_info_get, .txq_info_get = otx2_nix_txq_info_get, - .rx_queue_count = otx2_nix_rx_queue_count, - 
.rx_descriptor_done = otx2_nix_rx_descriptor_done, - .rx_descriptor_status = otx2_nix_rx_descriptor_status, - .tx_descriptor_status = otx2_nix_tx_descriptor_status, + .rx_burst_mode_get = otx2_rx_burst_mode_get, + .tx_burst_mode_get = otx2_tx_burst_mode_get, .tx_done_cleanup = otx2_nix_tx_done_cleanup, + .set_queue_rate_limit = otx2_nix_tm_set_queue_rate_limit, .pool_ops_supported = otx2_nix_pool_ops_supported, - .filter_ctrl = otx2_nix_dev_filter_ctrl, + .flow_ops_get = otx2_nix_dev_flow_ops_get, .get_module_info = otx2_nix_get_module_info, .get_module_eeprom = otx2_nix_get_module_eeprom, .fw_version_get = otx2_nix_fw_version_get, @@ -1939,6 +2418,29 @@ otx2_eth_dev_lf_detach(struct otx2_mbox *mbox) return otx2_mbox_process(mbox); } +static bool +otx2_eth_dev_is_sdp(struct rte_pci_device *pci_dev) +{ + if (pci_dev->id.device_id == PCI_DEVID_OCTEONTX2_RVU_SDP_PF || + pci_dev->id.device_id == PCI_DEVID_OCTEONTX2_RVU_SDP_VF) + return true; + return false; +} + +static inline uint64_t +nix_get_blkaddr(struct otx2_eth_dev *dev) +{ + uint64_t reg; + + /* Reading the discovery register to know which NIX is the LF + * attached to. + */ + reg = otx2_read64(dev->bar2 + + RVU_PF_BLOCK_ADDRX_DISC(RVU_BLOCK_ADDR_NIX0)); + + return reg & 0x1FFULL ? RVU_BLOCK_ADDR_NIX0 : RVU_BLOCK_ADDR_NIX1; +} + static int otx2_eth_dev_init(struct rte_eth_dev *eth_dev) { @@ -1947,6 +2449,10 @@ otx2_eth_dev_init(struct rte_eth_dev *eth_dev) int rc, max_entries; eth_dev->dev_ops = &otx2_eth_dev_ops; + eth_dev->rx_descriptor_done = otx2_nix_rx_descriptor_done; + eth_dev->rx_queue_count = otx2_nix_rx_queue_count; + eth_dev->rx_descriptor_status = otx2_nix_rx_descriptor_status; + eth_dev->tx_descriptor_status = otx2_nix_tx_descriptor_status; /* For secondary processes, the primary has done all the work */ if (rte_eal_process_type() != RTE_PROC_PRIMARY) { @@ -1959,7 +2465,7 @@ otx2_eth_dev_init(struct rte_eth_dev *eth_dev) pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); rte_eth_copy_pci_info(eth_dev, pci_dev); - eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE; + eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS; /* Zero out everything after OTX2_DEV to allow proper dev_reset() */ memset(&dev->otx2_eth_dev_data_start, 0, sizeof(*dev) - @@ -1982,6 +2488,10 @@ otx2_eth_dev_init(struct rte_eth_dev *eth_dev) goto error; } } + if (otx2_eth_dev_is_sdp(pci_dev)) + dev->sdp_link = true; + else + dev->sdp_link = false; /* Device generic callbacks */ dev->ops = &otx2_dev_ops; dev->eth_dev = eth_dev; @@ -1993,7 +2503,7 @@ otx2_eth_dev_init(struct rte_eth_dev *eth_dev) dev->configured = 0; dev->drv_inited = true; - dev->base = dev->bar2 + (RVU_BLOCK_ADDR_NIX0 << 20); + dev->ptype_disable = 0; dev->lmt_addr = dev->bar2 + (RVU_BLOCK_ADDR_LMT << 20); /* Attach NIX LF */ @@ -2001,6 +2511,8 @@ otx2_eth_dev_init(struct rte_eth_dev *eth_dev) if (rc) goto otx2_npa_uninit; + dev->base = dev->bar2 + (nix_get_blkaddr(dev) << 20); + /* Get NIX MSIX offset */ rc = nix_lf_get_msix_offset(dev); if (rc) @@ -2057,10 +2569,19 @@ otx2_eth_dev_init(struct rte_eth_dev *eth_dev) dev->hwcap |= OTX2_FIXUP_F_LIMIT_CQ_FULL; } + /* Create security ctx */ + rc = otx2_eth_sec_ctx_create(eth_dev); + if (rc) + goto free_mac_addrs; + dev->tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY; + dev->rx_offload_capa |= DEV_RX_OFFLOAD_SECURITY; + /* Initialize rte-flow */ rc = otx2_flow_init(dev); if (rc) - goto free_mac_addrs; + goto sec_ctx_destroy; + + otx2_nix_mc_filter_init(dev); otx2_nix_dbg("Port=%d pf=%d vf=%d ver=%s msix_off=%d hwcap=0x%" PRIx64 " 
rxoffload_capa=0x%" PRIx64 " txoffload_capa=0x%" PRIx64, @@ -2069,6 +2590,8 @@ otx2_eth_dev_init(struct rte_eth_dev *eth_dev) dev->rx_offload_capa, dev->tx_offload_capa); return 0; +sec_ctx_destroy: + otx2_eth_sec_ctx_destroy(eth_dev); free_mac_addrs: rte_free(eth_dev->data->mac_addrs); unregister_irq: @@ -2109,12 +2632,20 @@ otx2_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool mbox_close) /* Disable other rte_flow entries */ otx2_flow_fini(dev); + /* Free multicast filter list */ + otx2_nix_mc_filter_fini(dev); + /* Disable PTP if already enabled */ if (otx2_ethdev_is_ptp_en(dev)) otx2_nix_timesync_disable(eth_dev); nix_cgx_stop_link_event(dev); + /* Unregister the dev ops, this is required to stop VFs from + * receiving link status updates on exit path. + */ + dev->ops = NULL; + /* Free up SQs */ for (i = 0; i < eth_dev->data->nb_tx_queues; i++) { otx2_nix_tx_queue_release(eth_dev->data->tx_queues[i]); @@ -2149,6 +2680,12 @@ otx2_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool mbox_close) if (rc) otx2_err("Failed to cleanup npa lf, rc=%d", rc); + /* Disable security */ + otx2_eth_sec_fini(eth_dev); + + /* Destroy security ctx */ + otx2_eth_sec_ctx_destroy(eth_dev); + rte_free(eth_dev->data->mac_addrs); eth_dev->data->mac_addrs = NULL; dev->drv_inited = false; @@ -2174,10 +2711,11 @@ otx2_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool mbox_close) return 0; } -static void +static int otx2_nix_dev_close(struct rte_eth_dev *eth_dev) { otx2_eth_dev_uninit(eth_dev, true); + return 0; } static int @@ -2207,7 +2745,7 @@ nix_remove(struct rte_pci_device *pci_dev) if (rc) return rc; - rte_eth_dev_pci_release(eth_dev); + rte_eth_dev_release_port(eth_dev); } /* Nothing to be done for secondary processes */ @@ -2264,6 +2802,14 @@ static const struct rte_pci_id pci_nix_map[] = { RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_AF_VF) }, + { + RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, + PCI_DEVID_OCTEONTX2_RVU_SDP_PF) + }, + { + RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, + PCI_DEVID_OCTEONTX2_RVU_SDP_VF) + }, { .vendor_id = 0, }, @@ -2277,6 +2823,6 @@ static struct rte_pci_driver pci_nix = { .remove = nix_remove, }; -RTE_PMD_REGISTER_PCI(net_octeontx2, pci_nix); -RTE_PMD_REGISTER_PCI_TABLE(net_octeontx2, pci_nix_map); -RTE_PMD_REGISTER_KMOD_DEP(net_octeontx2, "vfio-pci"); +RTE_PMD_REGISTER_PCI(OCTEONTX2_PMD, pci_nix); +RTE_PMD_REGISTER_PCI_TABLE(OCTEONTX2_PMD, pci_nix_map); +RTE_PMD_REGISTER_KMOD_DEP(OCTEONTX2_PMD, "vfio-pci");
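
Note on the LSO format rework above: the patch drops the old assumption that the AF hands back six contiguous format indices (base .. base+5, with -EFAULT checks) and instead records each tunnel type's index in dev->lso_tun_idx[] / dev->lso_udp_tun_idx[], then packs all eight one-byte indices into the single word dev->lso_tun_fmt (non-UDP tunnel formats in the lower 32 bits, UDP tunnel formats in the upper 32 bits). Below is a minimal standalone sketch of that packing scheme; pack_lso_fmt(), lso_fmt_get() and the sample index values are illustrative names and data, not part of the driver, though the byte layout follows the patch.

#include <stdint.h>
#include <stdio.h>

/* Tunnel type order used by the patch: V4V4, V4V6, V6V4, V6V6. */
enum lso_tun_type { LSO_TUN_V4V4, LSO_TUN_V4V6, LSO_TUN_V6V4, LSO_TUN_V6V6 };

/* Pack eight 8-bit LSO format indices into one 64-bit word:
 * non-UDP tunnel formats in the lower 32 bits, UDP tunnel formats
 * in the upper 32 bits, one byte per tunnel type.
 */
static uint64_t
pack_lso_fmt(const uint8_t tun[4], const uint8_t udp_tun[4])
{
	uint64_t fmt;

	fmt = (uint64_t)tun[LSO_TUN_V4V4] |
	      (uint64_t)tun[LSO_TUN_V4V6] << 8 |
	      (uint64_t)tun[LSO_TUN_V6V4] << 16 |
	      (uint64_t)tun[LSO_TUN_V6V6] << 24;

	fmt |= (uint64_t)udp_tun[LSO_TUN_V4V4] << 32 |
	       (uint64_t)udp_tun[LSO_TUN_V4V6] << 40 |
	       (uint64_t)udp_tun[LSO_TUN_V6V4] << 48 |
	       (uint64_t)udp_tun[LSO_TUN_V6V6] << 56;

	return fmt;
}

/* Fast-path lookup: one shift and mask per packet instead of
 * dereferencing per-type index arrays in the device structure.
 */
static uint8_t
lso_fmt_get(uint64_t fmt, int is_udp_tun, enum lso_tun_type type)
{
	return (fmt >> (is_udp_tun * 32 + type * 8)) & 0xff;
}

int
main(void)
{
	const uint8_t tun[4] = { 6, 7, 8, 9 };     /* hypothetical indices */
	const uint8_t udp_tun[4] = { 2, 3, 4, 5 }; /* hypothetical indices */
	uint64_t fmt = pack_lso_fmt(tun, udp_tun);

	printf("tun v6v4 fmt=%u, udp tun v4v6 fmt=%u\n",
	       (unsigned int)lso_fmt_get(fmt, 0, LSO_TUN_V6V4),
	       (unsigned int)lso_fmt_get(fmt, 1, LSO_TUN_V4V6));
	return 0;
}

This is also why the patch copies dev->lso_tun_fmt into each queue at setup time (txq->lso_tun_fmt = dev->lso_tun_fmt in otx2_nix_tx_queue_setup): the Tx hot path then only touches the queue structure it already has in cache.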