X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Focteontx2%2Fotx2_ethdev.c;h=33b72bd4db51db0bb0ee0b74807f4fb5d6113b9f;hb=de06137cb295ee4c8db579635e7adf9226dda2eb;hp=62d5ee6302bc76913388ab005834168c72b804c8;hpb=25d6419cecf5515180c394b06d371960c6d5ecf1;p=dpdk.git

diff --git a/drivers/net/octeontx2/otx2_ethdev.c b/drivers/net/octeontx2/otx2_ethdev.c
index 62d5ee6302..33b72bd4db 100644
--- a/drivers/net/octeontx2/otx2_ethdev.c
+++ b/drivers/net/octeontx2/otx2_ethdev.c
@@ -3,7 +3,6 @@
  */
 
 #include <inttypes.h>
-#include <math.h>
 
 #include <rte_ethdev_pci.h>
 #include <rte_io.h>
@@ -13,25 +12,15 @@
 #include <rte_mempool.h>
 
 #include "otx2_ethdev.h"
-
-static inline void
-otx2_eth_set_rx_function(struct rte_eth_dev *eth_dev)
-{
-	RTE_SET_USED(eth_dev);
-}
-
-static inline void
-otx2_eth_set_tx_function(struct rte_eth_dev *eth_dev)
-{
-	RTE_SET_USED(eth_dev);
-}
+#include "otx2_ethdev_sec.h"
 
 static inline uint64_t
 nix_get_rx_offload_capa(struct otx2_eth_dev *dev)
 {
 	uint64_t capa = NIX_RX_OFFLOAD_CAPA;
 
-	if (otx2_dev_is_vf(dev))
+	if (otx2_dev_is_vf(dev) ||
+	    dev->npc_flow.switch_header_type == OTX2_PRIV_FLAGS_HIGIG)
 		capa &= ~DEV_RX_OFFLOAD_TIMESTAMP;
 
 	return capa;
@@ -40,9 +29,15 @@ nix_get_rx_offload_capa(struct otx2_eth_dev *dev)
 static inline uint64_t
 nix_get_tx_offload_capa(struct otx2_eth_dev *dev)
 {
-	RTE_SET_USED(dev);
-
-	return NIX_TX_OFFLOAD_CAPA;
+	uint64_t capa = NIX_TX_OFFLOAD_CAPA;
+
+	/* TSO not supported for earlier chip revisions */
+	if (otx2_dev_is_96xx_A0(dev) || otx2_dev_is_95xx_Ax(dev))
+		capa &= ~(DEV_TX_OFFLOAD_TCP_TSO |
+			  DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
+			  DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
+			  DEV_TX_OFFLOAD_GRE_TNL_TSO);
+	return capa;
 }
 
 static const struct otx2_dev_ops otx2_dev_ops = {
@@ -75,6 +70,15 @@ nix_lf_alloc(struct otx2_eth_dev *dev, uint32_t nb_rxq, uint32_t nb_txq)
 		req->rx_cfg |= BIT_ULL(37 /* CSUM_OL4 */);
 		req->rx_cfg |= BIT_ULL(36 /* CSUM_IL4 */);
 	}
+	req->rx_cfg |= (BIT_ULL(32 /* DROP_RE */)             |
+			BIT_ULL(33 /* Outer L2 Length */)     |
+			BIT_ULL(38 /* Inner L4 UDP Length */) |
+			BIT_ULL(39 /* Inner L3 Length */)     |
+			BIT_ULL(40 /* Outer L4 UDP Length */) |
+			BIT_ULL(41 /* Outer L3 Length */));
+
+	if (dev->rss_tag_as_xor == 0)
+		req->flags = NIX_LF_RSS_TAG_LSB_AS_ADDER;
 
 	rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
 	if (rc)
@@ -92,10 +96,45 @@ nix_lf_alloc(struct otx2_eth_dev *dev, uint32_t nb_rxq, uint32_t nb_txq)
 	dev->cints = rsp->cints;
 	dev->qints = rsp->qints;
 	dev->npc_flow.channel = dev->rx_chan_base;
+	dev->ptp_en = rsp->hw_rx_tstamp_en;
 
 	return 0;
 }
 
+static int
+nix_lf_switch_header_type_enable(struct otx2_eth_dev *dev, bool enable)
+{
+	struct otx2_mbox *mbox = dev->mbox;
+	struct npc_set_pkind *req;
+	struct msg_resp *rsp;
+	int rc;
+
+	if (dev->npc_flow.switch_header_type == 0)
+		return 0;
+
+	if (dev->npc_flow.switch_header_type == OTX2_PRIV_FLAGS_LEN_90B &&
+	    !otx2_dev_is_sdp(dev)) {
+		otx2_err("chlen90b is not supported on non-SDP device");
+		return -EINVAL;
+	}
+
+	/* Notify AF about higig2 config */
+	req = otx2_mbox_alloc_msg_npc_set_pkind(mbox);
+	req->mode = dev->npc_flow.switch_header_type;
+	if (enable == 0)
+		req->mode = OTX2_PRIV_FLAGS_DEFAULT;
+	req->dir = PKIND_RX;
+	rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+	if (rc)
+		return rc;
+	req = otx2_mbox_alloc_msg_npc_set_pkind(mbox);
+	req->mode = dev->npc_flow.switch_header_type;
+	if (enable == 0)
+		req->mode = OTX2_PRIV_FLAGS_DEFAULT;
+	req->dir = PKIND_TX;
+	return otx2_mbox_process_msg(mbox, (void *)&rsp);
+}
+
 static int
 nix_lf_free(struct otx2_eth_dev *dev)
 {
@@ -126,7 +165,7 @@ otx2_cgx_rxtx_start(struct otx2_eth_dev *dev)
 {
 	struct otx2_mbox *mbox = dev->mbox;
 
-	if (otx2_dev_is_vf(dev))
+	if (otx2_dev_is_vf_or_sdp(dev))
 		return 0;
 
 	otx2_mbox_alloc_msg_cgx_start_rxtx(mbox);
@@ -139,7 +178,7 @@ otx2_cgx_rxtx_stop(struct otx2_eth_dev *dev)
 {
 	struct otx2_mbox *mbox = dev->mbox;
 
-	if (otx2_dev_is_vf(dev))
+	if (otx2_dev_is_vf_or_sdp(dev))
 		return 0;
 
 	otx2_mbox_alloc_msg_cgx_stop_rxtx(mbox);
@@ -147,6 +186,68 @@ otx2_cgx_rxtx_stop(struct otx2_eth_dev *dev)
 	return otx2_mbox_process(mbox);
 }
 
+static int
+npc_rx_enable(struct otx2_eth_dev *dev)
+{
+	struct otx2_mbox *mbox = dev->mbox;
+
+	otx2_mbox_alloc_msg_nix_lf_start_rx(mbox);
+
+	return otx2_mbox_process(mbox);
+}
+
+static int
+npc_rx_disable(struct otx2_eth_dev *dev)
+{
+	struct otx2_mbox *mbox = dev->mbox;
+
+	otx2_mbox_alloc_msg_nix_lf_stop_rx(mbox);
+
+	return otx2_mbox_process(mbox);
+}
+
+static int
+nix_cgx_start_link_event(struct otx2_eth_dev *dev)
+{
+	struct otx2_mbox *mbox = dev->mbox;
+
+	if (otx2_dev_is_vf_or_sdp(dev))
+		return 0;
+
+	otx2_mbox_alloc_msg_cgx_start_linkevents(mbox);
+
+	return otx2_mbox_process(mbox);
+}
+
+static int
+cgx_intlbk_enable(struct otx2_eth_dev *dev, bool en)
+{
+	struct otx2_mbox *mbox = dev->mbox;
+
+	if (en && otx2_dev_is_vf_or_sdp(dev))
+		return -ENOTSUP;
+
+	if (en)
+		otx2_mbox_alloc_msg_cgx_intlbk_enable(mbox);
+	else
+		otx2_mbox_alloc_msg_cgx_intlbk_disable(mbox);
+
+	return otx2_mbox_process(mbox);
+}
+
+static int
+nix_cgx_stop_link_event(struct otx2_eth_dev *dev)
+{
+	struct otx2_mbox *mbox = dev->mbox;
+
+	if (otx2_dev_is_vf_or_sdp(dev))
+		return 0;
+
+	otx2_mbox_alloc_msg_cgx_stop_linkevents(mbox);
+
+	return otx2_mbox_process(mbox);
+}
+
 static inline void
 nix_rx_queue_reset(struct otx2_eth_rxq *rxq)
 {
@@ -197,8 +298,7 @@ nix_cq_rq_init(struct rte_eth_dev *eth_dev, struct otx2_eth_dev *dev,
 			 NIX_CQ_ALIGN, dev->node);
 	if (rz == NULL) {
 		otx2_err("Failed to allocate mem for cq hw ring");
-		rc = -ENOMEM;
-		goto fail;
+		return -ENOMEM;
 	}
 	memset(rz->addr, 0, rz->len);
 	rxq->desc = (uintptr_t)rz->addr;
@@ -217,30 +317,37 @@ nix_cq_rq_init(struct rte_eth_dev *eth_dev, struct otx2_eth_dev *dev,
 	aq->cq.cq_err_int_ena = BIT(NIX_CQERRINT_CQE_FAULT);
 	aq->cq.cq_err_int_ena |= BIT(NIX_CQERRINT_DOOR_ERR);
 
-	/* TX pause frames enable flowctrl on RX side */
-	if (dev->fc_info.tx_pause) {
-		/* Single bpid is allocated for all rx channels for now */
-		aq->cq.bpid = dev->fc_info.bpid[0];
-		aq->cq.bp = NIX_CQ_BP_LEVEL;
-		aq->cq.bp_ena = 1;
-	}
-
 	/* Many to one reduction */
 	aq->cq.qint_idx = qid % dev->qints;
+	/* Map CQ0 [RQ0] to CINT0 and so on till max 64 irqs */
+	aq->cq.cint_idx = qid;
 
 	if (otx2_ethdev_fixup_is_limit_cq_full(dev)) {
+		const float rx_cq_skid = NIX_CQ_FULL_ERRATA_SKID;
 		uint16_t min_rx_drop;
-		const float rx_cq_skid = 1024 * 256;
 
 		min_rx_drop = ceil(rx_cq_skid / (float)cq_size);
 		aq->cq.drop = min_rx_drop;
 		aq->cq.drop_ena = 1;
+		rxq->cq_drop = min_rx_drop;
+	} else {
+		rxq->cq_drop = NIX_CQ_THRESH_LEVEL;
+		aq->cq.drop = rxq->cq_drop;
+		aq->cq.drop_ena = 1;
+	}
+
+	/* TX pause frames enable flowctrl on RX side */
+	if (dev->fc_info.tx_pause) {
+		/* Single bpid is allocated for all rx channels for now */
+		aq->cq.bpid = dev->fc_info.bpid[0];
+		aq->cq.bp = rxq->cq_drop;
+		aq->cq.bp_ena = 1;
 	}
 
 	rc = otx2_mbox_process(mbox);
 	if (rc) {
 		otx2_err("Failed to init cq context");
-		goto fail;
+		return rc;
 	}
 
 	aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
@@ -249,6 +356,10 @@ nix_cq_rq_init(struct rte_eth_dev *eth_dev, struct otx2_eth_dev *dev,
 	aq->op = NIX_AQ_INSTOP_INIT;
 
 	aq->rq.sso_ena = 0;
+
+	if (rxq->offloads & DEV_RX_OFFLOAD_SECURITY)
+		aq->rq.ipsech_ena = 1;
+
 	aq->rq.cq = qid; /* RQ to CQ 1:1 mapped */
 	aq->rq.spb_ena = 0;
 	aq->rq.lpb_aura = npa_lf_aura_handle_to_aura(mp->pool_id);
@@ -261,10 +372,7 @@ nix_cq_rq_init(struct rte_eth_dev *eth_dev, struct otx2_eth_dev *dev,
 	aq->rq.first_skip = first_skip;
 	aq->rq.later_skip = (sizeof(struct rte_mbuf) / 8);
 	aq->rq.flow_tagw = 32; /* 32-bits */
-	aq->rq.lpb_sizem1 = rte_pktmbuf_data_room_size(mp);
-	aq->rq.lpb_sizem1 += rte_pktmbuf_priv_size(mp);
-	aq->rq.lpb_sizem1 += sizeof(struct rte_mbuf);
-	aq->rq.lpb_sizem1 /= 8;
+	aq->rq.lpb_sizem1 = mp->elt_size / 8;
 	aq->rq.lpb_sizem1 -= 1; /* Expressed in size minus one */
 	aq->rq.ena = 1;
 	aq->rq.pb_caching = 0x2; /* First cache aligned block to LLC */
@@ -273,18 +381,49 @@ nix_cq_rq_init(struct rte_eth_dev *eth_dev, struct otx2_eth_dev *dev,
 	/* Many to one reduction */
 	aq->rq.qint_idx = qid % dev->qints;
 
-	if (otx2_ethdev_fixup_is_limit_cq_full(dev))
-		aq->rq.xqe_drop_ena = 1;
+	aq->rq.xqe_drop_ena = 1;
 
 	rc = otx2_mbox_process(mbox);
 	if (rc) {
 		otx2_err("Failed to init rq context");
-		goto fail;
+		return rc;
+	}
+
+	if (dev->lock_rx_ctx) {
+		aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
+		aq->qidx = qid;
+		aq->ctype = NIX_AQ_CTYPE_CQ;
+		aq->op = NIX_AQ_INSTOP_LOCK;
+
+		aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
+		if (!aq) {
+			/* The shared memory buffer can be full.
+			 * Flush it and retry
+			 */
+			otx2_mbox_msg_send(mbox, 0);
+			rc = otx2_mbox_wait_for_rsp(mbox, 0);
+			if (rc < 0) {
+				otx2_err("Failed to LOCK cq context");
+				return rc;
+			}
+
+			aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
+			if (!aq) {
+				otx2_err("Failed to LOCK rq context");
+				return -ENOMEM;
+			}
+		}
+		aq->qidx = qid;
+		aq->ctype = NIX_AQ_CTYPE_RQ;
+		aq->op = NIX_AQ_INSTOP_LOCK;
+		rc = otx2_mbox_process(mbox);
+		if (rc < 0) {
+			otx2_err("Failed to LOCK rq context");
+			return rc;
+		}
 	}
 
 	return 0;
-fail:
-	return rc;
 }
 
 static int
@@ -331,6 +470,40 @@ nix_cq_rq_uninit(struct rte_eth_dev *eth_dev, struct otx2_eth_rxq *rxq)
 		return rc;
 	}
 
+	if (dev->lock_rx_ctx) {
+		aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
+		aq->qidx = rxq->rq;
+		aq->ctype = NIX_AQ_CTYPE_CQ;
+		aq->op = NIX_AQ_INSTOP_UNLOCK;
+
+		aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
+		if (!aq) {
+			/* The shared memory buffer can be full.
+			 * Flush it and retry
+			 */
+			otx2_mbox_msg_send(mbox, 0);
+			rc = otx2_mbox_wait_for_rsp(mbox, 0);
+			if (rc < 0) {
+				otx2_err("Failed to UNLOCK cq context");
+				return rc;
+			}
+
+			aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
+			if (!aq) {
+				otx2_err("Failed to UNLOCK rq context");
+				return -ENOMEM;
+			}
+		}
+		aq->qidx = rxq->rq;
+		aq->ctype = NIX_AQ_CTYPE_RQ;
+		aq->op = NIX_AQ_INSTOP_UNLOCK;
+		rc = otx2_mbox_process(mbox);
+		if (rc < 0) {
+			otx2_err("Failed to UNLOCK rq context");
+			return rc;
+		}
+	}
+
 	return 0;
 }
 
@@ -420,6 +593,7 @@ otx2_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t rq,
 	if (eth_dev->data->rx_queues[rq] != NULL) {
 		otx2_nix_dbg("Freeing memory prior to re-allocation %d", rq);
 		otx2_nix_rx_queue_release(eth_dev->data->rx_queues[rq]);
+		rte_eth_dma_zone_free(eth_dev, "cq", rq);
 		eth_dev->data->rx_queues[rq] = NULL;
 	}
 
@@ -469,6 +643,20 @@ otx2_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t rq,
 
 	eth_dev->data->rx_queues[rq] = rxq;
 	eth_dev->data->rx_queue_state[rq] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+	/* Calculating delta and freq mult between PTP HI clock and tsc.
+	 * These are needed in deriving raw clock value from tsc counter.
+	 * read_clock eth op returns raw clock value.
+	 */
+	if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) ||
+	    otx2_ethdev_is_ptp_en(dev)) {
+		rc = otx2_nix_raw_clock_tsc_conv(dev);
+		if (rc) {
+			otx2_err("Failed to calculate delta and freq mult");
+			goto fail;
+		}
+	}
+
 	return 0;
 
 free_rxq:
@@ -490,6 +678,206 @@ nix_sq_max_sqe_sz(struct otx2_eth_txq *txq)
 		return NIX_MAXSQESZ_W8;
 }
 
+static uint16_t
+nix_rx_offload_flags(struct rte_eth_dev *eth_dev)
+{
+	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+	struct rte_eth_dev_data *data = eth_dev->data;
+	struct rte_eth_conf *conf = &data->dev_conf;
+	struct rte_eth_rxmode *rxmode = &conf->rxmode;
+	uint16_t flags = 0;
+
+	if (rxmode->mq_mode == ETH_MQ_RX_RSS &&
+	    (dev->rx_offloads & DEV_RX_OFFLOAD_RSS_HASH))
+		flags |= NIX_RX_OFFLOAD_RSS_F;
+
+	if (dev->rx_offloads & (DEV_RX_OFFLOAD_TCP_CKSUM |
+			 DEV_RX_OFFLOAD_UDP_CKSUM))
+		flags |= NIX_RX_OFFLOAD_CHECKSUM_F;
+
+	if (dev->rx_offloads & (DEV_RX_OFFLOAD_IPV4_CKSUM |
+				DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM))
+		flags |= NIX_RX_OFFLOAD_CHECKSUM_F;
+
+	if (dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)
+		flags |= NIX_RX_MULTI_SEG_F;
+
+	if (dev->rx_offloads & (DEV_RX_OFFLOAD_VLAN_STRIP |
+				DEV_RX_OFFLOAD_QINQ_STRIP))
+		flags |= NIX_RX_OFFLOAD_VLAN_STRIP_F;
+
+	if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP))
+		flags |= NIX_RX_OFFLOAD_TSTAMP_F;
+
+	if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY)
+		flags |= NIX_RX_OFFLOAD_SECURITY_F;
+
+	if (!dev->ptype_disable)
+		flags |= NIX_RX_OFFLOAD_PTYPE_F;
+
+	return flags;
+}
+
+static uint16_t
+nix_tx_offload_flags(struct rte_eth_dev *eth_dev)
+{
+	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+	uint64_t conf = dev->tx_offloads;
+	uint16_t flags = 0;
+
+	/* Fastpath is dependent on these enums */
+	RTE_BUILD_BUG_ON(PKT_TX_TCP_CKSUM != (1ULL << 52));
+	RTE_BUILD_BUG_ON(PKT_TX_SCTP_CKSUM != (2ULL << 52));
+	RTE_BUILD_BUG_ON(PKT_TX_UDP_CKSUM != (3ULL << 52));
+	RTE_BUILD_BUG_ON(PKT_TX_IP_CKSUM != (1ULL << 54));
+	RTE_BUILD_BUG_ON(PKT_TX_IPV4 != (1ULL << 55));
+	RTE_BUILD_BUG_ON(PKT_TX_OUTER_IP_CKSUM != (1ULL << 58));
+	RTE_BUILD_BUG_ON(PKT_TX_OUTER_IPV4 != (1ULL << 59));
+	RTE_BUILD_BUG_ON(PKT_TX_OUTER_IPV6 != (1ULL << 60));
+	RTE_BUILD_BUG_ON(PKT_TX_OUTER_UDP_CKSUM != (1ULL << 41));
+	RTE_BUILD_BUG_ON(RTE_MBUF_L2_LEN_BITS != 7);
+	RTE_BUILD_BUG_ON(RTE_MBUF_L3_LEN_BITS != 9);
+	RTE_BUILD_BUG_ON(RTE_MBUF_OUTL2_LEN_BITS != 7);
+	RTE_BUILD_BUG_ON(RTE_MBUF_OUTL3_LEN_BITS != 9);
+	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_off) !=
+			 offsetof(struct rte_mbuf, buf_iova) + 8);
+	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) !=
+			 offsetof(struct rte_mbuf, buf_iova) + 16);
+	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
+			 offsetof(struct rte_mbuf, ol_flags) + 12);
+	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, tx_offload) !=
+			 offsetof(struct rte_mbuf, pool) + 2 * sizeof(void *));
+
+	if (conf & DEV_TX_OFFLOAD_VLAN_INSERT ||
+	    conf & DEV_TX_OFFLOAD_QINQ_INSERT)
+		flags |= NIX_TX_OFFLOAD_VLAN_QINQ_F;
+
+	if (conf & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
+	    conf & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)
+		flags |= NIX_TX_OFFLOAD_OL3_OL4_CSUM_F;
+
+	if (conf & DEV_TX_OFFLOAD_IPV4_CKSUM ||
+	    conf & DEV_TX_OFFLOAD_TCP_CKSUM ||
+	    conf & DEV_TX_OFFLOAD_UDP_CKSUM ||
+	    conf & DEV_TX_OFFLOAD_SCTP_CKSUM)
+		flags |= NIX_TX_OFFLOAD_L3_L4_CSUM_F;
+
+	if (!(conf & DEV_TX_OFFLOAD_MBUF_FAST_FREE))
+		flags |= NIX_TX_OFFLOAD_MBUF_NOFF_F;
+
+	if (conf & DEV_TX_OFFLOAD_MULTI_SEGS)
+		flags |= NIX_TX_MULTI_SEG_F;
+
+	/* Enable Inner checksum for TSO */
+	if (conf & DEV_TX_OFFLOAD_TCP_TSO)
+		flags |= (NIX_TX_OFFLOAD_TSO_F |
+			  NIX_TX_OFFLOAD_L3_L4_CSUM_F);
+
+	/* Enable Inner and Outer checksum for Tunnel TSO */
+	if (conf & (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
+		    DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
+		    DEV_TX_OFFLOAD_GRE_TNL_TSO))
+		flags |= (NIX_TX_OFFLOAD_TSO_F |
+			  NIX_TX_OFFLOAD_OL3_OL4_CSUM_F |
+			  NIX_TX_OFFLOAD_L3_L4_CSUM_F);
+
+	if (conf & DEV_TX_OFFLOAD_SECURITY)
+		flags |= NIX_TX_OFFLOAD_SECURITY_F;
+
+	if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP))
+		flags |= NIX_TX_OFFLOAD_TSTAMP_F;
+
+	return flags;
+}
+
+static int
+nix_sqb_lock(struct rte_mempool *mp)
+{
+	struct otx2_npa_lf *npa_lf = otx2_intra_dev_get_cfg()->npa_lf;
+	struct npa_aq_enq_req *req;
+	int rc;
+
+	req = otx2_mbox_alloc_msg_npa_aq_enq(npa_lf->mbox);
+	req->aura_id = npa_lf_aura_handle_to_aura(mp->pool_id);
+	req->ctype = NPA_AQ_CTYPE_AURA;
+	req->op = NPA_AQ_INSTOP_LOCK;
+
+	req = otx2_mbox_alloc_msg_npa_aq_enq(npa_lf->mbox);
+	if (!req) {
+		/* The shared memory buffer can be full.
+		 * Flush it and retry
+		 */
+		otx2_mbox_msg_send(npa_lf->mbox, 0);
+		rc = otx2_mbox_wait_for_rsp(npa_lf->mbox, 0);
+		if (rc < 0) {
+			otx2_err("Failed to LOCK AURA context");
+			return rc;
+		}
+
+		req = otx2_mbox_alloc_msg_npa_aq_enq(npa_lf->mbox);
+		if (!req) {
+			otx2_err("Failed to LOCK POOL context");
+			return -ENOMEM;
+		}
+	}
+
+	req->aura_id = npa_lf_aura_handle_to_aura(mp->pool_id);
+	req->ctype = NPA_AQ_CTYPE_POOL;
+	req->op = NPA_AQ_INSTOP_LOCK;
+
+	rc = otx2_mbox_process(npa_lf->mbox);
+	if (rc < 0) {
+		otx2_err("Unable to lock POOL in NDC");
+		return rc;
+	}
+
+	return 0;
+}
+
+static int
+nix_sqb_unlock(struct rte_mempool *mp)
+{
+	struct otx2_npa_lf *npa_lf = otx2_intra_dev_get_cfg()->npa_lf;
+	struct npa_aq_enq_req *req;
+	int rc;
+
+	req = otx2_mbox_alloc_msg_npa_aq_enq(npa_lf->mbox);
+	req->aura_id = npa_lf_aura_handle_to_aura(mp->pool_id);
+	req->ctype = NPA_AQ_CTYPE_AURA;
+	req->op = NPA_AQ_INSTOP_UNLOCK;
+
+	req = otx2_mbox_alloc_msg_npa_aq_enq(npa_lf->mbox);
+	if (!req) {
+		/* The shared memory buffer can be full.
+		 * Flush it and retry
+		 */
+		otx2_mbox_msg_send(npa_lf->mbox, 0);
+		rc = otx2_mbox_wait_for_rsp(npa_lf->mbox, 0);
+		if (rc < 0) {
+			otx2_err("Failed to UNLOCK AURA context");
+			return rc;
+		}
+
+		req = otx2_mbox_alloc_msg_npa_aq_enq(npa_lf->mbox);
+		if (!req) {
+			otx2_err("Failed to UNLOCK POOL context");
+			return -ENOMEM;
+		}
+	}
+	req = otx2_mbox_alloc_msg_npa_aq_enq(npa_lf->mbox);
+	req->aura_id = npa_lf_aura_handle_to_aura(mp->pool_id);
+	req->ctype = NPA_AQ_CTYPE_POOL;
+	req->op = NPA_AQ_INSTOP_UNLOCK;
+
+	rc = otx2_mbox_process(npa_lf->mbox);
+	if (rc < 0) {
+		otx2_err("Unable to UNLOCK AURA in NDC");
+		return rc;
+	}
+
+	return 0;
+}
+
 static int
 nix_sq_init(struct otx2_eth_txq *txq)
 {
@@ -532,7 +920,20 @@ nix_sq_init(struct otx2_eth_txq *txq)
 	/* Many to one reduction */
 	sq->sq.qint_idx = txq->sq % dev->qints;
 
-	return otx2_mbox_process(mbox);
+	rc = otx2_mbox_process(mbox);
+	if (rc < 0)
+		return rc;
+
+	if (dev->lock_tx_ctx) {
+		sq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
+		sq->qidx = txq->sq;
+		sq->ctype = NIX_AQ_CTYPE_SQ;
+		sq->op = NIX_AQ_INSTOP_LOCK;
+
+		rc = otx2_mbox_process(mbox);
+	}
+
+	return rc;
 }
 
 static int
@@ -575,6 +976,20 @@ nix_sq_uninit(struct otx2_eth_txq *txq)
 	if (rc)
 		return rc;
 
+	if (dev->lock_tx_ctx) {
+		/* Unlock sq */
+		aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
+		aq->qidx = txq->sq;
+		aq->ctype = NIX_AQ_CTYPE_SQ;
+		aq->op = NIX_AQ_INSTOP_UNLOCK;
+
+		rc = otx2_mbox_process(mbox);
+		if (rc < 0)
+			return rc;
+
+		nix_sqb_unlock(txq->sqb_pool);
+	}
+
 	/* Read SQ and free sqb's */
 	aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
 	aq->qidx = txq->sq;
@@ -595,7 +1010,8 @@ nix_sq_uninit(struct otx2_eth_txq *txq)
 	while (count) {
 		void *next_sqb;
 
-		next_sqb = *(void **)((uintptr_t)sqb_buf + ((sqes_per_sqb - 1) *
+		next_sqb = *(void **)((uintptr_t)sqb_buf + (uint32_t)
+				      ((sqes_per_sqb - 1) *
 				      nix_sq_max_sqe_sz(txq)));
 
 		npa_lf_aura_op_free(txq->sqb_pool->pool_id, 1,
				    (uint64_t)sqb_buf);
@@ -656,7 +1072,7 @@ nix_alloc_sqb_pool(int port, struct otx2_eth_txq *txq, uint16_t nb_desc)
 	nb_sqb_bufs = nb_desc / sqes_per_sqb;
 
 	/* Clamp up to devarg passed SQB count */
-	nb_sqb_bufs = RTE_MIN(dev->max_sqb_count, RTE_MAX(NIX_MIN_SQB,
+	nb_sqb_bufs = RTE_MIN(dev->max_sqb_count, RTE_MAX(NIX_DEF_SQB,
 			      nb_sqb_bufs + NIX_SQB_LIST_SPACE));
 
 	txq->sqb_pool = rte_mempool_create_empty(name, NIX_MAX_SQB, blk_sz,
@@ -695,6 +1111,8 @@ nix_alloc_sqb_pool(int port, struct otx2_eth_txq *txq, uint16_t nb_desc)
 	}
 
 	nix_sqb_aura_limit_cfg(txq->sqb_pool, txq->nb_sqb_bufs);
+	if (dev->lock_tx_ctx)
+		nix_sqb_lock(txq->sqb_pool);
 
 	return 0;
 fail:
@@ -733,8 +1151,6 @@ otx2_nix_form_default_desc(struct otx2_eth_txq *txq)
 			send_mem = (struct nix_send_mem_s *)(txq->cmd +
 				(send_hdr->w0.sizem1 << 1));
 			send_mem->subdc = NIX_SUBDC_MEM;
-			send_mem->dsz = 0x0;
-			send_mem->wmem = 0x1;
 			send_mem->alg = NIX_SENDMEMALG_SETTSTMP;
 			send_mem->addr = txq->dev->tstamp.tx_tstamp_iova;
 		}
@@ -768,7 +1184,7 @@ otx2_nix_tx_queue_release(void *_txq)
 	otx2_nix_dbg("Releasing txq %u", txq->sq);
 
 	/* Flush and disable tm */
-	otx2_nix_tm_sw_xoff(txq, eth_dev->data->dev_started);
+	otx2_nix_sq_flush_pre(txq, eth_dev->data->dev_started);
 
 	/* Free sqb's and disable sq */
 	nix_sq_uninit(txq);
@@ -777,6 +1193,7 @@ otx2_nix_tx_queue_release(void *_txq)
 		rte_mempool_free(txq->sqb_pool);
 		txq->sqb_pool = NULL;
 	}
+	otx2_nix_sq_flush_post(txq);
 	rte_free(txq);
 }
 
@@ -908,10 +1325,12 @@ nix_store_queue_cfg_and_then_release(struct rte_eth_dev *eth_dev)
 	txq = (struct otx2_eth_txq **)eth_dev->data->tx_queues;
 	for (i = 0; i < nb_txq; i++) {
 		if (txq[i] == NULL) {
-			otx2_err("txq[%d] is already released", i);
-			goto fail;
+			tx_qconf[i].valid = false;
+			otx2_info("txq[%d] is already released", i);
+			continue;
 		}
 		memcpy(&tx_qconf[i], &txq[i]->qconf, sizeof(*tx_qconf));
+		tx_qconf[i].valid = true;
 		otx2_nix_tx_queue_release(txq[i]);
 		eth_dev->data->tx_queues[i] = NULL;
 	}
@@ -919,10 +1338,12 @@ nix_store_queue_cfg_and_then_release(struct rte_eth_dev *eth_dev)
 	rxq = (struct otx2_eth_rxq **)eth_dev->data->rx_queues;
 	for (i = 0; i < nb_rxq; i++) {
 		if (rxq[i] == NULL) {
-			otx2_err("rxq[%d] is already released", i);
-			goto fail;
+			rx_qconf[i].valid = false;
+			otx2_info("rxq[%d] is already released", i);
+			continue;
 		}
 		memcpy(&rx_qconf[i], &rxq[i]->qconf, sizeof(*rx_qconf));
+		rx_qconf[i].valid = true;
 		otx2_nix_rx_queue_release(rxq[i]);
 		eth_dev->data->rx_queues[i] = NULL;
 	}
@@ -977,6 +1398,8 @@ nix_restore_queue_cfg(struct rte_eth_dev *eth_dev)
 	 * queues are already setup in port_configure().
 	 */
 	for (i = 0; i < nb_txq; i++) {
+		if (!tx_qconf[i].valid)
+			continue;
 		rc = otx2_nix_tx_queue_setup(eth_dev, i, tx_qconf[i].nb_desc,
 					     tx_qconf[i].socket_id,
 					     &tx_qconf[i].conf.tx);
@@ -992,6 +1415,8 @@ nix_restore_queue_cfg(struct rte_eth_dev *eth_dev)
 	free(tx_qconf); tx_qconf = NULL;
 
 	for (i = 0; i < nb_rxq; i++) {
+		if (!rx_qconf[i].valid)
+			continue;
 		rc = otx2_nix_rx_queue_setup(eth_dev, i, rx_qconf[i].nb_desc,
 					     rx_qconf[i].socket_id,
 					     &rx_qconf[i].conf.rx,
@@ -1047,6 +1472,306 @@ nix_set_nop_rxtx_function(struct rte_eth_dev *eth_dev)
 	rte_mb();
 }
 
+static void
+nix_lso_tcp(struct nix_lso_format_cfg *req, bool v4)
+{
+	volatile struct nix_lso_format *field;
+
+	/* Format works only with TCP packet marked by OL3/OL4 */
+	field = (volatile struct nix_lso_format *)&req->fields[0];
+	req->field_mask = NIX_LSO_FIELD_MASK;
+	/* Outer IPv4/IPv6 */
+	field->layer = NIX_TXLAYER_OL3;
+	field->offset = v4 ? 2 : 4;
+	field->sizem1 = 1; /* 2B */
+	field->alg = NIX_LSOALG_ADD_PAYLEN;
+	field++;
+	if (v4) {
+		/* IPID field */
+		field->layer = NIX_TXLAYER_OL3;
+		field->offset = 4;
+		field->sizem1 = 1;
+		/* Incremented linearly per segment */
+		field->alg = NIX_LSOALG_ADD_SEGNUM;
+		field++;
+	}
+
+	/* TCP sequence number update */
+	field->layer = NIX_TXLAYER_OL4;
+	field->offset = 4;
+	field->sizem1 = 3; /* 4 bytes */
+	field->alg = NIX_LSOALG_ADD_OFFSET;
+	field++;
+	/* TCP flags field */
+	field->layer = NIX_TXLAYER_OL4;
+	field->offset = 12;
+	field->sizem1 = 1;
+	field->alg = NIX_LSOALG_TCP_FLAGS;
+	field++;
+}
+
+static void
+nix_lso_udp_tun_tcp(struct nix_lso_format_cfg *req,
+		    bool outer_v4, bool inner_v4)
+{
+	volatile struct nix_lso_format *field;
+
+	field = (volatile struct nix_lso_format *)&req->fields[0];
+	req->field_mask = NIX_LSO_FIELD_MASK;
+	/* Outer IPv4/IPv6 len */
+	field->layer = NIX_TXLAYER_OL3;
+	field->offset = outer_v4 ? 2 : 4;
+	field->sizem1 = 1; /* 2B */
+	field->alg = NIX_LSOALG_ADD_PAYLEN;
+	field++;
+	if (outer_v4) {
+		/* IPID */
+		field->layer = NIX_TXLAYER_OL3;
+		field->offset = 4;
+		field->sizem1 = 1;
+		/* Incremented linearly per segment */
+		field->alg = NIX_LSOALG_ADD_SEGNUM;
+		field++;
+	}
+
+	/* Outer UDP length */
+	field->layer = NIX_TXLAYER_OL4;
+	field->offset = 4;
+	field->sizem1 = 1;
+	field->alg = NIX_LSOALG_ADD_PAYLEN;
+	field++;
+
+	/* Inner IPv4/IPv6 */
+	field->layer = NIX_TXLAYER_IL3;
+	field->offset = inner_v4 ? 2 : 4;
+	field->sizem1 = 1; /* 2B */
+	field->alg = NIX_LSOALG_ADD_PAYLEN;
+	field++;
+	if (inner_v4) {
+		/* IPID field */
+		field->layer = NIX_TXLAYER_IL3;
+		field->offset = 4;
+		field->sizem1 = 1;
+		/* Incremented linearly per segment */
+		field->alg = NIX_LSOALG_ADD_SEGNUM;
+		field++;
+	}
+
+	/* TCP sequence number update */
+	field->layer = NIX_TXLAYER_IL4;
+	field->offset = 4;
+	field->sizem1 = 3; /* 4 bytes */
+	field->alg = NIX_LSOALG_ADD_OFFSET;
+	field++;
+
+	/* TCP flags field */
+	field->layer = NIX_TXLAYER_IL4;
+	field->offset = 12;
+	field->sizem1 = 1;
+	field->alg = NIX_LSOALG_TCP_FLAGS;
+	field++;
+}
+
+static void
+nix_lso_tun_tcp(struct nix_lso_format_cfg *req,
+		bool outer_v4, bool inner_v4)
+{
+	volatile struct nix_lso_format *field;
+
+	field = (volatile struct nix_lso_format *)&req->fields[0];
+	req->field_mask = NIX_LSO_FIELD_MASK;
+	/* Outer IPv4/IPv6 len */
+	field->layer = NIX_TXLAYER_OL3;
+	field->offset = outer_v4 ? 2 : 4;
+	field->sizem1 = 1; /* 2B */
+	field->alg = NIX_LSOALG_ADD_PAYLEN;
+	field++;
+	if (outer_v4) {
+		/* IPID */
+		field->layer = NIX_TXLAYER_OL3;
+		field->offset = 4;
+		field->sizem1 = 1;
+		/* Incremented linearly per segment */
+		field->alg = NIX_LSOALG_ADD_SEGNUM;
+		field++;
+	}
+
+	/* Inner IPv4/IPv6 */
+	field->layer = NIX_TXLAYER_IL3;
+	field->offset = inner_v4 ? 2 : 4;
+	field->sizem1 = 1; /* 2B */
+	field->alg = NIX_LSOALG_ADD_PAYLEN;
+	field++;
+	if (inner_v4) {
+		/* IPID field */
+		field->layer = NIX_TXLAYER_IL3;
+		field->offset = 4;
+		field->sizem1 = 1;
+		/* Incremented linearly per segment */
+		field->alg = NIX_LSOALG_ADD_SEGNUM;
+		field++;
+	}
+
+	/* TCP sequence number update */
+	field->layer = NIX_TXLAYER_IL4;
+	field->offset = 4;
+	field->sizem1 = 3; /* 4 bytes */
+	field->alg = NIX_LSOALG_ADD_OFFSET;
+	field++;
+
+	/* TCP flags field */
+	field->layer = NIX_TXLAYER_IL4;
+	field->offset = 12;
+	field->sizem1 = 1;
+	field->alg = NIX_LSOALG_TCP_FLAGS;
+	field++;
+}
+
+static int
+nix_setup_lso_formats(struct otx2_eth_dev *dev)
+{
+	struct otx2_mbox *mbox = dev->mbox;
+	struct nix_lso_format_cfg_rsp *rsp;
+	struct nix_lso_format_cfg *req;
+	uint8_t base;
+	int rc;
+
+	/* Skip if TSO was not requested */
+	if (!(dev->tx_offload_flags & NIX_TX_OFFLOAD_TSO_F))
+		return 0;
+	/*
+	 * IPv4/TCP LSO
+	 */
+	req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox);
+	nix_lso_tcp(req, true);
+	rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+	if (rc)
+		return rc;
+
+	base = rsp->lso_format_idx;
+	if (base != NIX_LSO_FORMAT_IDX_TSOV4)
+		return -EFAULT;
+	dev->lso_base_idx = base;
+	otx2_nix_dbg("tcpv4 lso fmt=%u", base);
+
+
+	/*
+	 * IPv6/TCP LSO
+	 */
+	req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox);
+	nix_lso_tcp(req, false);
+	rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+	if (rc)
+		return rc;
+
+	if (rsp->lso_format_idx != base + 1)
+		return -EFAULT;
+	otx2_nix_dbg("tcpv6 lso fmt=%u\n", base + 1);
+
+	/*
+	 * IPv4/UDP/TUN HDR/IPv4/TCP LSO
+	 */
+	req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox);
+	nix_lso_udp_tun_tcp(req, true, true);
+	rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+	if (rc)
+		return rc;
+
+	if (rsp->lso_format_idx != base + 2)
+		return -EFAULT;
+	otx2_nix_dbg("udp tun v4v4 fmt=%u\n", base + 2);
+
+	/*
+	 * IPv4/UDP/TUN HDR/IPv6/TCP LSO
+	 */
+	req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox);
+	nix_lso_udp_tun_tcp(req, true, false);
+	rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+	if (rc)
+		return rc;
+
+	if (rsp->lso_format_idx != base + 3)
+		return -EFAULT;
+	otx2_nix_dbg("udp tun v4v6 fmt=%u\n", base + 3);
+
+	/*
+	 * IPv6/UDP/TUN HDR/IPv4/TCP LSO
+	 */
+	req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox);
+	nix_lso_udp_tun_tcp(req, false, true);
+	rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+	if (rc)
+		return rc;
+
+	if (rsp->lso_format_idx != base + 4)
+		return -EFAULT;
+	otx2_nix_dbg("udp tun v6v4 fmt=%u\n", base + 4);
+
+	/*
+	 * IPv6/UDP/TUN HDR/IPv6/TCP LSO
+	 */
+	req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox);
+	nix_lso_udp_tun_tcp(req, false, false);
+	rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+	if (rc)
+		return rc;
+	if (rsp->lso_format_idx != base + 5)
+		return -EFAULT;
+	otx2_nix_dbg("udp tun v6v6 fmt=%u\n", base + 5);
+
+	/*
+	 * IPv4/TUN HDR/IPv4/TCP LSO
+	 */
+	req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox);
+	nix_lso_tun_tcp(req, true, true);
+	rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+	if (rc)
+		return rc;
+
+	if (rsp->lso_format_idx != base + 6)
+		return -EFAULT;
+	otx2_nix_dbg("tun v4v4 fmt=%u\n", base + 6);
+
+	/*
+	 * IPv4/TUN HDR/IPv6/TCP LSO
+	 */
+	req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox);
+	nix_lso_tun_tcp(req, true, false);
+	rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+	if (rc)
+		return rc;
+
+	if (rsp->lso_format_idx != base + 7)
+		return -EFAULT;
+	otx2_nix_dbg("tun v4v6 fmt=%u\n", base + 7);
+
+	/*
+	 * IPv6/TUN HDR/IPv4/TCP LSO
+	 */
+	req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox);
+	nix_lso_tun_tcp(req, false, true);
+	rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+	if (rc)
+		return rc;
+
+	if (rsp->lso_format_idx != base + 8)
+		return -EFAULT;
+	otx2_nix_dbg("tun v6v4 fmt=%u\n", base + 8);
+
+	/*
+	 * IPv6/TUN HDR/IPv6/TCP LSO
+	 */
+	req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox);
+	nix_lso_tun_tcp(req, false, false);
+	rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+	if (rc)
+		return rc;
+	if (rsp->lso_format_idx != base + 9)
+		return -EFAULT;
+	otx2_nix_dbg("tun v6v6 fmt=%u\n", base + 9);
+	return 0;
+}
+
 static int
 otx2_nix_configure(struct rte_eth_dev *eth_dev)
 {
@@ -1065,63 +1790,60 @@ otx2_nix_configure(struct rte_eth_dev *eth_dev)
 	/* Sanity checks */
 	if (rte_eal_has_hugepages() == 0) {
 		otx2_err("Huge page is not configured");
-		goto fail;
-	}
-
-	if (rte_eal_iova_mode() != RTE_IOVA_VA) {
-		otx2_err("iova mode should be va");
-		goto fail;
-	}
-
-	if (conf->link_speeds & ETH_LINK_SPEED_FIXED) {
-		otx2_err("Setting link speed/duplex not supported");
-		goto fail;
+		goto fail_configure;
 	}
 
 	if (conf->dcb_capability_en == 1) {
 		otx2_err("dcb enable is not supported");
-		goto fail;
+		goto fail_configure;
 	}
 
 	if (conf->fdir_conf.mode != RTE_FDIR_MODE_NONE) {
 		otx2_err("Flow director is not supported");
-		goto fail;
+		goto fail_configure;
 	}
 
 	if (rxmode->mq_mode != ETH_MQ_RX_NONE &&
 	    rxmode->mq_mode != ETH_MQ_RX_RSS) {
 		otx2_err("Unsupported mq rx mode %d", rxmode->mq_mode);
-		goto fail;
+		goto fail_configure;
 	}
 
 	if (txmode->mq_mode != ETH_MQ_TX_NONE) {
 		otx2_err("Unsupported mq tx mode %d", txmode->mq_mode);
-		goto fail;
+		goto fail_configure;
+	}
+
+	if (otx2_dev_is_Ax(dev) &&
+	    (txmode->offloads & DEV_TX_OFFLOAD_SCTP_CKSUM) &&
+	    ((txmode->offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
+	    (txmode->offloads & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM))) {
+		otx2_err("Outer IP and SCTP checksum unsupported");
+		goto fail_configure;
 	}
 
 	/* Free the resources allocated from the previous configure */
 	if (dev->configured == 1) {
+		otx2_eth_sec_fini(eth_dev);
 		otx2_nix_rxchan_bpid_cfg(eth_dev, false);
+		otx2_nix_vlan_fini(eth_dev);
+		otx2_nix_mc_addr_list_uninstall(eth_dev);
+		otx2_flow_free_all_resources(dev);
 		oxt2_nix_unregister_queue_irqs(eth_dev);
+		if (eth_dev->data->dev_conf.intr_conf.rxq)
+			oxt2_nix_unregister_cq_irqs(eth_dev);
 		nix_set_nop_rxtx_function(eth_dev);
 		rc = nix_store_queue_cfg_and_then_release(eth_dev);
 		if (rc)
-			goto fail;
+			goto fail_configure;
 		otx2_nix_tm_fini(eth_dev);
 		nix_lf_free(dev);
 	}
 
-	if (otx2_dev_is_A0(dev) &&
-	    (txmode->offloads & DEV_TX_OFFLOAD_SCTP_CKSUM) &&
-	    ((txmode->offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
-	    (txmode->offloads & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM))) {
-		otx2_err("Outer IP and SCTP checksum unsupported");
-		rc = -EINVAL;
-		goto fail;
-	}
-
 	dev->rx_offloads = rxmode->offloads;
 	dev->tx_offloads = txmode->offloads;
+	dev->rx_offload_flags |= nix_rx_offload_flags(eth_dev);
+	dev->tx_offload_flags |= nix_tx_offload_flags(eth_dev);
 	dev->rss_info.rss_grps = NIX_RSS_GRPS;
 
 	nb_rxq = RTE_MAX(data->nb_rx_queues, 1);
@@ -1131,7 +1853,28 @@ otx2_nix_configure(struct rte_eth_dev *eth_dev)
 	rc = nix_lf_alloc(dev, nb_rxq, nb_txq);
 	if (rc) {
 		otx2_err("Failed to init nix_lf rc=%d", rc);
-		goto fail;
+		goto fail_offloads;
+	}
+
+	otx2_nix_err_intr_enb_dis(eth_dev, true);
+	otx2_nix_ras_intr_enb_dis(eth_dev, true);
+
+	if (dev->ptp_en &&
+	    dev->npc_flow.switch_header_type == OTX2_PRIV_FLAGS_HIGIG) {
+		otx2_err("Both PTP and switch header enabled");
+		goto free_nix_lf;
+	}
+
+	rc = nix_lf_switch_header_type_enable(dev, true);
+	if (rc) {
+		otx2_err("Failed to enable switch type nix_lf rc=%d", rc);
+		goto free_nix_lf;
+	}
+
+	rc = nix_setup_lso_formats(dev);
+	if (rc) {
+		otx2_err("failed to setup nix lso format fields, rc=%d", rc);
+		goto free_nix_lf;
 	}
 
 	/* Configure RSS */
@@ -1148,28 +1891,69 @@ otx2_nix_configure(struct rte_eth_dev *eth_dev)
 		goto free_nix_lf;
 	}
 
+	rc = otx2_nix_vlan_offload_init(eth_dev);
+	if (rc) {
+		otx2_err("Failed to init vlan offload rc=%d", rc);
+		goto tm_fini;
+	}
+
 	/* Register queue IRQs */
 	rc = oxt2_nix_register_queue_irqs(eth_dev);
 	if (rc) {
 		otx2_err("Failed to register queue interrupts rc=%d", rc);
-		goto free_nix_lf;
+		goto vlan_fini;
+	}
+
+	/* Register cq IRQs */
+	if (eth_dev->data->dev_conf.intr_conf.rxq) {
+		if (eth_dev->data->nb_rx_queues > dev->cints) {
+			otx2_err("Rx interrupt cannot be enabled, rxq > %d",
+				 dev->cints);
+			goto q_irq_fini;
+		}
+		/* Rx interrupt feature cannot work with vector mode because,
+		 * vector mode doesn't process packets unless min 4 pkts are
+		 * received, while cq interrupts are generated even for 1 pkt
+		 * in the CQ.
+		 */
+		dev->scalar_ena = true;
+
+		rc = oxt2_nix_register_cq_irqs(eth_dev);
+		if (rc) {
+			otx2_err("Failed to register CQ interrupts rc=%d", rc);
+			goto q_irq_fini;
+		}
+	}
+
+	/* Configure loop back mode */
+	rc = cgx_intlbk_enable(dev, eth_dev->data->dev_conf.lpbk_mode);
+	if (rc) {
+		otx2_err("Failed to configure cgx loop back mode rc=%d", rc);
+		goto cq_fini;
 	}
 
 	rc = otx2_nix_rxchan_bpid_cfg(eth_dev, true);
 	if (rc) {
 		otx2_err("Failed to configure nix rx chan bpid cfg rc=%d", rc);
-		goto free_nix_lf;
+		goto cq_fini;
 	}
 
-	/* Enable PTP if it was requested by the app or if it is already
-	 * enabled in PF owning this VF
-	 */
-	memset(&dev->tstamp, 0, sizeof(struct otx2_timesync_info));
-	if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) ||
-	    otx2_ethdev_is_ptp_en(dev))
-		otx2_nix_timesync_enable(eth_dev);
-	else
-		otx2_nix_timesync_disable(eth_dev);
+	/* Enable security */
+	rc = otx2_eth_sec_init(eth_dev);
+	if (rc)
+		goto cq_fini;
+
+	rc = otx2_nix_flow_ctrl_init(eth_dev);
+	if (rc) {
+		otx2_err("Failed to init flow ctrl mode %d", rc);
+		goto cq_fini;
+	}
+
+	rc = otx2_nix_mc_addr_list_install(eth_dev);
+	if (rc < 0) {
+		otx2_err("Failed to install mc address list rc=%d", rc);
+		goto sec_fini;
+	}
 
 	/*
 	 * Restore queue config when reconfigure followed by
@@ -1178,7 +1962,7 @@ otx2_nix_configure(struct rte_eth_dev *eth_dev)
 	if (dev->configured == 1) {
 		rc = nix_restore_queue_cfg(eth_dev);
 		if (rc)
-			goto free_nix_lf;
+			goto uninstall_mc_list;
 	}
 
 	/* Update the mac address */
@@ -1189,6 +1973,13 @@ otx2_nix_configure(struct rte_eth_dev *eth_dev)
 
 	rte_ether_format_addr(ea_fmt, RTE_ETHER_ADDR_FMT_SIZE, ea);
 
+	/* Apply new link configurations if changed */
+	rc = otx2_apply_link_speed(eth_dev);
+	if (rc) {
+		otx2_err("Failed to set link configuration");
+		goto uninstall_mc_list;
+	}
+
 	otx2_nix_dbg("Configured port%d mac=%s nb_rxq=%d nb_txq=%d"
 		" rx_offloads=0x%" PRIx64 " tx_offloads=0x%" PRIx64 ""
 		" rx_flags=0x%x tx_flags=0x%x",
@@ -1202,9 +1993,25 @@ otx2_nix_configure(struct rte_eth_dev *eth_dev)
 	dev->configured_nb_tx_qs = data->nb_tx_queues;
 	return 0;
 
+uninstall_mc_list:
+	otx2_nix_mc_addr_list_uninstall(eth_dev);
+sec_fini:
+	otx2_eth_sec_fini(eth_dev);
+cq_fini:
+	oxt2_nix_unregister_cq_irqs(eth_dev);
+q_irq_fini:
+	oxt2_nix_unregister_queue_irqs(eth_dev);
+vlan_fini:
+	otx2_nix_vlan_fini(eth_dev);
+tm_fini:
+	otx2_nix_tm_fini(eth_dev);
 free_nix_lf:
-	rc = nix_lf_free(dev);
-fail:
+	nix_lf_free(dev);
+fail_offloads:
+	dev->rx_offload_flags &= ~nix_rx_offload_flags(eth_dev);
+	dev->tx_offload_flags &= ~nix_tx_offload_flags(eth_dev);
+fail_configure:
+	dev->configured = 0;
 	return rc;
 }
 
@@ -1304,6 +2111,119 @@ done:
 	return rc;
 }
 
+static void
+otx2_nix_dev_stop(struct rte_eth_dev *eth_dev)
+{
+	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+	struct rte_mbuf *rx_pkts[32];
+	struct otx2_eth_rxq *rxq;
+	int count, i, j, rc;
+
+	nix_lf_switch_header_type_enable(dev, false);
+	nix_cgx_stop_link_event(dev);
+	npc_rx_disable(dev);
+
+	/* Stop rx queues and free up pkts pending */
+	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
+		rc = otx2_nix_rx_queue_stop(eth_dev, i);
+		if (rc)
+			continue;
+
+		rxq = eth_dev->data->rx_queues[i];
+		count = dev->rx_pkt_burst_no_offload(rxq, rx_pkts, 32);
+		while (count) {
+			for (j = 0; j < count; j++)
+				rte_pktmbuf_free(rx_pkts[j]);
+			count = dev->rx_pkt_burst_no_offload(rxq, rx_pkts, 32);
+		}
+	}
+
+	/* Stop tx queues */
+	for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
+		otx2_nix_tx_queue_stop(eth_dev, i);
+}
+
+static int
+otx2_nix_dev_start(struct rte_eth_dev *eth_dev)
+{
+	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+	int rc, i;
+
+	/* MTU recalculate should be avoided here if PTP is enabled by PF, as
+	 * otx2_nix_recalc_mtu would be invoked during otx2_nix_ptp_enable_vf
+	 * call below.
+	 */
+	if (eth_dev->data->nb_rx_queues != 0 && !otx2_ethdev_is_ptp_en(dev)) {
+		rc = otx2_nix_recalc_mtu(eth_dev);
+		if (rc)
+			return rc;
+	}
+
+	/* Start rx queues */
+	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
+		rc = otx2_nix_rx_queue_start(eth_dev, i);
+		if (rc)
+			return rc;
+	}
+
+	/* Start tx queues */
+	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
+		rc = otx2_nix_tx_queue_start(eth_dev, i);
+		if (rc)
+			return rc;
+	}
+
+	rc = otx2_nix_update_flow_ctrl_mode(eth_dev);
+	if (rc) {
+		otx2_err("Failed to update flow ctrl mode %d", rc);
+		return rc;
+	}
+
+	/* Enable PTP if it was requested by the app or if it is already
+	 * enabled in PF owning this VF
+	 */
+	memset(&dev->tstamp, 0, sizeof(struct otx2_timesync_info));
+	if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) ||
+	    otx2_ethdev_is_ptp_en(dev))
+		otx2_nix_timesync_enable(eth_dev);
+	else
+		otx2_nix_timesync_disable(eth_dev);
+
+	/* Update VF about data off shifted by 8 bytes if PTP already
+	 * enabled in PF owning this VF
+	 */
+	if (otx2_ethdev_is_ptp_en(dev) && otx2_dev_is_vf(dev))
+		otx2_nix_ptp_enable_vf(eth_dev);
+
+	rc = npc_rx_enable(dev);
+	if (rc) {
+		otx2_err("Failed to enable NPC rx %d", rc);
+		return rc;
+	}
+
+	otx2_nix_toggle_flag_link_cfg(dev, true);
+
+	rc = nix_cgx_start_link_event(dev);
+	if (rc) {
+		otx2_err("Failed to start cgx link event %d", rc);
+		goto rx_disable;
+	}
+
+	otx2_nix_toggle_flag_link_cfg(dev, false);
+	otx2_eth_set_tx_function(eth_dev);
+	otx2_eth_set_rx_function(eth_dev);
+
+	return 0;
+
+rx_disable:
+	npc_rx_disable(dev);
+	otx2_nix_toggle_flag_link_cfg(dev, false);
+	return rc;
+}
+
+static int otx2_nix_dev_reset(struct rte_eth_dev *eth_dev);
+static void otx2_nix_dev_close(struct rte_eth_dev *eth_dev);
+
 /* Initialize and register driver with DPDK Application */
 static const struct eth_dev_ops otx2_eth_dev_ops = {
 	.dev_infos_get            = otx2_nix_info_get,
@@ -1311,19 +2231,29 @@ static const struct eth_dev_ops otx2_eth_dev_ops = {
 	.link_update              = otx2_nix_link_update,
 	.tx_queue_setup           = otx2_nix_tx_queue_setup,
 	.tx_queue_release         = otx2_nix_tx_queue_release,
+	.tm_ops_get               = otx2_nix_tm_ops_get,
 	.rx_queue_setup           = otx2_nix_rx_queue_setup,
 	.rx_queue_release         = otx2_nix_rx_queue_release,
+	.dev_start                = otx2_nix_dev_start,
+	.dev_stop                 = otx2_nix_dev_stop,
+	.dev_close                = otx2_nix_dev_close,
 	.tx_queue_start           = otx2_nix_tx_queue_start,
 	.tx_queue_stop            = otx2_nix_tx_queue_stop,
 	.rx_queue_start           = otx2_nix_rx_queue_start,
 	.rx_queue_stop            = otx2_nix_rx_queue_stop,
+	.dev_set_link_up          = otx2_nix_dev_set_link_up,
+	.dev_set_link_down        = otx2_nix_dev_set_link_down,
 	.dev_supported_ptypes_get = otx2_nix_supported_ptypes_get,
+	.dev_ptypes_set           = otx2_nix_ptypes_set,
+	.dev_reset                = otx2_nix_dev_reset,
 	.stats_get                = otx2_nix_dev_stats_get,
 	.stats_reset              = otx2_nix_dev_stats_reset,
 	.get_reg                  = otx2_nix_dev_get_reg,
+	.mtu_set                  = otx2_nix_mtu_set,
 	.mac_addr_add             = otx2_nix_mac_addr_add,
 	.mac_addr_remove          = otx2_nix_mac_addr_del,
 	.mac_addr_set             = otx2_nix_mac_addr_set,
+	.set_mc_addr_list         = otx2_nix_set_mc_addr_list,
 	.promiscuous_enable       = otx2_nix_promisc_enable,
 	.promiscuous_disable      = otx2_nix_promisc_disable,
 	.allmulticast_enable      = otx2_nix_allmulticast_enable,
@@ -1340,14 +2270,19 @@ static const struct eth_dev_ops otx2_eth_dev_ops = {
 	.xstats_get_names_by_id   = otx2_nix_xstats_get_names_by_id,
 	.rxq_info_get             = otx2_nix_rxq_info_get,
 	.txq_info_get             = otx2_nix_txq_info_get,
+	.rx_burst_mode_get        = otx2_rx_burst_mode_get,
+	.tx_burst_mode_get        = otx2_tx_burst_mode_get,
 	.rx_queue_count           = otx2_nix_rx_queue_count,
 	.rx_descriptor_done       = otx2_nix_rx_descriptor_done,
 	.rx_descriptor_status     = otx2_nix_rx_descriptor_status,
+	.tx_descriptor_status     = otx2_nix_tx_descriptor_status,
 	.tx_done_cleanup          = otx2_nix_tx_done_cleanup,
+	.set_queue_rate_limit     = otx2_nix_tm_set_queue_rate_limit,
 	.pool_ops_supported       = otx2_nix_pool_ops_supported,
 	.filter_ctrl              = otx2_nix_dev_filter_ctrl,
 	.get_module_info          = otx2_nix_get_module_info,
 	.get_module_eeprom        = otx2_nix_get_module_eeprom,
+	.fw_version_get           = otx2_nix_fw_version_get,
 	.flow_ctrl_get            = otx2_nix_flow_ctrl_get,
 	.flow_ctrl_set            = otx2_nix_flow_ctrl_set,
 	.timesync_enable          = otx2_nix_timesync_enable,
@@ -1357,6 +2292,14 @@ static const struct eth_dev_ops otx2_eth_dev_ops = {
 	.timesync_adjust_time     = otx2_nix_timesync_adjust_time,
 	.timesync_read_time       = otx2_nix_timesync_read_time,
 	.timesync_write_time      = otx2_nix_timesync_write_time,
+	.vlan_offload_set         = otx2_nix_vlan_offload_set,
+	.vlan_filter_set          = otx2_nix_vlan_filter_set,
+	.vlan_strip_queue_set     = otx2_nix_vlan_strip_queue_set,
+	.vlan_tpid_set            = otx2_nix_vlan_tpid_set,
+	.vlan_pvid_set            = otx2_nix_vlan_pvid_set,
+	.rx_queue_intr_enable     = otx2_nix_rx_queue_intr_enable,
+	.rx_queue_intr_disable    = otx2_nix_rx_queue_intr_disable,
+	.read_clock               = otx2_nix_read_clock,
 };
 
 static inline int
@@ -1408,6 +2351,29 @@ otx2_eth_dev_lf_detach(struct otx2_mbox *mbox)
 	return otx2_mbox_process(mbox);
 }
 
+static bool
+otx2_eth_dev_is_sdp(struct rte_pci_device *pci_dev)
+{
+	if (pci_dev->id.device_id == PCI_DEVID_OCTEONTX2_RVU_SDP_PF ||
+	    pci_dev->id.device_id == PCI_DEVID_OCTEONTX2_RVU_SDP_VF)
+		return true;
+	return false;
+}
+
+static inline uint64_t
+nix_get_blkaddr(struct otx2_eth_dev *dev)
+{
+	uint64_t reg;
+
+	/* Reading the discovery register to know which NIX is the LF
+	 * attached to.
+	 */
+	reg = otx2_read64(dev->bar2 +
+			  RVU_PF_BLOCK_ADDRX_DISC(RVU_BLOCK_ADDR_NIX0));
+
+	return reg & 0x1FFULL ? RVU_BLOCK_ADDR_NIX0 : RVU_BLOCK_ADDR_NIX1;
+}
+
 static int
 otx2_eth_dev_init(struct rte_eth_dev *eth_dev)
 {
@@ -1451,6 +2417,10 @@ otx2_eth_dev_init(struct rte_eth_dev *eth_dev)
 			goto error;
 		}
 	}
+	if (otx2_eth_dev_is_sdp(pci_dev))
+		dev->sdp_link = true;
+	else
+		dev->sdp_link = false;
 	/* Device generic callbacks */
 	dev->ops = &otx2_dev_ops;
 	dev->eth_dev = eth_dev;
@@ -1462,7 +2432,7 @@ otx2_eth_dev_init(struct rte_eth_dev *eth_dev)
 	dev->configured = 0;
 	dev->drv_inited = true;
-	dev->base = dev->bar2 + (RVU_BLOCK_ADDR_NIX0 << 20);
+	dev->ptype_disable = 0;
 	dev->lmt_addr = dev->bar2 + (RVU_BLOCK_ADDR_LMT << 20);
 
 	/* Attach NIX LF */
@@ -1470,6 +2440,8 @@ otx2_eth_dev_init(struct rte_eth_dev *eth_dev)
 	if (rc)
 		goto otx2_npa_uninit;
 
+	dev->base = dev->bar2 + (nix_get_blkaddr(dev) << 20);
+
 	/* Get NIX MSIX offset */
 	rc = nix_lf_get_msix_offset(dev);
 	if (rc)
@@ -1520,15 +2492,25 @@ otx2_eth_dev_init(struct rte_eth_dev *eth_dev)
 	dev->tx_offload_capa = nix_get_tx_offload_capa(dev);
 	dev->rx_offload_capa = nix_get_rx_offload_capa(dev);
 
-	if (otx2_dev_is_A0(dev)) {
+	if (otx2_dev_is_96xx_A0(dev) ||
+	    otx2_dev_is_95xx_Ax(dev)) {
 		dev->hwcap |= OTX2_FIXUP_F_MIN_4K_Q;
 		dev->hwcap |= OTX2_FIXUP_F_LIMIT_CQ_FULL;
 	}
 
+	/* Create security ctx */
+	rc = otx2_eth_sec_ctx_create(eth_dev);
+	if (rc)
+		goto free_mac_addrs;
+	dev->tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
+	dev->rx_offload_capa |= DEV_RX_OFFLOAD_SECURITY;
+
 	/* Initialize rte-flow */
 	rc = otx2_flow_init(dev);
 	if (rc)
-		goto free_mac_addrs;
+		goto sec_ctx_destroy;
+
+	otx2_nix_mc_filter_init(dev);
 
 	otx2_nix_dbg("Port=%d pf=%d vf=%d ver=%s msix_off=%d hwcap=0x%" PRIx64
 		     " rxoffload_capa=0x%" PRIx64 " txoffload_capa=0x%" PRIx64,
@@ -1537,6 +2519,8 @@ otx2_eth_dev_init(struct rte_eth_dev *eth_dev)
 
 	return 0;
 
+sec_ctx_destroy:
+	otx2_eth_sec_ctx_destroy(eth_dev);
 free_mac_addrs:
 	rte_free(eth_dev->data->mac_addrs);
 unregister_irq:
@@ -1563,16 +2547,29 @@ otx2_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool mbox_close)
 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
 		return 0;
 
+	/* Clear the flag since we are closing down */
+	dev->configured = 0;
+
 	/* Disable nix bpid config */
 	otx2_nix_rxchan_bpid_cfg(eth_dev, false);
 
+	npc_rx_disable(dev);
+
+	/* Disable vlan offloads */
+	otx2_nix_vlan_fini(eth_dev);
+
 	/* Disable other rte_flow entries */
 	otx2_flow_fini(dev);
 
+	/* Free multicast filter list */
+	otx2_nix_mc_filter_fini(dev);
+
 	/* Disable PTP if already enabled */
 	if (otx2_ethdev_is_ptp_en(dev))
 		otx2_nix_timesync_disable(eth_dev);
 
+	nix_cgx_stop_link_event(dev);
+
 	/* Free up SQs */
 	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
 		otx2_nix_tx_queue_release(eth_dev->data->tx_queues[i]);
@@ -1595,6 +2592,10 @@ otx2_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool mbox_close)
 	/* Unregister queue irqs */
 	oxt2_nix_unregister_queue_irqs(eth_dev);
 
+	/* Unregister cq irqs */
+	if (eth_dev->data->dev_conf.intr_conf.rxq)
+		oxt2_nix_unregister_cq_irqs(eth_dev);
+
 	rc = nix_lf_free(dev);
 	if (rc)
 		otx2_err("Failed to free nix lf, rc=%d", rc);
@@ -1603,6 +2604,12 @@ otx2_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool mbox_close)
 	if (rc)
 		otx2_err("Failed to cleanup npa lf, rc=%d", rc);
 
+	/* Disable security */
+	otx2_eth_sec_fini(eth_dev);
+
+	/* Destroy security ctx */
+	otx2_eth_sec_ctx_destroy(eth_dev);
+
 	rte_free(eth_dev->data->mac_addrs);
 	eth_dev->data->mac_addrs = NULL;
 	dev->drv_inited = false;
@@ -1628,6 +2635,24 @@ otx2_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool mbox_close)
 	return 0;
 }
 
+static void
+otx2_nix_dev_close(struct rte_eth_dev *eth_dev)
+{
+	otx2_eth_dev_uninit(eth_dev, true);
+}
+
+static int
+otx2_nix_dev_reset(struct rte_eth_dev *eth_dev)
+{
+	int rc;
+
+	rc = otx2_eth_dev_uninit(eth_dev, false);
+	if (rc)
+		return rc;
+
+	return otx2_eth_dev_init(eth_dev);
+}
+
 static int
 nix_remove(struct rte_pci_device *pci_dev)
 {
@@ -1700,6 +2725,14 @@ static const struct rte_pci_id pci_nix_map[] = {
 		RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
 			       PCI_DEVID_OCTEONTX2_RVU_AF_VF)
 	},
+	{
+		RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
+			       PCI_DEVID_OCTEONTX2_RVU_SDP_PF)
+	},
+	{
+		RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
+			       PCI_DEVID_OCTEONTX2_RVU_SDP_VF)
+	},
 	{
 		.vendor_id = 0,
 	},
@@ -1707,7 +2740,7 @@ static const struct rte_pci_id pci_nix_map[] = {
 
 static struct rte_pci_driver pci_nix = {
 	.id_table = pci_nix_map,
-	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_IOVA_AS_VA |
+	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_NEED_IOVA_AS_VA |
 			RTE_PCI_DRV_INTR_LSC,
 	.probe = nix_probe,
 	.remove = nix_remove,