+/* Translate the configured Rx offloads into the NIX_RX_OFFLOAD_* flag set
+ * used by the Rx fast path.
+ */
+static uint16_t
+nix_rx_offload_flags(struct rte_eth_dev *eth_dev)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ struct rte_eth_dev_data *data = eth_dev->data;
+ struct rte_eth_conf *conf = &data->dev_conf;
+ struct rte_eth_rxmode *rxmode = &conf->rxmode;
+ uint16_t flags = 0;
+
+ if (rxmode->mq_mode == ETH_MQ_RX_RSS &&
+ (dev->rx_offloads & DEV_RX_OFFLOAD_RSS_HASH))
+ flags |= NIX_RX_OFFLOAD_RSS_F;
+
+ if (dev->rx_offloads & (DEV_RX_OFFLOAD_TCP_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM))
+ flags |= NIX_RX_OFFLOAD_CHECKSUM_F;
+
+ if (dev->rx_offloads & (DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM))
+ flags |= NIX_RX_OFFLOAD_CHECKSUM_F;
+
+ if (dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)
+ flags |= NIX_RX_MULTI_SEG_F;
+
+ if (dev->rx_offloads & (DEV_RX_OFFLOAD_VLAN_STRIP |
+ DEV_RX_OFFLOAD_QINQ_STRIP))
+ flags |= NIX_RX_OFFLOAD_VLAN_STRIP_F;
+
+ if (dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP)
+ flags |= NIX_RX_OFFLOAD_TSTAMP_F;
+
+ if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY)
+ flags |= NIX_RX_OFFLOAD_SECURITY_F;
+
+ if (!dev->ptype_disable)
+ flags |= NIX_RX_OFFLOAD_PTYPE_F;
+
+ return flags;
+}
+
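+/* Translate the configured Tx offloads into the NIX_TX_OFFLOAD_* flag set
+ * used by the Tx fast path.
+ */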
+static uint16_t
+nix_tx_offload_flags(struct rte_eth_dev *eth_dev)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ uint64_t conf = dev->tx_offloads;
+ uint16_t flags = 0;
+
+ /* The Tx fast path hard-codes these mbuf flag values and field
+ * offsets; fail the build if the mbuf layout ever changes.
+ */
+ RTE_BUILD_BUG_ON(PKT_TX_TCP_CKSUM != (1ULL << 52));
+ RTE_BUILD_BUG_ON(PKT_TX_SCTP_CKSUM != (2ULL << 52));
+ RTE_BUILD_BUG_ON(PKT_TX_UDP_CKSUM != (3ULL << 52));
+ RTE_BUILD_BUG_ON(PKT_TX_IP_CKSUM != (1ULL << 54));
+ RTE_BUILD_BUG_ON(PKT_TX_IPV4 != (1ULL << 55));
+ RTE_BUILD_BUG_ON(PKT_TX_OUTER_IP_CKSUM != (1ULL << 58));
+ RTE_BUILD_BUG_ON(PKT_TX_OUTER_IPV4 != (1ULL << 59));
+ RTE_BUILD_BUG_ON(PKT_TX_OUTER_IPV6 != (1ULL << 60));
+ RTE_BUILD_BUG_ON(PKT_TX_OUTER_UDP_CKSUM != (1ULL << 41));
+ RTE_BUILD_BUG_ON(RTE_MBUF_L2_LEN_BITS != 7);
+ RTE_BUILD_BUG_ON(RTE_MBUF_L3_LEN_BITS != 9);
+ RTE_BUILD_BUG_ON(RTE_MBUF_OUTL2_LEN_BITS != 7);
+ RTE_BUILD_BUG_ON(RTE_MBUF_OUTL3_LEN_BITS != 9);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_off) !=
+ offsetof(struct rte_mbuf, buf_iova) + 8);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) !=
+ offsetof(struct rte_mbuf, buf_iova) + 16);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
+ offsetof(struct rte_mbuf, ol_flags) + 12);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, tx_offload) !=
+ offsetof(struct rte_mbuf, pool) + 2 * sizeof(void *));
+
+ if (conf & DEV_TX_OFFLOAD_VLAN_INSERT ||
+ conf & DEV_TX_OFFLOAD_QINQ_INSERT)
+ flags |= NIX_TX_OFFLOAD_VLAN_QINQ_F;
+
+ if (conf & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
+ conf & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)
+ flags |= NIX_TX_OFFLOAD_OL3_OL4_CSUM_F;
+
+ if (conf & DEV_TX_OFFLOAD_IPV4_CKSUM ||
+ conf & DEV_TX_OFFLOAD_TCP_CKSUM ||
+ conf & DEV_TX_OFFLOAD_UDP_CKSUM ||
+ conf & DEV_TX_OFFLOAD_SCTP_CKSUM)
+ flags |= NIX_TX_OFFLOAD_L3_L4_CSUM_F;
+
+ if (!(conf & DEV_TX_OFFLOAD_MBUF_FAST_FREE))
+ flags |= NIX_TX_OFFLOAD_MBUF_NOFF_F;
+
+ if (conf & DEV_TX_OFFLOAD_MULTI_SEGS)
+ flags |= NIX_TX_MULTI_SEG_F;
+
+ /* Enable Inner checksum for TSO */
+ if (conf & DEV_TX_OFFLOAD_TCP_TSO)
+ flags |= (NIX_TX_OFFLOAD_TSO_F |
+ NIX_TX_OFFLOAD_L3_L4_CSUM_F);
+
+ /* Enable Inner and Outer checksum for Tunnel TSO */
+ if (conf & (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
+ DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
+ DEV_TX_OFFLOAD_GRE_TNL_TSO))
+ flags |= (NIX_TX_OFFLOAD_TSO_F |
+ NIX_TX_OFFLOAD_OL3_OL4_CSUM_F |
+ NIX_TX_OFFLOAD_L3_L4_CSUM_F);
+
+ if (conf & DEV_TX_OFFLOAD_SECURITY)
+ flags |= NIX_TX_OFFLOAD_SECURITY_F;
+
+ /* Timestamping is enabled via the Rx offload; reflect it on Tx too */
+ if (dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP)
+ flags |= NIX_TX_OFFLOAD_TSTAMP_F;
+
+ return flags;
+}
+
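+/* Lock the SQB mempool's AURA and POOL contexts in NDC. Both requests
+ * are queued on the mailbox and processed together by otx2_mbox_process().
+ */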
+static int
+nix_sqb_lock(struct rte_mempool *mp)
+{
+ struct otx2_npa_lf *npa_lf = otx2_intra_dev_get_cfg()->npa_lf;
+ struct npa_aq_enq_req *req;
+ int rc;
+
+ req = otx2_mbox_alloc_msg_npa_aq_enq(npa_lf->mbox);
+ if (!req) {
+ otx2_err("Failed to LOCK AURA context");
+ return -ENOMEM;
+ }
+
+ req->aura_id = npa_lf_aura_handle_to_aura(mp->pool_id);
+ req->ctype = NPA_AQ_CTYPE_AURA;
+ req->op = NPA_AQ_INSTOP_LOCK;
+
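+ /* Queue a second request to lock the POOL context */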
+ req = otx2_mbox_alloc_msg_npa_aq_enq(npa_lf->mbox);
+ if (!req) {
+ /* The shared memory buffer can be full.
+ * Flush it and retry
+ */
+ otx2_mbox_msg_send(npa_lf->mbox, 0);
+ rc = otx2_mbox_wait_for_rsp(npa_lf->mbox, 0);
+ if (rc < 0) {
+ otx2_err("Failed to LOCK AURA context");
+ return rc;
+ }
+
+ req = otx2_mbox_alloc_msg_npa_aq_enq(npa_lf->mbox);
+ if (!req) {
+ otx2_err("Failed to LOCK POOL context");
+ return -ENOMEM;
+ }
+ }
+
+ req->aura_id = npa_lf_aura_handle_to_aura(mp->pool_id);
+ req->ctype = NPA_AQ_CTYPE_POOL;
+ req->op = NPA_AQ_INSTOP_LOCK;
+
+ rc = otx2_mbox_process(npa_lf->mbox);
+ if (rc < 0) {
+ otx2_err("Unable to lock POOL in NDC");
+ return rc;
+ }
+
+ return 0;
+}
+
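+/* Release the NDC locks taken by nix_sqb_lock() on the SQB mempool's
+ * AURA and POOL contexts.
+ */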
+static int
+nix_sqb_unlock(struct rte_mempool *mp)
+{
+ struct otx2_npa_lf *npa_lf = otx2_intra_dev_get_cfg()->npa_lf;
+ struct npa_aq_enq_req *req;
+ int rc;
+
+ req = otx2_mbox_alloc_msg_npa_aq_enq(npa_lf->mbox);
+ if (!req) {
+ otx2_err("Failed to UNLOCK AURA context");
+ return -ENOMEM;
+ }
+
+ req->aura_id = npa_lf_aura_handle_to_aura(mp->pool_id);
+ req->ctype = NPA_AQ_CTYPE_AURA;
+ req->op = NPA_AQ_INSTOP_UNLOCK;
+
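+ /* Queue a second request to unlock the POOL context */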
+ req = otx2_mbox_alloc_msg_npa_aq_enq(npa_lf->mbox);
+ if (!req) {
+ /* The shared memory buffer can be full.
+ * Flush it and retry
+ */
+ otx2_mbox_msg_send(npa_lf->mbox, 0);
+ rc = otx2_mbox_wait_for_rsp(npa_lf->mbox, 0);
+ if (rc < 0) {
+ otx2_err("Failed to UNLOCK AURA context");
+ return rc;
+ }
+
+ req = otx2_mbox_alloc_msg_npa_aq_enq(npa_lf->mbox);
+ if (!req) {
+ otx2_err("Failed to UNLOCK POOL context");
+ return -ENOMEM;
+ }
+ }
+
+ req->aura_id = npa_lf_aura_handle_to_aura(mp->pool_id);
+ req->ctype = NPA_AQ_CTYPE_POOL;
+ req->op = NPA_AQ_INSTOP_UNLOCK;
+
+ rc = otx2_mbox_process(npa_lf->mbox);
+ if (rc < 0) {
+ otx2_err("Unable to UNLOCK AURA in NDC");
+ return rc;
+ }
+
+ return 0;
+}
+
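+/* When the maximum Rx packet length exceeds a single mbuf's data room,
+ * turn on Rx scatter and Tx multi-segment support and refresh the
+ * fast-path offload flags accordingly.
+ */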
+void
+otx2_nix_enable_mseg_on_jumbo(struct otx2_eth_rxq *rxq)
+{
+ struct rte_pktmbuf_pool_private *mbp_priv;
+ struct rte_eth_dev *eth_dev;
+ struct otx2_eth_dev *dev;
+ uint32_t buffsz;
+
+ eth_dev = rxq->eth_dev;
+ dev = otx2_eth_pmd_priv(eth_dev);
+
+ /* Get rx buffer size */
+ mbp_priv = rte_mempool_get_priv(rxq->pool);
+ buffsz = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
+
+ if (eth_dev->data->dev_conf.rxmode.max_rx_pkt_len > buffsz) {
+ dev->rx_offloads |= DEV_RX_OFFLOAD_SCATTER;
+ dev->tx_offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+
+ /* Refresh the fast-path flags since rx_offloads and
+ * tx_offloads changed above.
+ */
+ dev->rx_offload_flags |= nix_rx_offload_flags(eth_dev);
+ dev->tx_offload_flags |= nix_tx_offload_flags(eth_dev);
+ }
+}
+