+static int
+nix_lf_switch_header_type_enable(struct otx2_eth_dev *dev, bool enable)
+{
+ struct otx2_mbox *mbox = dev->mbox;
+ struct npc_set_pkind *req;
+ struct msg_resp *rsp;
+ int rc;
+
+ if (dev->npc_flow.switch_header_type == 0)
+ return 0;
+
+ /* Notify AF about switch header (e.g. HIGIG2) config */
+ req = otx2_mbox_alloc_msg_npc_set_pkind(mbox);
+ req->mode = dev->npc_flow.switch_header_type;
+ if (enable == 0)
+ req->mode = OTX2_PRIV_FLAGS_DEFAULT;
+ req->dir = PKIND_RX;
+ rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+ if (rc)
+ return rc;
+ req = otx2_mbox_alloc_msg_npc_set_pkind(mbox);
+ req->mode = dev->npc_flow.switch_header_type;
+ if (enable == 0)
+ req->mode = OTX2_PRIV_FLAGS_DEFAULT;
+ req->dir = PKIND_TX;
+ return otx2_mbox_process_msg(mbox, (void *)&rsp);
+}
+
+static int
+nix_lf_free(struct otx2_eth_dev *dev)
+{
+ struct otx2_mbox *mbox = dev->mbox;
+ struct nix_lf_free_req *req;
+ struct ndc_sync_op *ndc_req;
+ int rc;
+
+ /* Sync NDC-NIX for LF */
+ ndc_req = otx2_mbox_alloc_msg_ndc_sync_op(mbox);
+ ndc_req->nix_lf_tx_sync = 1;
+ ndc_req->nix_lf_rx_sync = 1;
+ rc = otx2_mbox_process(mbox);
+ if (rc)
+ otx2_err("Error on NDC-NIX-[TX, RX] LF sync, rc %d", rc);
+
+ req = otx2_mbox_alloc_msg_nix_lf_free(mbox);
+ /* Let the AF driver free all of this NIX LF's
+ * NPC entries allocated using the NPC mailbox.
+ */
+ req->flags = 0;
+
+ return otx2_mbox_process(mbox);
+}
+
+int
+otx2_cgx_rxtx_start(struct otx2_eth_dev *dev)
+{
+ struct otx2_mbox *mbox = dev->mbox;
+
+ if (otx2_dev_is_vf_or_sdp(dev))
+ return 0;
+
+ otx2_mbox_alloc_msg_cgx_start_rxtx(mbox);
+
+ return otx2_mbox_process(mbox);
+}
+
+int
+otx2_cgx_rxtx_stop(struct otx2_eth_dev *dev)
+{
+ struct otx2_mbox *mbox = dev->mbox;
+
+ if (otx2_dev_is_vf_or_sdp(dev))
+ return 0;
+
+ otx2_mbox_alloc_msg_cgx_stop_rxtx(mbox);
+
+ return otx2_mbox_process(mbox);
+}
+
+static int
+npc_rx_enable(struct otx2_eth_dev *dev)
+{
+ struct otx2_mbox *mbox = dev->mbox;
+
+ otx2_mbox_alloc_msg_nix_lf_start_rx(mbox);
+
+ return otx2_mbox_process(mbox);
+}
+
+static int
+npc_rx_disable(struct otx2_eth_dev *dev)
+{
+ struct otx2_mbox *mbox = dev->mbox;
+
+ otx2_mbox_alloc_msg_nix_lf_stop_rx(mbox);
+
+ return otx2_mbox_process(mbox);
+}
+
+static int
+nix_cgx_start_link_event(struct otx2_eth_dev *dev)
+{
+ struct otx2_mbox *mbox = dev->mbox;
+
+ if (otx2_dev_is_vf_or_sdp(dev))
+ return 0;
+
+ otx2_mbox_alloc_msg_cgx_start_linkevents(mbox);
+
+ return otx2_mbox_process(mbox);
+}
+
+static int
+cgx_intlbk_enable(struct otx2_eth_dev *dev, bool en)
+{
+ struct otx2_mbox *mbox = dev->mbox;
+
+ if (en && otx2_dev_is_vf_or_sdp(dev))
+ return -ENOTSUP;
+
+ if (en)
+ otx2_mbox_alloc_msg_cgx_intlbk_enable(mbox);
+ else
+ otx2_mbox_alloc_msg_cgx_intlbk_disable(mbox);
+
+ return otx2_mbox_process(mbox);
+}
+
+static int
+nix_cgx_stop_link_event(struct otx2_eth_dev *dev)
+{
+ struct otx2_mbox *mbox = dev->mbox;
+
+ if (otx2_dev_is_vf_or_sdp(dev))
+ return 0;
+
+ otx2_mbox_alloc_msg_cgx_stop_linkevents(mbox);
+
+ return otx2_mbox_process(mbox);
+}
+
+static inline void
+nix_rx_queue_reset(struct otx2_eth_rxq *rxq)
+{
+ rxq->head = 0;
+ rxq->available = 0;
+}
+
+static inline uint32_t
+nix_qsize_to_val(enum nix_q_size_e qsize)
+{
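+ /* Each step of the qsize enum quadruples the ring depth:
+ * 16, 64, 256, ... entries.
+ */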
+ return (16UL << (qsize * 2));
+}
+
+static inline enum nix_q_size_e
+nix_qsize_clampup_get(struct otx2_eth_dev *dev, uint32_t val)
+{
+ int i;
+
+ if (otx2_ethdev_fixup_is_min_4k_q(dev))
+ i = nix_q_size_4K;
+ else
+ i = nix_q_size_16;
+
+ for (; i < nix_q_size_max; i++)
+ if (val <= nix_qsize_to_val(i))
+ break;
+
+ if (i >= nix_q_size_max)
+ i = nix_q_size_max - 1;
+
+ return i;
+}
+
+static int
+nix_cq_rq_init(struct rte_eth_dev *eth_dev, struct otx2_eth_dev *dev,
+ uint16_t qid, struct otx2_eth_rxq *rxq, struct rte_mempool *mp)
+{
+ struct otx2_mbox *mbox = dev->mbox;
+ const struct rte_memzone *rz;
+ uint32_t ring_size, cq_size;
+ struct nix_aq_enq_req *aq;
+ uint16_t first_skip;
+ int rc;
+
+ cq_size = rxq->qlen;
+ ring_size = cq_size * NIX_CQ_ENTRY_SZ;
+ rz = rte_eth_dma_zone_reserve(eth_dev, "cq", qid, ring_size,
+ NIX_CQ_ALIGN, dev->node);
+ if (rz == NULL) {
+ otx2_err("Failed to allocate mem for cq hw ring");
+ rc = -ENOMEM;
+ goto fail;
+ }
+ memset(rz->addr, 0, rz->len);
+ rxq->desc = (uintptr_t)rz->addr;
+ rxq->qmask = cq_size - 1;
+
+ aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
+ aq->qidx = qid;
+ aq->ctype = NIX_AQ_CTYPE_CQ;
+ aq->op = NIX_AQ_INSTOP_INIT;
+
+ aq->cq.ena = 1;
+ aq->cq.caching = 1;
+ aq->cq.qsize = rxq->qsize;
+ aq->cq.base = rz->iova;
+ aq->cq.avg_level = 0xff;
+ aq->cq.cq_err_int_ena = BIT(NIX_CQERRINT_CQE_FAULT);
+ aq->cq.cq_err_int_ena |= BIT(NIX_CQERRINT_DOOR_ERR);
+
+ /* Many to one reduction */
+ aq->cq.qint_idx = qid % dev->qints;
+ /* Map CQ0 [RQ0] to CINT0 and so on till max 64 irqs */
+ aq->cq.cint_idx = qid;
+
+ if (otx2_ethdev_fixup_is_limit_cq_full(dev)) {
+ const float rx_cq_skid = NIX_CQ_FULL_ERRATA_SKID;
+ uint16_t min_rx_drop;
+
+ min_rx_drop = ceil(rx_cq_skid / (float)cq_size);
+ aq->cq.drop = min_rx_drop;
+ aq->cq.drop_ena = 1;
+ rxq->cq_drop = min_rx_drop;
+ } else {
+ rxq->cq_drop = NIX_CQ_THRESH_LEVEL;
+ aq->cq.drop = rxq->cq_drop;
+ aq->cq.drop_ena = 1;
+ }
+
+ /* TX pause frames enable flowctrl on RX side */
+ if (dev->fc_info.tx_pause) {
+ /* Single bpid is allocated for all rx channels for now */
+ aq->cq.bpid = dev->fc_info.bpid[0];
+ aq->cq.bp = rxq->cq_drop;
+ aq->cq.bp_ena = 1;
+ }
+
+ rc = otx2_mbox_process(mbox);
+ if (rc) {
+ otx2_err("Failed to init cq context");
+ goto fail;
+ }
+
+ aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
+ aq->qidx = qid;
+ aq->ctype = NIX_AQ_CTYPE_RQ;
+ aq->op = NIX_AQ_INSTOP_INIT;
+
+ aq->rq.sso_ena = 0;
+ aq->rq.cq = qid; /* RQ to CQ 1:1 mapped */
+ aq->rq.spb_ena = 0;
+ aq->rq.lpb_aura = npa_lf_aura_handle_to_aura(mp->pool_id);
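+ /* First skip is the number of bytes NIX skips in the buffer
+ * before writing packet data: mbuf header, headroom and the
+ * mempool private area.
+ */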
+ first_skip = (sizeof(struct rte_mbuf));
+ first_skip += RTE_PKTMBUF_HEADROOM;
+ first_skip += rte_pktmbuf_priv_size(mp);
+ rxq->data_off = first_skip;
+
+ first_skip /= 8; /* Expressed in number of dwords */
+ aq->rq.first_skip = first_skip;
+ aq->rq.later_skip = (sizeof(struct rte_mbuf) / 8);
+ aq->rq.flow_tagw = 32; /* 32-bits */
+ aq->rq.lpb_sizem1 = rte_pktmbuf_data_room_size(mp);
+ aq->rq.lpb_sizem1 += rte_pktmbuf_priv_size(mp);
+ aq->rq.lpb_sizem1 += sizeof(struct rte_mbuf);
+ aq->rq.lpb_sizem1 /= 8;
+ aq->rq.lpb_sizem1 -= 1; /* Expressed in size minus one */
+ aq->rq.ena = 1;
+ aq->rq.pb_caching = 0x2; /* First cache aligned block to LLC */
+ aq->rq.xqe_imm_size = 0; /* No pkt data copy to CQE */
+ aq->rq.rq_int_ena = 0;
+ /* Many to one reduction */
+ aq->rq.qint_idx = qid % dev->qints;
+
+ aq->rq.xqe_drop_ena = 1;
+
+ rc = otx2_mbox_process(mbox);
+ if (rc) {
+ otx2_err("Failed to init rq context");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ return rc;
+}
+
+static int
+nix_rq_enb_dis(struct rte_eth_dev *eth_dev,
+ struct otx2_eth_rxq *rxq, const bool enb)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ struct otx2_mbox *mbox = dev->mbox;
+ struct nix_aq_enq_req *aq;
+
+ /* Pkts will be dropped silently if RQ is disabled */
+ aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
+ aq->qidx = rxq->rq;
+ aq->ctype = NIX_AQ_CTYPE_RQ;
+ aq->op = NIX_AQ_INSTOP_WRITE;
+
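+ /* For AQ WRITE ops only fields whose mask bits are set are
+ * updated; the request is zero-initialized, so flipping the mask
+ * selects just the ena field.
+ */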
+ aq->rq.ena = enb;
+ aq->rq_mask.ena = ~(aq->rq_mask.ena);
+
+ return otx2_mbox_process(mbox);
+}
+
+static int
+nix_cq_rq_uninit(struct rte_eth_dev *eth_dev, struct otx2_eth_rxq *rxq)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ struct otx2_mbox *mbox = dev->mbox;
+ struct nix_aq_enq_req *aq;
+ int rc;
+
+ /* RQ is already disabled */
+ /* Disable CQ */
+ aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
+ aq->qidx = rxq->rq;
+ aq->ctype = NIX_AQ_CTYPE_CQ;
+ aq->op = NIX_AQ_INSTOP_WRITE;
+
+ aq->cq.ena = 0;
+ aq->cq_mask.ena = ~(aq->cq_mask.ena);
+
+ rc = otx2_mbox_process(mbox);
+ if (rc < 0) {
+ otx2_err("Failed to disable cq context");
+ return rc;
+ }
+
+ return 0;
+}
+
+static inline int
+nix_get_data_off(struct otx2_eth_dev *dev)
+{
+ return otx2_ethdev_is_ptp_en(dev) ? NIX_TIMESYNC_RX_OFFSET : 0;
+}
+
+uint64_t
+otx2_nix_rxq_mbuf_setup(struct otx2_eth_dev *dev, uint16_t port_id)
+{
+ struct rte_mbuf mb_def;
+ uint64_t *tmp;
+
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_off) % 8 != 0);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, refcnt) -
+ offsetof(struct rte_mbuf, data_off) != 2);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, nb_segs) -
+ offsetof(struct rte_mbuf, data_off) != 4);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, port) -
+ offsetof(struct rte_mbuf, data_off) != 6);
+ mb_def.nb_segs = 1;
+ mb_def.data_off = RTE_PKTMBUF_HEADROOM + nix_get_data_off(dev);
+ mb_def.port = port_id;
+ rte_mbuf_refcnt_set(&mb_def, 1);
+
+ /* Prevent compiler reordering: rearm_data covers previous fields */
+ rte_compiler_barrier();
+ tmp = (uint64_t *)&mb_def.rearm_data;
+
+ return *tmp;
+}
+
+static void
+otx2_nix_rx_queue_release(void *rx_queue)
+{
+ struct otx2_eth_rxq *rxq = rx_queue;
+
+ if (!rxq)
+ return;
+
+ otx2_nix_dbg("Releasing rxq %u", rxq->rq);
+ nix_cq_rq_uninit(rxq->eth_dev, rxq);
+ rte_free(rx_queue);
+}
+
+static int
+otx2_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t rq,
+ uint16_t nb_desc, unsigned int socket,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mp)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ struct rte_mempool_ops *ops;
+ struct otx2_eth_rxq *rxq;
+ const char *platform_ops;
+ enum nix_q_size_e qsize;
+ uint64_t offloads;
+ int rc;
+
+ rc = -EINVAL;
+
+ /* Compile time check to make sure all fast path elements are in a cache line */
+ RTE_BUILD_BUG_ON(offsetof(struct otx2_eth_rxq, slow_path_start) >= 128);
+
+ /* Sanity checks */
+ if (rx_conf->rx_deferred_start == 1) {
+ otx2_err("Deferred Rx start is not supported");
+ goto fail;
+ }
+
+ platform_ops = rte_mbuf_platform_mempool_ops();
+ /* This driver needs octeontx2_npa mempool ops to work */
+ ops = rte_mempool_get_ops(mp->ops_index);
+ if (strncmp(ops->name, platform_ops, RTE_MEMPOOL_OPS_NAMESIZE)) {
+ otx2_err("mempool ops should be of octeontx2_npa type");
+ goto fail;
+ }
+
+ if (mp->pool_id == 0) {
+ otx2_err("Invalid pool_id");
+ goto fail;
+ }
+
+ /* Free memory prior to re-allocation if needed */
+ if (eth_dev->data->rx_queues[rq] != NULL) {
+ otx2_nix_dbg("Freeing memory prior to re-allocation %d", rq);
+ otx2_nix_rx_queue_release(eth_dev->data->rx_queues[rq]);
+ eth_dev->data->rx_queues[rq] = NULL;
+ }
+
+ offloads = rx_conf->offloads | eth_dev->data->dev_conf.rxmode.offloads;
+ dev->rx_offloads |= offloads;
+
+ /* Find the CQ queue size */
+ qsize = nix_qsize_clampup_get(dev, nb_desc);
+ /* Allocate rxq memory */
+ rxq = rte_zmalloc_socket("otx2 rxq", sizeof(*rxq), OTX2_ALIGN, socket);
+ if (rxq == NULL) {
+ otx2_err("Failed to allocate rq=%d", rq);
+ rc = -ENOMEM;
+ goto fail;
+ }
+
+ rxq->eth_dev = eth_dev;
+ rxq->rq = rq;
+ rxq->cq_door = dev->base + NIX_LF_CQ_OP_DOOR;
+ rxq->cq_status = (int64_t *)(dev->base + NIX_LF_CQ_OP_STATUS);
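+ /* wdata carries the CQ index in its upper 32 bits; it is the word
+ * used with the NIX_LF_CQ_OP_STATUS op when polling for pending CQEs.
+ */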
+ rxq->wdata = (uint64_t)rq << 32;
+ rxq->aura = npa_lf_aura_handle_to_aura(mp->pool_id);
+ rxq->mbuf_initializer = otx2_nix_rxq_mbuf_setup(dev,
+ eth_dev->data->port_id);
+ rxq->offloads = offloads;
+ rxq->pool = mp;
+ rxq->qlen = nix_qsize_to_val(qsize);
+ rxq->qsize = qsize;
+ rxq->lookup_mem = otx2_nix_fastpath_lookup_mem_get();
+ rxq->tstamp = &dev->tstamp;
+
+ /* Alloc completion queue */
+ rc = nix_cq_rq_init(eth_dev, dev, rq, rxq, mp);
+ if (rc) {
+ otx2_err("Failed to allocate rxq=%u", rq);
+ goto free_rxq;
+ }
+
+ rxq->qconf.socket_id = socket;
+ rxq->qconf.nb_desc = nb_desc;
+ rxq->qconf.mempool = mp;
+ memcpy(&rxq->qconf.conf.rx, rx_conf, sizeof(struct rte_eth_rxconf));
+
+ nix_rx_queue_reset(rxq);
+ otx2_nix_dbg("rq=%d pool=%s qsize=%d nb_desc=%d->%d",
+ rq, mp->name, qsize, nb_desc, rxq->qlen);
+
+ eth_dev->data->rx_queues[rq] = rxq;
+ eth_dev->data->rx_queue_state[rq] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+ /* Calculate the delta and frequency multiplier between the PTP HI
+ * clock and TSC. These are needed to derive the raw clock value
+ * from the TSC counter, which the read_clock eth op returns.
+ */
+ if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) ||
+ otx2_ethdev_is_ptp_en(dev)) {
+ rc = otx2_nix_raw_clock_tsc_conv(dev);
+ if (rc) {
+ otx2_err("Failed to calculate delta and freq mult");
+ goto fail;
+ }
+ }
+
+ return 0;
+
+free_rxq:
+ otx2_nix_rx_queue_release(rxq);
+fail:
+ return rc;
+}
+
+static inline uint8_t
+nix_sq_max_sqe_sz(struct otx2_eth_txq *txq)
+{
+ /*
+ * A maximum of three segments can be supported with W8; choose
+ * NIX_MAXSQESZ_W16 for multi-segment offload.
+ */
+ if (txq->offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+ return NIX_MAXSQESZ_W16;
+ else
+ return NIX_MAXSQESZ_W8;
+}
+
+static uint16_t
+nix_rx_offload_flags(struct rte_eth_dev *eth_dev)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ struct rte_eth_dev_data *data = eth_dev->data;
+ struct rte_eth_conf *conf = &data->dev_conf;
+ struct rte_eth_rxmode *rxmode = &conf->rxmode;
+ uint16_t flags = 0;
+
+ if (rxmode->mq_mode == ETH_MQ_RX_RSS &&
+ (dev->rx_offloads & DEV_RX_OFFLOAD_RSS_HASH))
+ flags |= NIX_RX_OFFLOAD_RSS_F;
+
+ if (dev->rx_offloads & (DEV_RX_OFFLOAD_TCP_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM))
+ flags |= NIX_RX_OFFLOAD_CHECKSUM_F;
+
+ if (dev->rx_offloads & (DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM))
+ flags |= NIX_RX_OFFLOAD_CHECKSUM_F;
+
+ if (dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)
+ flags |= NIX_RX_MULTI_SEG_F;
+
+ if (dev->rx_offloads & (DEV_RX_OFFLOAD_VLAN_STRIP |
+ DEV_RX_OFFLOAD_QINQ_STRIP))
+ flags |= NIX_RX_OFFLOAD_VLAN_STRIP_F;
+
+ if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP))
+ flags |= NIX_RX_OFFLOAD_TSTAMP_F;
+
+ if (!dev->ptype_disable)
+ flags |= NIX_RX_OFFLOAD_PTYPE_F;
+
+ return flags;
+}
+
+static uint16_t
+nix_tx_offload_flags(struct rte_eth_dev *eth_dev)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ uint64_t conf = dev->tx_offloads;
+ uint16_t flags = 0;
+
+ /* Fast path depends on these flag values and mbuf field offsets */
+ RTE_BUILD_BUG_ON(PKT_TX_TCP_CKSUM != (1ULL << 52));
+ RTE_BUILD_BUG_ON(PKT_TX_SCTP_CKSUM != (2ULL << 52));
+ RTE_BUILD_BUG_ON(PKT_TX_UDP_CKSUM != (3ULL << 52));
+ RTE_BUILD_BUG_ON(PKT_TX_IP_CKSUM != (1ULL << 54));
+ RTE_BUILD_BUG_ON(PKT_TX_IPV4 != (1ULL << 55));
+ RTE_BUILD_BUG_ON(PKT_TX_OUTER_IP_CKSUM != (1ULL << 58));
+ RTE_BUILD_BUG_ON(PKT_TX_OUTER_IPV4 != (1ULL << 59));
+ RTE_BUILD_BUG_ON(PKT_TX_OUTER_IPV6 != (1ULL << 60));
+ RTE_BUILD_BUG_ON(PKT_TX_OUTER_UDP_CKSUM != (1ULL << 41));
+ RTE_BUILD_BUG_ON(RTE_MBUF_L2_LEN_BITS != 7);
+ RTE_BUILD_BUG_ON(RTE_MBUF_L3_LEN_BITS != 9);
+ RTE_BUILD_BUG_ON(RTE_MBUF_OUTL2_LEN_BITS != 7);
+ RTE_BUILD_BUG_ON(RTE_MBUF_OUTL3_LEN_BITS != 9);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_off) !=
+ offsetof(struct rte_mbuf, buf_iova) + 8);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) !=
+ offsetof(struct rte_mbuf, buf_iova) + 16);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
+ offsetof(struct rte_mbuf, ol_flags) + 12);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, tx_offload) !=
+ offsetof(struct rte_mbuf, pool) + 2 * sizeof(void *));
+
+ if (conf & DEV_TX_OFFLOAD_VLAN_INSERT ||
+ conf & DEV_TX_OFFLOAD_QINQ_INSERT)
+ flags |= NIX_TX_OFFLOAD_VLAN_QINQ_F;
+
+ if (conf & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
+ conf & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)
+ flags |= NIX_TX_OFFLOAD_OL3_OL4_CSUM_F;
+
+ if (conf & DEV_TX_OFFLOAD_IPV4_CKSUM ||
+ conf & DEV_TX_OFFLOAD_TCP_CKSUM ||
+ conf & DEV_TX_OFFLOAD_UDP_CKSUM ||
+ conf & DEV_TX_OFFLOAD_SCTP_CKSUM)
+ flags |= NIX_TX_OFFLOAD_L3_L4_CSUM_F;
+
+ if (!(conf & DEV_TX_OFFLOAD_MBUF_FAST_FREE))
+ flags |= NIX_TX_OFFLOAD_MBUF_NOFF_F;
+
+ if (conf & DEV_TX_OFFLOAD_MULTI_SEGS)
+ flags |= NIX_TX_MULTI_SEG_F;
+
+ /* Enable Inner checksum for TSO */
+ if (conf & DEV_TX_OFFLOAD_TCP_TSO)
+ flags |= (NIX_TX_OFFLOAD_TSO_F |
+ NIX_TX_OFFLOAD_L3_L4_CSUM_F);
+
+ /* Enable Inner and Outer checksum for Tunnel TSO */
+ if (conf & (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
+ DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
+ DEV_TX_OFFLOAD_GRE_TNL_TSO))
+ flags |= (NIX_TX_OFFLOAD_TSO_F |
+ NIX_TX_OFFLOAD_OL3_OL4_CSUM_F |
+ NIX_TX_OFFLOAD_L3_L4_CSUM_F);
+
+ if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP))
+ flags |= NIX_TX_OFFLOAD_TSTAMP_F;
+
+ return flags;
+}
+
+static int
+nix_sq_init(struct otx2_eth_txq *txq)
+{
+ struct otx2_eth_dev *dev = txq->dev;
+ struct otx2_mbox *mbox = dev->mbox;
+ struct nix_aq_enq_req *sq;
+ uint32_t rr_quantum;
+ uint16_t smq;
+ int rc;
+
+ if (txq->sqb_pool->pool_id == 0)
+ return -EINVAL;
+
+ rc = otx2_nix_tm_get_leaf_data(dev, txq->sq, &rr_quantum, &smq);
+ if (rc) {
+ otx2_err("Failed to get sq->smq(leaf node), rc=%d", rc);
+ return rc;
+ }
+
+ sq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
+ sq->qidx = txq->sq;
+ sq->ctype = NIX_AQ_CTYPE_SQ;
+ sq->op = NIX_AQ_INSTOP_INIT;
+ sq->sq.max_sqe_size = nix_sq_max_sqe_sz(txq);
+
+ sq->sq.smq = smq;
+ sq->sq.smq_rr_quantum = rr_quantum;
+ sq->sq.default_chan = dev->tx_chan_base;
+ sq->sq.sqe_stype = NIX_STYPE_STF;
+ sq->sq.ena = 1;
+ if (sq->sq.max_sqe_size == NIX_MAXSQESZ_W8)
+ sq->sq.sqe_stype = NIX_STYPE_STP;
+ sq->sq.sqb_aura =
+ npa_lf_aura_handle_to_aura(txq->sqb_pool->pool_id);
+ sq->sq.sq_int_ena = BIT(NIX_SQINT_LMT_ERR);
+ sq->sq.sq_int_ena |= BIT(NIX_SQINT_SQB_ALLOC_FAIL);
+ sq->sq.sq_int_ena |= BIT(NIX_SQINT_SEND_ERR);
+ sq->sq.sq_int_ena |= BIT(NIX_SQINT_MNQ_ERR);
+
+ /* Many to one reduction */
+ sq->sq.qint_idx = txq->sq % dev->qints;
+
+ return otx2_mbox_process(mbox);
+}
+
+static int
+nix_sq_uninit(struct otx2_eth_txq *txq)
+{
+ struct otx2_eth_dev *dev = txq->dev;
+ struct otx2_mbox *mbox = dev->mbox;
+ struct ndc_sync_op *ndc_req;
+ struct nix_aq_enq_rsp *rsp;
+ struct nix_aq_enq_req *aq;
+ uint16_t sqes_per_sqb;
+ void *sqb_buf;
+ int rc, count;
+
+ otx2_nix_dbg("Cleaning up sq %u", txq->sq);
+
+ aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
+ aq->qidx = txq->sq;
+ aq->ctype = NIX_AQ_CTYPE_SQ;
+ aq->op = NIX_AQ_INSTOP_READ;
+
+ rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+ if (rc)
+ return rc;
+
+ /* Check if sq is already cleaned up */
+ if (!rsp->sq.ena)
+ return 0;
+
+ /* Disable sq */
+ aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
+ aq->qidx = txq->sq;
+ aq->ctype = NIX_AQ_CTYPE_SQ;
+ aq->op = NIX_AQ_INSTOP_WRITE;
+
+ aq->sq_mask.ena = ~aq->sq_mask.ena;
+ aq->sq.ena = 0;
+
+ rc = otx2_mbox_process(mbox);
+ if (rc)
+ return rc;
+
+ /* Read SQ and free sqb's */
+ aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
+ aq->qidx = txq->sq;
+ aq->ctype = NIX_AQ_CTYPE_SQ;
+ aq->op = NIX_AQ_INSTOP_READ;
+
+ rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+ if (rc)
+ return rc;
+
+ if (rsp->sq.smq_pend)
+ otx2_err("SQ has pending SQEs");
+
+ count = rsp->sq.sqb_count;
+ sqes_per_sqb = 1 << txq->sqes_per_sqb_log2;
+ /* Free SQB's that are used */
+ sqb_buf = (void *)rsp->sq.head_sqb;
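+ /* The last SQE slot of each SQB holds the pointer to the next SQB
+ * in the chain.
+ */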
+ while (count) {
+ void *next_sqb;
+
+ next_sqb = *(void **)((uintptr_t)sqb_buf + (uint32_t)
+ ((sqes_per_sqb - 1) *
+ nix_sq_max_sqe_sz(txq)));
+ npa_lf_aura_op_free(txq->sqb_pool->pool_id, 1,
+ (uint64_t)sqb_buf);
+ sqb_buf = next_sqb;
+ count--;
+ }
+
+ /* Free next to use sqb */
+ if (rsp->sq.next_sqb)
+ npa_lf_aura_op_free(txq->sqb_pool->pool_id, 1,
+ rsp->sq.next_sqb);
+
+ /* Sync NDC-NIX-TX for LF */
+ ndc_req = otx2_mbox_alloc_msg_ndc_sync_op(mbox);
+ ndc_req->nix_lf_tx_sync = 1;
+ rc = otx2_mbox_process(mbox);
+ if (rc)
+ otx2_err("Error on NDC-NIX-TX LF sync, rc %d", rc);
+
+ return rc;
+}
+
+static int
+nix_sqb_aura_limit_cfg(struct rte_mempool *mp, uint16_t nb_sqb_bufs)
+{
+ struct otx2_npa_lf *npa_lf = otx2_intra_dev_get_cfg()->npa_lf;
+ struct npa_aq_enq_req *aura_req;
+
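+ /* Cap the aura limit so that the SQ can never consume more than
+ * nb_sqb_bufs SQBs from the pool at a time.
+ */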
+ aura_req = otx2_mbox_alloc_msg_npa_aq_enq(npa_lf->mbox);
+ aura_req->aura_id = npa_lf_aura_handle_to_aura(mp->pool_id);
+ aura_req->ctype = NPA_AQ_CTYPE_AURA;
+ aura_req->op = NPA_AQ_INSTOP_WRITE;
+
+ aura_req->aura.limit = nb_sqb_bufs;
+ aura_req->aura_mask.limit = ~(aura_req->aura_mask.limit);
+
+ return otx2_mbox_process(npa_lf->mbox);
+}
+
+static int
+nix_alloc_sqb_pool(int port, struct otx2_eth_txq *txq, uint16_t nb_desc)
+{
+ struct otx2_eth_dev *dev = txq->dev;
+ uint16_t sqes_per_sqb, nb_sqb_bufs;
+ char name[RTE_MEMPOOL_NAMESIZE];
+ struct rte_mempool_objsz sz;
+ struct npa_aura_s *aura;
+ uint32_t blk_sz;
+
+ aura = (struct npa_aura_s *)((uintptr_t)txq->fc_mem + OTX2_ALIGN);
+ snprintf(name, sizeof(name), "otx2_sqb_pool_%d_%d", port, txq->sq);
+ blk_sz = dev->sqb_size;
+
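+ /* sqb_size is in bytes; a W16 SQE is 16 words and a W8 SQE is
+ * 8 words of 8 bytes each, hence the divisions below.
+ */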
+ if (nix_sq_max_sqe_sz(txq) == NIX_MAXSQESZ_W16)
+ sqes_per_sqb = (dev->sqb_size / 8) / 16;
+ else
+ sqes_per_sqb = (dev->sqb_size / 8) / 8;
+
+ nb_sqb_bufs = nb_desc / sqes_per_sqb;
+ /* Clamp up to devarg passed SQB count */
+ nb_sqb_bufs = RTE_MIN(dev->max_sqb_count, RTE_MAX(NIX_DEF_SQB,
+ nb_sqb_bufs + NIX_SQB_LIST_SPACE));
+
+ txq->sqb_pool = rte_mempool_create_empty(name, NIX_MAX_SQB, blk_sz,
+ 0, 0, dev->node,
+ MEMPOOL_F_NO_SPREAD);
+ txq->nb_sqb_bufs = nb_sqb_bufs;
+ txq->sqes_per_sqb_log2 = (uint16_t)rte_log2_u32(sqes_per_sqb);
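+ /* Account for the SQE in each SQB consumed by the next-SQB link
+ * and expose only NIX_SQB_LOWER_THRESH percent of the remainder
+ * as the usable Tx depth.
+ */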
+ txq->nb_sqb_bufs_adj = nb_sqb_bufs -
+ RTE_ALIGN_MUL_CEIL(nb_sqb_bufs, sqes_per_sqb) / sqes_per_sqb;
+ txq->nb_sqb_bufs_adj =
+ (NIX_SQB_LOWER_THRESH * txq->nb_sqb_bufs_adj) / 100;
+
+ if (txq->sqb_pool == NULL) {
+ otx2_err("Failed to allocate sqe mempool");
+ goto fail;
+ }
+
+ memset(aura, 0, sizeof(*aura));
+ aura->fc_ena = 1;
+ aura->fc_addr = txq->fc_iova;
+ aura->fc_hyst_bits = 0; /* Store count on all updates */
+ if (rte_mempool_set_ops_byname(txq->sqb_pool, "octeontx2_npa", aura)) {
+ otx2_err("Failed to set ops for sqe mempool");
+ goto fail;
+ }
+ if (rte_mempool_populate_default(txq->sqb_pool) < 0) {
+ otx2_err("Failed to populate sqe mempool");
+ goto fail;
+ }
+
+ rte_mempool_calc_obj_size(blk_sz, MEMPOOL_F_NO_SPREAD, &sz);
+ if (dev->sqb_size != sz.elt_size) {
+ otx2_err("sqe pool block size is not expected %d != %d",
+ dev->sqb_size, sz.elt_size);
+ goto fail;
+ }
+
+ nix_sqb_aura_limit_cfg(txq->sqb_pool, txq->nb_sqb_bufs);
+
+ return 0;
+fail:
+ return -ENOMEM;
+}
+
+void
+otx2_nix_form_default_desc(struct otx2_eth_txq *txq)
+{
+ struct nix_send_ext_s *send_hdr_ext;
+ struct nix_send_hdr_s *send_hdr;
+ struct nix_send_mem_s *send_mem;
+ union nix_send_sg_s *sg;
+
+ /* Initialize the fields based on basic single segment packet */
+ memset(&txq->cmd, 0, sizeof(txq->cmd));
+
+ if (txq->dev->tx_offload_flags & NIX_TX_NEED_EXT_HDR) {
+ send_hdr = (struct nix_send_hdr_s *)&txq->cmd[0];
+ /* 2(HDR) + 2(EXT_HDR) + 1(SG) + 1(IOVA) = 6/2 - 1 = 2 */
+ send_hdr->w0.sizem1 = 2;
+
+ send_hdr_ext = (struct nix_send_ext_s *)&txq->cmd[2];
+ send_hdr_ext->w0.subdc = NIX_SUBDC_EXT;
+ if (txq->dev->tx_offload_flags & NIX_TX_OFFLOAD_TSTAMP_F) {
+ /* Default: one seg packet would have:
+ * 2(HDR) + 2(EXT) + 1(SG) + 1(IOVA) + 2(MEM)
+ * => 8/2 - 1 = 3
+ */
+ send_hdr->w0.sizem1 = 3;
+ send_hdr_ext->w0.tstmp = 1;
+
+ /* The send_mem descriptor starts at word offset
+ * send_hdr->w0.sizem1 * 2 within the command.
+ */
+ send_mem = (struct nix_send_mem_s *)(txq->cmd +
+ (send_hdr->w0.sizem1 << 1));
+ send_mem->subdc = NIX_SUBDC_MEM;
+ send_mem->alg = NIX_SENDMEMALG_SETTSTMP;
+ send_mem->addr = txq->dev->tstamp.tx_tstamp_iova;
+ }
+ sg = (union nix_send_sg_s *)&txq->cmd[4];
+ } else {
+ send_hdr = (struct nix_send_hdr_s *)&txq->cmd[0];
+ /* 2(HDR) + 1(SG) + 1(IOVA) = 4/2 - 1 = 1 */
+ send_hdr->w0.sizem1 = 1;
+ sg = (union nix_send_sg_s *)&txq->cmd[2];
+ }
+
+ send_hdr->w0.sq = txq->sq;
+ sg->subdc = NIX_SUBDC_SG;
+ sg->segs = 1;
+ sg->ld_type = NIX_SENDLDTYPE_LDD;
+
+ rte_smp_wmb();
+}
+
+static void
+otx2_nix_tx_queue_release(void *_txq)
+{
+ struct otx2_eth_txq *txq = _txq;
+ struct rte_eth_dev *eth_dev;
+
+ if (!txq)
+ return;
+
+ eth_dev = txq->dev->eth_dev;
+
+ otx2_nix_dbg("Releasing txq %u", txq->sq);
+
+ /* Flush and disable tm */
+ otx2_nix_tm_sw_xoff(txq, eth_dev->data->dev_started);
+
+ /* Free sqb's and disable sq */
+ nix_sq_uninit(txq);
+
+ if (txq->sqb_pool) {
+ rte_mempool_free(txq->sqb_pool);
+ txq->sqb_pool = NULL;
+ }
+ rte_free(txq);
+}
+
+
+static int
+otx2_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t sq,
+ uint16_t nb_desc, unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ const struct rte_memzone *fc;
+ struct otx2_eth_txq *txq;
+ uint64_t offloads;
+ int rc;
+
+ rc = -EINVAL;
+
+ /* Compile time check to make sure all fast path elements are in a cache line */
+ RTE_BUILD_BUG_ON(offsetof(struct otx2_eth_txq, slow_path_start) >= 128);
+
+ if (tx_conf->tx_deferred_start) {
+ otx2_err("Tx deferred start is not supported");
+ goto fail;
+ }
+
+ /* Free memory prior to re-allocation if needed. */
+ if (eth_dev->data->tx_queues[sq] != NULL) {
+ otx2_nix_dbg("Freeing memory prior to re-allocation %d", sq);
+ otx2_nix_tx_queue_release(eth_dev->data->tx_queues[sq]);
+ eth_dev->data->tx_queues[sq] = NULL;
+ }
+
+ /* Find the expected offloads for this queue */
+ offloads = tx_conf->offloads | eth_dev->data->dev_conf.txmode.offloads;
+
+ /* Allocating tx queue data structure */
+ txq = rte_zmalloc_socket("otx2_ethdev TX queue", sizeof(*txq),
+ OTX2_ALIGN, socket_id);
+ if (txq == NULL) {
+ otx2_err("Failed to alloc txq=%d", sq);
+ rc = -ENOMEM;
+ goto fail;
+ }
+ txq->sq = sq;
+ txq->dev = dev;
+ txq->sqb_pool = NULL;
+ txq->offloads = offloads;
+ dev->tx_offloads |= offloads;
+
+ /*
+ * Allocate memory for flow control updates from HW.
+ * Allocate one cache line so that it fits all FC_STYPE modes.
+ */
+ fc = rte_eth_dma_zone_reserve(eth_dev, "fcmem", sq,
+ OTX2_ALIGN + sizeof(struct npa_aura_s),
+ OTX2_ALIGN, dev->node);
+ if (fc == NULL) {
+ otx2_err("Failed to allocate mem for fcmem");
+ rc = -ENOMEM;
+ goto free_txq;
+ }
+ txq->fc_iova = fc->iova;
+ txq->fc_mem = fc->addr;
+
+ /* Initialize the aura sqb pool */
+ rc = nix_alloc_sqb_pool(eth_dev->data->port_id, txq, nb_desc);
+ if (rc) {
+ otx2_err("Failed to alloc sqe pool rc=%d", rc);
+ goto free_txq;
+ }
+
+ /* Initialize the SQ */
+ rc = nix_sq_init(txq);
+ if (rc) {
+ otx2_err("Failed to init sq=%d context", sq);
+ goto free_txq;
+ }
+
+ txq->fc_cache_pkts = 0;
+ txq->io_addr = dev->base + NIX_LF_OP_SENDX(0);
+ /* Evenly distribute LMT slot for each sq */
+ txq->lmt_addr = (void *)(dev->lmt_addr + ((sq & LMT_SLOT_MASK) << 12));
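+ /* The << 12 gives each SQ its own 4KB window of the LMT region so
+ * that queues do not contend for the same LMT lines.
+ */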
+
+ txq->qconf.socket_id = socket_id;
+ txq->qconf.nb_desc = nb_desc;
+ memcpy(&txq->qconf.conf.tx, tx_conf, sizeof(struct rte_eth_txconf));
+
+ otx2_nix_form_default_desc(txq);
+
+ otx2_nix_dbg("sq=%d fc=%p offload=0x%" PRIx64 " sqb=0x%" PRIx64 ""
+ " lmt_addr=%p nb_sqb_bufs=%d sqes_per_sqb_log2=%d", sq,
+ fc->addr, offloads, txq->sqb_pool->pool_id, txq->lmt_addr,
+ txq->nb_sqb_bufs, txq->sqes_per_sqb_log2);
+ eth_dev->data->tx_queues[sq] = txq;
+ eth_dev->data->tx_queue_state[sq] = RTE_ETH_QUEUE_STATE_STOPPED;
+ return 0;
+
+free_txq:
+ otx2_nix_tx_queue_release(txq);
+fail:
+ return rc;
+}
+
+static int
+nix_store_queue_cfg_and_then_release(struct rte_eth_dev *eth_dev)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ struct otx2_eth_qconf *tx_qconf = NULL;
+ struct otx2_eth_qconf *rx_qconf = NULL;
+ struct otx2_eth_txq **txq;
+ struct otx2_eth_rxq **rxq;
+ int i, nb_rxq, nb_txq;
+
+ nb_rxq = RTE_MIN(dev->configured_nb_rx_qs, eth_dev->data->nb_rx_queues);
+ nb_txq = RTE_MIN(dev->configured_nb_tx_qs, eth_dev->data->nb_tx_queues);
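+ /* Only the queues present in both the old and the new
+ * configuration have state to save and a slot to restore into.
+ */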
+
+ tx_qconf = malloc(nb_txq * sizeof(*tx_qconf));
+ if (tx_qconf == NULL) {
+ otx2_err("Failed to allocate memory for tx_qconf");
+ goto fail;
+ }
+
+ rx_qconf = malloc(nb_rxq * sizeof(*rx_qconf));
+ if (rx_qconf == NULL) {
+ otx2_err("Failed to allocate memory for rx_qconf");
+ goto fail;
+ }
+
+ txq = (struct otx2_eth_txq **)eth_dev->data->tx_queues;
+ for (i = 0; i < nb_txq; i++) {
+ if (txq[i] == NULL) {
+ otx2_err("txq[%d] is already released", i);
+ goto fail;
+ }
+ memcpy(&tx_qconf[i], &txq[i]->qconf, sizeof(*tx_qconf));
+ otx2_nix_tx_queue_release(txq[i]);
+ eth_dev->data->tx_queues[i] = NULL;
+ }
+
+ rxq = (struct otx2_eth_rxq **)eth_dev->data->rx_queues;
+ for (i = 0; i < nb_rxq; i++) {
+ if (rxq[i] == NULL) {
+ otx2_err("rxq[%d] is already released", i);
+ goto fail;
+ }
+ memcpy(&rx_qconf[i], &rxq[i]->qconf, sizeof(*rx_qconf));
+ otx2_nix_rx_queue_release(rxq[i]);
+ eth_dev->data->rx_queues[i] = NULL;
+ }
+
+ dev->tx_qconf = tx_qconf;
+ dev->rx_qconf = rx_qconf;
+ return 0;
+
+fail:
+ if (tx_qconf)
+ free(tx_qconf);
+ if (rx_qconf)
+ free(rx_qconf);
+
+ return -ENOMEM;
+}
+
+static int
+nix_restore_queue_cfg(struct rte_eth_dev *eth_dev)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ struct otx2_eth_qconf *tx_qconf = dev->tx_qconf;
+ struct otx2_eth_qconf *rx_qconf = dev->rx_qconf;
+ struct otx2_eth_txq **txq;
+ struct otx2_eth_rxq **rxq;
+ int rc, i, nb_rxq, nb_txq;
+
+ nb_rxq = RTE_MIN(dev->configured_nb_rx_qs, eth_dev->data->nb_rx_queues);
+ nb_txq = RTE_MIN(dev->configured_nb_tx_qs, eth_dev->data->nb_tx_queues);
+
+ rc = -ENOMEM;
+ /* Set up tx & rx queues with the previous configuration so
+ * that the queues remain functional when ports are started
+ * without reconfiguring the queues.
+ *
+ * The usual reconfig sequence is as below:
+ * port_configure() {
+ * if(reconfigure) {
+ * queue_release()
+ * queue_setup()
+ * }
+ * queue_configure() {
+ * queue_release()
+ * queue_setup()
+ * }
+ * }
+ * port_start()
+ *
+ * In some applications' control paths, queue_configure() would
+ * NOT be invoked for TXQs/RXQs in port_configure().
+ * In such cases, the queues remain functional after start since
+ * they were already set up in port_configure().
+ */
+ for (i = 0; i < nb_txq; i++) {
+ rc = otx2_nix_tx_queue_setup(eth_dev, i, tx_qconf[i].nb_desc,
+ tx_qconf[i].socket_id,
+ &tx_qconf[i].conf.tx);
+ if (rc) {
+ otx2_err("Failed to setup tx queue rc=%d", rc);
+ txq = (struct otx2_eth_txq **)eth_dev->data->tx_queues;
+ for (i -= 1; i >= 0; i--)
+ otx2_nix_tx_queue_release(txq[i]);
+ goto fail;
+ }
+ }
+
+ free(tx_qconf); tx_qconf = NULL;
+
+ for (i = 0; i < nb_rxq; i++) {
+ rc = otx2_nix_rx_queue_setup(eth_dev, i, rx_qconf[i].nb_desc,
+ rx_qconf[i].socket_id,
+ &rx_qconf[i].conf.rx,
+ rx_qconf[i].mempool);
+ if (rc) {
+ otx2_err("Failed to setup rx queue rc=%d", rc);
+ rxq = (struct otx2_eth_rxq **)eth_dev->data->rx_queues;
+ for (i -= 1; i >= 0; i--)
+ otx2_nix_rx_queue_release(rxq[i]);
+ goto release_tx_queues;
+ }
+ }
+
+ free(rx_qconf); rx_qconf = NULL;
+
+ return 0;
+
+release_tx_queues:
+ txq = (struct otx2_eth_txq **)eth_dev->data->tx_queues;
+ for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
+ otx2_nix_tx_queue_release(txq[i]);
+fail:
+ if (tx_qconf)
+ free(tx_qconf);
+ if (rx_qconf)
+ free(rx_qconf);
+
+ return rc;
+}
+
+static uint16_t
+nix_eth_nop_burst(void *queue, struct rte_mbuf **mbufs, uint16_t pkts)
+{
+ RTE_SET_USED(queue);
+ RTE_SET_USED(mbufs);
+ RTE_SET_USED(pkts);
+
+ return 0;
+}
+
+static void
+nix_set_nop_rxtx_function(struct rte_eth_dev *eth_dev)
+{
+ /* These dummy functions are required to support applications
+ * which reconfigure queues without stopping the tx burst and
+ * rx burst threads (e.g. the kni app).
+ * When the queue context is saved, the txq/rxqs are released,
+ * which would crash the app since rx/tx burst is still running
+ * on different lcores.
+ */
+ eth_dev->tx_pkt_burst = nix_eth_nop_burst;
+ eth_dev->rx_pkt_burst = nix_eth_nop_burst;
+ rte_mb();
+}
+
+static void
+nix_lso_tcp(struct nix_lso_format_cfg *req, bool v4)
+{
+ volatile struct nix_lso_format *field;
+
+ /* Format works only with TCP packet marked by OL3/OL4 */
+ field = (volatile struct nix_lso_format *)&req->fields[0];
+ req->field_mask = NIX_LSO_FIELD_MASK;
+ /* Outer IPv4/IPv6 */
+ field->layer = NIX_TXLAYER_OL3;
+ field->offset = v4 ? 2 : 4;
+ field->sizem1 = 1; /* 2B */
+ field->alg = NIX_LSOALG_ADD_PAYLEN;
+ field++;
+ if (v4) {
+ /* IPID field */
+ field->layer = NIX_TXLAYER_OL3;
+ field->offset = 4;
+ field->sizem1 = 1;
+ /* Incremented linearly per segment */
+ field->alg = NIX_LSOALG_ADD_SEGNUM;
+ field++;
+ }
+
+ /* TCP sequence number update */
+ field->layer = NIX_TXLAYER_OL4;
+ field->offset = 4;
+ field->sizem1 = 3; /* 4 bytes */
+ field->alg = NIX_LSOALG_ADD_OFFSET;
+ field++;
+ /* TCP flags field */
+ field->layer = NIX_TXLAYER_OL4;
+ field->offset = 12;
+ field->sizem1 = 1;
+ field->alg = NIX_LSOALG_TCP_FLAGS;
+ field++;
+}
+
+static void
+nix_lso_udp_tun_tcp(struct nix_lso_format_cfg *req,
+ bool outer_v4, bool inner_v4)
+{
+ volatile struct nix_lso_format *field;
+
+ field = (volatile struct nix_lso_format *)&req->fields[0];
+ req->field_mask = NIX_LSO_FIELD_MASK;
+ /* Outer IPv4/IPv6 len */
+ field->layer = NIX_TXLAYER_OL3;
+ field->offset = outer_v4 ? 2 : 4;
+ field->sizem1 = 1; /* 2B */
+ field->alg = NIX_LSOALG_ADD_PAYLEN;
+ field++;
+ if (outer_v4) {
+ /* IPID */
+ field->layer = NIX_TXLAYER_OL3;
+ field->offset = 4;
+ field->sizem1 = 1;
+ /* Incremented linearly per segment */
+ field->alg = NIX_LSOALG_ADD_SEGNUM;
+ field++;
+ }
+
+ /* Outer UDP length */
+ field->layer = NIX_TXLAYER_OL4;
+ field->offset = 4;
+ field->sizem1 = 1;
+ field->alg = NIX_LSOALG_ADD_PAYLEN;
+ field++;
+
+ /* Inner IPv4/IPv6 */
+ field->layer = NIX_TXLAYER_IL3;
+ field->offset = inner_v4 ? 2 : 4;
+ field->sizem1 = 1; /* 2B */
+ field->alg = NIX_LSOALG_ADD_PAYLEN;
+ field++;
+ if (inner_v4) {
+ /* IPID field */
+ field->layer = NIX_TXLAYER_IL3;
+ field->offset = 4;
+ field->sizem1 = 1;
+ /* Incremented linearly per segment */
+ field->alg = NIX_LSOALG_ADD_SEGNUM;
+ field++;
+ }
+
+ /* TCP sequence number update */
+ field->layer = NIX_TXLAYER_IL4;
+ field->offset = 4;
+ field->sizem1 = 3; /* 4 bytes */
+ field->alg = NIX_LSOALG_ADD_OFFSET;
+ field++;
+
+ /* TCP flags field */
+ field->layer = NIX_TXLAYER_IL4;
+ field->offset = 12;
+ field->sizem1 = 1;
+ field->alg = NIX_LSOALG_TCP_FLAGS;
+ field++;
+}
+
+static void
+nix_lso_tun_tcp(struct nix_lso_format_cfg *req,
+ bool outer_v4, bool inner_v4)
+{
+ volatile struct nix_lso_format *field;
+
+ field = (volatile struct nix_lso_format *)&req->fields[0];
+ req->field_mask = NIX_LSO_FIELD_MASK;
+ /* Outer IPv4/IPv6 len */
+ field->layer = NIX_TXLAYER_OL3;
+ field->offset = outer_v4 ? 2 : 4;
+ field->sizem1 = 1; /* 2B */
+ field->alg = NIX_LSOALG_ADD_PAYLEN;
+ field++;
+ if (outer_v4) {
+ /* IPID */
+ field->layer = NIX_TXLAYER_OL3;
+ field->offset = 4;
+ field->sizem1 = 1;
+ /* Incremented linearly per segment */
+ field->alg = NIX_LSOALG_ADD_SEGNUM;
+ field++;
+ }
+
+ /* Inner IPv4/IPv6 */
+ field->layer = NIX_TXLAYER_IL3;
+ field->offset = inner_v4 ? 2 : 4;
+ field->sizem1 = 1; /* 2B */
+ field->alg = NIX_LSOALG_ADD_PAYLEN;
+ field++;
+ if (inner_v4) {
+ /* IPID field */
+ field->layer = NIX_TXLAYER_IL3;
+ field->offset = 4;
+ field->sizem1 = 1;
+ /* Incremented linearly per segment */
+ field->alg = NIX_LSOALG_ADD_SEGNUM;
+ field++;
+ }
+
+ /* TCP sequence number update */
+ field->layer = NIX_TXLAYER_IL4;
+ field->offset = 4;
+ field->sizem1 = 3; /* 4 bytes */
+ field->alg = NIX_LSOALG_ADD_OFFSET;
+ field++;
+
+ /* TCP flags field */
+ field->layer = NIX_TXLAYER_IL4;
+ field->offset = 12;
+ field->sizem1 = 1;
+ field->alg = NIX_LSOALG_TCP_FLAGS;
+ field++;
+}
+
+static int
+nix_setup_lso_formats(struct otx2_eth_dev *dev)
+{
+ struct otx2_mbox *mbox = dev->mbox;
+ struct nix_lso_format_cfg_rsp *rsp;
+ struct nix_lso_format_cfg *req;
+ uint8_t base;
+ int rc;
+
+ /* Skip if TSO was not requested */
+ if (!(dev->tx_offload_flags & NIX_TX_OFFLOAD_TSO_F))
+ return 0;
+ /*
+ * IPv4/TCP LSO
+ */
+ req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox);
+ nix_lso_tcp(req, true);
+ rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+ if (rc)
+ return rc;
+
+ base = rsp->lso_format_idx;
+ if (base != NIX_LSO_FORMAT_IDX_TSOV4)
+ return -EFAULT;
+ dev->lso_base_idx = base;
+ otx2_nix_dbg("tcpv4 lso fmt=%u", base);
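+ /* The Tx fast path assumes the remaining LSO formats are allocated
+ * at fixed offsets from this base index, hence the checks below.
+ */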
+
+
+ /*
+ * IPv6/TCP LSO
+ */
+ req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox);
+ nix_lso_tcp(req, false);
+ rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+ if (rc)
+ return rc;
+
+ if (rsp->lso_format_idx != base + 1)
+ return -EFAULT;
+ otx2_nix_dbg("tcpv6 lso fmt=%u\n", base + 1);
+
+ /*
+ * IPv4/UDP/TUN HDR/IPv4/TCP LSO
+ */
+ req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox);
+ nix_lso_udp_tun_tcp(req, true, true);
+ rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+ if (rc)
+ return rc;
+
+ if (rsp->lso_format_idx != base + 2)
+ return -EFAULT;
+ otx2_nix_dbg("udp tun v4v4 fmt=%u\n", base + 2);
+
+ /*
+ * IPv4/UDP/TUN HDR/IPv6/TCP LSO
+ */
+ req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox);
+ nix_lso_udp_tun_tcp(req, true, false);
+ rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+ if (rc)
+ return rc;
+
+ if (rsp->lso_format_idx != base + 3)
+ return -EFAULT;
+ otx2_nix_dbg("udp tun v4v6 fmt=%u\n", base + 3);
+
+ /*
+ * IPv6/UDP/TUN HDR/IPv4/TCP LSO
+ */
+ req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox);
+ nix_lso_udp_tun_tcp(req, false, true);
+ rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+ if (rc)
+ return rc;
+
+ if (rsp->lso_format_idx != base + 4)
+ return -EFAULT;
+ otx2_nix_dbg("udp tun v6v4 fmt=%u\n", base + 4);
+
+ /*
+ * IPv6/UDP/TUN HDR/IPv6/TCP LSO
+ */
+ req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox);
+ nix_lso_udp_tun_tcp(req, false, false);
+ rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+ if (rc)
+ return rc;
+ if (rsp->lso_format_idx != base + 5)
+ return -EFAULT;
+ otx2_nix_dbg("udp tun v6v6 fmt=%u\n", base + 5);
+
+ /*
+ * IPv4/TUN HDR/IPv4/TCP LSO
+ */
+ req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox);
+ nix_lso_tun_tcp(req, true, true);
+ rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+ if (rc)
+ return rc;
+
+ if (rsp->lso_format_idx != base + 6)
+ return -EFAULT;
+ otx2_nix_dbg("tun v4v4 fmt=%u\n", base + 6);