+otx2_nix_txq_info_get(struct rte_eth_dev *eth_dev, uint16_t queue_id,
+		      struct rte_eth_txq_info *qinfo)
+{
+	struct otx2_eth_txq *txq;
+
+	txq = eth_dev->data->tx_queues[queue_id];
+
+	qinfo->nb_desc = txq->qconf.nb_desc;
+
+	qinfo->conf.tx_thresh.pthresh = 0;
+	qinfo->conf.tx_thresh.hthresh = 0;
+	qinfo->conf.tx_thresh.wthresh = 0;
+
+	qinfo->conf.tx_free_thresh = 0;
+	qinfo->conf.tx_rs_thresh = 0;
+	qinfo->conf.offloads = txq->offloads;
+	qinfo->conf.tx_deferred_start = 0;
+}
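+
+/*
+ * Minimal usage sketch (hypothetical application code, not part of this
+ * file): the callback above is reached through the generic ethdev API.
+ *
+ *	struct rte_eth_txq_info qinfo;
+ *
+ *	if (rte_eth_tx_queue_info_get(port_id, queue_id, &qinfo) == 0)
+ *		printf("Tx queue %u has %u descriptors\n",
+ *		       queue_id, qinfo.nb_desc);
+ */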
+
+int
+otx2_rx_burst_mode_get(struct rte_eth_dev *eth_dev,
+		       __rte_unused uint16_t queue_id,
+		       struct rte_eth_burst_mode *mode)
+{
+	ssize_t bytes = 0, str_size = RTE_ETH_BURST_MODE_INFO_SIZE, rc;
+	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+	const struct burst_info {
+		uint16_t flags;
+		const char *output;
+	} rx_offload_map[] = {
+		{NIX_RX_OFFLOAD_RSS_F, "RSS,"},
+		{NIX_RX_OFFLOAD_PTYPE_F, " Ptype,"},
+		{NIX_RX_OFFLOAD_CHECKSUM_F, " Checksum,"},
+		{NIX_RX_OFFLOAD_VLAN_STRIP_F, " VLAN Strip,"},
+		{NIX_RX_OFFLOAD_MARK_UPDATE_F, " Mark Update,"},
+		{NIX_RX_OFFLOAD_TSTAMP_F, " Timestamp,"},
+		{NIX_RX_MULTI_SEG_F, " Scattered,"}
+	};
+	static const char *const burst_mode[] = {"Vector Neon, Rx Offloads:",
+						 "Scalar, Rx Offloads:"
+	};
+	uint32_t i;
+
+	/* Update burst mode info */
+	rc = rte_strscpy(mode->info + bytes, burst_mode[dev->scalar_ena],
+			 str_size - bytes);
+	if (rc < 0)
+		goto done;
+
+	bytes += rc;
+
+	/* Update Rx offload info */
+	for (i = 0; i < RTE_DIM(rx_offload_map); i++) {
+		if (dev->rx_offload_flags & rx_offload_map[i].flags) {
+			rc = rte_strscpy(mode->info + bytes,
+					 rx_offload_map[i].output,
+					 str_size - bytes);
+			if (rc < 0)
+				goto done;
+
+			bytes += rc;
+		}
+	}
+
+done:
+	return 0;
+}
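+
+/*
+ * Application-side sketch (hypothetical, assuming a configured port): the
+ * string assembled above is retrieved with rte_eth_rx_burst_mode_get() and
+ * looks like "Scalar, Rx Offloads:RSS, Checksum,".
+ *
+ *	struct rte_eth_burst_mode mode;
+ *
+ *	if (rte_eth_rx_burst_mode_get(port_id, 0, &mode) == 0)
+ *		printf("Rx burst mode: %s\n", mode.info);
+ */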
+
+int
+otx2_tx_burst_mode_get(struct rte_eth_dev *eth_dev,
+		       __rte_unused uint16_t queue_id,
+		       struct rte_eth_burst_mode *mode)
+{
+	ssize_t bytes = 0, str_size = RTE_ETH_BURST_MODE_INFO_SIZE, rc;
+	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+	const struct burst_info {
+		uint16_t flags;
+		const char *output;
+	} tx_offload_map[] = {
+		{NIX_TX_OFFLOAD_L3_L4_CSUM_F, " Inner L3/L4 csum,"},
+		{NIX_TX_OFFLOAD_OL3_OL4_CSUM_F, " Outer L3/L4 csum,"},
+		{NIX_TX_OFFLOAD_VLAN_QINQ_F, " VLAN Insertion,"},
+		{NIX_TX_OFFLOAD_MBUF_NOFF_F, " MBUF free disable,"},
+		{NIX_TX_OFFLOAD_TSTAMP_F, " Timestamp,"},
+		{NIX_TX_OFFLOAD_TSO_F, " TSO,"},
+		{NIX_TX_MULTI_SEG_F, " Scattered,"}
+	};
+	static const char *const burst_mode[] = {"Vector Neon, Tx Offloads:",
+						 "Scalar, Tx Offloads:"
+	};
+	uint32_t i;
+
+	/* Update burst mode info */
+	rc = rte_strscpy(mode->info + bytes, burst_mode[dev->scalar_ena],
+			 str_size - bytes);
+	if (rc < 0)
+		goto done;
+
+	bytes += rc;
+
+	/* Update Tx offload info */
+	for (i = 0; i < RTE_DIM(tx_offload_map); i++) {
+		if (dev->tx_offload_flags & tx_offload_map[i].flags) {
+			rc = rte_strscpy(mode->info + bytes,
+					 tx_offload_map[i].output,
+					 str_size - bytes);
+			if (rc < 0)
+				goto done;
+
+			bytes += rc;
+		}
+	}
+
+done:
+	return 0;
+}
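+
+/*
+ * The Tx counterpart is queried the same way via rte_eth_tx_burst_mode_get().
+ * Note that the flags matched above are the PMD's internal NIX_TX_OFFLOAD_*
+ * fast-path flags, not the public ethdev Tx offload capability bits.
+ */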
+
+static void
+nix_rx_head_tail_get(struct otx2_eth_dev *dev,
+		     uint32_t *head, uint32_t *tail, uint16_t queue_idx)
+{
+	uint64_t reg, val;
+
+	if (head == NULL || tail == NULL)
+		return;
+
+	/* Atomic read of CQ_OP_STATUS; the CQ index goes in the upper word */
+	reg = (((uint64_t)queue_idx) << 32);
+	val = otx2_atomic64_add_nosync(reg, (int64_t *)
+				       (dev->base + NIX_LF_CQ_OP_STATUS));
+	if (val & (OP_ERR | CQ_ERR))
+		val = 0;
+
+	/* Tail is in bits [19:0] of the result, head in bits [39:20] */
+	*tail = (uint32_t)(val & 0xFFFFF);
+	*head = (uint32_t)((val >> 20) & 0xFFFFF);
+}
+
+uint32_t
+otx2_nix_rx_queue_count(struct rte_eth_dev *eth_dev, uint16_t queue_idx)
+{
+	struct otx2_eth_rxq *rxq = eth_dev->data->rx_queues[queue_idx];
+	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+	uint32_t head, tail;
+
+	nix_rx_head_tail_get(dev, &head, &tail, queue_idx);
+	return (tail - head) % rxq->qlen;
+}
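+
+/*
+ * Usage sketch (hypothetical application code): the fill level computed
+ * above is exposed through rte_eth_rx_queue_count(); the unsigned
+ * (tail - head) difference modulo the ring size stays correct across
+ * pointer wrap-around.
+ *
+ *	int used = rte_eth_rx_queue_count(port_id, queue_id);
+ *
+ *	if (used >= 0)
+ *		printf("Rx queue %u holds %d filled descriptors\n",
+ *		       queue_id, used);
+ */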
+
+static inline int
+nix_offset_has_packet(uint32_t head, uint32_t tail, uint16_t offset)
+{
+	/* Check whether the given offset (ring index) holds a packet
+	 * filled by HW.
+	 */
+	if (tail > head && offset <= tail && offset >= head)
+		return 1;
+	/* Wrap around case */
+	if (head > tail && (offset >= head || offset <= tail))
+		return 1;
+
+	return 0;
+}
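+
+/*
+ * Worked example of the predicate above: with a ring of 1024 entries,
+ * head = 1000 and tail = 10 (wrapped), offsets 1000..1023 and 0..10
+ * report a packet, while offsets 11..999 do not.
+ */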
+
+int
+otx2_nix_rx_descriptor_done(void *rx_queue, uint16_t offset)
+{
+	struct otx2_eth_rxq *rxq = rx_queue;
+	uint32_t head, tail;
+
+	nix_rx_head_tail_get(otx2_eth_pmd_priv(rxq->eth_dev),
+			     &head, &tail, rxq->rq);
+
+	return nix_offset_has_packet(head, tail, offset);
+}
+
+int
+otx2_nix_rx_descriptor_status(void *rx_queue, uint16_t offset)
+{
+	struct otx2_eth_rxq *rxq = rx_queue;
+	uint32_t head, tail;
+
+	if (rxq->qlen <= offset)
+		return -EINVAL;
+
+	nix_rx_head_tail_get(otx2_eth_pmd_priv(rxq->eth_dev),
+			     &head, &tail, rxq->rq);
+
+	if (nix_offset_has_packet(head, tail, offset))
+		return RTE_ETH_RX_DESC_DONE;
+	else
+		return RTE_ETH_RX_DESC_AVAIL;
+}
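+
+/*
+ * Application-side sketch (hypothetical): the two Rx status callbacks above
+ * back the generic descriptor-status API, e.g.
+ *
+ *	if (rte_eth_rx_descriptor_status(port_id, queue_id, off) ==
+ *	    RTE_ETH_RX_DESC_DONE)
+ *		handle_filled_slot(off);
+ *
+ * where handle_filled_slot() is a made-up application helper.
+ */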
+
+static void
+nix_tx_head_tail_get(struct otx2_eth_dev *dev,
+		     uint32_t *head, uint32_t *tail, uint16_t queue_idx)
+{
+	uint64_t reg, val;
+
+	if (head == NULL || tail == NULL)
+		return;
+
+	/* Atomic read of SQ_OP_STATUS; the SQ index goes in the upper word */
+	reg = (((uint64_t)queue_idx) << 32);
+	val = otx2_atomic64_add_nosync(reg, (int64_t *)
+				       (dev->base + NIX_LF_SQ_OP_STATUS));
+	if (val & OP_ERR)
+		val = 0;
+
+	/* Tail is in bits [33:28] of the result, head in bits [25:20] */
+	*tail = (uint32_t)((val >> 28) & 0x3F);
+	*head = (uint32_t)((val >> 20) & 0x3F);
+}
+
+int
+otx2_nix_tx_descriptor_status(void *tx_queue, uint16_t offset)
+{
+	struct otx2_eth_txq *txq = tx_queue;
+	uint32_t head, tail;
+
+	if (txq->qconf.nb_desc <= offset)
+		return -EINVAL;
+
+	nix_tx_head_tail_get(txq->dev, &head, &tail, txq->sq);
+
+	if (nix_offset_has_packet(head, tail, offset))
+		return RTE_ETH_TX_DESC_DONE;
+	else
+		return RTE_ETH_TX_DESC_FULL;
+}
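+
+/*
+ * Tx mirror of the Rx sketch (hypothetical application code):
+ *
+ *	int st = rte_eth_tx_descriptor_status(port_id, queue_id, off);
+ *
+ * st is RTE_ETH_TX_DESC_DONE once the slot has been sent,
+ * RTE_ETH_TX_DESC_FULL while it is still owned by hardware, or -EINVAL
+ * for an out-of-range offset.
+ */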
+
+/* It is a NOP for octeontx2 as HW frees the buffer on xmit */
+int
+otx2_nix_tx_done_cleanup(void *txq, uint32_t free_cnt)
+{
+	RTE_SET_USED(txq);
+	RTE_SET_USED(free_cnt);
+
+	return 0;
+}
+
+int
+otx2_nix_fw_version_get(struct rte_eth_dev *eth_dev, char *fw_version,
+			size_t fw_size)
+{
+	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+	int rc = (int)fw_size;
+
+	if (fw_size > sizeof(dev->mkex_pfl_name))
+		rc = sizeof(dev->mkex_pfl_name);
+
+	rc = strlcpy(fw_version, (char *)dev->mkex_pfl_name, rc);
+
+	rc += 1; /* Add the size of '\0' */
+	if (fw_size < (size_t)rc)
+		return rc;
+
+	return 0;
+}
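+
+/*
+ * Usage sketch (hypothetical): this driver reports its MKEX profile name as
+ * the "firmware version". Per the ethdev convention honoured above, a
+ * positive return value is the buffer size required, including the
+ * terminating '\0', when the caller's buffer is too small.
+ *
+ *	char fw[64];
+ *
+ *	if (rte_eth_dev_fw_version_get(port_id, fw, sizeof(fw)) == 0)
+ *		printf("fw/profile: %s\n", fw);
+ */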
+
+int
+otx2_nix_pool_ops_supported(struct rte_eth_dev *eth_dev, const char *pool)
+{
+	RTE_SET_USED(eth_dev);
+
+	if (!strcmp(pool, rte_mbuf_platform_mempool_ops()))
+		return 0;
+
+	return -ENOTSUP;
+}
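+
+/*
+ * Sketch (hypothetical): the check above means only the platform mempool
+ * ops are accepted for this port. An application can probe this via:
+ *
+ *	if (rte_eth_dev_pool_ops_supported(port_id,
+ *					   rte_mbuf_platform_mempool_ops()) >= 0)
+ *		printf("platform mempool ops usable on this port\n");
+ */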
+
+int
+otx2_nix_dev_flow_ops_get(struct rte_eth_dev *eth_dev __rte_unused,
+			  const struct rte_flow_ops **ops)
+{
+	*ops = &otx2_flow_ops;
+	return 0;
+}
+
+static struct cgx_fw_data *
+nix_get_fwdata(struct otx2_eth_dev *dev)
+{
+	struct otx2_mbox *mbox = dev->mbox;
+	struct cgx_fw_data *rsp = NULL;
+	int rc;
+
+	otx2_mbox_alloc_msg_cgx_get_aux_link_info(mbox);
+
+	rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+	if (rc) {
+		otx2_err("Failed to get fw data: %d", rc);
+		return NULL;
+	}
+
+	return rsp;
+}
+
+int
+otx2_nix_get_module_info(struct rte_eth_dev *eth_dev,
+			 struct rte_eth_dev_module_info *modinfo)
+{
+	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+	struct cgx_fw_data *rsp;
+
+	rsp = nix_get_fwdata(dev);
+	if (rsp == NULL)
+		return -EIO;
+
+	modinfo->type = rsp->fwdata.sfp_eeprom.sff_id;
+	modinfo->eeprom_len = SFP_EEPROM_SIZE;
+
+	return 0;
+}
+
+int
+otx2_nix_get_module_eeprom(struct rte_eth_dev *eth_dev,
+			   struct rte_dev_eeprom_info *info)
+{
+	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+	struct cgx_fw_data *rsp;
+
+	if (info->offset + info->length > SFP_EEPROM_SIZE)
+		return -EINVAL;
+
+	rsp = nix_get_fwdata(dev);
+	if (rsp == NULL)
+		return -EIO;
+
+	otx2_mbox_memcpy(info->data, rsp->fwdata.sfp_eeprom.buf + info->offset,
+			 info->length);
+
+	return 0;
+}
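+
+/*
+ * Combined usage sketch for the two module callbacks above (hypothetical
+ * application code; the 256-byte buffer is an assumption matching a typical
+ * SFP EEPROM size):
+ *
+ *	struct rte_eth_dev_module_info minfo;
+ *	struct rte_dev_eeprom_info einfo = {0};
+ *	uint8_t buf[256];
+ *
+ *	if (rte_eth_dev_get_module_info(port_id, &minfo) == 0 &&
+ *	    minfo.eeprom_len <= sizeof(buf)) {
+ *		einfo.data = buf;
+ *		einfo.offset = 0;
+ *		einfo.length = minfo.eeprom_len;
+ *		rte_eth_dev_get_module_eeprom(port_id, &einfo);
+ *	}
+ */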
+
+int