HNS3_DEV_SUPPORT_COPPER_B,
HNS3_DEV_SUPPORT_FD_QUEUE_REGION_B,
HNS3_DEV_SUPPORT_PTP_B,
+ HNS3_DEV_SUPPORT_TX_PUSH_B,
HNS3_DEV_SUPPORT_INDEP_TXRX_B,
HNS3_DEV_SUPPORT_STASH_B,
HNS3_DEV_SUPPORT_RXD_ADV_LAYOUT_B,
#define hns3_dev_ras_imp_supported(hw) \
hns3_get_bit((hw)->capability, HNS3_DEV_SUPPORT_RAS_IMP_B)
+#define hns3_dev_tx_push_supported(hw) \
+ hns3_get_bit((hw)->capability, HNS3_DEV_SUPPORT_TX_PUSH_B)
+
#define HNS3_DEV_PRIVATE_TO_HW(adapter) \
(&((struct hns3_adapter *)adapter)->hw)
#define HNS3_DEV_PRIVATE_TO_PF(adapter) \
return 0;
}
+static void *
+hns3_tx_push_get_queue_tail_reg(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+#define HNS3_TX_PUSH_TQP_REGION_SIZE 0x10000
+#define HNS3_TX_PUSH_QUICK_DOORBELL_OFFSET 64
+#define HNS3_TX_PUSH_PCI_BAR_INDEX 4
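+/* The Tx push doorbells live in bar45, exposed as PCI memory resource 4. */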
+
+ struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
+ uint8_t bar_id = HNS3_TX_PUSH_PCI_BAR_INDEX;
+
+ /*
+ * If the device supports Tx push, its PCIe bar45 must exist, and the
+ * DPDK framework mmaps bar45 by default in the PCI probe stage.
+ *
+ * In bar45, the first half is for RoCE (RDMA over Converged Ethernet)
+ * and the second half is for the NIC; every TQP occupies 64KB.
+ *
+ * The quick doorbell is located at a 64B offset within each TQP region.
+ */
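+ /*
+ * The address below is therefore: bar45 base + half of the bar45 length
+ * (the start of the NIC half) + queue_id * 64KB (this queue's TQP
+ * region) + 64B (the quick doorbell offset).
+ */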
+ return (char *)pci_dev->mem_resource[bar_id].addr +
+ (pci_dev->mem_resource[bar_id].len >> 1) +
+ HNS3_TX_PUSH_TQP_REGION_SIZE * queue_id +
+ HNS3_TX_PUSH_QUICK_DOORBELL_OFFSET;
+}
+
+void
+hns3_tx_push_init(struct rte_eth_dev *dev)
+{
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ volatile uint32_t *reg;
+ uint32_t val;
+
+ if (!hns3_dev_tx_push_supported(hw))
+ return;
+
+ reg = (volatile uint32_t *)hns3_tx_push_get_queue_tail_reg(dev, 0);
+ /*
+ * Because bar45 is about 8GB in size, the page fault in the Tx path may
+ * take a long time when working with vfio-pci, so issue one read here
+ * to make the kernel set up the page table mapping for bar45 in the
+ * init stage.
+ * Note: bar45 is readable, but the result is all 1s.
+ */
+ val = *reg;
+ RTE_SET_USED(val);
+}
+
+static void
+hns3_tx_push_queue_init(struct rte_eth_dev *dev,
+ uint16_t queue_id,
+ struct hns3_tx_queue *txq)
+{
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (!hns3_dev_tx_push_supported(hw)) {
+ txq->tx_push_enable = false;
+ return;
+ }
+
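+ /* Point the queue's tail register at the quick doorbell in bar45. */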
+ txq->io_tail_reg = (volatile void *)hns3_tx_push_get_queue_tail_reg(dev,
+ queue_id);
+ txq->tx_push_enable = true;
+}
+
int
hns3_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
unsigned int socket_id, const struct rte_eth_txconf *conf)
memset(&txq->basic_stats, 0, sizeof(struct hns3_tx_basic_stats));
memset(&txq->dfx_stats, 0, sizeof(struct hns3_tx_dfx_stats));
+ /*
+ * Call hns3_tx_push_queue_init after the io_tail_reg field has been
+ * assigned, because it may overwrite that field.
+ */
+ hns3_tx_push_queue_init(dev, idx, txq);
+
rte_spinlock_lock(&hw->lock);
dev->data->tx_queues[idx] = txq;
rte_spinlock_unlock(&hw->lock);
hns3_tx_fill_hw_ring(txq, tx_pkts + nb_tx, nb_pkts - nb_tx);
txq->next_to_use += nb_pkts - nb_tx;
- hns3_write_reg_opt(txq->io_tail_reg, nb_pkts);
+ hns3_write_txq_tail_reg(txq, nb_pkts);
return nb_pkts;
}
end_of_tx:
if (likely(nb_tx))
- hns3_write_reg_opt(txq->io_tail_reg, nb_hold);
+ hns3_write_txq_tail_reg(txq, nb_hold);
return nb_tx;
}
};
struct hns3_tx_queue {
+ /* io_tail_reg is write-only when working in Tx push mode */
volatile void *io_tail_reg;
struct hns3_desc *tx_ring;
struct hns3_entry *sw_ring;
return ptype_tbl->l3table[l3id] | ptype_tbl->l4table[l4id];
}
+/*
+ * If the Tx push feature is enabled and the device supports it, use the
+ * quick doorbell (bar45) to notify the hardware.
+ *
+ * Otherwise (e.g. the device does not support it or the user has not
+ * enabled it), use the normal doorbell (bar23) to notify the hardware.
+ */
+static inline void
+hns3_write_txq_tail_reg(struct hns3_tx_queue *txq, uint32_t value)
+{
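+ /*
+ * Ensure all descriptor and ring memory writes are visible before the
+ * doorbell write below.
+ */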
+ rte_io_wmb();
+ if (txq->tx_push_enable)
+ rte_write64_relaxed(rte_cpu_to_le_32(value), txq->io_tail_reg);
+ else
+ rte_write32_relaxed(rte_cpu_to_le_32(value), txq->io_tail_reg);
+}
+
void hns3_dev_rx_queue_release(void *queue);
void hns3_dev_tx_queue_release(void *queue);
void hns3_free_all_queues(struct rte_eth_dev *dev);
void hns3_enable_rxd_adv_layout(struct hns3_hw *hw);
int hns3_dev_rx_descriptor_status(void *rx_queue, uint16_t offset);
int hns3_dev_tx_descriptor_status(void *tx_queue, uint16_t offset);
+void hns3_tx_push_init(struct rte_eth_dev *dev);
#endif /* _HNS3_RXTX_H_ */