}
}
-void
-hns3_dev_rx_queue_release(void *queue)
+static void
+hns3_rx_queue_release_lock(void *queue)
{
struct hns3_rx_queue *rxq = queue;
struct hns3_adapter *hns;
}
void
-hns3_dev_tx_queue_release(void *queue)
+hns3_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ hns3_rx_queue_release_lock(dev->data->rx_queues[queue_id]);
+}
+
+static void
+hns3_tx_queue_release_lock(void *queue)
{
struct hns3_tx_queue *txq = queue;
struct hns3_adapter *hns;
rte_spinlock_unlock(&hns->hw.lock);
}
+void
+hns3_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ hns3_tx_queue_release_lock(dev->data->tx_queues[queue_id]);
+}
+
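For reference, the split above tracks the ethdev API change that hands the release callback the port and queue index rather than the raw queue pointer; the void-pointer *_release_lock() helpers are kept for internal callers such as the fake-queue teardown further below. A minimal sketch of how the renamed callbacks are consumed under the new signature (the ops initializer here is illustrative, not taken from this patch; the driver's real table lives in hns3_ethdev.c):

#include <ethdev_driver.h>

/* Illustrative only: ethdev now invokes these ops as (dev, queue_id). */
static const struct eth_dev_ops hns3_ops_sketch = {
	.rx_queue_release = hns3_dev_rx_queue_release,
	.tx_queue_release = hns3_dev_tx_queue_release,
};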
static void
hns3_fake_rx_queue_release(struct hns3_rx_queue *queue)
{
int i;
for (i = 0; i < hw->cfg_max_queues; i++) {
- if (hns3_dev_indep_txrx_supported(hw)) {
+ if (hns3_dev_get_support(hw, INDEP_TXRX)) {
rxq = i < nb_rx_q ? hw->data->rx_queues[i] : NULL;
txq = i < nb_tx_q ? hw->data->tx_queues[i] : NULL;
struct hns3_hw *hw = &txq->hns->hw;
uint32_t reg;
- if (hns3_dev_indep_txrx_supported(hw)) {
+ if (hns3_dev_get_support(hw, INDEP_TXRX)) {
reg = hns3_read_dev(txq, HNS3_RING_TX_EN_REG);
if (en)
reg |= BIT(HNS3_RING_EN_B);
struct hns3_hw *hw = &rxq->hns->hw;
uint32_t reg;
- if (hns3_dev_indep_txrx_supported(hw)) {
+ if (hns3_dev_get_support(hw, INDEP_TXRX)) {
reg = hns3_read_dev(rxq, HNS3_RING_RX_EN_REG);
if (en)
reg |= BIT(HNS3_RING_EN_B);
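The hns3_dev_get_support() calls replace the individual hns3_dev_*_supported() helpers with one capability test keyed by feature name. A plausible shape of that helper, assuming it checks a bit in hw->capability exactly as the old per-feature macros did (the authoritative definition lives in hns3_ethdev.h):

/* Assumed definition, shown for illustration only. */
#define hns3_dev_get_support(hw, _name) \
	hns3_get_bit((hw)->capability, HNS3_DEV_SUPPORT_##_name##_B)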
hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_RST_TRIGGER, false);
req = (struct hns3_reset_cmd *)desc.data;
- hns3_set_bit(req->mac_func_reset, HNS3_CFG_RESET_RCB_B, 1);
+ hns3_set_bit(req->fun_reset_rcb, HNS3_CFG_RESET_RCB_B, 1);
/*
* The start qid should be the global qid of the first tqp of the
/* re-configure */
rxq = hw->fkq_data.rx_queues;
for (i = nb_queues; i < old_nb_queues; i++)
- hns3_dev_rx_queue_release(rxq[i]);
+ hns3_rx_queue_release_lock(rxq[i]);
rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
RTE_CACHE_LINE_SIZE);
} else if (hw->fkq_data.rx_queues != NULL && nb_queues == 0) {
rxq = hw->fkq_data.rx_queues;
for (i = nb_queues; i < old_nb_queues; i++)
- hns3_dev_rx_queue_release(rxq[i]);
+ hns3_rx_queue_release_lock(rxq[i]);
rte_free(hw->fkq_data.rx_queues);
hw->fkq_data.rx_queues = NULL;
/* re-configure */
txq = hw->fkq_data.tx_queues;
for (i = nb_queues; i < old_nb_queues; i++)
- hns3_dev_tx_queue_release(txq[i]);
+ hns3_tx_queue_release_lock(txq[i]);
txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
RTE_CACHE_LINE_SIZE);
if (txq == NULL)
} else if (hw->fkq_data.tx_queues != NULL && nb_queues == 0) {
txq = hw->fkq_data.tx_queues;
for (i = nb_queues; i < old_nb_queues; i++)
- hns3_dev_tx_queue_release(txq[i]);
+ hns3_tx_queue_release_lock(txq[i]);
rte_free(hw->fkq_data.tx_queues);
hw->fkq_data.tx_queues = NULL;
uint16_t q;
int ret;
- if (hns3_dev_indep_txrx_supported(hw))
+ if (hns3_dev_get_support(hw, INDEP_TXRX))
return 0;
/* Setup new number of fake RX/TX queues and reconfigure device. */
conf->rx_free_thresh : HNS3_DEFAULT_RX_FREE_THRESH;
rxq->rx_deferred_start = conf->rx_deferred_start;
- if (rxq->rx_deferred_start && !hns3_dev_indep_txrx_supported(hw)) {
+ if (rxq->rx_deferred_start && !hns3_dev_get_support(hw, INDEP_TXRX)) {
hns3_warn(hw, "deferred start is not supported.");
rxq->rx_deferred_start = false;
}
HNS3_PORT_BASE_VLAN_ENABLE;
else
rxq->pvid_sw_discard_en = false;
- rxq->ptype_en = hns3_dev_rxd_adv_layout_supported(hw) ? true : false;
+ rxq->ptype_en = hns3_dev_get_support(hw, RXD_ADV_LAYOUT) ? true : false;
rxq->configured = true;
rxq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET +
idx * HNS3_TQP_REG_SIZE);
dev->rx_pkt_burst == hns3_recv_scattered_pkts ||
dev->rx_pkt_burst == hns3_recv_pkts_vec ||
dev->rx_pkt_burst == hns3_recv_pkts_vec_sve) {
- if (hns3_dev_rxd_adv_layout_supported(hw))
+ if (hns3_dev_get_support(hw, RXD_ADV_LAYOUT))
return adv_layout_ptypes;
else
return ptypes;
volatile uint32_t *reg;
uint32_t val;
- if (!hns3_dev_tx_push_supported(hw))
+ if (!hns3_dev_get_support(hw, TX_PUSH))
return;
reg = (volatile uint32_t *)hns3_tx_push_get_queue_tail_reg(dev, 0);
struct hns3_tx_queue *txq)
{
struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- if (!hns3_dev_tx_push_supported(hw)) {
+ if (!hns3_dev_get_support(hw, TX_PUSH)) {
txq->tx_push_enable = false;
return;
}
}
txq->tx_deferred_start = conf->tx_deferred_start;
- if (txq->tx_deferred_start && !hns3_dev_indep_txrx_supported(hw)) {
+ if (txq->tx_deferred_start && !hns3_dev_get_support(hw, INDEP_TXRX)) {
hns3_warn(hw, "deferred start is not supported.");
txq->tx_deferred_start = false;
}
uint64_t offloads = dev->data->dev_conf.txmode.offloads;
struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- if (hns3_dev_ptp_supported(hw))
+ if (hns3_dev_get_support(hw, PTP))
return false;
return (offloads == (offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE));
struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
int ret;
- if (!hns3_dev_indep_txrx_supported(hw))
+ if (!hns3_dev_get_support(hw, INDEP_TXRX))
return -ENOTSUP;
rte_spinlock_lock(&hw->lock);
struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct hns3_rx_queue *rxq = dev->data->rx_queues[rx_queue_id];
- if (!hns3_dev_indep_txrx_supported(hw))
+ if (!hns3_dev_get_support(hw, INDEP_TXRX))
return -ENOTSUP;
rte_spinlock_lock(&hw->lock);
struct hns3_tx_queue *txq = dev->data->tx_queues[tx_queue_id];
int ret;
- if (!hns3_dev_indep_txrx_supported(hw))
+ if (!hns3_dev_get_support(hw, INDEP_TXRX))
return -ENOTSUP;
rte_spinlock_lock(&hw->lock);
struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct hns3_tx_queue *txq = dev->data->tx_queues[tx_queue_id];
- if (!hns3_dev_indep_txrx_supported(hw))
+ if (!hns3_dev_get_support(hw, INDEP_TXRX))
return -ENOTSUP;
rte_spinlock_lock(&hw->lock);
 * If the hardware supports RXD advanced layout, the driver enables it by
 * default.
*/
- if (hns3_dev_rxd_adv_layout_supported(hw))
+ if (hns3_dev_get_support(hw, RXD_ADV_LAYOUT))
hns3_write_dev(hw, HNS3_RXD_ADV_LAYOUT_EN_REG, 1);
}
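Since the queue start/stop paths above return -ENOTSUP without the INDEP_TXRX capability, an application driving deferred-start queues has to be prepared for that result. A small illustrative caller (port_id, queue_id and the helper name are hypothetical):

#include <stdio.h>
#include <rte_ethdev.h>

/* Bring up a deferred-start Rx queue; -ENOTSUP means the port cannot
 * start/stop queues independently of the whole device. */
static int
start_deferred_rx_queue(uint16_t port_id, uint16_t queue_id)
{
	int ret = rte_eth_dev_rx_queue_start(port_id, queue_id);

	if (ret == -ENOTSUP)
		printf("port %u: per-queue start/stop not supported\n", port_id);
	return ret;
}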