net/i40e/base: update FW API version to 1.15
diff --git a/drivers/net/hns3/hns3_rxtx.c b/drivers/net/hns3/hns3_rxtx.c
index 0f222b3..70de0d2 100644
--- a/drivers/net/hns3/hns3_rxtx.c
+++ b/drivers/net/hns3/hns3_rxtx.c
@@ -108,8 +108,8 @@ hns3_tx_queue_release(void *queue)
 	}
 }
 
-void
-hns3_dev_rx_queue_release(void *queue)
+static void
+hns3_rx_queue_release_lock(void *queue)
 {
 	struct hns3_rx_queue *rxq = queue;
 	struct hns3_adapter *hns;
@@ -124,7 +124,13 @@ hns3_dev_rx_queue_release(void *queue)
 }
 
 void
-hns3_dev_tx_queue_release(void *queue)
+hns3_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+	hns3_rx_queue_release_lock(dev->data->rx_queues[queue_id]);
+}
+
+static void
+hns3_tx_queue_release_lock(void *queue)
 {
 	struct hns3_tx_queue *txq = queue;
 	struct hns3_adapter *hns;
@@ -138,6 +144,12 @@ hns3_dev_tx_queue_release(void *queue)
 	rte_spinlock_unlock(&hns->hw.lock);
 }
 
+void
+hns3_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+	hns3_tx_queue_release_lock(dev->data->tx_queues[queue_id]);
+}
+
 static void
 hns3_fake_rx_queue_release(struct hns3_rx_queue *queue)
 {
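The three hunks above adapt hns3 to the ethdev driver API change (DPDK 21.11) that gave the queue-release callbacks a (struct rte_eth_dev *dev, uint16_t queue_id) signature: each old void-pointer body survives as a static *_release_lock() helper for internal callers, and a thin public wrapper resolves the queue from dev->data. A minimal sketch of the pattern, using hypothetical drv_ names and assuming driver-side code:

#include <ethdev_driver.h>	/* driver-side view of struct rte_eth_dev */

/* Old-style body: still takes the bare queue pointer so that internal
 * callers (e.g. fake-queue teardown paths) can keep using it. */
static void
drv_rx_queue_release_lock(void *queue)
{
	/* ... lock the adapter, free mbufs and ring memory, free queue ... */
}

/* New-style ethdev callback: receives port and queue index and looks
 * the queue pointer up in dev->data before delegating. */
static void
drv_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t queue_id)
{
	drv_rx_queue_release_lock(dev->data->rx_queues[queue_id]);
}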
@@ -381,7 +393,7 @@ hns3_enable_all_queues(struct hns3_hw *hw, bool en)
 	int i;
 
 	for (i = 0; i < hw->cfg_max_queues; i++) {
-		if (hns3_dev_indep_txrx_supported(hw)) {
+		if (hns3_dev_get_support(hw, INDEP_TXRX)) {
 			rxq = i < nb_rx_q ? hw->data->rx_queues[i] : NULL;
 			txq = i < nb_tx_q ? hw->data->tx_queues[i] : NULL;
 
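From this hunk on, the per-feature predicates (hns3_dev_indep_txrx_supported(), hns3_dev_rxd_adv_layout_supported(), hns3_dev_tx_push_supported(), hns3_dev_ptp_supported()) give way to a single hns3_dev_get_support(hw, FEATURE) lookup. A minimal sketch of how such a macro can sit on a capability bitmap; the struct stub and bit positions below are illustrative assumptions, not the driver's actual layout:

#include <stdint.h>

/* Illustrative stub: the real struct hns3_hw carries far more state. */
struct hns3_hw {
	uint64_t capability;	/* one bit per optional hardware feature */
};

/* Hypothetical bit positions; the driver defines the real ones. */
#define HNS3_DEV_SUPPORT_INDEP_TXRX_B		4
#define HNS3_DEV_SUPPORT_RXD_ADV_LAYOUT_B	6

#define hns3_get_bit(origin, shift)	(((origin) >> (shift)) & 0x1UL)

/* Token pasting turns hns3_dev_get_support(hw, INDEP_TXRX) into a test
 * of the HNS3_DEV_SUPPORT_INDEP_TXRX_B bit of hw->capability. */
#define hns3_dev_get_support(hw, _name) \
	hns3_get_bit((hw)->capability, HNS3_DEV_SUPPORT_##_name##_B)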
@@ -426,7 +438,7 @@ hns3_enable_txq(struct hns3_tx_queue *txq, bool en)
 	struct hns3_hw *hw = &txq->hns->hw;
 	uint32_t reg;
 
-	if (hns3_dev_indep_txrx_supported(hw)) {
+	if (hns3_dev_get_support(hw, INDEP_TXRX)) {
 		reg = hns3_read_dev(txq, HNS3_RING_TX_EN_REG);
 		if (en)
 			reg |= BIT(HNS3_RING_EN_B);
@@ -443,7 +455,7 @@ hns3_enable_rxq(struct hns3_rx_queue *rxq, bool en)
 	struct hns3_hw *hw = &rxq->hns->hw;
 	uint32_t reg;
 
-	if (hns3_dev_indep_txrx_supported(hw)) {
+	if (hns3_dev_get_support(hw, INDEP_TXRX)) {
 		reg = hns3_read_dev(rxq, HNS3_RING_RX_EN_REG);
 		if (en)
 			reg |= BIT(HNS3_RING_EN_B);
@@ -697,7 +709,7 @@ hns3_reset_rcb_cmd(struct hns3_hw *hw, uint8_t *reset_status)
 
 	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_RST_TRIGGER, false);
 	req = (struct hns3_reset_cmd *)desc.data;
-	hns3_set_bit(req->mac_func_reset, HNS3_CFG_RESET_RCB_B, 1);
+	hns3_set_bit(req->fun_reset_rcb, HNS3_CFG_RESET_RCB_B, 1);
 
 	/*
 	 * The start qid should be the global qid of the first tqp of the
@@ -1536,7 +1548,7 @@ hns3_fake_rx_queue_config(struct hns3_hw *hw, uint16_t nb_queues)
 		/* re-configure */
 		rxq = hw->fkq_data.rx_queues;
 		for (i = nb_queues; i < old_nb_queues; i++)
-			hns3_dev_rx_queue_release(rxq[i]);
+			hns3_rx_queue_release_lock(rxq[i]);
 
 		rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
 				  RTE_CACHE_LINE_SIZE);
@@ -1551,7 +1563,7 @@ hns3_fake_rx_queue_config(struct hns3_hw *hw, uint16_t nb_queues)
 	} else if (hw->fkq_data.rx_queues != NULL && nb_queues == 0) {
 		rxq = hw->fkq_data.rx_queues;
 		for (i = nb_queues; i < old_nb_queues; i++)
-			hns3_dev_rx_queue_release(rxq[i]);
+			hns3_rx_queue_release_lock(rxq[i]);
 
 		rte_free(hw->fkq_data.rx_queues);
 		hw->fkq_data.rx_queues = NULL;
@@ -1583,7 +1595,7 @@ hns3_fake_tx_queue_config(struct hns3_hw *hw, uint16_t nb_queues)
 		/* re-configure */
 		txq = hw->fkq_data.tx_queues;
 		for (i = nb_queues; i < old_nb_queues; i++)
-			hns3_dev_tx_queue_release(txq[i]);
+			hns3_tx_queue_release_lock(txq[i]);
 		txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
 				  RTE_CACHE_LINE_SIZE);
 		if (txq == NULL)
@@ -1597,7 +1609,7 @@ hns3_fake_tx_queue_config(struct hns3_hw *hw, uint16_t nb_queues)
 	} else if (hw->fkq_data.tx_queues != NULL && nb_queues == 0) {
 		txq = hw->fkq_data.tx_queues;
 		for (i = nb_queues; i < old_nb_queues; i++)
-			hns3_dev_tx_queue_release(txq[i]);
+			hns3_tx_queue_release_lock(txq[i]);
 
 		rte_free(hw->fkq_data.tx_queues);
 		hw->fkq_data.tx_queues = NULL;
@@ -1618,7 +1630,7 @@ hns3_set_fake_rx_or_tx_queues(struct rte_eth_dev *dev, uint16_t nb_rx_q,
 	uint16_t q;
 	int ret;
 
-	if (hns3_dev_indep_txrx_supported(hw))
+	if (hns3_dev_get_support(hw, INDEP_TXRX))
 		return 0;
 
 	/* Setup new number of fake RX/TX queues and reconfigure device. */
@@ -1862,7 +1874,7 @@ hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
 			    conf->rx_free_thresh : HNS3_DEFAULT_RX_FREE_THRESH;
 
 	rxq->rx_deferred_start = conf->rx_deferred_start;
-	if (rxq->rx_deferred_start && !hns3_dev_indep_txrx_supported(hw)) {
+	if (rxq->rx_deferred_start && !hns3_dev_get_support(hw, INDEP_TXRX)) {
 		hns3_warn(hw, "deferred start is not supported.");
 		rxq->rx_deferred_start = false;
 	}
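rx_deferred_start asks the PMD to leave this queue stopped when rte_eth_dev_start() runs; that only works when queues can be started individually later, hence the INDEP_TXRX gate above. An application-side sketch of requesting it (helper name and parameters are illustrative):

#include <rte_ethdev.h>

/* Illustrative helper: set up Rx queue `qid` so it stays stopped until
 * rte_eth_dev_rx_queue_start() is called explicitly. */
static int
setup_deferred_rx_queue(uint16_t port_id, uint16_t qid, uint16_t nb_rxd,
			struct rte_mempool *mp)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_rxconf rxconf;
	int ret;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	rxconf = dev_info.default_rxconf;
	rxconf.rx_deferred_start = 1;	/* skip in rte_eth_dev_start() */

	return rte_eth_rx_queue_setup(port_id, qid, nb_rxd,
				      rte_eth_dev_socket_id(port_id),
				      &rxconf, mp);
}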
@@ -1898,7 +1910,7 @@ hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
 				       HNS3_PORT_BASE_VLAN_ENABLE;
 	else
 		rxq->pvid_sw_discard_en = false;
-	rxq->ptype_en = hns3_dev_rxd_adv_layout_supported(hw) ? true : false;
+	rxq->ptype_en = hns3_dev_get_support(hw, RXD_ADV_LAYOUT) ? true : false;
 	rxq->configured = true;
 	rxq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET +
 				idx * HNS3_TQP_REG_SIZE);
@@ -2026,7 +2038,7 @@ hns3_dev_supported_ptypes_get(struct rte_eth_dev *dev)
 	    dev->rx_pkt_burst == hns3_recv_scattered_pkts ||
 	    dev->rx_pkt_burst == hns3_recv_pkts_vec ||
 	    dev->rx_pkt_burst == hns3_recv_pkts_vec_sve) {
-		if (hns3_dev_rxd_adv_layout_supported(hw))
+		if (hns3_dev_get_support(hw, RXD_ADV_LAYOUT))
 			return adv_layout_ptypes;
 		else
 			return ptypes;
@@ -2928,7 +2940,7 @@ hns3_tx_push_init(struct rte_eth_dev *dev)
 	volatile uint32_t *reg;
 	uint32_t val;
 
-	if (!hns3_dev_tx_push_supported(hw))
+	if (!hns3_dev_get_support(hw, TX_PUSH))
 		return;
 
 	reg = (volatile uint32_t *)hns3_tx_push_get_queue_tail_reg(dev, 0);
@@ -2949,7 +2961,7 @@ hns3_tx_push_queue_init(struct rte_eth_dev *dev,
 			struct hns3_tx_queue *txq)
 {
 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	if (!hns3_dev_tx_push_supported(hw)) {
+	if (!hns3_dev_get_support(hw, TX_PUSH)) {
 		txq->tx_push_enable = false;
 		return;
 	}
@@ -2994,7 +3006,7 @@ hns3_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
 	}
 
 	txq->tx_deferred_start = conf->tx_deferred_start;
-	if (txq->tx_deferred_start && !hns3_dev_indep_txrx_supported(hw)) {
+	if (txq->tx_deferred_start && !hns3_dev_get_support(hw, INDEP_TXRX)) {
 		hns3_warn(hw, "deferred start is not supported.");
 		txq->tx_deferred_start = false;
 	}
@@ -4276,7 +4288,7 @@ hns3_tx_check_simple_support(struct rte_eth_dev *dev)
 	uint64_t offloads = dev->data->dev_conf.txmode.offloads;
 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
-	if (hns3_dev_ptp_supported(hw))
+	if (hns3_dev_get_support(hw, PTP))
 		return false;
 
 	return (offloads == (offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE));
@@ -4437,7 +4449,7 @@ hns3_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
 	int ret;
 
-	if (!hns3_dev_indep_txrx_supported(hw))
+	if (!hns3_dev_get_support(hw, INDEP_TXRX))
 		return -ENOTSUP;
 
 	rte_spinlock_lock(&hw->lock);
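This hunk and the three that follow guard the per-queue start/stop entry points; on hardware without INDEP_TXRX they return -ENOTSUP, which reaches the application through rte_eth_dev_rx_queue_start() and friends. An illustrative caller:

#include <errno.h>
#include <stdio.h>
#include <rte_ethdev.h>

/* Illustrative: bring up one deferred-start Rx queue after the port is
 * running, tolerating ports that cannot start queues independently. */
static int
start_one_rx_queue(uint16_t port_id, uint16_t queue_id)
{
	int ret = rte_eth_dev_rx_queue_start(port_id, queue_id);

	if (ret == -ENOTSUP)
		fprintf(stderr, "port %u: per-queue start/stop not supported\n",
			port_id);
	return ret;
}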
@@ -4483,7 +4495,7 @@ hns3_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct hns3_rx_queue *rxq = dev->data->rx_queues[rx_queue_id];
 
-	if (!hns3_dev_indep_txrx_supported(hw))
+	if (!hns3_dev_get_support(hw, INDEP_TXRX))
 		return -ENOTSUP;
 
 	rte_spinlock_lock(&hw->lock);
@@ -4505,7 +4517,7 @@ hns3_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 	struct hns3_tx_queue *txq = dev->data->tx_queues[tx_queue_id];
 	int ret;
 
-	if (!hns3_dev_indep_txrx_supported(hw))
+	if (!hns3_dev_get_support(hw, INDEP_TXRX))
 		return -ENOTSUP;
 
 	rte_spinlock_lock(&hw->lock);
@@ -4531,7 +4543,7 @@ hns3_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct hns3_tx_queue *txq = dev->data->tx_queues[tx_queue_id];
 
-	if (!hns3_dev_indep_txrx_supported(hw))
+	if (!hns3_dev_get_support(hw, INDEP_TXRX))
 		return -ENOTSUP;
 
 	rte_spinlock_lock(&hw->lock);
@@ -4704,7 +4716,7 @@ hns3_enable_rxd_adv_layout(struct hns3_hw *hw)
 	 * If the hardware support rxd advanced layout, then driver enable it
 	 * default.
 	 */
-	if (hns3_dev_rxd_adv_layout_supported(hw))
+	if (hns3_dev_get_support(hw, RXD_ADV_LAYOUT))
 		hns3_write_dev(hw, HNS3_RXD_ADV_LAYOUT_EN_REG, 1);
 }