static int
hns3_apply_fw_compat_cmd_result(struct hns3_hw *hw, int result)
{
- if (result != 0 && hns3_dev_copper_supported(hw)) {
+ if (result != 0 && hns3_dev_get_support(hw, COPPER)) {
hns3_err(hw, "firmware fails to initialize the PHY, ret = %d.",
result);
return result;
}
if (revision == PCI_REVISION_ID_HIP09_A) {
struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
- if (hns3_dev_copper_supported(hw) == 0 || pf->is_tmp_phy) {
+ if (hns3_dev_get_support(hw, COPPER) == 0 || pf->is_tmp_phy) {
PMD_INIT_LOG(ERR, "***use temp phy driver in dpdk***");
pf->is_tmp_phy = true;
hns3_set_bit(hw->capability,
if (is_init) {
hns3_set_bit(compat, HNS3_LINK_EVENT_REPORT_EN_B, 1);
hns3_set_bit(compat, HNS3_NCSI_ERROR_REPORT_EN_B, 0);
- if (hns3_dev_copper_supported(hw))
+ if (hns3_dev_get_support(hw, COPPER))
hns3_set_bit(compat, HNS3_FIRMWARE_PHY_DRIVER_EN_B, 1);
}
req->compat = rte_cpu_to_le_32(compat);
if (ret)
return ret;
- if (!hns3_dev_dcb_supported(hw))
+ if (!hns3_dev_get_support(hw, DCB))
return 0;
ret = hns3_dcb_ets_tc_dwrr_cfg(hw);
}
/* Only DCB-supported dev supports qset back pressure and pfc cmd */
- if (!hns3_dev_dcb_supported(hw))
+ if (!hns3_dev_get_support(hw, DCB))
return 0;
ret = hns3_pfc_setup_hw(hw);
struct hns3_hw *hw = &hns->hw;
int ret;
- if (!hns3_dev_dcb_supported(hw)) {
+ if (!hns3_dev_get_support(hw, DCB)) {
hns3_err(hw, "this port does not support dcb configurations.");
return -EOPNOTSUPP;
}
DEV_TX_OFFLOAD_MBUF_FAST_FREE |
hns3_txvlan_cap_get(hw));
- if (hns3_dev_outer_udp_cksum_supported(hw))
+ if (hns3_dev_get_support(hw, OUTER_UDP_CKSUM))
info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_UDP_CKSUM;
- if (hns3_dev_indep_txrx_supported(hw))
+ if (hns3_dev_get_support(hw, INDEP_TXRX))
info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
- if (hns3_dev_ptp_supported(hw))
+ if (hns3_dev_get_support(hw, PTP))
info->rx_offload_capa |= DEV_RX_OFFLOAD_TIMESTAMP;
info->rx_desc_lim = (struct rte_eth_desc_lim) {
switch (media_type) {
case HNS3_MEDIA_TYPE_COPPER:
- if (!hns3_dev_copper_supported(hw)) {
+ if (!hns3_dev_get_support(hw, COPPER)) {
PMD_INIT_LOG(ERR,
"Media type is copper, not supported.");
ret = -EOPNOTSUPP;
}
/* Dev does not support DCB */
- if (!hns3_dev_dcb_supported(hw)) {
+ if (!hns3_dev_get_support(hw, DCB)) {
pf->tc_max = 1;
pf->pfc_max = 0;
} else
tc_num = hns3_get_tc_num(hw);
aligned_mps = roundup(pf->mps, HNS3_BUF_SIZE_UNIT);
- if (hns3_dev_dcb_supported(hw))
+ if (hns3_dev_get_support(hw, DCB))
shared_buf_min = HNS3_BUF_MUL_BY * aligned_mps +
pf->dv_buf_size;
else
shared_buf = rounddown(rx_all - rx_priv, HNS3_BUF_SIZE_UNIT);
buf_alloc->s_buf.buf_size = shared_buf;
- if (hns3_dev_dcb_supported(hw)) {
+ if (hns3_dev_get_support(hw, DCB)) {
buf_alloc->s_buf.self.high = shared_buf - pf->dv_buf_size;
buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
- roundup(aligned_mps / HNS3_BUF_DIV_BY,
buf_alloc->s_buf.self.low = aligned_mps;
}
- if (hns3_dev_dcb_supported(hw)) {
+ if (hns3_dev_get_support(hw, DCB)) {
hi_thrd = shared_buf - pf->dv_buf_size;
if (tc_num <= NEED_RESERVE_TC_NUM)
hns3_rx_buffer_calc(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc)
{
/* When DCB is not supported, rx private buffer is not allocated. */
- if (!hns3_dev_dcb_supported(hw)) {
+ if (!hns3_dev_get_support(hw, DCB)) {
struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
struct hns3_pf *pf = &hns->pf;
uint32_t rx_all = pf->pkt_buf_size;
return ret;
}
- if (hns3_dev_dcb_supported(hw)) {
+ if (hns3_dev_get_support(hw, DCB)) {
ret = hns3_rx_priv_wl_config(hw, &pkt_buf);
if (ret) {
PMD_INIT_LOG(ERR,
struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
int ret;
- if (!hns3_dev_dcb_supported(hw)) {
+ if (!hns3_dev_get_support(hw, DCB)) {
hns3_err(hw, "This port does not support dcb configurations.");
return -EOPNOTSUPP;
}
HNS3_DEV_SUPPORT_VF_VLAN_FLT_MOD_B,
};
-#define hns3_dev_dcb_supported(hw) \
- hns3_get_bit((hw)->capability, HNS3_DEV_SUPPORT_DCB_B)
-
-/* Support copper media type */
-#define hns3_dev_copper_supported(hw) \
- hns3_get_bit((hw)->capability, HNS3_DEV_SUPPORT_COPPER_B)
-
-/* Support the queue region action rule of flow directory */
-#define hns3_dev_fd_queue_region_supported(hw) \
- hns3_get_bit((hw)->capability, HNS3_DEV_SUPPORT_FD_QUEUE_REGION_B)
-
-/* Support PTP timestamp offload */
-#define hns3_dev_ptp_supported(hw) \
- hns3_get_bit((hw)->capability, HNS3_DEV_SUPPORT_PTP_B)
-
-/* Support to Independently enable/disable/reset Tx or Rx queues */
-#define hns3_dev_indep_txrx_supported(hw) \
- hns3_get_bit((hw)->capability, HNS3_DEV_SUPPORT_INDEP_TXRX_B)
-
-#define hns3_dev_stash_supported(hw) \
- hns3_get_bit((hw)->capability, HNS3_DEV_SUPPORT_STASH_B)
-
-#define hns3_dev_rxd_adv_layout_supported(hw) \
- hns3_get_bit((hw)->capability, HNS3_DEV_SUPPORT_RXD_ADV_LAYOUT_B)
-
-#define hns3_dev_outer_udp_cksum_supported(hw) \
- hns3_get_bit((hw)->capability, HNS3_DEV_SUPPORT_OUTER_UDP_CKSUM_B)
-
-#define hns3_dev_ras_imp_supported(hw) \
- hns3_get_bit((hw)->capability, HNS3_DEV_SUPPORT_RAS_IMP_B)
-
-#define hns3_dev_tx_push_supported(hw) \
- hns3_get_bit((hw)->capability, HNS3_DEV_SUPPORT_TX_PUSH_B)
-
-#define hns3_dev_tm_supported(hw) \
- hns3_get_bit((hw)->capability, HNS3_DEV_SUPPORT_TM_B)
-
-#define hns3_dev_vf_vlan_flt_supported(hw) \
- hns3_get_bit((hw)->capability, HNS3_DEV_SUPPORT_VF_VLAN_FLT_MOD_B)
+#define hns3_dev_get_support(hw, _name) \
+ hns3_get_bit((hw)->capability, HNS3_DEV_SUPPORT_##_name##_B)
#define HNS3_DEV_PRIVATE_TO_HW(adapter) \
(&((struct hns3_adapter *)adapter)->hw)
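Since every hunk in this patch reduces to the one macro introduced above, a standalone sketch of the token-pasting pattern may make the review easier. Illustration only: the bit positions, the hns3_get_bit() stand-in and the trimmed struct hns3_hw below are placeholders rather than the driver's real definitions; the point is that hns3_dev_get_support(hw, PTP) expands to hns3_get_bit((hw)->capability, HNS3_DEV_SUPPORT_PTP_B), i.e. the same test the removed hns3_dev_ptp_supported(hw) wrapper performed.

/*
 * Standalone sketch of the unified capability macro (placeholder
 * definitions, not the driver's).
 */
#include <stdint.h>
#include <stdio.h>

/* Placeholder bit positions; the driver defines these in an enum. */
#define HNS3_DEV_SUPPORT_DCB_B	1
#define HNS3_DEV_SUPPORT_PTP_B	5

/* Stand-in for the driver's bit helper. */
#define hns3_get_bit(origin, shift)	(((origin) >> (shift)) & 0x1)

/* Same shape as the macro added by this patch. */
#define hns3_dev_get_support(hw, _name) \
	hns3_get_bit((hw)->capability, HNS3_DEV_SUPPORT_##_name##_B)

/* Trimmed stand-in for struct hns3_hw. */
struct hns3_hw {
	uint64_t capability;
};

int
main(void)
{
	struct hns3_hw hw = { .capability = 1ULL << HNS3_DEV_SUPPORT_PTP_B };

	/* Expands to hns3_get_bit((&hw)->capability, HNS3_DEV_SUPPORT_PTP_B) */
	printf("PTP: %u\n", (unsigned int)hns3_dev_get_support(&hw, PTP));
	/* Expands to hns3_get_bit((&hw)->capability, HNS3_DEV_SUPPORT_DCB_B) */
	printf("DCB: %u\n", (unsigned int)hns3_dev_get_support(&hw, DCB));
	return 0;
}

A feature name that does not paste into an existing HNS3_DEV_SUPPORT_*_B identifier fails to compile, so dropping the per-feature wrappers does not lose any name checking.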
DEV_TX_OFFLOAD_MBUF_FAST_FREE |
hns3_txvlan_cap_get(hw));
- if (hns3_dev_outer_udp_cksum_supported(hw))
+ if (hns3_dev_get_support(hw, OUTER_UDP_CKSUM))
info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_UDP_CKSUM;
- if (hns3_dev_indep_txrx_supported(hw))
+ if (hns3_dev_get_support(hw, INDEP_TXRX))
info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
uint8_t msg_data;
int ret;
- if (!hns3_dev_vf_vlan_flt_supported(hw))
+ if (!hns3_dev_get_support(hw, VF_VLAN_FLT_MOD))
return 0;
msg_data = enable ? 1 : 0;
struct hns3_hw *hw = &hns->hw;
uint16_t idx;
- if (!hns3_dev_fd_queue_region_supported(hw))
+ if (!hns3_dev_get_support(hw, FD_QUEUE_REGION))
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ACTION, action,
"Not support config queue region!");
{
struct hns3_hw *hw = &hns->hw;
- if (hns3_dev_ras_imp_supported(hw)) {
+ if (hns3_dev_get_support(hw, RAS_IMP)) {
hns3_handle_hw_error_v2(hw);
hns3_schedule_reset(hns);
} else {
{
int ret;
- if (!hns3_dev_ptp_supported(hw))
+ if (!hns3_dev_get_support(hw, PTP))
return 0;
ret = hns3_ptp_int_en(hw, true);
struct hns3_pf *pf = &hns->pf;
int ret;
- if (!hns3_dev_ptp_supported(hw))
+ if (!hns3_dev_get_support(hw, PTP))
return -ENOTSUP;
if (pf->ptp_enable)
struct hns3_pf *pf = &hns->pf;
int ret;
- if (!hns3_dev_ptp_supported(hw))
+ if (!hns3_dev_get_support(hw, PTP))
return -ENOTSUP;
if (!pf->ptp_enable)
struct hns3_pf *pf = &hns->pf;
uint64_t ns, sec;
- if (!hns3_dev_ptp_supported(hw))
+ if (!hns3_dev_get_support(hw, PTP))
return -ENOTSUP;
ns = pf->rx_timestamp & TIME_RX_STAMP_NS_MASK;
uint64_t ns;
int ts_cnt;
- if (!hns3_dev_ptp_supported(hw))
+ if (!hns3_dev_get_support(hw, PTP))
return -ENOTSUP;
ts_cnt = hns3_read_dev(hw, HNS3_TX_1588_BACK_TSP_CNT) &
struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint64_t ns, sec;
- if (!hns3_dev_ptp_supported(hw))
+ if (!hns3_dev_get_support(hw, PTP))
return -ENOTSUP;
sec = hns3_read_dev(hw, HNS3_CURR_TIME_OUT_L);
uint64_t sec = ts->tv_sec;
uint64_t ns = ts->tv_nsec;
- if (!hns3_dev_ptp_supported(hw))
+ if (!hns3_dev_get_support(hw, PTP))
return -ENOTSUP;
/* Set the timecounters to a new value. */
struct timespec cur_time;
uint64_t ns;
- if (!hns3_dev_ptp_supported(hw))
+ if (!hns3_dev_get_support(hw, PTP))
return -ENOTSUP;
(void)hns3_timesync_read_time(dev, &cur_time);
bool en = pf->ptp_enable;
int ret;
- if (!hns3_dev_ptp_supported(hw))
+ if (!hns3_dev_get_support(hw, PTP))
return 0;
ret = hns3_timesync_configure(hns, en);
int i;
for (i = 0; i < hw->cfg_max_queues; i++) {
- if (hns3_dev_indep_txrx_supported(hw)) {
+ if (hns3_dev_get_support(hw, INDEP_TXRX)) {
rxq = i < nb_rx_q ? hw->data->rx_queues[i] : NULL;
txq = i < nb_tx_q ? hw->data->tx_queues[i] : NULL;
struct hns3_hw *hw = &txq->hns->hw;
uint32_t reg;
- if (hns3_dev_indep_txrx_supported(hw)) {
+ if (hns3_dev_get_support(hw, INDEP_TXRX)) {
reg = hns3_read_dev(txq, HNS3_RING_TX_EN_REG);
if (en)
reg |= BIT(HNS3_RING_EN_B);
struct hns3_hw *hw = &rxq->hns->hw;
uint32_t reg;
- if (hns3_dev_indep_txrx_supported(hw)) {
+ if (hns3_dev_get_support(hw, INDEP_TXRX)) {
reg = hns3_read_dev(rxq, HNS3_RING_RX_EN_REG);
if (en)
reg |= BIT(HNS3_RING_EN_B);
uint16_t q;
int ret;
- if (hns3_dev_indep_txrx_supported(hw))
+ if (hns3_dev_get_support(hw, INDEP_TXRX))
return 0;
/* Setup new number of fake RX/TX queues and reconfigure device. */
conf->rx_free_thresh : HNS3_DEFAULT_RX_FREE_THRESH;
rxq->rx_deferred_start = conf->rx_deferred_start;
- if (rxq->rx_deferred_start && !hns3_dev_indep_txrx_supported(hw)) {
+ if (rxq->rx_deferred_start && !hns3_dev_get_support(hw, INDEP_TXRX)) {
hns3_warn(hw, "deferred start is not supported.");
rxq->rx_deferred_start = false;
}
HNS3_PORT_BASE_VLAN_ENABLE;
else
rxq->pvid_sw_discard_en = false;
- rxq->ptype_en = hns3_dev_rxd_adv_layout_supported(hw) ? true : false;
+ rxq->ptype_en = hns3_dev_get_support(hw, RXD_ADV_LAYOUT) ? true : false;
rxq->configured = true;
rxq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET +
idx * HNS3_TQP_REG_SIZE);
dev->rx_pkt_burst == hns3_recv_scattered_pkts ||
dev->rx_pkt_burst == hns3_recv_pkts_vec ||
dev->rx_pkt_burst == hns3_recv_pkts_vec_sve) {
- if (hns3_dev_rxd_adv_layout_supported(hw))
+ if (hns3_dev_get_support(hw, RXD_ADV_LAYOUT))
return adv_layout_ptypes;
else
return ptypes;
volatile uint32_t *reg;
uint32_t val;
- if (!hns3_dev_tx_push_supported(hw))
+ if (!hns3_dev_get_support(hw, TX_PUSH))
return;
reg = (volatile uint32_t *)hns3_tx_push_get_queue_tail_reg(dev, 0);
struct hns3_tx_queue *txq)
{
struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- if (!hns3_dev_tx_push_supported(hw)) {
+ if (!hns3_dev_get_support(hw, TX_PUSH)) {
txq->tx_push_enable = false;
return;
}
}
txq->tx_deferred_start = conf->tx_deferred_start;
- if (txq->tx_deferred_start && !hns3_dev_indep_txrx_supported(hw)) {
+ if (txq->tx_deferred_start && !hns3_dev_get_support(hw, INDEP_TXRX)) {
hns3_warn(hw, "deferred start is not supported.");
txq->tx_deferred_start = false;
}
uint64_t offloads = dev->data->dev_conf.txmode.offloads;
struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- if (hns3_dev_ptp_supported(hw))
+ if (hns3_dev_get_support(hw, PTP))
return false;
return (offloads == (offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE));
struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
int ret;
- if (!hns3_dev_indep_txrx_supported(hw))
+ if (!hns3_dev_get_support(hw, INDEP_TXRX))
return -ENOTSUP;
rte_spinlock_lock(&hw->lock);
struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct hns3_rx_queue *rxq = dev->data->rx_queues[rx_queue_id];
- if (!hns3_dev_indep_txrx_supported(hw))
+ if (!hns3_dev_get_support(hw, INDEP_TXRX))
return -ENOTSUP;
rte_spinlock_lock(&hw->lock);
struct hns3_tx_queue *txq = dev->data->tx_queues[tx_queue_id];
int ret;
- if (!hns3_dev_indep_txrx_supported(hw))
+ if (!hns3_dev_get_support(hw, INDEP_TXRX))
return -ENOTSUP;
rte_spinlock_lock(&hw->lock);
struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct hns3_tx_queue *txq = dev->data->tx_queues[tx_queue_id];
- if (!hns3_dev_indep_txrx_supported(hw))
+ if (!hns3_dev_get_support(hw, INDEP_TXRX))
return -ENOTSUP;
rte_spinlock_lock(&hw->lock);
 * If the hardware supports the rxd advanced layout, the driver enables it
 * by default.
*/
- if (hns3_dev_rxd_adv_layout_supported(hw))
+ if (hns3_dev_get_support(hw, RXD_ADV_LAYOUT))
hns3_write_dev(hw, HNS3_RXD_ADV_LAYOUT_EN_REG, 1);
}
struct rte_eth_txmode *txmode = &dev->data->dev_conf.txmode;
struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- if (hns3_dev_ptp_supported(hw))
+ if (hns3_dev_get_support(hw, PTP))
return -ENOTSUP;
/* Only support DEV_TX_OFFLOAD_MBUF_FAST_FREE */
DEV_RX_OFFLOAD_VLAN;
struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- if (hns3_dev_ptp_supported(hw))
+ if (hns3_dev_get_support(hw, PTP))
return -ENOTSUP;
if (dev->data->scattered_rx)
struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint32_t max_tx_queues = hns3_tm_max_tx_queues_get(dev);
- if (!hns3_dev_tm_supported(hw))
+ if (!hns3_dev_get_support(hw, TM))
return;
pf->tm_conf.nb_leaf_nodes_max = max_tx_queues;
struct hns3_tm_shaper_profile *shaper_profile;
struct hns3_tm_node *tm_node;
- if (!hns3_dev_tm_supported(hw))
+ if (!hns3_dev_get_support(hw, TM))
return;
if (pf->tm_conf.nb_queue_node > 0) {
if (arg == NULL)
return -EINVAL;
- if (!hns3_dev_tm_supported(hw))
+ if (!hns3_dev_get_support(hw, TM))
return -EOPNOTSUPP;
*(const void **)arg = &hns3_tm_ops;
{
struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
- if (!hns3_dev_tm_supported(hw))
+ if (!hns3_dev_get_support(hw, TM))
return;
if (pf->tm_conf.root && !pf->tm_conf.committed)
struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
struct rte_tm_error error;
- if (!hns3_dev_tm_supported(hw))
+ if (!hns3_dev_get_support(hw, TM))
return 0;
if (pf->tm_conf.root == NULL || !pf->tm_conf.committed)