HNS3_CFG_DESC_NUM(txq->nb_tx_desc));
}
-static void
+void
hns3_enable_all_queues(struct hns3_hw *hw, bool en)
{
uint16_t nb_rx_q = hw->data->nb_rx_queues;
}
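+/*
+ * Program the GL (gap limiting) register of the given queue's interrupt
+ * vector with an interrupt coalescing time in microseconds. gl_idx selects
+ * one of the GL0/GL1/GL2 registers; out-of-range arguments are ignored.
+ */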
void
-hns3_tqp_intr_enable(struct hns3_hw *hw, uint16_t tpq_int_num, bool en)
+hns3_set_queue_intr_gl(struct hns3_hw *hw, uint16_t queue_id,
+ uint8_t gl_idx, uint16_t gl_value)
+{
+ uint32_t offset[] = {HNS3_TQP_INTR_GL0_REG,
+ HNS3_TQP_INTR_GL1_REG,
+ HNS3_TQP_INTR_GL2_REG};
+ uint32_t addr, value;
+
+ if (gl_idx >= RTE_DIM(offset) || gl_value > HNS3_TQP_INTR_GL_MAX)
+ return;
+
+ addr = offset[gl_idx] + queue_id * HNS3_TQP_INTR_REG_SIZE;
+ value = HNS3_GL_USEC_TO_REG(gl_value);
+
+ hns3_write_dev(hw, addr, value);
+}
+
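+/*
+ * Program the RL (rate limiting) register of the given queue's interrupt
+ * vector with a value in microseconds; a non-zero value also sets the RL
+ * enable bit in the register.
+ */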
+void
+hns3_set_queue_intr_rl(struct hns3_hw *hw, uint16_t queue_id, uint16_t rl_value)
+{
+ uint32_t addr, value;
+
+ if (rl_value > HNS3_TQP_INTR_RL_MAX)
+ return;
+
+ addr = HNS3_TQP_INTR_RL_REG + queue_id * HNS3_TQP_INTR_REG_SIZE;
+ value = HNS3_RL_USEC_TO_REG(rl_value);
+ if (value > 0)
+ value |= HNS3_TQP_INTR_RL_ENABLE_MASK;
+
+ hns3_write_dev(hw, addr, value);
+}
+
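+/* Enable or disable the interrupt vector bound to the given queue. */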
+static void
+hns3_queue_intr_enable(struct hns3_hw *hw, uint16_t queue_id, bool en)
{
uint32_t addr, value;
- addr = HNS3_TQP_INTR_CTRL_REG + tpq_int_num * HNS3_VECTOR_REG_OFFSET;
+ addr = HNS3_TQP_INTR_CTRL_REG + queue_id * HNS3_TQP_INTR_REG_SIZE;
value = en ? 1 : 0;
hns3_write_dev(hw, addr, value);
}
+/*
+ * Enable all Rx queue interrupts when in interrupt Rx mode.
+ * This function is called before enabling queue Rx & Tx (in normal start or
+ * reset recovery scenarios) to restore the hardware Rx queue interrupt
+ * enable bits, which are cleared by FLR.
+ */
+void
+hns3_dev_all_rx_queue_intr_enable(struct hns3_hw *hw, bool en)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id];
+ uint16_t nb_rx_q = hw->data->nb_rx_queues;
+ int i;
+
+ if (dev->data->dev_conf.intr_conf.rxq == 0)
+ return;
+
+ for (i = 0; i < nb_rx_q; i++)
+ hns3_queue_intr_enable(hw, i, en);
+}
+
int
hns3_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
{
if (dev->data->dev_conf.intr_conf.rxq == 0)
return -ENOTSUP;
- /* enable the vectors */
- hns3_tqp_intr_enable(hw, queue_id, true);
+ hns3_queue_intr_enable(hw, queue_id, true);
return rte_intr_ack(intr_handle);
}
if (dev->data->dev_conf.intr_conf.rxq == 0)
return -ENOTSUP;
- /* disable the vectors */
- hns3_tqp_intr_enable(hw, queue_id, false);
+ hns3_queue_intr_enable(hw, queue_id, false);
return 0;
}
hns3_init_tx_ring_tc(hns);
}
+/*
+ * Start all queues.
+ * Note: this only initializes and sets up the queues; it does not enable
+ * queue Rx & Tx.
+ */
int
hns3_start_queues(struct hns3_adapter *hns, bool reset_queue)
{
}
hns3_start_tx_queues(hns);
- hns3_enable_all_queues(hw, true);
return 0;
}
if (hw->fkq_data.rx_queues == NULL && nb_queues != 0) {
/* first time configuration */
-
uint32_t size;
size = sizeof(hw->fkq_data.rx_queues[0]) * nb_queues;
hw->fkq_data.rx_queues = rte_zmalloc("fake_rx_queues", size,
}
} else if (hw->fkq_data.rx_queues != NULL && nb_queues != 0) {
/* re-configure */
-
rxq = hw->fkq_data.rx_queues;
for (i = nb_queues; i < old_nb_queues; i++)
hns3_dev_rx_queue_release(rxq[i]);
if (hw->fkq_data.tx_queues == NULL && nb_queues != 0) {
/* first time configuration */
-
uint32_t size;
size = sizeof(hw->fkq_data.tx_queues[0]) * nb_queues;
hw->fkq_data.tx_queues = rte_zmalloc("fake_tx_queues", size,
}
} else if (hw->fkq_data.tx_queues != NULL && nb_queues != 0) {
/* re-configure */
-
txq = hw->fkq_data.tx_queues;
for (i = nb_queues; i < old_nb_queues; i++)
hns3_dev_tx_queue_release(txq[i]);
static const uint32_t l2table[HNS3_L2TBL_NUM] = {
RTE_PTYPE_L2_ETHER,
- RTE_PTYPE_L2_ETHER_VLAN,
RTE_PTYPE_L2_ETHER_QINQ,
- 0
+ RTE_PTYPE_L2_ETHER_VLAN,
+ RTE_PTYPE_L2_ETHER_VLAN
};
static const uint32_t l3table[HNS3_L3TBL_NUM] = {
}
}
+static inline void
+hns3_rxd_to_vlan_tci(struct rte_eth_dev *dev, struct rte_mbuf *mb,
+ uint32_t l234_info, const struct hns3_desc *rxd)
+{
+#define HNS3_STRP_STATUS_NUM 0x4
+
+#define HNS3_NO_STRP_VLAN_VLD 0x0
+#define HNS3_INNER_STRP_VLAN_VLD 0x1
+#define HNS3_OUTER_STRP_VLAN_VLD 0x2
+ struct hns3_adapter *hns = dev->data->dev_private;
+ struct hns3_hw *hw = &hns->hw;
+ uint32_t strip_status;
+ uint32_t report_mode;
+
+ /*
+ * Due to a hardware limitation, the VLAN tag is always written into the
+ * Rx descriptor when the tag is stripped from the packet, so the driver
+ * must decide which tag to report to the mbuf based on the PVID
+ * configuration and the VLAN strip status.
+ */
+ static const uint32_t report_type[][HNS3_STRP_STATUS_NUM] = {
+ {
+ HNS3_NO_STRP_VLAN_VLD,
+ HNS3_OUTER_STRP_VLAN_VLD,
+ HNS3_INNER_STRP_VLAN_VLD,
+ HNS3_OUTER_STRP_VLAN_VLD
+ },
+ {
+ HNS3_NO_STRP_VLAN_VLD,
+ HNS3_NO_STRP_VLAN_VLD,
+ HNS3_NO_STRP_VLAN_VLD,
+ HNS3_INNER_STRP_VLAN_VLD
+ }
+ };
+ strip_status = hns3_get_field(l234_info, HNS3_RXD_STRP_TAGP_M,
+ HNS3_RXD_STRP_TAGP_S);
+ report_mode = report_type[hw->port_base_vlan_cfg.state][strip_status];
+ switch (report_mode) {
+ case HNS3_NO_STRP_VLAN_VLD:
+ mb->vlan_tci = 0;
+ return;
+ case HNS3_INNER_STRP_VLAN_VLD:
+ mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
+ mb->vlan_tci = rte_le_to_cpu_16(rxd->rx.vlan_tag);
+ return;
+ case HNS3_OUTER_STRP_VLAN_VLD:
+ mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
+ mb->vlan_tci = rte_le_to_cpu_16(rxd->rx.ot_vlan_tag);
+ return;
+ }
+}
+
uint16_t
hns3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
first_seg->pkt_len = pkt_len;
first_seg->port = rxq->port_id;
first_seg->hash.rss = rte_le_to_cpu_32(rxd.rx.rss_hash);
- first_seg->ol_flags |= PKT_RX_RSS_HASH;
+ first_seg->ol_flags = PKT_RX_RSS_HASH;
if (unlikely(hns3_get_bit(bd_base_info, HNS3_RXD_LUM_B))) {
first_seg->hash.fdir.hi =
rte_le_to_cpu_32(rxd.rx.fd_id);
ol_info);
if (bd_base_info & BIT(HNS3_RXD_L3L4P_B))
- hns3_rx_set_cksum_flag(rxm, first_seg->packet_type,
+ hns3_rx_set_cksum_flag(first_seg,
+ first_seg->packet_type,
cksum_err);
+ hns3_rxd_to_vlan_tci(dev, first_seg, l234_info, &rxd);
- first_seg->vlan_tci = rte_le_to_cpu_16(rxd.rx.vlan_tag);
- first_seg->vlan_tci_outer =
- rte_le_to_cpu_16(rxd.rx.ot_vlan_tag);
rx_pkts[nb_rx++] = first_seg;
first_seg = NULL;
continue;
txq->configured = true;
txq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET +
idx * HNS3_TQP_REG_SIZE);
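+ /* Reset the software counters that track Tx error conditions. */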
+ txq->over_length_pkt_cnt = 0;
+ txq->exceed_limit_bd_pkt_cnt = 0;
+ txq->exceed_limit_bd_reassem_fail = 0;
+ txq->unsupported_tunnel_pkt_cnt = 0;
+ txq->queue_full_cnt = 0;
+ txq->pkt_padding_fail_cnt = 0;
rte_spinlock_lock(&hw->lock);
dev->data->tx_queues[idx] = txq;
rte_spinlock_unlock(&hw->lock);
return 0;
}
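+/* Check whether the mbuf requires TCP segmentation offload (TSO). */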
+static inline bool
+hns3_pkt_is_tso(struct rte_mbuf *m)
+{
+ return (m->tso_segsz != 0 && m->ol_flags & PKT_TX_TCP_SEG);
+}
+
static void
hns3_set_tso(struct hns3_desc *desc,
uint64_t ol_flags, struct rte_mbuf *rxm)
uint32_t tmp;
uint8_t l2_len = rxm->l2_len;
- if (!(ol_flags & PKT_TX_TCP_SEG))
+ if (!hns3_pkt_is_tso(rxm))
return;
if (hns3_tso_proc_tunnel(desc, ol_flags, rxm, &l2_len))
}
}
-static inline bool
-hns3_pkt_is_tso(struct rte_mbuf *m)
-{
- return (m->tso_segsz != 0 && m->ol_flags & PKT_TX_TCP_SEG);
-}
-
static int
hns3_check_tso_pkt_valid(struct rte_mbuf *m)
{
for (i = 0; i < nb_pkts; i++) {
m = tx_pkts[i];
- /* check the size of packet */
- if (m->pkt_len < RTE_ETHER_MIN_LEN) {
- rte_errno = EINVAL;
- return i;
- }
-
if (hns3_pkt_is_tso(m) &&
(hns3_pkt_need_linearized(m, m->nb_segs) ||
hns3_check_tso_pkt_valid(m))) {
if (m->ol_flags & PKT_TX_TUNNEL_MASK) {
(void)rte_net_get_ptype(m, hdr_lens, RTE_PTYPE_ALL_MASK);
if (hns3_parse_tunneling_params(txq, tx_desc_id, m->ol_flags,
- hdr_lens))
+ hdr_lens)) {
+ txq->unsupported_tunnel_pkt_cnt++;
return -EINVAL;
+ }
}
/* Enable checksum offloading */
if (m->ol_flags & HNS3_TX_CKSUM_OFFLOAD_MASK)
* If the packet length is greater than HNS3_MAX_FRAME_LEN, which is the
* maximum frame length the driver supports, the packet will be ignored.
*/
- if (unlikely(rte_pktmbuf_pkt_len(tx_pkt) > HNS3_MAX_FRAME_LEN))
+ if (unlikely(rte_pktmbuf_pkt_len(tx_pkt) > HNS3_MAX_FRAME_LEN)) {
+ txq->over_length_pkt_cnt++;
return -EINVAL;
+ }
if (unlikely(nb_buf > HNS3_MAX_NON_TSO_BD_PER_PKT)) {
+ txq->exceed_limit_bd_pkt_cnt++;
ret = hns3_reassemble_tx_pkts(txq, tx_pkt, &new_pkt);
- if (ret)
+ if (ret) {
+ txq->exceed_limit_bd_reassem_fail++;
return ret;
+ }
*m_seg = new_pkt;
}
nb_buf = tx_pkt->nb_segs;
if (nb_buf > txq->tx_bd_ready) {
+ txq->queue_full_cnt++;
if (nb_tx == 0)
return 0;
add_len = HNS3_MIN_PKT_SIZE -
rte_pktmbuf_pkt_len(tx_pkt);
appended = rte_pktmbuf_append(tx_pkt, add_len);
- if (appended == NULL)
+ if (appended == NULL) {
+ txq->pkt_padding_fail_cnt++;
break;
+ }
memset(appended, 0, add_len);
}