return;
addr = offset[gl_idx] + queue_id * HNS3_TQP_INTR_REG_SIZE;
- value = HNS3_GL_USEC_TO_REG(gl_value);
+ if (hw->intr.gl_unit == HNS3_INTR_COALESCE_GL_UINT_1US)
+ value = gl_value | HNS3_TQP_INTR_GL_UNIT_1US;
+ else
+ value = HNS3_GL_USEC_TO_REG(gl_value);
hns3_write_dev(hw, addr, value);
}
hns3_write_dev(hw, addr, value);
}
+/*
+ * Program the per-queue interrupt quantity-limit (QL) registers.
+ *
+ * Writes @ql_value into both the Tx and Rx QL registers of the TQP
+ * interrupt block for @queue_id. No-op when the hardware reports the
+ * non-QL coalesce mode (QL registers absent/unsupported on that device).
+ *
+ * NOTE(review): assumes ql_value is already within the hardware's valid
+ * QL range — TODO confirm the caller range-checks it.
+ */
+void
+hns3_set_queue_intr_ql(struct hns3_hw *hw, uint16_t queue_id, uint16_t ql_value)
+{
+	uint32_t addr;
+
+	if (hw->intr.coalesce_mode == HNS3_INTR_COALESCE_NON_QL)
+		return;
+
+	addr = HNS3_TQP_INTR_TX_QL_REG + queue_id * HNS3_TQP_INTR_REG_SIZE;
+	hns3_write_dev(hw, addr, ql_value);
+
+	addr = HNS3_TQP_INTR_RX_QL_REG + queue_id * HNS3_TQP_INTR_REG_SIZE;
+	hns3_write_dev(hw, addr, ql_value);
+}
+
static void
hns3_queue_intr_enable(struct hns3_hw *hw, uint16_t queue_id, bool en)
{
txq->configured = true;
txq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET +
idx * HNS3_TQP_REG_SIZE);
+ txq->min_tx_pkt_len = hw->min_tx_pkt_len;
txq->over_length_pkt_cnt = 0;
txq->exceed_limit_bd_pkt_cnt = 0;
txq->exceed_limit_bd_reassem_fail = 0;
}
/*
- * If packet length is less than minimum packet size, driver
- * need to pad it.
+ * If packet length is less than minimum packet length supported
+ * by hardware in Tx direction, driver need to pad it to avoid
+ * error.
*/
- if (unlikely(rte_pktmbuf_pkt_len(tx_pkt) < HNS3_MIN_PKT_SIZE)) {
+ if (unlikely(rte_pktmbuf_pkt_len(tx_pkt) <
+ txq->min_tx_pkt_len)) {
uint16_t add_len;
char *appended;
- add_len = HNS3_MIN_PKT_SIZE -
+ add_len = txq->min_tx_pkt_len -
rte_pktmbuf_pkt_len(tx_pkt);
appended = rte_pktmbuf_append(tx_pkt, add_len);
if (appended == NULL) {