hw->intr.mapping_mode = HNS3_INTR_MAPPING_VEC_RSV_ONE;
hw->intr.coalesce_mode = HNS3_INTR_COALESCE_NON_QL;
hw->intr.gl_unit = HNS3_INTR_COALESCE_GL_UINT_2US;
+ hw->tso_mode = HNS3_TSO_SW_CAL_PSEUDO_H_CSUM;
hw->vlan_mode = HNS3_SW_SHIFT_AND_DISCARD_MODE;
hw->min_tx_pkt_len = HNS3_HIP08_MIN_TX_PKT_LEN;
return 0;
hw->intr.mapping_mode = HNS3_INTR_MAPPING_VEC_ALL;
hw->intr.coalesce_mode = HNS3_INTR_COALESCE_QL;
hw->intr.gl_unit = HNS3_INTR_COALESCE_GL_UINT_1US;
+ hw->tso_mode = HNS3_TSO_HW_CAL_PSEUDO_H_CSUM;
hw->vlan_mode = HNS3_HW_SHIFT_AND_DISCARD_MODE;
hw->min_tx_pkt_len = HNS3_HIP09_MIN_TX_PKT_LEN;
uint8_t gl_unit;
};
+#define HNS3_TSO_SW_CAL_PSEUDO_H_CSUM 0
+#define HNS3_TSO_HW_CAL_PSEUDO_H_CSUM 1
+
struct hns3_hw {
struct rte_eth_dev_data *data;
void *io_base;
uint32_t min_tx_pkt_len;
struct hns3_queue_intr intr;
-
+ /*
+ * tso mode.
+ * value range:
+ * HNS3_TSO_SW_CAL_PSEUDO_H_CSUM/HNS3_TSO_HW_CAL_PSEUDO_H_CSUM
+ *
+ * - HNS3_TSO_SW_CAL_PSEUDO_H_CSUM
+ * In this mode, because of a hardware constraint, the network
+ * driver software must clear the L4 length field in the TCP
+ * pseudo header and recalculate the TCP pseudo header checksum
+ * of packets that need TSO.
+ *
+ * - HNS3_TSO_HW_CAL_PSEUDO_H_CSUM
+ * In this mode, the hardware recalculates the TCP pseudo header
+ * checksum of packets that need TSO, so the network driver
+ * software does not need to recalculate it.
+ */
+ uint8_t tso_mode;
/*
* vlan mode.
* value range:
hw->intr.mapping_mode = HNS3_INTR_MAPPING_VEC_RSV_ONE;
hw->intr.coalesce_mode = HNS3_INTR_COALESCE_NON_QL;
hw->intr.gl_unit = HNS3_INTR_COALESCE_GL_UINT_2US;
+ hw->tso_mode = HNS3_TSO_SW_CAL_PSEUDO_H_CSUM;
hw->min_tx_pkt_len = HNS3_HIP08_MIN_TX_PKT_LEN;
return 0;
}
hw->intr.mapping_mode = HNS3_INTR_MAPPING_VEC_ALL;
hw->intr.coalesce_mode = HNS3_INTR_COALESCE_QL;
hw->intr.gl_unit = HNS3_INTR_COALESCE_GL_UINT_1US;
+ hw->tso_mode = HNS3_TSO_HW_CAL_PSEUDO_H_CSUM;
hw->min_tx_pkt_len = HNS3_HIP09_MIN_TX_PKT_LEN;
return 0;
txq->io_tail_reg = (volatile void *)((char *)txq->io_base +
HNS3_RING_TX_TAIL_REG);
txq->min_tx_pkt_len = hw->min_tx_pkt_len;
+ txq->tso_mode = hw->tso_mode;
txq->over_length_pkt_cnt = 0;
txq->exceed_limit_bd_pkt_cnt = 0;
txq->exceed_limit_bd_reassem_fail = 0;
}
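The HNS3_TSO_SW_CAL_PSEUDO_H_CSUM branch relies on the generic rte_net_intel_cksum_prepare() call further down to redo the pseudo header checksum without the L4 length. The hypothetical helper below (not part of the patch) is a minimal sketch of what that fixup amounts to, assuming the L2/L3/L4 headers sit contiguously in the first mbuf segment and using the PKT_TX_* flag names of this DPDK era; rte_ipv4_phdr_cksum()/rte_ipv6_phdr_cksum() skip the L4 length when PKT_TX_TCP_SEG is set, which is the "clear the L4 length and recalculate" step the comment above describes.

#include <rte_ip.h>
#include <rte_tcp.h>
#include <rte_mbuf.h>

/* Hypothetical illustration only: recompute the TCP pseudo header
 * checksum for a TSO packet the way SW mode requires. The real driver
 * delegates this to rte_net_intel_cksum_prepare(). */
static inline void
sw_tso_phdr_cksum_fixup(struct rte_mbuf *m)
{
	/* Assumes headers are contiguous in the first segment. */
	struct rte_tcp_hdr *tcp = rte_pktmbuf_mtod_offset(m,
			struct rte_tcp_hdr *, m->l2_len + m->l3_len);

	if (m->ol_flags & PKT_TX_IPV4) {
		struct rte_ipv4_hdr *ipv4 = rte_pktmbuf_mtod_offset(m,
				struct rte_ipv4_hdr *, m->l2_len);
		/* With PKT_TX_TCP_SEG set, the L4 length is excluded. */
		tcp->cksum = rte_ipv4_phdr_cksum(ipv4, m->ol_flags);
	} else {
		struct rte_ipv6_hdr *ipv6 = rte_pktmbuf_mtod_offset(m,
				struct rte_ipv6_hdr *, m->l2_len);
		tcp->cksum = rte_ipv6_phdr_cksum(ipv6, m->ol_flags);
	}
}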
#endif
-uint16_t
-hns3_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
- uint16_t nb_pkts)
+static int
+hns3_prep_pkt_proc(struct hns3_tx_queue *tx_queue, struct rte_mbuf *m)
{
- struct hns3_tx_queue *txq;
- struct rte_mbuf *m;
- uint16_t i;
int ret;
- txq = (struct hns3_tx_queue *)tx_queue;
-
- for (i = 0; i < nb_pkts; i++) {
- m = tx_pkts[i];
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+ ret = rte_validate_tx_offload(m);
+ if (ret != 0) {
+ rte_errno = -ret;
+ return ret;
+ }
- if (hns3_pkt_is_tso(m) &&
- (hns3_pkt_need_linearized(m, m->nb_segs,
- txq->max_non_tso_bd_num) ||
- hns3_check_tso_pkt_valid(m))) {
+ ret = hns3_vld_vlan_chk(tx_queue, m);
+ if (ret != 0) {
+ rte_errno = EINVAL;
+ return ret;
+ }
+#endif
+ if (hns3_pkt_is_tso(m)) {
+ if (hns3_pkt_need_linearized(m, m->nb_segs,
+ tx_queue->max_non_tso_bd_num) ||
+ hns3_check_tso_pkt_valid(m)) {
rte_errno = EINVAL;
- return i;
+ return -EINVAL;
}
-#ifdef RTE_LIBRTE_ETHDEV_DEBUG
- ret = rte_validate_tx_offload(m);
- if (ret != 0) {
- rte_errno = -ret;
- return i;
+ if (tx_queue->tso_mode != HNS3_TSO_SW_CAL_PSEUDO_H_CSUM) {
+ /*
+ * (tso mode != HNS3_TSO_SW_CAL_PSEUDO_H_CSUM) means the
+ * hardware recalculates the TCP pseudo header checksum of
+ * packets that need TSO, so the network driver software
+ * does not need to recalculate it.
+ */
+ hns3_outer_header_cksum_prepare(m);
+ return 0;
}
+ }
- if (hns3_vld_vlan_chk(txq, m)) {
- rte_errno = EINVAL;
- return i;
- }
-#endif
- ret = rte_net_intel_cksum_prepare(m);
- if (ret != 0) {
- rte_errno = -ret;
- return i;
- }
+ ret = rte_net_intel_cksum_prepare(m);
+ if (ret != 0) {
+ rte_errno = -ret;
+ return ret;
+ }
+
+ hns3_outer_header_cksum_prepare(m);
+
+ return 0;
+}
- hns3_outer_header_cksum_prepare(m);
+uint16_t
+hns3_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ struct rte_mbuf *m;
+ uint16_t i;
+
+ for (i = 0; i < nb_pkts; i++) {
+ m = tx_pkts[i];
+ if (hns3_prep_pkt_proc(tx_queue, m))
+ return i;
}
return i;
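hns3_prep_pkts() is the PMD's tx_prepare callback, so applications reach it through rte_eth_tx_prepare(), which should be called before rte_eth_tx_burst() whenever TSO or checksum offloads are requested. A minimal application-side sketch follows; port_id, queue_id, pkts[] and nb are placeholders, not taken from the patch.

#include <stdio.h>
#include <rte_ethdev.h>
#include <rte_errno.h>
#include <rte_mbuf.h>

/* Hypothetical burst loop showing where the prepare callback runs. */
static uint16_t
send_burst(uint16_t port_id, uint16_t queue_id,
	   struct rte_mbuf **pkts, uint16_t nb)
{
	uint16_t nb_prep, nb_tx;

	/* Invokes the PMD tx_prepare callback (hns3_prep_pkts() on hns3):
	 * validates TSO limits and fixes up checksums as needed. */
	nb_prep = rte_eth_tx_prepare(port_id, queue_id, pkts, nb);
	if (nb_prep != nb)
		/* pkts[nb_prep] was rejected; rte_errno tells why. */
		printf("tx_prepare stopped at %u: %s\n",
		       nb_prep, rte_strerror(rte_errno));

	nb_tx = rte_eth_tx_burst(port_id, queue_id, pkts, nb_prep);
	return nb_tx;
}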
uint16_t tx_rs_thresh;
struct rte_mbuf **free;
+ /*
+ * tso mode.
+ * value range:
+ * HNS3_TSO_SW_CAL_PSEUDO_H_CSUM/HNS3_TSO_HW_CAL_PSEUDO_H_CSUM
+ *
+ * - HNS3_TSO_SW_CAL_PSEUDO_H_CSUM
+ * In this mode, because of a hardware constraint, the network
+ * driver software must clear the L4 length field in the TCP
+ * pseudo header and recalculate the TCP pseudo header checksum
+ * of packets that need TSO.
+ *
+ * - HNS3_TSO_HW_CAL_PSEUDO_H_CSUM
+ * In this mode, the hardware recalculates the TCP pseudo header
+ * checksum of packets that need TSO, so the network driver
+ * software does not need to recalculate it.
+ */
+ uint8_t tso_mode;
/*
* The minimum length of the packet supported by hardware in the Tx
* direction.