- /* Process the pseudo-header checksum */
- if ((ol_flags & PKT_TX_L4_MASK) == PKT_TX_UDP_CKSUM) {
- if (ol_flags & PKT_TX_IPV4) {
- ipv4_hdr =
- rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
- inner_l3_offset);
-
- if (ol_flags & PKT_TX_IP_CKSUM)
- ipv4_hdr->hdr_checksum = 0;
-
- udp_hdr = (struct rte_udp_hdr *)((char *)ipv4_hdr +
- m->l3_len);
- udp_hdr->dgram_cksum =
- hinic_ipv4_phdr_cksum(ipv4_hdr, ol_flags);
- } else {
- ipv6_hdr =
- rte_pktmbuf_mtod_offset(m, struct rte_ipv6_hdr *,
- inner_l3_offset);
-
- udp_hdr =
- rte_pktmbuf_mtod_offset(m, struct rte_udp_hdr *,
- (inner_l3_offset + m->l3_len));
- udp_hdr->dgram_cksum =
- hinic_ipv6_phdr_cksum(ipv6_hdr, ol_flags);
- }
+/*
+ * Fill the TX offload descriptor fields for an SCTP packet.
+ *
+ * Marks the inner L4 protocol as SCTP so the NIC performs the SCTP
+ * CRC offload, and records the fixed SCTP header length.  No header
+ * fields in the mbuf itself are touched.
+ *
+ * @off_info: per-packet offload descriptor to populate (never NULL;
+ *            callers pass a stack-local struct).
+ */
+static inline void
+hinic_calculate_sctp_checksum(struct hinic_tx_offload_info *off_info)
+{
+	off_info->inner_l4_type = SCTP_OFFLOAD_ENABLE;
+	/* 0 here presumably distinguishes SCTP (CRC32c) from the
+	 * TCP/UDP pseudo-header checksum path — TODO confirm against
+	 * the hardware descriptor definition.
+	 */
+	off_info->inner_l4_tcp_udp = 0;
+	off_info->inner_l4_len = sizeof(struct rte_sctp_hdr);
+}
+
+/*
+ * Dispatch per-packet L4 checksum offload preparation.
+ *
+ * Selects the helper matching the L4 checksum request encoded in
+ * mbuf->ol_flags (PKT_TX_L4_MASK): UDP, TCP and SCTP each get a
+ * dedicated setup routine.  When no explicit L4 checksum flag is set
+ * but TSO is requested (PKT_TX_TCP_SEG), the TCP setup still runs —
+ * presumably because segmentation needs a valid TCP pseudo-header
+ * checksum; confirm against the hinic TSO path.
+ *
+ * @mbuf:            packet whose offload flags/headers are examined.
+ * @off_info:        offload descriptor populated by the helpers.
+ * @inner_l3_offset: byte offset of the (inner) L3 header within the
+ *                   packet, forwarded to the UDP/TCP helpers.
+ */
+static inline void hinic_calculate_checksum(struct rte_mbuf *mbuf,
+		struct hinic_tx_offload_info *off_info,
+		uint64_t inner_l3_offset)
+{
+	uint64_t ol_flags = mbuf->ol_flags;
+
+	switch (ol_flags & PKT_TX_L4_MASK) {
+	case PKT_TX_UDP_CKSUM:
+		hinic_calculate_udp_checksum(mbuf, off_info, inner_l3_offset);
+		break;
+
+	case PKT_TX_TCP_CKSUM:
+		hinic_calculate_tcp_checksum(mbuf, off_info, inner_l3_offset);
+		break;
+
+	case PKT_TX_SCTP_CKSUM:
+		hinic_calculate_sctp_checksum(off_info);
+		break;
+
+	default:
+		/* PKT_TX_L4_NO_CKSUM: only act if TSO is requested. */
+		if (ol_flags & PKT_TX_TCP_SEG)
+			hinic_calculate_tcp_checksum(mbuf, off_info,
+					inner_l3_offset);
+		break;
+	}
+}
+
+static inline int hinic_tx_offload_pkt_prepare(struct rte_mbuf *m,
+ struct hinic_tx_offload_info *off_info)
+{
+ uint64_t inner_l3_offset;
+ uint64_t ol_flags = m->ol_flags;
+
+ /* Check if the packets set available offload flags */
+ if (!(ol_flags & HINIC_TX_CKSUM_OFFLOAD_MASK))
+ return 0;
+
+ /* Support only vxlan offload */
+ if (unlikely((ol_flags & PKT_TX_TUNNEL_MASK) &&
+ !(ol_flags & PKT_TX_TUNNEL_VXLAN)))
+ return -ENOTSUP;