#include "base/hinic_pmd_hwif.h"
#include "base/hinic_pmd_wq.h"
#include "base/hinic_pmd_nicio.h"
+#include "base/hinic_pmd_niccfg.h"
#include "hinic_pmd_ethdev.h"
#include "hinic_pmd_tx.h"
/* packet header and tx offload info */
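+/* ETHER_LEN_* are the ethernet header lengths without and with a vlan tag;
+ * HEADER_LEN_OFFSET converts the ipv4 ihl field (4-byte words) into bytes
+ */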
+#define ETHER_LEN_NO_VLAN 14
+#define ETHER_LEN_WITH_VLAN 18
+#define HEADER_LEN_OFFSET 2
#define VXLANLEN 8
#define MAX_PLD_OFFSET 221
#define MAX_SINGLE_SGE_SIZE 65536
mbuf = mbuf->next;
}
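+	/* the copied mbuf is a single segment, so keep pkt_len in sync
+	 * with data_len
+	 */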
+ dst_mbuf->pkt_len = dst_mbuf->data_len;
+
return dst_mbuf;
}
i = 0;
for (sge_idx = sges; (u64)sge_idx <= txq->sq_bot_sge_addr;
sge_idx++) {
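+		/* an mbuf chain shorter than nb_segs is malformed; drop it */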
+ if (unlikely(mbuf == NULL)) {
+ txq->txq_stats.mbuf_null++;
+ return false;
+ }
+
dma_addr = rte_mbuf_data_iova(mbuf);
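+		/* zero-length segments cannot form a valid sge; drop them */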
+ if (unlikely(mbuf->data_len == 0)) {
+ txq->txq_stats.sge_len0++;
+ return false;
+ }
hinic_set_sge((struct hinic_sge *)sge_idx, dma_addr,
mbuf->data_len);
mbuf = mbuf->next;
sge_idx = (struct hinic_sq_bufdesc *)
((void *)txq->sq_head_addr);
for (; i < nb_segs; i++) {
+ if (unlikely(mbuf == NULL)) {
+ txq->txq_stats.mbuf_null++;
+ return false;
+ }
+
dma_addr = rte_mbuf_data_iova(mbuf);
+ if (unlikely(mbuf->data_len == 0)) {
+ txq->txq_stats.sge_len0++;
+ return false;
+ }
hinic_set_sge((struct hinic_sge *)sge_idx, dma_addr,
mbuf->data_len);
mbuf = mbuf->next;
} else {
/* wqe is in continuous space */
for (i = 0; i < nb_segs; i++) {
+ if (unlikely(mbuf == NULL)) {
+ txq->txq_stats.mbuf_null++;
+ return false;
+ }
+
dma_addr = rte_mbuf_data_iova(mbuf);
+ if (unlikely(mbuf->data_len == 0)) {
+ txq->txq_stats.sge_len0++;
+ return false;
+ }
hinic_set_sge((struct hinic_sge *)sge_idx, dma_addr,
mbuf->data_len);
mbuf = mbuf->next;
/* deal with the last mbuf */
dma_addr = rte_mbuf_data_iova(mbuf);
+ if (unlikely(mbuf->data_len == 0)) {
+ txq->txq_stats.sge_len0++;
+ return false;
+ }
hinic_set_sge((struct hinic_sge *)sge_idx, dma_addr,
mbuf->data_len);
if (unlikely(sqe_info->around))
*poff_info,
struct hinic_wqe_info *sqe_info)
{
- u32 total_len, limit_len, checked_len, left_len;
+ u32 total_len, limit_len, checked_len, left_len, adjust_mss;
u32 i, first_mss_sges, left_sges;
struct rte_mbuf *mbuf_head, *mbuf_pre;
/* tso sge number validation */
if (unlikely(left_sges >= HINIC_NONTSO_PKT_MAX_SGE)) {
checked_len = 0;
- limit_len = mbuf->tso_segsz + poff_info->payload_offset;
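+	/* clamp the mss to the supported minimum before computing the
+	 * per-segment length limit
+	 */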
+ adjust_mss = mbuf->tso_segsz >= TX_MSS_MIN ?
+ mbuf->tso_segsz : TX_MSS_MIN;
+ limit_len = adjust_mss + poff_info->payload_offset;
first_mss_sges = HINIC_NONTSO_PKT_MAX_SGE;
	/* every continuous 17 mbuf segs must do one check */
mbuf_pre = mbuf;
mbuf = mbuf->next;
if (total_len >= limit_len) {
- limit_len = mbuf_head->tso_segsz;
+ limit_len = adjust_mss;
break;
}
}
hinic_set_l4_csum_info(struct hinic_sq_task *task,
u32 *queue_info, struct hinic_tx_offload_info *poff_info)
{
- u32 tcp_udp_cs, sctp;
+ u32 tcp_udp_cs, sctp = 0;
u16 l2hdr_len;
- sctp = 0;
if (unlikely(poff_info->inner_l4_type == SCTP_OFFLOAD_ENABLE))
sctp = 1;
tcp_udp_cs = poff_info->inner_l4_tcp_udp;
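+	/* the outer l2 header length is needed for both checksummed and
+	 * non-checksummed udp tunnels
+	 */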
- if (poff_info->tunnel_type == TUNNEL_UDP_NO_CSUM) {
+ if (poff_info->tunnel_type == TUNNEL_UDP_CSUM ||
+ poff_info->tunnel_type == TUNNEL_UDP_NO_CSUM) {
l2hdr_len = poff_info->outer_l2_len;
task->pkt_info2 |=
static inline struct hinic_sq_wqe *
hinic_get_sq_wqe(struct hinic_txq *txq, int wqebb_cnt,
- struct hinic_wqe_info *wqe_info)
+ struct hinic_wqe_info *wqe_info)
{
u32 cur_pi, end_pi;
u16 remain_wqebbs;
return (struct hinic_sq_wqe *)WQ_WQE_ADDR(wq, cur_pi);
}
-static inline int
-hinic_validate_tx_offload(const struct rte_mbuf *m)
-{
- uint64_t ol_flags = m->ol_flags;
- uint64_t inner_l3_offset = m->l2_len;
-
- /* just support vxlan offload */
- if ((ol_flags & PKT_TX_TUNNEL_MASK) &&
- !(ol_flags & PKT_TX_TUNNEL_VXLAN))
- return -ENOTSUP;
-
- if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
- inner_l3_offset += m->outer_l2_len + m->outer_l3_len;
-
- /* Headers are fragmented */
- if (rte_pktmbuf_data_len(m) < inner_l3_offset + m->l3_len + m->l4_len)
- return -ENOTSUP;
-
- /* IP checksum can be counted only for IPv4 packet */
- if ((ol_flags & PKT_TX_IP_CKSUM) && (ol_flags & PKT_TX_IPV6))
- return -EINVAL;
-
- /* IP type not set when required */
- if (ol_flags & (PKT_TX_L4_MASK | PKT_TX_TCP_SEG)) {
- if (!(ol_flags & (PKT_TX_IPV4 | PKT_TX_IPV6)))
- return -EINVAL;
- }
-
- /* Check requirements for TSO packet */
- if (ol_flags & PKT_TX_TCP_SEG) {
- if (m->tso_segsz == 0 ||
- ((ol_flags & PKT_TX_IPV4) &&
- !(ol_flags & PKT_TX_IP_CKSUM)))
- return -EINVAL;
- }
-
- /* PKT_TX_OUTER_IP_CKSUM set for non outer IPv4 packet. */
- if ((ol_flags & PKT_TX_OUTER_IP_CKSUM) &&
- !(ol_flags & PKT_TX_OUTER_IPV4))
- return -EINVAL;
-
- return 0;
-}
-
static inline uint16_t
hinic_ipv4_phdr_cksum(const struct rte_ipv4_hdr *ipv4_hdr, uint64_t ol_flags)
{
return __rte_raw_cksum_reduce(sum);
}
-static inline int
-hinic_tx_offload_pkt_prepare(struct rte_mbuf *m,
- struct hinic_tx_offload_info *off_info)
+static inline void hinic_get_outer_cs_pld_offset(struct rte_mbuf *m,
+ struct hinic_tx_offload_info *off_info)
+{
+ uint64_t ol_flags = m->ol_flags;
+
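+	/* with outer checksum offload the payload starts after the outer and
+	 * inner headers; tcp also skips the inner l4 header, udp does not
+	 */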
+ if ((ol_flags & PKT_TX_L4_MASK) == PKT_TX_UDP_CKSUM)
+ off_info->payload_offset = m->outer_l2_len + m->outer_l3_len +
+ m->l2_len + m->l3_len;
+	else if (((ol_flags & PKT_TX_L4_MASK) == PKT_TX_TCP_CKSUM) ||
+		 (ol_flags & PKT_TX_TCP_SEG))
+		off_info->payload_offset = m->outer_l2_len + m->outer_l3_len +
+					   m->l2_len + m->l3_len + m->l4_len;
+}
+
+static inline void hinic_get_pld_offset(struct rte_mbuf *m,
+ struct hinic_tx_offload_info *off_info)
+{
+ uint64_t ol_flags = m->ol_flags;
+
+ if ((ol_flags & PKT_TX_L4_MASK) == PKT_TX_UDP_CKSUM)
+ off_info->payload_offset = m->l2_len + m->l3_len;
+	else if (((ol_flags & PKT_TX_L4_MASK) == PKT_TX_TCP_CKSUM) ||
+		 (ol_flags & PKT_TX_TCP_SEG))
+		off_info->payload_offset = m->l2_len + m->l3_len + m->l4_len;
+}
+
+static inline void hinic_analyze_tx_info(struct rte_mbuf *mbuf,
+ struct hinic_tx_offload_info *off_info)
+{
+ struct rte_ether_hdr *eth_hdr;
+ struct rte_vlan_hdr *vlan_hdr;
+ struct rte_ipv4_hdr *ip4h;
+ u16 pkt_type;
+ u8 *hdr;
+
+	hdr = rte_pktmbuf_mtod(mbuf, u8 *);
+ eth_hdr = (struct rte_ether_hdr *)hdr;
+ pkt_type = rte_be_to_cpu_16(eth_hdr->ether_type);
+
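+	/* parse the outer headers from the packet because the mbuf outer
+	 * length fields are not filled in when no outer offload is requested
+	 */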
+ if (pkt_type == RTE_ETHER_TYPE_VLAN) {
+ off_info->outer_l2_len = ETHER_LEN_WITH_VLAN;
+		vlan_hdr = (struct rte_vlan_hdr *)(eth_hdr + 1);
+ pkt_type = rte_be_to_cpu_16(vlan_hdr->eth_proto);
+ } else {
+ off_info->outer_l2_len = ETHER_LEN_NO_VLAN;
+ }
+
+ if (pkt_type == RTE_ETHER_TYPE_IPV4) {
+ ip4h = (struct rte_ipv4_hdr *)(hdr + off_info->outer_l2_len);
+ off_info->outer_l3_len = (ip4h->version_ihl & 0xf) <<
+ HEADER_LEN_OFFSET;
+ } else if (pkt_type == RTE_ETHER_TYPE_IPV6) {
+		/* ipv6 extension headers are not supported */
+ off_info->outer_l3_len = sizeof(struct rte_ipv6_hdr);
+ }
+}
+
+static inline void hinic_analyze_outer_ip_vxlan(struct rte_mbuf *mbuf,
+ struct hinic_tx_offload_info *off_info)
{
- struct rte_ipv4_hdr *ipv4_hdr;
- struct rte_ipv6_hdr *ipv6_hdr;
- struct rte_tcp_hdr *tcp_hdr;
- struct rte_udp_hdr *udp_hdr;
struct rte_ether_hdr *eth_hdr;
struct rte_vlan_hdr *vlan_hdr;
+ struct rte_ipv4_hdr *ipv4_hdr;
+ struct rte_udp_hdr *udp_hdr;
u16 eth_type = 0;
- uint64_t inner_l3_offset = m->l2_len;
- uint64_t ol_flags = m->ol_flags;
- /* Does packet set any of available offloads */
- if (!(ol_flags & HINIC_TX_CKSUM_OFFLOAD_MASK))
- return 0;
+ eth_hdr = rte_pktmbuf_mtod(mbuf, struct rte_ether_hdr *);
+ eth_type = rte_be_to_cpu_16(eth_hdr->ether_type);
- if (unlikely(hinic_validate_tx_offload(m)))
- return -EINVAL;
+ if (eth_type == RTE_ETHER_TYPE_VLAN) {
+ vlan_hdr = (struct rte_vlan_hdr *)(eth_hdr + 1);
+ eth_type = rte_be_to_cpu_16(vlan_hdr->eth_proto);
+ }
- if ((ol_flags & PKT_TX_OUTER_IP_CKSUM) ||
- (ol_flags & PKT_TX_OUTER_IPV6) ||
- (ol_flags & PKT_TX_TUNNEL_VXLAN)) {
- inner_l3_offset += m->outer_l2_len + m->outer_l3_len;
- off_info->outer_l2_len = m->outer_l2_len;
- off_info->outer_l3_len = m->outer_l3_len;
- /* just support vxlan tunneling pkt */
- off_info->inner_l2_len = m->l2_len - VXLANLEN -
- sizeof(struct rte_udp_hdr);
- off_info->inner_l3_len = m->l3_len;
- off_info->inner_l4_len = m->l4_len;
- off_info->tunnel_length = m->l2_len;
- off_info->payload_offset = m->outer_l2_len +
- m->outer_l3_len + m->l2_len + m->l3_len;
- off_info->tunnel_type = TUNNEL_UDP_NO_CSUM;
- } else {
- off_info->inner_l2_len = m->l2_len;
- off_info->inner_l3_len = m->l3_len;
- off_info->inner_l4_len = m->l4_len;
- off_info->tunnel_type = NOT_TUNNEL;
- off_info->payload_offset = m->l2_len + m->l3_len;
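+	/* clear the outer ipv4 header checksum and the outer udp checksum
+	 * so that the hardware can fill them in
+	 */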
+ if (eth_type == RTE_ETHER_TYPE_IPV4) {
+ ipv4_hdr = rte_pktmbuf_mtod_offset(mbuf, struct rte_ipv4_hdr *,
+ mbuf->outer_l2_len);
+ off_info->outer_l3_type = IPV4_PKT_WITH_CHKSUM_OFFLOAD;
+ ipv4_hdr->hdr_checksum = 0;
+
+ udp_hdr = (struct rte_udp_hdr *)((char *)ipv4_hdr +
+ mbuf->outer_l3_len);
+ udp_hdr->dgram_cksum = 0;
+ } else if (eth_type == RTE_ETHER_TYPE_IPV6) {
+ off_info->outer_l3_type = IPV6_PKT;
+
+ udp_hdr = rte_pktmbuf_mtod_offset(mbuf, struct rte_udp_hdr *,
+ (mbuf->outer_l2_len +
+ mbuf->outer_l3_len));
+ udp_hdr->dgram_cksum = 0;
}
+}
- if (((ol_flags & PKT_TX_L4_MASK) != PKT_TX_SCTP_CKSUM) &&
- ((ol_flags & PKT_TX_L4_MASK) != PKT_TX_UDP_CKSUM))
- off_info->payload_offset += m->l4_len;
+static inline uint8_t hinic_analyze_l3_type(struct rte_mbuf *mbuf)
+{
+ uint8_t l3_type;
+ uint64_t ol_flags = mbuf->ol_flags;
- /* invalid udp or tcp header */
- if (unlikely(off_info->payload_offset > MAX_PLD_OFFSET))
- return -EINVAL;
+ if (ol_flags & PKT_TX_IPV4)
+ l3_type = (ol_flags & PKT_TX_IP_CKSUM) ?
+ IPV4_PKT_WITH_CHKSUM_OFFLOAD :
+ IPV4_PKT_NO_CHKSUM_OFFLOAD;
+ else if (ol_flags & PKT_TX_IPV6)
+ l3_type = IPV6_PKT;
+ else
+ l3_type = UNKNOWN_L3TYPE;
- /* Process outter udp pseudo-header checksum */
- if ((ol_flags & PKT_TX_TUNNEL_VXLAN) && ((ol_flags & PKT_TX_TCP_SEG) ||
- (ol_flags & PKT_TX_OUTER_IP_CKSUM) ||
- (ol_flags & PKT_TX_OUTER_IPV6))) {
- off_info->tunnel_type = TUNNEL_UDP_CSUM;
+ return l3_type;
+}
- /* inner_l4_tcp_udp csum should be setted to calculate outter
- * udp checksum when vxlan packets without inner l3 and l4
- */
- off_info->inner_l4_tcp_udp = 1;
+static inline void hinic_calculate_tcp_checksum(struct rte_mbuf *mbuf,
+ struct hinic_tx_offload_info *off_info,
+ uint64_t inner_l3_offset)
+{
+ struct rte_ipv4_hdr *ipv4_hdr;
+ struct rte_ipv6_hdr *ipv6_hdr;
+ struct rte_tcp_hdr *tcp_hdr;
+ uint64_t ol_flags = mbuf->ol_flags;
- eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
- eth_type = rte_be_to_cpu_16(eth_hdr->ether_type);
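+	/* seed the tcp checksum field with the pseudo-header checksum and
+	 * let the hardware complete the calculation
+	 */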
+ if (ol_flags & PKT_TX_IPV4) {
+ ipv4_hdr = rte_pktmbuf_mtod_offset(mbuf, struct rte_ipv4_hdr *,
+ inner_l3_offset);
- if (eth_type == RTE_ETHER_TYPE_VLAN) {
- vlan_hdr = (struct rte_vlan_hdr *)(eth_hdr + 1);
- eth_type = rte_be_to_cpu_16(vlan_hdr->eth_proto);
- }
+ if (ol_flags & PKT_TX_IP_CKSUM)
+ ipv4_hdr->hdr_checksum = 0;
+
+ tcp_hdr = (struct rte_tcp_hdr *)((char *)ipv4_hdr +
+ mbuf->l3_len);
+ tcp_hdr->cksum = hinic_ipv4_phdr_cksum(ipv4_hdr, ol_flags);
+ } else {
+ ipv6_hdr = rte_pktmbuf_mtod_offset(mbuf, struct rte_ipv6_hdr *,
+ inner_l3_offset);
+ tcp_hdr = rte_pktmbuf_mtod_offset(mbuf, struct rte_tcp_hdr *,
+ (inner_l3_offset +
+ mbuf->l3_len));
+ tcp_hdr->cksum = hinic_ipv6_phdr_cksum(ipv6_hdr, ol_flags);
+ }
+
+ off_info->inner_l4_type = TCP_OFFLOAD_ENABLE;
+ off_info->inner_l4_tcp_udp = 1;
+}
+
+static inline void hinic_calculate_udp_checksum(struct rte_mbuf *mbuf,
+ struct hinic_tx_offload_info *off_info,
+ uint64_t inner_l3_offset)
+{
+ struct rte_ipv4_hdr *ipv4_hdr;
+ struct rte_ipv6_hdr *ipv6_hdr;
+ struct rte_udp_hdr *udp_hdr;
+ uint64_t ol_flags = mbuf->ol_flags;
+
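+	/* seed the udp checksum field with the pseudo-header checksum */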
+ if (ol_flags & PKT_TX_IPV4) {
+ ipv4_hdr = rte_pktmbuf_mtod_offset(mbuf, struct rte_ipv4_hdr *,
+ inner_l3_offset);
- if (eth_type == RTE_ETHER_TYPE_IPV4) {
- ipv4_hdr =
- rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
- m->outer_l2_len);
- off_info->outer_l3_type = IPV4_PKT_WITH_CHKSUM_OFFLOAD;
+ if (ol_flags & PKT_TX_IP_CKSUM)
ipv4_hdr->hdr_checksum = 0;
- udp_hdr = (struct rte_udp_hdr *)((char *)ipv4_hdr +
- m->outer_l3_len);
- udp_hdr->dgram_cksum =
- hinic_ipv4_phdr_cksum(ipv4_hdr, ol_flags);
- } else if (eth_type == RTE_ETHER_TYPE_IPV6) {
- off_info->outer_l3_type = IPV6_PKT;
- ipv6_hdr =
- rte_pktmbuf_mtod_offset(m, struct rte_ipv6_hdr *,
- m->outer_l2_len);
-
- udp_hdr =
- rte_pktmbuf_mtod_offset(m, struct rte_udp_hdr *,
- (m->outer_l2_len +
- m->outer_l3_len));
- udp_hdr->dgram_cksum =
- hinic_ipv6_phdr_cksum(ipv6_hdr, ol_flags);
- }
+ udp_hdr = (struct rte_udp_hdr *)((char *)ipv4_hdr +
+ mbuf->l3_len);
+ udp_hdr->dgram_cksum = hinic_ipv4_phdr_cksum(ipv4_hdr,
+ ol_flags);
+ } else {
+ ipv6_hdr = rte_pktmbuf_mtod_offset(mbuf, struct rte_ipv6_hdr *,
+ inner_l3_offset);
+
+ udp_hdr = rte_pktmbuf_mtod_offset(mbuf, struct rte_udp_hdr *,
+ (inner_l3_offset +
+ mbuf->l3_len));
+ udp_hdr->dgram_cksum = hinic_ipv6_phdr_cksum(ipv6_hdr,
+ ol_flags);
}
- if (ol_flags & PKT_TX_IPV4)
- off_info->inner_l3_type = (ol_flags & PKT_TX_IP_CKSUM) ?
- IPV4_PKT_WITH_CHKSUM_OFFLOAD :
- IPV4_PKT_NO_CHKSUM_OFFLOAD;
- else if (ol_flags & PKT_TX_IPV6)
- off_info->inner_l3_type = IPV6_PKT;
+ off_info->inner_l4_type = UDP_OFFLOAD_ENABLE;
+ off_info->inner_l4_tcp_udp = 1;
+}
- /* Process the pseudo-header checksum */
- if ((ol_flags & PKT_TX_L4_MASK) == PKT_TX_UDP_CKSUM) {
- if (ol_flags & PKT_TX_IPV4) {
- ipv4_hdr =
- rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
- inner_l3_offset);
-
- if (ol_flags & PKT_TX_IP_CKSUM)
- ipv4_hdr->hdr_checksum = 0;
-
- udp_hdr = (struct rte_udp_hdr *)((char *)ipv4_hdr +
- m->l3_len);
- udp_hdr->dgram_cksum =
- hinic_ipv4_phdr_cksum(ipv4_hdr, ol_flags);
- } else {
- ipv6_hdr =
- rte_pktmbuf_mtod_offset(m, struct rte_ipv6_hdr *,
- inner_l3_offset);
-
- udp_hdr =
- rte_pktmbuf_mtod_offset(m, struct rte_udp_hdr *,
- (inner_l3_offset + m->l3_len));
- udp_hdr->dgram_cksum =
- hinic_ipv6_phdr_cksum(ipv6_hdr, ol_flags);
- }
+static inline void
+hinic_calculate_sctp_checksum(struct hinic_tx_offload_info *off_info)
+{
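+	/* sctp uses a crc rather than a checksum, so no pseudo-header
+	 * seed is required
+	 */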
+ off_info->inner_l4_type = SCTP_OFFLOAD_ENABLE;
+ off_info->inner_l4_tcp_udp = 0;
+ off_info->inner_l4_len = sizeof(struct rte_sctp_hdr);
+}
+
+static inline void hinic_calculate_checksum(struct rte_mbuf *mbuf,
+ struct hinic_tx_offload_info *off_info,
+ uint64_t inner_l3_offset)
+{
+ uint64_t ol_flags = mbuf->ol_flags;
+
+ switch (ol_flags & PKT_TX_L4_MASK) {
+ case PKT_TX_UDP_CKSUM:
+ hinic_calculate_udp_checksum(mbuf, off_info, inner_l3_offset);
+ break;
+
+ case PKT_TX_TCP_CKSUM:
+ hinic_calculate_tcp_checksum(mbuf, off_info, inner_l3_offset);
+ break;
+
+ case PKT_TX_SCTP_CKSUM:
+ hinic_calculate_sctp_checksum(off_info);
+ break;
+
+ default:
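+		/* tso may be requested via PKT_TX_TCP_SEG alone, without
+		 * PKT_TX_TCP_CKSUM
+		 */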
+ if (ol_flags & PKT_TX_TCP_SEG)
+ hinic_calculate_tcp_checksum(mbuf, off_info,
+ inner_l3_offset);
+ break;
+ }
+}
+
+static inline int hinic_tx_offload_pkt_prepare(struct rte_mbuf *m,
+ struct hinic_tx_offload_info *off_info)
+{
+ uint64_t inner_l3_offset;
+ uint64_t ol_flags = m->ol_flags;
+
+	/* Check whether the packet sets any of the available offload flags */
+ if (!(ol_flags & HINIC_TX_CKSUM_OFFLOAD_MASK))
+ return 0;
+
+ /* Support only vxlan offload */
+ if (unlikely((ol_flags & PKT_TX_TUNNEL_MASK) &&
+ !(ol_flags & PKT_TX_TUNNEL_VXLAN)))
+ return -ENOTSUP;
- off_info->inner_l4_type = UDP_OFFLOAD_ENABLE;
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+ if (rte_validate_tx_offload(m) != 0)
+ return -EINVAL;
+#endif
+
+ if (ol_flags & PKT_TX_TUNNEL_VXLAN) {
+ off_info->tunnel_type = TUNNEL_UDP_NO_CSUM;
+
+		/* inner_l4_tcp_udp csum must be set so that the outer udp
+		 * checksum can be calculated for vxlan packets that have no
+		 * inner l3 and l4 headers
+		 */
off_info->inner_l4_tcp_udp = 1;
- off_info->inner_l4_len = sizeof(struct rte_udp_hdr);
- } else if (((ol_flags & PKT_TX_L4_MASK) == PKT_TX_TCP_CKSUM) ||
- (ol_flags & PKT_TX_TCP_SEG)) {
- if (ol_flags & PKT_TX_IPV4) {
- ipv4_hdr =
- rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
- inner_l3_offset);
-
- if (ol_flags & PKT_TX_IP_CKSUM)
- ipv4_hdr->hdr_checksum = 0;
-
- /* non-TSO tcp */
- tcp_hdr = (struct rte_tcp_hdr *)((char *)ipv4_hdr +
- m->l3_len);
- tcp_hdr->cksum =
- hinic_ipv4_phdr_cksum(ipv4_hdr, ol_flags);
+
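+		/* with outer checksum or tso the mbuf outer header lengths
+		 * are valid; otherwise parse the packet to recover them
+		 */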
+ if ((ol_flags & PKT_TX_OUTER_IP_CKSUM) ||
+ (ol_flags & PKT_TX_OUTER_IPV6) ||
+ (ol_flags & PKT_TX_TCP_SEG)) {
+ inner_l3_offset = m->l2_len + m->outer_l2_len +
+ m->outer_l3_len;
+ off_info->outer_l2_len = m->outer_l2_len;
+ off_info->outer_l3_len = m->outer_l3_len;
+			/* only vxlan tunneled packets are supported */
+ off_info->inner_l2_len = m->l2_len - VXLANLEN -
+ sizeof(struct rte_udp_hdr);
+ off_info->tunnel_length = m->l2_len;
+
+ hinic_analyze_outer_ip_vxlan(m, off_info);
+
+ hinic_get_outer_cs_pld_offset(m, off_info);
} else {
- ipv6_hdr =
- rte_pktmbuf_mtod_offset(m, struct rte_ipv6_hdr *,
- inner_l3_offset);
- /* non-TSO tcp */
- tcp_hdr =
- rte_pktmbuf_mtod_offset(m, struct rte_tcp_hdr *,
- (inner_l3_offset + m->l3_len));
- tcp_hdr->cksum =
- hinic_ipv6_phdr_cksum(ipv6_hdr, ol_flags);
+ inner_l3_offset = m->l2_len;
+ hinic_analyze_tx_info(m, off_info);
+			/* only vxlan tunneled packets are supported */
+ off_info->inner_l2_len = m->l2_len - VXLANLEN -
+ sizeof(struct rte_udp_hdr) -
+ off_info->outer_l2_len -
+ off_info->outer_l3_len;
+ off_info->tunnel_length = m->l2_len -
+ off_info->outer_l2_len -
+ off_info->outer_l3_len;
+ off_info->outer_l3_type = IPV4_PKT_NO_CHKSUM_OFFLOAD;
+
+ hinic_get_pld_offset(m, off_info);
}
+ } else {
+ inner_l3_offset = m->l2_len;
+ off_info->inner_l2_len = m->l2_len;
+ off_info->tunnel_type = NOT_TUNNEL;
- off_info->inner_l4_type = TCP_OFFLOAD_ENABLE;
- off_info->inner_l4_tcp_udp = 1;
- } else if ((ol_flags & PKT_TX_L4_MASK) == PKT_TX_SCTP_CKSUM) {
- off_info->inner_l4_type = SCTP_OFFLOAD_ENABLE;
- off_info->inner_l4_tcp_udp = 0;
- off_info->inner_l4_len = sizeof(struct rte_sctp_hdr);
+ hinic_get_pld_offset(m, off_info);
}
+ /* invalid udp or tcp header */
+ if (unlikely(off_info->payload_offset > MAX_PLD_OFFSET))
+ return -EINVAL;
+
+ off_info->inner_l3_len = m->l3_len;
+ off_info->inner_l4_len = m->l4_len;
+ off_info->inner_l3_type = hinic_analyze_l3_type(m);
+
+ /* Process the pseudo-header checksum */
+ hinic_calculate_checksum(m, off_info, inner_l3_offset);
+
return 0;
}
return nb_tx;
}
-void hinic_free_all_tx_skbs(struct hinic_txq *txq)
+void hinic_free_all_tx_mbufs(struct hinic_txq *txq)
{
u16 ci;
struct hinic_nic_dev *nic_dev = txq->nic_dev;
HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(eth_dev);
for (q_id = 0; q_id < nic_dev->num_sq; q_id++) {
- eth_dev->data->tx_queues[q_id] = NULL;
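+		/* tx_queues may already have been released during close */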
+ if (eth_dev->data->tx_queues != NULL)
+ eth_dev->data->tx_queues[q_id] = NULL;
if (nic_dev->txqs[q_id] == NULL)
continue;
		/* stop the tx queue and free its tx mbufs */
- hinic_free_all_tx_skbs(nic_dev->txqs[q_id]);
+ hinic_free_all_tx_mbufs(nic_dev->txqs[q_id]);
hinic_free_tx_resources(nic_dev->txqs[q_id]);
/* free txq */
for (q_id = 0; q_id < nic_dev->num_sq; q_id++)
		/* stop the tx queue and free its tx mbufs */
- hinic_free_all_tx_skbs(nic_dev->txqs[q_id]);
+ hinic_free_all_tx_mbufs(nic_dev->txqs[q_id]);
}
int hinic_setup_tx_resources(struct hinic_txq *txq)
u64 tx_info_sz;
tx_info_sz = txq->q_depth * sizeof(*txq->tx_info);
- txq->tx_info = kzalloc_aligned(tx_info_sz, GFP_KERNEL);
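+	/* allocate the tx_info array from the queue's numa node */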
+ txq->tx_info = rte_zmalloc_socket("tx_info", tx_info_sz,
+ RTE_CACHE_LINE_SIZE, txq->socket_id);
if (!txq->tx_info)
return -ENOMEM;
if (txq->tx_info == NULL)
return;
- kfree(txq->tx_info);
+ rte_free(txq->tx_info);
txq->tx_info = NULL;
}
-int hinic_create_sq(struct hinic_hwdev *hwdev, u16 q_id, u16 sq_depth)
+int hinic_create_sq(struct hinic_hwdev *hwdev, u16 q_id,
+ u16 sq_depth, unsigned int socket_id)
{
int err;
struct hinic_nic_io *nic_io = hwdev->nic_io;
/* alloc wq */
err = hinic_wq_allocate(nic_io->hwdev, &nic_io->sq_wq[q_id],
- HINIC_SQ_WQEBB_SHIFT, nic_io->sq_depth);
+ HINIC_SQ_WQEBB_SHIFT, nic_io->sq_depth,
+ socket_id);
if (err) {
PMD_DRV_LOG(ERR, "Failed to allocate WQ for SQ");
return err;