X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fhinic%2Fhinic_pmd_tx.c;h=d9f251a32d5a32f8899a559f00a6ae34037fedde;hb=4fd0181f6c00785ba4e2ed211ad27661d70587ad;hp=0ef7add21fb64248f4c27bc8bf63237f7876dce4;hpb=076221c8fe1da262540d97633957102c7cf12122;p=dpdk.git diff --git a/drivers/net/hinic/hinic_pmd_tx.c b/drivers/net/hinic/hinic_pmd_tx.c index 0ef7add21f..d9f251a32d 100644 --- a/drivers/net/hinic/hinic_pmd_tx.c +++ b/drivers/net/hinic/hinic_pmd_tx.c @@ -16,10 +16,14 @@ #include "base/hinic_pmd_hwif.h" #include "base/hinic_pmd_wq.h" #include "base/hinic_pmd_nicio.h" +#include "base/hinic_pmd_niccfg.h" #include "hinic_pmd_ethdev.h" #include "hinic_pmd_tx.h" /* packet header and tx offload info */ +#define ETHER_LEN_NO_VLAN 14 +#define ETHER_LEN_WITH_VLAN 18 +#define HEADER_LEN_OFFSET 2 #define VXLANLEN 8 #define MAX_PLD_OFFSET 221 #define MAX_SINGLE_SGE_SIZE 65536 @@ -306,6 +310,8 @@ static inline struct rte_mbuf *hinic_copy_tx_mbuf(struct hinic_nic_dev *nic_dev, mbuf = mbuf->next; } + dst_mbuf->pkt_len = dst_mbuf->data_len; + return dst_mbuf; } @@ -327,7 +333,16 @@ static inline bool hinic_mbuf_dma_map_sge(struct hinic_txq *txq, i = 0; for (sge_idx = sges; (u64)sge_idx <= txq->sq_bot_sge_addr; sge_idx++) { + if (unlikely(mbuf == NULL)) { + txq->txq_stats.mbuf_null++; + return false; + } + dma_addr = rte_mbuf_data_iova(mbuf); + if (unlikely(mbuf->data_len == 0)) { + txq->txq_stats.sge_len0++; + return false; + } hinic_set_sge((struct hinic_sge *)sge_idx, dma_addr, mbuf->data_len); mbuf = mbuf->next; @@ -338,7 +353,16 @@ static inline bool hinic_mbuf_dma_map_sge(struct hinic_txq *txq, sge_idx = (struct hinic_sq_bufdesc *) ((void *)txq->sq_head_addr); for (; i < nb_segs; i++) { + if (unlikely(mbuf == NULL)) { + txq->txq_stats.mbuf_null++; + return false; + } + dma_addr = rte_mbuf_data_iova(mbuf); + if (unlikely(mbuf->data_len == 0)) { + txq->txq_stats.sge_len0++; + return false; + } hinic_set_sge((struct hinic_sge *)sge_idx, dma_addr, mbuf->data_len); mbuf = mbuf->next; @@ -350,7 +374,16 @@ static inline bool hinic_mbuf_dma_map_sge(struct hinic_txq *txq, } else { /* wqe is in continuous space */ for (i = 0; i < nb_segs; i++) { + if (unlikely(mbuf == NULL)) { + txq->txq_stats.mbuf_null++; + return false; + } + dma_addr = rte_mbuf_data_iova(mbuf); + if (unlikely(mbuf->data_len == 0)) { + txq->txq_stats.sge_len0++; + return false; + } hinic_set_sge((struct hinic_sge *)sge_idx, dma_addr, mbuf->data_len); mbuf = mbuf->next; @@ -371,6 +404,10 @@ static inline bool hinic_mbuf_dma_map_sge(struct hinic_txq *txq, /* deal with the last mbuf */ dma_addr = rte_mbuf_data_iova(mbuf); + if (unlikely(mbuf->data_len == 0)) { + txq->txq_stats.sge_len0++; + return false; + } hinic_set_sge((struct hinic_sge *)sge_idx, dma_addr, mbuf->data_len); if (unlikely(sqe_info->around)) @@ -415,7 +452,7 @@ static inline bool hinic_is_tso_sge_valid(struct rte_mbuf *mbuf, *poff_info, struct hinic_wqe_info *sqe_info) { - u32 total_len, limit_len, checked_len, left_len; + u32 total_len, limit_len, checked_len, left_len, adjust_mss; u32 i, first_mss_sges, left_sges; struct rte_mbuf *mbuf_head, *mbuf_pre; @@ -425,7 +462,9 @@ static inline bool hinic_is_tso_sge_valid(struct rte_mbuf *mbuf, /* tso sge number validation */ if (unlikely(left_sges >= HINIC_NONTSO_PKT_MAX_SGE)) { checked_len = 0; - limit_len = mbuf->tso_segsz + poff_info->payload_offset; + adjust_mss = mbuf->tso_segsz >= TX_MSS_MIN ? 
+ mbuf->tso_segsz : TX_MSS_MIN; + limit_len = adjust_mss + poff_info->payload_offset; first_mss_sges = HINIC_NONTSO_PKT_MAX_SGE; /* each continues 17 mbufs segmust do one check */ @@ -439,7 +478,7 @@ static inline bool hinic_is_tso_sge_valid(struct rte_mbuf *mbuf, mbuf_pre = mbuf; mbuf = mbuf->next; if (total_len >= limit_len) { - limit_len = mbuf_head->tso_segsz; + limit_len = adjust_mss; break; } } @@ -476,16 +515,16 @@ static inline void hinic_set_l4_csum_info(struct hinic_sq_task *task, u32 *queue_info, struct hinic_tx_offload_info *poff_info) { - u32 tcp_udp_cs, sctp; + u32 tcp_udp_cs, sctp = 0; u16 l2hdr_len; - sctp = 0; if (unlikely(poff_info->inner_l4_type == SCTP_OFFLOAD_ENABLE)) sctp = 1; tcp_udp_cs = poff_info->inner_l4_tcp_udp; - if (poff_info->tunnel_type == TUNNEL_UDP_NO_CSUM) { + if (poff_info->tunnel_type == TUNNEL_UDP_CSUM || + poff_info->tunnel_type == TUNNEL_UDP_NO_CSUM) { l2hdr_len = poff_info->outer_l2_len; task->pkt_info2 |= @@ -629,7 +668,7 @@ static inline void hinic_xmit_mbuf_cleanup(struct hinic_txq *txq) static inline struct hinic_sq_wqe * hinic_get_sq_wqe(struct hinic_txq *txq, int wqebb_cnt, - struct hinic_wqe_info *wqe_info) + struct hinic_wqe_info *wqe_info) { u32 cur_pi, end_pi; u16 remain_wqebbs; @@ -665,50 +704,6 @@ hinic_get_sq_wqe(struct hinic_txq *txq, int wqebb_cnt, return (struct hinic_sq_wqe *)WQ_WQE_ADDR(wq, cur_pi); } -static inline int -hinic_validate_tx_offload(const struct rte_mbuf *m) -{ - uint64_t ol_flags = m->ol_flags; - uint64_t inner_l3_offset = m->l2_len; - - /* just support vxlan offload */ - if ((ol_flags & PKT_TX_TUNNEL_MASK) && - !(ol_flags & PKT_TX_TUNNEL_VXLAN)) - return -ENOTSUP; - - if (ol_flags & PKT_TX_OUTER_IP_CKSUM) - inner_l3_offset += m->outer_l2_len + m->outer_l3_len; - - /* Headers are fragmented */ - if (rte_pktmbuf_data_len(m) < inner_l3_offset + m->l3_len + m->l4_len) - return -ENOTSUP; - - /* IP checksum can be counted only for IPv4 packet */ - if ((ol_flags & PKT_TX_IP_CKSUM) && (ol_flags & PKT_TX_IPV6)) - return -EINVAL; - - /* IP type not set when required */ - if (ol_flags & (PKT_TX_L4_MASK | PKT_TX_TCP_SEG)) { - if (!(ol_flags & (PKT_TX_IPV4 | PKT_TX_IPV6))) - return -EINVAL; - } - - /* Check requirements for TSO packet */ - if (ol_flags & PKT_TX_TCP_SEG) { - if (m->tso_segsz == 0 || - ((ol_flags & PKT_TX_IPV4) && - !(ol_flags & PKT_TX_IP_CKSUM))) - return -EINVAL; - } - - /* PKT_TX_OUTER_IP_CKSUM set for non outer IPv4 packet. 
*/ - if ((ol_flags & PKT_TX_OUTER_IP_CKSUM) && - !(ol_flags & PKT_TX_OUTER_IPV4)) - return -EINVAL; - - return 0; -} - static inline uint16_t hinic_ipv4_phdr_cksum(const struct rte_ipv4_hdr *ipv4_hdr, uint64_t ol_flags) { @@ -760,174 +755,294 @@ hinic_ipv6_phdr_cksum(const struct rte_ipv6_hdr *ipv6_hdr, uint64_t ol_flags) return __rte_raw_cksum_reduce(sum); } -static inline int -hinic_tx_offload_pkt_prepare(struct rte_mbuf *m, - struct hinic_tx_offload_info *off_info) +static inline void hinic_get_outer_cs_pld_offset(struct rte_mbuf *m, + struct hinic_tx_offload_info *off_info) +{ + uint64_t ol_flags = m->ol_flags; + + if ((ol_flags & PKT_TX_L4_MASK) == PKT_TX_UDP_CKSUM) + off_info->payload_offset = m->outer_l2_len + m->outer_l3_len + + m->l2_len + m->l3_len; + else if ((ol_flags & PKT_TX_TCP_CKSUM) || (ol_flags & PKT_TX_TCP_SEG)) + off_info->payload_offset = m->outer_l2_len + m->outer_l3_len + + m->l2_len + m->l3_len + m->l4_len; +} + +static inline void hinic_get_pld_offset(struct rte_mbuf *m, + struct hinic_tx_offload_info *off_info) +{ + uint64_t ol_flags = m->ol_flags; + + if ((ol_flags & PKT_TX_L4_MASK) == PKT_TX_UDP_CKSUM) + off_info->payload_offset = m->l2_len + m->l3_len; + else if ((ol_flags & PKT_TX_TCP_CKSUM) || (ol_flags & PKT_TX_TCP_SEG)) + off_info->payload_offset = m->l2_len + m->l3_len + + m->l4_len; +} + +static inline void hinic_analyze_tx_info(struct rte_mbuf *mbuf, + struct hinic_tx_offload_info *off_info) +{ + struct rte_ether_hdr *eth_hdr; + struct rte_vlan_hdr *vlan_hdr; + struct rte_ipv4_hdr *ip4h; + u16 pkt_type; + u8 *hdr; + + hdr = (u8 *)rte_pktmbuf_mtod(mbuf, u8*); + eth_hdr = (struct rte_ether_hdr *)hdr; + pkt_type = rte_be_to_cpu_16(eth_hdr->ether_type); + + if (pkt_type == RTE_ETHER_TYPE_VLAN) { + off_info->outer_l2_len = ETHER_LEN_WITH_VLAN; + vlan_hdr = (struct rte_vlan_hdr *)(hdr + 1); + pkt_type = rte_be_to_cpu_16(vlan_hdr->eth_proto); + } else { + off_info->outer_l2_len = ETHER_LEN_NO_VLAN; + } + + if (pkt_type == RTE_ETHER_TYPE_IPV4) { + ip4h = (struct rte_ipv4_hdr *)(hdr + off_info->outer_l2_len); + off_info->outer_l3_len = (ip4h->version_ihl & 0xf) << + HEADER_LEN_OFFSET; + } else if (pkt_type == RTE_ETHER_TYPE_IPV6) { + /* not support ipv6 extension header */ + off_info->outer_l3_len = sizeof(struct rte_ipv6_hdr); + } +} + +static inline void hinic_analyze_outer_ip_vxlan(struct rte_mbuf *mbuf, + struct hinic_tx_offload_info *off_info) { - struct rte_ipv4_hdr *ipv4_hdr; - struct rte_ipv6_hdr *ipv6_hdr; - struct rte_tcp_hdr *tcp_hdr; - struct rte_udp_hdr *udp_hdr; struct rte_ether_hdr *eth_hdr; struct rte_vlan_hdr *vlan_hdr; + struct rte_ipv4_hdr *ipv4_hdr; + struct rte_udp_hdr *udp_hdr; u16 eth_type = 0; - uint64_t inner_l3_offset = m->l2_len; - uint64_t ol_flags = m->ol_flags; - /* Does packet set any of available offloads */ - if (!(ol_flags & HINIC_TX_CKSUM_OFFLOAD_MASK)) - return 0; + eth_hdr = rte_pktmbuf_mtod(mbuf, struct rte_ether_hdr *); + eth_type = rte_be_to_cpu_16(eth_hdr->ether_type); - if (unlikely(hinic_validate_tx_offload(m))) - return -EINVAL; + if (eth_type == RTE_ETHER_TYPE_VLAN) { + vlan_hdr = (struct rte_vlan_hdr *)(eth_hdr + 1); + eth_type = rte_be_to_cpu_16(vlan_hdr->eth_proto); + } - if ((ol_flags & PKT_TX_OUTER_IP_CKSUM) || - (ol_flags & PKT_TX_OUTER_IPV6) || - (ol_flags & PKT_TX_TUNNEL_VXLAN)) { - inner_l3_offset += m->outer_l2_len + m->outer_l3_len; - off_info->outer_l2_len = m->outer_l2_len; - off_info->outer_l3_len = m->outer_l3_len; - /* just support vxlan tunneling pkt */ - off_info->inner_l2_len = m->l2_len - VXLANLEN - 
- sizeof(struct rte_udp_hdr); - off_info->inner_l3_len = m->l3_len; - off_info->inner_l4_len = m->l4_len; - off_info->tunnel_length = m->l2_len; - off_info->payload_offset = m->outer_l2_len + - m->outer_l3_len + m->l2_len + m->l3_len; - off_info->tunnel_type = TUNNEL_UDP_NO_CSUM; - } else { - off_info->inner_l2_len = m->l2_len; - off_info->inner_l3_len = m->l3_len; - off_info->inner_l4_len = m->l4_len; - off_info->tunnel_type = NOT_TUNNEL; - off_info->payload_offset = m->l2_len + m->l3_len; + if (eth_type == RTE_ETHER_TYPE_IPV4) { + ipv4_hdr = rte_pktmbuf_mtod_offset(mbuf, struct rte_ipv4_hdr *, + mbuf->outer_l2_len); + off_info->outer_l3_type = IPV4_PKT_WITH_CHKSUM_OFFLOAD; + ipv4_hdr->hdr_checksum = 0; + + udp_hdr = (struct rte_udp_hdr *)((char *)ipv4_hdr + + mbuf->outer_l3_len); + udp_hdr->dgram_cksum = 0; + } else if (eth_type == RTE_ETHER_TYPE_IPV6) { + off_info->outer_l3_type = IPV6_PKT; + + udp_hdr = rte_pktmbuf_mtod_offset(mbuf, struct rte_udp_hdr *, + (mbuf->outer_l2_len + + mbuf->outer_l3_len)); + udp_hdr->dgram_cksum = 0; } +} - if (((ol_flags & PKT_TX_L4_MASK) != PKT_TX_SCTP_CKSUM) && - ((ol_flags & PKT_TX_L4_MASK) != PKT_TX_UDP_CKSUM)) - off_info->payload_offset += m->l4_len; +static inline uint8_t hinic_analyze_l3_type(struct rte_mbuf *mbuf) +{ + uint8_t l3_type; + uint64_t ol_flags = mbuf->ol_flags; - /* invalid udp or tcp header */ - if (unlikely(off_info->payload_offset > MAX_PLD_OFFSET)) - return -EINVAL; + if (ol_flags & PKT_TX_IPV4) + l3_type = (ol_flags & PKT_TX_IP_CKSUM) ? + IPV4_PKT_WITH_CHKSUM_OFFLOAD : + IPV4_PKT_NO_CHKSUM_OFFLOAD; + else if (ol_flags & PKT_TX_IPV6) + l3_type = IPV6_PKT; + else + l3_type = UNKNOWN_L3TYPE; - /* Process outter udp pseudo-header checksum */ - if ((ol_flags & PKT_TX_TUNNEL_VXLAN) && ((ol_flags & PKT_TX_TCP_SEG) || - (ol_flags & PKT_TX_OUTER_IP_CKSUM) || - (ol_flags & PKT_TX_OUTER_IPV6))) { - off_info->tunnel_type = TUNNEL_UDP_CSUM; + return l3_type; +} - /* inner_l4_tcp_udp csum should be setted to calculate outter - * udp checksum when vxlan packets without inner l3 and l4 - */ - off_info->inner_l4_tcp_udp = 1; +static inline void hinic_calculate_tcp_checksum(struct rte_mbuf *mbuf, + struct hinic_tx_offload_info *off_info, + uint64_t inner_l3_offset) +{ + struct rte_ipv4_hdr *ipv4_hdr; + struct rte_ipv6_hdr *ipv6_hdr; + struct rte_tcp_hdr *tcp_hdr; + uint64_t ol_flags = mbuf->ol_flags; - eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *); - eth_type = rte_be_to_cpu_16(eth_hdr->ether_type); + if (ol_flags & PKT_TX_IPV4) { + ipv4_hdr = rte_pktmbuf_mtod_offset(mbuf, struct rte_ipv4_hdr *, + inner_l3_offset); - if (eth_type == RTE_ETHER_TYPE_VLAN) { - vlan_hdr = (struct rte_vlan_hdr *)(eth_hdr + 1); - eth_type = rte_be_to_cpu_16(vlan_hdr->eth_proto); - } + if (ol_flags & PKT_TX_IP_CKSUM) + ipv4_hdr->hdr_checksum = 0; + + tcp_hdr = (struct rte_tcp_hdr *)((char *)ipv4_hdr + + mbuf->l3_len); + tcp_hdr->cksum = hinic_ipv4_phdr_cksum(ipv4_hdr, ol_flags); + } else { + ipv6_hdr = rte_pktmbuf_mtod_offset(mbuf, struct rte_ipv6_hdr *, + inner_l3_offset); + tcp_hdr = rte_pktmbuf_mtod_offset(mbuf, struct rte_tcp_hdr *, + (inner_l3_offset + + mbuf->l3_len)); + tcp_hdr->cksum = hinic_ipv6_phdr_cksum(ipv6_hdr, ol_flags); + } + + off_info->inner_l4_type = TCP_OFFLOAD_ENABLE; + off_info->inner_l4_tcp_udp = 1; +} + +static inline void hinic_calculate_udp_checksum(struct rte_mbuf *mbuf, + struct hinic_tx_offload_info *off_info, + uint64_t inner_l3_offset) +{ + struct rte_ipv4_hdr *ipv4_hdr; + struct rte_ipv6_hdr *ipv6_hdr; + struct rte_udp_hdr *udp_hdr; 
+ uint64_t ol_flags = mbuf->ol_flags; + + if (ol_flags & PKT_TX_IPV4) { + ipv4_hdr = rte_pktmbuf_mtod_offset(mbuf, struct rte_ipv4_hdr *, + inner_l3_offset); - if (eth_type == RTE_ETHER_TYPE_IPV4) { - ipv4_hdr = - rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *, - m->outer_l2_len); - off_info->outer_l3_type = IPV4_PKT_WITH_CHKSUM_OFFLOAD; + if (ol_flags & PKT_TX_IP_CKSUM) ipv4_hdr->hdr_checksum = 0; - udp_hdr = (struct rte_udp_hdr *)((char *)ipv4_hdr + - m->outer_l3_len); - udp_hdr->dgram_cksum = - hinic_ipv4_phdr_cksum(ipv4_hdr, ol_flags); - } else if (eth_type == RTE_ETHER_TYPE_IPV6) { - off_info->outer_l3_type = IPV6_PKT; - ipv6_hdr = - rte_pktmbuf_mtod_offset(m, struct rte_ipv6_hdr *, - m->outer_l2_len); - - udp_hdr = - rte_pktmbuf_mtod_offset(m, struct rte_udp_hdr *, - (m->outer_l2_len + - m->outer_l3_len)); - udp_hdr->dgram_cksum = - hinic_ipv6_phdr_cksum(ipv6_hdr, ol_flags); - } + udp_hdr = (struct rte_udp_hdr *)((char *)ipv4_hdr + + mbuf->l3_len); + udp_hdr->dgram_cksum = hinic_ipv4_phdr_cksum(ipv4_hdr, + ol_flags); + } else { + ipv6_hdr = rte_pktmbuf_mtod_offset(mbuf, struct rte_ipv6_hdr *, + inner_l3_offset); + + udp_hdr = rte_pktmbuf_mtod_offset(mbuf, struct rte_udp_hdr *, + (inner_l3_offset + + mbuf->l3_len)); + udp_hdr->dgram_cksum = hinic_ipv6_phdr_cksum(ipv6_hdr, + ol_flags); } - if (ol_flags & PKT_TX_IPV4) - off_info->inner_l3_type = (ol_flags & PKT_TX_IP_CKSUM) ? - IPV4_PKT_WITH_CHKSUM_OFFLOAD : - IPV4_PKT_NO_CHKSUM_OFFLOAD; - else if (ol_flags & PKT_TX_IPV6) - off_info->inner_l3_type = IPV6_PKT; + off_info->inner_l4_type = UDP_OFFLOAD_ENABLE; + off_info->inner_l4_tcp_udp = 1; +} - /* Process the pseudo-header checksum */ - if ((ol_flags & PKT_TX_L4_MASK) == PKT_TX_UDP_CKSUM) { - if (ol_flags & PKT_TX_IPV4) { - ipv4_hdr = - rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *, - inner_l3_offset); - - if (ol_flags & PKT_TX_IP_CKSUM) - ipv4_hdr->hdr_checksum = 0; - - udp_hdr = (struct rte_udp_hdr *)((char *)ipv4_hdr + - m->l3_len); - udp_hdr->dgram_cksum = - hinic_ipv4_phdr_cksum(ipv4_hdr, ol_flags); - } else { - ipv6_hdr = - rte_pktmbuf_mtod_offset(m, struct rte_ipv6_hdr *, - inner_l3_offset); - - udp_hdr = - rte_pktmbuf_mtod_offset(m, struct rte_udp_hdr *, - (inner_l3_offset + m->l3_len)); - udp_hdr->dgram_cksum = - hinic_ipv6_phdr_cksum(ipv6_hdr, ol_flags); - } +static inline void +hinic_calculate_sctp_checksum(struct hinic_tx_offload_info *off_info) +{ + off_info->inner_l4_type = SCTP_OFFLOAD_ENABLE; + off_info->inner_l4_tcp_udp = 0; + off_info->inner_l4_len = sizeof(struct rte_sctp_hdr); +} + +static inline void hinic_calculate_checksum(struct rte_mbuf *mbuf, + struct hinic_tx_offload_info *off_info, + uint64_t inner_l3_offset) +{ + uint64_t ol_flags = mbuf->ol_flags; + + switch (ol_flags & PKT_TX_L4_MASK) { + case PKT_TX_UDP_CKSUM: + hinic_calculate_udp_checksum(mbuf, off_info, inner_l3_offset); + break; + + case PKT_TX_TCP_CKSUM: + hinic_calculate_tcp_checksum(mbuf, off_info, inner_l3_offset); + break; + + case PKT_TX_SCTP_CKSUM: + hinic_calculate_sctp_checksum(off_info); + break; + + default: + if (ol_flags & PKT_TX_TCP_SEG) + hinic_calculate_tcp_checksum(mbuf, off_info, + inner_l3_offset); + break; + } +} + +static inline int hinic_tx_offload_pkt_prepare(struct rte_mbuf *m, + struct hinic_tx_offload_info *off_info) +{ + uint64_t inner_l3_offset; + uint64_t ol_flags = m->ol_flags; + + /* Check if the packets set available offload flags */ + if (!(ol_flags & HINIC_TX_CKSUM_OFFLOAD_MASK)) + return 0; + + /* Support only vxlan offload */ + if (unlikely((ol_flags & 
PKT_TX_TUNNEL_MASK) && + !(ol_flags & PKT_TX_TUNNEL_VXLAN))) + return -ENOTSUP; - off_info->inner_l4_type = UDP_OFFLOAD_ENABLE; +#ifdef RTE_LIBRTE_ETHDEV_DEBUG + if (rte_validate_tx_offload(m) != 0) + return -EINVAL; +#endif + + if (ol_flags & PKT_TX_TUNNEL_VXLAN) { + off_info->tunnel_type = TUNNEL_UDP_NO_CSUM; + + /* inner_l4_tcp_udp csum should be set to calculate outer + * udp checksum when vxlan packets without inner l3 and l4 + */ off_info->inner_l4_tcp_udp = 1; - off_info->inner_l4_len = sizeof(struct rte_udp_hdr); - } else if (((ol_flags & PKT_TX_L4_MASK) == PKT_TX_TCP_CKSUM) || - (ol_flags & PKT_TX_TCP_SEG)) { - if (ol_flags & PKT_TX_IPV4) { - ipv4_hdr = - rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *, - inner_l3_offset); - - if (ol_flags & PKT_TX_IP_CKSUM) - ipv4_hdr->hdr_checksum = 0; - - /* non-TSO tcp */ - tcp_hdr = (struct rte_tcp_hdr *)((char *)ipv4_hdr + - m->l3_len); - tcp_hdr->cksum = - hinic_ipv4_phdr_cksum(ipv4_hdr, ol_flags); + + if ((ol_flags & PKT_TX_OUTER_IP_CKSUM) || + (ol_flags & PKT_TX_OUTER_IPV6) || + (ol_flags & PKT_TX_TCP_SEG)) { + inner_l3_offset = m->l2_len + m->outer_l2_len + + m->outer_l3_len; + off_info->outer_l2_len = m->outer_l2_len; + off_info->outer_l3_len = m->outer_l3_len; + /* just support vxlan tunneling pkt */ + off_info->inner_l2_len = m->l2_len - VXLANLEN - + sizeof(struct rte_udp_hdr); + off_info->tunnel_length = m->l2_len; + + hinic_analyze_outer_ip_vxlan(m, off_info); + + hinic_get_outer_cs_pld_offset(m, off_info); } else { - ipv6_hdr = - rte_pktmbuf_mtod_offset(m, struct rte_ipv6_hdr *, - inner_l3_offset); - /* non-TSO tcp */ - tcp_hdr = - rte_pktmbuf_mtod_offset(m, struct rte_tcp_hdr *, - (inner_l3_offset + m->l3_len)); - tcp_hdr->cksum = - hinic_ipv6_phdr_cksum(ipv6_hdr, ol_flags); + inner_l3_offset = m->l2_len; + hinic_analyze_tx_info(m, off_info); + /* just support vxlan tunneling pkt */ + off_info->inner_l2_len = m->l2_len - VXLANLEN - + sizeof(struct rte_udp_hdr) - + off_info->outer_l2_len - + off_info->outer_l3_len; + off_info->tunnel_length = m->l2_len - + off_info->outer_l2_len - + off_info->outer_l3_len; + off_info->outer_l3_type = IPV4_PKT_NO_CHKSUM_OFFLOAD; + + hinic_get_pld_offset(m, off_info); } + } else { + inner_l3_offset = m->l2_len; + off_info->inner_l2_len = m->l2_len; + off_info->tunnel_type = NOT_TUNNEL; - off_info->inner_l4_type = TCP_OFFLOAD_ENABLE; - off_info->inner_l4_tcp_udp = 1; - } else if ((ol_flags & PKT_TX_L4_MASK) == PKT_TX_SCTP_CKSUM) { - off_info->inner_l4_type = SCTP_OFFLOAD_ENABLE; - off_info->inner_l4_tcp_udp = 0; - off_info->inner_l4_len = sizeof(struct rte_sctp_hdr); + hinic_get_pld_offset(m, off_info); } + /* invalid udp or tcp header */ + if (unlikely(off_info->payload_offset > MAX_PLD_OFFSET)) + return -EINVAL; + + off_info->inner_l3_len = m->l3_len; + off_info->inner_l4_len = m->l4_len; + off_info->inner_l3_type = hinic_analyze_l3_type(m); + + /* Process the pseudo-header checksum */ + hinic_calculate_checksum(m, off_info, inner_l3_offset); + return 0; } @@ -1105,7 +1220,7 @@ u16 hinic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, u16 nb_pkts) return nb_tx; } -void hinic_free_all_tx_skbs(struct hinic_txq *txq) +void hinic_free_all_tx_mbufs(struct hinic_txq *txq) { u16 ci; struct hinic_nic_dev *nic_dev = txq->nic_dev; @@ -1139,13 +1254,14 @@ void hinic_free_all_tx_resources(struct rte_eth_dev *eth_dev) HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(eth_dev); for (q_id = 0; q_id < nic_dev->num_sq; q_id++) { - eth_dev->data->tx_queues[q_id] = NULL; + if (eth_dev->data->tx_queues != NULL) + 
eth_dev->data->tx_queues[q_id] = NULL; if (nic_dev->txqs[q_id] == NULL) continue; /* stop tx queue free tx mbuf */ - hinic_free_all_tx_skbs(nic_dev->txqs[q_id]); + hinic_free_all_tx_mbufs(nic_dev->txqs[q_id]); hinic_free_tx_resources(nic_dev->txqs[q_id]); /* free txq */ @@ -1162,7 +1278,7 @@ void hinic_free_all_tx_mbuf(struct rte_eth_dev *eth_dev) for (q_id = 0; q_id < nic_dev->num_sq; q_id++) /* stop tx queue free tx mbuf */ - hinic_free_all_tx_skbs(nic_dev->txqs[q_id]); + hinic_free_all_tx_mbufs(nic_dev->txqs[q_id]); } int hinic_setup_tx_resources(struct hinic_txq *txq) @@ -1170,7 +1286,8 @@ int hinic_setup_tx_resources(struct hinic_txq *txq) u64 tx_info_sz; tx_info_sz = txq->q_depth * sizeof(*txq->tx_info); - txq->tx_info = kzalloc_aligned(tx_info_sz, GFP_KERNEL); + txq->tx_info = rte_zmalloc_socket("tx_info", tx_info_sz, + RTE_CACHE_LINE_SIZE, txq->socket_id); if (!txq->tx_info) return -ENOMEM; @@ -1182,11 +1299,12 @@ void hinic_free_tx_resources(struct hinic_txq *txq) if (txq->tx_info == NULL) return; - kfree(txq->tx_info); + rte_free(txq->tx_info); txq->tx_info = NULL; } -int hinic_create_sq(struct hinic_hwdev *hwdev, u16 q_id, u16 sq_depth) +int hinic_create_sq(struct hinic_hwdev *hwdev, u16 q_id, + u16 sq_depth, unsigned int socket_id) { int err; struct hinic_nic_io *nic_io = hwdev->nic_io; @@ -1200,7 +1318,8 @@ int hinic_create_sq(struct hinic_hwdev *hwdev, u16 q_id, u16 sq_depth) /* alloc wq */ err = hinic_wq_allocate(nic_io->hwdev, &nic_io->sq_wq[q_id], - HINIC_SQ_WQEBB_SHIFT, nic_io->sq_depth); + HINIC_SQ_WQEBB_SHIFT, nic_io->sq_depth, + socket_id); if (err) { PMD_DRV_LOG(ERR, "Failed to allocate WQ for SQ"); return err;
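
For reference, the hunks above split hinic_tx_offload_pkt_prepare() into small helpers that parse the outer Ethernet/VLAN/IP headers by hand (hinic_analyze_tx_info(), hinic_analyze_outer_ip_vxlan()). The standalone sketch below, built only from stock DPDK definitions, illustrates the same parsing and the IHL arithmetic — (version_ihl & 0xf) << HEADER_LEN_OFFSET yields the IPv4 header length in bytes. The function and variable names here are illustrative and are not part of the patch.

/*
 * Sketch (not part of the patch) of the outer L2/L3 length parsing done by
 * hinic_analyze_tx_info(): Ethernet -> optional single VLAN tag -> IPv4/IPv6,
 * mirroring the driver's assumptions (at most one VLAN tag, no IPv6
 * extension headers).
 */
#include <stdint.h>
#include <rte_byteorder.h>
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_mbuf.h>

static void
sketch_outer_hdr_lens(struct rte_mbuf *m, uint16_t *l2_len, uint16_t *l3_len)
{
	const struct rte_ether_hdr *eth =
		rte_pktmbuf_mtod(m, const struct rte_ether_hdr *);
	uint16_t ether_type = rte_be_to_cpu_16(eth->ether_type);

	*l2_len = RTE_ETHER_HDR_LEN;	/* 14 bytes, i.e. ETHER_LEN_NO_VLAN */
	if (ether_type == RTE_ETHER_TYPE_VLAN) {
		const struct rte_vlan_hdr *vlan =
			(const struct rte_vlan_hdr *)(eth + 1);

		ether_type = rte_be_to_cpu_16(vlan->eth_proto);
		*l2_len += sizeof(struct rte_vlan_hdr);	/* 18 bytes total */
	}

	if (ether_type == RTE_ETHER_TYPE_IPV4) {
		const struct rte_ipv4_hdr *ip4 = rte_pktmbuf_mtod_offset(m,
			const struct rte_ipv4_hdr *, *l2_len);

		/* IHL is counted in 32-bit words, so the header length in
		 * bytes is (version_ihl & 0xf) << 2 — the same shift the
		 * patch names HEADER_LEN_OFFSET.
		 */
		*l3_len = (ip4->version_ihl & RTE_IPV4_HDR_IHL_MASK) << 2;
	} else if (ether_type == RTE_ETHER_TYPE_IPV6) {
		/* IPv6 extension headers are not handled, as in the driver */
		*l3_len = sizeof(struct rte_ipv6_hdr);
	} else {
		*l3_len = 0;
	}
}

The same change set also replaces kzalloc_aligned()/kfree() with rte_zmalloc_socket("tx_info", ..., RTE_CACHE_LINE_SIZE, txq->socket_id) and rte_free(), and threads socket_id through hinic_create_sq()/hinic_wq_allocate(), so the per-queue bookkeeping memory is allocated on the queue's own NUMA node.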