#include <rte_sctp.h>
#include <rte_udp.h>
#include <rte_ip.h>
-#ifdef __ARM64_NEON__
+#ifdef RTE_ARCH_ARM64
#include <arm_neon.h>
#endif
/* packet header and tx offload info */
#define ETHER_LEN_NO_VLAN 14
#define ETHER_LEN_WITH_VLAN 18
-#define HEADER_LEN_OFFSET 2
#define VXLANLEN 8
#define MAX_PLD_OFFSET 221
#define MAX_SINGLE_SGE_SIZE 65536
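/*
 * Byte-swap helpers: hinic_sq_wqe_cpu_to_be32() converts a send queue WQE
 * to big endian and hinic_sge_cpu_to_be32() does the same for SGE entries,
 * shuffling 16 bytes per step. The vector paths are now gated on the
 * architecture macros defined by the DPDK build (RTE_ARCH_X86_64 /
 * RTE_ARCH_ARM64) instead of the old __X86_64_SSE__ / __ARM64_NEON__ defines.
 */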
static inline void hinic_sq_wqe_cpu_to_be32(void *data, int nr_wqebb)
{
-#if defined(__X86_64_SSE__)
+#if defined(RTE_ARCH_X86_64)
int i;
__m128i *wqe_line = (__m128i *)data;
__m128i shuf_mask = _mm_set_epi8(12, 13, 14, 15, 8, 9, 10, 11, 4, 5, 6, 7, 0, 1, 2, 3);
wqe_line[3] = _mm_shuffle_epi8(wqe_line[3], shuf_mask);
wqe_line += 4;
}
-#elif defined(__ARM64_NEON__)
+#elif defined(RTE_ARCH_ARM64)
int i;
uint8x16_t *wqe_line = (uint8x16_t *)data;
const uint8x16_t shuf_mask = {3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12};
static inline void hinic_sge_cpu_to_be32(void *data, int nr_sge)
{
-#if defined(__X86_64_SSE__)
+#if defined(RTE_ARCH_X86_64)
int i;
__m128i *sge_line = (__m128i *)data;
__m128i shuf_mask = _mm_set_epi8(12, 13, 14, 15, 8, 9, 10, 11, 4, 5, 6, 7, 0, 1, 2, 3);
*sge_line = _mm_shuffle_epi8(*sge_line, shuf_mask);
sge_line++;
}
-#elif defined(__ARM64_NEON__)
+#elif defined(RTE_ARCH_ARM64)
int i;
uint8x16_t *sge_line = (uint8x16_t *)data;
const uint8x16_t shuf_mask = {3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12};
task->pkt_info2 = 0;
/* Base VLAN */
- if (unlikely(ol_flags & PKT_TX_VLAN_PKT)) {
+ if (unlikely(ol_flags & RTE_MBUF_F_TX_VLAN_PKT)) {
vlan_tag = mbuf->vlan_tci;
hinic_set_vlan_tx_offload(task, queue_info, vlan_tag,
vlan_tag >> VLAN_PRIO_SHIFT);
if (unlikely(!(ol_flags & HINIC_TX_CKSUM_OFFLOAD_MASK)))
return;
- if ((ol_flags & PKT_TX_TCP_SEG))
+ if ((ol_flags & RTE_MBUF_F_TX_TCP_SEG))
/* set tso info for task and qsf */
hinic_set_tso_info(task, queue_info, mbuf, tx_off_info);
else /* just support l4 checksum offload */
uint8_t proto; /* L4 protocol type. */
uint16_t len; /* L4 length. */
} psd_hdr;
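/*
 * IPv4 pseudo-header checksum: when TSO is requested the length field
 * stays zero, otherwise it is total_length minus the IP header length,
 * now taken from rte_ipv4_hdr_len() instead of the removed open-coded
 * (version_ihl & 0xF) << 2 computation.
 */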
- uint8_t ihl;
psd_hdr.src_addr = ipv4_hdr->src_addr;
psd_hdr.dst_addr = ipv4_hdr->dst_addr;
psd_hdr.zero = 0;
psd_hdr.proto = ipv4_hdr->next_proto_id;
- if (ol_flags & PKT_TX_TCP_SEG) {
+ if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
psd_hdr.len = 0;
} else {
- /* ipv4_hdr->version_ihl is uint8_t big endian, ihl locates
- * lower 4 bits and unit is 4 bytes
- */
- ihl = (ipv4_hdr->version_ihl & 0xF) << 2;
psd_hdr.len =
rte_cpu_to_be_16(rte_be_to_cpu_16(ipv4_hdr->total_length) -
- ihl);
+ rte_ipv4_hdr_len(ipv4_hdr));
}
return rte_raw_cksum(&psd_hdr, sizeof(psd_hdr));
}
} psd_hdr;
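/*
 * IPv6 variant: the protocol is placed in the top byte of a 32-bit field
 * and, as above, the pseudo-header length stays zero for TSO; otherwise
 * payload_len is used directly.
 */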
psd_hdr.proto = (ipv6_hdr->proto << 24);
- if (ol_flags & PKT_TX_TCP_SEG)
+ if (ol_flags & RTE_MBUF_F_TX_TCP_SEG)
psd_hdr.len = 0;
else
psd_hdr.len = ipv6_hdr->payload_len;
{
uint64_t ol_flags = m->ol_flags;
- if ((ol_flags & PKT_TX_L4_MASK) == PKT_TX_UDP_CKSUM)
+ if ((ol_flags & RTE_MBUF_F_TX_L4_MASK) == RTE_MBUF_F_TX_UDP_CKSUM)
off_info->payload_offset = m->outer_l2_len + m->outer_l3_len +
m->l2_len + m->l3_len;
- else if ((ol_flags & PKT_TX_TCP_CKSUM) || (ol_flags & PKT_TX_TCP_SEG))
+ else if ((ol_flags & RTE_MBUF_F_TX_TCP_CKSUM) || (ol_flags & RTE_MBUF_F_TX_TCP_SEG))
off_info->payload_offset = m->outer_l2_len + m->outer_l3_len +
m->l2_len + m->l3_len + m->l4_len;
}
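/*
 * Non-tunnelled payload offset below: the SCTP checksum case is grouped
 * with UDP, so both place the payload offset at the start of the L4
 * header, while TCP and TSO keep placing it after the TCP header.
 */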
{
uint64_t ol_flags = m->ol_flags;
- if ((ol_flags & PKT_TX_L4_MASK) == PKT_TX_UDP_CKSUM)
+ if (((ol_flags & RTE_MBUF_F_TX_L4_MASK) == RTE_MBUF_F_TX_UDP_CKSUM) ||
+ ((ol_flags & RTE_MBUF_F_TX_L4_MASK) == RTE_MBUF_F_TX_SCTP_CKSUM))
off_info->payload_offset = m->l2_len + m->l3_len;
- else if ((ol_flags & PKT_TX_TCP_CKSUM) || (ol_flags & PKT_TX_TCP_SEG))
+ else if ((ol_flags & RTE_MBUF_F_TX_TCP_CKSUM) || (ol_flags & RTE_MBUF_F_TX_TCP_SEG))
off_info->payload_offset = m->l2_len + m->l3_len +
m->l4_len;
}
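/*
 * Outer header parsing: the outer L2 length accounts for one optional
 * VLAN tag, and the outer L3 length now comes from rte_ipv4_hdr_len()
 * (or the fixed IPv6 header size), with the headers read through
 * rte_pktmbuf_mtod()/rte_pktmbuf_mtod_offset() instead of raw pointer
 * arithmetic on the mbuf data.
 */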
{
struct rte_ether_hdr *eth_hdr;
struct rte_vlan_hdr *vlan_hdr;
- struct rte_ipv4_hdr *ip4h;
- u16 pkt_type;
- u8 *hdr;
+ struct rte_ipv4_hdr *ipv4_hdr;
+ u16 eth_type;
- hdr = (u8 *)rte_pktmbuf_mtod(mbuf, u8*);
- eth_hdr = (struct rte_ether_hdr *)hdr;
- pkt_type = rte_be_to_cpu_16(eth_hdr->ether_type);
+ eth_hdr = rte_pktmbuf_mtod(mbuf, struct rte_ether_hdr *);
+ eth_type = rte_be_to_cpu_16(eth_hdr->ether_type);
- if (pkt_type == RTE_ETHER_TYPE_VLAN) {
+ if (eth_type == RTE_ETHER_TYPE_VLAN) {
off_info->outer_l2_len = ETHER_LEN_WITH_VLAN;
- vlan_hdr = (struct rte_vlan_hdr *)(hdr + 1);
- pkt_type = rte_be_to_cpu_16(vlan_hdr->eth_proto);
+ vlan_hdr = (struct rte_vlan_hdr *)(eth_hdr + 1);
+ eth_type = rte_be_to_cpu_16(vlan_hdr->eth_proto);
} else {
off_info->outer_l2_len = ETHER_LEN_NO_VLAN;
}
- if (pkt_type == RTE_ETHER_TYPE_IPV4) {
- ip4h = (struct rte_ipv4_hdr *)(hdr + off_info->outer_l2_len);
- off_info->outer_l3_len = (ip4h->version_ihl & 0xf) <<
- HEADER_LEN_OFFSET;
- } else if (pkt_type == RTE_ETHER_TYPE_IPV6) {
+ if (eth_type == RTE_ETHER_TYPE_IPV4) {
+ ipv4_hdr = rte_pktmbuf_mtod_offset(mbuf, struct rte_ipv4_hdr *,
+ off_info->outer_l2_len);
+ off_info->outer_l3_len = rte_ipv4_hdr_len(ipv4_hdr);
+ } else if (eth_type == RTE_ETHER_TYPE_IPV6) {
/* IPv6 extension headers are not supported */
off_info->outer_l3_len = sizeof(struct rte_ipv6_hdr);
}
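/*
 * Map the mbuf offload flags to the descriptor L3 type: IPv4 with or
 * without IP checksum offload, IPv6, or unknown.
 */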
uint8_t l3_type;
uint64_t ol_flags = mbuf->ol_flags;
- if (ol_flags & PKT_TX_IPV4)
- l3_type = (ol_flags & PKT_TX_IP_CKSUM) ?
+ if (ol_flags & RTE_MBUF_F_TX_IPV4)
+ l3_type = (ol_flags & RTE_MBUF_F_TX_IP_CKSUM) ?
IPV4_PKT_WITH_CHKSUM_OFFLOAD :
IPV4_PKT_NO_CHKSUM_OFFLOAD;
- else if (ol_flags & PKT_TX_IPV6)
+ else if (ol_flags & RTE_MBUF_F_TX_IPV6)
l3_type = IPV6_PKT;
else
l3_type = UNKNOWN_L3TYPE;
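/*
 * Inner TCP/UDP checksum preparation: the inner IPv4 header is found via
 * rte_pktmbuf_mtod_offset() at inner_l3_offset, and hdr_checksum is
 * cleared when RTE_MBUF_F_TX_IP_CKSUM is set so the hardware can fill it.
 */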
struct rte_tcp_hdr *tcp_hdr;
uint64_t ol_flags = mbuf->ol_flags;
- if (ol_flags & PKT_TX_IPV4) {
+ if (ol_flags & RTE_MBUF_F_TX_IPV4) {
ipv4_hdr = rte_pktmbuf_mtod_offset(mbuf, struct rte_ipv4_hdr *,
inner_l3_offset);
- if (ol_flags & PKT_TX_IP_CKSUM)
+ if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
ipv4_hdr->hdr_checksum = 0;
tcp_hdr = (struct rte_tcp_hdr *)((char *)ipv4_hdr +
struct rte_udp_hdr *udp_hdr;
uint64_t ol_flags = mbuf->ol_flags;
- if (ol_flags & PKT_TX_IPV4) {
+ if (ol_flags & RTE_MBUF_F_TX_IPV4) {
ipv4_hdr = rte_pktmbuf_mtod_offset(mbuf, struct rte_ipv4_hdr *,
inner_l3_offset);
- if (ol_flags & PKT_TX_IP_CKSUM)
+ if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
ipv4_hdr->hdr_checksum = 0;
udp_hdr = (struct rte_udp_hdr *)((char *)ipv4_hdr +
{
uint64_t ol_flags = mbuf->ol_flags;
- switch (ol_flags & PKT_TX_L4_MASK) {
- case PKT_TX_UDP_CKSUM:
+ switch (ol_flags & RTE_MBUF_F_TX_L4_MASK) {
+ case RTE_MBUF_F_TX_UDP_CKSUM:
hinic_calculate_udp_checksum(mbuf, off_info, inner_l3_offset);
break;
- case PKT_TX_TCP_CKSUM:
+ case RTE_MBUF_F_TX_TCP_CKSUM:
hinic_calculate_tcp_checksum(mbuf, off_info, inner_l3_offset);
break;
- case PKT_TX_SCTP_CKSUM:
+ case RTE_MBUF_F_TX_SCTP_CKSUM:
hinic_calculate_sctp_checksum(off_info);
break;
default:
- if (ol_flags & PKT_TX_TCP_SEG)
+ if (ol_flags & RTE_MBUF_F_TX_TCP_SEG)
hinic_calculate_tcp_checksum(mbuf, off_info,
inner_l3_offset);
break;
return 0;
/* Only VXLAN offload is supported */
- if (unlikely((ol_flags & PKT_TX_TUNNEL_MASK) &&
- !(ol_flags & PKT_TX_TUNNEL_VXLAN)))
+ if (unlikely((ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) &&
+ !(ol_flags & RTE_MBUF_F_TX_TUNNEL_VXLAN)))
return -ENOTSUP;
#ifdef RTE_LIBRTE_ETHDEV_DEBUG
return -EINVAL;
#endif
- if (ol_flags & PKT_TX_TUNNEL_VXLAN) {
+ if (ol_flags & RTE_MBUF_F_TX_TUNNEL_VXLAN) {
off_info->tunnel_type = TUNNEL_UDP_NO_CSUM;
/* inner_l4_tcp_udp csum should be set to calculate the outer UDP checksum
*/
off_info->inner_l4_tcp_udp = 1;
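/*
 * With outer IP checksum, outer IPv6 or TSO requested, the inner L3
 * offset below also has to count the outer L2/L3 headers of the VXLAN
 * encapsulation.
 */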
- if ((ol_flags & PKT_TX_OUTER_IP_CKSUM) ||
- (ol_flags & PKT_TX_OUTER_IPV6) ||
- (ol_flags & PKT_TX_TCP_SEG)) {
+ if ((ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM) ||
+ (ol_flags & RTE_MBUF_F_TX_OUTER_IPV6) ||
+ (ol_flags & RTE_MBUF_F_TX_TCP_SEG)) {
inner_l3_offset = m->l2_len + m->outer_l2_len +
m->outer_l3_len;
off_info->outer_l2_len = m->outer_l2_len;
sqe_info->cpy_mbuf_cnt = 0;
/* non tso mbuf */
- if (likely(!(mbuf_pkt->ol_flags & PKT_TX_TCP_SEG))) {
+ if (likely(!(mbuf_pkt->ol_flags & RTE_MBUF_F_TX_TCP_SEG))) {
if (unlikely(mbuf_pkt->pkt_len > MAX_SINGLE_SGE_SIZE)) {
/* non-TSO packet length must be less than 64KB */
return false;