/* Do not delete, this is required by HW*/
ipv4_hdr->hdr_checksum = 0;
- if (tx_ol_flags & 0x1) {
+ if (tx_ol_flags & TESTPMD_TX_OFFLOAD_IP_CKSUM) {
/* HW checksum */
ol_flags |= PKT_TX_IP_CKSUM;
}
if (l4_proto == IPPROTO_UDP) {
udp_hdr = (struct udp_hdr*) (rte_pktmbuf_mtod(mb,
unsigned char *) + l2_len + l3_len);
- if (tx_ol_flags & 0x2) {
+ if (tx_ol_flags & TESTPMD_TX_OFFLOAD_UDP_CKSUM) {
/* HW Offload */
ol_flags |= PKT_TX_UDP_CKSUM;
if (ipv4_tunnel)
uint16_t len;
/* Check if inner L3/L4 checksum flag is set */
- if (tx_ol_flags & 0xF0)
+ if (tx_ol_flags & TESTPMD_TX_OFFLOAD_INNER_CKSUM_MASK)
ol_flags |= PKT_TX_VXLAN_CKSUM;
inner_l2_len = sizeof(struct ether_hdr);
unsigned char *) + len);
inner_l4_proto = inner_ipv4_hdr->next_proto_id;
- if (tx_ol_flags & 0x10) {
+ if (tx_ol_flags & TESTPMD_TX_OFFLOAD_INNER_IP_CKSUM) {
/* Do not delete, this is required by HW*/
inner_ipv4_hdr->hdr_checksum = 0;
unsigned char *) + len);
inner_l4_proto = inner_ipv6_hdr->proto;
}
- if ((inner_l4_proto == IPPROTO_UDP) && (tx_ol_flags & 0x20)) {
+ if ((inner_l4_proto == IPPROTO_UDP) &&
+ (tx_ol_flags & TESTPMD_TX_OFFLOAD_INNER_UDP_CKSUM)) {
/* HW Offload */
ol_flags |= PKT_TX_UDP_CKSUM;
else if (eth_type == ETHER_TYPE_IPv6)
inner_udp_hdr->dgram_cksum = get_ipv6_psd_sum(inner_ipv6_hdr);
- } else if ((inner_l4_proto == IPPROTO_TCP) && (tx_ol_flags & 0x40)) {
+ } else if ((inner_l4_proto == IPPROTO_TCP) &&
+ (tx_ol_flags & TESTPMD_TX_OFFLOAD_INNER_TCP_CKSUM)) {
/* HW Offload */
ol_flags |= PKT_TX_TCP_CKSUM;
inner_tcp_hdr = (struct tcp_hdr *) (rte_pktmbuf_mtod(mb,
inner_tcp_hdr->cksum = get_ipv4_psd_sum(inner_ipv4_hdr);
else if (eth_type == ETHER_TYPE_IPv6)
inner_tcp_hdr->cksum = get_ipv6_psd_sum(inner_ipv6_hdr);
- } else if ((inner_l4_proto == IPPROTO_SCTP) && (tx_ol_flags & 0x80)) {
+ } else if ((inner_l4_proto == IPPROTO_SCTP) &&
+ (tx_ol_flags & TESTPMD_TX_OFFLOAD_INNER_SCTP_CKSUM)) {
/* HW Offload */
ol_flags |= PKT_TX_SCTP_CKSUM;
inner_sctp_hdr = (struct sctp_hdr *) (rte_pktmbuf_mtod(mb,
} else if (l4_proto == IPPROTO_TCP) {
tcp_hdr = (struct tcp_hdr*) (rte_pktmbuf_mtod(mb,
unsigned char *) + l2_len + l3_len);
- if (tx_ol_flags & 0x4) {
+ if (tx_ol_flags & TESTPMD_TX_OFFLOAD_TCP_CKSUM) {
ol_flags |= PKT_TX_TCP_CKSUM;
tcp_hdr->cksum = get_ipv4_psd_sum(ipv4_hdr);
}
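Before requesting HW L4 checksum offload, the engine seeds the checksum field with the pseudo-header sum via get_ipv4_psd_sum(). The stand-alone helper below is an editor's sketch of that computation, not testpmd's actual implementation: the name example_ipv4_psd_sum is hypothetical, and it assumes the pre-1.8 struct ipv4_hdr field names used above plus an already-included rte_byteorder.h.

/* Editor's sketch, not part of this patch: IPv4 pseudo-header sum
 * (RFC 768/793) as expected in the L4 checksum field before HW offload.
 * Assumes no IPv4 options (l3_len == sizeof(struct ipv4_hdr)). */
static inline uint16_t
example_ipv4_psd_sum(const struct ipv4_hdr *ip)
{
	union {
		struct {
			uint32_t src_addr;  /* source address (BE) */
			uint32_t dst_addr;  /* destination address (BE) */
			uint8_t  zero;      /* always zero */
			uint8_t  proto;     /* L4 protocol */
			uint16_t len;       /* L4 length (BE) */
		} __attribute__((__packed__)) f;
		uint16_t u16[6];
	} psd;
	uint32_t sum = 0;
	unsigned int i;

	psd.f.src_addr = ip->src_addr;
	psd.f.dst_addr = ip->dst_addr;
	psd.f.zero = 0;
	psd.f.proto = ip->next_proto_id;
	psd.f.len = rte_cpu_to_be_16((uint16_t)
		(rte_be_to_cpu_16(ip->total_length) - sizeof(struct ipv4_hdr)));

	/* 16-bit one's-complement sum over the 12-byte pseudo-header */
	for (i = 0; i < sizeof(psd.u16) / sizeof(psd.u16[0]); i++)
		sum += psd.u16[i];
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}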
sctp_hdr = (struct sctp_hdr*) (rte_pktmbuf_mtod(mb,
unsigned char *) + l2_len + l3_len);
- if (tx_ol_flags & 0x8) {
+ if (tx_ol_flags & TESTPMD_TX_OFFLOAD_SCTP_CKSUM) {
ol_flags |= PKT_TX_SCTP_CKSUM;
sctp_hdr->cksum = 0;
if (l4_proto == IPPROTO_UDP) {
udp_hdr = (struct udp_hdr*) (rte_pktmbuf_mtod(mb,
unsigned char *) + l2_len + l3_len);
- if (tx_ol_flags & 0x2) {
+ if (tx_ol_flags & TESTPMD_TX_OFFLOAD_UDP_CKSUM) {
/* HW Offload */
ol_flags |= PKT_TX_UDP_CKSUM;
if (ipv6_tunnel)
uint16_t len;
/* Check if inner L3/L4 checksum flag is set */
- if (tx_ol_flags & 0xF0)
+ if (tx_ol_flags & TESTPMD_TX_OFFLOAD_INNER_CKSUM_MASK)
ol_flags |= PKT_TX_VXLAN_CKSUM;
inner_l2_len = sizeof(struct ether_hdr);
inner_l4_proto = inner_ipv4_hdr->next_proto_id;
/* HW offload */
- if (tx_ol_flags & 0x10) {
+ if (tx_ol_flags & TESTPMD_TX_OFFLOAD_INNER_IP_CKSUM) {
/* Do not delete, this is required by HW*/
inner_ipv4_hdr->hdr_checksum = 0;
inner_l4_proto = inner_ipv6_hdr->proto;
}
- if ((inner_l4_proto == IPPROTO_UDP) && (tx_ol_flags & 0x20)) {
+ if ((inner_l4_proto == IPPROTO_UDP) &&
+ (tx_ol_flags & TESTPMD_TX_OFFLOAD_INNER_UDP_CKSUM)) {
inner_udp_hdr = (struct udp_hdr *) (rte_pktmbuf_mtod(mb,
unsigned char *) + len + inner_l3_len);
/* HW offload */
inner_udp_hdr->dgram_cksum = get_ipv4_psd_sum(inner_ipv4_hdr);
else if (eth_type == ETHER_TYPE_IPv6)
inner_udp_hdr->dgram_cksum = get_ipv6_psd_sum(inner_ipv6_hdr);
- } else if ((inner_l4_proto == IPPROTO_TCP) && (tx_ol_flags & 0x40)) {
+ } else if ((inner_l4_proto == IPPROTO_TCP) &&
+ (tx_ol_flags & TESTPMD_TX_OFFLOAD_INNER_TCP_CKSUM)) {
/* HW offload */
ol_flags |= PKT_TX_TCP_CKSUM;
inner_tcp_hdr = (struct tcp_hdr *) (rte_pktmbuf_mtod(mb,
else if (eth_type == ETHER_TYPE_IPv6)
inner_tcp_hdr->cksum = get_ipv6_psd_sum(inner_ipv6_hdr);
- } else if ((inner_l4_proto == IPPROTO_SCTP) && (tx_ol_flags & 0x80)) {
+ } else if ((inner_l4_proto == IPPROTO_SCTP) &&
+ (tx_ol_flags & TESTPMD_TX_OFFLOAD_INNER_SCTP_CKSUM)) {
/* HW offload */
ol_flags |= PKT_TX_SCTP_CKSUM;
inner_sctp_hdr = (struct sctp_hdr *) (rte_pktmbuf_mtod(mb,
else if (l4_proto == IPPROTO_TCP) {
tcp_hdr = (struct tcp_hdr*) (rte_pktmbuf_mtod(mb,
unsigned char *) + l2_len + l3_len);
- if (tx_ol_flags & 0x4) {
+ if (tx_ol_flags & TESTPMD_TX_OFFLOAD_TCP_CKSUM) {
ol_flags |= PKT_TX_TCP_CKSUM;
tcp_hdr->cksum = get_ipv6_psd_sum(ipv6_hdr);
}
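The IPv6 path seeds the checksum field the same way, but the pseudo-header (RFC 2460, section 8.1) is built from the 128-bit source and destination addresses, the upper-layer payload length, and the next-header value. Again an editor's sketch with a hypothetical name, assuming the old struct ipv6_hdr layout used above and that <string.h> and rte_byteorder.h are already included:

/* Editor's sketch, not part of this patch: IPv6 pseudo-header sum. */
static inline uint16_t
example_ipv6_psd_sum(const struct ipv6_hdr *ip)
{
	union {
		struct {
			uint8_t  src_addr[16]; /* source address */
			uint8_t  dst_addr[16]; /* destination address */
			uint32_t len;          /* upper-layer length (BE) */
			uint32_t proto;        /* 3 zero bytes + next header (BE) */
		} __attribute__((__packed__)) f;
		uint16_t u16[20];
	} psd;
	uint32_t sum = 0;
	unsigned int i;

	memcpy(psd.f.src_addr, ip->src_addr, sizeof(psd.f.src_addr));
	memcpy(psd.f.dst_addr, ip->dst_addr, sizeof(psd.f.dst_addr));
	psd.f.len = rte_cpu_to_be_32((uint32_t)rte_be_to_cpu_16(ip->payload_len));
	psd.f.proto = rte_cpu_to_be_32((uint32_t)ip->proto);

	/* 16-bit one's-complement sum over the 40-byte pseudo-header */
	for (i = 0; i < sizeof(psd.u16) / sizeof(psd.u16[0]); i++)
		sum += psd.u16[i];
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}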
sctp_hdr = (struct sctp_hdr*) (rte_pktmbuf_mtod(mb,
unsigned char *) + l2_len + l3_len);
- if (tx_ol_flags & 0x8) {
+ if (tx_ol_flags & TESTPMD_TX_OFFLOAD_SCTP_CKSUM) {
ol_flags |= PKT_TX_SCTP_CKSUM;
sctp_hdr->cksum = 0;
/* Sanity check: HW SCTP checksum offload only supports payload lengths that are a multiple of 4 bytes */
#endif
};
+/** Offload IP checksum in csum forward engine */
+#define TESTPMD_TX_OFFLOAD_IP_CKSUM 0x0001
+/** Offload UDP checksum in csum forward engine */
+#define TESTPMD_TX_OFFLOAD_UDP_CKSUM 0x0002
+/** Offload TCP checksum in csum forward engine */
+#define TESTPMD_TX_OFFLOAD_TCP_CKSUM 0x0004
+/** Offload SCTP checksum in csum forward engine */
+#define TESTPMD_TX_OFFLOAD_SCTP_CKSUM 0x0008
+/** Offload inner IP checksum in csum forward engine */
+#define TESTPMD_TX_OFFLOAD_INNER_IP_CKSUM 0x0010
+/** Offload inner UDP checksum in csum forward engine */
+#define TESTPMD_TX_OFFLOAD_INNER_UDP_CKSUM 0x0020
+/** Offload inner TCP checksum in csum forward engine */
+#define TESTPMD_TX_OFFLOAD_INNER_TCP_CKSUM 0x0040
+/** Offload inner SCTP checksum in csum forward engine */
+#define TESTPMD_TX_OFFLOAD_INNER_SCTP_CKSUM 0x0080
+/** Mask of all inner IP/UDP/TCP/SCTP checksum offload flags */
+#define TESTPMD_TX_OFFLOAD_INNER_CKSUM_MASK 0x00F0
+/** Insert VLAN header in forward engine */
+#define TESTPMD_TX_OFFLOAD_INSERT_VLAN 0x0100
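Since the highest flag is 0x0100, the whole set fits in 16 bits, which is why tx_ol_flags shrinks from uint64_t to uint16_t below. A minimal usage sketch (editor's illustration, not part of the patch) of how the csum engine above maps these flags onto mbuf ol_flags, ignoring the per-protocol checks the engine performs; the helper name example_map_tx_ol_flags is hypothetical:

/* Editor's sketch, not part of this patch: named flags compose and test
 * like the previous magic hex values. */
static inline uint64_t
example_map_tx_ol_flags(uint16_t tx_ol_flags, uint64_t ol_flags)
{
	if (tx_ol_flags & TESTPMD_TX_OFFLOAD_IP_CKSUM)
		ol_flags |= PKT_TX_IP_CKSUM;
	if (tx_ol_flags & TESTPMD_TX_OFFLOAD_UDP_CKSUM)
		ol_flags |= PKT_TX_UDP_CKSUM;
	/* any inner L3/L4 request implies VXLAN checksum offload */
	if (tx_ol_flags & TESTPMD_TX_OFFLOAD_INNER_CKSUM_MASK)
		ol_flags |= PKT_TX_VXLAN_CKSUM;
	return ol_flags;
}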
/**
* The data structure associated with each port.
- * tx_ol_flags is slightly different from ol_flags of rte_mbuf.
- * Bit 0: Insert IP checksum
- * Bit 1: Insert UDP checksum
- * Bit 2: Insert TCP checksum
- * Bit 3: Insert SCTP checksum
- * Bit 11: Insert VLAN Label
*/
struct rte_port {
struct rte_eth_dev_info dev_info; /**< PCI info + driver name */
struct fwd_stream *rx_stream; /**< Port RX stream, if unique */
struct fwd_stream *tx_stream; /**< Port TX stream, if unique */
unsigned int socket_id; /**< For NUMA support */
- uint64_t tx_ol_flags;/**< Offload Flags of TX packets. */
+ uint16_t tx_ol_flags;/**< TX Offload Flags (TESTPMD_TX_OFFLOAD...). */
uint16_t tx_vlan_id; /**< Tag Id. in TX VLAN packets. */
void *fwd_ctx; /**< Forwarding mode context */
uint64_t rx_bad_ip_csum; /**< rx pkts with bad ip checksum */