+/* Calculate the checksum of the outer header */
+static uint64_t
+process_outer_cksums(void *outer_l3_hdr, struct testpmd_offload_info *info,
+	uint64_t tx_offloads, int tso_enabled)
+{
+	struct rte_ipv4_hdr *ipv4_hdr = outer_l3_hdr;
+	struct rte_ipv6_hdr *ipv6_hdr = outer_l3_hdr;
+	struct rte_udp_hdr *udp_hdr;
+	uint64_t ol_flags = 0;
+
+	if (info->outer_ethertype == _htons(RTE_ETHER_TYPE_IPV4)) {
+		ipv4_hdr->hdr_checksum = 0;
+		ol_flags |= PKT_TX_OUTER_IPV4;
+
+		if (tx_offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)
+			ol_flags |= PKT_TX_OUTER_IP_CKSUM;
+		else
+			ipv4_hdr->hdr_checksum = rte_ipv4_cksum(ipv4_hdr);
+	} else
+		ol_flags |= PKT_TX_OUTER_IPV6;
+
+	if (info->outer_l4_proto != IPPROTO_UDP)
+		return ol_flags;
+
+	udp_hdr = (struct rte_udp_hdr *)
+		((char *)outer_l3_hdr + info->outer_l3_len);
+
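+	/* when TSO is enabled, request TCP segmentation from the NIC */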
+	if (tso_enabled)
+		ol_flags |= PKT_TX_TCP_SEG;
+
+	/* Skip SW outer UDP checksum generation if HW supports it */
+	if (tx_offloads & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM) {
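+		/* SW still fills the pseudo-header checksum used by the offload */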
+		if (info->outer_ethertype == _htons(RTE_ETHER_TYPE_IPV4))
+			udp_hdr->dgram_cksum
+				= rte_ipv4_phdr_cksum(ipv4_hdr, ol_flags);
+		else
+			udp_hdr->dgram_cksum
+				= rte_ipv6_phdr_cksum(ipv6_hdr, ol_flags);
+
+		ol_flags |= PKT_TX_OUTER_UDP_CKSUM;
+		return ol_flags;
+	}
+
+	/* The outer UDP checksum is done in software. On the other hand, for
+	 * UDP tunneling, like VXLAN or Geneve, the outer UDP checksum can be
+	 * set to zero.
+	 *
+	 * If a packet will be TSOed into small packets by the NIC, we cannot
+	 * set/calculate a non-zero checksum, because it would be a wrong
+	 * value after the packet is split into several small packets.
+	 */
+	if (tso_enabled)
+		udp_hdr->dgram_cksum = 0;
+
+	/* do not recalculate udp cksum if it was 0 */
+	if (udp_hdr->dgram_cksum != 0) {
+		udp_hdr->dgram_cksum = 0;
+		if (info->outer_ethertype == _htons(RTE_ETHER_TYPE_IPV4))
+			udp_hdr->dgram_cksum =
+				rte_ipv4_udptcp_cksum(ipv4_hdr, udp_hdr);
+		else
+			udp_hdr->dgram_cksum =
+				rte_ipv6_udptcp_cksum(ipv6_hdr, udp_hdr);
+	}
+
+	return ol_flags;
+}
+
+/*
+ * Helper function.
+ * Performs actual copying.
+ * Returns number of segments in the destination mbuf on success,
+ * or negative error code on failure.
+ */
+static int
+mbuf_copy_split(const struct rte_mbuf *ms, struct rte_mbuf *md[],
+	uint16_t seglen[], uint8_t nb_seg)
+{
+	uint32_t dlen, slen, tlen;
+	uint32_t i, len;
+	const struct rte_mbuf *m;
+	const uint8_t *src;
+	uint8_t *dst;
+
+	dlen = 0;
+	slen = 0;
+	tlen = 0;
+
+	dst = NULL;
+	src = NULL;
+
+	m = ms;
+	i = 0;
+	while (ms != NULL && i != nb_seg) {
+
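+		/* refill source pointer/length from the current source segment */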
+		if (slen == 0) {
+			slen = rte_pktmbuf_data_len(ms);
+			src = rte_pktmbuf_mtod(ms, const uint8_t *);
+		}
+
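+		/* start a new destination segment and link it into the chain */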
+		if (dlen == 0) {
+			dlen = RTE_MIN(seglen[i], slen);
+			md[i]->data_len = dlen;
+			md[i]->next = (i + 1 == nb_seg) ? NULL : md[i + 1];
+			dst = rte_pktmbuf_mtod(md[i], uint8_t *);
+		}
+
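+		/* copy as much as fits in both current source and destination */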
+		len = RTE_MIN(slen, dlen);
+		memcpy(dst, src, len);
+		tlen += len;
+		slen -= len;
+		dlen -= len;
+		src += len;
+		dst += len;
+
+		if (slen == 0)
+			ms = ms->next;
+		if (dlen == 0)
+			i++;
+	}
+
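+	/* error if source data remains or copied length mismatches pkt_len */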
+	if (ms != NULL)
+		return -ENOBUFS;
+	else if (tlen != m->pkt_len)
+		return -EINVAL;
+
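+	/* finalize the head segment: count, length and offload metadata */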
+	md[0]->nb_segs = nb_seg;
+	md[0]->pkt_len = tlen;
+	md[0]->vlan_tci = m->vlan_tci;
+	md[0]->vlan_tci_outer = m->vlan_tci_outer;
+	md[0]->ol_flags = m->ol_flags;
+	md[0]->tx_offload = m->tx_offload;
+
+	return nb_seg;
+}
+
+/*
+ * Allocate a new mbuf with up to tx_pkt_nb_segs segments.
+ * Copy packet contents and offload information into the new segmented mbuf.
+ */
+static struct rte_mbuf *
+pkt_copy_split(const struct rte_mbuf *pkt)
+{
+	int32_t n, rc;
+	uint32_t i, len, nb_seg;
+	struct rte_mempool *mp;
+	uint16_t seglen[RTE_MAX_SEGS_PER_PKT];
+	struct rte_mbuf *p, *md[RTE_MAX_SEGS_PER_PKT];
+
+	mp = current_fwd_lcore()->mbp;
+
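+	/* random segment count in TX_PKT_SPLIT_RND mode, fixed otherwise */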
+	if (tx_pkt_split == TX_PKT_SPLIT_RND)
+		nb_seg = random() % tx_pkt_nb_segs + 1;
+	else
+		nb_seg = tx_pkt_nb_segs;
+
+	memcpy(seglen, tx_pkt_seg_lengths, nb_seg * sizeof(seglen[0]));
+
+	/* calculate number of segments to use and their length. */
+	len = 0;
+	for (i = 0; i != nb_seg && len < pkt->pkt_len; i++) {
+		len += seglen[i];
+		md[i] = NULL;
+	}
+
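+	/* n > 0: the chosen segment lengths do not cover the whole packet */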
+	n = pkt->pkt_len - len;
+
+	/* update size of the last segment to fit the rest of the packet */
+	if (n >= 0) {
+		seglen[i - 1] += n;
+		len += n;
+	}
+
+	nb_seg = i;
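+	/* allocate one mbuf per segment, filling md[] from the last one back */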
+	while (i != 0) {
+		p = rte_pktmbuf_alloc(mp);
+		if (p == NULL) {
+			TESTPMD_LOG(ERR,
+				"failed to allocate %u-th of %u mbuf "
+				"from mempool: %s\n",
+				nb_seg - i, nb_seg, mp->name);
+			break;
+		}
+
+		md[--i] = p;
+		if (rte_pktmbuf_tailroom(md[i]) < seglen[i]) {
+			TESTPMD_LOG(ERR, "mempool %s, %u-th segment: "
+				"expected seglen: %u, "
+				"actual mbuf tailroom: %u\n",
+				mp->name, i, seglen[i],
+				rte_pktmbuf_tailroom(md[i]));
+			break;
+		}
+	}
+
+	/* all mbufs successfully allocated, do copy */
+	if (i == 0) {
+		rc = mbuf_copy_split(pkt, md, seglen, nb_seg);
+		if (rc < 0)
+			TESTPMD_LOG(ERR,
+				"mbuf_copy_split for %p(len=%u, nb_seg=%u) "
+				"into %u segments failed with error code: %d\n",
+				pkt, pkt->pkt_len, pkt->nb_segs, nb_seg, rc);
+
+		/* figure out how many mbufs to free. */
+		i = RTE_MAX(rc, 0);
+	}
+
+	/* free unused mbufs */
+	for (; i != nb_seg; i++) {
+		rte_pktmbuf_free_seg(md[i]);
+		md[i] = NULL;
+	}
+
+	return md[0];
+}