ret = rte_gso_segment(pkts_burst[i], gso_ctx,
&gso_segments[nb_segments],
GSO_MAX_PKT_BURST - nb_segments);
- if (ret >= 0)
+ if (ret >= 1) {
+ /* pkts_burst[i] can be freed safely here. */
+ rte_pktmbuf_free(pkts_burst[i]);
nb_segments += ret;
- else {
+ } else if (ret == 0) {
+ /* 0 means it can be transmitted directly
+ * without gso.
+ */
+ gso_segments[nb_segments] = pkts_burst[i];
+ nb_segments += 1;
+ } else {
TESTPMD_LOG(DEBUG, "Unable to segment packet");
rte_pktmbuf_free(pkts_burst[i]);
}
packets in software. Note however, that GSO is implemented as a standalone
library, and not via a 'fallback' mechanism (i.e. for when TSO is unsupported
in the underlying hardware); that is, applications must explicitly invoke the
-GSO library to segment packets. The size of GSO segments ``(segsz)`` is
-configurable by the application.
+GSO library to segment packets. After calling ``rte_gso_segment()``, the
+application must also call ``rte_pktmbuf_free()`` to free the input packet,
+to which the GSO segments are attached.
+The size of GSO segments (``segsz``) is configurable by the application.
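
For illustration, the resulting call pattern might look like the minimal
sketch below (``gso_send_one``, ``MAX_SEGS``, ``port_id`` and ``queue_id``
are illustrative names only; the packet is assumed to already carry the
required offload flags and header lengths, and ``gso_ctx`` and the ethdev
TX queue are assumed to be initialized):

.. code-block:: c

   #include <rte_ethdev.h>
   #include <rte_gso.h>
   #include <rte_mbuf.h>

   #define MAX_SEGS 64

   /* Segment one packet and transmit the result on (port_id, queue_id). */
   static void
   gso_send_one(struct rte_mbuf *pkt, struct rte_gso_ctx *gso_ctx,
                uint16_t port_id, uint16_t queue_id)
   {
           struct rte_mbuf *segs[MAX_SEGS];
           struct rte_mbuf **tx_pkts;
           uint16_t nb_pkts, nb_tx;
           int ret;

           ret = rte_gso_segment(pkt, gso_ctx, segs, MAX_SEGS);
           if (ret > 0) {
                   /* The GSO segments reference the payload of pkt through
                    * attached (indirect) mbufs, so the input packet can and
                    * must be freed by the application now.
                    */
                   rte_pktmbuf_free(pkt);
                   tx_pkts = segs;
                   nb_pkts = (uint16_t)ret;
           } else if (ret == 0) {
                   /* The packet did not need segmentation: send it as is. */
                   tx_pkts = &pkt;
                   nb_pkts = 1;
           } else {
                   /* Segmentation failed (e.g. -ENOMEM): drop the packet. */
                   rte_pktmbuf_free(pkt);
                   return;
           }

           nb_tx = rte_eth_tx_burst(port_id, queue_id, tx_pkts, nb_pkts);
           /* Free any packets the driver did not accept. */
           while (nb_tx < nb_pkts)
                   rte_pktmbuf_free(tx_pkts[nb_tx++]);
   }
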
Limitations
-----------
#. Invoke the GSO segmentation API, ``rte_gso_segment()``.
+#. Call ``rte_pktmbuf_free()`` to free the input mbuf passed to
+   ``rte_gso_segment()``.
+
#. If required, update the L3 and L4 checksums of the newly-created segments.
For tunneled packets, the outer IPv4 headers' checksums should also be
updated. Alternatively, the application may offload checksum calculation
* bpf: ``RTE_BPF_XTYPE_NUM`` has been dropped from ``rte_bpf_xtype``.
+* gso: Changed ``rte_gso_segment`` behaviour and return value:
+
+  * ``pkt`` is not saved to ``pkts_out[0]`` if it is not GSO'd.
+  * Return 0 instead of 1 for the above case.
+  * ``pkt`` is never freed, whether it is GSO'd or not; freeing it is left to
+    the caller.
+
* acl: ``RTE_ACL_CLASSIFY_NUM`` enum value has been removed.
This enum value was not used inside DPDK, yet it prevented new classify
algorithms from being added without causing an ABI breakage.
if (num_tso_mbufs < 0)
break;
- mbuf = gso_mbufs;
- num_mbufs = num_tso_mbufs;
+ if (num_tso_mbufs >= 1) {
+ mbuf = gso_mbufs;
+ num_mbufs = num_tso_mbufs;
+ } else {
+ /* 0 means it can be transmitted directly
+ * without gso.
+ */
+ mbuf = &mbuf_in;
+ num_mbufs = 1;
+ }
} else {
/* stats.errs will be incremented */
if (rte_pktmbuf_pkt_len(mbuf_in) > max_size)
pkt->l2_len);
frag_off = rte_be_to_cpu_16(ipv4_hdr->fragment_offset);
if (unlikely(IS_FRAGMENTED(frag_off))) {
- pkts_out[0] = pkt;
- return 1;
+ return 0;
}
/* Don't process the packet without data */
hdr_offset = pkt->l2_len + pkt->l3_len + pkt->l4_len;
if (unlikely(hdr_offset >= pkt->pkt_len)) {
- pkts_out[0] = pkt;
- return 1;
+ return 0;
}
pyld_unit_size = gso_size - hdr_offset;
{
struct rte_ipv4_hdr *inner_ipv4_hdr;
uint16_t pyld_unit_size, hdr_offset, frag_off;
- int ret = 1;
+ int ret;
hdr_offset = pkt->outer_l2_len + pkt->outer_l3_len + pkt->l2_len;
inner_ipv4_hdr = (struct rte_ipv4_hdr *)(rte_pktmbuf_mtod(pkt, char *) +
*/
frag_off = rte_be_to_cpu_16(inner_ipv4_hdr->fragment_offset);
if (unlikely(IS_FRAGMENTED(frag_off))) {
- pkts_out[0] = pkt;
- return 1;
+ return 0;
}
hdr_offset += pkt->l3_len + pkt->l4_len;
/* Don't process the packet without data */
if (hdr_offset >= pkt->pkt_len) {
- pkts_out[0] = pkt;
- return 1;
+ return 0;
}
pyld_unit_size = gso_size - hdr_offset;
/* Segment the payload */
ret = gso_do_segment(pkt, hdr_offset, pyld_unit_size, direct_pool,
indirect_pool, pkts_out, nb_pkts_out);
- if (ret <= 1)
- return ret;
-
- update_tunnel_ipv4_tcp_headers(pkt, ipid_delta, pkts_out, ret);
+ if (ret > 1)
+ update_tunnel_ipv4_tcp_headers(pkt, ipid_delta, pkts_out, ret);
return ret;
}
pkt->l2_len);
frag_off = rte_be_to_cpu_16(ipv4_hdr->fragment_offset);
if (unlikely(IS_FRAGMENTED(frag_off))) {
- pkts_out[0] = pkt;
- return 1;
+ return 0;
}
/*
/* Don't process the packet without data. */
if (unlikely(hdr_offset + pkt->l4_len >= pkt->pkt_len)) {
- pkts_out[0] = pkt;
- return 1;
+ return 0;
}
/* pyld_unit_size must be a multiple of 8 because frag_off
uint16_t nb_pkts_out)
{
struct rte_mempool *direct_pool, *indirect_pool;
- struct rte_mbuf *pkt_seg;
uint64_t ol_flags;
uint16_t gso_size;
uint8_t ipid_delta;
if (gso_ctx->gso_size >= pkt->pkt_len) {
pkt->ol_flags &= (~(PKT_TX_TCP_SEG | PKT_TX_UDP_SEG));
- pkts_out[0] = pkt;
- return 1;
+ return 0;
}
direct_pool = gso_ctx->direct_pool;
indirect_pool, pkts_out, nb_pkts_out);
} else {
/* unsupported packet, skip */
- pkts_out[0] = pkt;
RTE_LOG(DEBUG, GSO, "Unsupported packet type\n");
- return 1;
+ ret = 0;
}
- if (ret > 1) {
- pkt_seg = pkt;
- while (pkt_seg) {
- rte_mbuf_refcnt_update(pkt_seg, -1);
- pkt_seg = pkt_seg->next;
- }
- } else if (ret < 0) {
+ if (ret < 0) {
/* Revert the ol_flags in the event of failure. */
pkt->ol_flags = ol_flags;
}
* the GSO segments are sent to should support transmission of multi-segment
* packets.
*
- * If the input packet is GSO'd, its mbuf refcnt reduces by 1. Therefore,
- * when all GSO segments are freed, the input packet is freed automatically.
+ * If the input packet is GSO'd, all the indirect segments are attached to the
+ * input packet.
+ *
+ * rte_gso_segment() will not free the input packet, no matter whether it is
+ * GSO'd or not; the application should free it after calling
+ * rte_gso_segment().
*
* If the memory space in pkts_out or MBUF pools is insufficient, this
* function fails, and it returns (-1) * errno. Otherwise, GSO succeeds,
*
* @return
* - The number of GSO segments filled in pkts_out on success.
+ * - Return 0 if the packet does not need to be GSO'd.
* - Return -ENOMEM if run out of memory in MBUF pools.
* - Return -EINVAL for invalid parameters.
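 *
 * A minimal sketch of handling the return value under this contract
 * (assuming pkts_out can hold at least nb_pkts_out mbuf pointers):
 *
 * @code{.c}
 * int ret = rte_gso_segment(pkt, gso_ctx, pkts_out, nb_pkts_out);
 * if (ret > 0) {
 *         // GSO'd: segments are in pkts_out[0..ret-1]; the caller frees pkt
 *         rte_pktmbuf_free(pkt);
 * } else if (ret == 0) {
 *         // Not GSO'd: transmit pkt directly; pkts_out is untouched
 * } else {
 *         // -ENOMEM or -EINVAL: pkt is left for the caller to handle
 * }
 * @endcode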
*/