-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * Copyright 2014 6WIND S.A.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation.
+ * Copyright 2014 6WIND S.A.
*/
#ifndef _RTE_MBUF_H_
*/
#include <stdint.h>
+#include <rte_compat.h>
#include <rte_common.h>
+#include <rte_config.h>
#include <rte_mempool.h>
#include <rte_memory.h>
#include <rte_atomic.h>
#include <rte_prefetch.h>
#include <rte_branch_prediction.h>
+#include <rte_byteorder.h>
#include <rte_mbuf_ptype.h>
#ifdef __cplusplus
*/
/**
- * RX packet is a 802.1q VLAN packet. This flag was set by PMDs when
- * the packet is recognized as a VLAN, but the behavior between PMDs
- * was not the same. This flag is kept for some time to avoid breaking
- * applications and should be replaced by PKT_RX_VLAN_STRIPPED.
+ * The RX packet is an 802.1q VLAN packet, and the tci has been
+ * saved in mbuf->vlan_tci.
+ * If the flag PKT_RX_VLAN_STRIPPED is also present, the VLAN
+ * header has been stripped from the mbuf data; otherwise it is
+ * still present.
*/
-#define PKT_RX_VLAN_PKT (1ULL << 0)
+#define PKT_RX_VLAN (1ULL << 0)
#define PKT_RX_RSS_HASH (1ULL << 1) /**< RX packet with RSS hash result. */
#define PKT_RX_FDIR (1ULL << 2) /**< RX packet with FDIR match indicate. */
* A vlan has been stripped by the hardware and its tci is saved in
* mbuf->vlan_tci. This can only happen if vlan stripping is enabled
* in the RX configuration of the PMD.
+ * When PKT_RX_VLAN_STRIPPED is set, PKT_RX_VLAN must also be set.
*/
#define PKT_RX_VLAN_STRIPPED (1ULL << 6)
* The 2 vlans have been stripped by the hardware and their tci are
* saved in mbuf->vlan_tci (inner) and mbuf->vlan_tci_outer (outer).
* This can only happen if vlan stripping is enabled in the RX
- * configuration of the PMD. If this flag is set, PKT_RX_VLAN_STRIPPED
- * must also be set.
+ * configuration of the PMD.
+ * When PKT_RX_QINQ_STRIPPED is set, the flags (PKT_RX_VLAN |
+ * PKT_RX_VLAN_STRIPPED | PKT_RX_QINQ) must also be set.
*/
#define PKT_RX_QINQ_STRIPPED (1ULL << 15)
-/**
- * Deprecated.
- * RX packet with double VLAN stripped.
- * This flag is replaced by PKT_RX_QINQ_STRIPPED.
- */
-#define PKT_RX_QINQ_PKT PKT_RX_QINQ_STRIPPED
-
/**
* When packets are coalesced by a hardware or virtual driver, this flag
* can be set in the RX mbuf, meaning that the m->tso_segsz field is
*/
#define PKT_RX_TIMESTAMP (1ULL << 17)
+/**
+ * Indicate that security offload processing was applied on the RX packet.
+ */
+#define PKT_RX_SEC_OFFLOAD (1ULL << 18)
+
+/**
+ * Indicate that security offload processing failed on the RX packet.
+ */
+#define PKT_RX_SEC_OFFLOAD_FAILED (1ULL << 19)
+
+/**
+ * The RX packet is a double VLAN, and the outer tci has been
+ * saved in mbuf->vlan_tci_outer. If PKT_RX_QINQ is set, PKT_RX_VLAN
+ * must also be set and the inner tci saved in mbuf->vlan_tci.
+ * If the flag PKT_RX_QINQ_STRIPPED is also present, both VLAN
+ * headers have been stripped from the mbuf data; otherwise they are
+ * still present.
+ */
+#define PKT_RX_QINQ (1ULL << 20)
+
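+/*
+ * Illustrative sketch, not part of the API: reading the tags advertised
+ * by the QinQ flags above, assuming `m` points to a received mbuf.
+ *
+ *   if (m->ol_flags & PKT_RX_QINQ) {
+ *       uint16_t outer_tci = m->vlan_tci_outer;
+ *       uint16_t inner_tci = m->vlan_tci;
+ *   }
+ */
+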
+/**
+ * Mask of bits used to determine the status of outer RX L4 checksum.
+ * - PKT_RX_OUTER_L4_CKSUM_UNKNOWN: no info about the outer RX L4 checksum
+ * - PKT_RX_OUTER_L4_CKSUM_BAD: the outer L4 checksum in the packet is wrong
+ * - PKT_RX_OUTER_L4_CKSUM_GOOD: the outer L4 checksum in the packet is valid
+ * - PKT_RX_OUTER_L4_CKSUM_INVALID: invalid outer L4 checksum state.
+ *
+ * The detection of PKT_RX_OUTER_L4_CKSUM_GOOD shall be based on the given
+ * HW capability. At minimum, the PMD should support
+ * PKT_RX_OUTER_L4_CKSUM_UNKNOWN and PKT_RX_OUTER_L4_CKSUM_BAD states
+ * if the DEV_RX_OFFLOAD_OUTER_UDP_CKSUM offload is available.
+ */
+#define PKT_RX_OUTER_L4_CKSUM_MASK ((1ULL << 21) | (1ULL << 22))
+
+#define PKT_RX_OUTER_L4_CKSUM_UNKNOWN 0
+#define PKT_RX_OUTER_L4_CKSUM_BAD (1ULL << 21)
+#define PKT_RX_OUTER_L4_CKSUM_GOOD (1ULL << 22)
+#define PKT_RX_OUTER_L4_CKSUM_INVALID ((1ULL << 21) | (1ULL << 22))
+
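+/*
+ * Illustrative sketch, not part of the API: the outer L4 checksum status
+ * is a 2-bit field, so mask ol_flags first and then compare with the
+ * values above. `m` is a received mbuf and handle_bad_outer_cksum() is a
+ * hypothetical helper.
+ *
+ *   uint64_t st = m->ol_flags & PKT_RX_OUTER_L4_CKSUM_MASK;
+ *   if (st == PKT_RX_OUTER_L4_CKSUM_BAD)
+ *       handle_bad_outer_cksum(m);
+ */
+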
/* add new RX flags here */
/* add new TX flags here */
+/**
+ * Indicate that the metadata field in the mbuf is in use.
+ */
+#define PKT_TX_METADATA (1ULL << 40)
+
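+/*
+ * Illustrative sketch, not part of the API: attaching an application
+ * metadata value before transmit. The value 0xbeef is an arbitrary
+ * example; `m` is the mbuf being prepared for TX.
+ *
+ *   m->tx_metadata = 0xbeef;
+ *   m->ol_flags |= PKT_TX_METADATA;
+ */
+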
+/**
+ * Outer UDP checksum offload flag. This flag is used for enabling
+ * outer UDP checksum in PMD. To use outer UDP checksum, the user needs to
+ * 1) Enable the following in the mbuf,
+ * a) Fill outer_l2_len and outer_l3_len in mbuf.
+ * b) Set the PKT_TX_OUTER_UDP_CKSUM flag.
+ * c) Set the PKT_TX_OUTER_IPV4 or PKT_TX_OUTER_IPV6 flag.
+ * 2) Configure the DEV_TX_OFFLOAD_OUTER_UDP_CKSUM offload flag.
+ * A usage sketch follows the definition below.
+ */
+#define PKT_TX_OUTER_UDP_CKSUM (1ULL << 41)
+
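+/*
+ * Usage sketch for the steps above, assuming an IPv4 outer header and
+ * the DPDK ether_hdr/ipv4_hdr types; `m` is the mbuf being prepared.
+ *
+ *   m->outer_l2_len = sizeof(struct ether_hdr);
+ *   m->outer_l3_len = sizeof(struct ipv4_hdr);
+ *   m->ol_flags |= PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_UDP_CKSUM;
+ */
+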
+/**
+ * UDP Fragmentation Offload flag. This flag is used for enabling UDP
+ * fragmentation in SW or in HW. When using UFO, mbuf->tso_segsz is used
+ * to store the MSS of UDP fragments.
+ */
+#define PKT_TX_UDP_SEG (1ULL << 42)
+
+/**
+ * Request security offload processing on the TX packet.
+ */
+#define PKT_TX_SEC_OFFLOAD (1ULL << 43)
+
/**
* Offload the MACsec. This flag must be set by the application to enable
* this offload feature for a packet to be transmitted.
/**
* Bits 45:48 used for the tunnel type.
- * When doing Tx offload like TSO or checksum, the HW needs to configure the
- * tunnel type into the HW descriptors.
+ * The tunnel type must be specified for TSO or checksum on the inner part
+ * of tunnel packets.
+ * These flags can be used with PKT_TX_TCP_SEG for TSO, or PKT_TX_xxx_CKSUM.
+ * The mbuf fields for inner and outer header lengths are required:
+ * outer_l2_len, outer_l3_len, l2_len, l3_len, l4_len and tso_segsz for TSO.
*/
#define PKT_TX_TUNNEL_VXLAN (0x1ULL << 45)
#define PKT_TX_TUNNEL_GRE (0x2ULL << 45)
#define PKT_TX_TUNNEL_IPIP (0x3ULL << 45)
#define PKT_TX_TUNNEL_GENEVE (0x4ULL << 45)
+/** TX packet with MPLS-in-UDP RFC 7510 header. */
+#define PKT_TX_TUNNEL_MPLSINUDP (0x5ULL << 45)
+#define PKT_TX_TUNNEL_VXLAN_GPE (0x6ULL << 45)
+/**
+ * Generic IP encapsulated tunnel type, used for TSO and checksum offload.
+ * It can be used for tunnels that are not standardized or not listed above.
+ * It is preferred to use specific tunnel flags like PKT_TX_TUNNEL_GRE
+ * or PKT_TX_TUNNEL_IPIP if possible.
+ * The ethdev must be configured with DEV_TX_OFFLOAD_IP_TNL_TSO.
+ * Outer and inner checksums are done according to the existing flags like
+ * PKT_TX_xxx_CKSUM.
+ * Specific tunnel headers that contain payload length, sequence id
+ * or checksum are not expected to be updated.
+ */
+#define PKT_TX_TUNNEL_IP (0xDULL << 45)
+/**
+ * Generic UDP encapsulated tunnel type, used for TSO and checksum offload.
+ * UDP tunnel type implies outer IP layer.
+ * It can be used for tunnels that are not standardized or not listed above.
+ * It is preferred to use specific tunnel flags like PKT_TX_TUNNEL_VXLAN
+ * if possible.
+ * The ethdev must be configured with DEV_TX_OFFLOAD_UDP_TNL_TSO.
+ * Outer and inner checksums are done according to the existing flags like
+ * PKT_TX_xxx_CKSUM.
+ * Specific tunnel headers that contain payload length, sequence id
+ * or checksum are not expected to be updated.
+ */
+#define PKT_TX_TUNNEL_UDP (0xEULL << 45)
/* add new TX TUNNEL type here */
#define PKT_TX_TUNNEL_MASK (0xFULL << 45)
/**
* Second VLAN insertion (QinQ) flag.
*/
-#define PKT_TX_QINQ_PKT (1ULL << 49) /**< TX packet with double VLAN inserted. */
+#define PKT_TX_QINQ (1ULL << 49) /**< TX packet with double VLAN inserted. */
+/* this old name is deprecated */
+#define PKT_TX_QINQ_PKT PKT_TX_QINQ
/**
* TCP segmentation offload. To enable this offload feature for a
* - set the PKT_TX_TCP_SEG flag in mbuf->ol_flags (this flag implies
* PKT_TX_TCP_CKSUM)
* - set the flag PKT_TX_IPV4 or PKT_TX_IPV6
- * - if it's IPv4, set the PKT_TX_IP_CKSUM flag and write the IP checksum
- * to 0 in the packet
+ * - if it's IPv4, set the PKT_TX_IP_CKSUM flag
* - fill the mbuf offload information: l2_len, l3_len, l4_len, tso_segsz
- * - calculate the pseudo header checksum without taking ip_len in account,
- * and set it in the TCP header. Refer to rte_ipv4_phdr_cksum() and
- * rte_ipv6_phdr_cksum() that can be used as helpers.
*/
#define PKT_TX_TCP_SEG (1ULL << 50)
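+
+/*
+ * Illustrative TSO setup following the steps above for the IPv4/TCP
+ * case, assuming the DPDK ether_hdr/ipv4_hdr/tcp_hdr types and an
+ * example MSS of 1460 bytes; `m` is the mbuf being prepared.
+ *
+ *   m->ol_flags |= PKT_TX_IPV4 | PKT_TX_IP_CKSUM | PKT_TX_TCP_SEG;
+ *   m->l2_len = sizeof(struct ether_hdr);
+ *   m->l3_len = sizeof(struct ipv4_hdr);
+ *   m->l4_len = sizeof(struct tcp_hdr);
+ *   m->tso_segsz = 1460;
+ */
+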
* - fill l2_len and l3_len in mbuf
* - set the flags PKT_TX_TCP_CKSUM, PKT_TX_SCTP_CKSUM or PKT_TX_UDP_CKSUM
* - set the flag PKT_TX_IPV4 or PKT_TX_IPV6
- * - calculate the pseudo header checksum and set it in the L4 header (only
- * for TCP or UDP). See rte_ipv4_phdr_cksum() and rte_ipv6_phdr_cksum().
- * For SCTP, set the crc field to 0.
*/
#define PKT_TX_L4_NO_CKSUM (0ULL << 52) /**< Disable L4 cksum of TX pkt. */
#define PKT_TX_TCP_CKSUM (1ULL << 52) /**< TCP cksum of TX pkt. computed by NIC. */
* Offload the IP checksum in the hardware. The flag PKT_TX_IPV4 should
* also be set by the application, although a PMD will only check
* PKT_TX_IP_CKSUM.
- * - set the IP checksum field in the packet to 0
* - fill the mbuf offload information: l2_len, l3_len
*/
#define PKT_TX_IP_CKSUM (1ULL << 54)
*/
#define PKT_TX_IPV6 (1ULL << 56)
-#define PKT_TX_VLAN_PKT (1ULL << 57) /**< TX packet is a 802.1q VLAN packet. */
+/**
+ * TX packet is a 802.1q VLAN packet.
+ */
+#define PKT_TX_VLAN (1ULL << 57)
+/* this old name is deprecated */
+#define PKT_TX_VLAN_PKT PKT_TX_VLAN
/**
* Offload the IP checksum of an external header in the hardware. The
- * flag PKT_TX_OUTER_IPV4 should also be set by the application, alto ugh
- * a PMD will only check PKT_TX_IP_CKSUM. The IP checksum field in the
- * packet must be set to 0.
- * - set the outer IP checksum field in the packet to 0
+ * flag PKT_TX_OUTER_IPV4 should also be set by the application, although
+ * a PMD will only check PKT_TX_OUTER_IP_CKSUM.
* - fill the mbuf offload information: outer_l2_len, outer_l3_len
*/
#define PKT_TX_OUTER_IP_CKSUM (1ULL << 58)
* which can be set for packet.
*/
#define PKT_TX_OFFLOAD_MASK ( \
+ PKT_TX_OUTER_IPV6 | \
+ PKT_TX_OUTER_IPV4 | \
+ PKT_TX_OUTER_IP_CKSUM | \
+ PKT_TX_VLAN_PKT | \
+ PKT_TX_IPV6 | \
+ PKT_TX_IPV4 | \
PKT_TX_IP_CKSUM | \
PKT_TX_L4_MASK | \
- PKT_TX_OUTER_IP_CKSUM | \
- PKT_TX_TCP_SEG | \
PKT_TX_IEEE1588_TMST | \
+ PKT_TX_TCP_SEG | \
PKT_TX_QINQ_PKT | \
- PKT_TX_VLAN_PKT | \
PKT_TX_TUNNEL_MASK | \
- PKT_TX_MACSEC)
+ PKT_TX_MACSEC | \
+ PKT_TX_SEC_OFFLOAD | \
+ PKT_TX_UDP_SEG | \
+ PKT_TX_OUTER_UDP_CKSUM | \
+ PKT_TX_METADATA)
-#define __RESERVED (1ULL << 61) /**< reserved for future mbuf use */
+/**
+ * Mbuf having an external buffer attached. shinfo in mbuf must be filled.
+ */
+#define EXT_ATTACHED_MBUF (1ULL << 61)
#define IND_ATTACHED_MBUF (1ULL << 62) /**< Indirect attached mbuf */
-/* Use final bit of flags to indicate a control mbuf */
-#define CTRL_MBUF_FLAG (1ULL << 63) /**< Mbuf contains control data */
-
/** Alignment constraint of mbuf private area. */
#define RTE_MBUF_PRIV_ALIGN 8
typedef uint64_t MARKER64[0]; /**< marker that allows us to overwrite 8 bytes
* with a single assignment */
+struct rte_mbuf_sched {
+ uint32_t queue_id; /**< Queue ID. */
+ uint8_t traffic_class;
+ /**< Traffic class ID. Traffic class 0
+ * is the highest priority traffic class.
+ */
+ uint8_t color;
+ /**< Color. @see enum rte_color.*/
+ uint16_t reserved; /**< Reserved. */
+}; /**< Hierarchical scheduler */
+
+/**
+ * Enum for the tx_offload bit-field lengths and offsets.
+ * Defines the layout of the rte_mbuf tx_offload field.
+ */
+enum {
+ RTE_MBUF_L2_LEN_BITS = 7,
+ RTE_MBUF_L3_LEN_BITS = 9,
+ RTE_MBUF_L4_LEN_BITS = 8,
+ RTE_MBUF_TSO_SEGSZ_BITS = 16,
+ RTE_MBUF_OUTL3_LEN_BITS = 9,
+ RTE_MBUF_OUTL2_LEN_BITS = 7,
+ RTE_MBUF_TXOFLD_UNUSED_BITS = sizeof(uint64_t) * CHAR_BIT -
+ RTE_MBUF_L2_LEN_BITS -
+ RTE_MBUF_L3_LEN_BITS -
+ RTE_MBUF_L4_LEN_BITS -
+ RTE_MBUF_TSO_SEGSZ_BITS -
+ RTE_MBUF_OUTL3_LEN_BITS -
+ RTE_MBUF_OUTL2_LEN_BITS,
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+ RTE_MBUF_L2_LEN_OFS =
+ sizeof(uint64_t) * CHAR_BIT - RTE_MBUF_L2_LEN_BITS,
+ RTE_MBUF_L3_LEN_OFS = RTE_MBUF_L2_LEN_OFS - RTE_MBUF_L3_LEN_BITS,
+ RTE_MBUF_L4_LEN_OFS = RTE_MBUF_L3_LEN_OFS - RTE_MBUF_L4_LEN_BITS,
+ RTE_MBUF_TSO_SEGSZ_OFS = RTE_MBUF_L4_LEN_OFS - RTE_MBUF_TSO_SEGSZ_BITS,
+ RTE_MBUF_OUTL3_LEN_OFS =
+ RTE_MBUF_TSO_SEGSZ_OFS - RTE_MBUF_OUTL3_LEN_BITS,
+ RTE_MBUF_OUTL2_LEN_OFS =
+ RTE_MBUF_OUTL3_LEN_OFS - RTE_MBUF_OUTL2_LEN_BITS,
+ RTE_MBUF_TXOFLD_UNUSED_OFS =
+ RTE_MBUF_OUTL2_LEN_OFS - RTE_MBUF_TXOFLD_UNUSED_BITS,
+#else
+ RTE_MBUF_L2_LEN_OFS = 0,
+ RTE_MBUF_L3_LEN_OFS = RTE_MBUF_L2_LEN_OFS + RTE_MBUF_L2_LEN_BITS,
+ RTE_MBUF_L4_LEN_OFS = RTE_MBUF_L3_LEN_OFS + RTE_MBUF_L3_LEN_BITS,
+ RTE_MBUF_TSO_SEGSZ_OFS = RTE_MBUF_L4_LEN_OFS + RTE_MBUF_L4_LEN_BITS,
+ RTE_MBUF_OUTL3_LEN_OFS =
+ RTE_MBUF_TSO_SEGSZ_OFS + RTE_MBUF_TSO_SEGSZ_BITS,
+ RTE_MBUF_OUTL2_LEN_OFS =
+ RTE_MBUF_OUTL3_LEN_OFS + RTE_MBUF_OUTL3_LEN_BITS,
+ RTE_MBUF_TXOFLD_UNUSED_OFS =
+ RTE_MBUF_OUTL2_LEN_OFS + RTE_MBUF_OUTL2_LEN_BITS,
+#endif
+};
+
/**
* The generic rte_mbuf, containing a packet mbuf.
*/
* same mbuf cacheline0 layout for 32-bit and 64-bit. This makes
* working on vector drivers easier.
*/
- phys_addr_t buf_physaddr __rte_aligned(sizeof(phys_addr_t));
+ RTE_STD_C11
+ union {
+ rte_iova_t buf_iova;
+ rte_iova_t buf_physaddr; /**< deprecated */
+ } __rte_aligned(sizeof(rte_iova_t));
/* next 8 bytes are initialised on RX descriptor rearm */
MARKER64 rearm_data;
};
uint16_t nb_segs; /**< Number of segments. */
- /** Input port (16 bits to support more than 256 virtual ports). */
+ /** Input port (16 bits to support more than 256 virtual ports).
+ * The event eth Tx adapter uses this field to specify the output port.
+ */
uint16_t port;
uint64_t ol_flags; /**< Offload features. */
uint32_t l3_type:4; /**< (Outer) L3 type. */
uint32_t l4_type:4; /**< (Outer) L4 type. */
uint32_t tun_type:4; /**< Tunnel type. */
- uint32_t inner_l2_type:4; /**< Inner L2 type. */
- uint32_t inner_l3_type:4; /**< Inner L3 type. */
+ RTE_STD_C11
+ union {
+ uint8_t inner_esp_next_proto;
+ /**< ESP next protocol type, valid if
+ * RTE_PTYPE_TUNNEL_ESP tunnel type is set
+ * on both Tx and Rx.
+ */
+ __extension__
+ struct {
+ uint8_t inner_l2_type:4;
+ /**< Inner L2 type. */
+ uint8_t inner_l3_type:4;
+ /**< Inner L3 type. */
+ };
+ };
uint32_t inner_l4_type:4; /**< Inner L4 type. */
};
};
uint32_t pkt_len; /**< Total pkt len: sum of all segments. */
uint16_t data_len; /**< Amount of data in segment buffer. */
- /** VLAN TCI (CPU order), valid if PKT_RX_VLAN_STRIPPED is set. */
+ /** VLAN TCI (CPU order), valid if PKT_RX_VLAN is set. */
uint16_t vlan_tci;
+ RTE_STD_C11
union {
- uint32_t rss; /**< RSS hash result if RSS enabled */
- struct {
- RTE_STD_C11
- union {
- struct {
- uint16_t hash;
- uint16_t id;
+ union {
+ uint32_t rss; /**< RSS hash result if RSS enabled */
+ struct {
+ union {
+ struct {
+ uint16_t hash;
+ uint16_t id;
+ };
+ uint32_t lo;
+ /**< Second 4 flexible bytes */
};
- uint32_t lo;
- /**< Second 4 flexible bytes */
- };
- uint32_t hi;
- /**< First 4 flexible bytes or FD ID, dependent on
- PKT_RX_FDIR_* flag in ol_flags. */
- } fdir; /**< Filter identifier if FDIR enabled */
+ uint32_t hi;
+ /**< First 4 flexible bytes or FD ID, dependent
+ * on PKT_RX_FDIR_* flag in ol_flags.
+ */
+ } fdir; /**< Filter identifier if FDIR enabled */
+ struct rte_mbuf_sched sched;
+ /**< Hierarchical scheduler : 8 bytes */
+ struct {
+ uint32_t reserved1;
+ uint16_t reserved2;
+ uint16_t txq;
+ /**< The event eth Tx adapter uses this field
+ * to store Tx queue id.
+ * @see rte_event_eth_tx_adapter_txq_set()
+ */
+ } txadapter; /**< Eventdev ethdev Tx adapter */
+ /** User defined tags. See rte_distributor_process() */
+ uint32_t usr;
+ } hash; /**< hash information */
struct {
- uint32_t lo;
- uint32_t hi;
- } sched; /**< Hierarchical scheduler */
- uint32_t usr; /**< User defined tags. See rte_distributor_process() */
- } hash; /**< hash information */
+ /**
+ * Application specific metadata value
+ * for egress flow rule match.
+ * Valid if PKT_TX_METADATA is set.
+ * Located here to allow conjunct use
+ * with hash.sched.hi.
+ */
+ uint32_t tx_metadata;
+ uint32_t reserved;
+ };
+ };
- /** Outer VLAN TCI (CPU order), valid if PKT_RX_QINQ_STRIPPED is set. */
+ /** Outer VLAN TCI (CPU order), valid if PKT_RX_QINQ is set. */
uint16_t vlan_tci_outer;
uint16_t buf_len; /**< Length of segment buffer. */
uint64_t tx_offload; /**< combined for easy fetch */
__extension__
struct {
- uint64_t l2_len:7;
+ uint64_t l2_len:RTE_MBUF_L2_LEN_BITS;
/**< L2 (MAC) Header Length for non-tunneling pkt.
* Outer_L4_len + ... + Inner_L2_len for tunneling pkt.
*/
- uint64_t l3_len:9; /**< L3 (IP) Header Length. */
- uint64_t l4_len:8; /**< L4 (TCP/UDP) Header Length. */
- uint64_t tso_segsz:16; /**< TCP TSO segment size */
+ uint64_t l3_len:RTE_MBUF_L3_LEN_BITS;
+ /**< L3 (IP) Header Length. */
+ uint64_t l4_len:RTE_MBUF_L4_LEN_BITS;
+ /**< L4 (TCP/UDP) Header Length. */
+ uint64_t tso_segsz:RTE_MBUF_TSO_SEGSZ_BITS;
+ /**< TCP TSO segment size */
/* fields for TX offloading of tunnels */
- uint64_t outer_l3_len:9; /**< Outer L3 (IP) Hdr Length. */
- uint64_t outer_l2_len:7; /**< Outer L2 (MAC) Hdr Length. */
+ uint64_t outer_l3_len:RTE_MBUF_OUTL3_LEN_BITS;
+ /**< Outer L3 (IP) Hdr Length. */
+ uint64_t outer_l2_len:RTE_MBUF_OUTL2_LEN_BITS;
+ /**< Outer L2 (MAC) Hdr Length. */
- /* uint64_t unused:8; */
+ /* uint64_t unused:RTE_MBUF_TXOFLD_UNUSED_BITS; */
};
};
/** Sequence number. See also rte_reorder_insert(). */
uint32_t seqn;
+ /** Shared data for external buffer attached to mbuf. See
+ * rte_pktmbuf_attach_extbuf().
+ */
+ struct rte_mbuf_ext_shared_info *shinfo;
+
} __rte_cache_aligned;
+/**
+ * Function typedef of callback to free externally attached buffer.
+ */
+typedef void (*rte_mbuf_extbuf_free_callback_t)(void *addr, void *opaque);
+
+/**
+ * Shared data at the end of an external buffer.
+ */
+struct rte_mbuf_ext_shared_info {
+ rte_mbuf_extbuf_free_callback_t free_cb; /**< Free callback function */
+ void *fcb_opaque; /**< Free callback argument */
+ rte_atomic16_t refcnt_atomic; /**< Atomically accessed refcnt */
+};
+
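+/*
+ * Illustrative free callback matching the typedef above; a sketch that
+ * assumes the external buffer was obtained with malloc().
+ *
+ *   static void
+ *   ext_buf_free_cb(void *addr, void *opaque __rte_unused)
+ *   {
+ *       free(addr);
+ *   }
+ */
+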
+/** Maximum number of nb_segs allowed. */
+#define RTE_MBUF_MAX_NB_SEGS UINT16_MAX
+
/**
* Prefetch the first part of the mbuf
*
static inline uint16_t rte_pktmbuf_priv_size(struct rte_mempool *mp);
/**
- * Return the DMA address of the beginning of the mbuf data
+ * Return the IO address of the beginning of the mbuf data
*
* @param mb
* The pointer to the mbuf.
* @return
- * The physical address of the beginning of the mbuf data
+ * The IO address of the beginning of the mbuf data
*/
+static inline rte_iova_t
+rte_mbuf_data_iova(const struct rte_mbuf *mb)
+{
+ return mb->buf_iova + mb->data_off;
+}
+
+__rte_deprecated
static inline phys_addr_t
rte_mbuf_data_dma_addr(const struct rte_mbuf *mb)
{
- return mb->buf_physaddr + mb->data_off;
+ return rte_mbuf_data_iova(mb);
}
/**
- * Return the default DMA address of the beginning of the mbuf data
+ * Return the default IO address of the beginning of the mbuf data
*
* This function is used by drivers in their receive function, as it
* returns the location where data should be written by the NIC, taking
* @param mb
* The pointer to the mbuf.
* @return
- * The physical address of the beginning of the mbuf data
+ * The IO address of the beginning of the mbuf data
*/
+static inline rte_iova_t
+rte_mbuf_data_iova_default(const struct rte_mbuf *mb)
+{
+ return mb->buf_iova + RTE_PKTMBUF_HEADROOM;
+}
+
+__rte_deprecated
static inline phys_addr_t
rte_mbuf_data_dma_addr_default(const struct rte_mbuf *mb)
{
- return mb->buf_physaddr + RTE_PKTMBUF_HEADROOM;
+ return rte_mbuf_data_iova_default(mb);
}
/**
}
/**
- * Return the buffer address embedded in the given mbuf.
+ * Return address of buffer embedded in the given mbuf.
+ *
+ * The return value shall be same as mb->buf_addr if the mbuf is already
+ * initialized and direct. However, this API is useful if mempool of the
+ * mbuf is already known because it doesn't need to access mbuf contents in
+ * order to get the mempool pointer.
+ *
+ * @warning
+ * @b EXPERIMENTAL: This API may change without prior notice.
+ * Once the experimental tag is removed, rte_mbuf_to_baddr() will use
+ * this function, removing its currently redundant code.
+ *
+ * @param mb
+ * The pointer to the mbuf.
+ * @param mp
+ * The pointer to the mempool of the mbuf.
+ * @return
+ * The pointer of the mbuf buffer.
+ */
+static inline char * __rte_experimental
+rte_mbuf_buf_addr(struct rte_mbuf *mb, struct rte_mempool *mp)
+{
+ return (char *)mb + sizeof(*mb) + rte_pktmbuf_priv_size(mp);
+}
+
+/**
+ * Return the default address of the beginning of the mbuf data.
+ *
+ * @warning
+ * @b EXPERIMENTAL: This API may change without prior notice.
+ *
+ * @param mb
+ * The pointer to the mbuf.
+ * @return
+ * The pointer of the beginning of the mbuf data.
+ */
+static inline char * __rte_experimental
+rte_mbuf_data_addr_default(struct rte_mbuf *mb)
+{
+ return rte_mbuf_buf_addr(mb, mb->pool) + RTE_PKTMBUF_HEADROOM;
+}
+
+/**
+ * Return address of buffer embedded in the given mbuf.
+ *
+ * @note: Accessing the mempool pointer of a mbuf is expensive because it
+ * is stored in the second cache line of the mbuf. If the mempool is
+ * already known, it is more efficient to avoid dereferencing the pointer
+ * in the mbuf and to call rte_mbuf_buf_addr() instead.
*
* @param md
* The pointer to the mbuf.
static inline char *
rte_mbuf_to_baddr(struct rte_mbuf *md)
{
+#ifdef ALLOW_EXPERIMENTAL_API
+ return rte_mbuf_buf_addr(md, md->pool);
+#else
char *buffer_addr;
buffer_addr = (char *)md + sizeof(*md) + rte_pktmbuf_priv_size(md->pool);
return buffer_addr;
+#endif
}
/**
- * Returns TRUE if given mbuf is indirect, or FALSE otherwise.
+ * Return the starting address of the private data area embedded in
+ * the given mbuf.
+ *
+ * Note that no check is made to ensure that a private data area
+ * actually exists in the supplied mbuf.
+ *
+ * @param m
+ * The pointer to the mbuf.
+ * @return
+ * The starting address of the private data area of the given mbuf.
+ */
+static inline void * __rte_experimental
+rte_mbuf_to_priv(struct rte_mbuf *m)
+{
+ return RTE_PTR_ADD(m, sizeof(struct rte_mbuf));
+}
+
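+/*
+ * Illustrative sketch: `struct app_meta` is a hypothetical application
+ * type whose size was passed as priv_size when the pool was created.
+ *
+ *   struct app_meta *meta = rte_mbuf_to_priv(m);
+ *   meta->flow_id = 42;
+ */
+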
+/**
+ * Returns TRUE if given mbuf is cloned by mbuf indirection, or FALSE
+ * otherwise.
+ *
+ * If a mbuf has its data in another mbuf and references it by mbuf
+ * indirection, this mbuf can be defined as a cloned mbuf.
+ */
+#define RTE_MBUF_CLONED(mb) ((mb)->ol_flags & IND_ATTACHED_MBUF)
+
+/**
+ * Returns TRUE if given mbuf has an external buffer, or FALSE otherwise.
+ *
+ * External buffer is a user-provided anonymous buffer.
*/
-#define RTE_MBUF_INDIRECT(mb) ((mb)->ol_flags & IND_ATTACHED_MBUF)
+#define RTE_MBUF_HAS_EXTBUF(mb) ((mb)->ol_flags & EXT_ATTACHED_MBUF)
/**
* Returns TRUE if given mbuf is direct, or FALSE otherwise.
+ *
+ * If a mbuf embeds its own data after the rte_mbuf structure, this mbuf
+ * can be defined as a direct mbuf.
*/
-#define RTE_MBUF_DIRECT(mb) (!RTE_MBUF_INDIRECT(mb))
+#define RTE_MBUF_DIRECT(mb) \
+ (!((mb)->ol_flags & (IND_ATTACHED_MBUF | EXT_ATTACHED_MBUF)))
/**
* Private data in case of pktmbuf pool.
static inline void
rte_mbuf_refcnt_set(struct rte_mbuf *m, uint16_t new_value)
{
- rte_atomic16_set(&m->refcnt_atomic, new_value);
+ rte_atomic16_set(&m->refcnt_atomic, (int16_t)new_value);
+}
+
+/* internal */
+static inline uint16_t
+__rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
+{
+ return (uint16_t)(rte_atomic16_add_return(&m->refcnt_atomic, value));
}
/**
{
/*
* The atomic_add is an expensive operation, so we don't want to
- * call it in the case where we know we are the uniq holder of
+ * call it in the case where we know we are the unique holder of
* this mbuf (i.e. ref_cnt == 1). Otherwise, an atomic
* operation has to be used because concurrent accesses on the
* reference counter can occur.
*/
if (likely(rte_mbuf_refcnt_read(m) == 1)) {
- rte_mbuf_refcnt_set(m, 1 + value);
- return 1 + value;
+ ++value;
+ rte_mbuf_refcnt_set(m, (uint16_t)value);
+ return (uint16_t)value;
}
- return (uint16_t)(rte_atomic16_add_return(&m->refcnt_atomic, value));
+ return __rte_mbuf_refcnt_update(m, value);
}
#else /* ! RTE_MBUF_REFCNT_ATOMIC */
+/* internal */
+static inline uint16_t
+__rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
+{
+ m->refcnt = (uint16_t)(m->refcnt + value);
+ return m->refcnt;
+}
+
/**
* Adds given value to an mbuf's refcnt and returns its new value.
*/
static inline uint16_t
rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
{
- m->refcnt = (uint16_t)(m->refcnt + value);
- return m->refcnt;
+ return __rte_mbuf_refcnt_update(m, value);
}
/**
#endif /* RTE_MBUF_REFCNT_ATOMIC */
+/**
+ * Reads the refcnt of an external buffer.
+ *
+ * @param shinfo
+ * Shared data of the external buffer.
+ * @return
+ * Reference count number.
+ */
+static inline uint16_t
+rte_mbuf_ext_refcnt_read(const struct rte_mbuf_ext_shared_info *shinfo)
+{
+ return (uint16_t)(rte_atomic16_read(&shinfo->refcnt_atomic));
+}
+
+/**
+ * Set refcnt of an external buffer.
+ *
+ * @param shinfo
+ * Shared data of the external buffer.
+ * @param new_value
+ * Value to set
+ */
+static inline void
+rte_mbuf_ext_refcnt_set(struct rte_mbuf_ext_shared_info *shinfo,
+ uint16_t new_value)
+{
+ rte_atomic16_set(&shinfo->refcnt_atomic, (int16_t)new_value);
+}
+
+/**
+ * Add given value to refcnt of an external buffer and return its new
+ * value.
+ *
+ * @param shinfo
+ * Shared data of the external buffer.
+ * @param value
+ * Value to add/subtract
+ * @return
+ * Updated value
+ */
+static inline uint16_t
+rte_mbuf_ext_refcnt_update(struct rte_mbuf_ext_shared_info *shinfo,
+ int16_t value)
+{
+ if (likely(rte_mbuf_ext_refcnt_read(shinfo) == 1)) {
+ ++value;
+ rte_mbuf_ext_refcnt_set(shinfo, (uint16_t)value);
+ return (uint16_t)value;
+ }
+
+ return (uint16_t)rte_atomic16_add_return(&shinfo->refcnt_atomic, value);
+}
+
/** Mbuf prefetch */
#define RTE_MBUF_PREFETCH_TO_FREE(m) do { \
if ((m) != NULL) \
void
rte_mbuf_sanity_check(const struct rte_mbuf *m, int is_header);
+/**
+ * Sanity checks on a mbuf.
+ *
+ * Almost like rte_mbuf_sanity_check(), but this function gives the reason
+ * if corruption is detected rather than panic.
+ *
+ * @param m
+ * The mbuf to be checked.
+ * @param is_header
+ * True if the mbuf is a packet header, false if it is a sub-segment
+ * of a packet (in this case, some fields like nb_segs are not checked)
+ * @param reason
+ * A reference to a string pointer where to store the reason why a mbuf is
+ * considered invalid.
+ * @return
+ * - 0 if no issue has been found, reason is left untouched.
+ * - -1 if a problem is detected, reason then points to a string describing
+ * the reason why the mbuf is deemed invalid.
+ */
+__rte_experimental
+int rte_mbuf_check(const struct rte_mbuf *m, int is_header,
+ const char **reason);
+
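+/*
+ * Illustrative sketch: validating a header mbuf and logging the reason
+ * on failure (the logging call is an example, not a requirement).
+ *
+ *   const char *reason;
+ *   if (rte_mbuf_check(m, 1, &reason) != 0)
+ *       RTE_LOG(ERR, MBUF, "bad mbuf: %s\n", reason);
+ */
+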
#define MBUF_RAW_ALLOC_CHECK(m) do { \
RTE_ASSERT(rte_mbuf_refcnt_read(m) == 1); \
RTE_ASSERT((m)->next == NULL); \
} while (0)
/**
- * Allocate an unitialized mbuf from mempool *mp*.
+ * Allocate an uninitialized mbuf from mempool *mp*.
*
* This function can be used by PMDs (especially in RX functions) to
- * allocate an unitialized mbuf. The driver is responsible of
+ * allocate an uninitialized mbuf. The driver is responsible for
* initializing all the required fields. See rte_pktmbuf_reset().
* For standard needs, prefer rte_pktmbuf_alloc().
*
* The caller can expect that the following fields of the mbuf structure
- * are initialized: buf_addr, buf_physaddr, buf_len, refcnt=1, nb_segs=1,
+ * are initialized: buf_addr, buf_iova, buf_len, refcnt=1, nb_segs=1,
* next=NULL, pool, priv_size. The other fields must be initialized
* by the caller.
*
static inline struct rte_mbuf *rte_mbuf_raw_alloc(struct rte_mempool *mp)
{
struct rte_mbuf *m;
- void *mb = NULL;
- if (rte_mempool_get(mp, &mb) < 0)
+ if (rte_mempool_get(mp, (void **)&m) < 0)
return NULL;
- m = (struct rte_mbuf *)mb;
MBUF_RAW_ALLOC_CHECK(m);
return m;
}
* @param m
* The mbuf to be freed.
*/
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
rte_mbuf_raw_free(struct rte_mbuf *m)
{
RTE_ASSERT(RTE_MBUF_DIRECT(m));
rte_mempool_put(m->pool, m);
}
-/* compat with older versions */
-__rte_deprecated
-static inline void
-__rte_mbuf_raw_free(struct rte_mbuf *m)
-{
- rte_mbuf_raw_free(m);
-}
-
-/* Operations on ctrl mbuf */
-
-/**
- * The control mbuf constructor.
- *
- * This function initializes some fields in an mbuf structure that are
- * not modified by the user once created (mbuf type, origin pool, buffer
- * start address, and so on). This function is given as a callback function
- * to rte_mempool_obj_iter() or rte_mempool_create() at pool creation time.
- *
- * @param mp
- * The mempool from which the mbuf is allocated.
- * @param opaque_arg
- * A pointer that can be used by the user to retrieve useful information
- * for mbuf initialization. This pointer is the opaque argument passed to
- * rte_mempool_obj_iter() or rte_mempool_create().
- * @param m
- * The mbuf to initialize.
- * @param i
- * The index of the mbuf in the pool table.
- */
-void rte_ctrlmbuf_init(struct rte_mempool *mp, void *opaque_arg,
- void *m, unsigned i);
-
-/**
- * Allocate a new mbuf (type is ctrl) from mempool *mp*.
- *
- * This new mbuf is initialized with data pointing to the beginning of
- * buffer, and with a length of zero.
- *
- * @param mp
- * The mempool from which the mbuf is allocated.
- * @return
- * - The pointer to the new mbuf on success.
- * - NULL if allocation failed.
- */
-#define rte_ctrlmbuf_alloc(mp) rte_pktmbuf_alloc(mp)
-
-/**
- * Free a control mbuf back into its original mempool.
- *
- * @param m
- * The control mbuf to be freed.
- */
-#define rte_ctrlmbuf_free(m) rte_pktmbuf_free(m)
-
-/**
- * A macro that returns the pointer to the carried data.
- *
- * The value that can be read or assigned.
- *
- * @param m
- * The control mbuf.
- */
-#define rte_ctrlmbuf_data(m) ((char *)((m)->buf_addr) + (m)->data_off)
-
-/**
- * A macro that returns the length of the carried data.
- *
- * The value that can be read or assigned.
- *
- * @param m
- * The control mbuf.
- */
-#define rte_ctrlmbuf_len(m) rte_pktmbuf_data_len(m)
-
-/**
- * Tests if an mbuf is a control mbuf
- *
- * @param m
- * The mbuf to be tested
- * @return
- * - True (1) if the mbuf is a control mbuf
- * - False(0) otherwise
- */
-static inline int
-rte_is_ctrlmbuf(struct rte_mbuf *m)
-{
- return !!(m->ol_flags & CTRL_MBUF_FLAG);
-}
-
-/* Operations on pkt mbuf */
-
/**
* The packet mbuf constructor.
*
unsigned cache_size, uint16_t priv_size, uint16_t data_room_size,
int socket_id);
+/**
+ * Create a mbuf pool with a given mempool ops name
+ *
+ * This function creates and initializes a packet mbuf pool. It is
+ * a wrapper to rte_mempool functions.
+ *
+ * @param name
+ * The name of the mbuf pool.
+ * @param n
+ * The number of elements in the mbuf pool. The optimum size (in terms
+ * of memory usage) for a mempool is when n is a power of two minus one:
+ * n = (2^q - 1).
+ * @param cache_size
+ * Size of the per-core object cache. See rte_mempool_create() for
+ * details.
+ * @param priv_size
+ * Size of the application private area between the rte_mbuf structure
+ * and the data buffer. This value must be aligned to RTE_MBUF_PRIV_ALIGN.
+ * @param data_room_size
+ * Size of data buffer in each mbuf, including RTE_PKTMBUF_HEADROOM.
+ * @param socket_id
+ * The socket identifier where the memory should be allocated. The
+ * value can be *SOCKET_ID_ANY* if there is no NUMA constraint for the
+ * reserved zone.
+ * @param ops_name
+ * The mempool ops name to be used for this mempool instead of
+ * default mempool. The value can be *NULL* to use default mempool.
+ * @return
+ * The pointer to the newly allocated mempool, on success. NULL on error
+ * with rte_errno set appropriately. Possible rte_errno values include:
+ * - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure
+ * - E_RTE_SECONDARY - function was called from a secondary process instance
+ * - EINVAL - cache size provided is too large, or priv_size is not aligned.
+ * - ENOSPC - the maximum number of memzones has already been allocated
+ * - EEXIST - a memzone with the same name already exists
+ * - ENOMEM - no appropriate memory area found in which to create memzone
+ */
+struct rte_mempool *
+rte_pktmbuf_pool_create_by_ops(const char *name, unsigned int n,
+ unsigned int cache_size, uint16_t priv_size, uint16_t data_room_size,
+ int socket_id, const char *ops_name);
+
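+/*
+ * Illustrative call, with example sizes only: create a pool explicitly
+ * backed by the "ring_mp_mc" mempool ops.
+ *
+ *   struct rte_mempool *mp = rte_pktmbuf_pool_create_by_ops(
+ *       "mbuf_pool", 8191, 256, 0, RTE_MBUF_DEFAULT_BUF_SIZE,
+ *       rte_socket_id(), "ring_mp_mc");
+ */
+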
/**
* Get the data room size of mbufs stored in a pktmbuf_pool
*
*/
static inline void rte_pktmbuf_reset_headroom(struct rte_mbuf *m)
{
- m->data_off = RTE_MIN(RTE_PKTMBUF_HEADROOM, (uint16_t)m->buf_len);
+ m->data_off = (uint16_t)RTE_MIN((uint16_t)RTE_PKTMBUF_HEADROOM,
+ (uint16_t)m->buf_len);
}
/**
* @param m
* The packet mbuf to be reset.
*/
+#define MBUF_INVALID_PORT UINT16_MAX
+
static inline void rte_pktmbuf_reset(struct rte_mbuf *m)
{
m->next = NULL;
m->vlan_tci = 0;
m->vlan_tci_outer = 0;
m->nb_segs = 1;
- m->port = 0xff;
+ m->port = MBUF_INVALID_PORT;
m->ol_flags = 0;
m->packet_type = 0;
* Array size
* @return
* - 0: Success
+ * - -ENOENT: Not enough entries in the mempool; no mbufs are retrieved.
*/
static inline int rte_pktmbuf_alloc_bulk(struct rte_mempool *pool,
struct rte_mbuf **mbufs, unsigned count)
return 0;
}
+/**
+ * Initialize shared data at the end of an external buffer before attaching
+ * to a mbuf by ``rte_pktmbuf_attach_extbuf()``. This is not a mandatory
+ * initialization but a helper function to simply spare a few bytes at the
+ * end of the buffer for shared data. If shared data is allocated
+ * separately, this should not be called but the application has to
+ * properly initialize the shared data according to its needs.
+ *
+ * The free callback and its argument are saved and the refcnt is set to 1.
+ *
+ * @warning
+ * The value of buf_len will be reduced to RTE_PTR_DIFF(shinfo, buf_addr)
+ * after this initialization. This shall be used for
+ * ``rte_pktmbuf_attach_extbuf()``
+ *
+ * @param buf_addr
+ * The pointer to the external buffer.
+ * @param [in,out] buf_len
+ * The pointer to length of the external buffer. Input value must be
+ * larger than the size of ``struct rte_mbuf_ext_shared_info`` and
+ * padding for alignment. If not enough, this function will return NULL.
+ * Adjusted buffer length will be returned through this pointer.
+ * @param free_cb
+ * Free callback function to call when the external buffer needs to be
+ * freed.
+ * @param fcb_opaque
+ * Argument for the free callback function.
+ *
+ * @return
+ * A pointer to the initialized shared data on success, return NULL
+ * otherwise.
+ */
+static inline struct rte_mbuf_ext_shared_info *
+rte_pktmbuf_ext_shinfo_init_helper(void *buf_addr, uint16_t *buf_len,
+ rte_mbuf_extbuf_free_callback_t free_cb, void *fcb_opaque)
+{
+ struct rte_mbuf_ext_shared_info *shinfo;
+ void *buf_end = RTE_PTR_ADD(buf_addr, *buf_len);
+ void *addr;
+
+ addr = RTE_PTR_ALIGN_FLOOR(RTE_PTR_SUB(buf_end, sizeof(*shinfo)),
+ sizeof(uintptr_t));
+ if (addr <= buf_addr)
+ return NULL;
+
+ shinfo = (struct rte_mbuf_ext_shared_info *)addr;
+ shinfo->free_cb = free_cb;
+ shinfo->fcb_opaque = fcb_opaque;
+ rte_mbuf_ext_refcnt_set(shinfo, 1);
+
+ *buf_len = (uint16_t)RTE_PTR_DIFF(shinfo, buf_addr);
+ return shinfo;
+}
+
+/**
+ * Attach an external buffer to a mbuf.
+ *
+ * User-managed anonymous buffer can be attached to an mbuf. When attaching
+ * it, corresponding free callback function and its argument should be
+ * provided via shinfo. This callback function will be called once all the
+ * mbufs are detached from the buffer (refcnt becomes zero).
+ *
+ * The headroom for the attaching mbuf will be set to zero and can be
+ * adjusted as needed after attachment. For example, ``rte_pktmbuf_adj()``
+ * or ``rte_pktmbuf_reset_headroom()`` might be used.
+ *
+ * More mbufs can be attached to the same external buffer by
+ * ``rte_pktmbuf_attach()`` once the external buffer has been attached by
+ * this API.
+ *
+ * Detachment can be done by either ``rte_pktmbuf_detach_extbuf()`` or
+ * ``rte_pktmbuf_detach()``.
+ *
+ * Memory for shared data must be provided and the user must initialize
+ * all of its content properly, especially the free callback and refcnt.
+ * The pointer of shared data will be stored in m->shinfo.
+ * ``rte_pktmbuf_ext_shinfo_init_helper`` can help to simply spare a few
+ * bytes at the end of buffer for the shared data, store free callback and
+ * its argument and set the refcnt to 1. The following is an example:
+ *
+ * struct rte_mbuf_ext_shared_info *shinfo =
+ * rte_pktmbuf_ext_shinfo_init_helper(buf_addr, &buf_len,
+ * free_cb, fcb_arg);
+ * rte_pktmbuf_attach_extbuf(m, buf_addr, buf_iova, buf_len, shinfo);
+ * rte_pktmbuf_reset_headroom(m);
+ * rte_pktmbuf_adj(m, data_len);
+ *
+ * Attaching an external buffer is quite similar to mbuf indirection in
+ * replacing buffer addresses and length of a mbuf, but with a few
+ * differences:
+ * - When an indirect mbuf is attached, refcnt of the direct mbuf would be
+ * 2 as long as the direct mbuf itself isn't freed after the attachment.
+ * In such cases, the buffer area of a direct mbuf must be read-only. But
+ * external buffer has its own refcnt and it starts from 1. Unless
+ * multiple mbufs are attached to a mbuf having an external buffer, the
+ * external buffer is writable.
+ * - There's no need to allocate buffer from a mempool. Any buffer can be
+ * attached with appropriate free callback and its IO address.
+ * - Smaller metadata is required to maintain shared data such as refcnt.
+ *
+ * @param m
+ * The pointer to the mbuf.
+ * @param buf_addr
+ * The pointer to the external buffer.
+ * @param buf_iova
+ * IO address of the external buffer.
+ * @param buf_len
+ * The size of the external buffer.
+ * @param shinfo
+ * User-provided memory for shared data of the external buffer.
+ */
+static inline void
+rte_pktmbuf_attach_extbuf(struct rte_mbuf *m, void *buf_addr,
+ rte_iova_t buf_iova, uint16_t buf_len,
+ struct rte_mbuf_ext_shared_info *shinfo)
+{
+ /* mbuf should not be read-only */
+ RTE_ASSERT(RTE_MBUF_DIRECT(m) && rte_mbuf_refcnt_read(m) == 1);
+ RTE_ASSERT(shinfo->free_cb != NULL);
+
+ m->buf_addr = buf_addr;
+ m->buf_iova = buf_iova;
+ m->buf_len = buf_len;
+
+ m->data_len = 0;
+ m->data_off = 0;
+
+ m->ol_flags |= EXT_ATTACHED_MBUF;
+ m->shinfo = shinfo;
+}
+
+/**
+ * Detach the external buffer attached to a mbuf, same as
+ * ``rte_pktmbuf_detach()``
+ *
+ * @param m
+ * The mbuf having external buffer.
+ */
+#define rte_pktmbuf_detach_extbuf(m) rte_pktmbuf_detach(m)
+
/**
* Attach packet mbuf to another packet mbuf.
*
- * After attachment we refer the mbuf we attached as 'indirect',
- * while mbuf we attached to as 'direct'.
- * The direct mbuf's reference counter is incremented.
+ * If the mbuf we are attaching to isn't a direct buffer and is attached to
+ * an external buffer, the mbuf being attached will be attached to the
+ * external buffer instead of mbuf indirection.
+ *
+ * Otherwise, the mbuf will be indirectly attached. After attachment we
+ * refer to the attached mbuf as 'indirect', and to the mbuf it is
+ * attached to as 'direct'. The direct mbuf's reference counter is
+ * incremented.
*
* Right now, not supported:
* - attachment for already indirect mbuf (e.g. - mi has to be direct).
*/
static inline void rte_pktmbuf_attach(struct rte_mbuf *mi, struct rte_mbuf *m)
{
- struct rte_mbuf *md;
-
RTE_ASSERT(RTE_MBUF_DIRECT(mi) &&
rte_mbuf_refcnt_read(mi) == 1);
- /* if m is not direct, get the mbuf that embeds the data */
- if (RTE_MBUF_DIRECT(m))
- md = m;
- else
- md = rte_mbuf_from_indirect(m);
+ if (RTE_MBUF_HAS_EXTBUF(m)) {
+ rte_mbuf_ext_refcnt_update(m->shinfo, 1);
+ mi->ol_flags = m->ol_flags;
+ mi->shinfo = m->shinfo;
+ } else {
+ /* if m is not direct, get the mbuf that embeds the data */
+ rte_mbuf_refcnt_update(rte_mbuf_from_indirect(m), 1);
+ mi->priv_size = m->priv_size;
+ mi->ol_flags = m->ol_flags | IND_ATTACHED_MBUF;
+ }
- rte_mbuf_refcnt_update(md, 1);
- mi->priv_size = m->priv_size;
- mi->buf_physaddr = m->buf_physaddr;
+ mi->buf_iova = m->buf_iova;
mi->buf_addr = m->buf_addr;
mi->buf_len = m->buf_len;
mi->next = NULL;
mi->pkt_len = mi->data_len;
mi->nb_segs = 1;
- mi->ol_flags = m->ol_flags | IND_ATTACHED_MBUF;
mi->packet_type = m->packet_type;
mi->timestamp = m->timestamp;
}
/**
- * Detach an indirect packet mbuf.
+ * @internal used by rte_pktmbuf_detach().
+ *
+ * Decrement the reference counter of the external buffer. When the
+ * reference counter becomes 0, the buffer is freed by pre-registered
+ * callback.
+ */
+static inline void
+__rte_pktmbuf_free_extbuf(struct rte_mbuf *m)
+{
+ RTE_ASSERT(RTE_MBUF_HAS_EXTBUF(m));
+ RTE_ASSERT(m->shinfo != NULL);
+
+ if (rte_mbuf_ext_refcnt_update(m->shinfo, -1) == 0)
+ m->shinfo->free_cb(m->buf_addr, m->shinfo->fcb_opaque);
+}
+
+/**
+ * @internal used by rte_pktmbuf_detach().
+ *
+ * Decrement the direct mbuf's reference counter. When the reference
+ * counter becomes 0, the direct mbuf is freed.
+ */
+static inline void
+__rte_pktmbuf_free_direct(struct rte_mbuf *m)
+{
+ struct rte_mbuf *md;
+
+ RTE_ASSERT(RTE_MBUF_CLONED(m));
+
+ md = rte_mbuf_from_indirect(m);
+
+ if (rte_mbuf_refcnt_update(md, -1) == 0) {
+ md->next = NULL;
+ md->nb_segs = 1;
+ rte_mbuf_refcnt_set(md, 1);
+ rte_mbuf_raw_free(md);
+ }
+}
+
+/**
+ * Detach a packet mbuf from external buffer or direct buffer.
*
+ * - decrement refcnt and free the external/direct buffer if refcnt
+ * becomes zero.
* - restore original mbuf address and length values.
* - reset pktmbuf data and data_len to their default values.
- * - decrement the direct mbuf's reference counter. When the
- * reference counter becomes 0, the direct mbuf is freed.
*
* All other fields of the given packet mbuf will be left intact.
*
*/
static inline void rte_pktmbuf_detach(struct rte_mbuf *m)
{
- struct rte_mbuf *md = rte_mbuf_from_indirect(m);
struct rte_mempool *mp = m->pool;
- uint32_t mbuf_size, buf_len, priv_size;
+ uint32_t mbuf_size, buf_len;
+ uint16_t priv_size;
+
+ if (RTE_MBUF_HAS_EXTBUF(m))
+ __rte_pktmbuf_free_extbuf(m);
+ else
+ __rte_pktmbuf_free_direct(m);
priv_size = rte_pktmbuf_priv_size(mp);
- mbuf_size = sizeof(struct rte_mbuf) + priv_size;
+ mbuf_size = (uint32_t)(sizeof(struct rte_mbuf) + priv_size);
buf_len = rte_pktmbuf_data_room_size(mp);
m->priv_size = priv_size;
m->buf_addr = (char *)m + mbuf_size;
- m->buf_physaddr = rte_mempool_virt2phy(mp, m) + mbuf_size;
+ m->buf_iova = rte_mempool_virt2iova(m) + mbuf_size;
m->buf_len = (uint16_t)buf_len;
rte_pktmbuf_reset_headroom(m);
m->data_len = 0;
m->ol_flags = 0;
-
- if (rte_mbuf_refcnt_update(md, -1) == 0) {
- md->next = NULL;
- md->nb_segs = 1;
- rte_mbuf_refcnt_set(md, 1);
- rte_mbuf_raw_free(md);
- }
}
/**
* - (m) if it is the last reference. It can be recycled or freed.
* - (NULL) if the mbuf still has remaining references on it.
*/
-__attribute__((always_inline))
-static inline struct rte_mbuf *
+static __rte_always_inline struct rte_mbuf *
rte_pktmbuf_prefree_seg(struct rte_mbuf *m)
{
__rte_mbuf_sanity_check(m, 0);
if (likely(rte_mbuf_refcnt_read(m) == 1)) {
- if (RTE_MBUF_INDIRECT(m))
+ if (!RTE_MBUF_DIRECT(m))
rte_pktmbuf_detach(m);
if (m->next != NULL) {
return m;
- } else if (rte_atomic16_add_return(&m->refcnt_atomic, -1) == 0) {
-
+ } else if (__rte_mbuf_refcnt_update(m, -1) == 0) {
- if (RTE_MBUF_INDIRECT(m))
+ if (!RTE_MBUF_DIRECT(m))
rte_pktmbuf_detach(m);
if (m->next != NULL) {
return NULL;
}
-/* deprecated, replaced by rte_pktmbuf_prefree_seg() */
-__rte_deprecated
-static inline struct rte_mbuf *
-__rte_pktmbuf_prefree_seg(struct rte_mbuf *m)
-{
- return rte_pktmbuf_prefree_seg(m);
-}
-
/**
* Free a segment of a packet mbuf into its original mempool.
*
* @param m
* The packet mbuf segment to be freed.
*/
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
rte_pktmbuf_free_seg(struct rte_mbuf *m)
{
m = rte_pktmbuf_prefree_seg(m);
* segment is added back into its original mempool.
*
* @param m
- * The packet mbuf to be freed.
+ * The packet mbuf to be freed. If NULL, the function does nothing.
*/
static inline void rte_pktmbuf_free(struct rte_mbuf *m)
{
struct rte_mbuf *m_next;
- __rte_mbuf_sanity_check(m, 1);
+ if (m != NULL)
+ __rte_mbuf_sanity_check(m, 1);
while (m != NULL) {
m_next = m->next;
{
struct rte_mbuf *mc, *mi, **prev;
uint32_t pktlen;
- uint8_t nseg;
+ uint16_t nseg;
if (unlikely ((mc = rte_pktmbuf_alloc(mp)) == NULL))
return NULL;
*/
static inline uint16_t rte_pktmbuf_headroom(const struct rte_mbuf *m)
{
- __rte_mbuf_sanity_check(m, 1);
+ __rte_mbuf_sanity_check(m, 0);
return m->data_off;
}
*/
static inline uint16_t rte_pktmbuf_tailroom(const struct rte_mbuf *m)
{
- __rte_mbuf_sanity_check(m, 1);
+ __rte_mbuf_sanity_check(m, 0);
return (uint16_t)(m->buf_len - rte_pktmbuf_headroom(m) -
m->data_len);
}
*/
static inline struct rte_mbuf *rte_pktmbuf_lastseg(struct rte_mbuf *m)
{
- struct rte_mbuf *m2 = (struct rte_mbuf *)m;
-
__rte_mbuf_sanity_check(m, 1);
- while (m2->next != NULL)
- m2 = m2->next;
- return m2;
+ while (m->next != NULL)
+ m = m->next;
+ return m;
}
/**
#define rte_pktmbuf_mtod(m, t) rte_pktmbuf_mtod_offset(m, t, 0)
/**
- * A macro that returns the physical address that points to an offset of the
+ * A macro that returns the IO address that points to an offset of the
* start of the data in the mbuf
*
* @param m
* @param o
* The offset into the data to calculate address from.
*/
+#define rte_pktmbuf_iova_offset(m, o) \
+ (rte_iova_t)((m)->buf_iova + (m)->data_off + (o))
+
+/* deprecated */
#define rte_pktmbuf_mtophys_offset(m, o) \
- (phys_addr_t)((m)->buf_physaddr + (m)->data_off + (o))
+ rte_pktmbuf_iova_offset(m, o)
/**
- * A macro that returns the physical address that points to the start of the
+ * A macro that returns the IO address that points to the start of the
* data in the mbuf
*
* @param m
* The packet mbuf.
*/
-#define rte_pktmbuf_mtophys(m) rte_pktmbuf_mtophys_offset(m, 0)
+#define rte_pktmbuf_iova(m) rte_pktmbuf_iova_offset(m, 0)
+
+/* deprecated */
+#define rte_pktmbuf_mtophys(m) rte_pktmbuf_iova(m)
/**
* A macro that returns the length of the packet.
if (unlikely(len > rte_pktmbuf_headroom(m)))
return NULL;
- m->data_off -= len;
+ /* NB: elaborating the subtraction like this instead of using
+ * -= allows us to ensure the result type is uint16_t
+ * avoiding compiler warnings on gcc 8.1 at least */
+ m->data_off = (uint16_t)(m->data_off - len);
m->data_len = (uint16_t)(m->data_len + len);
m->pkt_len = (m->pkt_len + len);
if (unlikely(len > m->data_len))
return NULL;
+ /* NB: elaborating the addition like this instead of using
+ * += allows us to ensure the result type is uint16_t
+ * avoiding compiler warnings on gcc 8.1 at least */
m->data_len = (uint16_t)(m->data_len - len);
- m->data_off += len;
+ m->data_off = (uint16_t)(m->data_off + len);
m->pkt_len = (m->pkt_len - len);
return (char *)m->buf_addr + m->data_off;
}
* @param len
* The amount of bytes to read.
* @param buf
- * The buffer where data is copied if it is not contigous in mbuf
+ * The buffer where data is copied if it is not contiguous in mbuf
* data. Its length should be at least equal to the len parameter.
* @return
* The pointer to the data, either in the mbuf if it is contiguous,
*
* @return
* - 0, on success.
- * - -EOVERFLOW, if the chain is full (256 entries)
+ * - -EOVERFLOW, if the chain segment limit is exceeded
*/
static inline int rte_pktmbuf_chain(struct rte_mbuf *head, struct rte_mbuf *tail)
{
struct rte_mbuf *cur_tail;
/* Check for number-of-segments-overflow */
- if (head->nb_segs + tail->nb_segs >= 1 << (sizeof(head->nb_segs) * 8))
+ if (head->nb_segs + tail->nb_segs > RTE_MBUF_MAX_NB_SEGS)
return -EOVERFLOW;
/* Chain 'tail' onto the old tail */
cur_tail = rte_pktmbuf_lastseg(head);
cur_tail->next = tail;
- /* accumulate number of segments and total length. */
- head->nb_segs = (uint8_t)(head->nb_segs + tail->nb_segs);
+ /* accumulate number of segments and total length.
+ * NB: elaborating the addition like this instead of using
+ * += allows us to ensure the result type is uint16_t
+ * avoiding compiler warnings on gcc 8.1 at least */
+ head->nb_segs = (uint16_t)(head->nb_segs + tail->nb_segs);
head->pkt_len += tail->pkt_len;
/* pkt_len is only set in the head */
return 0;
}
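+
+/*
+ * Illustrative sketch: chain `tail` onto `head` and free it ourselves
+ * when the segment limit would be exceeded.
+ *
+ *   if (rte_pktmbuf_chain(head, tail) < 0)
+ *       rte_pktmbuf_free(tail);
+ */
+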
+/**
+ * @warning
+ * @b EXPERIMENTAL: This API may change without prior notice.
+ *
+ * For given input values generate raw tx_offload value.
+ * Note that it is the caller's responsibility to make sure that input
+ * parameters don't exceed maximum bit-field values.
+ * @param il2
+ * l2_len value.
+ * @param il3
+ * l3_len value.
+ * @param il4
+ * l4_len value.
+ * @param tso
+ * tso_segsz value.
+ * @param ol3
+ * outer_l3_len value.
+ * @param ol2
+ * outer_l2_len value.
+ * @param unused
+ * unused value.
+ * @return
+ * raw tx_offload value.
+ */
+static __rte_always_inline uint64_t
+rte_mbuf_tx_offload(uint64_t il2, uint64_t il3, uint64_t il4, uint64_t tso,
+ uint64_t ol3, uint64_t ol2, uint64_t unused)
+{
+ return il2 << RTE_MBUF_L2_LEN_OFS |
+ il3 << RTE_MBUF_L3_LEN_OFS |
+ il4 << RTE_MBUF_L4_LEN_OFS |
+ tso << RTE_MBUF_TSO_SEGSZ_OFS |
+ ol3 << RTE_MBUF_OUTL3_LEN_OFS |
+ ol2 << RTE_MBUF_OUTL2_LEN_OFS |
+ unused << RTE_MBUF_TXOFLD_UNUSED_OFS;
+}
+
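+/*
+ * Illustrative call, with example lengths for an Ethernet/IPv4/TCP TSO
+ * packet: one store replaces six separate bit-field writes.
+ *
+ *   m->tx_offload = rte_mbuf_tx_offload(14, 20, 20, 1460, 0, 0, 0);
+ */
+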
/**
* Validate general requirements for Tx offload in mbuf.
*
rte_validate_tx_offload(const struct rte_mbuf *m)
{
uint64_t ol_flags = m->ol_flags;
- uint64_t inner_l3_offset = m->l2_len;
/* Does packet set any of available offloads? */
if (!(ol_flags & PKT_TX_OFFLOAD_MASK))
return 0;
- if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
- inner_l3_offset += m->outer_l2_len + m->outer_l3_len;
-
- /* Headers are fragmented */
- if (rte_pktmbuf_data_len(m) < inner_l3_offset + m->l3_len + m->l4_len)
- return -ENOTSUP;
-
/* IP checksum can be counted only for IPv4 packet */
if ((ol_flags & PKT_TX_IP_CKSUM) && (ol_flags & PKT_TX_IPV6))
return -EINVAL;
static inline int
rte_pktmbuf_linearize(struct rte_mbuf *mbuf)
{
- int seg_len, copy_len;
+ size_t seg_len, copy_len;
struct rte_mbuf *m;
struct rte_mbuf *m_next;
char *buffer;
*/
void rte_pktmbuf_dump(FILE *f, const struct rte_mbuf *m, unsigned dump_len);
+/**
+ * Get the value of mbuf sched queue_id field.
+ */
+static inline uint32_t
+rte_mbuf_sched_queue_get(const struct rte_mbuf *m)
+{
+ return m->hash.sched.queue_id;
+}
+
+/**
+ * Get the value of mbuf sched traffic_class field.
+ */
+static inline uint8_t
+rte_mbuf_sched_traffic_class_get(const struct rte_mbuf *m)
+{
+ return m->hash.sched.traffic_class;
+}
+
+/**
+ * Get the value of mbuf sched color field.
+ */
+static inline uint8_t
+rte_mbuf_sched_color_get(const struct rte_mbuf *m)
+{
+ return m->hash.sched.color;
+}
+
+/**
+ * Get the values of mbuf sched queue_id, traffic_class and color.
+ *
+ * @param m
+ * Mbuf to read
+ * @param queue_id
+ * Returns the queue id
+ * @param traffic_class
+ * Returns the traffic class id
+ * @param color
+ * Returns the color id
+ */
+static inline void
+rte_mbuf_sched_get(const struct rte_mbuf *m, uint32_t *queue_id,
+ uint8_t *traffic_class,
+ uint8_t *color)
+{
+ struct rte_mbuf_sched sched = m->hash.sched;
+
+ *queue_id = sched.queue_id;
+ *traffic_class = sched.traffic_class;
+ *color = sched.color;
+}
+
+/**
+ * Set the mbuf sched queue_id to the defined value.
+ */
+static inline void
+rte_mbuf_sched_queue_set(struct rte_mbuf *m, uint32_t queue_id)
+{
+ m->hash.sched.queue_id = queue_id;
+}
+
+/**
+ * Set the mbuf sched traffic_class id to the defined value.
+ */
+static inline void
+rte_mbuf_sched_traffic_class_set(struct rte_mbuf *m, uint8_t traffic_class)
+{
+ m->hash.sched.traffic_class = traffic_class;
+}
+
+/**
+ * Set the mbuf sched color id to the defined value.
+ */
+static inline void
+rte_mbuf_sched_color_set(struct rte_mbuf *m, uint8_t color)
+{
+ m->hash.sched.color = color;
+}
+
+/**
+ * Set the mbuf sched queue_id, traffic_class and color.
+ *
+ * @param m
+ * Mbuf to set
+ * @param queue_id
+ * Queue id value to be set
+ * @param traffic_class
+ * Traffic class id value to be set
+ * @param color
+ * Color id to be set
+ */
+static inline void
+rte_mbuf_sched_set(struct rte_mbuf *m, uint32_t queue_id,
+ uint8_t traffic_class,
+ uint8_t color)
+{
+ m->hash.sched = (struct rte_mbuf_sched){
+ .queue_id = queue_id,
+ .traffic_class = traffic_class,
+ .color = color,
+ .reserved = 0,
+ };
+}
+
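+/*
+ * Illustrative round trip, with example values (queue 3, traffic class
+ * 0, color 0, i.e. green in enum rte_color):
+ *
+ *   uint32_t q;
+ *   uint8_t tc, col;
+ *
+ *   rte_mbuf_sched_set(m, 3, 0, 0);
+ *   rte_mbuf_sched_get(m, &q, &tc, &col);
+ */
+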
#ifdef __cplusplus
}
#endif