1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2014 Intel Corporation.
3 * Copyright 2014 6WIND S.A.
13 * The mbuf library provides the ability to create and destroy buffers
14 * that may be used by the RTE application to store message
15 * buffers. The message buffers are stored in a mempool, using the
16 * RTE mempool library.
18 * The preferred way to create a mbuf pool is to use
19 * rte_pktmbuf_pool_create(). However, in some situations, an
20 * application may want to have more control (ex: populate the pool with
21 * specific memory), in this case it is possible to use functions from
22 * rte_mempool. See how rte_pktmbuf_pool_create() is implemented for details.
25 * This library provides an API to allocate/free packet mbufs, which are
26 * used to carry network packets.
28 * To understand the concepts of packet buffers or mbufs, you
29 * should read "TCP/IP Illustrated, Volume 2: The Implementation,
30 * Addison-Wesley, 1995, ISBN 0-201-63354-X from Richard Stevens"
31 * http://www.kohala.com/start/tcpipiv2.html
35 #include <rte_compat.h>
36 #include <rte_common.h>
37 #include <rte_config.h>
38 #include <rte_mempool.h>
39 #include <rte_memory.h>
40 #include <rte_atomic.h>
41 #include <rte_prefetch.h>
42 #include <rte_branch_prediction.h>
43 #include <rte_byteorder.h>
44 #include <rte_mbuf_ptype.h>
51 * Packet Offload Features Flags. They also carry packet type information.
52 * Critical resources. Both RX and TX share these bits. Be cautious on any change
54 * - RX flags start at bit position zero, and get added to the left of previous flags.
56 * - The most-significant 3 bits are reserved for generic mbuf flags
57 * - TX flags therefore start at bit position 60 (i.e. 63-3), and new flags get
58 * added to the right of the previously defined flags i.e. they should count
59 * downwards, not upwards.
61 * Keep these flags synchronized with rte_get_rx_ol_flag_name() and
62 * rte_get_tx_ol_flag_name().
66 * The RX packet is an 802.1q VLAN packet, and the tci has been
67 * saved in mbuf->vlan_tci.
68 * If the flag PKT_RX_VLAN_STRIPPED is also present, the VLAN
69 * header has been stripped from mbuf data, else it is still present.
72 #define PKT_RX_VLAN (1ULL << 0)
74 #define PKT_RX_RSS_HASH (1ULL << 1) /**< RX packet with RSS hash result. */
75 #define PKT_RX_FDIR (1ULL << 2) /**< RX packet with FDIR match indicated. */
79 * Checking this flag alone is deprecated: check the 2 bits of
80 * PKT_RX_L4_CKSUM_MASK.
81 * This flag was set when the L4 checksum of a packet was detected as
82 * wrong by the hardware.
84 #define PKT_RX_L4_CKSUM_BAD (1ULL << 3)
88 * Checking this flag alone is deprecated: check the 2 bits of
89 * PKT_RX_IP_CKSUM_MASK.
90 * This flag was set when the IP checksum of a packet was detected as
91 * wrong by the hardware.
93 #define PKT_RX_IP_CKSUM_BAD (1ULL << 4)
95 #define PKT_RX_EIP_CKSUM_BAD (1ULL << 5) /**< External IP header checksum error. */
98 * A vlan has been stripped by the hardware and its tci is saved in
99 * mbuf->vlan_tci. This can only happen if vlan stripping is enabled
100 * in the RX configuration of the PMD.
101 * When PKT_RX_VLAN_STRIPPED is set, PKT_RX_VLAN must also be set.
103 #define PKT_RX_VLAN_STRIPPED (1ULL << 6)
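/*
 * Usage sketch (an illustrative addition, not from the original source):
 * inspecting the VLAN flags above on a received mbuf, assuming
 * <rte_mbuf.h> is included and m comes from an RX burst.
 *
 *     static void handle_rx_vlan(const struct rte_mbuf *m)
 *     {
 *         if ((m->ol_flags & PKT_RX_VLAN) == 0)
 *             return;            // no VLAN information available
 *         if (m->ol_flags & PKT_RX_VLAN_STRIPPED) {
 *             // header removed by the PMD, TCI saved in the mbuf
 *             uint16_t tci = m->vlan_tci;
 *             (void)tci;
 *         } else {
 *             // the VLAN header is still present in the packet data
 *         }
 *     }
 */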
106 * Mask of bits used to determine the status of RX IP checksum.
107 * - PKT_RX_IP_CKSUM_UNKNOWN: no information about the RX IP checksum
108 * - PKT_RX_IP_CKSUM_BAD: the IP checksum in the packet is wrong
109 * - PKT_RX_IP_CKSUM_GOOD: the IP checksum in the packet is valid
110 * - PKT_RX_IP_CKSUM_NONE: the IP checksum is not correct in the packet
111 * data, but the integrity of the IP header is verified.
113 #define PKT_RX_IP_CKSUM_MASK ((1ULL << 4) | (1ULL << 7))
115 #define PKT_RX_IP_CKSUM_UNKNOWN 0
116 #define PKT_RX_IP_CKSUM_BAD (1ULL << 4)
117 #define PKT_RX_IP_CKSUM_GOOD (1ULL << 7)
118 #define PKT_RX_IP_CKSUM_NONE ((1ULL << 4) | (1ULL << 7))
121 * Mask of bits used to determine the status of RX L4 checksum.
122 * - PKT_RX_L4_CKSUM_UNKNOWN: no information about the RX L4 checksum
123 * - PKT_RX_L4_CKSUM_BAD: the L4 checksum in the packet is wrong
124 * - PKT_RX_L4_CKSUM_GOOD: the L4 checksum in the packet is valid
125 * - PKT_RX_L4_CKSUM_NONE: the L4 checksum is not correct in the packet
126 * data, but the integrity of the L4 data is verified.
128 #define PKT_RX_L4_CKSUM_MASK ((1ULL << 3) | (1ULL << 8))
130 #define PKT_RX_L4_CKSUM_UNKNOWN 0
131 #define PKT_RX_L4_CKSUM_BAD (1ULL << 3)
132 #define PKT_RX_L4_CKSUM_GOOD (1ULL << 8)
133 #define PKT_RX_L4_CKSUM_NONE ((1ULL << 3) | (1ULL << 8))
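/*
 * Usage sketch (illustrative addition): checking the 2-bit RX checksum
 * status fields above. Compare against the full mask rather than testing
 * the deprecated single-bit flags.
 *
 *     static int rx_cksums_look_ok(const struct rte_mbuf *m)
 *     {
 *         uint64_t ip = m->ol_flags & PKT_RX_IP_CKSUM_MASK;
 *         uint64_t l4 = m->ol_flags & PKT_RX_L4_CKSUM_MASK;
 *
 *         if (ip == PKT_RX_IP_CKSUM_BAD || l4 == PKT_RX_L4_CKSUM_BAD)
 *             return 0;          // checksum known to be wrong
 *         // UNKNOWN/NONE states may need software verification, per policy
 *         return 1;
 *     }
 */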
135 #define PKT_RX_IEEE1588_PTP (1ULL << 9) /**< RX IEEE1588 L2 Ethernet PT Packet. */
136 #define PKT_RX_IEEE1588_TMST (1ULL << 10) /**< RX IEEE1588 L2/L4 timestamped packet.*/
137 #define PKT_RX_FDIR_ID (1ULL << 13) /**< FD id reported if FDIR match. */
138 #define PKT_RX_FDIR_FLX (1ULL << 14) /**< Flexible bytes reported if FDIR match. */
141 * The 2 vlans have been stripped by the hardware and their tci are
142 * saved in mbuf->vlan_tci (inner) and mbuf->vlan_tci_outer (outer).
143 * This can only happen if vlan stripping is enabled in the RX
144 * configuration of the PMD.
145 * When PKT_RX_QINQ_STRIPPED is set, the flags (PKT_RX_VLAN |
146 * PKT_RX_VLAN_STRIPPED | PKT_RX_QINQ) must also be set.
148 #define PKT_RX_QINQ_STRIPPED (1ULL << 15)
151 * When packets are coalesced by a hardware or virtual driver, this flag
152 * can be set in the RX mbuf, meaning that the m->tso_segsz field is
153 * valid and is set to the segment size of original packets.
155 #define PKT_RX_LRO (1ULL << 16)
158 * Indicate that the timestamp field in the mbuf is valid.
160 #define PKT_RX_TIMESTAMP (1ULL << 17)
163 * Indicate that security offload processing was applied on the RX packet.
165 #define PKT_RX_SEC_OFFLOAD (1ULL << 18)
168 * Indicate that security offload processing failed on the RX packet.
170 #define PKT_RX_SEC_OFFLOAD_FAILED (1ULL << 19)
173 * The RX packet is a double VLAN, and the outer tci has been
174 * saved in mbuf->vlan_tci_outer. If PKT_RX_QINQ is set, PKT_RX_VLAN
175 * should also be set and the inner tci saved to mbuf->vlan_tci.
176 * If the flag PKT_RX_QINQ_STRIPPED is also present, both VLAN
177 * headers have been stripped from mbuf data, else they are still present.
180 #define PKT_RX_QINQ (1ULL << 20)
183 * Mask of bits used to determine the status of outer RX L4 checksum.
184 * - PKT_RX_OUTER_L4_CKSUM_UNKNOWN: no info about the outer RX L4 checksum
185 * - PKT_RX_OUTER_L4_CKSUM_BAD: the outer L4 checksum in the packet is wrong
186 * - PKT_RX_OUTER_L4_CKSUM_GOOD: the outer L4 checksum in the packet is valid
187 * - PKT_RX_OUTER_L4_CKSUM_INVALID: invalid outer L4 checksum state.
189 * The detection of PKT_RX_OUTER_L4_CKSUM_GOOD shall be based on the given
190 * HW capability. At minimum, the PMD should support
191 * PKT_RX_OUTER_L4_CKSUM_UNKNOWN and PKT_RX_OUTER_L4_CKSUM_BAD states
192 * if the DEV_RX_OFFLOAD_OUTER_UDP_CKSUM offload is available.
194 #define PKT_RX_OUTER_L4_CKSUM_MASK ((1ULL << 21) | (1ULL << 22))
196 #define PKT_RX_OUTER_L4_CKSUM_UNKNOWN 0
197 #define PKT_RX_OUTER_L4_CKSUM_BAD (1ULL << 21)
198 #define PKT_RX_OUTER_L4_CKSUM_GOOD (1ULL << 22)
199 #define PKT_RX_OUTER_L4_CKSUM_INVALID ((1ULL << 21) | (1ULL << 22))
201 /* add new RX flags here, don't forget to update PKT_FIRST_FREE */
203 #define PKT_FIRST_FREE (1ULL << 23)
204 #define PKT_LAST_FREE (1ULL << 39)
206 /* add new TX flags here, don't forget to update PKT_LAST_FREE */
209 * Indicate that the metadata field in the mbuf is in use.
211 #define PKT_TX_METADATA (1ULL << 40)
214 * Outer UDP checksum offload flag. This flag is used for enabling
215 * outer UDP checksum in the PMD. To use outer UDP checksum, the user needs to
216 * 1) Enable the following in mbuf,
217 * a) Fill outer_l2_len and outer_l3_len in mbuf.
218 * b) Set the PKT_TX_OUTER_UDP_CKSUM flag.
219 * c) Set the PKT_TX_OUTER_IPV4 or PKT_TX_OUTER_IPV6 flag.
220 * 2) Configure DEV_TX_OFFLOAD_OUTER_UDP_CKSUM offload flag.
222 #define PKT_TX_OUTER_UDP_CKSUM (1ULL << 41)
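/*
 * Sketch of the steps listed above (illustrative; the header sizes are
 * assumptions using rte_ether_hdr and rte_ipv4_hdr from rte_ether.h and
 * rte_ip.h): preparing an mbuf for outer UDP checksum offload on a port
 * configured with DEV_TX_OFFLOAD_OUTER_UDP_CKSUM.
 *
 *     m->outer_l2_len = sizeof(struct rte_ether_hdr);
 *     m->outer_l3_len = sizeof(struct rte_ipv4_hdr);
 *     m->ol_flags |= PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_UDP_CKSUM;
 */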
225 * UDP Fragmentation Offload flag. This flag is used for enabling UDP
226 * fragmentation in SW or in HW. When UFO is used, mbuf->tso_segsz is used
227 * to store the MSS of UDP fragments.
229 #define PKT_TX_UDP_SEG (1ULL << 42)
232 * Request security offload processing on the TX packet.
234 #define PKT_TX_SEC_OFFLOAD (1ULL << 43)
237 * Offload the MACsec. This flag must be set by the application to enable
238 * this offload feature for a packet to be transmitted.
240 #define PKT_TX_MACSEC (1ULL << 44)
243 * Bits 45:48 are used for the tunnel type.
244 * The tunnel type must be specified for TSO or checksum on the inner part of tunnel packets.
246 * These flags can be used with PKT_TX_TCP_SEG for TSO, or PKT_TX_xxx_CKSUM.
247 * The mbuf fields for inner and outer header lengths are required:
248 * outer_l2_len, outer_l3_len, l2_len, l3_len, l4_len and tso_segsz for TSO.
250 #define PKT_TX_TUNNEL_VXLAN (0x1ULL << 45)
251 #define PKT_TX_TUNNEL_GRE (0x2ULL << 45)
252 #define PKT_TX_TUNNEL_IPIP (0x3ULL << 45)
253 #define PKT_TX_TUNNEL_GENEVE (0x4ULL << 45)
254 /** TX packet with MPLS-in-UDP RFC 7510 header. */
255 #define PKT_TX_TUNNEL_MPLSINUDP (0x5ULL << 45)
256 #define PKT_TX_TUNNEL_VXLAN_GPE (0x6ULL << 45)
258 * Generic IP encapsulated tunnel type, used for TSO and checksum offload.
259 * It can be used for tunnels which are not standards or listed above.
260 * It is preferred to use specific tunnel flags like PKT_TX_TUNNEL_GRE
261 * or PKT_TX_TUNNEL_IPIP if possible.
262 * The ethdev must be configured with DEV_TX_OFFLOAD_IP_TNL_TSO.
263 * Outer and inner checksums are done according to the existing flags like PKT_TX_xxx_CKSUM.
265 * Specific tunnel headers that contain payload length, sequence id
266 * or checksum are not expected to be updated.
268 #define PKT_TX_TUNNEL_IP (0xDULL << 45)
270 * Generic UDP encapsulated tunnel type, used for TSO and checksum offload.
271 * UDP tunnel type implies outer IP layer.
272 * It can be used for tunnels which are not standards or listed above.
273 * It is preferred to use specific tunnel flags like PKT_TX_TUNNEL_VXLAN if possible.
275 * The ethdev must be configured with DEV_TX_OFFLOAD_UDP_TNL_TSO.
276 * Outer and inner checksums are done according to the existing flags like PKT_TX_xxx_CKSUM.
278 * Specific tunnel headers that contain payload length, sequence id
279 * or checksum are not expected to be updated.
281 #define PKT_TX_TUNNEL_UDP (0xEULL << 45)
282 /* add new TX TUNNEL type here */
283 #define PKT_TX_TUNNEL_MASK (0xFULL << 45)
286 * Double VLAN insertion (QinQ) request to the driver; the driver may
287 * offload the insertion based on device capability.
288 * mbuf 'vlan_tci' & 'vlan_tci_outer' must be valid when this flag is set.
290 #define PKT_TX_QINQ (1ULL << 49)
291 /* this old name is deprecated */
292 #define PKT_TX_QINQ_PKT PKT_TX_QINQ
295 * TCP segmentation offload. To enable this offload feature for a
296 * packet to be transmitted on hardware supporting TSO:
297 * - set the PKT_TX_TCP_SEG flag in mbuf->ol_flags (this flag implies PKT_TX_TCP_CKSUM)
299 * - set the flag PKT_TX_IPV4 or PKT_TX_IPV6
300 * - if it's IPv4, set the PKT_TX_IP_CKSUM flag
301 * - fill the mbuf offload information: l2_len, l3_len, l4_len, tso_segsz
303 #define PKT_TX_TCP_SEG (1ULL << 50)
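/*
 * TSO setup sketch (illustrative, following the steps above for an
 * IPv4/TCP packet; the header types from rte_ether.h, rte_ip.h and
 * rte_tcp.h and the MSS value are assumptions):
 *
 *     m->l2_len = sizeof(struct rte_ether_hdr);
 *     m->l3_len = sizeof(struct rte_ipv4_hdr);
 *     m->l4_len = sizeof(struct rte_tcp_hdr);
 *     m->tso_segsz = 1448;       // example MSS
 *     m->ol_flags |= PKT_TX_TCP_SEG | PKT_TX_IPV4 | PKT_TX_IP_CKSUM;
 */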
305 #define PKT_TX_IEEE1588_TMST (1ULL << 51) /**< TX IEEE1588 packet to timestamp. */
308 * Bits 52+53 used for L4 packet type with checksum enabled: 00: Reserved,
309 * 01: TCP checksum, 10: SCTP checksum, 11: UDP checksum. To use hardware
310 * L4 checksum offload, the user needs to:
311 * - fill l2_len and l3_len in mbuf
312 * - set the flags PKT_TX_TCP_CKSUM, PKT_TX_SCTP_CKSUM or PKT_TX_UDP_CKSUM
313 * - set the flag PKT_TX_IPV4 or PKT_TX_IPV6
315 #define PKT_TX_L4_NO_CKSUM (0ULL << 52) /**< Disable L4 cksum of TX pkt. */
316 #define PKT_TX_TCP_CKSUM (1ULL << 52) /**< TCP cksum of TX pkt. computed by NIC. */
317 #define PKT_TX_SCTP_CKSUM (2ULL << 52) /**< SCTP cksum of TX pkt. computed by NIC. */
318 #define PKT_TX_UDP_CKSUM (3ULL << 52) /**< UDP cksum of TX pkt. computed by NIC. */
319 #define PKT_TX_L4_MASK (3ULL << 52) /**< Mask for L4 cksum offload request. */
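/*
 * L4 checksum offload sketch (illustrative): requesting a hardware TCP
 * checksum for an IPv4 packet per the steps above. Note that PMDs
 * commonly also expect the L4 checksum field to be pre-filled with the
 * pseudo-header checksum (e.g. via rte_ipv4_phdr_cksum() from rte_ip.h);
 * check the driver documentation.
 *
 *     m->l2_len = sizeof(struct rte_ether_hdr);
 *     m->l3_len = sizeof(struct rte_ipv4_hdr);
 *     m->ol_flags |= PKT_TX_IPV4 | PKT_TX_TCP_CKSUM;
 */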
322 * Offload the IP checksum in the hardware. The flag PKT_TX_IPV4 should
323 * also be set by the application, although a PMD will only check PKT_TX_IP_CKSUM.
325 * - fill the mbuf offload information: l2_len, l3_len
327 #define PKT_TX_IP_CKSUM (1ULL << 54)
330 * Packet is IPv4. This flag must be set when using any offload feature
331 * (TSO, L3 or L4 checksum) to tell the NIC that the packet is an IPv4
332 * packet. If the packet is a tunneled packet, this flag is related to the inner headers.
335 #define PKT_TX_IPV4 (1ULL << 55)
338 * Packet is IPv6. This flag must be set when using an offload feature
339 * (TSO or L4 checksum) to tell the NIC that the packet is an IPv6
340 * packet. If the packet is a tunneled packet, this flag is related to the inner headers.
343 #define PKT_TX_IPV6 (1ULL << 56)
346 * VLAN tag insertion request to the driver; the driver may offload the
347 * insertion based on the device capability.
348 * mbuf 'vlan_tci' field must be valid when this flag is set.
350 #define PKT_TX_VLAN (1ULL << 57)
351 /* this old name is deprecated */
352 #define PKT_TX_VLAN_PKT PKT_TX_VLAN
355 * Offload the IP checksum of an external header in the hardware. The
356 * flag PKT_TX_OUTER_IPV4 should also be set by the application, although
357 * a PMD will only check PKT_TX_OUTER_IP_CKSUM.
358 * - fill the mbuf offload information: outer_l2_len, outer_l3_len
360 #define PKT_TX_OUTER_IP_CKSUM (1ULL << 58)
363 * Packet outer header is IPv4. This flag must be set when using any
364 * outer offload feature (L3 or L4 checksum) to tell the NIC that the
365 * outer header of the tunneled packet is an IPv4 packet.
367 #define PKT_TX_OUTER_IPV4 (1ULL << 59)
370 * Packet outer header is IPv6. This flag must be set when using any
371 * outer offload feature (L4 checksum) to tell the NIC that the outer
372 * header of the tunneled packet is an IPv6 packet.
374 #define PKT_TX_OUTER_IPV6 (1ULL << 60)
377 * Bitmask of all supported packet Tx offload features flags,
378 * which can be set for a packet.
380 #define PKT_TX_OFFLOAD_MASK ( \
381 PKT_TX_OUTER_IPV6 | \
382 PKT_TX_OUTER_IPV4 | \
383 PKT_TX_OUTER_IP_CKSUM | \
389 PKT_TX_IEEE1588_TMST | \
392 PKT_TX_TUNNEL_MASK | \
394 PKT_TX_SEC_OFFLOAD | \
396 PKT_TX_OUTER_UDP_CKSUM | \
400 * Mbuf having an external buffer attached. shinfo in mbuf must be filled.
402 #define EXT_ATTACHED_MBUF (1ULL << 61)
404 #define IND_ATTACHED_MBUF (1ULL << 62) /**< Indirect attached mbuf */
406 /** Alignment constraint of mbuf private area. */
407 #define RTE_MBUF_PRIV_ALIGN 8
410 * Get the name of an RX offload flag
413 * The mask describing the flag.
415 * The name of this flag, or NULL if it's not a valid RX flag.
417 const char *rte_get_rx_ol_flag_name(uint64_t mask);
420 * Dump the list of RX offload flags in a buffer
423 * The mask describing the RX flags.
427 * The length of the buffer.
429 * 0 on success, (-1) on error.
431 int rte_get_rx_ol_flag_list(uint64_t mask, char *buf, size_t buflen);
434 * Get the name of a TX offload flag
437 * The mask describing the flag. Usually only one bit must be set.
438 * Several bits can be given if they belong to the same mask.
439 * Ex: PKT_TX_L4_MASK.
441 * The name of this flag, or NULL if it's not a valid TX flag.
443 const char *rte_get_tx_ol_flag_name(uint64_t mask);
446 * Dump the list of TX offload flags in a buffer
449 * The mask describing the TX flags.
453 * The length of the buffer.
455 * 0 on success, (-1) on error.
457 int rte_get_tx_ol_flag_list(uint64_t mask, char *buf, size_t buflen);
460 * Some NICs need at least a 2KB buffer to RX a standard Ethernet frame
461 * without splitting it into multiple segments.
462 * So, for mbufs that are planned to be used in RX/TX, the recommended
463 * minimal buffer length is 2KB + RTE_PKTMBUF_HEADROOM.
465 #define RTE_MBUF_DEFAULT_DATAROOM 2048
466 #define RTE_MBUF_DEFAULT_BUF_SIZE \
467 (RTE_MBUF_DEFAULT_DATAROOM + RTE_PKTMBUF_HEADROOM)
469 /* Define a set of marker types that can be used to refer to set points in the mbuf. */
472 typedef void *MARKER[0]; /**< generic marker for a point in a structure */
474 typedef uint8_t MARKER8[0]; /**< generic marker with 1B alignment */
476 typedef uint64_t MARKER64[0]; /**< marker that allows us to overwrite 8 bytes
477 * with a single assignment */
479 struct rte_mbuf_sched {
480 uint32_t queue_id; /**< Queue ID. */
481 uint8_t traffic_class;
482 /**< Traffic class ID. Traffic class 0
483 * is the highest priority traffic class.
486 /**< Color. @see enum rte_color.*/
487 uint16_t reserved; /**< Reserved. */
488 }; /**< Hierarchical scheduler */
491 * Enum for the tx_offload bit-field lengths and offsets.
492 * Defines the layout of the rte_mbuf tx_offload field.
495 RTE_MBUF_L2_LEN_BITS = 7,
496 RTE_MBUF_L3_LEN_BITS = 9,
497 RTE_MBUF_L4_LEN_BITS = 8,
498 RTE_MBUF_TSO_SEGSZ_BITS = 16,
499 RTE_MBUF_OUTL3_LEN_BITS = 9,
500 RTE_MBUF_OUTL2_LEN_BITS = 7,
501 RTE_MBUF_TXOFLD_UNUSED_BITS = sizeof(uint64_t) * CHAR_BIT -
502 RTE_MBUF_L2_LEN_BITS -
503 RTE_MBUF_L3_LEN_BITS -
504 RTE_MBUF_L4_LEN_BITS -
505 RTE_MBUF_TSO_SEGSZ_BITS -
506 RTE_MBUF_OUTL3_LEN_BITS -
507 RTE_MBUF_OUTL2_LEN_BITS,
508 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
509 RTE_MBUF_L2_LEN_OFS =
510 sizeof(uint64_t) * CHAR_BIT - RTE_MBUF_L2_LEN_BITS,
511 RTE_MBUF_L3_LEN_OFS = RTE_MBUF_L2_LEN_OFS - RTE_MBUF_L3_LEN_BITS,
512 RTE_MBUF_L4_LEN_OFS = RTE_MBUF_L3_LEN_OFS - RTE_MBUF_L4_LEN_BITS,
513 RTE_MBUF_TSO_SEGSZ_OFS = RTE_MBUF_L4_LEN_OFS - RTE_MBUF_TSO_SEGSZ_BITS,
514 RTE_MBUF_OUTL3_LEN_OFS =
515 RTE_MBUF_TSO_SEGSZ_OFS - RTE_MBUF_OUTL3_LEN_BITS,
516 RTE_MBUF_OUTL2_LEN_OFS =
517 RTE_MBUF_OUTL3_LEN_OFS - RTE_MBUF_OUTL2_LEN_BITS,
518 RTE_MBUF_TXOFLD_UNUSED_OFS =
519 RTE_MBUF_OUTL2_LEN_OFS - RTE_MBUF_TXOFLD_UNUSED_BITS,
521 RTE_MBUF_L2_LEN_OFS = 0,
522 RTE_MBUF_L3_LEN_OFS = RTE_MBUF_L2_LEN_OFS + RTE_MBUF_L2_LEN_BITS,
523 RTE_MBUF_L4_LEN_OFS = RTE_MBUF_L3_LEN_OFS + RTE_MBUF_L3_LEN_BITS,
524 RTE_MBUF_TSO_SEGSZ_OFS = RTE_MBUF_L4_LEN_OFS + RTE_MBUF_L4_LEN_BITS,
525 RTE_MBUF_OUTL3_LEN_OFS =
526 RTE_MBUF_TSO_SEGSZ_OFS + RTE_MBUF_TSO_SEGSZ_BITS,
527 RTE_MBUF_OUTL2_LEN_OFS =
528 RTE_MBUF_OUTL3_LEN_OFS + RTE_MBUF_OUTL3_LEN_BITS,
529 RTE_MBUF_TXOFLD_UNUSED_OFS =
530 RTE_MBUF_OUTL2_LEN_OFS + RTE_MBUF_OUTL2_LEN_BITS,
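/*
 * Illustrative sketch (an addition): the *_OFS/*_BITS values above let a
 * raw tx_offload word be composed with plain shifts on either byte order,
 * e.g. for a simple non-tunnel packet:
 *
 *     uint64_t txo = (uint64_t)l2_len << RTE_MBUF_L2_LEN_OFS |
 *         (uint64_t)l3_len << RTE_MBUF_L3_LEN_OFS |
 *         (uint64_t)l4_len << RTE_MBUF_L4_LEN_OFS |
 *         (uint64_t)tso_segsz << RTE_MBUF_TSO_SEGSZ_OFS;
 */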
535 * The generic rte_mbuf, containing a packet mbuf.
540 void *buf_addr; /**< Virtual address of segment buffer. */
542 * Physical address of segment buffer.
543 * Force alignment to 8-bytes, so as to ensure we have the exact
544 * same mbuf cacheline0 layout for 32-bit and 64-bit. This makes
545 * working on vector drivers easier.
550 rte_iova_t buf_physaddr; /**< deprecated */
551 } __rte_aligned(sizeof(rte_iova_t));
553 /* next 8 bytes are initialised on RX descriptor rearm */
558 * Reference counter. Its size should be at least equal to the size
559 * of the port field (16 bits), to support zero-copy broadcast.
560 * It should only be accessed using the following functions:
561 * rte_mbuf_refcnt_update(), rte_mbuf_refcnt_read(), and
562 * rte_mbuf_refcnt_set(). The functionality of these functions (atomic,
563 * or non-atomic) is controlled by the CONFIG_RTE_MBUF_REFCNT_ATOMIC config option.
568 rte_atomic16_t refcnt_atomic; /**< Atomically accessed refcnt */
569 uint16_t refcnt; /**< Non-atomically accessed refcnt */
571 uint16_t nb_segs; /**< Number of segments. */
573 /** Input port (16 bits to support more than 256 virtual ports).
574 * The event eth Tx adapter uses this field to specify the output port.
578 uint64_t ol_flags; /**< Offload features. */
580 /* remaining bytes are set on RX when pulling packet from descriptor */
581 MARKER rx_descriptor_fields1;
584 * The packet type, which is the combination of outer/inner L2, L3, L4
585 * and tunnel types. The packet_type is about data really present in the
586 * mbuf. Example: if vlan stripping is enabled, a received vlan packet
587 * would have RTE_PTYPE_L2_ETHER and not RTE_PTYPE_L2_VLAN because the
588 * vlan is stripped from the data.
592 uint32_t packet_type; /**< L2/L3/L4 and tunnel information. */
594 uint32_t l2_type:4; /**< (Outer) L2 type. */
595 uint32_t l3_type:4; /**< (Outer) L3 type. */
596 uint32_t l4_type:4; /**< (Outer) L4 type. */
597 uint32_t tun_type:4; /**< Tunnel type. */
600 uint8_t inner_esp_next_proto;
601 /**< ESP next protocol type, valid if
602 * RTE_PTYPE_TUNNEL_ESP tunnel type is set
607 uint8_t inner_l2_type:4;
608 /**< Inner L2 type. */
609 uint8_t inner_l3_type:4;
610 /**< Inner L3 type. */
613 uint32_t inner_l4_type:4; /**< Inner L4 type. */
617 uint32_t pkt_len; /**< Total pkt len: sum of all segments. */
618 uint16_t data_len; /**< Amount of data in segment buffer. */
619 /** VLAN TCI (CPU order), valid if PKT_RX_VLAN is set. */
625 uint32_t rss; /**< RSS hash result if RSS enabled */
633 /**< Second 4 flexible bytes */
636 /**< First 4 flexible bytes or FD ID, dependent
637 * on PKT_RX_FDIR_* flag in ol_flags.
639 } fdir; /**< Filter identifier if FDIR enabled */
640 struct rte_mbuf_sched sched;
641 /**< Hierarchical scheduler : 8 bytes */
646 /**< The event eth Tx adapter uses this field
647 * to store Tx queue id.
648 * @see rte_event_eth_tx_adapter_txq_set()
650 } txadapter; /**< Eventdev ethdev Tx adapter */
651 /**< User defined tags. See rte_distributor_process() */
653 } hash; /**< hash information */
656 * Application specific metadata value
657 * for egress flow rule match.
658 * Valid if PKT_TX_METADATA is set.
659 * Located here to allow conjunct use
660 * with hash.sched.hi.
662 uint32_t tx_metadata;
667 /** Outer VLAN TCI (CPU order), valid if PKT_RX_QINQ is set. */
668 uint16_t vlan_tci_outer;
670 uint16_t buf_len; /**< Length of segment buffer. */
672 /** Valid if PKT_RX_TIMESTAMP is set. The unit and time reference
673 * are not normalized but are always the same for a given port.
674 * Some devices allow querying rte_eth_read_clock, which will return the
675 * current device timestamp.
679 /* second cache line - fields only used in slow path or on TX */
680 MARKER cacheline1 __rte_cache_min_aligned;
684 void *userdata; /**< Can be used for external metadata */
685 uint64_t udata64; /**< Allow 8-byte userdata on 32-bit */
688 struct rte_mempool *pool; /**< Pool from which mbuf was allocated. */
689 struct rte_mbuf *next; /**< Next segment of scattered packet. */
691 /* fields to support TX offloads */
694 uint64_t tx_offload; /**< combined for easy fetch */
697 uint64_t l2_len:RTE_MBUF_L2_LEN_BITS;
698 /**< L2 (MAC) Header Length for non-tunneling pkt.
699 * Outer_L4_len + ... + Inner_L2_len for tunneling pkt.
701 uint64_t l3_len:RTE_MBUF_L3_LEN_BITS;
702 /**< L3 (IP) Header Length. */
703 uint64_t l4_len:RTE_MBUF_L4_LEN_BITS;
704 /**< L4 (TCP/UDP) Header Length. */
705 uint64_t tso_segsz:RTE_MBUF_TSO_SEGSZ_BITS;
706 /**< TCP TSO segment size */
709 * Fields for Tx offloading of tunnels.
710 * These are undefined for packets which don't request
711 * any tunnel offloads (outer IP or UDP checksum, tunnel TSO).
714 * PMDs should not use these fields unconditionally
715 * when calculating offsets.
717 * Applications are expected to set appropriate tunnel
718 * offload flags when they fill in these fields.
720 uint64_t outer_l3_len:RTE_MBUF_OUTL3_LEN_BITS;
721 /**< Outer L3 (IP) Hdr Length. */
722 uint64_t outer_l2_len:RTE_MBUF_OUTL2_LEN_BITS;
723 /**< Outer L2 (MAC) Hdr Length. */
725 /* uint64_t unused:RTE_MBUF_TXOFLD_UNUSED_BITS; */
729 /** Size of the application private data. In case of an indirect
730 * mbuf, it stores the direct mbuf private data size. */
733 /** Timesync flags for use with IEEE1588. */
736 /** Sequence number. See also rte_reorder_insert(). */
739 /** Shared data for external buffer attached to mbuf. See
740 * rte_pktmbuf_attach_extbuf().
742 struct rte_mbuf_ext_shared_info *shinfo;
744 uint64_t dynfield1; /**< Reserved for dynamic fields. */
745 uint64_t dynfield2; /**< Reserved for dynamic fields. */
746 } __rte_cache_aligned;
749 * Function typedef of callback to free externally attached buffer.
751 typedef void (*rte_mbuf_extbuf_free_callback_t)(void *addr, void *opaque);
754 * Shared data at the end of an external buffer.
756 struct rte_mbuf_ext_shared_info {
757 rte_mbuf_extbuf_free_callback_t free_cb; /**< Free callback function */
758 void *fcb_opaque; /**< Free callback argument */
759 rte_atomic16_t refcnt_atomic; /**< Atomically accessed refcnt */
762 /**< Maximum number of nb_segs allowed. */
763 #define RTE_MBUF_MAX_NB_SEGS UINT16_MAX
766 * Prefetch the first part of the mbuf
768 * The first 64 bytes of the mbuf correspond to fields that are used early
769 * in the receive path. If the cache line of the architecture is larger than
770 * 64B, the second part will also be prefetched.
773 * The pointer to the mbuf.
776 rte_mbuf_prefetch_part1(struct rte_mbuf *m)
778 rte_prefetch0(&m->cacheline0);
782 * Prefetch the second part of the mbuf
784 * The next 64 bytes of the mbuf correspond to fields that are used in the
785 * transmit path. If the cache line of the architecture is larger than 64B,
786 * this function does nothing as it is expected that the full mbuf is already in cache.
790 * The pointer to the mbuf.
793 rte_mbuf_prefetch_part2(struct rte_mbuf *m)
795 #if RTE_CACHE_LINE_SIZE == 64
796 rte_prefetch0(&m->cacheline1);
803 static inline uint16_t rte_pktmbuf_priv_size(struct rte_mempool *mp);
806 * Return the IO address of the beginning of the mbuf data
809 * The pointer to the mbuf.
811 * The IO address of the beginning of the mbuf data
813 static inline rte_iova_t
814 rte_mbuf_data_iova(const struct rte_mbuf *mb)
816 return mb->buf_iova + mb->data_off;
820 static inline phys_addr_t
821 rte_mbuf_data_dma_addr(const struct rte_mbuf *mb)
823 return rte_mbuf_data_iova(mb);
827 * Return the default IO address of the beginning of the mbuf data
829 * This function is used by drivers in their receive function, as it
830 * returns the location where data should be written by the NIC, taking
831 * the default headroom into account.
834 * The pointer to the mbuf.
836 * The IO address of the beginning of the mbuf data
838 static inline rte_iova_t
839 rte_mbuf_data_iova_default(const struct rte_mbuf *mb)
841 return mb->buf_iova + RTE_PKTMBUF_HEADROOM;
845 static inline phys_addr_t
846 rte_mbuf_data_dma_addr_default(const struct rte_mbuf *mb)
848 return rte_mbuf_data_iova_default(mb);
852 * Return the mbuf owning the data buffer address of an indirect mbuf.
855 * The pointer to the indirect mbuf.
857 * The address of the direct mbuf corresponding to buffer_addr.
859 static inline struct rte_mbuf *
860 rte_mbuf_from_indirect(struct rte_mbuf *mi)
862 return (struct rte_mbuf *)RTE_PTR_SUB(mi->buf_addr, sizeof(*mi) + mi->priv_size);
866 * Return address of buffer embedded in the given mbuf.
868 * The return value shall be the same as mb->buf_addr if the mbuf is already
869 * initialized and direct. However, this API is useful if mempool of the
870 * mbuf is already known because it doesn't need to access mbuf contents in
871 * order to get the mempool pointer.
874 * @b EXPERIMENTAL: This API may change without prior notice.
875 * This will be used by rte_mbuf_to_baddr() which has redundant code once
876 * experimental tag is removed.
879 * The pointer to the mbuf.
881 * The pointer to the mempool of the mbuf.
883 * The pointer of the mbuf buffer.
887 rte_mbuf_buf_addr(struct rte_mbuf *mb, struct rte_mempool *mp)
889 return (char *)mb + sizeof(*mb) + rte_pktmbuf_priv_size(mp);
893 * Return the default address of the beginning of the mbuf data.
896 * @b EXPERIMENTAL: This API may change without prior notice.
899 * The pointer to the mbuf.
901 * The pointer of the beginning of the mbuf data.
905 rte_mbuf_data_addr_default(__rte_unused struct rte_mbuf *mb)
907 /* gcc complains about calling this experimental function even
908 * when not using it. Hide it with ALLOW_EXPERIMENTAL_API.
910 #ifdef ALLOW_EXPERIMENTAL_API
911 return rte_mbuf_buf_addr(mb, mb->pool) + RTE_PKTMBUF_HEADROOM;
918 * Return address of buffer embedded in the given mbuf.
920 * @note: Accessing the mempool pointer of a mbuf is expensive because the
921 * pointer is stored in the 2nd cache line of the mbuf. If the mempool is
922 * known, it is better not to reference the mempool pointer in the mbuf;
923 * calling rte_mbuf_buf_addr() instead is more efficient.
926 * The pointer to the mbuf.
928 * The address of the data buffer owned by the mbuf.
931 rte_mbuf_to_baddr(struct rte_mbuf *md)
933 #ifdef ALLOW_EXPERIMENTAL_API
934 return rte_mbuf_buf_addr(md, md->pool);
937 buffer_addr = (char *)md + sizeof(*md) + rte_pktmbuf_priv_size(md->pool);
943 * Return the starting address of the private data area embedded in the given mbuf.
946 * Note that no check is made to ensure that a private data area
947 * actually exists in the supplied mbuf.
950 * The pointer to the mbuf.
952 * The starting address of the private data area of the given mbuf.
956 rte_mbuf_to_priv(struct rte_mbuf *m)
958 return RTE_PTR_ADD(m, sizeof(struct rte_mbuf));
962 * Returns TRUE if given mbuf is cloned by mbuf indirection, or FALSE
965 * If a mbuf has its data in another mbuf and references it by mbuf
966 * indirection, this mbuf can be defined as a cloned mbuf.
968 #define RTE_MBUF_CLONED(mb) ((mb)->ol_flags & IND_ATTACHED_MBUF)
971 * Returns TRUE if given mbuf has an external buffer, or FALSE otherwise.
973 * External buffer is a user-provided anonymous buffer.
975 #define RTE_MBUF_HAS_EXTBUF(mb) ((mb)->ol_flags & EXT_ATTACHED_MBUF)
978 * Returns TRUE if given mbuf is direct, or FALSE otherwise.
980 * If a mbuf embeds its own data after the rte_mbuf structure, this mbuf
981 * can be defined as a direct mbuf.
983 #define RTE_MBUF_DIRECT(mb) \
984 (!((mb)->ol_flags & (IND_ATTACHED_MBUF | EXT_ATTACHED_MBUF)))
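/*
 * Illustrative sketch (an addition): before modifying packet data in
 * place, an application can combine the macros above with the refcnt
 * accessors defined later in this file to check for exclusive ownership:
 *
 *     if (RTE_MBUF_DIRECT(m) && rte_mbuf_refcnt_read(m) == 1) {
 *         // sole owner of an embedded buffer: safe to modify in place
 *     }
 */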
987 * Private data in case of pktmbuf pool.
989 * A structure that contains some pktmbuf_pool-specific data that are
990 * appended after the mempool structure (in private data).
992 struct rte_pktmbuf_pool_private {
993 uint16_t mbuf_data_room_size; /**< Size of data space in each mbuf. */
994 uint16_t mbuf_priv_size; /**< Size of private area in each mbuf. */
997 #ifdef RTE_LIBRTE_MBUF_DEBUG
999 /** check mbuf type in debug mode */
1000 #define __rte_mbuf_sanity_check(m, is_h) rte_mbuf_sanity_check(m, is_h)
1002 #else /* RTE_LIBRTE_MBUF_DEBUG */
1004 /** check mbuf type in debug mode */
1005 #define __rte_mbuf_sanity_check(m, is_h) do { } while (0)
1007 #endif /* RTE_LIBRTE_MBUF_DEBUG */
1009 #ifdef RTE_MBUF_REFCNT_ATOMIC
1012 * Reads the value of an mbuf's refcnt.
1016 * Reference count number.
1018 static inline uint16_t
1019 rte_mbuf_refcnt_read(const struct rte_mbuf *m)
1021 return (uint16_t)(rte_atomic16_read(&m->refcnt_atomic));
1025 * Sets an mbuf's refcnt to a defined value.
1032 rte_mbuf_refcnt_set(struct rte_mbuf *m, uint16_t new_value)
1034 rte_atomic16_set(&m->refcnt_atomic, (int16_t)new_value);
1038 static inline uint16_t
1039 __rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
1041 return (uint16_t)(rte_atomic16_add_return(&m->refcnt_atomic, value));
1045 * Adds given value to an mbuf's refcnt and returns its new value.
1049 * Value to add/subtract
1053 static inline uint16_t
1054 rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
1057 * The atomic_add is an expensive operation, so we don't want to
1058 * call it in the case where we know we are the unique holder of
1059 * this mbuf (i.e. ref_cnt == 1). Otherwise, an atomic
1060 * operation has to be used because concurrent accesses on the
1061 * reference counter can occur.
1063 if (likely(rte_mbuf_refcnt_read(m) == 1)) {
1065 rte_mbuf_refcnt_set(m, (uint16_t)value);
1066 return (uint16_t)value;
1069 return __rte_mbuf_refcnt_update(m, value);
1072 #else /* ! RTE_MBUF_REFCNT_ATOMIC */
1075 static inline uint16_t
1076 __rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
1078 m->refcnt = (uint16_t)(m->refcnt + value);
1083 * Adds given value to an mbuf's refcnt and returns its new value.
1085 static inline uint16_t
1086 rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
1088 return __rte_mbuf_refcnt_update(m, value);
1092 * Reads the value of an mbuf's refcnt.
1094 static inline uint16_t
1095 rte_mbuf_refcnt_read(const struct rte_mbuf *m)
1101 * Sets an mbuf's refcnt to the defined value.
1104 rte_mbuf_refcnt_set(struct rte_mbuf *m, uint16_t new_value)
1106 m->refcnt = new_value;
1109 #endif /* RTE_MBUF_REFCNT_ATOMIC */
1112 * Reads the refcnt of an external buffer.
1115 * Shared data of the external buffer.
1117 * Reference count number.
1119 static inline uint16_t
1120 rte_mbuf_ext_refcnt_read(const struct rte_mbuf_ext_shared_info *shinfo)
1122 return (uint16_t)(rte_atomic16_read(&shinfo->refcnt_atomic));
1126 * Set refcnt of an external buffer.
1129 * Shared data of the external buffer.
1134 rte_mbuf_ext_refcnt_set(struct rte_mbuf_ext_shared_info *shinfo,
1137 rte_atomic16_set(&shinfo->refcnt_atomic, (int16_t)new_value);
1141 * Add given value to refcnt of an external buffer and return its new value.
1145 * Shared data of the external buffer.
1147 * Value to add/subtract
1151 static inline uint16_t
1152 rte_mbuf_ext_refcnt_update(struct rte_mbuf_ext_shared_info *shinfo,
1155 if (likely(rte_mbuf_ext_refcnt_read(shinfo) == 1)) {
1157 rte_mbuf_ext_refcnt_set(shinfo, (uint16_t)value);
1158 return (uint16_t)value;
1161 return (uint16_t)rte_atomic16_add_return(&shinfo->refcnt_atomic, value);
1164 /** Mbuf prefetch */
1165 #define RTE_MBUF_PREFETCH_TO_FREE(m) do { \
1172 * Sanity checks on an mbuf.
1174 * Check the consistency of the given mbuf. The function will cause a
1175 * panic if corruption is detected.
1178 * The mbuf to be checked.
1180 * True if the mbuf is a packet header, false if it is a sub-segment
1181 * of a packet (in this case, some fields like nb_segs are not checked)
1184 rte_mbuf_sanity_check(const struct rte_mbuf *m, int is_header);
1187 * Sanity checks on a mbuf.
1189 * Almost like rte_mbuf_sanity_check(), but this function gives the reason
1190 * if corruption is detected rather than panic.
1193 * The mbuf to be checked.
1195 * True if the mbuf is a packet header, false if it is a sub-segment
1196 * of a packet (in this case, some fields like nb_segs are not checked)
1198 * A reference to a string pointer where to store the reason why a mbuf is
1199 * considered invalid.
1201 * - 0 if no issue has been found, reason is left untouched.
1202 * - -1 if a problem is detected, reason then points to a string describing
1203 * the reason why the mbuf is deemed invalid.
1206 int rte_mbuf_check(const struct rte_mbuf *m, int is_header,
1207 const char **reason);
1209 #define MBUF_RAW_ALLOC_CHECK(m) do { \
1210 RTE_ASSERT(rte_mbuf_refcnt_read(m) == 1); \
1211 RTE_ASSERT((m)->next == NULL); \
1212 RTE_ASSERT((m)->nb_segs == 1); \
1213 __rte_mbuf_sanity_check(m, 0); \
1217 * Allocate an uninitialized mbuf from mempool *mp*.
1219 * This function can be used by PMDs (especially in RX functions) to
1220 * allocate an uninitialized mbuf. The driver is responsible for
1221 * initializing all the required fields. See rte_pktmbuf_reset().
1222 * For standard needs, prefer rte_pktmbuf_alloc().
1224 * The caller can expect that the following fields of the mbuf structure
1225 * are initialized: buf_addr, buf_iova, buf_len, refcnt=1, nb_segs=1,
1226 * next=NULL, pool, priv_size. The other fields must be initialized by the driver.
1230 * The mempool from which mbuf is allocated.
1232 * - The pointer to the new mbuf on success.
1233 * - NULL if allocation failed.
1235 static inline struct rte_mbuf *rte_mbuf_raw_alloc(struct rte_mempool *mp)
1239 if (rte_mempool_get(mp, (void **)&m) < 0)
1241 MBUF_RAW_ALLOC_CHECK(m);
1246 * Put mbuf back into its original mempool.
1248 * The caller must ensure that the mbuf is direct and properly
1249 * reinitialized (refcnt=1, next=NULL, nb_segs=1), as done by
1250 * rte_pktmbuf_prefree_seg().
1252 * This function should be used with care, when optimization is
1253 * required. For standard needs, prefer rte_pktmbuf_free() or
1254 * rte_pktmbuf_free_seg().
1257 * The mbuf to be freed.
1259 static __rte_always_inline void
1260 rte_mbuf_raw_free(struct rte_mbuf *m)
1262 RTE_ASSERT(RTE_MBUF_DIRECT(m));
1263 RTE_ASSERT(rte_mbuf_refcnt_read(m) == 1);
1264 RTE_ASSERT(m->next == NULL);
1265 RTE_ASSERT(m->nb_segs == 1);
1266 __rte_mbuf_sanity_check(m, 0);
1267 rte_mempool_put(m->pool, m);
1271 * The packet mbuf constructor.
1273 * This function initializes some fields in the mbuf structure that are
1274 * not modified by the user once created (origin pool, buffer start
1275 * address, and so on). This function is given as a callback function to
1276 * rte_mempool_obj_iter() or rte_mempool_create() at pool creation time.
1279 * The mempool from which mbufs originate.
1281 * A pointer that can be used by the user to retrieve useful information
1282 * for mbuf initialization. This pointer is the opaque argument passed to
1283 * rte_mempool_obj_iter() or rte_mempool_create().
1285 * The mbuf to initialize.
1287 * The index of the mbuf in the pool table.
1289 void rte_pktmbuf_init(struct rte_mempool *mp, void *opaque_arg,
1290 void *m, unsigned i);
1294 * A packet mbuf pool constructor.
1296 * This function initializes the mempool private data in the case of a
1297 * pktmbuf pool. This private data is needed by the driver. The
1298 * function must be called on the mempool before it is used, or it
1299 * can be given as a callback function to rte_mempool_create() at
1300 * pool creation. It can be extended by the user, for example, to
1301 * provide another packet size.
1304 * The mempool from which mbufs originate.
1306 * A pointer that can be used by the user to retrieve useful information
1307 * for mbuf initialization. This pointer is the opaque argument passed to
1308 * rte_mempool_create().
1310 void rte_pktmbuf_pool_init(struct rte_mempool *mp, void *opaque_arg);
1313 * Create a mbuf pool.
1315 * This function creates and initializes a packet mbuf pool. It is
1316 * a wrapper to rte_mempool functions.
1319 * The name of the mbuf pool.
1321 * The number of elements in the mbuf pool. The optimum size (in terms
1322 * of memory usage) for a mempool is when n is a power of two minus one: n = (2^q - 1).
1325 * Size of the per-core object cache. See rte_mempool_create() for details.
1328 * Size of the application private area between the rte_mbuf structure
1329 * and the data buffer. This value must be aligned to RTE_MBUF_PRIV_ALIGN.
1330 * @param data_room_size
1331 * Size of data buffer in each mbuf, including RTE_PKTMBUF_HEADROOM.
1333 * The socket identifier where the memory should be allocated. The
1334 * value can be *SOCKET_ID_ANY* if there is no NUMA constraint for the
1337 * The pointer to the new allocated mempool, on success. NULL on error
1338 * with rte_errno set appropriately. Possible rte_errno values include:
1339 * - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure
1340 * - E_RTE_SECONDARY - function was called from a secondary process instance
1341 * - EINVAL - cache size provided is too large, or priv_size is not aligned.
1342 * - ENOSPC - the maximum number of memzones has already been allocated
1343 * - EEXIST - a memzone with the same name already exists
1344 * - ENOMEM - no appropriate memory area found in which to create memzone
1346 struct rte_mempool *
1347 rte_pktmbuf_pool_create(const char *name, unsigned n,
1348 unsigned cache_size, uint16_t priv_size, uint16_t data_room_size,
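/*
 * Usage sketch (illustrative; the pool name, element count and cache size
 * are example values, and rte_socket_id()/rte_errno/rte_panic() are
 * assumed from rte_lcore.h, rte_errno.h and rte_debug.h):
 *
 *     struct rte_mempool *mp = rte_pktmbuf_pool_create("mbuf_pool",
 *         8191, 256, 0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
 *     if (mp == NULL)
 *         rte_panic("cannot create mbuf pool, rte_errno=%d\n", rte_errno);
 */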
1352 * Create a mbuf pool with a given mempool ops name
1354 * This function creates and initializes a packet mbuf pool. It is
1355 * a wrapper to rte_mempool functions.
1358 * The name of the mbuf pool.
1360 * The number of elements in the mbuf pool. The optimum size (in terms
1361 * of memory usage) for a mempool is when n is a power of two minus one: n = (2^q - 1).
1364 * Size of the per-core object cache. See rte_mempool_create() for details.
1367 * Size of the application private area between the rte_mbuf structure
1368 * and the data buffer. This value must be aligned to RTE_MBUF_PRIV_ALIGN.
1369 * @param data_room_size
1370 * Size of data buffer in each mbuf, including RTE_PKTMBUF_HEADROOM.
1372 * The socket identifier where the memory should be allocated. The
1373 * value can be *SOCKET_ID_ANY* if there is no NUMA constraint for the
1376 * The mempool ops name to be used for this mempool instead of
1377 * default mempool. The value can be *NULL* to use default mempool.
1379 * The pointer to the new allocated mempool, on success. NULL on error
1380 * with rte_errno set appropriately. Possible rte_errno values include:
1381 * - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure
1382 * - E_RTE_SECONDARY - function was called from a secondary process instance
1383 * - EINVAL - cache size provided is too large, or priv_size is not aligned.
1384 * - ENOSPC - the maximum number of memzones has already been allocated
1385 * - EEXIST - a memzone with the same name already exists
1386 * - ENOMEM - no appropriate memory area found in which to create memzone
1388 struct rte_mempool *
1389 rte_pktmbuf_pool_create_by_ops(const char *name, unsigned int n,
1390 unsigned int cache_size, uint16_t priv_size, uint16_t data_room_size,
1391 int socket_id, const char *ops_name);
1394 * Get the data room size of mbufs stored in a pktmbuf_pool
1396 * The data room size is the amount of data that can be stored in a
1397 * mbuf including the headroom (RTE_PKTMBUF_HEADROOM).
1400 * The packet mbuf pool.
1402 * The data room size of mbufs stored in this mempool.
1404 static inline uint16_t
1405 rte_pktmbuf_data_room_size(struct rte_mempool *mp)
1407 struct rte_pktmbuf_pool_private *mbp_priv;
1409 mbp_priv = (struct rte_pktmbuf_pool_private *)rte_mempool_get_priv(mp);
1410 return mbp_priv->mbuf_data_room_size;
1414 * Get the application private size of mbufs stored in a pktmbuf_pool
1416 * The private size of mbuf is a zone located between the rte_mbuf
1417 * structure and the data buffer where an application can store data
1418 * associated to a packet.
1421 * The packet mbuf pool.
1423 * The private size of mbufs stored in this mempool.
1425 static inline uint16_t
1426 rte_pktmbuf_priv_size(struct rte_mempool *mp)
1428 struct rte_pktmbuf_pool_private *mbp_priv;
1430 mbp_priv = (struct rte_pktmbuf_pool_private *)rte_mempool_get_priv(mp);
1431 return mbp_priv->mbuf_priv_size;
1435 * Reset the data_off field of a packet mbuf to its default value.
1437 * The given mbuf must have only one segment, which should be empty.
1440 * The packet mbuf whose data_off field will be reset.
1442 static inline void rte_pktmbuf_reset_headroom(struct rte_mbuf *m)
1444 m->data_off = (uint16_t)RTE_MIN((uint16_t)RTE_PKTMBUF_HEADROOM,
1445 (uint16_t)m->buf_len);
1449 * Reset the fields of a packet mbuf to their default values.
1451 * The given mbuf must have only one segment.
1454 * The packet mbuf to be reset.
1456 #define MBUF_INVALID_PORT UINT16_MAX
1458 static inline void rte_pktmbuf_reset(struct rte_mbuf *m)
1464 m->vlan_tci_outer = 0;
1466 m->port = MBUF_INVALID_PORT;
1470 rte_pktmbuf_reset_headroom(m);
1473 __rte_mbuf_sanity_check(m, 1);
1477 * Allocate a new mbuf from a mempool.
1479 * This new mbuf contains one segment, which has a length of 0. The pointer
1480 * to data is initialized to have some bytes of headroom in the buffer
1481 * (if buffer size allows).
1484 * The mempool from which the mbuf is allocated.
1486 * - The pointer to the new mbuf on success.
1487 * - NULL if allocation failed.
1489 static inline struct rte_mbuf *rte_pktmbuf_alloc(struct rte_mempool *mp)
1492 if ((m = rte_mbuf_raw_alloc(mp)) != NULL)
1493 rte_pktmbuf_reset(m);
1498 * Allocate a bulk of mbufs, initialize refcnt and reset the fields to default values.
1502 * The mempool from which mbufs are allocated.
1504 * Array of pointers to mbufs
1509 * - -ENOENT: Not enough entries in the mempool; no mbufs are retrieved.
1511 static inline int rte_pktmbuf_alloc_bulk(struct rte_mempool *pool,
1512 struct rte_mbuf **mbufs, unsigned count)
1517 rc = rte_mempool_get_bulk(pool, (void **)mbufs, count);
1521 /* To understand Duff's device on loop unwinding optimization, see
1522 * https://en.wikipedia.org/wiki/Duff's_device.
1523 * Here a while() loop is used rather than do {} while() to avoid an extra
1524 * check if count is zero.
1526 switch (count % 4) {
1528 while (idx != count) {
1529 MBUF_RAW_ALLOC_CHECK(mbufs[idx]);
1530 rte_pktmbuf_reset(mbufs[idx]);
1534 MBUF_RAW_ALLOC_CHECK(mbufs[idx]);
1535 rte_pktmbuf_reset(mbufs[idx]);
1539 MBUF_RAW_ALLOC_CHECK(mbufs[idx]);
1540 rte_pktmbuf_reset(mbufs[idx]);
1544 MBUF_RAW_ALLOC_CHECK(mbufs[idx]);
1545 rte_pktmbuf_reset(mbufs[idx]);
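/*
 * Usage sketch (illustrative; the burst size is an example value):
 *
 *     struct rte_mbuf *burst[32];
 *
 *     if (rte_pktmbuf_alloc_bulk(pool, burst, 32) == 0) {
 *         // all 32 mbufs were allocated and reset
 *         unsigned int i;
 *         for (i = 0; i < 32; i++)
 *             rte_pktmbuf_free(burst[i]);
 *     }
 */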
1554 * Initialize shared data at the end of an external buffer before attaching
1555 * to a mbuf by ``rte_pktmbuf_attach_extbuf()``. This is not a mandatory
1556 * initialization but a helper function to simply spare a few bytes at the
1557 * end of the buffer for shared data. If shared data is allocated
1558 * separately, this should not be called but the application has to properly
1559 * initialize the shared data according to its needs.
1561 * The free callback and its argument are saved and the refcnt is set to 1.
1564 * The value of buf_len will be reduced to RTE_PTR_DIFF(shinfo, buf_addr)
1565 * after this initialization. This shall be used for
1566 * ``rte_pktmbuf_attach_extbuf()``
1569 * The pointer to the external buffer.
1570 * @param [in,out] buf_len
1571 * The pointer to length of the external buffer. Input value must be
1572 * larger than the size of ``struct rte_mbuf_ext_shared_info`` and
1573 * padding for alignment. If not enough, this function will return NULL.
1574 * Adjusted buffer length will be returned through this pointer.
1576 * Free callback function to call when the external buffer needs to be freed.
1579 * Argument for the free callback function.
1582 * A pointer to the initialized shared data on success, return NULL otherwise.
1585 static inline struct rte_mbuf_ext_shared_info *
1586 rte_pktmbuf_ext_shinfo_init_helper(void *buf_addr, uint16_t *buf_len,
1587 rte_mbuf_extbuf_free_callback_t free_cb, void *fcb_opaque)
1589 struct rte_mbuf_ext_shared_info *shinfo;
1590 void *buf_end = RTE_PTR_ADD(buf_addr, *buf_len);
1593 addr = RTE_PTR_ALIGN_FLOOR(RTE_PTR_SUB(buf_end, sizeof(*shinfo)),
1595 if (addr <= buf_addr)
1598 shinfo = (struct rte_mbuf_ext_shared_info *)addr;
1599 shinfo->free_cb = free_cb;
1600 shinfo->fcb_opaque = fcb_opaque;
1601 rte_mbuf_ext_refcnt_set(shinfo, 1);
1603 *buf_len = (uint16_t)RTE_PTR_DIFF(shinfo, buf_addr);
1608 * Attach an external buffer to a mbuf.
1610 * User-managed anonymous buffer can be attached to an mbuf. When attaching
1611 * it, corresponding free callback function and its argument should be
1612 * provided via shinfo. This callback function will be called once all the
1613 * mbufs are detached from the buffer (refcnt becomes zero).
1615 * The headroom for the attaching mbuf will be set to zero and this can be
1616 * properly adjusted after attachment. For example, ``rte_pktmbuf_adj()``
1617 * or ``rte_pktmbuf_reset_headroom()`` might be used.
1619 * More mbufs can be attached to the same external buffer by
1620 * ``rte_pktmbuf_attach()`` once the external buffer has been attached by
1623 * Detachment can be done by either ``rte_pktmbuf_detach_extbuf()`` or
1624 * ``rte_pktmbuf_detach()``.
1626 * Memory for shared data must be provided and user must initialize all of
1627 * the content properly, especially free callback and refcnt. The pointer
1628 * of shared data will be stored in m->shinfo.
1629 * ``rte_pktmbuf_ext_shinfo_init_helper`` can help to simply spare a few
1630 * bytes at the end of buffer for the shared data, store free callback and
1631 * its argument and set the refcnt to 1. The following is an example:
1633 * struct rte_mbuf_ext_shared_info *shinfo =
1634 * rte_pktmbuf_ext_shinfo_init_helper(buf_addr, &buf_len,
1635 * free_cb, fcb_arg);
1636 * rte_pktmbuf_attach_extbuf(m, buf_addr, buf_iova, buf_len, shinfo);
1637 * rte_pktmbuf_reset_headroom(m);
1638 * rte_pktmbuf_adj(m, data_len);
1640 * Attaching an external buffer is quite similar to mbuf indirection in
1641 * replacing buffer addresses and length of a mbuf, but with a few differences:
1642 * - When an indirect mbuf is attached, refcnt of the direct mbuf would be
1643 * 2 as long as the direct mbuf itself isn't freed after the attachment.
1644 * In such cases, the buffer area of a direct mbuf must be read-only. But
1645 * external buffer has its own refcnt and it starts from 1. Unless
1646 * multiple mbufs are attached to a mbuf having an external buffer, the
1647 * external buffer is writable.
1648 * - There's no need to allocate buffer from a mempool. Any buffer can be
1649 * attached with appropriate free callback and its IO address.
1650 * - Smaller metadata is required to maintain shared data such as refcnt.
1653 * The pointer to the mbuf.
1655 * The pointer to the external buffer.
1657 * IO address of the external buffer.
1659 * The size of the external buffer.
1661 * User-provided memory for shared data of the external buffer.
1664 rte_pktmbuf_attach_extbuf(struct rte_mbuf *m, void *buf_addr,
1665 rte_iova_t buf_iova, uint16_t buf_len,
1666 struct rte_mbuf_ext_shared_info *shinfo)
1668 /* mbuf should not be read-only */
1669 RTE_ASSERT(RTE_MBUF_DIRECT(m) && rte_mbuf_refcnt_read(m) == 1);
1670 RTE_ASSERT(shinfo->free_cb != NULL);
1672 m->buf_addr = buf_addr;
1673 m->buf_iova = buf_iova;
1674 m->buf_len = buf_len;
1679 m->ol_flags |= EXT_ATTACHED_MBUF;
1684 * Detach the external buffer attached to a mbuf, same as
1685 * ``rte_pktmbuf_detach()``
1688 * The mbuf having external buffer.
1690 #define rte_pktmbuf_detach_extbuf(m) rte_pktmbuf_detach(m)
1693 * Copy dynamic fields from m_src to m_dst.
1696 * The destination mbuf.
1701 rte_mbuf_dynfield_copy(struct rte_mbuf *m_dst, const struct rte_mbuf *m_src)
1703 m_dst->dynfield1 = m_src->dynfield1;
1704 m_dst->dynfield2 = m_src->dynfield2;
1708 * Attach packet mbuf to another packet mbuf.
1710 * If the mbuf we are attaching to isn't a direct buffer and is attached to
1711 * an external buffer, the mbuf being attached will be attached to the
1712 * external buffer instead of mbuf indirection.
1714 * Otherwise, the mbuf will be indirectly attached. After attachment we
1715 * refer the mbuf we attached as 'indirect', while mbuf we attached to as
1716 * 'direct'. The direct mbuf's reference counter is incremented.
1718 * Right now, not supported:
1719 * - attachment for already indirect mbuf (e.g. - mi has to be direct).
1720 * - mbuf we are trying to attach (mi) is used by someone else,
1721 * e.g. its reference counter is greater than 1.
1724 * The indirect packet mbuf.
1726 * The packet mbuf we're attaching to.
1728 static inline void rte_pktmbuf_attach(struct rte_mbuf *mi, struct rte_mbuf *m)
1730 RTE_ASSERT(RTE_MBUF_DIRECT(mi) &&
1731 rte_mbuf_refcnt_read(mi) == 1);
1733 if (RTE_MBUF_HAS_EXTBUF(m)) {
1734 rte_mbuf_ext_refcnt_update(m->shinfo, 1);
1735 mi->ol_flags = m->ol_flags;
1736 mi->shinfo = m->shinfo;
1738 /* if m is not direct, get the mbuf that embeds the data */
1739 rte_mbuf_refcnt_update(rte_mbuf_from_indirect(m), 1);
1740 mi->priv_size = m->priv_size;
1741 mi->ol_flags = m->ol_flags | IND_ATTACHED_MBUF;
1744 mi->buf_iova = m->buf_iova;
1745 mi->buf_addr = m->buf_addr;
1746 mi->buf_len = m->buf_len;
1748 mi->data_off = m->data_off;
1749 mi->data_len = m->data_len;
1751 mi->vlan_tci = m->vlan_tci;
1752 mi->vlan_tci_outer = m->vlan_tci_outer;
1753 mi->tx_offload = m->tx_offload;
1755 rte_mbuf_dynfield_copy(mi, m);
1758 mi->pkt_len = mi->data_len;
1760 mi->packet_type = m->packet_type;
1761 mi->timestamp = m->timestamp;
1763 __rte_mbuf_sanity_check(mi, 1);
1764 __rte_mbuf_sanity_check(m, 0);
1768 * @internal used by rte_pktmbuf_detach().
1770 * Decrement the reference counter of the external buffer. When the
1771 * reference counter becomes 0, the buffer is freed by the pre-registered callback.
1775 __rte_pktmbuf_free_extbuf(struct rte_mbuf *m)
1777 RTE_ASSERT(RTE_MBUF_HAS_EXTBUF(m));
1778 RTE_ASSERT(m->shinfo != NULL);
1780 if (rte_mbuf_ext_refcnt_update(m->shinfo, -1) == 0)
1781 m->shinfo->free_cb(m->buf_addr, m->shinfo->fcb_opaque);
1785 * @internal used by rte_pktmbuf_detach().
1787 * Decrement the direct mbuf's reference counter. When the reference
1788 * counter becomes 0, the direct mbuf is freed.
1791 __rte_pktmbuf_free_direct(struct rte_mbuf *m)
1793 struct rte_mbuf *md;
1795 RTE_ASSERT(RTE_MBUF_CLONED(m));
1797 md = rte_mbuf_from_indirect(m);
1799 if (rte_mbuf_refcnt_update(md, -1) == 0) {
1802 rte_mbuf_refcnt_set(md, 1);
1803 rte_mbuf_raw_free(md);
1808 * Detach a packet mbuf from external buffer or direct buffer.
1810 * - decrement refcnt and free the external/direct buffer if refcnt becomes zero.
1812 * - restore original mbuf address and length values.
1813 * - reset pktmbuf data and data_len to their default values.
1815 * All other fields of the given packet mbuf will be left intact.
1818 * The indirect attached packet mbuf.
1820 static inline void rte_pktmbuf_detach(struct rte_mbuf *m)
1822 struct rte_mempool *mp = m->pool;
1823 uint32_t mbuf_size, buf_len;
1826 if (RTE_MBUF_HAS_EXTBUF(m))
1827 __rte_pktmbuf_free_extbuf(m);
1829 __rte_pktmbuf_free_direct(m);
1831 priv_size = rte_pktmbuf_priv_size(mp);
1832 mbuf_size = (uint32_t)(sizeof(struct rte_mbuf) + priv_size);
1833 buf_len = rte_pktmbuf_data_room_size(mp);
1835 m->priv_size = priv_size;
1836 m->buf_addr = (char *)m + mbuf_size;
1837 m->buf_iova = rte_mempool_virt2iova(m) + mbuf_size;
1838 m->buf_len = (uint16_t)buf_len;
1839 rte_pktmbuf_reset_headroom(m);
1845 * Decrease reference counter and unlink a mbuf segment
1847 * This function does the same as a free, except that it does not
1848 * return the segment to its pool.
1849 * It decreases the reference counter, and if it reaches 0, it is
1850 * detached from its parent for an indirect mbuf.
1853 * The mbuf to be unlinked
1855 * - (m) if it is the last reference. It can be recycled or freed.
1856 * - (NULL) if the mbuf still has remaining references on it.
1858 static __rte_always_inline struct rte_mbuf *
1859 rte_pktmbuf_prefree_seg(struct rte_mbuf *m)
1861 __rte_mbuf_sanity_check(m, 0);
1863 if (likely(rte_mbuf_refcnt_read(m) == 1)) {
1865 if (!RTE_MBUF_DIRECT(m))
1866 rte_pktmbuf_detach(m);
1868 if (m->next != NULL) {
1875 } else if (__rte_mbuf_refcnt_update(m, -1) == 0) {
1877 if (!RTE_MBUF_DIRECT(m))
1878 rte_pktmbuf_detach(m);
1880 if (m->next != NULL) {
1884 rte_mbuf_refcnt_set(m, 1);
1892 * Free a segment of a packet mbuf into its original mempool.
1894 * Free an mbuf, without parsing other segments in case of chained buffers.
1898 * The packet mbuf segment to be freed.
1900 static __rte_always_inline void
1901 rte_pktmbuf_free_seg(struct rte_mbuf *m)
1903 m = rte_pktmbuf_prefree_seg(m);
1904 if (likely(m != NULL))
1905 rte_mbuf_raw_free(m);
1909 * Free a packet mbuf back into its original mempool.
1911 * Free an mbuf, and all its segments in case of chained buffers. Each
1912 * segment is added back into its original mempool.
1915 * The packet mbuf to be freed. If NULL, the function does nothing.
1917 static inline void rte_pktmbuf_free(struct rte_mbuf *m)
1919 struct rte_mbuf *m_next;
1922 __rte_mbuf_sanity_check(m, 1);
1926 rte_pktmbuf_free_seg(m);
1932 * Creates a "clone" of the given packet mbuf.
1934 * Walks through all segments of the given packet mbuf, and for each of them:
1935 * - Creates a new packet mbuf from the given pool.
1936 * - Attaches newly created mbuf to the segment.
1937 * Then updates pkt_len and nb_segs of the "clone" packet mbuf to match values
1938 * from the original packet mbuf.
1941 * The packet mbuf to be cloned.
1943 * The mempool from which the "clone" mbufs are allocated.
1945 * - The pointer to the new "clone" mbuf on success.
1946 * - NULL if allocation fails.
1948 static inline struct rte_mbuf *rte_pktmbuf_clone(struct rte_mbuf *md,
1949 struct rte_mempool *mp)
1951 struct rte_mbuf *mc, *mi, **prev;
1955 if (unlikely ((mc = rte_pktmbuf_alloc(mp)) == NULL))
1960 pktlen = md->pkt_len;
1965 rte_pktmbuf_attach(mi, md);
1968 } while ((md = md->next) != NULL &&
1969 (mi = rte_pktmbuf_alloc(mp)) != NULL);
1973 mc->pkt_len = pktlen;
1975 /* Allocation of new indirect segment failed */
1976 if (unlikely (mi == NULL)) {
1977 rte_pktmbuf_free(mc);
1981 __rte_mbuf_sanity_check(mc, 1);
1986 * Adds given value to the refcnt of all packet mbuf segments.
1988 * Walks through all segments of given packet mbuf and for each of them
1989 * invokes rte_mbuf_refcnt_update().
1992 * The packet mbuf whose refcnt to be updated.
1994 * The value to add to the mbuf's segments refcnt.
1996 static inline void rte_pktmbuf_refcnt_update(struct rte_mbuf *m, int16_t v)
1998 __rte_mbuf_sanity_check(m, 1);
2001 rte_mbuf_refcnt_update(m, v);
2002 } while ((m = m->next) != NULL);
2006 * Get the headroom in a packet mbuf.
2011 * The length of the headroom.
2013 static inline uint16_t rte_pktmbuf_headroom(const struct rte_mbuf *m)
2015 __rte_mbuf_sanity_check(m, 0);
/**
 * Get the tailroom of a packet mbuf.
 *
 * @param m
 *   The packet mbuf.
 * @return
 *   The length of the tailroom.
 */
static inline uint16_t rte_pktmbuf_tailroom(const struct rte_mbuf *m)
{
	__rte_mbuf_sanity_check(m, 0);
	return (uint16_t)(m->buf_len - rte_pktmbuf_headroom(m) -
			  m->data_len);
}

/**
 * Get the last segment of the packet.
 *
 * @param m
 *   The packet mbuf.
 * @return
 *   The last segment of the given mbuf.
 */
static inline struct rte_mbuf *rte_pktmbuf_lastseg(struct rte_mbuf *m)
{
	__rte_mbuf_sanity_check(m, 1);
	while (m->next != NULL)
		m = m->next;
	return m;
}

/**
 * A macro that points to an offset into the data in the mbuf.
 *
 * The returned pointer is cast to type t. Before using this
 * macro, the user must ensure that the first segment is large
 * enough to accommodate its data.
 *
 * @param m
 *   The packet mbuf.
 * @param o
 *   The offset into the mbuf data.
 * @param t
 *   The type to cast the result into.
 */
#define rte_pktmbuf_mtod_offset(m, t, o)	\
	((t)((char *)(m)->buf_addr + (m)->data_off + (o)))

/**
 * A macro that points to the start of the data in the mbuf.
 *
 * The returned pointer is cast to type t. Before using this
 * macro, the user must ensure that the first segment is large
 * enough to accommodate its data.
 *
 * @param m
 *   The packet mbuf.
 * @param t
 *   The type to cast the result into.
 */
#define rte_pktmbuf_mtod(m, t) rte_pktmbuf_mtod_offset(m, t, 0)
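
/*
 * Usage sketch (illustrative): parse the Ethernet header at the start of
 * the mbuf data. Type and constant names follow recent DPDK releases
 * (<rte_ether.h>); the caller must ensure the first segment really holds
 * the headers being accessed.
 *
 *	struct rte_ether_hdr *eth;
 *
 *	eth = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
 *	if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
 *		struct rte_ipv4_hdr *ip = rte_pktmbuf_mtod_offset(m,
 *				struct rte_ipv4_hdr *, sizeof(*eth));
 *		// ... inspect "ip"
 *	}
 */
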
/**
 * A macro that returns the IO address that points to an offset of the
 * start of the data in the mbuf
 *
 * @param m
 *   The packet mbuf.
 * @param o
 *   The offset into the data to calculate address from.
 */
#define rte_pktmbuf_iova_offset(m, o) \
	(rte_iova_t)((m)->buf_iova + (m)->data_off + (o))

/* deprecated */
#define rte_pktmbuf_mtophys_offset(m, o) \
	rte_pktmbuf_iova_offset(m, o)

/**
 * A macro that returns the IO address that points to the start of the
 * data in the mbuf
 *
 * @param m
 *   The packet mbuf.
 */
#define rte_pktmbuf_iova(m) rte_pktmbuf_iova_offset(m, 0)

/* deprecated */
#define rte_pktmbuf_mtophys(m) rte_pktmbuf_iova(m)
/**
 * A macro that returns the length of the packet.
 *
 * The value can be read or assigned.
 *
 * @param m
 *   The packet mbuf.
 */
#define rte_pktmbuf_pkt_len(m) ((m)->pkt_len)

/**
 * A macro that returns the length of the segment.
 *
 * The value can be read or assigned.
 *
 * @param m
 *   The packet mbuf.
 */
#define rte_pktmbuf_data_len(m) ((m)->data_len)
/**
 * Prepend len bytes to an mbuf data area.
 *
 * Returns a pointer to the new data start address. If there is not
 * enough headroom in the first segment, the function will return NULL,
 * without modifying the mbuf.
 *
 * @param m
 *   The packet mbuf.
 * @param len
 *   The amount of data to prepend (in bytes).
 * @return
 *   A pointer to the start of the newly prepended data, or
 *   NULL if there is not enough headroom space in the first segment
 */
static inline char *rte_pktmbuf_prepend(struct rte_mbuf *m,
					uint16_t len)
{
	__rte_mbuf_sanity_check(m, 1);

	if (unlikely(len > rte_pktmbuf_headroom(m)))
		return NULL;

	/* NB: elaborating the subtraction like this instead of using
	 * -= allows us to ensure the result type is uint16_t
	 * avoiding compiler warnings on gcc 8.1 at least */
	m->data_off = (uint16_t)(m->data_off - len);
	m->data_len = (uint16_t)(m->data_len + len);
	m->pkt_len  = (m->pkt_len + len);

	return (char *)m->buf_addr + m->data_off;
}

/**
 * Append len bytes to an mbuf.
 *
 * Append len bytes to an mbuf and return a pointer to the start address
 * of the added data. If there is not enough tailroom in the last
 * segment, the function will return NULL, without modifying the mbuf.
 *
 * @param m
 *   The packet mbuf.
 * @param len
 *   The amount of data to append (in bytes).
 * @return
 *   A pointer to the start of the newly appended data, or
 *   NULL if there is not enough tailroom space in the last segment
 */
static inline char *rte_pktmbuf_append(struct rte_mbuf *m, uint16_t len)
{
	void *tail;
	struct rte_mbuf *m_last;

	__rte_mbuf_sanity_check(m, 1);

	m_last = rte_pktmbuf_lastseg(m);
	if (unlikely(len > rte_pktmbuf_tailroom(m_last)))
		return NULL;

	tail = (char *)m_last->buf_addr + m_last->data_off + m_last->data_len;
	m_last->data_len = (uint16_t)(m_last->data_len + len);
	m->pkt_len  = (m->pkt_len + len);
	return (char *)tail;
}
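
/*
 * Usage sketch (illustrative): make room for an 8-byte tunnel header in
 * front of the packet and a 4-byte trailer behind it. Each call that fails
 * returns NULL and leaves the mbuf exactly as it was before that call.
 *
 *	char *hdr = rte_pktmbuf_prepend(m, 8);
 *	if (hdr == NULL)
 *		return -1;	// not enough headroom
 *	char *trailer = rte_pktmbuf_append(m, 4);
 *	if (trailer == NULL)
 *		return -1;	// not enough tailroom; prepend already applied
 */
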
/**
 * Remove len bytes at the beginning of an mbuf.
 *
 * Returns a pointer to the start address of the new data area. If the
 * length is greater than the length of the first segment, then the
 * function will fail and return NULL, without modifying the mbuf.
 *
 * @param m
 *   The packet mbuf.
 * @param len
 *   The amount of data to remove (in bytes).
 * @return
 *   A pointer to the new start of the data, or
 *   NULL if len is greater than the length of the first segment.
 */
static inline char *rte_pktmbuf_adj(struct rte_mbuf *m, uint16_t len)
{
	__rte_mbuf_sanity_check(m, 1);

	if (unlikely(len > m->data_len))
		return NULL;

	/* NB: elaborating the addition like this instead of using
	 * += allows us to ensure the result type is uint16_t
	 * avoiding compiler warnings on gcc 8.1 at least */
	m->data_len = (uint16_t)(m->data_len - len);
	m->data_off = (uint16_t)(m->data_off + len);
	m->pkt_len = (m->pkt_len - len);
	return (char *)m->buf_addr + m->data_off;
}

/**
 * Remove len bytes of data at the end of the mbuf.
 *
 * If the length is greater than the length of the last segment, the
 * function will fail and return -1 without modifying the mbuf.
 *
 * @param m
 *   The packet mbuf.
 * @param len
 *   The amount of data to remove (in bytes).
 * @return
 *   - 0: On success.
 *   - -1: On error.
 */
static inline int rte_pktmbuf_trim(struct rte_mbuf *m, uint16_t len)
{
	struct rte_mbuf *m_last;

	__rte_mbuf_sanity_check(m, 1);

	m_last = rte_pktmbuf_lastseg(m);
	if (unlikely(len > m_last->data_len))
		return -1;

	m_last->data_len = (uint16_t)(m_last->data_len - len);
	m->pkt_len = (m->pkt_len - len);
	return 0;
}
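
/*
 * Usage sketch (illustrative): the mirror image of prepend/append, used
 * here to strip an 8-byte encapsulation header and a 4-byte trailer.
 *
 *	if (rte_pktmbuf_adj(m, 8) == NULL)
 *		return -1;	// first segment shorter than 8 bytes
 *	if (rte_pktmbuf_trim(m, 4) != 0)
 *		return -1;	// last segment shorter than 4 bytes
 */
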
/**
 * Test if mbuf data is contiguous.
 *
 * @param m
 *   The packet mbuf.
 * @return
 *   - 1, if all data is contiguous (one segment).
 *   - 0, if there are several segments.
 */
static inline int rte_pktmbuf_is_contiguous(const struct rte_mbuf *m)
{
	__rte_mbuf_sanity_check(m, 1);
	return !!(m->nb_segs == 1);
}

/**
 * @internal used by rte_pktmbuf_read().
 */
const void *__rte_pktmbuf_read(const struct rte_mbuf *m, uint32_t off,
	uint32_t len, void *buf);

/**
 * Read len data bytes in a mbuf at specified offset.
 *
 * If the data is contiguous, return the pointer in the mbuf data, else
 * copy the data in the buffer provided by the user and return its
 * pointer.
 *
 * @param m
 *   The pointer to the mbuf.
 * @param off
 *   The offset of the data in the mbuf.
 * @param len
 *   The amount of bytes to read.
 * @param buf
 *   The buffer where data is copied if it is not contiguous in mbuf
 *   data. Its length should be at least equal to the len parameter.
 * @return
 *   The pointer to the data, either in the mbuf if it is contiguous,
 *   or in the user buffer. If mbuf is too small, NULL is returned.
 */
static inline const void *rte_pktmbuf_read(const struct rte_mbuf *m,
	uint32_t off, uint32_t len, void *buf)
{
	if (likely(off + len <= rte_pktmbuf_data_len(m)))
		return rte_pktmbuf_mtod_offset(m, char *, off);
	else
		return __rte_pktmbuf_read(m, off, len, buf);
}
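
/*
 * Usage sketch (illustrative): read 4 bytes at an arbitrary offset without
 * caring whether the packet is segmented. The stack buffer is only used
 * when the requested range does not fit in the first segment.
 *
 *	uint32_t tmp;
 *	const uint32_t *p = rte_pktmbuf_read(m, off, sizeof(tmp), &tmp);
 *	if (p == NULL)
 *		return -1;	// packet shorter than off + 4 bytes
 *	// *p is valid here, whether it points into the mbuf or into tmp
 */
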
/**
 * Chain an mbuf to another, thereby creating a segmented packet.
 *
 * Note: The implementation will do a linear walk over the segments to find
 * the tail entry. For cases when there are many segments, it's better to
 * chain the entries manually.
 *
 * @param head
 *   The head of the mbuf chain (the first packet)
 * @param tail
 *   The mbuf to put last in the chain
 *
 * @return
 *   - 0, on success.
 *   - -EOVERFLOW, if the chain segment limit exceeded
 */
static inline int rte_pktmbuf_chain(struct rte_mbuf *head, struct rte_mbuf *tail)
{
	struct rte_mbuf *cur_tail;

	/* Check for number-of-segments-overflow */
	if (head->nb_segs + tail->nb_segs > RTE_MBUF_MAX_NB_SEGS)
		return -EOVERFLOW;

	/* Chain 'tail' onto the old tail */
	cur_tail = rte_pktmbuf_lastseg(head);
	cur_tail->next = tail;

	/* accumulate number of segments and total length.
	 * NB: elaborating the addition like this instead of using
	 * += allows us to ensure the result type is uint16_t
	 * avoiding compiler warnings on gcc 8.1 at least */
	head->nb_segs = (uint16_t)(head->nb_segs + tail->nb_segs);
	head->pkt_len += tail->pkt_len;

	/* pkt_len is only set in the head */
	tail->pkt_len = tail->data_len;

	return 0;
}
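
/*
 * Usage sketch (illustrative): glue two independently built mbufs into one
 * segmented packet. On failure nothing is modified, so the caller still
 * owns (and must free) both mbufs.
 *
 *	if (rte_pktmbuf_chain(first, second) != 0) {
 *		rte_pktmbuf_free(second);	// could not be chained
 *	} else {
 *		// "first" now describes the whole packet:
 *		// first->pkt_len grew by second->pkt_len
 *	}
 */
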
/**
 * @warning
 * @b EXPERIMENTAL: This API may change without prior notice.
 *
 * For given input values generate raw tx_offload value.
 * Note that it is the caller's responsibility to make sure that input
 * parameters don't exceed maximum bit-field values.
 *
 * @param il2
 *   l2_len value.
 * @param il3
 *   l3_len value.
 * @param il4
 *   l4_len value.
 * @param tso
 *   tso_segsz value.
 * @param ol3
 *   outer_l3_len value.
 * @param ol2
 *   outer_l2_len value.
 * @param unused
 *   unused value.
 * @return
 *   raw tx_offload value.
 */
static __rte_always_inline uint64_t
rte_mbuf_tx_offload(uint64_t il2, uint64_t il3, uint64_t il4, uint64_t tso,
	uint64_t ol3, uint64_t ol2, uint64_t unused)
{
	return il2 << RTE_MBUF_L2_LEN_OFS |
		il3 << RTE_MBUF_L3_LEN_OFS |
		il4 << RTE_MBUF_L4_LEN_OFS |
		tso << RTE_MBUF_TSO_SEGSZ_OFS |
		ol3 << RTE_MBUF_OUTL3_LEN_OFS |
		ol2 << RTE_MBUF_OUTL2_LEN_OFS |
		unused << RTE_MBUF_TXOFLD_UNUSED_OFS;
}
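
/*
 * Usage sketch (illustrative): fill m->tx_offload with a single store for a
 * plain TCP/IPv4 packet (14-byte Ethernet, 20-byte IPv4, 20-byte TCP
 * headers, no TSO, no outer headers):
 *
 *	m->tx_offload = rte_mbuf_tx_offload(14, 20, 20, 0, 0, 0, 0);
 *
 * which is equivalent to the field-by-field assignments m->l2_len = 14,
 * m->l3_len = 20, m->l4_len = 20, but avoids several read-modify-write
 * operations on the bit-fields.
 */
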
/**
 * Validate general requirements for Tx offload in mbuf.
 *
 * This function checks correctness and completeness of Tx offload settings.
 *
 * @param m
 *   The packet mbuf to be validated.
 * @return
 *   0 if packet is valid, a negative errno-style value otherwise.
 */
static inline int
rte_validate_tx_offload(const struct rte_mbuf *m)
{
	uint64_t ol_flags = m->ol_flags;

	/* Does packet set any of available offloads? */
	if (!(ol_flags & PKT_TX_OFFLOAD_MASK))
		return 0;

	/* IP checksum can be counted only for IPv4 packet */
	if ((ol_flags & PKT_TX_IP_CKSUM) && (ol_flags & PKT_TX_IPV6))
		return -EINVAL;

	/* IP type not set when required */
	if (ol_flags & (PKT_TX_L4_MASK | PKT_TX_TCP_SEG))
		if (!(ol_flags & (PKT_TX_IPV4 | PKT_TX_IPV6)))
			return -EINVAL;

	/* Check requirements for TSO packet */
	if (ol_flags & PKT_TX_TCP_SEG)
		if ((m->tso_segsz == 0) ||
				((ol_flags & PKT_TX_IPV4) &&
				!(ol_flags & PKT_TX_IP_CKSUM)))
			return -EINVAL;

	/* PKT_TX_OUTER_IP_CKSUM set for non outer IPv4 packet. */
	if ((ol_flags & PKT_TX_OUTER_IP_CKSUM) &&
			!(ol_flags & PKT_TX_OUTER_IPV4))
		return -EINVAL;

	return 0;
}
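
/*
 * Usage sketch (illustrative): a hypothetical debug wrapper that validates
 * offload flags before handing packets to the driver. RTE_ASSERT() compiles
 * away unless RTE_ENABLE_ASSERT is defined; rte_eth_tx_burst() requires
 * <rte_ethdev.h>.
 *
 *	static inline uint16_t
 *	dbg_tx_burst(uint16_t port, uint16_t queue,
 *			struct rte_mbuf **pkts, uint16_t n)
 *	{
 *		uint16_t i;
 *
 *		for (i = 0; i != n; i++)
 *			RTE_ASSERT(rte_validate_tx_offload(pkts[i]) == 0);
 *		return rte_eth_tx_burst(port, queue, pkts, n);
 *	}
 */
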
/**
 * Linearize data in mbuf.
 *
 * This function moves the mbuf data in the first segment if there is enough
 * tailroom. The subsequent segments are unchained and freed.
 *
 * @param mbuf
 *   mbuf to linearize
 * @return
 *   - 0, on success
 *   - -1, on error
 */
static inline int
rte_pktmbuf_linearize(struct rte_mbuf *mbuf)
{
	size_t seg_len, copy_len;
	struct rte_mbuf *m;
	struct rte_mbuf *m_next;
	char *buffer;

	if (rte_pktmbuf_is_contiguous(mbuf))
		return 0;

	/* Extend first segment to the total packet length */
	copy_len = rte_pktmbuf_pkt_len(mbuf) - rte_pktmbuf_data_len(mbuf);

	if (unlikely(copy_len > rte_pktmbuf_tailroom(mbuf)))
		return -1;

	buffer = rte_pktmbuf_mtod_offset(mbuf, char *, mbuf->data_len);
	mbuf->data_len = (uint16_t)(mbuf->pkt_len);

	/* Append data from next segments to the first one */
	m = mbuf->next;
	while (m != NULL) {
		m_next = m->next;

		seg_len = rte_pktmbuf_data_len(m);
		rte_memcpy(buffer, rte_pktmbuf_mtod(m, char *), seg_len);
		buffer += seg_len;

		rte_pktmbuf_free_seg(m);
		m = m_next;
	}

	mbuf->next = NULL;
	mbuf->nb_segs = 1;

	return 0;
}
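
/*
 * Usage sketch (illustrative): stages that need one flat buffer (e.g. a
 * parser that cannot walk segments) can simply attempt linearization; the
 * call returns 0 immediately for single-segment packets and fails only
 * when the first segment lacks tailroom for the remaining data.
 *
 *	if (rte_pktmbuf_linearize(m) != 0)
 *		return -1;	// would not fit; consider rte_pktmbuf_read()
 *	// from here on, all pkt_len bytes are in the first segment
 */
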
/**
 * Dump an mbuf structure to a file.
 *
 * Dump all fields for the given packet mbuf and all its associated
 * segments (in the case of a chained buffer).
 *
 * @param f
 *   A pointer to a file for output
 * @param m
 *   The packet mbuf.
 * @param dump_len
 *   If dump_len != 0, also dump the "dump_len" first data bytes of
 *   the packet.
 */
void rte_pktmbuf_dump(FILE *f, const struct rte_mbuf *m, unsigned dump_len);

/**
 * Get the value of mbuf sched queue_id field.
 */
static inline uint32_t
rte_mbuf_sched_queue_get(const struct rte_mbuf *m)
{
	return m->hash.sched.queue_id;
}

/**
 * Get the value of mbuf sched traffic_class field.
 */
static inline uint8_t
rte_mbuf_sched_traffic_class_get(const struct rte_mbuf *m)
{
	return m->hash.sched.traffic_class;
}

/**
 * Get the value of mbuf sched color field.
 */
static inline uint8_t
rte_mbuf_sched_color_get(const struct rte_mbuf *m)
{
	return m->hash.sched.color;
}

/**
 * Get the values of mbuf sched queue_id, traffic_class and color.
 *
 * @param m
 *   Mbuf to get the values from.
 * @param queue_id
 *   Returns the queue id
 * @param traffic_class
 *   Returns the traffic class id
 * @param color
 *   Returns the color id
 */
static inline void
rte_mbuf_sched_get(const struct rte_mbuf *m, uint32_t *queue_id,
			uint8_t *traffic_class,
			uint8_t *color)
{
	struct rte_mbuf_sched sched = m->hash.sched;

	*queue_id = sched.queue_id;
	*traffic_class = sched.traffic_class;
	*color = sched.color;
}

/**
 * Set the mbuf sched queue_id to the defined value.
 */
static inline void
rte_mbuf_sched_queue_set(struct rte_mbuf *m, uint32_t queue_id)
{
	m->hash.sched.queue_id = queue_id;
}

/**
 * Set the mbuf sched traffic_class id to the defined value.
 */
static inline void
rte_mbuf_sched_traffic_class_set(struct rte_mbuf *m, uint8_t traffic_class)
{
	m->hash.sched.traffic_class = traffic_class;
}

/**
 * Set the mbuf sched color id to the defined value.
 */
static inline void
rte_mbuf_sched_color_set(struct rte_mbuf *m, uint8_t color)
{
	m->hash.sched.color = color;
}

/**
 * Set the mbuf sched queue_id, traffic_class and color.
 *
 * @param m
 *   Mbuf to set the values to.
 * @param queue_id
 *   Queue id value to be set
 * @param traffic_class
 *   Traffic class id value to be set
 * @param color
 *   Color id to be set
 */
static inline void
rte_mbuf_sched_set(struct rte_mbuf *m, uint32_t queue_id,
			uint8_t traffic_class,
			uint8_t color)
{
	m->hash.sched = (struct rte_mbuf_sched){
				.queue_id = queue_id,
				.traffic_class = traffic_class,
				.color = color,
			};
}
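
/*
 * Usage sketch (illustrative): tag a packet for a hierarchical scheduler on
 * enqueue and read the tag back on dequeue. The single rte_mbuf_sched_set()
 * store is cheaper than three separate field writes. The color value shown
 * (RTE_COLOR_GREEN, from <rte_meter.h>) is an assumption about the
 * application's metering setup.
 *
 *	rte_mbuf_sched_set(m, queue_id, tc, RTE_COLOR_GREEN);
 *	// ... later, on dequeue:
 *	uint32_t q;
 *	uint8_t tclass, color;
 *	rte_mbuf_sched_get(m, &q, &tclass, &color);
 */
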
#endif /* _RTE_MBUF_H_ */