/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation.
 * Copyright 2014 6WIND S.A.
 */
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <inttypes.h>
#include <errno.h>
#include <sys/queue.h>

#include <rte_compat.h>
#include <rte_debug.h>
#include <rte_common.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_mbuf_pool_ops.h>
#include <rte_string_fns.h>
#include <rte_hexdump.h>
#include <rte_errno.h>
#include <rte_memcpy.h>
/*
 * pktmbuf pool constructor, given as a callback function to
 * rte_mempool_create(), or called directly if using
 * rte_mempool_create_empty()/rte_mempool_populate().
 */
void
rte_pktmbuf_pool_init(struct rte_mempool *mp, void *opaque_arg)
{
	struct rte_pktmbuf_pool_private *user_mbp_priv, *mbp_priv;
	struct rte_pktmbuf_pool_private default_mbp_priv;
	uint16_t roomsz;

	RTE_ASSERT(mp->private_data_size >=
		   sizeof(struct rte_pktmbuf_pool_private));
	RTE_ASSERT(mp->elt_size >= sizeof(struct rte_mbuf));

	/* if no structure is provided, assume no mbuf private area */
	user_mbp_priv = opaque_arg;
	if (user_mbp_priv == NULL) {
		memset(&default_mbp_priv, 0, sizeof(default_mbp_priv));
		if (mp->elt_size > sizeof(struct rte_mbuf))
			roomsz = mp->elt_size - sizeof(struct rte_mbuf);
		else
			roomsz = 0;
		default_mbp_priv.mbuf_data_room_size = roomsz;
		user_mbp_priv = &default_mbp_priv;
	}

	RTE_ASSERT(mp->elt_size >= sizeof(struct rte_mbuf) +
		((user_mbp_priv->flags & RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF) ?
			sizeof(struct rte_mbuf_ext_shared_info) :
			user_mbp_priv->mbuf_data_room_size) +
		user_mbp_priv->mbuf_priv_size);
	RTE_ASSERT((user_mbp_priv->flags &
		    ~RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF) == 0);

	mbp_priv = rte_mempool_get_priv(mp);
	memcpy(mbp_priv, user_mbp_priv, sizeof(*mbp_priv));
}
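
/*
 * Illustrative sketch (not part of this file) of the "direct call" path
 * mentioned above, for an application assembling its pool by hand. All
 * names ("pool_x") and sizes are hypothetical.
 *
 *	struct rte_pktmbuf_pool_private priv = {
 *		.mbuf_data_room_size = RTE_MBUF_DEFAULT_BUF_SIZE,
 *		.mbuf_priv_size = 0,
 *	};
 *	struct rte_mempool *mp = rte_mempool_create_empty("pool_x", 4096,
 *		sizeof(struct rte_mbuf) + RTE_MBUF_DEFAULT_BUF_SIZE, 256,
 *		sizeof(priv), SOCKET_ID_ANY, 0);
 *
 *	if (mp != NULL) {
 *		rte_pktmbuf_pool_init(mp, &priv);
 *		rte_mempool_populate_default(mp);
 *		rte_mempool_obj_iter(mp, rte_pktmbuf_init, NULL);
 *	}
 */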
/*
 * pktmbuf constructor, given as a callback function to
 * rte_mempool_obj_iter() or rte_mempool_create().
 * Set the fields of a packet mbuf to their default values.
 */
void
rte_pktmbuf_init(struct rte_mempool *mp,
		 __rte_unused void *opaque_arg,
		 void *_m,
		 __rte_unused unsigned int i)
{
	struct rte_mbuf *m = _m;
	uint32_t mbuf_size, buf_len, priv_size;

	RTE_ASSERT(mp->private_data_size >=
		   sizeof(struct rte_pktmbuf_pool_private));

	priv_size = rte_pktmbuf_priv_size(mp);
	mbuf_size = sizeof(struct rte_mbuf) + priv_size;
	buf_len = rte_pktmbuf_data_room_size(mp);

	RTE_ASSERT(RTE_ALIGN(priv_size, RTE_MBUF_PRIV_ALIGN) == priv_size);
	RTE_ASSERT(mp->elt_size >= mbuf_size);
	RTE_ASSERT(buf_len <= UINT16_MAX);

	memset(m, 0, mbuf_size);
	/* start of buffer is after mbuf structure and priv data */
	m->priv_size = priv_size;
	m->buf_addr = (char *)m + mbuf_size;
	m->buf_iova = rte_mempool_virt2iova(m) + mbuf_size;
	m->buf_len = (uint16_t)buf_len;

	/* keep some headroom between start of buffer and data */
	m->data_off = RTE_MIN(RTE_PKTMBUF_HEADROOM, (uint16_t)m->buf_len);

	/* init some constant fields */
	m->pool = mp;
	m->nb_segs = 1;
	m->port = RTE_MBUF_PORT_INVALID;
	rte_mbuf_refcnt_set(m, 1);
	m->next = NULL;
}
/**
 * @internal The callback routine called when reference counter in shinfo
 * for mbufs with pinned external buffer reaches zero. It means there is
 * no more reference to buffer backing mbuf and this one should be freed.
 * This routine is called for the regular (not with pinned external or
 * indirect buffer) mbufs on detaching from the mbuf with pinned external
 * buffer.
 */
static void
rte_pktmbuf_free_pinned_extmem(void *addr, void *opaque)
{
	struct rte_mbuf *m = opaque;

	RTE_SET_USED(addr);
	RTE_ASSERT(RTE_MBUF_HAS_EXTBUF(m));
	RTE_ASSERT(RTE_MBUF_HAS_PINNED_EXTBUF(m));
	RTE_ASSERT(m->shinfo->fcb_opaque == m);

	rte_mbuf_ext_refcnt_set(m->shinfo, 1);
	m->ol_flags = EXT_ATTACHED_MBUF;
	if (m->next != NULL) {
		m->next = NULL;
		m->nb_segs = 1;
	}
	rte_mbuf_raw_free(m);
}
/** The context to initialize the mbufs with pinned external buffers. */
struct rte_pktmbuf_extmem_init_ctx {
	const struct rte_pktmbuf_extmem *ext_mem; /* descriptor array. */
	unsigned int ext_num; /* number of descriptors in array. */
	unsigned int ext; /* loop descriptor index. */
	size_t off; /* loop buffer offset. */
};
/**
 * @internal Packet mbuf constructor for pools with pinned external memory.
 *
 * This function initializes some fields in the mbuf structure that are
 * not modified by the user once created (origin pool, buffer start
 * address, and so on). This function is given as a callback function to
 * rte_mempool_obj_iter() called from rte_mempool_create_extmem().
 *
 * @param mp
 *   The mempool from which mbufs originate.
 * @param opaque_arg
 *   A pointer to the rte_pktmbuf_extmem_init_ctx - initialization
 *   context structure.
 * @param _m
 *   The mbuf to initialize.
 * @param i
 *   The index of the mbuf in the pool table.
 */
static void
__rte_pktmbuf_init_extmem(struct rte_mempool *mp,
			  void *opaque_arg,
			  void *_m,
			  __rte_unused unsigned int i)
{
	struct rte_mbuf *m = _m;
	struct rte_pktmbuf_extmem_init_ctx *ctx = opaque_arg;
	const struct rte_pktmbuf_extmem *ext_mem;
	uint32_t mbuf_size, buf_len, priv_size;
	struct rte_mbuf_ext_shared_info *shinfo;

	priv_size = rte_pktmbuf_priv_size(mp);
	mbuf_size = sizeof(struct rte_mbuf) + priv_size;
	buf_len = rte_pktmbuf_data_room_size(mp);

	RTE_ASSERT(RTE_ALIGN(priv_size, RTE_MBUF_PRIV_ALIGN) == priv_size);
	RTE_ASSERT(mp->elt_size >= mbuf_size);
	RTE_ASSERT(buf_len <= UINT16_MAX);

	memset(m, 0, mbuf_size);
	m->priv_size = priv_size;
	m->buf_len = (uint16_t)buf_len;

	/* set the data buffer pointers to external memory */
	ext_mem = ctx->ext_mem + ctx->ext;

	RTE_ASSERT(ctx->ext < ctx->ext_num);
	RTE_ASSERT(ctx->off + ext_mem->elt_size <= ext_mem->buf_len);

	m->buf_addr = RTE_PTR_ADD(ext_mem->buf_ptr, ctx->off);
	m->buf_iova = ext_mem->buf_iova == RTE_BAD_IOVA ?
		      RTE_BAD_IOVA : (ext_mem->buf_iova + ctx->off);

	ctx->off += ext_mem->elt_size;
	if (ctx->off + ext_mem->elt_size > ext_mem->buf_len) {
		ctx->off = 0;
		++ctx->ext;
	}
	/* keep some headroom between start of buffer and data */
	m->data_off = RTE_MIN(RTE_PKTMBUF_HEADROOM, (uint16_t)m->buf_len);

	/* init some constant fields */
	m->pool = mp;
	m->nb_segs = 1;
	m->port = RTE_MBUF_PORT_INVALID;
	m->ol_flags = EXT_ATTACHED_MBUF;
	rte_mbuf_refcnt_set(m, 1);
	m->next = NULL;

	/* init external buffer shared info items */
	shinfo = RTE_PTR_ADD(m, mbuf_size);
	m->shinfo = shinfo;
	shinfo->free_cb = rte_pktmbuf_free_pinned_extmem;
	shinfo->fcb_opaque = m;
	rte_mbuf_ext_refcnt_set(shinfo, 1);
}
/* Helper to create a mbuf pool with given mempool ops name */
struct rte_mempool *
rte_pktmbuf_pool_create_by_ops(const char *name, unsigned int n,
	unsigned int cache_size, uint16_t priv_size, uint16_t data_room_size,
	int socket_id, const char *ops_name)
{
	struct rte_mempool *mp;
	struct rte_pktmbuf_pool_private mbp_priv;
	const char *mp_ops_name = ops_name;
	unsigned int elt_size;
	int ret;

	if (RTE_ALIGN(priv_size, RTE_MBUF_PRIV_ALIGN) != priv_size) {
		RTE_LOG(ERR, MBUF, "mbuf priv_size=%u is not aligned\n",
			priv_size);
		rte_errno = EINVAL;
		return NULL;
	}
	elt_size = sizeof(struct rte_mbuf) + (unsigned int)priv_size +
		(unsigned int)data_room_size;
	memset(&mbp_priv, 0, sizeof(mbp_priv));
	mbp_priv.mbuf_data_room_size = data_room_size;
	mbp_priv.mbuf_priv_size = priv_size;

	mp = rte_mempool_create_empty(name, n, elt_size, cache_size,
		sizeof(struct rte_pktmbuf_pool_private), socket_id, 0);
	if (mp == NULL)
		return NULL;

	if (mp_ops_name == NULL)
		mp_ops_name = rte_mbuf_best_mempool_ops();
	ret = rte_mempool_set_ops_byname(mp, mp_ops_name, NULL);
	if (ret != 0) {
		RTE_LOG(ERR, MBUF, "error setting mempool handler\n");
		rte_mempool_free(mp);
		rte_errno = -ret;
		return NULL;
	}
	rte_pktmbuf_pool_init(mp, &mbp_priv);

	ret = rte_mempool_populate_default(mp);
	if (ret < 0) {
		rte_mempool_free(mp);
		rte_errno = -ret;
		return NULL;
	}

	rte_mempool_obj_iter(mp, rte_pktmbuf_init, NULL);

	return mp;
}
/* helper to create a mbuf pool */
struct rte_mempool *
rte_pktmbuf_pool_create(const char *name, unsigned int n,
	unsigned int cache_size, uint16_t priv_size, uint16_t data_room_size,
	int socket_id)
{
	return rte_pktmbuf_pool_create_by_ops(name, n, cache_size, priv_size,
			data_room_size, socket_id, NULL);
}
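
/*
 * Illustrative usage sketch (hypothetical names): create a pool of 8191
 * mbufs with the default data room and no per-mbuf private area, then
 * allocate and free one mbuf.
 *
 *	struct rte_mempool *mp = rte_pktmbuf_pool_create("mbuf_pool", 8191,
 *		256, 0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
 *	struct rte_mbuf *m;
 *
 *	if (mp == NULL)
 *		rte_panic("pool: %s\n", rte_strerror(rte_errno));
 *	m = rte_pktmbuf_alloc(mp);
 *	if (m != NULL)
 *		rte_pktmbuf_free(m);
 */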
/* Helper to create a mbuf pool with pinned external data buffers. */
struct rte_mempool *
rte_pktmbuf_pool_create_extbuf(const char *name, unsigned int n,
	unsigned int cache_size, uint16_t priv_size,
	uint16_t data_room_size, int socket_id,
	const struct rte_pktmbuf_extmem *ext_mem,
	unsigned int ext_num)
{
	struct rte_mempool *mp;
	struct rte_pktmbuf_pool_private mbp_priv;
	struct rte_pktmbuf_extmem_init_ctx init_ctx;
	const char *mp_ops_name;
	unsigned int elt_size;
	unsigned int i, n_elts = 0;
	int ret;

	if (RTE_ALIGN(priv_size, RTE_MBUF_PRIV_ALIGN) != priv_size) {
		RTE_LOG(ERR, MBUF, "mbuf priv_size=%u is not aligned\n",
			priv_size);
		rte_errno = EINVAL;
		return NULL;
	}
	/* Check the external memory descriptors. */
	for (i = 0; i < ext_num; i++) {
		const struct rte_pktmbuf_extmem *extm = ext_mem + i;

		if (!extm->elt_size || !extm->buf_len || !extm->buf_ptr) {
			RTE_LOG(ERR, MBUF, "invalid extmem descriptor\n");
			rte_errno = EINVAL;
			return NULL;
		}
		if (data_room_size > extm->elt_size) {
			RTE_LOG(ERR, MBUF, "ext elt_size=%u is too small\n",
				extm->elt_size);
			rte_errno = EINVAL;
			return NULL;
		}
		n_elts += extm->buf_len / extm->elt_size;
	}
	/* Check whether enough external memory provided. */
	if (n_elts < n) {
		RTE_LOG(ERR, MBUF, "not enough extmem\n");
		rte_errno = ENOMEM;
		return NULL;
	}
	elt_size = sizeof(struct rte_mbuf) +
		   (unsigned int)priv_size +
		   sizeof(struct rte_mbuf_ext_shared_info);

	memset(&mbp_priv, 0, sizeof(mbp_priv));
	mbp_priv.mbuf_data_room_size = data_room_size;
	mbp_priv.mbuf_priv_size = priv_size;
	mbp_priv.flags = RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF;

	mp = rte_mempool_create_empty(name, n, elt_size, cache_size,
		sizeof(struct rte_pktmbuf_pool_private), socket_id, 0);
	if (mp == NULL)
		return NULL;

	mp_ops_name = rte_mbuf_best_mempool_ops();
	ret = rte_mempool_set_ops_byname(mp, mp_ops_name, NULL);
	if (ret != 0) {
		RTE_LOG(ERR, MBUF, "error setting mempool handler\n");
		rte_mempool_free(mp);
		rte_errno = -ret;
		return NULL;
	}
	rte_pktmbuf_pool_init(mp, &mbp_priv);

	ret = rte_mempool_populate_default(mp);
	if (ret < 0) {
		rte_mempool_free(mp);
		rte_errno = -ret;
		return NULL;
	}

	init_ctx = (struct rte_pktmbuf_extmem_init_ctx){
		.ext_mem = ext_mem,
		.ext_num = ext_num,
		.ext = 0,
		.off = 0,
	};
	rte_mempool_obj_iter(mp, __rte_pktmbuf_init_extmem, &init_ctx);

	return mp;
}
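
/*
 * Illustrative sketch, assuming "ext_buf"/"ext_buf_iova"/"ext_buf_len"
 * describe DMA-able memory the application obtained and registered
 * elsewhere (all names hypothetical): every mbuf carved from this pool
 * keeps its data buffer pinned in that external area.
 *
 *	struct rte_pktmbuf_extmem ext = {
 *		.buf_ptr = ext_buf,
 *		.buf_iova = ext_buf_iova,
 *		.buf_len = ext_buf_len,
 *		.elt_size = 2048,
 *	};
 *	struct rte_mempool *mp = rte_pktmbuf_pool_create_extbuf("ext_pool",
 *		1024, 256, 0, 2048, rte_socket_id(), &ext, 1);
 */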
/* do some sanity checks on a mbuf: panic if it fails */
void
rte_mbuf_sanity_check(const struct rte_mbuf *m, int is_header)
{
	const char *reason;

	if (rte_mbuf_check(m, is_header, &reason))
		rte_panic("%s\n", reason);
}
int rte_mbuf_check(const struct rte_mbuf *m, int is_header,
		   const char **reason)
{
	unsigned int nb_segs, pkt_len;

	if (m == NULL) {
		*reason = "mbuf is NULL";
		return -1;
	}

	/* generic checks */
	if (m->pool == NULL) {
		*reason = "bad mbuf pool";
		return -1;
	}
	if (m->buf_iova == 0) {
		*reason = "bad IO addr";
		return -1;
	}
	if (m->buf_addr == NULL) {
		*reason = "bad virt addr";
		return -1;
	}

	uint16_t cnt = rte_mbuf_refcnt_read(m);
	if ((cnt == 0) || (cnt == UINT16_MAX)) {
		*reason = "bad ref cnt";
		return -1;
	}

	/* nothing to check for sub-segments */
	if (is_header == 0)
		return 0;

	/* data_len is supposed to be not more than pkt_len */
	if (m->data_len > m->pkt_len) {
		*reason = "bad data_len";
		return -1;
	}

	nb_segs = m->nb_segs;
	pkt_len = m->pkt_len;

	do {
		if (m->data_off > m->buf_len) {
			*reason = "data offset too big in mbuf segment";
			return -1;
		}
		if (m->data_off + m->data_len > m->buf_len) {
			*reason = "data length too big in mbuf segment";
			return -1;
		}
		nb_segs -= 1;
		pkt_len -= m->data_len;
	} while ((m = m->next) != NULL);

	if (nb_segs) {
		*reason = "bad nb_segs";
		return -1;
	}
	if (pkt_len) {
		*reason = "bad pkt_len";
		return -1;
	}

	return 0;
}
/**
 * @internal helper function for freeing a bulk of packet mbuf segments
 * via an array holding the packet mbuf segments from the same mempool
 * pending to be freed.
 *
 * @param m
 *  The packet mbuf segment to be freed.
 * @param pending
 *  Pointer to the array of packet mbuf segments pending to be freed.
 * @param nb_pending
 *  Pointer to the number of elements held in the array.
 * @param pending_sz
 *  Number of elements the array can hold.
 *  Note: The compiler should optimize this parameter away when using a
 *  constant value, such as RTE_PKTMBUF_FREE_PENDING_SZ.
 */
static void
__rte_pktmbuf_free_seg_via_array(struct rte_mbuf *m,
	struct rte_mbuf ** const pending, unsigned int * const nb_pending,
	const unsigned int pending_sz)
{
	m = rte_pktmbuf_prefree_seg(m);
	if (likely(m != NULL)) {
		if (*nb_pending == pending_sz ||
		    (*nb_pending > 0 && m->pool != pending[0]->pool)) {
			rte_mempool_put_bulk(pending[0]->pool,
					(void **)pending, *nb_pending);
			*nb_pending = 0;
		}

		pending[(*nb_pending)++] = m;
	}
}
/**
 * Size of the array holding mbufs from the same mempool pending to be freed
 * in bulk.
 */
#define RTE_PKTMBUF_FREE_PENDING_SZ 64
/* Free a bulk of packet mbufs back into their original mempools. */
void rte_pktmbuf_free_bulk(struct rte_mbuf **mbufs, unsigned int count)
{
	struct rte_mbuf *m, *m_next, *pending[RTE_PKTMBUF_FREE_PENDING_SZ];
	unsigned int idx, nb_pending = 0;

	for (idx = 0; idx < count; idx++) {
		m = mbufs[idx];
		if (unlikely(m == NULL))
			continue;

		__rte_mbuf_sanity_check(m, 1);

		do {
			m_next = m->next;
			__rte_pktmbuf_free_seg_via_array(m,
					pending, &nb_pending,
					RTE_PKTMBUF_FREE_PENDING_SZ);
			m = m_next;
		} while (m != NULL);
	}

	if (nb_pending > 0)
		rte_mempool_put_bulk(pending[0]->pool, (void **)pending,
				nb_pending);
}
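
/*
 * Illustrative sketch (hypothetical names): free a whole RX burst in one
 * call instead of a per-packet rte_pktmbuf_free() loop; NULL entries in
 * the array are skipped.
 *
 *	struct rte_mbuf *burst[32];
 *	uint16_t nb = rte_eth_rx_burst(port_id, 0, burst, 32);
 *
 *	... inspect packets ...
 *
 *	rte_pktmbuf_free_bulk(burst, nb);
 */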
/* Creates a shallow copy of mbuf */
struct rte_mbuf *
rte_pktmbuf_clone(struct rte_mbuf *md, struct rte_mempool *mp)
{
	struct rte_mbuf *mc, *mi, **prev;
	uint32_t pktlen;
	uint16_t nseg;

	mc = rte_pktmbuf_alloc(mp);
	if (unlikely(mc == NULL))
		return NULL;

	mi = mc;
	prev = &mi->next;
	pktlen = md->pkt_len;
	nseg = 0;

	do {
		nseg++;
		rte_pktmbuf_attach(mi, md);
		*prev = mi;
		prev = &mi->next;
	} while ((md = md->next) != NULL &&
		 (mi = rte_pktmbuf_alloc(mp)) != NULL);

	*prev = NULL;
	mc->nb_segs = nseg;
	mc->pkt_len = pktlen;

	/* Allocation of new indirect segment failed */
	if (unlikely(mi == NULL)) {
		rte_pktmbuf_free(mc);
		return NULL;
	}

	__rte_mbuf_sanity_check(mc, 1);
	return mc;
}
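
/*
 * Illustrative sketch (hypothetical names): a clone shares the data
 * buffers of "m" by reference; only the clone's metadata comes from
 * "indirect_pool", and freeing the clone drops the extra references.
 *
 *	struct rte_mbuf *c = rte_pktmbuf_clone(m, indirect_pool);
 *
 *	if (c != NULL)
 *		rte_pktmbuf_free(c);
 */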
/* convert multi-segment mbuf to single mbuf */
int
__rte_pktmbuf_linearize(struct rte_mbuf *mbuf)
{
	size_t seg_len, copy_len;
	struct rte_mbuf *m;
	struct rte_mbuf *m_next;
	char *buffer;

	/* Extend first segment to the total packet length */
	copy_len = rte_pktmbuf_pkt_len(mbuf) - rte_pktmbuf_data_len(mbuf);

	if (unlikely(copy_len > rte_pktmbuf_tailroom(mbuf)))
		return -1;

	buffer = rte_pktmbuf_mtod_offset(mbuf, char *, mbuf->data_len);
	mbuf->data_len = (uint16_t)(mbuf->pkt_len);

	/* Append data from next segments to the first one */
	m = mbuf->next;
	while (m != NULL) {
		m_next = m->next;

		seg_len = rte_pktmbuf_data_len(m);
		rte_memcpy(buffer, rte_pktmbuf_mtod(m, char *), seg_len);
		buffer += seg_len;

		rte_pktmbuf_free_seg(m);
		m = m_next;
	}

	mbuf->next = NULL;
	mbuf->nb_segs = 1;

	return 0;
}
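
/*
 * Illustrative sketch: applications normally use the inline
 * rte_pktmbuf_linearize() wrapper from rte_mbuf.h, which calls the
 * helper above only for multi-segment mbufs; it fails when the first
 * segment lacks tailroom for the rest of the packet.
 *
 *	if (rte_pktmbuf_linearize(m) != 0)
 *		... packet cannot be made contiguous, handle error ...
 */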
/* Create a deep copy of mbuf */
struct rte_mbuf *
rte_pktmbuf_copy(const struct rte_mbuf *m, struct rte_mempool *mp,
		 uint32_t off, uint32_t len)
{
	const struct rte_mbuf *seg = m;
	struct rte_mbuf *mc, *m_last, **prev;

	/* garbage in check */
	__rte_mbuf_sanity_check(m, 1);

	/* check for request to copy at offset past end of mbuf */
	if (unlikely(off >= m->pkt_len))
		return NULL;

	mc = rte_pktmbuf_alloc(mp);
	if (unlikely(mc == NULL))
		return NULL;

	/* truncate requested length to available data */
	if (len > m->pkt_len - off)
		len = m->pkt_len - off;

	__rte_pktmbuf_copy_hdr(mc, m);

	/* copied mbuf is not indirect or external */
	mc->ol_flags = m->ol_flags & ~(IND_ATTACHED_MBUF|EXT_ATTACHED_MBUF);

	prev = &mc->next;
	m_last = mc;
	while (len > 0) {
		uint32_t copy_len;

		/* skip leading mbuf segments */
		while (off >= seg->data_len) {
			off -= seg->data_len;
			seg = seg->next;
		}

		/* current buffer is full, chain a new one */
		if (rte_pktmbuf_tailroom(m_last) == 0) {
			m_last = rte_pktmbuf_alloc(mp);
			if (unlikely(m_last == NULL)) {
				rte_pktmbuf_free(mc);
				return NULL;
			}
			++mc->nb_segs;
			*prev = m_last;
			prev = &m_last->next;
		}

		/*
		 * copy the min of data in input segment (seg)
		 * vs space available in output (m_last)
		 */
		copy_len = RTE_MIN(seg->data_len - off, len);
		if (copy_len > rte_pktmbuf_tailroom(m_last))
			copy_len = rte_pktmbuf_tailroom(m_last);

		/* append from seg to m_last */
		rte_memcpy(rte_pktmbuf_mtod_offset(m_last, char *,
						   m_last->data_len),
			   rte_pktmbuf_mtod_offset(seg, char *, off),
			   copy_len);

		/* update offsets and lengths */
		m_last->data_len += copy_len;
		mc->pkt_len += copy_len;
		off += copy_len;
		len -= copy_len;
	}

	/* garbage out check */
	__rte_mbuf_sanity_check(mc, 1);
	return mc;
}
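
/*
 * Illustrative sketch (hypothetical names): a full deep copy of packet
 * "m" into pool "mp", and a bounded copy of only its first 64 bytes;
 * a length larger than the packet is silently truncated.
 *
 *	struct rte_mbuf *dup = rte_pktmbuf_copy(m, mp, 0, UINT32_MAX);
 *	struct rte_mbuf *head = rte_pktmbuf_copy(m, mp, 0, 64);
 */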
/* dump a mbuf on console */
void
rte_pktmbuf_dump(FILE *f, const struct rte_mbuf *m, unsigned int dump_len)
{
	unsigned int len;
	unsigned int nb_segs;

	__rte_mbuf_sanity_check(m, 1);

	fprintf(f, "dump mbuf at %p, iova=%#"PRIx64", buf_len=%u\n",
		m, m->buf_iova, m->buf_len);
	fprintf(f, "  pkt_len=%u, ol_flags=%#"PRIx64", nb_segs=%u, port=%u",
		m->pkt_len, m->ol_flags, m->nb_segs, m->port);

	if (m->ol_flags & (PKT_RX_VLAN | PKT_TX_VLAN))
		fprintf(f, ", vlan_tci=%u", m->vlan_tci);

	fprintf(f, ", ptype=%#"PRIx32"\n", m->packet_type);

	nb_segs = m->nb_segs;

	while (m && nb_segs != 0) {
		__rte_mbuf_sanity_check(m, 0);

		fprintf(f, "  segment at %p, data=%p, len=%u, off=%u, refcnt=%u\n",
			m, rte_pktmbuf_mtod(m, void *),
			m->data_len, m->data_off, rte_mbuf_refcnt_read(m));

		len = dump_len;
		if (len > m->data_len)
			len = m->data_len;
		if (len != 0)
			rte_hexdump(f, NULL, rte_pktmbuf_mtod(m, void *), len);
		dump_len -= len;
		m = m->next;
		nb_segs--;
	}
}
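
/*
 * Illustrative sketch: print the metadata of mbuf "m" (hypothetical) and
 * hexdump its first 32 bytes of packet data to stdout:
 *
 *	rte_pktmbuf_dump(stdout, m, 32);
 */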
/* read len data bytes in a mbuf at specified offset (internal) */
const void *__rte_pktmbuf_read(const struct rte_mbuf *m, uint32_t off,
	uint32_t len, void *buf)
{
	const struct rte_mbuf *seg = m;
	uint32_t buf_off = 0, copy_len;

	if (off + len > rte_pktmbuf_pkt_len(m))
		return NULL;

	while (off >= rte_pktmbuf_data_len(seg)) {
		off -= rte_pktmbuf_data_len(seg);
		seg = seg->next;
	}

	if (off + len <= rte_pktmbuf_data_len(seg))
		return rte_pktmbuf_mtod_offset(seg, char *, off);

	/* rare case: header is split among several segments */
	while (len > 0) {
		copy_len = rte_pktmbuf_data_len(seg) - off;
		if (copy_len > len)
			copy_len = len;
		rte_memcpy((char *)buf + buf_off,
			rte_pktmbuf_mtod_offset(seg, char *, off), copy_len);
		off = 0;
		buf_off += copy_len;
		len -= copy_len;
		seg = seg->next;
	}

	return buf;
}
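
/*
 * Illustrative sketch: callers use the inline rte_pktmbuf_read() wrapper
 * from rte_mbuf.h, which returns a direct pointer when the requested
 * bytes are contiguous in the first segment and falls back to the helper
 * above (copying into the caller's buffer) otherwise:
 *
 *	struct rte_ether_hdr eh_copy;
 *	const struct rte_ether_hdr *eh = rte_pktmbuf_read(m, 0,
 *		sizeof(eh_copy), &eh_copy);
 *
 *	if (eh == NULL)
 *		... packet shorter than an Ethernet header ...
 */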
/*
 * Get the name of a RX offload flag. Must be kept synchronized with flag
 * definitions in rte_mbuf.h.
 */
const char *rte_get_rx_ol_flag_name(uint64_t mask)
{
	switch (mask) {
	case PKT_RX_VLAN: return "PKT_RX_VLAN";
	case PKT_RX_RSS_HASH: return "PKT_RX_RSS_HASH";
	case PKT_RX_FDIR: return "PKT_RX_FDIR";
	case PKT_RX_L4_CKSUM_BAD: return "PKT_RX_L4_CKSUM_BAD";
	case PKT_RX_L4_CKSUM_GOOD: return "PKT_RX_L4_CKSUM_GOOD";
	case PKT_RX_L4_CKSUM_NONE: return "PKT_RX_L4_CKSUM_NONE";
	case PKT_RX_IP_CKSUM_BAD: return "PKT_RX_IP_CKSUM_BAD";
	case PKT_RX_IP_CKSUM_GOOD: return "PKT_RX_IP_CKSUM_GOOD";
	case PKT_RX_IP_CKSUM_NONE: return "PKT_RX_IP_CKSUM_NONE";
	case PKT_RX_OUTER_IP_CKSUM_BAD: return "PKT_RX_OUTER_IP_CKSUM_BAD";
	case PKT_RX_VLAN_STRIPPED: return "PKT_RX_VLAN_STRIPPED";
	case PKT_RX_IEEE1588_PTP: return "PKT_RX_IEEE1588_PTP";
	case PKT_RX_IEEE1588_TMST: return "PKT_RX_IEEE1588_TMST";
	case PKT_RX_FDIR_ID: return "PKT_RX_FDIR_ID";
	case PKT_RX_FDIR_FLX: return "PKT_RX_FDIR_FLX";
	case PKT_RX_QINQ_STRIPPED: return "PKT_RX_QINQ_STRIPPED";
	case PKT_RX_QINQ: return "PKT_RX_QINQ";
	case PKT_RX_LRO: return "PKT_RX_LRO";
	case PKT_RX_SEC_OFFLOAD: return "PKT_RX_SEC_OFFLOAD";
	case PKT_RX_SEC_OFFLOAD_FAILED: return "PKT_RX_SEC_OFFLOAD_FAILED";
	case PKT_RX_OUTER_L4_CKSUM_BAD: return "PKT_RX_OUTER_L4_CKSUM_BAD";
	case PKT_RX_OUTER_L4_CKSUM_GOOD: return "PKT_RX_OUTER_L4_CKSUM_GOOD";
	case PKT_RX_OUTER_L4_CKSUM_INVALID:
		return "PKT_RX_OUTER_L4_CKSUM_INVALID";

	default: return NULL;
	}
}
struct flag_mask {
	uint64_t flag;
	uint64_t mask;
	const char *default_name;
};
/* write the list of rx ol flags in buffer buf */
int
rte_get_rx_ol_flag_list(uint64_t mask, char *buf, size_t buflen)
{
	const struct flag_mask rx_flags[] = {
		{ PKT_RX_VLAN, PKT_RX_VLAN, NULL },
		{ PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, NULL },
		{ PKT_RX_FDIR, PKT_RX_FDIR, NULL },
		{ PKT_RX_L4_CKSUM_BAD, PKT_RX_L4_CKSUM_MASK, NULL },
		{ PKT_RX_L4_CKSUM_GOOD, PKT_RX_L4_CKSUM_MASK, NULL },
		{ PKT_RX_L4_CKSUM_NONE, PKT_RX_L4_CKSUM_MASK, NULL },
		{ PKT_RX_L4_CKSUM_UNKNOWN, PKT_RX_L4_CKSUM_MASK,
		  "PKT_RX_L4_CKSUM_UNKNOWN" },
		{ PKT_RX_IP_CKSUM_BAD, PKT_RX_IP_CKSUM_MASK, NULL },
		{ PKT_RX_IP_CKSUM_GOOD, PKT_RX_IP_CKSUM_MASK, NULL },
		{ PKT_RX_IP_CKSUM_NONE, PKT_RX_IP_CKSUM_MASK, NULL },
		{ PKT_RX_IP_CKSUM_UNKNOWN, PKT_RX_IP_CKSUM_MASK,
		  "PKT_RX_IP_CKSUM_UNKNOWN" },
		{ PKT_RX_OUTER_IP_CKSUM_BAD, PKT_RX_OUTER_IP_CKSUM_BAD, NULL },
		{ PKT_RX_VLAN_STRIPPED, PKT_RX_VLAN_STRIPPED, NULL },
		{ PKT_RX_IEEE1588_PTP, PKT_RX_IEEE1588_PTP, NULL },
		{ PKT_RX_IEEE1588_TMST, PKT_RX_IEEE1588_TMST, NULL },
		{ PKT_RX_FDIR_ID, PKT_RX_FDIR_ID, NULL },
		{ PKT_RX_FDIR_FLX, PKT_RX_FDIR_FLX, NULL },
		{ PKT_RX_QINQ_STRIPPED, PKT_RX_QINQ_STRIPPED, NULL },
		{ PKT_RX_LRO, PKT_RX_LRO, NULL },
		{ PKT_RX_SEC_OFFLOAD, PKT_RX_SEC_OFFLOAD, NULL },
		{ PKT_RX_SEC_OFFLOAD_FAILED, PKT_RX_SEC_OFFLOAD_FAILED, NULL },
		{ PKT_RX_QINQ, PKT_RX_QINQ, NULL },
		{ PKT_RX_OUTER_L4_CKSUM_BAD, PKT_RX_OUTER_L4_CKSUM_MASK, NULL },
		{ PKT_RX_OUTER_L4_CKSUM_GOOD, PKT_RX_OUTER_L4_CKSUM_MASK,
		  NULL },
		{ PKT_RX_OUTER_L4_CKSUM_INVALID, PKT_RX_OUTER_L4_CKSUM_MASK,
		  NULL },
		{ PKT_RX_OUTER_L4_CKSUM_UNKNOWN, PKT_RX_OUTER_L4_CKSUM_MASK,
		  "PKT_RX_OUTER_L4_CKSUM_UNKNOWN" },
	};
	const char *name;
	unsigned int i;
	int ret;

	if (buflen == 0)
		return -1;

	buf[0] = '\0';
	for (i = 0; i < RTE_DIM(rx_flags); i++) {
		if ((mask & rx_flags[i].mask) != rx_flags[i].flag)
			continue;
		name = rte_get_rx_ol_flag_name(rx_flags[i].flag);
		if (name == NULL)
			name = rx_flags[i].default_name;
		ret = snprintf(buf, buflen, "%s ", name);
		if (ret < 0)
			return -1;
		if ((size_t)ret >= buflen)
			return -1;
		buf += ret;
		buflen -= ret;
	}

	return 0;
}
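
/*
 * Illustrative sketch: render the RX offload flags of a received mbuf
 * "m" (hypothetical) as a space-separated string:
 *
 *	char flags_str[256];
 *
 *	if (rte_get_rx_ol_flag_list(m->ol_flags, flags_str,
 *			sizeof(flags_str)) == 0)
 *		printf("rx flags: %s\n", flags_str);
 */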
/*
 * Get the name of a TX offload flag. Must be kept synchronized with flag
 * definitions in rte_mbuf.h.
 */
const char *rte_get_tx_ol_flag_name(uint64_t mask)
{
	switch (mask) {
	case PKT_TX_VLAN: return "PKT_TX_VLAN";
	case PKT_TX_IP_CKSUM: return "PKT_TX_IP_CKSUM";
	case PKT_TX_TCP_CKSUM: return "PKT_TX_TCP_CKSUM";
	case PKT_TX_SCTP_CKSUM: return "PKT_TX_SCTP_CKSUM";
	case PKT_TX_UDP_CKSUM: return "PKT_TX_UDP_CKSUM";
	case PKT_TX_IEEE1588_TMST: return "PKT_TX_IEEE1588_TMST";
	case PKT_TX_TCP_SEG: return "PKT_TX_TCP_SEG";
	case PKT_TX_IPV4: return "PKT_TX_IPV4";
	case PKT_TX_IPV6: return "PKT_TX_IPV6";
	case PKT_TX_OUTER_IP_CKSUM: return "PKT_TX_OUTER_IP_CKSUM";
	case PKT_TX_OUTER_IPV4: return "PKT_TX_OUTER_IPV4";
	case PKT_TX_OUTER_IPV6: return "PKT_TX_OUTER_IPV6";
	case PKT_TX_TUNNEL_VXLAN: return "PKT_TX_TUNNEL_VXLAN";
	case PKT_TX_TUNNEL_GTP: return "PKT_TX_TUNNEL_GTP";
	case PKT_TX_TUNNEL_GRE: return "PKT_TX_TUNNEL_GRE";
	case PKT_TX_TUNNEL_IPIP: return "PKT_TX_TUNNEL_IPIP";
	case PKT_TX_TUNNEL_GENEVE: return "PKT_TX_TUNNEL_GENEVE";
	case PKT_TX_TUNNEL_MPLSINUDP: return "PKT_TX_TUNNEL_MPLSINUDP";
	case PKT_TX_TUNNEL_VXLAN_GPE: return "PKT_TX_TUNNEL_VXLAN_GPE";
	case PKT_TX_TUNNEL_IP: return "PKT_TX_TUNNEL_IP";
	case PKT_TX_TUNNEL_UDP: return "PKT_TX_TUNNEL_UDP";
	case PKT_TX_QINQ: return "PKT_TX_QINQ";
	case PKT_TX_MACSEC: return "PKT_TX_MACSEC";
	case PKT_TX_SEC_OFFLOAD: return "PKT_TX_SEC_OFFLOAD";
	case PKT_TX_UDP_SEG: return "PKT_TX_UDP_SEG";
	case PKT_TX_OUTER_UDP_CKSUM: return "PKT_TX_OUTER_UDP_CKSUM";
	default: return NULL;
	}
}
/* write the list of tx ol flags in buffer buf */
int
rte_get_tx_ol_flag_list(uint64_t mask, char *buf, size_t buflen)
{
	const struct flag_mask tx_flags[] = {
		{ PKT_TX_VLAN, PKT_TX_VLAN, NULL },
		{ PKT_TX_IP_CKSUM, PKT_TX_IP_CKSUM, NULL },
		{ PKT_TX_TCP_CKSUM, PKT_TX_L4_MASK, NULL },
		{ PKT_TX_SCTP_CKSUM, PKT_TX_L4_MASK, NULL },
		{ PKT_TX_UDP_CKSUM, PKT_TX_L4_MASK, NULL },
		{ PKT_TX_L4_NO_CKSUM, PKT_TX_L4_MASK, "PKT_TX_L4_NO_CKSUM" },
		{ PKT_TX_IEEE1588_TMST, PKT_TX_IEEE1588_TMST, NULL },
		{ PKT_TX_TCP_SEG, PKT_TX_TCP_SEG, NULL },
		{ PKT_TX_IPV4, PKT_TX_IPV4, NULL },
		{ PKT_TX_IPV6, PKT_TX_IPV6, NULL },
		{ PKT_TX_OUTER_IP_CKSUM, PKT_TX_OUTER_IP_CKSUM, NULL },
		{ PKT_TX_OUTER_IPV4, PKT_TX_OUTER_IPV4, NULL },
		{ PKT_TX_OUTER_IPV6, PKT_TX_OUTER_IPV6, NULL },
		{ PKT_TX_TUNNEL_VXLAN, PKT_TX_TUNNEL_MASK, NULL },
		{ PKT_TX_TUNNEL_GTP, PKT_TX_TUNNEL_MASK, NULL },
		{ PKT_TX_TUNNEL_GRE, PKT_TX_TUNNEL_MASK, NULL },
		{ PKT_TX_TUNNEL_IPIP, PKT_TX_TUNNEL_MASK, NULL },
		{ PKT_TX_TUNNEL_GENEVE, PKT_TX_TUNNEL_MASK, NULL },
		{ PKT_TX_TUNNEL_MPLSINUDP, PKT_TX_TUNNEL_MASK, NULL },
		{ PKT_TX_TUNNEL_VXLAN_GPE, PKT_TX_TUNNEL_MASK, NULL },
		{ PKT_TX_TUNNEL_IP, PKT_TX_TUNNEL_MASK, NULL },
		{ PKT_TX_TUNNEL_UDP, PKT_TX_TUNNEL_MASK, NULL },
		{ PKT_TX_QINQ, PKT_TX_QINQ, NULL },
		{ PKT_TX_MACSEC, PKT_TX_MACSEC, NULL },
		{ PKT_TX_SEC_OFFLOAD, PKT_TX_SEC_OFFLOAD, NULL },
		{ PKT_TX_UDP_SEG, PKT_TX_UDP_SEG, NULL },
		{ PKT_TX_OUTER_UDP_CKSUM, PKT_TX_OUTER_UDP_CKSUM, NULL },
	};
	const char *name;
	unsigned int i;
	int ret;

	if (buflen == 0)
		return -1;

	buf[0] = '\0';
	for (i = 0; i < RTE_DIM(tx_flags); i++) {
		if ((mask & tx_flags[i].mask) != tx_flags[i].flag)
			continue;
		name = rte_get_tx_ol_flag_name(tx_flags[i].flag);
		if (name == NULL)
			name = tx_flags[i].default_name;
		ret = snprintf(buf, buflen, "%s ", name);
		if (ret < 0)
			return -1;
		if ((size_t)ret >= buflen)
			return -1;
		buf += ret;
		buflen -= ret;
	}

	return 0;
}
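
/*
 * Illustrative sketch, mirroring the RX variant above: render the TX
 * offload flags requested on an outgoing mbuf "m" (hypothetical):
 *
 *	char flags_str[256];
 *
 *	if (rte_get_tx_ol_flag_list(m->ol_flags, flags_str,
 *			sizeof(flags_str)) == 0)
 *		printf("tx flags: %s\n", flags_str);
 */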