With the API and ABI freeze ahead, it will be good to reserve
some bits in the private structure for future use.
Otherwise we will potentially need to maintain two different
private structures during the 2020 period.
There is already one use case for those reserved bits [1].
The reserved field should be set to 0 by the user.
[1] https://patches.dpdk.org/patch/63077/
Signed-off-by: Shahaf Shuler <shahafs@mellanox.com>
Acked-by: Olivier Matz <olivier.matz@6wind.com>
The lock-free stack implementation is enabled for aarch64 platforms.
The lock-free stack implementation is enabled for aarch64 platforms.
+* **Extended pktmbuf mempool private structure.**
+
+ The rte_pktmbuf_pool_private structure was extended to include a flags
+ field for future compatibility.
+ As of the 19.11 release this field is reserved and should be set to 0
+ by the user.
+
* **Changed mempool allocation behaviour.**
Objects no longer cross pages by default.
* **Changed mempool allocation behaviour.**
Objects no longer cross pages by default.
if (mp == NULL)
return NULL;
if (mp == NULL)
return NULL;
+ memset(&mbp_priv, 0, sizeof(mbp_priv));
mbp_priv.mbuf_data_room_size = mbuf_seg_size;
mbp_priv.mbuf_priv_size = 0;
rte_pktmbuf_pool_init(mp, &mbp_priv);
mbp_priv.mbuf_data_room_size = mbuf_seg_size;
mbp_priv.mbuf_priv_size = 0;
rte_pktmbuf_pool_init(mp, &mbp_priv);
/* if no structure is provided, assume no mbuf private area */
user_mbp_priv = opaque_arg;
if (user_mbp_priv == NULL) {
/* if no structure is provided, assume no mbuf private area */
user_mbp_priv = opaque_arg;
if (user_mbp_priv == NULL) {
- default_mbp_priv.mbuf_priv_size = 0;
+ memset(&default_mbp_priv, 0, sizeof(default_mbp_priv));
if (mp->elt_size > sizeof(struct rte_mbuf))
roomsz = mp->elt_size - sizeof(struct rte_mbuf);
else
if (mp->elt_size > sizeof(struct rte_mbuf))
roomsz = mp->elt_size - sizeof(struct rte_mbuf);
else
RTE_ASSERT(mp->elt_size >= sizeof(struct rte_mbuf) +
user_mbp_priv->mbuf_data_room_size +
user_mbp_priv->mbuf_priv_size);
RTE_ASSERT(mp->elt_size >= sizeof(struct rte_mbuf) +
user_mbp_priv->mbuf_data_room_size +
user_mbp_priv->mbuf_priv_size);
+ RTE_ASSERT(user_mbp_priv->flags == 0);
mbp_priv = rte_mempool_get_priv(mp);
memcpy(mbp_priv, user_mbp_priv, sizeof(*mbp_priv));
mbp_priv = rte_mempool_get_priv(mp);
memcpy(mbp_priv, user_mbp_priv, sizeof(*mbp_priv));
}
elt_size = sizeof(struct rte_mbuf) + (unsigned)priv_size +
(unsigned)data_room_size;
}
elt_size = sizeof(struct rte_mbuf) + (unsigned)priv_size +
(unsigned)data_room_size;
+ memset(&mbp_priv, 0, sizeof(mbp_priv));
mbp_priv.mbuf_data_room_size = data_room_size;
mbp_priv.mbuf_priv_size = priv_size;
mbp_priv.mbuf_data_room_size = data_room_size;
mbp_priv.mbuf_priv_size = priv_size;
struct rte_pktmbuf_pool_private {
uint16_t mbuf_data_room_size; /**< Size of data space in each mbuf. */
uint16_t mbuf_priv_size; /**< Size of private area in each mbuf. */
struct rte_pktmbuf_pool_private {
uint16_t mbuf_data_room_size; /**< Size of data space in each mbuf. */
uint16_t mbuf_priv_size; /**< Size of private area in each mbuf. */
+ uint32_t flags; /**< Reserved for future use; must be set to 0 by the user as of 19.11. */
};
#ifdef RTE_LIBRTE_MBUF_DEBUG
};
#ifdef RTE_LIBRTE_MBUF_DEBUG