-txq_calc_offload(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
-		 uint16_t pkts_n, uint8_t *cs_flags)
+txq_calc_offload(struct rte_mbuf **pkts, uint16_t pkts_n, uint8_t *cs_flags)
		if (txq->offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
			n = txq_count_contig_single_seg(&pkts[nb_tx], n);
		if (txq->offloads & MLX5_VEC_TX_CKSUM_OFFLOAD_CAP)
-			n = txq_calc_offload(txq, &pkts[nb_tx], n, &cs_flags);
+			n = txq_calc_offload(&pkts[nb_tx], n, &cs_flags);
		ret = txq_burst_v(txq, &pkts[nb_tx], n, cs_flags);
		nb_tx += ret;
		if (!ret)
			break;
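Judging from the call site above, the reduced txq_calc_offload() plays the same role as txq_count_contig_single_seg(): it trims the burst to a leading run of packets, here packets requesting identical checksum offloads, and reports those flags through *cs_flags so a single value covers the whole run. A minimal standalone sketch of that pattern, using hypothetical simplified types rather than the driver's rte_mbuf and flag definitions, might look like:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for struct rte_mbuf and the checksum-offload bits. */
struct pkt {
	uint64_t ol_flags;
};

#define CKSUM_OL_MASK 0x0fULL /* assumed mask covering the checksum-request bits */

/*
 * Count the leading run of packets whose checksum-offload request bits match
 * the first packet, and report those bits through *cs_flags.  Note that no
 * queue argument is needed: the result depends only on the packets themselves.
 */
static unsigned int
calc_offload(struct pkt **pkts, uint16_t pkts_n, uint8_t *cs_flags)
{
	unsigned int pos;

	if (!pkts_n)
		return 0;
	for (pos = 1; pos < pkts_n; ++pos)
		if ((pkts[pos]->ol_flags ^ pkts[0]->ol_flags) & CKSUM_OL_MASK)
			break;
	*cs_flags = (uint8_t)(pkts[0]->ol_flags & CKSUM_OL_MASK);
	return pos;
}

int main(void)
{
	struct pkt a = { .ol_flags = 0x3 };
	struct pkt b = { .ol_flags = 0x3 };
	struct pkt c = { .ol_flags = 0x0 };
	struct pkt *burst[] = { &a, &b, &c };
	uint8_t cs_flags = 0;
	unsigned int n = calc_offload(burst, 3, &cs_flags);

	printf("same-flag run: %u packets, cs_flags: 0x%x\n", n, cs_flags);
	return 0;
}

Dropping the txq argument is consistent with this view: the computed flags depend only on the packet array, not on any Tx queue state.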
 *
 * @return
 *   1 if supported, negative errno value if not.
 */
int __attribute__((cold))
{
	struct mlx5_rxq_ctrl *ctrl =
		container_of(rxq, struct mlx5_rxq_ctrl, rxq);
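The ctrl lookup above is the usual container_of idiom: from a pointer to the rxq member embedded inside an mlx5_rxq_ctrl, it recovers a pointer to the enclosing control structure. A self-contained illustration of the idiom, using generic hypothetical structures rather than the driver's, could be:

#include <stddef.h>
#include <stdio.h>

/* Generic container_of: recover the enclosing struct from a member pointer. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Hypothetical stand-ins for mlx5_rxq_data / mlx5_rxq_ctrl. */
struct rxq_data {
	unsigned int elts_n;
};

struct rxq_ctrl {
	int irq;
	struct rxq_data rxq; /* embedded data-path structure */
};

int main(void)
{
	struct rxq_ctrl ctrl = { .irq = 1, .rxq = { .elts_n = 256 } };
	struct rxq_data *rxq = &ctrl.rxq;
	/* Walk back from the embedded member to its owning control structure. */
	struct rxq_ctrl *found = container_of(rxq, struct rxq_ctrl, rxq);

	printf("recovered ctrl: irq=%d elts_n=%u\n",
	       found->irq, found->rxq.elts_n);
	return 0;
}

This lets the data-path structure be passed around on its own while control-path code can still reach the owning context without a separate back-pointer.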
 *
 * @return
 *   1 if supported, negative errno value if not.
 */
int __attribute__((cold))
	/* All the configured queues should support. */
	for (i = 0; i < priv->rxqs_n; ++i) {
		struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];

		if (!rxq)
			continue;
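The loop above applies an all-or-nothing policy: the vectorized Rx path is only selected when every configured queue passes the per-queue check, and empty (unconfigured) slots are simply skipped. A standalone sketch of that gating pattern, with hypothetical queue types and a placeholder per-queue check standing in for the driver's, might be:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical Rx queue descriptor; a NULL slot means "not configured". */
struct rxq {
	int scattered; /* e.g. multi-segment Rx typically defeats the vector path */
};

/* Placeholder for the driver's per-queue vectorization check. */
static int
rxq_vec_supported(struct rxq *rxq)
{
	return rxq->scattered ? -ENOTSUP : 1;
}

/* Every configured queue must pass, otherwise fall back (-ENOTSUP). */
static int
all_rxqs_vec_supported(struct rxq **rxqs, uint16_t rxqs_n)
{
	uint16_t i;

	for (i = 0; i < rxqs_n; ++i) {
		if (!rxqs[i])
			continue;
		if (rxq_vec_supported(rxqs[i]) < 0)
			return -ENOTSUP;
	}
	return 1;
}

int main(void)
{
	struct rxq q0 = { .scattered = 0 };
	struct rxq q2 = { .scattered = 1 };
	struct rxq *rxqs[] = { &q0, NULL, &q2 };

	printf("vector Rx usable: %s\n",
	       all_rxqs_vec_supported(rxqs, 3) > 0 ? "yes" : "no");
	return 0;
}

The burst function is chosen per device rather than per queue, which is why a single non-conforming queue is enough to keep the whole port on the scalar path.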