+ /*
+ * For the buffers beyond descriptions offset is zero,
+ * the first buffer contains head room.
+ */
+ buf_len = rte_pktmbuf_data_room_size(qs_seg->mp);
+ offset = (tmpl->rxq.rxseg_n >= n_seg ? 0 : qs_seg->offset) +
+ (tmpl->rxq.rxseg_n ? 0 : RTE_PKTMBUF_HEADROOM);
+ /*
+ * For the buffers beyond descriptions the length is
+ * pool buffer length, zero lengths are replaced with
+ * pool buffer length either.
+ */
+ seg_len = tmpl->rxq.rxseg_n >= n_seg ? buf_len :
+ qs_seg->length ?
+ qs_seg->length :
+ (buf_len - offset);
+ /* Check is done in long int, no overflows. */
+ if (buf_len < seg_len + offset) {
+ DRV_LOG(ERR, "port %u Rx queue %u: Split offset/length "
+ "%u/%u can't be satisfied",
+ dev->data->port_id, idx,
+ qs_seg->length, qs_seg->offset);
+ rte_errno = EINVAL;
+ goto error;
+ }
+ if (seg_len > tail_len)
+ seg_len = buf_len - offset;
+ if (++tmpl->rxq.rxseg_n > MLX5_MAX_RXQ_NSEG) {
+ DRV_LOG(ERR,
+ "port %u too many SGEs (%u) needed to handle"
+ " requested maximum packet size %u, the maximum"
+ " supported are %u", dev->data->port_id,
+ tmpl->rxq.rxseg_n, max_rx_pkt_len,
+ MLX5_MAX_RXQ_NSEG);
+ rte_errno = ENOTSUP;
+ goto error;
+ }
+ /* Build the actual scattering element in the queue object. */
+ hw_seg->mp = qs_seg->mp;
+ MLX5_ASSERT(offset <= UINT16_MAX);
+ MLX5_ASSERT(seg_len <= UINT16_MAX);
+ hw_seg->offset = (uint16_t)offset;
+ hw_seg->length = (uint16_t)seg_len;
+ /*
+ * Advance the segment descriptor, the padding is the based
+ * on the attributes of the last descriptor.
+ */
+ if (tmpl->rxq.rxseg_n < n_seg)
+ qs_seg++;
+ tail_len -= RTE_MIN(tail_len, seg_len);
+ } while (tail_len || !rte_is_power_of_2(tmpl->rxq.rxseg_n));
+ MLX5_ASSERT(tmpl->rxq.rxseg_n &&
+ tmpl->rxq.rxseg_n <= MLX5_MAX_RXQ_NSEG);
+ if (tmpl->rxq.rxseg_n > 1 && !(offloads & DEV_RX_OFFLOAD_SCATTER)) {