+ if ((frag_field & RTE_IPV4_HDR_DF_FLAG) != 0) {
+ m->packet_type |= RTE_PTYPE_L4_NONFRAG;
+
+ /* If the IPv4 header has the DF flag enabled and TSO support is
+ * disabled, partial checksum should not be calculated.
+ */
+ if (!tx_ring->adapter->offloads.tso4_supported)
+ continue;
+ }
+
+ if ((ol_flags & ENA_TX_OFFLOAD_NOTSUP_MASK) != 0 ||
+ (ol_flags & PKT_TX_L4_MASK) ==
+ PKT_TX_SCTP_CKSUM) {
+ rte_errno = ENOTSUP;
+ return i;
+ }
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+ ret = rte_validate_tx_offload(m);
+ if (ret != 0) {
+ rte_errno = -ret;
+ return i;
+ }
+#endif
+
+ /* If the packet is to be TSO'ed and DF is not set (DF=0), the
+ * hardware must be provided with a partial checksum; otherwise,
+ * it will take care of the necessary calculations itself.
+ */
+
+ ret = rte_net_intel_cksum_flags_prepare(m,
+ ol_flags & ~PKT_TX_TCP_SEG);
+ if (ret != 0) {
+ rte_errno = -ret;
+ return i;
+ }
+ }
+
+ return i;
+}
+
+static void ena_update_hints(struct ena_adapter *adapter,
+ struct ena_admin_ena_hw_hints *hints)
+{
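+ /* Hints from the device are given in msecs; convert them to the
+ * units used by the driver (usecs or timer ticks).
+ */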
+ if (hints->admin_completion_tx_timeout)
+ adapter->ena_dev.admin_queue.completion_timeout =
+ hints->admin_completion_tx_timeout * 1000;
+
+ if (hints->mmio_read_timeout)
+ /* Convert from msecs to usecs */
+ adapter->ena_dev.mmio_read.reg_read_to =
+ hints->mmio_read_timeout * 1000;
+
+ if (hints->driver_watchdog_timeout) {
+ if (hints->driver_watchdog_timeout == ENA_HW_HINTS_NO_TIMEOUT)
+ adapter->keep_alive_timeout = ENA_HW_HINTS_NO_TIMEOUT;
+ else
+ /* Convert msecs to ticks */
+ adapter->keep_alive_timeout =
+ (hints->driver_watchdog_timeout *
+ rte_get_timer_hz()) / 1000;
+ }
+}
+
+static int ena_check_and_linearize_mbuf(struct ena_ring *tx_ring,
+ struct rte_mbuf *mbuf)
+{
+ struct ena_com_dev *ena_dev;
+ int num_segments, header_len, rc;
+
+ ena_dev = &tx_ring->adapter->ena_dev;
+ num_segments = mbuf->nb_segs;
+ header_len = mbuf->data_len;
+
+ if (likely(num_segments < tx_ring->sgl_size))
+ return 0;
+
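+ /* In LLQ mode the packet header is pushed to the device and does not
+ * consume an SGL entry, so a packet with exactly sgl_size segments can
+ * still be sent without linearization if its first segment fits within
+ * tx_max_header_size.
+ */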
+ if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV &&
+ (num_segments == tx_ring->sgl_size) &&
+ (header_len < tx_ring->tx_max_header_size))
+ return 0;
+
+ ++tx_ring->tx_stats.linearize;
+ rc = rte_pktmbuf_linearize(mbuf);
+ if (unlikely(rc)) {
+ PMD_DRV_LOG(WARNING, "Mbuf linearize failed\n");
+ rte_atomic64_inc(&tx_ring->adapter->drv_stats->ierrors);
+ ++tx_ring->tx_stats.linearize_failed;
+ return rc;
+ }
+
+ return rc;
+}
+
+static void ena_tx_map_mbuf(struct ena_ring *tx_ring,
+ struct ena_tx_buffer *tx_info,
+ struct rte_mbuf *mbuf,
+ void **push_header,
+ uint16_t *header_len)
+{
+ struct ena_com_buf *ena_buf;
+ uint16_t delta, seg_len, push_len;
+
+ delta = 0;
+ seg_len = mbuf->data_len;
+
+ tx_info->mbuf = mbuf;
+ ena_buf = tx_info->bufs;
+
+ if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
+ /*
+ * The Tx header might be (and in most cases will be) smaller than
+ * tx_max_header_size. It is not a problem to send more data to the
+ * device than is actually needed when the mbuf size is greater than
+ * tx_max_header_size.
+ */
+ push_len = RTE_MIN(mbuf->pkt_len, tx_ring->tx_max_header_size);
+ *header_len = push_len;
+
+ if (likely(push_len <= seg_len)) {
+ /* If the push header fits within a single segment, just point
+ * it to the first mbuf's data.
+ */
+ *push_header = rte_pktmbuf_mtod(mbuf, uint8_t *);
+ } else {
+ /* If the push header spans several segments, copy it to the
+ * intermediate buffer.
+ */
+ rte_pktmbuf_read(mbuf, 0, push_len,
+ tx_ring->push_buf_intermediate_buf);
+ *push_header = tx_ring->push_buf_intermediate_buf;
+ delta = push_len - seg_len;
+ }
+ } else {
+ *push_header = NULL;
+ *header_len = 0;
+ push_len = 0;
+ }
+
+ /* Process the first segment, taking the pushed header into account */
+ if (seg_len > push_len) {
+ ena_buf->paddr = mbuf->buf_iova +
+ mbuf->data_off +
+ push_len;
+ ena_buf->len = seg_len - push_len;
+ ena_buf++;
+ tx_info->num_of_bufs++;
+ }
+
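+ /* Map the remaining segments; 'delta' tracks how many of their bytes
+ * were already consumed by the pushed header.
+ */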
+ while ((mbuf = mbuf->next) != NULL) {
+ seg_len = mbuf->data_len;
+
+ /* Skip mbufs whose whole data was pushed as part of the header */
+ if (unlikely(delta > seg_len)) {
+ delta -= seg_len;
+ continue;
+ }
+
+ ena_buf->paddr = mbuf->buf_iova + mbuf->data_off + delta;
+ ena_buf->len = seg_len - delta;
+ ena_buf++;
+ tx_info->num_of_bufs++;
+
+ delta = 0;
+ }
+}
+
+static int ena_xmit_mbuf(struct ena_ring *tx_ring, struct rte_mbuf *mbuf)
+{
+ struct ena_tx_buffer *tx_info;
+ struct ena_com_tx_ctx ena_tx_ctx = { { 0 } };
+ uint16_t next_to_use;
+ uint16_t header_len;
+ uint16_t req_id;
+ void *push_header;
+ int nb_hw_desc;
+ int rc;
+
+ rc = ena_check_and_linearize_mbuf(tx_ring, mbuf);
+ if (unlikely(rc))
+ return rc;
+
+ next_to_use = tx_ring->next_to_use;
+
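+ /* Pick a free request ID for this packet and reset its buffer count */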
+ req_id = tx_ring->empty_tx_reqs[next_to_use];
+ tx_info = &tx_ring->tx_buffer_info[req_id];
+ tx_info->num_of_bufs = 0;
+
+ ena_tx_map_mbuf(tx_ring, tx_info, mbuf, &push_header, &header_len);
+
+ ena_tx_ctx.ena_bufs = tx_info->bufs;
+ ena_tx_ctx.push_header = push_header;
+ ena_tx_ctx.num_bufs = tx_info->num_of_bufs;
+ ena_tx_ctx.req_id = req_id;
+ ena_tx_ctx.header_len = header_len;
+
+ /* Set Tx offload flags, if applicable */
+ ena_tx_mbuf_prepare(mbuf, &ena_tx_ctx, tx_ring->offloads,
+ tx_ring->disable_meta_caching);
+
+ if (unlikely(ena_com_is_doorbell_needed(tx_ring->ena_com_io_sq,
+ &ena_tx_ctx))) {
+ PMD_DRV_LOG(DEBUG,
+ "LLQ Tx max burst size of queue %d achieved, writing doorbell to send burst\n",
+ tx_ring->id);
+ ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
+ }
+
+ /* Prepare the packet's descriptors for the DMA engine */
+ rc = ena_com_prepare_tx(tx_ring->ena_com_io_sq, &ena_tx_ctx,
+ &nb_hw_desc);
+ if (unlikely(rc)) {
+ ++tx_ring->tx_stats.prepare_ctx_err;
+ return rc;
+ }
+
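+ /* Remember how many HW descriptors this packet consumed; the Tx
+ * cleanup path acknowledges them back to ena_com upon completion.
+ */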
+ tx_info->tx_descs = nb_hw_desc;
+
+ tx_ring->tx_stats.cnt++;
+ tx_ring->tx_stats.bytes += mbuf->pkt_len;
+
+ tx_ring->next_to_use = ENA_IDX_NEXT_MASKED(next_to_use,
+ tx_ring->size_mask);
+
+ return 0;
+}
+
+static void ena_tx_cleanup(struct ena_ring *tx_ring)
+{
+ unsigned int cleanup_budget;
+ unsigned int total_tx_descs = 0;
+ uint16_t next_to_clean = tx_ring->next_to_clean;
+
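+ /* Process at most the smaller of ring_size / ENA_REFILL_THRESH_DIVIDER
+ * and ENA_REFILL_THRESH_PACKET descriptors per cleanup call.
+ */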
+ cleanup_budget = RTE_MIN(tx_ring->ring_size / ENA_REFILL_THRESH_DIVIDER,
+ (unsigned int)ENA_REFILL_THRESH_PACKET);
+
+ while (likely(total_tx_descs < cleanup_budget)) {
+ struct rte_mbuf *mbuf;
+ struct ena_tx_buffer *tx_info;
+ uint16_t req_id;
+
+ if (ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq, &req_id) != 0)
+ break;
+
+ if (unlikely(validate_tx_req_id(tx_ring, req_id) != 0))
+ break;
+
+ /* Get Tx info & store how many descs were processed */
+ tx_info = &tx_ring->tx_buffer_info[req_id];
+
+ mbuf = tx_info->mbuf;
+ rte_pktmbuf_free(mbuf);
+
+ tx_info->mbuf = NULL;
+ tx_ring->empty_tx_reqs[next_to_clean] = req_id;
+
+ total_tx_descs += tx_info->tx_descs;
+
+ /* Put the descriptor back to the ring for reuse */
+ next_to_clean = ENA_IDX_NEXT_MASKED(next_to_clean,
+ tx_ring->size_mask);
+ }
+
+ if (likely(total_tx_descs > 0)) {
+ /* Acknowledge completion of sent packets */
+ tx_ring->next_to_clean = next_to_clean;
+ ena_com_comp_ack(tx_ring->ena_com_io_sq, total_tx_descs);
+ ena_com_update_dev_comp_head(tx_ring->ena_com_io_cq);
+ }