X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fsfc%2Fsfc_tx.c;h=bee0beb94722927f8ea325123bdd403a74efc294;hb=af397b3c93f82b0803c0890874d7ee3b5127522d;hp=4b1f94ce8d87b6d69515eba90bd9d054ae2005f5;hpb=ac30699e9cfbb58faeca82ac6d122baf2d61b480;p=dpdk.git diff --git a/drivers/net/sfc/sfc_tx.c b/drivers/net/sfc/sfc_tx.c index 4b1f94ce8d..bee0beb947 100644 --- a/drivers/net/sfc/sfc_tx.c +++ b/drivers/net/sfc/sfc_tx.c @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: BSD-3-Clause * - * Copyright (c) 2016-2018 Solarflare Communications Inc. - * All rights reserved. + * Copyright(c) 2019-2021 Xilinx, Inc. + * Copyright(c) 2016-2019 Solarflare Communications Inc. * * This software was jointly developed between OKTET Labs (under contract * for Solarflare) and Solarflare Communications, Inc. @@ -34,43 +34,57 @@ */ #define SFC_TX_QFLUSH_POLL_ATTEMPTS (2000) -uint64_t -sfc_tx_get_dev_offload_caps(struct sfc_adapter *sa) +struct sfc_txq_info * +sfc_txq_info_by_ethdev_qid(struct sfc_adapter_shared *sas, + sfc_ethdev_qid_t ethdev_qid) { - const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic); - uint64_t caps = 0; - - if ((sa->priv.dp_tx->features & SFC_DP_TX_FEAT_VLAN_INSERT) && - encp->enc_hw_tx_insert_vlan_enabled) - caps |= DEV_TX_OFFLOAD_VLAN_INSERT; + sfc_sw_index_t sw_index; - if (sa->priv.dp_tx->features & SFC_DP_TX_FEAT_MULTI_SEG) - caps |= DEV_TX_OFFLOAD_MULTI_SEGS; + SFC_ASSERT((unsigned int)ethdev_qid < sas->ethdev_txq_count); + SFC_ASSERT(ethdev_qid != SFC_ETHDEV_QID_INVALID); - if ((~sa->priv.dp_tx->features & SFC_DP_TX_FEAT_MULTI_POOL) && - (~sa->priv.dp_tx->features & SFC_DP_TX_FEAT_REFCNT)) - caps |= DEV_TX_OFFLOAD_MBUF_FAST_FREE; - - return caps; + sw_index = sfc_txq_sw_index_by_ethdev_tx_qid(sas, ethdev_qid); + return &sas->txq_info[sw_index]; } -uint64_t -sfc_tx_get_queue_offload_caps(struct sfc_adapter *sa) +static uint64_t +sfc_tx_get_offload_mask(struct sfc_adapter *sa) { const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic); - uint64_t caps = 0; + uint64_t no_caps = 0; + + if (!encp->enc_hw_tx_insert_vlan_enabled) + no_caps |= RTE_ETH_TX_OFFLOAD_VLAN_INSERT; - caps |= DEV_TX_OFFLOAD_IPV4_CKSUM; - caps |= DEV_TX_OFFLOAD_UDP_CKSUM; - caps |= DEV_TX_OFFLOAD_TCP_CKSUM; + if (!encp->enc_tunnel_encapsulations_supported) + no_caps |= RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM; - if (encp->enc_tunnel_encapsulations_supported) - caps |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM; + if (!sa->tso) + no_caps |= RTE_ETH_TX_OFFLOAD_TCP_TSO; - if (sa->tso) - caps |= DEV_TX_OFFLOAD_TCP_TSO; + if (!sa->tso_encap || + (encp->enc_tunnel_encapsulations_supported & + (1u << EFX_TUNNEL_PROTOCOL_VXLAN)) == 0) + no_caps |= RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO; - return caps; + if (!sa->tso_encap || + (encp->enc_tunnel_encapsulations_supported & + (1u << EFX_TUNNEL_PROTOCOL_GENEVE)) == 0) + no_caps |= RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO; + + return ~no_caps; +} + +uint64_t +sfc_tx_get_dev_offload_caps(struct sfc_adapter *sa) +{ + return sa->priv.dp_tx->dev_offload_capa & sfc_tx_get_offload_mask(sa); +} + +uint64_t +sfc_tx_get_queue_offload_caps(struct sfc_adapter *sa) +{ + return sa->priv.dp_tx->queue_offload_capa & sfc_tx_get_offload_mask(sa); } static int @@ -100,8 +114,8 @@ sfc_tx_qcheck_conf(struct sfc_adapter *sa, unsigned int txq_max_fill_level, } /* We either perform both TCP and UDP offload, or no offload at all */ - if (((offloads & DEV_TX_OFFLOAD_TCP_CKSUM) == 0) != - ((offloads & DEV_TX_OFFLOAD_UDP_CKSUM) == 0)) { + if (((offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) == 0) != + ((offloads & 
RTE_ETH_TX_OFFLOAD_UDP_CKSUM) == 0)) { sfc_err(sa, "TCP and UDP offloads can't be set independently"); rc = EINVAL; } @@ -117,10 +131,12 @@ sfc_tx_qflush_done(struct sfc_txq_info *txq_info) } int -sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index, +sfc_tx_qinit(struct sfc_adapter *sa, sfc_sw_index_t sw_index, uint16_t nb_tx_desc, unsigned int socket_id, const struct rte_eth_txconf *tx_conf) { + struct sfc_adapter_shared * const sas = sfc_sa2shared(sa); + sfc_ethdev_qid_t ethdev_qid; const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic); unsigned int txq_entries; unsigned int evq_entries; @@ -133,7 +149,9 @@ sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index, uint64_t offloads; struct sfc_dp_tx_hw_limits hw_limits; - sfc_log_init(sa, "TxQ = %u", sw_index); + ethdev_qid = sfc_ethdev_tx_qid_by_txq_sw_index(sas, sw_index); + + sfc_log_init(sa, "TxQ = %d (internal %u)", ethdev_qid, sw_index); memset(&hw_limits, 0, sizeof(hw_limits)); hw_limits.txq_max_entries = sa->txq_max_entries; @@ -149,8 +167,11 @@ sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index, SFC_ASSERT(txq_entries >= nb_tx_desc); SFC_ASSERT(txq_max_fill_level <= nb_tx_desc); - offloads = tx_conf->offloads | - sa->eth_dev->data->dev_conf.txmode.offloads; + offloads = tx_conf->offloads; + /* Add device level Tx offloads if the queue is an ethdev Tx queue */ + if (ethdev_qid != SFC_ETHDEV_QID_INVALID) + offloads |= sa->eth_dev->data->dev_conf.txmode.offloads; + rc = sfc_tx_qcheck_conf(sa, txq_max_fill_level, tx_conf, offloads); if (rc != 0) goto fail_bad_conf; @@ -193,6 +214,17 @@ sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index, info.vi_window_shift = encp->enc_vi_window_shift; info.tso_tcp_header_offset_limit = encp->enc_tx_tso_tcp_header_offset_limit; + info.tso_max_nb_header_descs = + RTE_MIN(encp->enc_tx_tso_max_header_ndescs, + (uint32_t)UINT16_MAX); + info.tso_max_header_len = + RTE_MIN(encp->enc_tx_tso_max_header_length, + (uint32_t)UINT16_MAX); + info.tso_max_nb_payload_descs = + RTE_MIN(encp->enc_tx_tso_max_payload_ndescs, + (uint32_t)UINT16_MAX); + info.tso_max_payload_len = encp->enc_tx_tso_max_payload_length; + info.tso_max_nb_outgoing_frames = encp->enc_tx_tso_max_nframes; rc = sa->priv.dp_tx->qcreate(sa->eth_dev->data->port_id, sw_index, &RTE_ETH_DEV_TO_PCI(sa->eth_dev)->addr, @@ -219,20 +251,26 @@ fail_ev_qinit: fail_bad_conf: fail_size_up_rings: - sfc_log_init(sa, "failed (TxQ = %u, rc = %d)", sw_index, rc); + sfc_log_init(sa, "failed (TxQ = %d (internal %u), rc = %d)", ethdev_qid, + sw_index, rc); return rc; } void -sfc_tx_qfini(struct sfc_adapter *sa, unsigned int sw_index) +sfc_tx_qfini(struct sfc_adapter *sa, sfc_sw_index_t sw_index) { + struct sfc_adapter_shared * const sas = sfc_sa2shared(sa); + sfc_ethdev_qid_t ethdev_qid; struct sfc_txq_info *txq_info; struct sfc_txq *txq; - sfc_log_init(sa, "TxQ = %u", sw_index); + ethdev_qid = sfc_ethdev_tx_qid_by_txq_sw_index(sas, sw_index); + + sfc_log_init(sa, "TxQ = %d (internal %u)", ethdev_qid, sw_index); SFC_ASSERT(sw_index < sfc_sa2shared(sa)->txq_count); - sa->eth_dev->data->tx_queues[sw_index] = NULL; + if (ethdev_qid != SFC_ETHDEV_QID_INVALID) + sa->eth_dev->data->tx_queues[ethdev_qid] = NULL; txq_info = &sfc_sa2shared(sa)->txq_info[sw_index]; @@ -252,10 +290,15 @@ sfc_tx_qfini(struct sfc_adapter *sa, unsigned int sw_index) txq->evq = NULL; } -static int -sfc_tx_qinit_info(struct sfc_adapter *sa, unsigned int sw_index) +int +sfc_tx_qinit_info(struct sfc_adapter *sa, sfc_sw_index_t sw_index) { - sfc_log_init(sa, "TxQ = %u", 
sw_index);
+	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
+	sfc_ethdev_qid_t ethdev_qid;
+
+	ethdev_qid = sfc_ethdev_tx_qid_by_txq_sw_index(sas, sw_index);
+
+	sfc_log_init(sa, "TxQ = %d (internal %u)", ethdev_qid, sw_index);
 
 	return 0;
 }
@@ -266,7 +309,7 @@ sfc_tx_check_mode(struct sfc_adapter *sa, const struct rte_eth_txmode *txmode)
 	int rc = 0;
 
 	switch (txmode->mq_mode) {
-	case ETH_MQ_TX_NONE:
+	case RTE_ETH_MQ_TX_NONE:
 		break;
 	default:
 		sfc_err(sa, "Tx multi-queue mode %u not supported",
@@ -304,17 +347,26 @@ static void
 sfc_tx_fini_queues(struct sfc_adapter *sa, unsigned int nb_tx_queues)
 {
 	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
-	int sw_index;
+	sfc_sw_index_t sw_index;
+	sfc_ethdev_qid_t ethdev_qid;
 
-	SFC_ASSERT(nb_tx_queues <= sas->txq_count);
+	SFC_ASSERT(nb_tx_queues <= sas->ethdev_txq_count);
 
-	sw_index = sas->txq_count;
-	while (--sw_index >= (int)nb_tx_queues) {
-		if (sas->txq_info[sw_index].state & SFC_TXQ_INITIALIZED)
+	/*
+	 * Finalize only ethdev queues since other ones are finalized only
+	 * on device close and they may require additional deinitialization.
+	 */
+	ethdev_qid = sas->ethdev_txq_count;
+	while (--ethdev_qid >= (int)nb_tx_queues) {
+		struct sfc_txq_info *txq_info;
+
+		sw_index = sfc_txq_sw_index_by_ethdev_tx_qid(sas, ethdev_qid);
+		txq_info = sfc_txq_info_by_ethdev_qid(sas, ethdev_qid);
+		if (txq_info->state & SFC_TXQ_INITIALIZED)
 			sfc_tx_qfini(sa, sw_index);
 	}
 
-	sas->txq_count = nb_tx_queues;
+	sas->ethdev_txq_count = nb_tx_queues;
 }
 
 int
@@ -324,10 +376,13 @@ sfc_tx_configure(struct sfc_adapter *sa)
 	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
 	const struct rte_eth_conf *dev_conf = &sa->eth_dev->data->dev_conf;
 	const unsigned int nb_tx_queues = sa->eth_dev->data->nb_tx_queues;
+	const unsigned int nb_rsvd_tx_queues = sfc_nb_txq_reserved(sas);
+	const unsigned int nb_txq_total = nb_tx_queues + nb_rsvd_tx_queues;
+	bool reconfigure;
 	int rc = 0;
 
 	sfc_log_init(sa, "nb_tx_queues=%u (old %u)",
-		     nb_tx_queues, sas->txq_count);
+		     nb_tx_queues, sas->ethdev_txq_count);
 
 	/*
 	 * The datapath implementation assumes absence of boundary
@@ -343,11 +398,12 @@
 	if (rc != 0)
 		goto fail_check_mode;
 
-	if (nb_tx_queues == sas->txq_count)
+	if (nb_txq_total == sas->txq_count)
 		goto done;
 
 	if (sas->txq_info == NULL) {
-		sas->txq_info = rte_calloc_socket("sfc-txqs", nb_tx_queues,
+		reconfigure = false;
+		sas->txq_info = rte_calloc_socket("sfc-txqs", nb_txq_total,
 						  sizeof(sas->txq_info[0]), 0,
 						  sa->socket_id);
 		if (sas->txq_info == NULL)
@@ -358,50 +414,65 @@
 	 * since it should not be shared. 
*/ rc = ENOMEM; - sa->txq_ctrl = calloc(nb_tx_queues, sizeof(sa->txq_ctrl[0])); + sa->txq_ctrl = calloc(nb_txq_total, sizeof(sa->txq_ctrl[0])); if (sa->txq_ctrl == NULL) goto fail_txqs_ctrl_alloc; } else { struct sfc_txq_info *new_txq_info; struct sfc_txq *new_txq_ctrl; - if (nb_tx_queues < sas->txq_count) + reconfigure = true; + + if (nb_tx_queues < sas->ethdev_txq_count) sfc_tx_fini_queues(sa, nb_tx_queues); new_txq_info = rte_realloc(sas->txq_info, - nb_tx_queues * sizeof(sas->txq_info[0]), 0); - if (new_txq_info == NULL && nb_tx_queues > 0) + nb_txq_total * sizeof(sas->txq_info[0]), 0); + if (new_txq_info == NULL && nb_txq_total > 0) goto fail_txqs_realloc; new_txq_ctrl = realloc(sa->txq_ctrl, - nb_tx_queues * sizeof(sa->txq_ctrl[0])); - if (new_txq_ctrl == NULL && nb_tx_queues > 0) + nb_txq_total * sizeof(sa->txq_ctrl[0])); + if (new_txq_ctrl == NULL && nb_txq_total > 0) goto fail_txqs_ctrl_realloc; sas->txq_info = new_txq_info; sa->txq_ctrl = new_txq_ctrl; - if (nb_tx_queues > sas->txq_count) { + if (nb_txq_total > sas->txq_count) { memset(&sas->txq_info[sas->txq_count], 0, - (nb_tx_queues - sas->txq_count) * + (nb_txq_total - sas->txq_count) * sizeof(sas->txq_info[0])); memset(&sa->txq_ctrl[sas->txq_count], 0, - (nb_tx_queues - sas->txq_count) * + (nb_txq_total - sas->txq_count) * sizeof(sa->txq_ctrl[0])); } } - while (sas->txq_count < nb_tx_queues) { - rc = sfc_tx_qinit_info(sa, sas->txq_count); + while (sas->ethdev_txq_count < nb_tx_queues) { + sfc_sw_index_t sw_index; + + sw_index = sfc_txq_sw_index_by_ethdev_tx_qid(sas, + sas->ethdev_txq_count); + rc = sfc_tx_qinit_info(sa, sw_index); if (rc != 0) goto fail_tx_qinit_info; - sas->txq_count++; + sas->ethdev_txq_count++; + } + + sas->txq_count = sas->ethdev_txq_count + nb_rsvd_tx_queues; + + if (!reconfigure) { + rc = sfc_repr_proxy_txq_init(sa); + if (rc != 0) + goto fail_repr_proxy_txq_init; } done: return 0; +fail_repr_proxy_txq_init: fail_tx_qinit_info: fail_txqs_ctrl_realloc: fail_txqs_realloc: @@ -419,6 +490,7 @@ void sfc_tx_close(struct sfc_adapter *sa) { sfc_tx_fini_queues(sa, 0); + sfc_repr_proxy_txq_fini(sa); free(sa->txq_ctrl); sa->txq_ctrl = NULL; @@ -428,12 +500,12 @@ sfc_tx_close(struct sfc_adapter *sa) } int -sfc_tx_qstart(struct sfc_adapter *sa, unsigned int sw_index) +sfc_tx_qstart(struct sfc_adapter *sa, sfc_sw_index_t sw_index) { struct sfc_adapter_shared * const sas = sfc_sa2shared(sa); + sfc_ethdev_qid_t ethdev_qid; uint64_t offloads_supported = sfc_tx_get_dev_offload_caps(sa) | sfc_tx_get_queue_offload_caps(sa); - struct rte_eth_dev_data *dev_data; struct sfc_txq_info *txq_info; struct sfc_txq *txq; struct sfc_evq *evq; @@ -441,7 +513,9 @@ sfc_tx_qstart(struct sfc_adapter *sa, unsigned int sw_index) unsigned int desc_index; int rc = 0; - sfc_log_init(sa, "TxQ = %u", sw_index); + ethdev_qid = sfc_ethdev_tx_qid_by_txq_sw_index(sas, sw_index); + + sfc_log_init(sa, "TxQ = %d (internal %u)", ethdev_qid, sw_index); SFC_ASSERT(sw_index < sas->txq_count); txq_info = &sas->txq_info[sw_index]; @@ -451,25 +525,27 @@ sfc_tx_qstart(struct sfc_adapter *sa, unsigned int sw_index) txq = &sa->txq_ctrl[sw_index]; evq = txq->evq; - rc = sfc_ev_qstart(evq, sfc_evq_index_by_txq_sw_index(sa, sw_index)); + rc = sfc_ev_qstart(evq, sfc_evq_sw_index_by_txq_sw_index(sa, sw_index)); if (rc != 0) goto fail_ev_qstart; - if (txq_info->offloads & DEV_TX_OFFLOAD_IPV4_CKSUM) + if (txq_info->offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM) flags |= EFX_TXQ_CKSUM_IPV4; - if (txq_info->offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) + if (txq_info->offloads 
& RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM)
 		flags |= EFX_TXQ_CKSUM_INNER_IPV4;
 
-	if ((txq_info->offloads & DEV_TX_OFFLOAD_TCP_CKSUM) ||
-	    (txq_info->offloads & DEV_TX_OFFLOAD_UDP_CKSUM)) {
+	if ((txq_info->offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) ||
+	    (txq_info->offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM)) {
 		flags |= EFX_TXQ_CKSUM_TCPUDP;
 
-		if (offloads_supported & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)
+		if (offloads_supported & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM)
 			flags |= EFX_TXQ_CKSUM_INNER_TCPUDP;
 	}
 
-	if (txq_info->offloads & DEV_TX_OFFLOAD_TCP_TSO)
+	if (txq_info->offloads & (RTE_ETH_TX_OFFLOAD_TCP_TSO |
+				  RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+				  RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO))
 		flags |= EFX_TXQ_FATSOV2;
 
 	rc = efx_tx_qcreate(sa->nic, txq->hw_index, 0, &txq->mem,
@@ -491,11 +567,17 @@
 	if (rc != 0)
 		goto fail_dp_qstart;
 
-	/*
-	 * It seems to be used by DPDK for debug purposes only ('rte_ether')
-	 */
-	dev_data = sa->eth_dev->data;
-	dev_data->tx_queue_state[sw_index] = RTE_ETH_QUEUE_STATE_STARTED;
+	if (ethdev_qid != SFC_ETHDEV_QID_INVALID) {
+		struct rte_eth_dev_data *dev_data;
+
+		/*
+		 * It seems to be used by DPDK for debug purposes only
+		 * ('rte_ether').
+		 */
+		dev_data = sa->eth_dev->data;
+		dev_data->tx_queue_state[ethdev_qid] =
+			RTE_ETH_QUEUE_STATE_STARTED;
+	}
 
 	return 0;
 
@@ -511,17 +593,19 @@ fail_ev_qstart:
 }
 
 void
-sfc_tx_qstop(struct sfc_adapter *sa, unsigned int sw_index)
+sfc_tx_qstop(struct sfc_adapter *sa, sfc_sw_index_t sw_index)
 {
 	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
-	struct rte_eth_dev_data *dev_data;
+	sfc_ethdev_qid_t ethdev_qid;
 	struct sfc_txq_info *txq_info;
 	struct sfc_txq *txq;
 	unsigned int retry_count;
 	unsigned int wait_count;
 	int rc;
 
-	sfc_log_init(sa, "TxQ = %u", sw_index);
+	ethdev_qid = sfc_ethdev_tx_qid_by_txq_sw_index(sas, sw_index);
+
+	sfc_log_init(sa, "TxQ = %d (internal %u)", ethdev_qid, sw_index);
 
 	SFC_ASSERT(sw_index < sas->txq_count);
 	txq_info = &sas->txq_info[sw_index];
@@ -563,10 +647,12 @@ sfc_tx_qstop(struct sfc_adapter *sa, unsigned int sw_index)
 			wait_count++ < SFC_TX_QFLUSH_POLL_ATTEMPTS);
 
 		if (txq_info->state & SFC_TXQ_FLUSHING)
-			sfc_err(sa, "TxQ %u flush timed out", sw_index);
+			sfc_err(sa, "TxQ %d (internal %u) flush timed out",
+				ethdev_qid, sw_index);
 
 		if (txq_info->state & SFC_TXQ_FLUSHED)
-			sfc_notice(sa, "TxQ %u flushed", sw_index);
+			sfc_notice(sa, "TxQ %d (internal %u) flushed",
+				   ethdev_qid, sw_index);
 	}
 
 	sa->priv.dp_tx->qreap(txq_info->dp);
@@ -577,29 +663,45 @@ sfc_tx_qstop(struct sfc_adapter *sa, unsigned int sw_index)
 
 	sfc_ev_qstop(txq->evq);
 
-	/*
-	 * It seems to be used by DPDK for debug purposes only ('rte_ether')
-	 */
-	dev_data = sa->eth_dev->data;
-	dev_data->tx_queue_state[sw_index] = RTE_ETH_QUEUE_STATE_STOPPED;
+	if (ethdev_qid != SFC_ETHDEV_QID_INVALID) {
+		struct rte_eth_dev_data *dev_data;
+
+		/*
+		 * It seems to be used by DPDK for debug purposes only
+		 * ('rte_ether')
+		 */
+		dev_data = sa->eth_dev->data;
+		dev_data->tx_queue_state[ethdev_qid] =
+			RTE_ETH_QUEUE_STATE_STOPPED;
+	}
 }
 
 int
 sfc_tx_start(struct sfc_adapter *sa)
 {
 	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
-	unsigned int sw_index;
+	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
+	sfc_sw_index_t sw_index;
 	int rc = 0;
 
-	sfc_log_init(sa, "txq_count = %u", sas->txq_count);
+	sfc_log_init(sa, "txq_count = %u (internal %u)",
+		     sas->ethdev_txq_count, sas->txq_count);
 
 	if (sa->tso) {
-		if (!efx_nic_cfg_get(sa->nic)->enc_fw_assisted_tso_v2_enabled) {
+		if 
(!encp->enc_fw_assisted_tso_v2_enabled &&
+		    !encp->enc_tso_v3_enabled) {
 			sfc_warn(sa, "TSO support was unable to be restored");
 			sa->tso = B_FALSE;
+			sa->tso_encap = B_FALSE;
 		}
 	}
 
+	if (sa->tso_encap && !encp->enc_fw_assisted_tso_v2_encap_enabled &&
+	    !encp->enc_tso_v3_enabled) {
+		sfc_warn(sa, "Encapsulated TSO support was unable to be restored");
+		sa->tso_encap = B_FALSE;
+	}
+
 	rc = efx_tx_init(sa->nic);
 	if (rc != 0)
 		goto fail_efx_tx_init;
@@ -631,9 +733,10 @@ void
 sfc_tx_stop(struct sfc_adapter *sa)
 {
 	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
-	unsigned int sw_index;
+	sfc_sw_index_t sw_index;
 
-	sfc_log_init(sa, "txq_count = %u", sas->txq_count);
+	sfc_log_init(sa, "txq_count = %u (internal %u)",
+		     sas->ethdev_txq_count, sas->txq_count);
 
 	sw_index = sas->txq_count;
 	while (sw_index-- > 0) {
@@ -677,7 +780,7 @@ static unsigned int
 sfc_efx_tx_maybe_insert_tag(struct sfc_efx_txq *txq, struct rte_mbuf *m,
 			    efx_desc_t **pend)
 {
-	uint16_t this_tag = ((m->ol_flags & PKT_TX_VLAN_PKT) ?
+	uint16_t this_tag = ((m->ol_flags & RTE_MBUF_F_TX_VLAN) ?
 			     m->vlan_tci : 0);
 
 	if (this_tag == txq->hw_vlan_tci)
@@ -697,6 +800,36 @@ sfc_efx_tx_maybe_insert_tag(struct sfc_efx_txq *txq, struct rte_mbuf *m,
 	return 1;
 }
 
+static uint16_t
+sfc_efx_prepare_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+		     uint16_t nb_pkts)
+{
+	struct sfc_dp_txq *dp_txq = tx_queue;
+	struct sfc_efx_txq *txq = sfc_efx_txq_by_dp_txq(dp_txq);
+	const efx_nic_cfg_t *encp = efx_nic_cfg_get(txq->evq->sa->nic);
+	uint16_t i;
+
+	for (i = 0; i < nb_pkts; i++) {
+		int ret;
+
+		/*
+		 * The EFX Tx datapath may need an extra VLAN descriptor
+		 * whenever a packet requests VLAN insertion, regardless
+		 * of whether the offload was requested or supported.
+		 */
+		ret = sfc_dp_tx_prepare_pkt(tx_pkts[i], 0, SFC_TSOH_STD_LEN,
+				encp->enc_tx_tso_tcp_header_offset_limit,
+				txq->max_fill_level, EFX_TX_FATSOV2_OPT_NDESCS,
+				1);
+		if (unlikely(ret != 0)) {
+			rte_errno = ret;
+			break;
+		}
+	}
+
+	return i;
+}
+
 static uint16_t
 sfc_efx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 {
@@ -743,28 +876,24 @@ sfc_efx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 
 		/*
 		 * Here VLAN TCI is expected to be zero in case if no
-		 * DEV_TX_OFFLOAD_VLAN_INSERT capability is advertised;
+		 * RTE_ETH_TX_OFFLOAD_VLAN_INSERT capability is advertised;
 		 * if the calling app ignores the absence of
-		 * DEV_TX_OFFLOAD_VLAN_INSERT and pushes VLAN TCI, then
+		 * RTE_ETH_TX_OFFLOAD_VLAN_INSERT and pushes VLAN TCI, then
 		 * TX_ERROR will occur
 		 */
 		pkt_descs += sfc_efx_tx_maybe_insert_tag(txq, m_seg, &pend);
 
-		if (m_seg->ol_flags & PKT_TX_TCP_SEG) {
+		if (m_seg->ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
 			/*
 			 * We expect correct 'pkt->l[2, 3, 4]_len' values
 			 * to be set correctly by the caller
 			 */
 			if (sfc_efx_tso_do(txq, added, &m_seg, &in_off, &pend,
 					   &pkt_descs, &pkt_len) != 0) {
-				/* We may have reached this place for
-				 * one of the following reasons:
-				 *
-				 * 1) Packet header linearization is needed
-				 *    and the header length is greater
-				 *    than SFC_TSOH_STD_LEN
-				 * 2) TCP header starts at more then
-				 *    208 bytes into the frame
+				/* We may have reached this place if packet
+				 * header linearization is needed but the
+				 * header length is greater than
+				 * SFC_TSOH_STD_LEN
 				 *
 				 * We will deceive RTE saying that we have sent
 				 * the packet, but we will actually drop it. 
@@ -865,8 +994,10 @@ sfc_efx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) txq->completed, &txq->added); SFC_ASSERT(rc == 0); - if (likely(pushed != txq->added)) + if (likely(pushed != txq->added)) { efx_tx_qpush(txq->common, txq->added, pushed); + txq->dp.dpq.tx_dbells++; + } } #if SFC_TX_XMIT_PKTS_REAP_AT_LEAST_ONCE @@ -1108,13 +1239,16 @@ struct sfc_dp_tx sfc_efx_tx = { .dp = { .name = SFC_KVARG_DATAPATH_EFX, .type = SFC_DP_TX, - .hw_fw_caps = 0, + .hw_fw_caps = SFC_DP_HW_FW_CAP_TX_EFX, }, - .features = SFC_DP_TX_FEAT_VLAN_INSERT | - SFC_DP_TX_FEAT_TSO | - SFC_DP_TX_FEAT_MULTI_POOL | - SFC_DP_TX_FEAT_REFCNT | - SFC_DP_TX_FEAT_MULTI_SEG, + .features = 0, + .dev_offload_capa = RTE_ETH_TX_OFFLOAD_VLAN_INSERT | + RTE_ETH_TX_OFFLOAD_MULTI_SEGS, + .queue_offload_capa = RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | + RTE_ETH_TX_OFFLOAD_UDP_CKSUM | + RTE_ETH_TX_OFFLOAD_TCP_CKSUM | + RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | + RTE_ETH_TX_OFFLOAD_TCP_TSO, .qsize_up_rings = sfc_efx_tx_qsize_up_rings, .qcreate = sfc_efx_tx_qcreate, .qdestroy = sfc_efx_tx_qdestroy, @@ -1122,5 +1256,6 @@ struct sfc_dp_tx sfc_efx_tx = { .qstop = sfc_efx_tx_qstop, .qreap = sfc_efx_tx_qreap, .qdesc_status = sfc_efx_tx_qdesc_status, + .pkt_prepare = sfc_efx_prepare_pkts, .pkt_burst = sfc_efx_xmit_pkts, };
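
The capability-reporting rework in this patch inverts the old logic: instead of each datapath OR-ing together the offload bits it supports, every Tx datapath now declares static dev_offload_capa and queue_offload_capa masks, and sfc_tx_get_offload_mask() clears only the bits the current NIC configuration cannot honour (it accumulates them in no_caps and returns ~no_caps). Below is a minimal standalone C sketch of this negative-mask pattern; the TX_OFFLOAD_* values and the fake_nic_cfg structure are hypothetical stand-ins for the RTE_ETH_TX_OFFLOAD_* flags and efx_nic_cfg_t fields, not DPDK definitions.

#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the RTE_ETH_TX_OFFLOAD_* bits. */
#define TX_OFFLOAD_VLAN_INSERT	(1u << 0)
#define TX_OFFLOAD_TCP_TSO	(1u << 1)
#define TX_OFFLOAD_MULTI_SEGS	(1u << 2)

/* Hypothetical stand-in for the relevant efx_nic_cfg_t fields. */
struct fake_nic_cfg {
	bool hw_vlan_insert_enabled;
	bool tso_enabled;
};

/*
 * Collect the bits the NIC configuration cannot honour and return the
 * complement, mirroring what sfc_tx_get_offload_mask() does above.
 */
static uint64_t
tx_get_offload_mask(const struct fake_nic_cfg *cfg)
{
	uint64_t no_caps = 0;

	if (!cfg->hw_vlan_insert_enabled)
		no_caps |= TX_OFFLOAD_VLAN_INSERT;

	if (!cfg->tso_enabled)
		no_caps |= TX_OFFLOAD_TCP_TSO;

	return ~no_caps;
}

int
main(void)
{
	/* Static per-datapath capabilities, as in struct sfc_dp_tx. */
	const uint64_t dev_offload_capa = TX_OFFLOAD_VLAN_INSERT |
					  TX_OFFLOAD_MULTI_SEGS |
					  TX_OFFLOAD_TCP_TSO;
	struct fake_nic_cfg cfg = {
		.hw_vlan_insert_enabled = true,
		.tso_enabled = false,	/* e.g. firmware without TSO */
	};

	/* Advertised caps = static caps minus whatever is ruled out. */
	uint64_t caps = dev_offload_capa & tx_get_offload_mask(&cfg);

	printf("advertised Tx offloads: 0x%" PRIx64 "\n", caps);
	return 0;
}

The advantage of the negative mask is that unconditional offloads (checksums, multi-segment transmit) need no per-NIC check at all: a bit stays advertised unless something explicitly rules it out, so the per-datapath capability lists remain purely declarative.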