/* SPDX-License-Identifier: BSD-3-Clause
*
- * Copyright(c) 2019-2020 Xilinx, Inc.
+ * Copyright(c) 2019-2021 Xilinx, Inc.
* Copyright(c) 2016-2019 Solarflare Communications Inc.
*
* This software was jointly developed between OKTET Labs (under contract
*/
#define SFC_TX_QFLUSH_POLL_ATTEMPTS (2000)
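+/* Get shared Tx queue info by ethdev Tx queue ID (the ID must be valid). */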
+struct sfc_txq_info *
+sfc_txq_info_by_ethdev_qid(struct sfc_adapter_shared *sas,
+ sfc_ethdev_qid_t ethdev_qid)
+{
+ sfc_sw_index_t sw_index;
+
+ SFC_ASSERT((unsigned int)ethdev_qid < sas->ethdev_txq_count);
+ SFC_ASSERT(ethdev_qid != SFC_ETHDEV_QID_INVALID);
+
+ sw_index = sfc_txq_sw_index_by_ethdev_tx_qid(sas, ethdev_qid);
+ return &sas->txq_info[sw_index];
+}
+
static uint64_t
sfc_tx_get_offload_mask(struct sfc_adapter *sa)
{
uint64_t no_caps = 0;
if (!encp->enc_hw_tx_insert_vlan_enabled)
- no_caps |= DEV_TX_OFFLOAD_VLAN_INSERT;
+ no_caps |= RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
if (!encp->enc_tunnel_encapsulations_supported)
- no_caps |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+ no_caps |= RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM;
if (!sa->tso)
- no_caps |= DEV_TX_OFFLOAD_TCP_TSO;
+ no_caps |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
- if (!sa->tso_encap)
- no_caps |= (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
- DEV_TX_OFFLOAD_GENEVE_TNL_TSO);
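+ /*
+ * Tunnel TSO is reported per protocol: it requires both encapsulated
+ * TSO support and the matching tunnel encapsulation capability.
+ */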
+ if (!sa->tso_encap ||
+ (encp->enc_tunnel_encapsulations_supported &
+ (1u << EFX_TUNNEL_PROTOCOL_VXLAN)) == 0)
+ no_caps |= RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO;
+
+ if (!sa->tso_encap ||
+ (encp->enc_tunnel_encapsulations_supported &
+ (1u << EFX_TUNNEL_PROTOCOL_GENEVE)) == 0)
+ no_caps |= RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO;
return ~no_caps;
}
}
/* We either perform both TCP and UDP offload, or no offload at all */
- if (((offloads & DEV_TX_OFFLOAD_TCP_CKSUM) == 0) !=
- ((offloads & DEV_TX_OFFLOAD_UDP_CKSUM) == 0)) {
+ if (((offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) == 0) !=
+ ((offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM) == 0)) {
sfc_err(sa, "TCP and UDP offloads can't be set independently");
rc = EINVAL;
}
}
int
-sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
+sfc_tx_qinit(struct sfc_adapter *sa, sfc_sw_index_t sw_index,
uint16_t nb_tx_desc, unsigned int socket_id,
const struct rte_eth_txconf *tx_conf)
{
+ struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
+ sfc_ethdev_qid_t ethdev_qid;
const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
unsigned int txq_entries;
unsigned int evq_entries;
uint64_t offloads;
struct sfc_dp_tx_hw_limits hw_limits;
- sfc_log_init(sa, "TxQ = %u", sw_index);
+ ethdev_qid = sfc_ethdev_tx_qid_by_txq_sw_index(sas, sw_index);
+
+ sfc_log_init(sa, "TxQ = %d (internal %u)", ethdev_qid, sw_index);
memset(&hw_limits, 0, sizeof(hw_limits));
hw_limits.txq_max_entries = sa->txq_max_entries;
SFC_ASSERT(txq_entries >= nb_tx_desc);
SFC_ASSERT(txq_max_fill_level <= nb_tx_desc);
- offloads = tx_conf->offloads |
- sa->eth_dev->data->dev_conf.txmode.offloads;
+ offloads = tx_conf->offloads;
+ /* Add device level Tx offloads if the queue is an ethdev Tx queue */
+ if (ethdev_qid != SFC_ETHDEV_QID_INVALID)
+ offloads |= sa->eth_dev->data->dev_conf.txmode.offloads;
+
rc = sfc_tx_qcheck_conf(sa, txq_max_fill_level, tx_conf, offloads);
if (rc != 0)
goto fail_bad_conf;
SFC_TX_DEFAULT_FREE_THRESH;
txq_info->offloads = offloads;
- rc = sfc_dma_alloc(sa, "txq", sw_index,
+ rc = sfc_dma_alloc(sa, "txq", sw_index, EFX_NIC_DMA_ADDR_TX_RING,
efx_txq_size(sa->nic, txq_info->entries),
socket_id, &txq->mem);
if (rc != 0)
info.vi_window_shift = encp->enc_vi_window_shift;
info.tso_tcp_header_offset_limit =
encp->enc_tx_tso_tcp_header_offset_limit;
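+ /* NIC TSO limits for the datapath; clamp values stored in 16-bit fields */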
+ info.tso_max_nb_header_descs =
+ RTE_MIN(encp->enc_tx_tso_max_header_ndescs,
+ (uint32_t)UINT16_MAX);
+ info.tso_max_header_len =
+ RTE_MIN(encp->enc_tx_tso_max_header_length,
+ (uint32_t)UINT16_MAX);
+ info.tso_max_nb_payload_descs =
+ RTE_MIN(encp->enc_tx_tso_max_payload_ndescs,
+ (uint32_t)UINT16_MAX);
+ info.tso_max_payload_len = encp->enc_tx_tso_max_payload_length;
+ info.tso_max_nb_outgoing_frames = encp->enc_tx_tso_max_nframes;
+
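+ /* NIC DMA mapping info for datapaths that translate DMA addresses */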
+ info.nic_dma_info = &sas->nic_dma_info;
rc = sa->priv.dp_tx->qcreate(sa->eth_dev->data->port_id, sw_index,
&RTE_ETH_DEV_TO_PCI(sa->eth_dev)->addr,
fail_bad_conf:
fail_size_up_rings:
- sfc_log_init(sa, "failed (TxQ = %u, rc = %d)", sw_index, rc);
+ sfc_log_init(sa, "failed (TxQ = %d (internal %u), rc = %d)", ethdev_qid,
+ sw_index, rc);
return rc;
}
void
-sfc_tx_qfini(struct sfc_adapter *sa, unsigned int sw_index)
+sfc_tx_qfini(struct sfc_adapter *sa, sfc_sw_index_t sw_index)
{
+ struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
+ sfc_ethdev_qid_t ethdev_qid;
struct sfc_txq_info *txq_info;
struct sfc_txq *txq;
- sfc_log_init(sa, "TxQ = %u", sw_index);
+ ethdev_qid = sfc_ethdev_tx_qid_by_txq_sw_index(sas, sw_index);
+
+ sfc_log_init(sa, "TxQ = %d (internal %u)", ethdev_qid, sw_index);
SFC_ASSERT(sw_index < sfc_sa2shared(sa)->txq_count);
- sa->eth_dev->data->tx_queues[sw_index] = NULL;
+ if (ethdev_qid != SFC_ETHDEV_QID_INVALID)
+ sa->eth_dev->data->tx_queues[ethdev_qid] = NULL;
txq_info = &sfc_sa2shared(sa)->txq_info[sw_index];
txq->evq = NULL;
}
-static int
-sfc_tx_qinit_info(struct sfc_adapter *sa, unsigned int sw_index)
+int
+sfc_tx_qinit_info(struct sfc_adapter *sa, sfc_sw_index_t sw_index)
{
- sfc_log_init(sa, "TxQ = %u", sw_index);
+ struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
+ sfc_ethdev_qid_t ethdev_qid;
+
+ ethdev_qid = sfc_ethdev_tx_qid_by_txq_sw_index(sas, sw_index);
+
+ sfc_log_init(sa, "TxQ = %d (internal %u)", ethdev_qid, sw_index);
return 0;
}
static int
sfc_tx_check_mode(struct sfc_adapter *sa, const struct rte_eth_txmode *txmode)
{
+ uint64_t dev_tx_offload_cap = sfc_tx_get_dev_offload_caps(sa);
int rc = 0;
switch (txmode->mq_mode) {
- case ETH_MQ_TX_NONE:
+ case RTE_ETH_MQ_TX_NONE:
break;
default:
sfc_err(sa, "Tx multi-queue mode %u not supported",
rc = EINVAL;
}
+ if ((dev_tx_offload_cap & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) != 0 &&
+ (txmode->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) == 0) {
+ sfc_err(sa, "There is no FAST_FREE flag in the attempted Tx mode configuration");
+ sfc_err(sa, "FAST_FREE is always active as per the current Tx datapath variant");
+ rc = EINVAL;
+ }
+
/*
* These features are claimed to be i40e-specific,
* but it does make sense to double-check their absence
sfc_tx_fini_queues(struct sfc_adapter *sa, unsigned int nb_tx_queues)
{
struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
- int sw_index;
+ sfc_sw_index_t sw_index;
+ sfc_ethdev_qid_t ethdev_qid;
- SFC_ASSERT(nb_tx_queues <= sas->txq_count);
+ SFC_ASSERT(nb_tx_queues <= sas->ethdev_txq_count);
- sw_index = sas->txq_count;
- while (--sw_index >= (int)nb_tx_queues) {
- if (sas->txq_info[sw_index].state & SFC_TXQ_INITIALIZED)
+ /*
+ * Finalize only ethdev queues since other ones are finalized only
+ * on device close and they may require additional deinitialization.
+ */
+ ethdev_qid = sas->ethdev_txq_count;
+ while (--ethdev_qid >= (int)nb_tx_queues) {
+ struct sfc_txq_info *txq_info;
+
+ sw_index = sfc_txq_sw_index_by_ethdev_tx_qid(sas, ethdev_qid);
+ txq_info = sfc_txq_info_by_ethdev_qid(sas, ethdev_qid);
+ if (txq_info->state & SFC_TXQ_INITIALIZED)
sfc_tx_qfini(sa, sw_index);
}
- sas->txq_count = nb_tx_queues;
+ sas->ethdev_txq_count = nb_tx_queues;
}
int
const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
const struct rte_eth_conf *dev_conf = &sa->eth_dev->data->dev_conf;
const unsigned int nb_tx_queues = sa->eth_dev->data->nb_tx_queues;
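+ /* Internal (reserved) Tx queues are allocated in addition to ethdev ones */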
+ const unsigned int nb_rsvd_tx_queues = sfc_nb_txq_reserved(sas);
+ const unsigned int nb_txq_total = nb_tx_queues + nb_rsvd_tx_queues;
+ bool reconfigure;
int rc = 0;
sfc_log_init(sa, "nb_tx_queues=%u (old %u)",
- nb_tx_queues, sas->txq_count);
+ nb_tx_queues, sas->ethdev_txq_count);
/*
* The datapath implementation assumes absence of boundary
if (rc != 0)
goto fail_check_mode;
- if (nb_tx_queues == sas->txq_count)
+ if (nb_txq_total == sas->txq_count)
goto done;
if (sas->txq_info == NULL) {
- sas->txq_info = rte_calloc_socket("sfc-txqs", nb_tx_queues,
+ reconfigure = false;
+ sas->txq_info = rte_calloc_socket("sfc-txqs", nb_txq_total,
sizeof(sas->txq_info[0]), 0,
sa->socket_id);
if (sas->txq_info == NULL)
* since it should not be shared.
*/
rc = ENOMEM;
- sa->txq_ctrl = calloc(nb_tx_queues, sizeof(sa->txq_ctrl[0]));
+ sa->txq_ctrl = calloc(nb_txq_total, sizeof(sa->txq_ctrl[0]));
if (sa->txq_ctrl == NULL)
goto fail_txqs_ctrl_alloc;
} else {
struct sfc_txq_info *new_txq_info;
struct sfc_txq *new_txq_ctrl;
- if (nb_tx_queues < sas->txq_count)
+ reconfigure = true;
+
+ if (nb_tx_queues < sas->ethdev_txq_count)
sfc_tx_fini_queues(sa, nb_tx_queues);
new_txq_info =
rte_realloc(sas->txq_info,
- nb_tx_queues * sizeof(sas->txq_info[0]), 0);
- if (new_txq_info == NULL && nb_tx_queues > 0)
+ nb_txq_total * sizeof(sas->txq_info[0]), 0);
+ if (new_txq_info == NULL && nb_txq_total > 0)
goto fail_txqs_realloc;
new_txq_ctrl = realloc(sa->txq_ctrl,
- nb_tx_queues * sizeof(sa->txq_ctrl[0]));
- if (new_txq_ctrl == NULL && nb_tx_queues > 0)
+ nb_txq_total * sizeof(sa->txq_ctrl[0]));
+ if (new_txq_ctrl == NULL && nb_txq_total > 0)
goto fail_txqs_ctrl_realloc;
sas->txq_info = new_txq_info;
sa->txq_ctrl = new_txq_ctrl;
- if (nb_tx_queues > sas->txq_count) {
+ if (nb_txq_total > sas->txq_count) {
memset(&sas->txq_info[sas->txq_count], 0,
- (nb_tx_queues - sas->txq_count) *
+ (nb_txq_total - sas->txq_count) *
sizeof(sas->txq_info[0]));
memset(&sa->txq_ctrl[sas->txq_count], 0,
- (nb_tx_queues - sas->txq_count) *
+ (nb_txq_total - sas->txq_count) *
sizeof(sa->txq_ctrl[0]));
}
}
- while (sas->txq_count < nb_tx_queues) {
- rc = sfc_tx_qinit_info(sa, sas->txq_count);
+ while (sas->ethdev_txq_count < nb_tx_queues) {
+ sfc_sw_index_t sw_index;
+
+ sw_index = sfc_txq_sw_index_by_ethdev_tx_qid(sas,
+ sas->ethdev_txq_count);
+ rc = sfc_tx_qinit_info(sa, sw_index);
if (rc != 0)
goto fail_tx_qinit_info;
- sas->txq_count++;
+ sas->ethdev_txq_count++;
+ }
+
+ sas->txq_count = sas->ethdev_txq_count + nb_rsvd_tx_queues;
+
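+ /* The representor proxy Tx queue is set up only on initial configure */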
+ if (!reconfigure) {
+ rc = sfc_repr_proxy_txq_init(sa);
+ if (rc != 0)
+ goto fail_repr_proxy_txq_init;
}
done:
return 0;
+fail_repr_proxy_txq_init:
fail_tx_qinit_info:
fail_txqs_ctrl_realloc:
fail_txqs_realloc:
sfc_tx_close(struct sfc_adapter *sa)
{
sfc_tx_fini_queues(sa, 0);
+ sfc_repr_proxy_txq_fini(sa);
free(sa->txq_ctrl);
sa->txq_ctrl = NULL;
}
int
-sfc_tx_qstart(struct sfc_adapter *sa, unsigned int sw_index)
+sfc_tx_qstart(struct sfc_adapter *sa, sfc_sw_index_t sw_index)
{
struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
+ sfc_ethdev_qid_t ethdev_qid;
uint64_t offloads_supported = sfc_tx_get_dev_offload_caps(sa) |
sfc_tx_get_queue_offload_caps(sa);
- struct rte_eth_dev_data *dev_data;
struct sfc_txq_info *txq_info;
struct sfc_txq *txq;
struct sfc_evq *evq;
unsigned int desc_index;
int rc = 0;
- sfc_log_init(sa, "TxQ = %u", sw_index);
+ ethdev_qid = sfc_ethdev_tx_qid_by_txq_sw_index(sas, sw_index);
+
+ sfc_log_init(sa, "TxQ = %d (internal %u)", ethdev_qid, sw_index);
SFC_ASSERT(sw_index < sas->txq_count);
txq_info = &sas->txq_info[sw_index];
txq = &sa->txq_ctrl[sw_index];
evq = txq->evq;
- rc = sfc_ev_qstart(evq, sfc_evq_index_by_txq_sw_index(sa, sw_index));
+ rc = sfc_ev_qstart(evq, sfc_evq_sw_index_by_txq_sw_index(sa, sw_index));
if (rc != 0)
goto fail_ev_qstart;
- if (txq_info->offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)
+ if (txq_info->offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
flags |= EFX_TXQ_CKSUM_IPV4;
- if (txq_info->offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)
+ if (txq_info->offloads & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM)
flags |= EFX_TXQ_CKSUM_INNER_IPV4;
- if ((txq_info->offloads & DEV_TX_OFFLOAD_TCP_CKSUM) ||
- (txq_info->offloads & DEV_TX_OFFLOAD_UDP_CKSUM)) {
+ if ((txq_info->offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) ||
+ (txq_info->offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM)) {
flags |= EFX_TXQ_CKSUM_TCPUDP;
- if (offloads_supported & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)
+ if (offloads_supported & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM)
flags |= EFX_TXQ_CKSUM_INNER_TCPUDP;
}
- if (txq_info->offloads & (DEV_TX_OFFLOAD_TCP_TSO |
- DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
- DEV_TX_OFFLOAD_GENEVE_TNL_TSO))
+ if (txq_info->offloads & (RTE_ETH_TX_OFFLOAD_TCP_TSO |
+ RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO))
flags |= EFX_TXQ_FATSOV2;
rc = efx_tx_qcreate(sa->nic, txq->hw_index, 0, &txq->mem,
if (rc != 0)
goto fail_dp_qstart;
- /*
- * It seems to be used by DPDK for debug purposes only ('rte_ether')
- */
- dev_data = sa->eth_dev->data;
- dev_data->tx_queue_state[sw_index] = RTE_ETH_QUEUE_STATE_STARTED;
+ if (ethdev_qid != SFC_ETHDEV_QID_INVALID) {
+ struct rte_eth_dev_data *dev_data;
+
+ /*
+ * It seems to be used by DPDK for debug purposes only
+ * ('rte_ether').
+ */
+ dev_data = sa->eth_dev->data;
+ dev_data->tx_queue_state[ethdev_qid] =
+ RTE_ETH_QUEUE_STATE_STARTED;
+ }
return 0;
}
void
-sfc_tx_qstop(struct sfc_adapter *sa, unsigned int sw_index)
+sfc_tx_qstop(struct sfc_adapter *sa, sfc_sw_index_t sw_index)
{
struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
- struct rte_eth_dev_data *dev_data;
+ sfc_ethdev_qid_t ethdev_qid;
struct sfc_txq_info *txq_info;
struct sfc_txq *txq;
unsigned int retry_count;
unsigned int wait_count;
int rc;
- sfc_log_init(sa, "TxQ = %u", sw_index);
+ ethdev_qid = sfc_ethdev_tx_qid_by_txq_sw_index(sas, sw_index);
+
+ sfc_log_init(sa, "TxQ = %d (internal %u)", ethdev_qid, sw_index);
SFC_ASSERT(sw_index < sas->txq_count);
txq_info = &sas->txq_info[sw_index];
wait_count++ < SFC_TX_QFLUSH_POLL_ATTEMPTS);
if (txq_info->state & SFC_TXQ_FLUSHING)
- sfc_err(sa, "TxQ %u flush timed out", sw_index);
+ sfc_err(sa, "TxQ %d (internal %u) flush timed out",
+ ethdev_qid, sw_index);
if (txq_info->state & SFC_TXQ_FLUSHED)
- sfc_notice(sa, "TxQ %u flushed", sw_index);
+ sfc_notice(sa, "TxQ %d (internal %u) flushed",
+ ethdev_qid, sw_index);
}
sa->priv.dp_tx->qreap(txq_info->dp);
sfc_ev_qstop(txq->evq);
- /*
- * It seems to be used by DPDK for debug purposes only ('rte_ether')
- */
- dev_data = sa->eth_dev->data;
- dev_data->tx_queue_state[sw_index] = RTE_ETH_QUEUE_STATE_STOPPED;
+ if (ethdev_qid != SFC_ETHDEV_QID_INVALID) {
+ struct rte_eth_dev_data *dev_data;
+
+ /*
+ * It seems to be used by DPDK for debug purposes only
+ * ('rte_ether')
+ */
+ dev_data = sa->eth_dev->data;
+ dev_data->tx_queue_state[ethdev_qid] =
+ RTE_ETH_QUEUE_STATE_STOPPED;
+ }
}
int
{
struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
- unsigned int sw_index;
+ sfc_sw_index_t sw_index;
int rc = 0;
- sfc_log_init(sa, "txq_count = %u", sas->txq_count);
+ sfc_log_init(sa, "txq_count = %u (internal %u)",
+ sas->ethdev_txq_count, sas->txq_count);
if (sa->tso) {
- if (!encp->enc_fw_assisted_tso_v2_enabled) {
+ if (!encp->enc_fw_assisted_tso_v2_enabled &&
+ !encp->enc_tso_v3_enabled) {
sfc_warn(sa, "TSO support was unable to be restored");
sa->tso = B_FALSE;
sa->tso_encap = B_FALSE;
}
}
- if (sa->tso_encap && !encp->enc_fw_assisted_tso_v2_encap_enabled) {
+ if (sa->tso_encap && !encp->enc_fw_assisted_tso_v2_encap_enabled &&
+ !encp->enc_tso_v3_enabled) {
sfc_warn(sa, "Encapsulated TSO support was unable to be restored");
sa->tso_encap = B_FALSE;
}
sfc_tx_stop(struct sfc_adapter *sa)
{
struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
- unsigned int sw_index;
+ sfc_sw_index_t sw_index;
- sfc_log_init(sa, "txq_count = %u", sas->txq_count);
+ sfc_log_init(sa, "txq_count = %u (internal %u)",
+ sas->ethdev_txq_count, sas->txq_count);
sw_index = sas->txq_count;
while (sw_index-- > 0) {
sfc_efx_tx_maybe_insert_tag(struct sfc_efx_txq *txq, struct rte_mbuf *m,
efx_desc_t **pend)
{
- uint16_t this_tag = ((m->ol_flags & PKT_TX_VLAN_PKT) ?
+ uint16_t this_tag = ((m->ol_flags & RTE_MBUF_F_TX_VLAN) ?
m->vlan_tci : 0);
if (this_tag == txq->hw_vlan_tci)
* insertion offload is requested regardless the offload
* requested/supported.
*/
- ret = sfc_dp_tx_prepare_pkt(tx_pkts[i],
+ ret = sfc_dp_tx_prepare_pkt(tx_pkts[i], 0, SFC_TSOH_STD_LEN,
encp->enc_tx_tso_tcp_header_offset_limit,
txq->max_fill_level, EFX_TX_FATSOV2_OPT_NDESCS,
1);
/*
* Here VLAN TCI is expected to be zero in case if no
- * DEV_TX_OFFLOAD_VLAN_INSERT capability is advertised;
+ * RTE_ETH_TX_OFFLOAD_VLAN_INSERT capability is advertised;
* if the calling app ignores the absence of
- * DEV_TX_OFFLOAD_VLAN_INSERT and pushes VLAN TCI, then
+ * RTE_ETH_TX_OFFLOAD_VLAN_INSERT and pushes VLAN TCI, then
* TX_ERROR will occur
*/
pkt_descs += sfc_efx_tx_maybe_insert_tag(txq, m_seg, &pend);
- if (m_seg->ol_flags & PKT_TX_TCP_SEG) {
+ if (m_seg->ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
/*
* We expect correct 'pkt->l[2, 3, 4]_len' values
* to be set correctly by the caller
txq->completed, &txq->added);
SFC_ASSERT(rc == 0);
- if (likely(pushed != txq->added))
+ if (likely(pushed != txq->added)) {
efx_tx_qpush(txq->common, txq->added, pushed);
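+ /* Count the doorbell ring in datapath queue statistics */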
+ txq->dp.dpq.dbells++;
+ }
}
#if SFC_TX_XMIT_PKTS_REAP_AT_LEAST_ONCE
struct sfc_txq *ctrl_txq;
int rc;
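+ /* EFX Tx datapath does not support NIC DMA mapped memory regions */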
+ rc = ENOTSUP;
+ if (info->nic_dma_info->nb_regions > 0)
+ goto fail_nic_dma;
+
rc = ENOMEM;
txq = rte_zmalloc_socket("sfc-efx-txq", sizeof(*txq),
RTE_CACHE_LINE_SIZE, socket_id);
rte_free(txq);
fail_txq_alloc:
+fail_nic_dma:
return rc;
}
.dp = {
.name = SFC_KVARG_DATAPATH_EFX,
.type = SFC_DP_TX,
- .hw_fw_caps = 0,
+ .hw_fw_caps = SFC_DP_HW_FW_CAP_TX_EFX,
},
.features = 0,
- .dev_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
- DEV_TX_OFFLOAD_MULTI_SEGS,
- .queue_offload_capa = DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM |
- DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_TX_OFFLOAD_TCP_TSO,
+ .dev_offload_capa = RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+ RTE_ETH_TX_OFFLOAD_MULTI_SEGS,
+ .queue_offload_capa = RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_TSO,
.qsize_up_rings = sfc_efx_tx_qsize_up_rings,
.qcreate = sfc_efx_tx_qcreate,
.qdestroy = sfc_efx_tx_qdestroy,