diff --git a/drivers/net/sfc/sfc_tx.c b/drivers/net/sfc/sfc_tx.c
index ce2a9a6a4f..bee0beb947 100644
--- a/drivers/net/sfc/sfc_tx.c
+++ b/drivers/net/sfc/sfc_tx.c
@@ -54,23 +54,23 @@ sfc_tx_get_offload_mask(struct sfc_adapter *sa)
 	uint64_t no_caps = 0;

 	if (!encp->enc_hw_tx_insert_vlan_enabled)
-		no_caps |= DEV_TX_OFFLOAD_VLAN_INSERT;
+		no_caps |= RTE_ETH_TX_OFFLOAD_VLAN_INSERT;

 	if (!encp->enc_tunnel_encapsulations_supported)
-		no_caps |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+		no_caps |= RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM;

 	if (!sa->tso)
-		no_caps |= DEV_TX_OFFLOAD_TCP_TSO;
+		no_caps |= RTE_ETH_TX_OFFLOAD_TCP_TSO;

 	if (!sa->tso_encap ||
 	    (encp->enc_tunnel_encapsulations_supported &
 	     (1u << EFX_TUNNEL_PROTOCOL_VXLAN)) == 0)
-		no_caps |= DEV_TX_OFFLOAD_VXLAN_TNL_TSO;
+		no_caps |= RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO;

 	if (!sa->tso_encap ||
 	    (encp->enc_tunnel_encapsulations_supported &
 	     (1u << EFX_TUNNEL_PROTOCOL_GENEVE)) == 0)
-		no_caps |= DEV_TX_OFFLOAD_GENEVE_TNL_TSO;
+		no_caps |= RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO;

 	return ~no_caps;
 }
@@ -114,8 +114,8 @@ sfc_tx_qcheck_conf(struct sfc_adapter *sa, unsigned int txq_max_fill_level,
 	}

 	/* We either perform both TCP and UDP offload, or no offload at all */
-	if (((offloads & DEV_TX_OFFLOAD_TCP_CKSUM) == 0) !=
-	    ((offloads & DEV_TX_OFFLOAD_UDP_CKSUM) == 0)) {
+	if (((offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) == 0) !=
+	    ((offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM) == 0)) {
 		sfc_err(sa, "TCP and UDP offloads can't be set independently");
 		rc = EINVAL;
 	}
@@ -290,7 +290,7 @@ sfc_tx_qfini(struct sfc_adapter *sa, sfc_sw_index_t sw_index)
 	txq->evq = NULL;
 }

-static int
+int
 sfc_tx_qinit_info(struct sfc_adapter *sa, sfc_sw_index_t sw_index)
 {
 	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
@@ -309,7 +309,7 @@ sfc_tx_check_mode(struct sfc_adapter *sa, const struct rte_eth_txmode *txmode)
 	int rc = 0;

 	switch (txmode->mq_mode) {
-	case ETH_MQ_TX_NONE:
+	case RTE_ETH_MQ_TX_NONE:
 		break;
 	default:
 		sfc_err(sa, "Tx multi-queue mode %u not supported",
@@ -376,6 +376,9 @@ sfc_tx_configure(struct sfc_adapter *sa)
 	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
 	const struct rte_eth_conf *dev_conf = &sa->eth_dev->data->dev_conf;
 	const unsigned int nb_tx_queues = sa->eth_dev->data->nb_tx_queues;
+	const unsigned int nb_rsvd_tx_queues = sfc_nb_txq_reserved(sas);
+	const unsigned int nb_txq_total = nb_tx_queues + nb_rsvd_tx_queues;
+	bool reconfigure;
 	int rc = 0;

 	sfc_log_init(sa, "nb_tx_queues=%u (old %u)",
@@ -395,11 +398,12 @@ sfc_tx_configure(struct sfc_adapter *sa)
 	if (rc != 0)
 		goto fail_check_mode;

-	if (nb_tx_queues == sas->txq_count)
+	if (nb_txq_total == sas->txq_count)
 		goto done;

 	if (sas->txq_info == NULL) {
-		sas->txq_info = rte_calloc_socket("sfc-txqs", nb_tx_queues,
+		reconfigure = false;
+		sas->txq_info = rte_calloc_socket("sfc-txqs", nb_txq_total,
 						  sizeof(sas->txq_info[0]), 0,
 						  sa->socket_id);
 		if (sas->txq_info == NULL)
@@ -410,35 +414,37 @@
 	 * since it should not be shared.
*/ rc = ENOMEM; - sa->txq_ctrl = calloc(nb_tx_queues, sizeof(sa->txq_ctrl[0])); + sa->txq_ctrl = calloc(nb_txq_total, sizeof(sa->txq_ctrl[0])); if (sa->txq_ctrl == NULL) goto fail_txqs_ctrl_alloc; } else { struct sfc_txq_info *new_txq_info; struct sfc_txq *new_txq_ctrl; + reconfigure = true; + if (nb_tx_queues < sas->ethdev_txq_count) sfc_tx_fini_queues(sa, nb_tx_queues); new_txq_info = rte_realloc(sas->txq_info, - nb_tx_queues * sizeof(sas->txq_info[0]), 0); - if (new_txq_info == NULL && nb_tx_queues > 0) + nb_txq_total * sizeof(sas->txq_info[0]), 0); + if (new_txq_info == NULL && nb_txq_total > 0) goto fail_txqs_realloc; new_txq_ctrl = realloc(sa->txq_ctrl, - nb_tx_queues * sizeof(sa->txq_ctrl[0])); - if (new_txq_ctrl == NULL && nb_tx_queues > 0) + nb_txq_total * sizeof(sa->txq_ctrl[0])); + if (new_txq_ctrl == NULL && nb_txq_total > 0) goto fail_txqs_ctrl_realloc; sas->txq_info = new_txq_info; sa->txq_ctrl = new_txq_ctrl; - if (nb_tx_queues > sas->ethdev_txq_count) { - memset(&sas->txq_info[sas->ethdev_txq_count], 0, - (nb_tx_queues - sas->ethdev_txq_count) * + if (nb_txq_total > sas->txq_count) { + memset(&sas->txq_info[sas->txq_count], 0, + (nb_txq_total - sas->txq_count) * sizeof(sas->txq_info[0])); - memset(&sa->txq_ctrl[sas->ethdev_txq_count], 0, - (nb_tx_queues - sas->ethdev_txq_count) * + memset(&sa->txq_ctrl[sas->txq_count], 0, + (nb_txq_total - sas->txq_count) * sizeof(sa->txq_ctrl[0])); } } @@ -455,11 +461,18 @@ sfc_tx_configure(struct sfc_adapter *sa) sas->ethdev_txq_count++; } - sas->txq_count = sas->ethdev_txq_count; + sas->txq_count = sas->ethdev_txq_count + nb_rsvd_tx_queues; + + if (!reconfigure) { + rc = sfc_repr_proxy_txq_init(sa); + if (rc != 0) + goto fail_repr_proxy_txq_init; + } done: return 0; +fail_repr_proxy_txq_init: fail_tx_qinit_info: fail_txqs_ctrl_realloc: fail_txqs_realloc: @@ -477,6 +490,7 @@ void sfc_tx_close(struct sfc_adapter *sa) { sfc_tx_fini_queues(sa, 0); + sfc_repr_proxy_txq_fini(sa); free(sa->txq_ctrl); sa->txq_ctrl = NULL; @@ -515,23 +529,23 @@ sfc_tx_qstart(struct sfc_adapter *sa, sfc_sw_index_t sw_index) if (rc != 0) goto fail_ev_qstart; - if (txq_info->offloads & DEV_TX_OFFLOAD_IPV4_CKSUM) + if (txq_info->offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM) flags |= EFX_TXQ_CKSUM_IPV4; - if (txq_info->offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) + if (txq_info->offloads & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM) flags |= EFX_TXQ_CKSUM_INNER_IPV4; - if ((txq_info->offloads & DEV_TX_OFFLOAD_TCP_CKSUM) || - (txq_info->offloads & DEV_TX_OFFLOAD_UDP_CKSUM)) { + if ((txq_info->offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) || + (txq_info->offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM)) { flags |= EFX_TXQ_CKSUM_TCPUDP; - if (offloads_supported & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) + if (offloads_supported & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM) flags |= EFX_TXQ_CKSUM_INNER_TCPUDP; } - if (txq_info->offloads & (DEV_TX_OFFLOAD_TCP_TSO | - DEV_TX_OFFLOAD_VXLAN_TNL_TSO | - DEV_TX_OFFLOAD_GENEVE_TNL_TSO)) + if (txq_info->offloads & (RTE_ETH_TX_OFFLOAD_TCP_TSO | + RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO | + RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO)) flags |= EFX_TXQ_FATSOV2; rc = efx_tx_qcreate(sa->nic, txq->hw_index, 0, &txq->mem, @@ -766,7 +780,7 @@ static unsigned int sfc_efx_tx_maybe_insert_tag(struct sfc_efx_txq *txq, struct rte_mbuf *m, efx_desc_t **pend) { - uint16_t this_tag = ((m->ol_flags & PKT_TX_VLAN_PKT) ? + uint16_t this_tag = ((m->ol_flags & RTE_MBUF_F_TX_VLAN) ? 
m->vlan_tci : 0); if (this_tag == txq->hw_vlan_tci) @@ -862,14 +876,14 @@ sfc_efx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) /* * Here VLAN TCI is expected to be zero in case if no - * DEV_TX_OFFLOAD_VLAN_INSERT capability is advertised; + * RTE_ETH_TX_OFFLOAD_VLAN_INSERT capability is advertised; * if the calling app ignores the absence of - * DEV_TX_OFFLOAD_VLAN_INSERT and pushes VLAN TCI, then + * RTE_ETH_TX_OFFLOAD_VLAN_INSERT and pushes VLAN TCI, then * TX_ERROR will occur */ pkt_descs += sfc_efx_tx_maybe_insert_tag(txq, m_seg, &pend); - if (m_seg->ol_flags & PKT_TX_TCP_SEG) { + if (m_seg->ol_flags & RTE_MBUF_F_TX_TCP_SEG) { /* * We expect correct 'pkt->l[2, 3, 4]_len' values * to be set correctly by the caller @@ -980,8 +994,10 @@ sfc_efx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) txq->completed, &txq->added); SFC_ASSERT(rc == 0); - if (likely(pushed != txq->added)) + if (likely(pushed != txq->added)) { efx_tx_qpush(txq->common, txq->added, pushed); + txq->dp.dpq.tx_dbells++; + } } #if SFC_TX_XMIT_PKTS_REAP_AT_LEAST_ONCE @@ -1226,13 +1242,13 @@ struct sfc_dp_tx sfc_efx_tx = { .hw_fw_caps = SFC_DP_HW_FW_CAP_TX_EFX, }, .features = 0, - .dev_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT | - DEV_TX_OFFLOAD_MULTI_SEGS, - .queue_offload_capa = DEV_TX_OFFLOAD_IPV4_CKSUM | - DEV_TX_OFFLOAD_UDP_CKSUM | - DEV_TX_OFFLOAD_TCP_CKSUM | - DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | - DEV_TX_OFFLOAD_TCP_TSO, + .dev_offload_capa = RTE_ETH_TX_OFFLOAD_VLAN_INSERT | + RTE_ETH_TX_OFFLOAD_MULTI_SEGS, + .queue_offload_capa = RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | + RTE_ETH_TX_OFFLOAD_UDP_CKSUM | + RTE_ETH_TX_OFFLOAD_TCP_CKSUM | + RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | + RTE_ETH_TX_OFFLOAD_TCP_TSO, .qsize_up_rings = sfc_efx_tx_qsize_up_rings, .qcreate = sfc_efx_tx_qcreate, .qdestroy = sfc_efx_tx_qdestroy,
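
Note on the sfc_tx_get_offload_mask() hunk: the function accumulates every offload the adapter cannot provide into no_caps and returns the complement, so a caller can AND the result with a full offload set and keep only what the hardware supports. Below is a minimal standalone sketch of that pattern; the flag values and function names are illustrative stand-ins, not the real RTE_ETH_TX_OFFLOAD_* definitions.

#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for RTE_ETH_TX_OFFLOAD_* bits. */
#define TX_OFFLOAD_VLAN_INSERT	(1ULL << 0)
#define TX_OFFLOAD_TCP_TSO	(1ULL << 1)

/* Collect the unsupported offloads, then return the complement. */
static uint64_t
tx_get_offload_mask(bool hw_vlan_insert, bool tso)
{
	uint64_t no_caps = 0;

	if (!hw_vlan_insert)
		no_caps |= TX_OFFLOAD_VLAN_INSERT;

	if (!tso)
		no_caps |= TX_OFFLOAD_TCP_TSO;

	return ~no_caps;
}

int
main(void)
{
	uint64_t dp_caps = TX_OFFLOAD_VLAN_INSERT | TX_OFFLOAD_TCP_TSO;
	/* TSO is unsupported here, so only VLAN insertion survives. */
	uint64_t caps = dp_caps & tx_get_offload_mask(true, false);

	printf("effective offloads: 0x%" PRIx64 "\n", caps);
	return 0;
}

The complement form keeps the function conservative by construction: an offload that needs no hardware feature check stays advertised without having to be listed explicitly.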
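
Note on the sfc_tx_qcheck_conf() hunk: comparing the two "== 0" results with "!=" rejects exactly the configurations where one of the two checksum flags is set without the other. A hedged sketch of the same predicate, again with stand-in flag values rather than the real rte_ethdev.h macros:

#include <stdbool.h>
#include <stdint.h>

#define TX_OFFLOAD_TCP_CKSUM	(1ULL << 0)	/* stand-in */
#define TX_OFFLOAD_UDP_CKSUM	(1ULL << 1)	/* stand-in */

/*
 * True when TCP and UDP checksum offloads are requested together or
 * not at all -- the only combinations the hardware can express.
 */
static bool
tx_cksum_conf_is_valid(uint64_t offloads)
{
	bool tcp_off = (offloads & TX_OFFLOAD_TCP_CKSUM) == 0;
	bool udp_off = (offloads & TX_OFFLOAD_UDP_CKSUM) == 0;

	return tcp_off == udp_off;
}

The origin of the constraint is visible further down in sfc_tx_qstart(): both flags funnel into the single EFX_TXQ_CKSUM_TCPUDP queue-creation bit, so the NIC cannot honour one without the other.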
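
Note on the sfc_tx_configure() hunks: the rework sizes txq_info for nb_tx_queues + nb_rsvd_tx_queues and keeps two counters, ethdev_txq_count for the queues visible through the ethdev API and txq_count for the total including the reserved representor proxy queue. A toy sketch of that accounting under hypothetical names; the real driver also manages the parallel txq_ctrl array and per-queue init, which are omitted here.

#include <stdlib.h>
#include <string.h>

struct txq_info { unsigned int state; };

/* Hypothetical mirror of the counters in struct sfc_adapter_shared. */
struct adapter_shared {
	struct txq_info *txq_info;
	unsigned int ethdev_txq_count;	/* queues the application sees */
	unsigned int txq_count;		/* ...plus reserved proxy queues */
};

static int
tx_configure(struct adapter_shared *sas, unsigned int nb_ethdev,
	     unsigned int nb_reserved)
{
	unsigned int total = nb_ethdev + nb_reserved;
	struct txq_info *p;

	p = realloc(sas->txq_info, total * sizeof(p[0]));
	if (p == NULL && total > 0)
		return -1;
	sas->txq_info = p;

	/* Zero only the entries beyond the old total, as the patch does. */
	if (total > sas->txq_count)
		memset(&sas->txq_info[sas->txq_count], 0,
		       (total - sas->txq_count) * sizeof(p[0]));

	sas->ethdev_txq_count = nb_ethdev;
	sas->txq_count = total;
	return 0;
}

Keeping the reserved entries past ethdev_txq_count is what lets sfc_repr_proxy_txq_init() claim them on first configuration (reconfigure == false) without the application ever indexing into them.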
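
Note on the sfc_efx_tx_maybe_insert_tag() hunk: the datapath caches the last TCI programmed into the queue in txq->hw_vlan_tci and emits a new VLAN option descriptor only when the outgoing packet's tag differs, returning the number of descriptors consumed. A simplified sketch with stand-in types; the real function also builds the descriptor via efx_tx_qdesc_vlantci_create() and advances the pend pointer.

#include <stdbool.h>
#include <stdint.h>

/* Stand-in for the per-queue state kept in struct sfc_efx_txq. */
struct txq_state {
	uint16_t hw_vlan_tci;	/* tag currently programmed in HW */
};

/*
 * Returns the number of extra descriptors used: 1 when a new VLAN
 * option descriptor is needed, 0 when the tag cached from previous
 * packets can be reused (including the "no VLAN" tag of zero).
 */
static unsigned int
maybe_insert_tag(struct txq_state *txq, uint16_t pkt_tci, bool tci_valid)
{
	uint16_t this_tag = tci_valid ? pkt_tci : 0;

	if (this_tag == txq->hw_vlan_tci)
		return 0;

	/* The option descriptor would be emitted here in the driver. */
	txq->hw_vlan_tci = this_tag;
	return 1;
}

This is also why the nearby comment warns about applications that push a VLAN TCI while RTE_ETH_TX_OFFLOAD_VLAN_INSERT is not advertised: without hardware VLAN insertion the option descriptor cannot be generated and the transmit fails with TX_ERROR.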