return true;
}
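+/*
+ * Representors are available only when both Rx and Tx queues have
+ * been reserved for the representor proxy.
+ */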
+bool
+sfc_repr_available(const struct sfc_adapter_shared *sas)
+{
+ return sas->nb_repr_rxq > 0 && sas->nb_repr_txq > 0;
+}
+
int
sfc_dma_alloc(const struct sfc_adapter *sa, const char *name, uint16_t id,
size_t len, int socket_id, efsys_mem_t *esmp)
sas->counters_rxq_allocated = false;
}
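+
+ /*
+ * Reserve the minimum number of event, Rx and Tx queues required by
+ * the representor proxy only if all of them can be satisfied at
+ * once; otherwise leave representors unavailable.
+ */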
+ if (sfc_repr_supported(sa) &&
+ evq_allocated >= SFC_REPR_PROXY_NB_RXQ_MIN +
+ SFC_REPR_PROXY_NB_TXQ_MIN &&
+ rxq_allocated >= SFC_REPR_PROXY_NB_RXQ_MIN &&
+ txq_allocated >= SFC_REPR_PROXY_NB_TXQ_MIN) {
+ unsigned int extra;
+
+ txq_allocated -= SFC_REPR_PROXY_NB_TXQ_MIN;
+ rxq_allocated -= SFC_REPR_PROXY_NB_RXQ_MIN;
+ evq_allocated -= SFC_REPR_PROXY_NB_RXQ_MIN +
+ SFC_REPR_PROXY_NB_TXQ_MIN;
+
+ sas->nb_repr_rxq = SFC_REPR_PROXY_NB_RXQ_MIN;
+ sas->nb_repr_txq = SFC_REPR_PROXY_NB_TXQ_MIN;
+
+ /* Allocate extra representor RxQs up to the maximum */
+ extra = MIN(evq_allocated, rxq_allocated);
+ extra = MIN(extra,
+ SFC_REPR_PROXY_NB_RXQ_MAX - sas->nb_repr_rxq);
+ evq_allocated -= extra;
+ rxq_allocated -= extra;
+ sas->nb_repr_rxq += extra;
+
+ /* Allocate extra representor TxQs up to the maximum */
+ extra = MIN(evq_allocated, txq_allocated);
+ extra = MIN(extra,
+ SFC_REPR_PROXY_NB_TXQ_MAX - sas->nb_repr_txq);
+ evq_allocated -= extra;
+ txq_allocated -= extra;
+ sas->nb_repr_txq += extra;
+ } else {
+ sas->nb_repr_rxq = 0;
+ sas->nb_repr_txq = 0;
+ }
+
/* Add remaining allocated queues */
sa->rxq_max += MIN(rxq_allocated, evq_allocated / 2);
sa->txq_max += MIN(txq_allocated, evq_allocated - sa->rxq_max);
static int
sfc_set_drv_limits(struct sfc_adapter *sa)
{
+ struct sfc_adapter_shared *sas = sfc_sa2shared(sa);
const struct rte_eth_dev_data *data = sa->eth_dev->data;
- uint32_t rxq_reserved = sfc_nb_reserved_rxq(sfc_sa2shared(sa));
+ uint32_t rxq_reserved = sfc_nb_reserved_rxq(sas);
+ uint32_t txq_reserved = sfc_nb_txq_reserved(sas);
efx_drv_limits_t lim;
memset(&lim, 0, sizeof(lim));
* sfc_estimate_resource_limits().
*/
lim.edl_min_evq_count = lim.edl_max_evq_count =
- 1 + data->nb_rx_queues + data->nb_tx_queues + rxq_reserved;
+ 1 + data->nb_rx_queues + data->nb_tx_queues +
+ rxq_reserved + txq_reserved;
lim.edl_min_rxq_count = lim.edl_max_rxq_count =
data->nb_rx_queues + rxq_reserved;
- lim.edl_min_txq_count = lim.edl_max_txq_count = data->nb_tx_queues;
+ lim.edl_min_txq_count = lim.edl_max_txq_count =
+ data->nb_tx_queues + txq_reserved;
return efx_nic_set_drv_limits(sa->nic, &lim);
}
char *dp_tx_name;
bool counters_rxq_allocated;
+ unsigned int nb_repr_rxq;
+ unsigned int nb_repr_txq;
};
/* Adapter process private data */
}
bool sfc_repr_supported(const struct sfc_adapter *sa);
+bool sfc_repr_available(const struct sfc_adapter_shared *sas);
+
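+/* Get the number of RxQs reserved for the representor proxy */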
+static inline unsigned int
+sfc_repr_nb_rxq(const struct sfc_adapter_shared *sas)
+{
+ return sas->nb_repr_rxq;
+}
+
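+/* Get the number of TxQs reserved for the representor proxy */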
+static inline unsigned int
+sfc_repr_nb_txq(const struct sfc_adapter_shared *sas)
+{
+ return sas->nb_repr_txq;
+}
/** Get the number of milliseconds since boot from the default timer */
static inline uint64_t
static inline unsigned int
sfc_nb_reserved_rxq(const struct sfc_adapter_shared *sas)
{
- return sfc_nb_counter_rxq(sas);
+ return sfc_nb_counter_rxq(sas) + sfc_repr_nb_rxq(sas);
+}
+
+/* Return the number of Tx queues reserved for the driver's internal use */
+static inline unsigned int
+sfc_nb_txq_reserved(const struct sfc_adapter_shared *sas)
+{
+ return sfc_repr_nb_txq(sas);
}
static inline unsigned int
sfc_nb_reserved_evq(const struct sfc_adapter_shared *sas)
{
- /* An EvQ is required for each reserved RxQ */
- return 1 + sfc_nb_reserved_rxq(sas);
+ /* An EvQ is required for each reserved Rx/Tx queue */
+ return 1 + sfc_nb_reserved_rxq(sas) + sfc_nb_txq_reserved(sas);
}
/*
* Own event queue is allocated for management, each Rx and each Tx queue.
* Zero event queue is used for management events.
* When counters are supported, one Rx event queue is reserved.
+ * When representors are supported, Rx and Tx event queues are reserved.
* Rx event queues follow reserved event queues.
* Tx event queues follow Rx event queues.
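+ *
+ * For example, with one counter RxQ, one representor RxQ and one
+ * representor TxQ reserved, event queue 0 handles management,
+ * event queues 1-3 serve the reserved queues and ethdev Rx event
+ * queues start at index 4.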
*/
}
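+/*
+ * Reserved TxQs occupy the lowest Tx software indices, so an ethdev
+ * Tx queue ID is its software index shifted down by the number of
+ * reserved queues; reserved queues themselves have no ethdev ID.
+ */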
static inline sfc_ethdev_qid_t
-sfc_ethdev_tx_qid_by_txq_sw_index(__rte_unused struct sfc_adapter_shared *sas,
+sfc_ethdev_tx_qid_by_txq_sw_index(struct sfc_adapter_shared *sas,
sfc_sw_index_t txq_sw_index)
{
- /* Only ethdev queues are present for now */
- return txq_sw_index;
+ if (txq_sw_index < sfc_nb_txq_reserved(sas))
+ return SFC_ETHDEV_QID_INVALID;
+
+ return txq_sw_index - sfc_nb_txq_reserved(sas);
}
static inline sfc_sw_index_t
-sfc_txq_sw_index_by_ethdev_tx_qid(__rte_unused struct sfc_adapter_shared *sas,
+sfc_txq_sw_index_by_ethdev_tx_qid(struct sfc_adapter_shared *sas,
sfc_ethdev_qid_t ethdev_qid)
{
- /* Only ethdev queues are present for now */
- return ethdev_qid;
+ return sfc_nb_txq_reserved(sas) + ethdev_qid;
}
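+/*
+ * A reserved TxQ uses one of the reserved event queues which follow
+ * the reserved Rx event queues; an ethdev TxQ uses an event queue
+ * located after all reserved event queues and ethdev Rx event queues.
+ */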
static inline sfc_sw_index_t
sfc_evq_sw_index_by_txq_sw_index(struct sfc_adapter *sa,
sfc_sw_index_t txq_sw_index)
{
- return sfc_nb_reserved_evq(sfc_sa2shared(sa)) +
- sa->eth_dev->data->nb_rx_queues + txq_sw_index;
+ struct sfc_adapter_shared *sas = sfc_sa2shared(sa);
+ sfc_ethdev_qid_t ethdev_qid;
+
+ ethdev_qid = sfc_ethdev_tx_qid_by_txq_sw_index(sas, txq_sw_index);
+ if (ethdev_qid == SFC_ETHDEV_QID_INVALID) {
+ return sfc_nb_reserved_evq(sas) - sfc_nb_txq_reserved(sas) +
+ txq_sw_index;
+ }
+
+ return sfc_nb_reserved_evq(sas) + sa->eth_dev->data->nb_rx_queues +
+ ethdev_qid;
}
int sfc_ev_attach(struct sfc_adapter *sa);
int
sfc_repr_proxy_attach(struct sfc_adapter *sa)
{
+ struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
struct sfc_repr_proxy *rp = &sa->repr_proxy;
struct rte_service_spec service;
uint32_t cid;
sfc_log_init(sa, "entry");
- if (!sfc_repr_supported(sa)) {
+ if (!sfc_repr_available(sas)) {
sfc_log_init(sa, "representors not supported - skip");
return 0;
}
void
sfc_repr_proxy_detach(struct sfc_adapter *sa)
{
+ struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
struct sfc_repr_proxy *rp = &sa->repr_proxy;
sfc_log_init(sa, "entry");
- if (!sfc_repr_supported(sa)) {
+ if (!sfc_repr_available(sas)) {
sfc_log_init(sa, "representors not supported - skip");
return;
}
int
sfc_repr_proxy_start(struct sfc_adapter *sa)
{
+ struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
struct sfc_repr_proxy *rp = &sa->repr_proxy;
int rc;
* The condition to start the proxy is insufficient. It will be
* complemented with representor port start/stop support.
*/
- if (!sfc_repr_supported(sa)) {
+ if (!sfc_repr_available(sas)) {
sfc_log_init(sa, "representors not supported - skip");
return 0;
}
void
sfc_repr_proxy_stop(struct sfc_adapter *sa)
{
+ struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
struct sfc_repr_proxy *rp = &sa->repr_proxy;
int rc;
sfc_log_init(sa, "entry");
- if (!sfc_repr_supported(sa)) {
+ if (!sfc_repr_available(sas)) {
sfc_log_init(sa, "representors not supported - skip");
return;
}
extern "C" {
#endif
+/* Number of RxQs supported, one for each distinct mbuf memory pool */
+#define SFC_REPR_PROXY_NB_RXQ_MIN (1)
+#define SFC_REPR_PROXY_NB_RXQ_MAX (1)
+
+/* One TxQ is required and sufficient for port representor support */
+#define SFC_REPR_PROXY_NB_TXQ_MIN (1)
+#define SFC_REPR_PROXY_NB_TXQ_MAX (1)
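+/*
+ * The minimum and maximum currently coincide, so exactly one RxQ and
+ * one TxQ are reserved whenever representors are available.
+ */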
+
struct sfc_repr_proxy {
uint32_t service_core_id;
uint32_t service_id;
const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
const struct rte_eth_conf *dev_conf = &sa->eth_dev->data->dev_conf;
const unsigned int nb_tx_queues = sa->eth_dev->data->nb_tx_queues;
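+ /* The total below includes TxQs reserved for the representor proxy */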
+ const unsigned int nb_rsvd_tx_queues = sfc_nb_txq_reserved(sas);
+ const unsigned int nb_txq_total = nb_tx_queues + nb_rsvd_tx_queues;
int rc = 0;
sfc_log_init(sa, "nb_tx_queues=%u (old %u)",
if (rc != 0)
goto fail_check_mode;
- if (nb_tx_queues == sas->txq_count)
+ if (nb_txq_total == sas->txq_count)
goto done;
if (sas->txq_info == NULL) {
- sas->txq_info = rte_calloc_socket("sfc-txqs", nb_tx_queues,
+ sas->txq_info = rte_calloc_socket("sfc-txqs", nb_txq_total,
sizeof(sas->txq_info[0]), 0,
sa->socket_id);
if (sas->txq_info == NULL)
* since it should not be shared.
*/
rc = ENOMEM;
- sa->txq_ctrl = calloc(nb_tx_queues, sizeof(sa->txq_ctrl[0]));
+ sa->txq_ctrl = calloc(nb_txq_total, sizeof(sa->txq_ctrl[0]));
if (sa->txq_ctrl == NULL)
goto fail_txqs_ctrl_alloc;
} else {
new_txq_info =
rte_realloc(sas->txq_info,
- nb_tx_queues * sizeof(sas->txq_info[0]), 0);
- if (new_txq_info == NULL && nb_tx_queues > 0)
+ nb_txq_total * sizeof(sas->txq_info[0]), 0);
+ if (new_txq_info == NULL && nb_txq_total > 0)
goto fail_txqs_realloc;
new_txq_ctrl = realloc(sa->txq_ctrl,
- nb_tx_queues * sizeof(sa->txq_ctrl[0]));
- if (new_txq_ctrl == NULL && nb_tx_queues > 0)
+ nb_txq_total * sizeof(sa->txq_ctrl[0]));
+ if (new_txq_ctrl == NULL && nb_txq_total > 0)
goto fail_txqs_ctrl_realloc;
sas->txq_info = new_txq_info;
sa->txq_ctrl = new_txq_ctrl;
- if (nb_tx_queues > sas->ethdev_txq_count) {
- memset(&sas->txq_info[sas->ethdev_txq_count], 0,
- (nb_tx_queues - sas->ethdev_txq_count) *
+ if (nb_txq_total > sas->txq_count) {
+ memset(&sas->txq_info[sas->txq_count], 0,
+ (nb_txq_total - sas->txq_count) *
sizeof(sas->txq_info[0]));
- memset(&sa->txq_ctrl[sas->ethdev_txq_count], 0,
- (nb_tx_queues - sas->ethdev_txq_count) *
+ memset(&sa->txq_ctrl[sas->txq_count], 0,
+ (nb_txq_total - sas->txq_count) *
sizeof(sa->txq_ctrl[0]));
}
}
sas->ethdev_txq_count++;
}
- sas->txq_count = sas->ethdev_txq_count;
+ /* TODO: initialize reserved queues when supported. */
+ sas->txq_count = sas->ethdev_txq_count + nb_rsvd_tx_queues;
done:
return 0;