#include "sfc_debug.h"
#include "sfc_log.h"
#include "sfc_kvargs.h"
+#include "sfc_ev.h"
+#include "sfc_rx.h"
static void
sfc_log_init(sa, "entry");
dev_info->pci_dev = RTE_DEV_TO_PCI(dev->device);
+ dev_info->max_rx_pktlen = EFX_MAC_PDU_MAX;
+
+ dev_info->max_rx_queues = sa->rxq_max;
+
+ /* By default packets are dropped if no descriptors are available */
+ dev_info->default_rxconf.rx_drop_en = 1;
+
+ dev_info->rx_desc_lim.nb_max = EFX_RXQ_MAXNDESCS;
+ dev_info->rx_desc_lim.nb_min = EFX_RXQ_MINNDESCS;
+ /* The RXQ hardware requires that the descriptor count is a power
+ * of 2, but rx_desc_lim cannot properly describe that constraint.
+ */
+ dev_info->rx_desc_lim.nb_align = EFX_RXQ_MINNDESCS;
}
static int
return -rc;
}
+/*
+ * eth_dev_ops link_update callback.
+ *
+ * Returns 0 when the cached link status is unchanged, -1 when it changed.
+ * With wait_to_complete, the port is polled directly and the cached
+ * dev->data->dev_link is updated atomically (cmpset retry loop, since the
+ * management event queue may race with us); otherwise the management event
+ * queue is polled and whatever it cached is reported.
+ */
+static int
+sfc_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
+{
+	struct sfc_adapter *sa = dev->data->dev_private;
+	struct rte_eth_link *dev_link = &dev->data->dev_link;
+	struct rte_eth_link old_link;
+	struct rte_eth_link current_link;
+
+	sfc_log_init(sa, "entry");
+
+	if (sa->state != SFC_ADAPTER_STARTED)
+		return 0;
+
+retry:
+	/* The 64-bit atomic snapshot below relies on this size equality */
+	EFX_STATIC_ASSERT(sizeof(*dev_link) == sizeof(rte_atomic64_t));
+	*(int64_t *)&old_link = rte_atomic64_read((rte_atomic64_t *)dev_link);
+
+	if (wait_to_complete) {
+		efx_link_mode_t link_mode;
+
+		efx_port_poll(sa->nic, &link_mode);
+		/* Fixed mojibake: "&curren" had been mangled into the
+		 * currency-sign character; the operand is &current_link.
+		 */
+		sfc_port_link_mode_to_info(link_mode, &current_link);
+
+		/* Publish only if nobody changed the cache under us */
+		if (!rte_atomic64_cmpset((volatile uint64_t *)dev_link,
+					 *(uint64_t *)&old_link,
+					 *(uint64_t *)&current_link))
+			goto retry;
+	} else {
+		sfc_ev_mgmt_qpoll(sa);
+		*(int64_t *)&current_link =
+			rte_atomic64_read((rte_atomic64_t *)dev_link);
+	}
+
+	if (old_link.link_status != current_link.link_status)
+		sfc_info(sa, "Link status is %s",
+			 current_link.link_status ? "UP" : "DOWN");
+
+	return old_link.link_status == current_link.link_status ? 0 : -1;
+}
+
static void
sfc_dev_stop(struct rte_eth_dev *dev)
{
sfc_log_init(sa, "done");
}
+/*
+ * eth_dev_ops rx_queue_setup callback.
+ *
+ * Initializes the Rx queue under the adapter lock and, on success,
+ * publishes the queue pointer in dev->data->rx_queues.
+ * Returns 0 on success, a negative errno on failure.
+ */
+static int
+sfc_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
+		   uint16_t nb_rx_desc, unsigned int socket_id,
+		   const struct rte_eth_rxconf *rx_conf,
+		   struct rte_mempool *mb_pool)
+{
+	struct sfc_adapter *sa = dev->data->dev_private;
+	int err;
+
+	sfc_log_init(sa, "RxQ=%u nb_rx_desc=%u socket_id=%u",
+		     rx_queue_id, nb_rx_desc, socket_id);
+
+	sfc_adapter_lock(sa);
+
+	err = sfc_rx_qinit(sa, rx_queue_id, nb_rx_desc, socket_id,
+			   rx_conf, mb_pool);
+	if (err == 0)
+		dev->data->rx_queues[rx_queue_id] =
+			sa->rxq_info[rx_queue_id].rxq;
+
+	sfc_adapter_unlock(sa);
+
+	if (err != 0) {
+		/* sfc_rx_qinit reports errors as positive errno values */
+		SFC_ASSERT(err > 0);
+	}
+
+	/* -0 == 0, so this covers the success path as well */
+	return -err;
+}
+
+/*
+ * eth_dev_ops rx_queue_release callback.
+ *
+ * Unpublishes the queue from dev->data->rx_queues and tears it down
+ * under the adapter lock. A NULL queue is a no-op.
+ */
+static void
+sfc_rx_queue_release(void *queue)
+{
+	struct sfc_rxq *rxq = queue;
+
+	if (rxq != NULL) {
+		struct sfc_adapter *sa = rxq->evq->sa;
+		unsigned int sw_index;
+
+		sfc_adapter_lock(sa);
+
+		sw_index = sfc_rxq_sw_index(rxq);
+
+		sfc_log_init(sa, "RxQ=%u", sw_index);
+
+		/* Hide the queue from the datapath before freeing it */
+		sa->eth_dev->data->rx_queues[sw_index] = NULL;
+
+		sfc_rx_qfini(sa, sw_index);
+
+		sfc_adapter_unlock(sa);
+	}
+}
+
/* Ethernet device operations exposed to the rte_ethdev layer */
static const struct eth_dev_ops sfc_eth_dev_ops = {
	.dev_configure			= sfc_dev_configure,
	.dev_start			= sfc_dev_start,
	.dev_stop			= sfc_dev_stop,
	.dev_close			= sfc_dev_close,
+	.link_update			= sfc_dev_link_update,
	.dev_infos_get			= sfc_dev_infos_get,
+	.rx_queue_setup			= sfc_rx_queue_setup,
+	.rx_queue_release		= sfc_rx_queue_release,
};
static int