#include <rte_errno.h>
#include <rte_version.h>
#include <rte_eal_memconfig.h>
+#include <rte_net.h>
#include "ena_ethdev.h"
#include "ena_logs.h"
#define PCI_DEVICE_ID_ENA_VF 0xEC20
#define PCI_DEVICE_ID_ENA_LLQ_VF 0xEC21
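+/* Tx offloads the PMD supports: L4 checksum, IPv4 header checksum and TSO. */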
+#define ENA_TX_OFFLOAD_MASK (\
+ PKT_TX_L4_MASK | \
+ PKT_TX_IP_CKSUM | \
+ PKT_TX_TCP_SEG)
+
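+/* Every Tx offload flag outside the supported set is unsupported. */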
+#define ENA_TX_OFFLOAD_NOTSUP_MASK \
+ (PKT_TX_OFFLOAD_MASK ^ ENA_TX_OFFLOAD_MASK)
+
static struct rte_pci_id pci_id_ena_map[] = {
{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AMAZON, PCI_DEVICE_ID_ENA_VF) },
{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AMAZON, PCI_DEVICE_ID_ENA_LLQ_VF) },
static int ena_dev_configure(struct rte_eth_dev *dev);
static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
+static uint16_t eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
static int ena_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
uint16_t nb_desc, unsigned int socket_id,
const struct rte_eth_txconf *tx_conf);
host_info->os_type = ENA_ADMIN_OS_DPDK;
host_info->kernel_ver = RTE_VERSION;
- strncpy((char *)host_info->kernel_ver_str, rte_version(),
- strlen(rte_version()));
+ snprintf((char *)host_info->kernel_ver_str,
+ sizeof(host_info->kernel_ver_str),
+ "%s", rte_version());
host_info->os_dist = RTE_VERSION;
- strncpy((char *)host_info->os_dist_str, rte_version(),
- strlen(rte_version()));
+ snprintf((char *)host_info->os_dist_str,
+ sizeof(host_info->os_dist_str),
+ "%s", rte_version());
host_info->driver_version =
(DRV_MODULE_VER_MAJOR) |
(DRV_MODULE_VER_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) |
if (m)
__rte_mbuf_raw_free(m);
- ring->next_to_clean =
- ENA_CIRC_INC(ring->next_to_clean, 1, ring->ring_size);
+ ring->next_to_clean++;
}
}
if (tx_buf->mbuf)
rte_pktmbuf_free(tx_buf->mbuf);
- ring->next_to_clean =
- ENA_CIRC_INC(ring->next_to_clean, 1, ring->ring_size);
+ ring->next_to_clean++;
}
}
if (ring->type == ENA_RING_TYPE_TX)
return 0;
- rc = ena_populate_rx_queue(ring, ring->ring_size - 1);
- if ((unsigned int)rc != ring->ring_size - 1) {
+ rc = ena_populate_rx_queue(ring, ring->ring_size);
+ if ((unsigned int)rc != ring->ring_size) {
PMD_INIT_LOG(ERR, "Failed to populate rx ring !\n");
return (-1);
}
return -1;
}
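+ /* Ring positions are free-running counters masked with (size - 1) on
+ * access, which is only correct when the ring size is a power of 2.
+ */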
+ if (!rte_is_power_of_2(nb_desc)) {
+ RTE_LOG(ERR, PMD,
+ "Unsupported size of RX queue: %d is not a power of 2.",
+ nb_desc);
+ return -EINVAL;
+ }
+
if (nb_desc > adapter->tx_ring_size) {
RTE_LOG(ERR, PMD,
"Unsupported size of TX queue (max size: %d)\n",
return -1;
}
+ if (!rte_is_power_of_2(nb_desc)) {
+ RTE_LOG(ERR, PMD,
+ "Unsupported size of TX queue: %d is not a power of 2.",
+ nb_desc);
+ return -EINVAL;
+ }
+
if (nb_desc > adapter->rx_ring_size) {
RTE_LOG(ERR, PMD,
"Unsupported size of RX queue (max size: %d)\n",
{
unsigned int i;
int rc;
- unsigned int ring_size = rxq->ring_size;
- unsigned int ring_mask = ring_size - 1;
- int next_to_use = rxq->next_to_use & ring_mask;
+ uint16_t ring_size = rxq->ring_size;
+ uint16_t ring_mask = ring_size - 1;
+ uint16_t next_to_use = rxq->next_to_use;
+ uint16_t in_use;
struct rte_mbuf **mbufs = &rxq->rx_buffer_info[0];
if (unlikely(!count))
return 0;
- ena_assert_msg((((ENA_CIRC_COUNT(rxq->next_to_use, rxq->next_to_clean,
- rxq->ring_size)) +
- count) < rxq->ring_size), "bad ring state");
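+ /* next_to_use/next_to_clean are free-running uint16_t counters, so an
+ * unsigned subtraction yields the in-flight count even across wrap.
+ */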
+ in_use = rxq->next_to_use - rxq->next_to_clean;
+ ena_assert_msg(((in_use + count) <= ring_size), "bad ring state");
- count = RTE_MIN(count, ring_size - next_to_use);
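+ /* rte_mempool_get_bulk() fills one contiguous array slice, so clamp
+ * the request to the room left before rx_buffer_info wraps.
+ */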
+ count = RTE_MIN(count,
+ (uint16_t)(ring_size - (next_to_use & ring_mask)));
/* get resources for incoming packets */
rc = rte_mempool_get_bulk(rxq->mb_pool,
- (void **)(&mbufs[next_to_use]), count);
+ (void **)(&mbufs[next_to_use & ring_mask]),
+ count);
if (unlikely(rc < 0)) {
rte_atomic64_inc(&rxq->adapter->drv_stats->rx_nombuf);
PMD_RX_LOG(DEBUG, "there are not enough free buffers");
}
for (i = 0; i < count; i++) {
- struct rte_mbuf *mbuf = mbufs[next_to_use];
+ uint16_t next_to_use_masked = next_to_use & ring_mask;
+ struct rte_mbuf *mbuf = mbufs[next_to_use_masked];
struct ena_com_buf ebuf;
rte_prefetch0(mbufs[((next_to_use + 4) & ring_mask)]);
ebuf.len = mbuf->buf_len - RTE_PKTMBUF_HEADROOM;
/* pass resource to device */
rc = ena_com_add_single_rx_desc(rxq->ena_com_io_sq,
- &ebuf, next_to_use);
+ &ebuf, next_to_use_masked);
if (unlikely(rc)) {
RTE_LOG(WARNING, PMD, "failed adding rx desc\n");
break;
}
- next_to_use = ENA_RX_RING_IDX_NEXT(next_to_use, ring_size);
+ next_to_use++;
}
/* When we submitted free resources to device... */
eth_dev->dev_ops = &ena_dev_ops;
eth_dev->rx_pkt_burst = &eth_ena_recv_pkts;
eth_dev->tx_pkt_burst = &eth_ena_xmit_pkts;
+ eth_dev->tx_pkt_prepare = &eth_ena_prep_pkts;
adapter->rte_eth_dev_data = eth_dev->data;
adapter->rte_dev = eth_dev;
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return 0;
- pci_dev = eth_dev->pci_dev;
+ pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
adapter->pdev = pci_dev;
PMD_INIT_LOG(INFO, "Initializing %x:%x:%x.%d\n",
ena_dev = &adapter->ena_dev;
ena_assert_msg(ena_dev != NULL, "Uninitialized device");
+ dev_info->pci_dev = RTE_DEV_TO_PCI(dev->device);
+
dev_info->speed_capa =
ETH_LINK_SPEED_1G |
ETH_LINK_SPEED_2_5G |
unsigned int ring_size = rx_ring->ring_size;
unsigned int ring_mask = ring_size - 1;
uint16_t next_to_clean = rx_ring->next_to_clean;
- int desc_in_use = 0;
+ uint16_t desc_in_use = 0;
unsigned int recv_idx = 0;
struct rte_mbuf *mbuf = NULL;
struct rte_mbuf *mbuf_head = NULL;
return 0;
}
- desc_in_use = ENA_CIRC_COUNT(rx_ring->next_to_use,
- next_to_clean, ring_size);
+ desc_in_use = rx_ring->next_to_use - next_to_clean;
if (unlikely(nb_pkts > desc_in_use))
nb_pkts = desc_in_use;
mbuf_prev = mbuf;
segments++;
- next_to_clean =
- ENA_RX_RING_IDX_NEXT(next_to_clean, ring_size);
+ next_to_clean++;
}
/* fill mbuf attributes if any */
}
/* Burst refill to save doorbells, memory barriers, const interval */
- if (ring_size - desc_in_use - 1 > ENA_RING_DESCS_RATIO(ring_size))
- ena_populate_rx_queue(rx_ring, ring_size - desc_in_use - 1);
+ if (ring_size - desc_in_use > ENA_RING_DESCS_RATIO(ring_size))
+ ena_populate_rx_queue(rx_ring, ring_size - desc_in_use);
- rx_ring->next_to_clean = next_to_clean & ring_mask;
+ rx_ring->next_to_clean = next_to_clean;
return recv_idx;
}
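+/* Tx prepare callback: verifies that the requested offloads are supported
+ * and fixes up pseudo-header checksums in SW where needed. On the first bad
+ * packet it sets rte_errno and returns the packet's index.
+ */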
+static uint16_t
+eth_ena_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ int32_t ret;
+ uint32_t i;
+ struct rte_mbuf *m;
+ uint64_t ol_flags;
+
+ for (i = 0; i != nb_pkts; i++) {
+ m = tx_pkts[i];
+ ol_flags = m->ol_flags;
+
+ if ((ol_flags & ENA_TX_OFFLOAD_NOTSUP_MASK) != 0 ||
+ (ol_flags & PKT_TX_L4_MASK) == PKT_TX_SCTP_CKSUM) {
+ rte_errno = ENOTSUP;
+ return i;
+ }
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+ ret = rte_validate_tx_offload(m);
+ if (ret != 0) {
+ rte_errno = -ret;
+ return i;
+ }
+#endif
+ /* ENA doesn't need different phdr cksum for TSO */
+ ret = rte_net_intel_cksum_flags_prepare(m,
+ ol_flags & ~PKT_TX_TCP_SEG);
+ if (ret != 0) {
+ rte_errno = -ret;
+ return i;
+ }
+ }
+
+ return i;
+}
+
static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
{
struct ena_ring *tx_ring = (struct ena_ring *)(tx_queue);
- unsigned int next_to_use = tx_ring->next_to_use;
+ uint16_t next_to_use = tx_ring->next_to_use;
+ uint16_t next_to_clean = tx_ring->next_to_clean;
struct rte_mbuf *mbuf;
unsigned int ring_size = tx_ring->ring_size;
unsigned int ring_mask = ring_size - 1;
struct ena_tx_buffer *tx_info;
struct ena_com_buf *ebuf;
uint16_t rc, req_id, total_tx_descs = 0;
- uint16_t sent_idx = 0;
+ uint16_t sent_idx = 0, empty_tx_reqs;
int nb_hw_desc;
/* Check adapter state */
return 0;
}
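+ /* Clip the burst to the number of free Tx descriptors; with the
+ * free-running counters this is a plain difference.
+ */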
+ empty_tx_reqs = ring_size - (next_to_use - next_to_clean);
+ if (nb_pkts > empty_tx_reqs)
+ nb_pkts = empty_tx_reqs;
+
for (sent_idx = 0; sent_idx < nb_pkts; sent_idx++) {
mbuf = tx_pkts[sent_idx];
- req_id = tx_ring->empty_tx_reqs[next_to_use];
+ req_id = tx_ring->empty_tx_reqs[next_to_use & ring_mask];
tx_info = &tx_ring->tx_buffer_info[req_id];
tx_info->mbuf = mbuf;
tx_info->num_of_bufs = 0;
tx_info->tx_descs = nb_hw_desc;
- next_to_use = ENA_TX_RING_IDX_NEXT(next_to_use, ring_size);
+ next_to_use++;
}
/* If there are ready packets to be xmitted... */
rte_pktmbuf_free(mbuf);
/* Put back descriptor to the ring for reuse */
- tx_ring->empty_tx_reqs[tx_ring->next_to_clean] = req_id;
- tx_ring->next_to_clean =
- ENA_TX_RING_IDX_NEXT(tx_ring->next_to_clean,
- tx_ring->ring_size);
+ tx_ring->empty_tx_reqs[next_to_clean & ring_mask] = req_id;
+ next_to_clean++;
/* If too many descs to clean, leave it for another run */
if (unlikely(total_tx_descs > ENA_RING_DESCS_RATIO(ring_size)))
if (total_tx_descs > 0) {
/* acknowledge completion of sent packets */
ena_com_comp_ack(tx_ring->ena_com_io_sq, total_tx_descs);
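+ /* Publish next_to_clean only after the completions were acked. */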
+ tx_ring->next_to_clean = next_to_clean;
}
return sent_idx;
.dev_private_size = sizeof(struct ena_adapter),
};
-DRIVER_REGISTER_PCI(net_ena, rte_ena_pmd.pci_drv);
-DRIVER_REGISTER_PCI_TABLE(net_ena, pci_id_ena_map);
+RTE_PMD_REGISTER_PCI(net_ena, rte_ena_pmd.pci_drv);
+RTE_PMD_REGISTER_PCI_TABLE(net_ena, pci_id_ena_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_ena, "* igb_uio | uio_pci_generic | vfio");