-/*
- * Copyright 2008-2014 Cisco Systems, Inc. All rights reserved.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * Copyright (c) 2014, Cisco Systems, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
*/
#include <stdio.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <fcntl.h>
-#include <libgen.h>
#include <rte_pci.h>
+#include <rte_bus_pci.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_string_fns.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include "enic_compat.h"
#include "enic.h"
}
}
-void enic_set_hdr_split_size(struct enic *enic, u16 split_hdr_size)
-{
- vnic_set_hdr_split_size(enic->vdev, split_hdr_size);
-}
-
-static void enic_free_wq_buf(struct vnic_wq_buf *buf)
+static void enic_free_wq_buf(struct rte_mbuf **buf)
{
- struct rte_mbuf *mbuf = (struct rte_mbuf *)buf->mb;
+ struct rte_mbuf *mbuf = *buf;
rte_pktmbuf_free_seg(mbuf);
- buf->mb = NULL;
+ *buf = NULL;
}
static void enic_log_q_error(struct enic *enic)
{
unsigned int i;
- u32 error_status;
+ uint32_t error_status;
for (i = 0; i < enic->wq_count; i++) {
error_status = vnic_wq_error_status(&enic->wq[i]);
struct enic_soft_stats *soft_stats = &enic->soft_stats;
rte_atomic64_clear(&soft_stats->rx_nombuf);
rte_atomic64_clear(&soft_stats->rx_packet_errors);
+ rte_atomic64_clear(&soft_stats->tx_oversized);
}
static void enic_init_soft_stats(struct enic *enic)
struct enic_soft_stats *soft_stats = &enic->soft_stats;
rte_atomic64_init(&soft_stats->rx_nombuf);
rte_atomic64_init(&soft_stats->rx_packet_errors);
+ rte_atomic64_init(&soft_stats->tx_oversized);
enic_clear_soft_stats(enic);
}
-void enic_dev_stats_clear(struct enic *enic)
+int enic_dev_stats_clear(struct enic *enic)
{
- if (vnic_dev_stats_clear(enic->vdev))
+ int ret;
+
+ ret = vnic_dev_stats_clear(enic->vdev);
+ if (ret != 0) {
dev_err(enic, "Error in clearing stats\n");
+ return ret;
+ }
enic_clear_soft_stats(enic);
+
+ return 0;
}
-void enic_dev_stats_get(struct enic *enic, struct rte_eth_stats *r_stats)
+int enic_dev_stats_get(struct enic *enic, struct rte_eth_stats *r_stats)
{
struct vnic_stats *stats;
struct enic_soft_stats *soft_stats = &enic->soft_stats;
int64_t rx_truncated;
uint64_t rx_packet_errors;
+ int ret = vnic_dev_stats_dump(enic->vdev, &stats);
- if (vnic_dev_stats_dump(enic->vdev, &stats)) {
+ if (ret) {
dev_err(enic, "Error in getting stats\n");
- return;
+ return ret;
}
/* The number of truncated packets can only be calculated by
r_stats->obytes = stats->tx.tx_bytes_ok;
r_stats->ierrors = stats->rx.rx_errors + stats->rx.rx_drop;
- r_stats->oerrors = stats->tx.tx_errors;
+ r_stats->oerrors = stats->tx.tx_errors
+ + rte_atomic64_read(&soft_stats->tx_oversized);
r_stats->imissed = stats->rx.rx_no_bufs + rx_truncated;
r_stats->rx_nombuf = rte_atomic64_read(&soft_stats->rx_nombuf);
+ return 0;
}
-void enic_del_mac_address(struct enic *enic)
+int enic_del_mac_address(struct enic *enic, int mac_index)
{
- if (vnic_dev_del_addr(enic->vdev, enic->mac_addr))
- dev_err(enic, "del mac addr failed\n");
+ struct rte_eth_dev *eth_dev = enic->rte_dev;
+ uint8_t *mac_addr = eth_dev->data->mac_addrs[mac_index].addr_bytes;
+
+ return vnic_dev_del_addr(enic->vdev, mac_addr);
}
-void enic_set_mac_address(struct enic *enic, uint8_t *mac_addr)
+int enic_set_mac_address(struct enic *enic, uint8_t *mac_addr)
{
int err;
if (!is_eth_addr_valid(mac_addr)) {
dev_err(enic, "invalid mac address\n");
- return;
- }
-
- err = vnic_dev_del_addr(enic->vdev, enic->mac_addr);
- if (err) {
- dev_err(enic, "del mac addr failed\n");
- return;
+ return -EINVAL;
}
- ether_addr_copy((struct ether_addr *)mac_addr,
- (struct ether_addr *)enic->mac_addr);
-
err = vnic_dev_add_addr(enic->vdev, mac_addr);
- if (err) {
+ if (err)
dev_err(enic, "add mac addr failed\n");
- return;
- }
+ return err;
}
static void
return;
rte_pktmbuf_free(*mbuf);
- mbuf = NULL;
+ *mbuf = NULL;
}
void enic_init_vnic_resources(struct enic *enic)
{
unsigned int error_interrupt_enable = 1;
unsigned int error_interrupt_offset = 0;
+ unsigned int rxq_interrupt_enable = 0;
+ unsigned int rxq_interrupt_offset = ENICPMD_RXQ_INTR_OFFSET;
unsigned int index = 0;
unsigned int cq_idx;
struct vnic_rq *data_rq;
+ if (enic->rte_dev->data->dev_conf.intr_conf.rxq)
+ rxq_interrupt_enable = 1;
+
for (index = 0; index < enic->rq_count; index++) {
cq_idx = enic_cq_rq(enic, enic_rte_rq_idx_to_sop_idx(index));
0 /* cq_head */,
0 /* cq_tail */,
1 /* cq_tail_color */,
- 0 /* interrupt_enable */,
+ rxq_interrupt_enable,
1 /* cq_entry_enable */,
0 /* cq_message_enable */,
- 0 /* interrupt offset */,
+ rxq_interrupt_offset,
0 /* cq_message_addr */);
+ if (rxq_interrupt_enable)
+ rxq_interrupt_offset++;
}
for (index = 0; index < enic->wq_count; index++) {
enic_cq_wq(enic, index),
error_interrupt_enable,
error_interrupt_offset);
+ /* Compute unsupported ol flags for enic_prep_pkts() */
+ enic->wq[index].tx_offload_notsup_mask =
+ PKT_TX_OFFLOAD_MASK ^ enic->tx_offload_mask;
cq_idx = enic_cq_wq(enic, index);
vnic_cq_init(&enic->cq[cq_idx],
0 /* cq_entry_enable */,
1 /* cq_message_enable */,
0 /* interrupt offset */,
- (u64)enic->wq[index].cqmsg_rz->phys_addr);
+ (uint64_t)enic->wq[index].cqmsg_rz->iova);
}
- vnic_intr_init(&enic->intr,
- enic->config.intr_timer_usec,
- enic->config.intr_timer_type,
- /*mask_on_assertion*/1);
+ for (index = 0; index < enic->intr_count; index++) {
+ vnic_intr_init(&enic->intr[index],
+ enic->config.intr_timer_usec,
+ enic->config.intr_timer_type,
+ /*mask_on_assertion*/1);
+ }
}
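+
+/*
+ * A hedged note on the resulting interrupt vector layout, as implied by the
+ * offsets used above: vector ENICPMD_LSC_INTR_OFFSET services link state
+ * changes (and queue errors logged from that handler), and, when
+ * intr_conf.rxq is set, each Rx queue gets its own vector:
+ *
+ *   intr[ENICPMD_LSC_INTR_OFFSET]      - LSC / errors
+ *   intr[ENICPMD_RXQ_INTR_OFFSET + i]  - Rx queue i
+ */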
struct rq_enet_desc *rqd = rq->ring.descs;
unsigned i;
dma_addr_t dma_addr;
+ uint32_t max_rx_pkt_len;
+ uint16_t rq_buf_len;
if (!rq->in_use)
return 0;
dev_debug(enic, "queue %u, allocating %u rx queue mbufs\n", rq->index,
rq->ring.desc_count);
+ /*
+ * If *not* using scatter and the mbuf size is greater than the
+ * requested max packet size (max_rx_pkt_len), then reduce the
+ * posted buffer size to max_rx_pkt_len. HW still receives packets
+ * larger than max_rx_pkt_len, but they will be truncated, and the
+ * Rx handler drops them. Not ideal, but better than returning
+ * large packets when the user is not expecting them.
+ */
+ max_rx_pkt_len = enic->rte_dev->data->dev_conf.rxmode.max_rx_pkt_len;
+ rq_buf_len = rte_pktmbuf_data_room_size(rq->mp) - RTE_PKTMBUF_HEADROOM;
+ if (max_rx_pkt_len < rq_buf_len && !rq->data_queue_enable)
+ rq_buf_len = max_rx_pkt_len;
for (i = 0; i < rq->ring.desc_count; i++, rqd++) {
mb = rte_mbuf_raw_alloc(rq->mp);
if (mb == NULL) {
}
mb->data_off = RTE_PKTMBUF_HEADROOM;
- dma_addr = (dma_addr_t)(mb->buf_physaddr
+ dma_addr = (dma_addr_t)(mb->buf_iova
+ RTE_PKTMBUF_HEADROOM);
rq_enet_desc_enc(rqd, dma_addr,
(rq->is_sop ? RQ_ENET_TYPE_ONLY_SOP
: RQ_ENET_TYPE_NOT_SOP),
- mb->buf_len - RTE_PKTMBUF_HEADROOM);
+ rq_buf_len);
rq->mbuf_ring[i] = mb;
}
+ /*
+ * Do not post the buffers to the NIC until we enable the RQ via
+ * enic_start_rq().
+ */
+ rq->need_initial_post = true;
+ /* Initialize fetch index while RQ is disabled */
+ iowrite32(0, &rq->ctrl->fetch_index);
+ return 0;
+}
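+
+/*
+ * Worked example of the posted-buffer-size logic above (illustrative
+ * numbers, assuming a pool with 2048-byte data rooms and the default
+ * 128-byte headroom): with scatter off and max_rx_pkt_len = 1526,
+ *
+ *   rq_buf_len = 2048 - 128 = 1920;    // mbuf capacity
+ *   1526 < 1920 => rq_buf_len = 1526;  // post only max_rx_pkt_len bytes
+ *
+ * so a 9000-byte frame arrives truncated to 1526 bytes and is dropped by
+ * the Rx handler rather than delivered oversized.
+ */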
+
+/*
+ * Post the Rx buffers for the first time. enic_alloc_rx_queue_mbufs() has
+ * allocated the buffers and filled the RQ descriptor ring. Just need to push
+ * the post index to the NIC.
+ */
+static void
+enic_initial_post_rx(struct enic *enic, struct vnic_rq *rq)
+{
+ if (!rq->in_use || !rq->need_initial_post)
+ return;
/* make sure all prior writes are complete before doing the PIO write */
rte_rmb();
dev_debug(enic, "port=%u, qidx=%u, Write %u posted idx, %u sw held\n",
enic->port_id, rq->index, rq->posted_index, rq->rx_nb_hold);
iowrite32(rq->posted_index, &rq->ctrl->posted_index);
- iowrite32(0, &rq->ctrl->fetch_index);
rte_rmb();
-
- return 0;
-
+ rq->need_initial_post = false;
}
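+
+/*
+ * Usage sketch (see enic_start_rq() below): the initial post is deferred
+ * until the RQ is enabled, so the fetch index written while the RQ was
+ * disabled is still in effect when HW begins fetching:
+ *
+ *   vnic_rq_enable(rq);
+ *   enic_initial_post_rx(enic, rq);
+ */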
-static void *
+void *
enic_alloc_consistent(void *priv, size_t size,
- dma_addr_t *dma_handle, u8 *name)
+ dma_addr_t *dma_handle, uint8_t *name)
{
void *vaddr;
const struct rte_memzone *rz;
struct enic *enic = (struct enic *)priv;
struct enic_memzone_entry *mze;
- rz = rte_memzone_reserve_aligned((const char *)name,
- size, SOCKET_ID_ANY, 0, ENIC_ALIGN);
+ rz = rte_memzone_reserve_aligned((const char *)name, size,
+ SOCKET_ID_ANY, RTE_MEMZONE_IOVA_CONTIG, ENIC_PAGE_SIZE);
if (!rz) {
pr_err("%s : Failed to allocate memory requested for %s\n",
__func__, name);
}
vaddr = rz->addr;
- *dma_handle = (dma_addr_t)rz->phys_addr;
+ *dma_handle = (dma_addr_t)rz->iova;
mze = rte_malloc("enic memzone entry",
sizeof(struct enic_memzone_entry), 0);
pr_err("%s : Failed to allocate memory for memzone list\n",
__func__);
rte_memzone_free(rz);
+ return NULL;
}
mze->rz = rz;
return vaddr;
}
-static void
+void
enic_free_consistent(void *priv,
__rte_unused size_t size,
void *vaddr,
rte_spinlock_lock(&enic->memzone_list_lock);
LIST_FOREACH(mze, &enic->memzone_list, entries) {
if (mze->rz->addr == vaddr &&
- mze->rz->phys_addr == dma_handle)
+ mze->rz->iova == dma_handle)
break;
}
if (mze == NULL) {
rte_free(mze);
}
-int enic_link_update(struct enic *enic)
+int enic_link_update(struct rte_eth_dev *eth_dev)
{
- struct rte_eth_dev *eth_dev = enic->rte_dev;
- int ret;
- int link_status = 0;
+ struct enic *enic = pmd_priv(eth_dev);
+ struct rte_eth_link link;
- link_status = enic_get_link_status(enic);
- ret = (link_status == enic->link_status);
- enic->link_status = link_status;
- eth_dev->data->dev_link.link_status = link_status;
- eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
- eth_dev->data->dev_link.link_speed = vnic_dev_port_speed(enic->vdev);
- return ret;
+ memset(&link, 0, sizeof(link));
+ link.link_status = enic_get_link_status(enic);
+ link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ link.link_speed = vnic_dev_port_speed(enic->vdev);
+
+ return rte_eth_linkstatus_set(eth_dev, &link);
}
static void
-enic_intr_handler(__rte_unused struct rte_intr_handle *handle,
- void *arg)
+enic_intr_handler(void *arg)
{
struct rte_eth_dev *dev = (struct rte_eth_dev *)arg;
struct enic *enic = pmd_priv(dev);
- vnic_intr_return_all_credits(&enic->intr);
+ vnic_intr_return_all_credits(&enic->intr[ENICPMD_LSC_INTR_OFFSET]);
- enic_link_update(enic);
+ enic_link_update(dev);
_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
enic_log_q_error(enic);
+ /* Re-enable irq in case of INTx */
+ rte_intr_ack(&enic->pdev->intr_handle);
+}
+
+static int enic_rxq_intr_init(struct enic *enic)
+{
+ struct rte_intr_handle *intr_handle;
+ uint32_t rxq_intr_count, i;
+ int err;
+
+ intr_handle = enic->rte_dev->intr_handle;
+ if (!enic->rte_dev->data->dev_conf.intr_conf.rxq)
+ return 0;
+ /*
+ * Rx queue interrupts only work when we have MSI-X interrupts,
+ * one per queue. Sharing one interrupt is technically
+ * possible with VIC, but it is not worth the complications it brings.
+ */
+ if (!rte_intr_cap_multiple(intr_handle)) {
+ dev_err(enic, "Rx queue interrupts require MSI-X interrupts"
+ " (vfio-pci driver)\n");
+ return -ENOTSUP;
+ }
+ rxq_intr_count = enic->intr_count - ENICPMD_RXQ_INTR_OFFSET;
+ err = rte_intr_efd_enable(intr_handle, rxq_intr_count);
+ if (err) {
+ dev_err(enic, "Failed to enable event fds for Rx queue"
+ " interrupts\n");
+ return err;
+ }
+ intr_handle->intr_vec = rte_zmalloc("enic_intr_vec",
+ rxq_intr_count * sizeof(int), 0);
+ if (intr_handle->intr_vec == NULL) {
+ dev_err(enic, "Failed to allocate intr_vec\n");
+ return -ENOMEM;
+ }
+ for (i = 0; i < rxq_intr_count; i++)
+ intr_handle->intr_vec[i] = i + ENICPMD_RXQ_INTR_OFFSET;
+ return 0;
+}
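+
+/*
+ * For context, a sketch of how an application consumes the event fds set up
+ * here, using the standard ethdev Rx interrupt API (not part of this patch):
+ *
+ *   rte_eth_dev_rx_intr_ctl_q(port_id, queue_id, RTE_EPOLL_PER_THREAD,
+ *                             RTE_INTR_EVENT_ADD, NULL);
+ *   rte_eth_dev_rx_intr_enable(port_id, queue_id);
+ *   rte_epoll_wait(RTE_EPOLL_PER_THREAD, events, nb_events, timeout_ms);
+ */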
+
+static void enic_rxq_intr_deinit(struct enic *enic)
+{
+ struct rte_intr_handle *intr_handle;
+
+ intr_handle = enic->rte_dev->intr_handle;
+ rte_intr_efd_disable(intr_handle);
+ if (intr_handle->intr_vec != NULL) {
+ rte_free(intr_handle->intr_vec);
+ intr_handle->intr_vec = NULL;
+ }
+}
+
+static void enic_prep_wq_for_simple_tx(struct enic *enic, uint16_t queue_idx)
+{
+ struct wq_enet_desc *desc;
+ struct vnic_wq *wq;
+ unsigned int i;
+
+ /*
+ * Fill WQ descriptor fields that never change. Every descriptor is
+ * one packet, so set EOP. Also set CQ_ENTRY every ENIC_WQ_CQ_THRESH
+ * descriptors (i.e. request one completion update every 32 packets).
+ */
+ wq = &enic->wq[queue_idx];
+ desc = (struct wq_enet_desc *)wq->ring.descs;
+ for (i = 0; i < wq->ring.desc_count; i++, desc++) {
+ desc->header_length_flags = 1 << WQ_ENET_FLAGS_EOP_SHIFT;
+ if (i % ENIC_WQ_CQ_THRESH == ENIC_WQ_CQ_THRESH - 1)
+ desc->header_length_flags |=
+ (1 << WQ_ENET_FLAGS_CQ_ENTRY_SHIFT);
+ }
+}
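+
+/*
+ * Illustration of the CQ_ENTRY placement above, assuming ENIC_WQ_CQ_THRESH
+ * is 32 per the comment: descriptors 31, 63, 95, ... carry the CQ_ENTRY
+ * flag, so the NIC writes back one completion per 32 transmitted packets
+ * instead of one per packet.
+ */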
+
+/*
+ * The 'strong' version is in enic_rxtx_vec_avx2.c. This weak version is
+ * used when that file is not compiled.
+ */
+__rte_weak bool
+enic_use_vector_rx_handler(__rte_unused struct rte_eth_dev *eth_dev)
+{
+ return false;
+}
+
+void enic_pick_rx_handler(struct rte_eth_dev *eth_dev)
+{
+ struct enic *enic = pmd_priv(eth_dev);
+
+ /*
+ * Preference order:
+ * 1. The vectorized handler if possible and requested.
+ * 2. The non-scatter, simplified handler if scatter Rx is not used.
+ * 3. The default handler as a fallback.
+ */
+ if (enic_use_vector_rx_handler(eth_dev))
+ return;
+ if (enic->rq_count > 0 && enic->rq[0].data_queue_enable == 0) {
+ ENICPMD_LOG(DEBUG, " use the non-scatter Rx handler");
+ eth_dev->rx_pkt_burst = &enic_noscatter_recv_pkts;
+ } else {
+ ENICPMD_LOG(DEBUG, " use the normal Rx handler");
+ eth_dev->rx_pkt_burst = &enic_recv_pkts;
+ }
+}
+
+/* Secondary process uses this to set the Tx handler */
+void enic_pick_tx_handler(struct rte_eth_dev *eth_dev)
+{
+ struct enic *enic = pmd_priv(eth_dev);
+
+ if (enic->use_simple_tx_handler) {
+ ENICPMD_LOG(DEBUG, " use the simple tx handler");
+ eth_dev->tx_pkt_burst = &enic_simple_xmit_pkts;
+ } else {
+ ENICPMD_LOG(DEBUG, " use the default tx handler");
+ eth_dev->tx_pkt_burst = &enic_xmit_pkts;
+ }
}
int enic_enable(struct enic *enic)
unsigned int index;
int err;
struct rte_eth_dev *eth_dev = enic->rte_dev;
+ uint64_t simple_tx_offloads;
+ uintptr_t p;
+
+ if (enic->enable_avx2_rx) {
+ struct rte_mbuf mb_def = { .buf_addr = 0 };
+
+ /*
+ * mbuf_initializer contains const-after-init fields of
+ * receive mbufs (i.e. 64 bits of fields from rearm_data).
+ * It is currently used by the vectorized handler.
+ */
+ mb_def.nb_segs = 1;
+ mb_def.data_off = RTE_PKTMBUF_HEADROOM;
+ mb_def.port = enic->port_id;
+ rte_mbuf_refcnt_set(&mb_def, 1);
+ rte_compiler_barrier();
+ p = (uintptr_t)&mb_def.rearm_data;
+ enic->mbuf_initializer = *(uint64_t *)p;
+ }
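+ /*
+ * (Illustration) the vectorized Rx path can then re-initialize a
+ * recycled mbuf with a single 64-bit store instead of writing each
+ * field individually, roughly:
+ *
+ *   *(uint64_t *)&mb->rearm_data = enic->mbuf_initializer;
+ */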
eth_dev->data->dev_link.link_speed = vnic_dev_port_speed(enic->vdev);
eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
if (eth_dev->data->dev_conf.intr_conf.lsc)
vnic_dev_notify_set(enic->vdev, 0);
+ err = enic_rxq_intr_init(enic);
+ if (err)
+ return err;
if (enic_clsf_init(enic))
dev_warning(enic, "Init of hash table for clsf failed."\
"Flow director feature will not work\n");
+ if (enic_fm_init(enic))
+ dev_warning(enic, "Init of flowman failed.\n");
+
for (index = 0; index < enic->rq_count; index++) {
err = enic_alloc_rx_queue_mbufs(enic,
&enic->rq[enic_rte_rq_idx_to_sop_idx(index)]);
}
}
+ /*
+ * Use the simple TX handler if possible. Only checksum offloads
+ * and vlan insertion are supported.
+ */
+ simple_tx_offloads = enic->tx_offload_capa &
+ (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_VLAN_INSERT |
+ DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM);
+ if ((eth_dev->data->dev_conf.txmode.offloads &
+ ~simple_tx_offloads) == 0) {
+ ENICPMD_LOG(DEBUG, " use the simple tx handler");
+ eth_dev->tx_pkt_burst = &enic_simple_xmit_pkts;
+ for (index = 0; index < enic->wq_count; index++)
+ enic_prep_wq_for_simple_tx(enic, index);
+ enic->use_simple_tx_handler = 1;
+ } else {
+ ENICPMD_LOG(DEBUG, " use the default tx handler");
+ eth_dev->tx_pkt_burst = &enic_xmit_pkts;
+ }
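+ /*
+ * Example (hedged): a config with only checksum offloads, e.g.
+ * txmode.offloads = DEV_TX_OFFLOAD_IPV4_CKSUM | DEV_TX_OFFLOAD_TCP_CKSUM,
+ * selects the simple handler; adding DEV_TX_OFFLOAD_TCP_TSO (outside
+ * simple_tx_offloads) falls back to the default handler.
+ */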
+
+ enic_pick_rx_handler(eth_dev);
+
for (index = 0; index < enic->wq_count; index++)
enic_start_wq(enic, index);
for (index = 0; index < enic->rq_count; index++)
enic_intr_handler, (void *)enic->rte_dev);
rte_intr_enable(&(enic->pdev->intr_handle));
- vnic_intr_unmask(&enic->intr);
+ /* Unmask LSC interrupt */
+ vnic_intr_unmask(&enic->intr[ENICPMD_LSC_INTR_OFFSET]);
return 0;
}
int enic_alloc_intr_resources(struct enic *enic)
{
int err;
+ unsigned int i;
dev_info(enic, "vNIC resources used: "\
"wq %d rq %d cq %d intr %d\n",
enic->wq_count, enic_vnic_rq_count(enic),
enic->cq_count, enic->intr_count);
- err = vnic_intr_alloc(enic->vdev, &enic->intr, 0);
- if (err)
- enic_free_vnic_resources(enic);
-
- return err;
+ for (i = 0; i < enic->intr_count; i++) {
+ err = vnic_intr_alloc(enic->vdev, &enic->intr[i], i);
+ if (err) {
+ enic_free_vnic_resources(enic);
+ return err;
+ }
+ }
+ return 0;
}
void enic_free_rq(void *rxq)
enic = vnic_dev_priv(rq_sop->vdev);
rq_data = &enic->rq[rq_sop->data_queue_idx];
+ if (rq_sop->free_mbufs) {
+ struct rte_mbuf **mb;
+ int i;
+
+ mb = rq_sop->free_mbufs;
+ for (i = ENIC_RX_BURST_MAX - rq_sop->num_free_mbufs;
+ i < ENIC_RX_BURST_MAX; i++)
+ rte_pktmbuf_free(mb[i]);
+ rte_free(rq_sop->free_mbufs);
+ rq_sop->free_mbufs = NULL;
+ rq_sop->num_free_mbufs = 0;
+ }
+
enic_rxmbuf_queue_release(enic, rq_sop);
if (rq_data->in_use)
enic_rxmbuf_queue_release(enic, rq_data);
void enic_start_wq(struct enic *enic, uint16_t queue_idx)
{
- struct rte_eth_dev *eth_dev = enic->rte_dev;
+ struct rte_eth_dev_data *data = enic->dev_data;
vnic_wq_enable(&enic->wq[queue_idx]);
- eth_dev->data->tx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STARTED;
+ data->tx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STARTED;
}
int enic_stop_wq(struct enic *enic, uint16_t queue_idx)
{
- struct rte_eth_dev *eth_dev = enic->rte_dev;
+ struct rte_eth_dev_data *data = enic->dev_data;
int ret;
ret = vnic_wq_disable(&enic->wq[queue_idx]);
if (ret)
return ret;
- eth_dev->data->tx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STOPPED;
+ data->tx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STOPPED;
return 0;
}
void enic_start_rq(struct enic *enic, uint16_t queue_idx)
{
+ struct rte_eth_dev_data *data = enic->dev_data;
struct vnic_rq *rq_sop;
struct vnic_rq *rq_data;
rq_sop = &enic->rq[enic_rte_rq_idx_to_sop_idx(queue_idx)];
rq_data = &enic->rq[rq_sop->data_queue_idx];
- struct rte_eth_dev *eth_dev = enic->rte_dev;
- if (rq_data->in_use)
+ if (rq_data->in_use) {
vnic_rq_enable(rq_data);
+ enic_initial_post_rx(enic, rq_data);
+ }
rte_mb();
vnic_rq_enable(rq_sop);
- eth_dev->data->rx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STARTED;
+ enic_initial_post_rx(enic, rq_sop);
+ data->rx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STARTED;
}
int enic_stop_rq(struct enic *enic, uint16_t queue_idx)
{
+ struct rte_eth_dev_data *data = enic->dev_data;
int ret1 = 0, ret2 = 0;
- struct rte_eth_dev *eth_dev = enic->rte_dev;
struct vnic_rq *rq_sop;
struct vnic_rq *rq_data;
rq_sop = &enic->rq[enic_rte_rq_idx_to_sop_idx(queue_idx)];
else if (ret1)
return ret1;
- eth_dev->data->rx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STOPPED;
+ data->rx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STOPPED;
return 0;
}
unsigned int mbuf_size, mbufs_per_pkt;
unsigned int nb_sop_desc, nb_data_desc;
uint16_t min_sop, max_sop, min_data, max_data;
- uint16_t mtu = enic->rte_dev->data->mtu;
+ uint32_t max_rx_pkt_len;
rq_sop->is_sop = 1;
rq_sop->data_queue_idx = data_queue_idx;
mbuf_size = (uint16_t)(rte_pktmbuf_data_room_size(mp) -
RTE_PKTMBUF_HEADROOM);
+ /* max_rx_pkt_len includes the ethernet header and CRC. */
+ max_rx_pkt_len = enic->rte_dev->data->dev_conf.rxmode.max_rx_pkt_len;
- if (enic->rte_dev->data->dev_conf.rxmode.enable_scatter) {
+ if (enic->rte_dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_SCATTER) {
dev_info(enic, "Rq %u Scatter rx mode enabled\n", queue_idx);
- /* ceil((mtu + ETHER_HDR_LEN + 4)/mbuf_size) */
- mbufs_per_pkt = ((mtu + ETHER_HDR_LEN + 4) +
- (mbuf_size - 1)) / mbuf_size;
+ /* ceil((max pkt len)/mbuf_size) */
+ mbufs_per_pkt = (max_rx_pkt_len + mbuf_size - 1) / mbuf_size;
} else {
dev_info(enic, "Scatter rx mode disabled\n");
mbufs_per_pkt = 1;
+ if (max_rx_pkt_len > mbuf_size) {
+ dev_warning(enic, "The maximum Rx packet size (%u) is"
+ " larger than the mbuf size (%u), and"
+ " scatter is disabled. Larger packets will"
+ " be truncated.\n",
+ max_rx_pkt_len, mbuf_size);
+ }
}
if (mbufs_per_pkt > 1) {
dev_info(enic, "Rq %u Scatter rx mode in use\n", queue_idx);
rq_sop->data_queue_enable = 1;
rq_data->in_use = 1;
+ /*
+ * HW does not directly support rxmode.max_rx_pkt_len. HW always
+ * receives packet sizes up to the "max" MTU.
+ * If not using scatter, we can achieve the effect of dropping
+ * larger packets by reducing the size of posted buffers.
+ * See enic_alloc_rx_queue_mbufs().
+ */
+ if (max_rx_pkt_len <
+ enic_mtu_to_max_rx_pktlen(enic->max_mtu)) {
+ dev_warning(enic, "rxmode.max_rx_pkt_len is ignored"
+ " when scatter rx mode is in use.\n");
+ }
} else {
dev_info(enic, "Rq %u Scatter rx mode not being used\n",
queue_idx);
}
/* number of descriptors have to be a multiple of 32 */
- nb_sop_desc = (nb_desc / mbufs_per_pkt) & ~0x1F;
- nb_data_desc = (nb_desc - nb_sop_desc) & ~0x1F;
+ nb_sop_desc = (nb_desc / mbufs_per_pkt) & ENIC_ALIGN_DESCS_MASK;
+ nb_data_desc = (nb_desc - nb_sop_desc) & ENIC_ALIGN_DESCS_MASK;
rq_sop->max_mbufs_per_pkt = mbufs_per_pkt;
rq_data->max_mbufs_per_pkt = mbufs_per_pkt;
if (mbufs_per_pkt > 1) {
- min_sop = 64;
+ min_sop = ENIC_RX_BURST_MAX;
max_sop = ((enic->config.rq_desc_count /
- (mbufs_per_pkt - 1)) & ~0x1F);
+ (mbufs_per_pkt - 1)) & ENIC_ALIGN_DESCS_MASK);
min_data = min_sop * (mbufs_per_pkt - 1);
max_data = enic->config.rq_desc_count;
} else {
- min_sop = 64;
+ min_sop = ENIC_RX_BURST_MAX;
max_sop = enic->config.rq_desc_count;
min_data = 0;
max_data = 0;
nb_data_desc = max_data;
}
if (mbufs_per_pkt > 1) {
- dev_info(enic, "For mtu %d and mbuf size %d valid rx descriptor range is %d to %d\n",
- mtu, mbuf_size, min_sop + min_data,
+ dev_info(enic, "For max packet size %u and mbuf size %u valid"
+ " rx descriptor range is %u to %u\n",
+ max_rx_pkt_len, mbuf_size, min_sop + min_data,
max_sop + max_data);
}
dev_info(enic, "Using %d rx descriptors (sop %d, data %d)\n",
goto err_free_sop_mbuf;
}
+ rq_sop->free_mbufs = (struct rte_mbuf **)
+ rte_zmalloc_socket("rq->free_mbufs",
+ sizeof(struct rte_mbuf *) *
+ ENIC_RX_BURST_MAX,
+ RTE_CACHE_LINE_SIZE, rq_sop->socket_id);
+ if (rq_sop->free_mbufs == NULL)
+ goto err_free_data_mbuf;
+ rq_sop->num_free_mbufs = 0;
+
rq_sop->tot_nb_desc = nb_desc; /* squirl away for MTU update function */
return 0;
+err_free_data_mbuf:
+ rte_free(rq_data->mbuf_ring);
err_free_sop_mbuf:
rte_free(rq_sop->mbuf_ring);
err_free_cq:
int err;
struct vnic_wq *wq = &enic->wq[queue_idx];
unsigned int cq_index = enic_cq_wq(enic, queue_idx);
- char name[NAME_MAX];
+ char name[RTE_MEMZONE_NAMESIZE];
static int instance;
wq->socket_id = socket_id;
- if (nb_desc) {
- if (nb_desc > enic->config.wq_desc_count) {
- dev_warning(enic,
- "WQ %d - number of tx desc in cmd line (%d)"\
- "is greater than that in the UCSM/CIMC adapter"\
- "policy. Applying the value in the adapter "\
- "policy (%d)\n",
- queue_idx, nb_desc, enic->config.wq_desc_count);
- } else if (nb_desc != enic->config.wq_desc_count) {
- enic->config.wq_desc_count = nb_desc;
- dev_info(enic,
- "TX Queues - effective number of descs:%d\n",
- nb_desc);
- }
- }
+ /*
+ * rte_eth_tx_queue_setup() checks min, max, and alignment. So just
+ * print an info message for diagnostics.
+ */
+ dev_info(enic, "TX Queues - effective number of descs:%d\n", nb_desc);
/* Allocate queue resources */
err = vnic_wq_alloc(enic->vdev, &enic->wq[queue_idx], queue_idx,
- enic->config.wq_desc_count,
+ nb_desc,
sizeof(struct wq_enet_desc));
if (err) {
dev_err(enic, "error in allocation of wq\n");
}
err = vnic_cq_alloc(enic->vdev, &enic->cq[cq_index], cq_index,
- socket_id, enic->config.wq_desc_count,
+ socket_id, nb_desc,
sizeof(struct cq_enet_wq_desc));
if (err) {
vnic_wq_free(wq);
instance++);
wq->cqmsg_rz = rte_memzone_reserve_aligned((const char *)name,
- sizeof(uint32_t),
- SOCKET_ID_ANY, 0,
- ENIC_ALIGN);
+ sizeof(uint32_t), SOCKET_ID_ANY,
+ RTE_MEMZONE_IOVA_CONTIG, ENIC_PAGE_SIZE);
if (!wq->cqmsg_rz)
return -ENOMEM;
unsigned int i;
int err;
- vnic_intr_mask(&enic->intr);
- (void)vnic_intr_masked(&enic->intr); /* flush write */
+ for (i = 0; i < enic->intr_count; i++) {
+ vnic_intr_mask(&enic->intr[i]);
+ (void)vnic_intr_masked(&enic->intr[i]); /* flush write */
+ }
+ enic_rxq_intr_deinit(enic);
rte_intr_disable(&enic->pdev->intr_handle);
rte_intr_callback_unregister(&enic->pdev->intr_handle,
enic_intr_handler,
vnic_dev_disable(enic->vdev);
enic_clsf_destroy(enic);
+ enic_fm_destroy(enic);
if (!enic_is_sriov_vf(enic))
vnic_dev_del_addr(enic->vdev, enic->mac_addr);
vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
for (i = 0; i < enic->cq_count; i++)
vnic_cq_clean(&enic->cq[i]);
- vnic_intr_clean(&enic->intr);
+ for (i = 0; i < enic->intr_count; i++)
+ vnic_intr_clean(&enic->intr[i]);
return 0;
}
static int enic_dev_open(struct enic *enic)
{
int err;
+ int flags = CMD_OPENF_IG_DESCCACHE;
err = enic_dev_wait(enic->vdev, vnic_dev_open,
- vnic_dev_open_done, 0);
+ vnic_dev_open_done, flags);
if (err)
dev_err(enic_get_dev(enic),
"vNIC device open failed, err %d\n", err);
return err;
}
-static int enic_set_rsskey(struct enic *enic)
+static int enic_set_rsskey(struct enic *enic, uint8_t *user_key)
{
dma_addr_t rss_key_buf_pa;
union vnic_rss_key *rss_key_buf_va = NULL;
- static union vnic_rss_key rss_key = {
- .key = {
- [0] = {.b = {85, 67, 83, 97, 119, 101, 115, 111, 109, 101}},
- [1] = {.b = {80, 65, 76, 79, 117, 110, 105, 113, 117, 101}},
- [2] = {.b = {76, 73, 78, 85, 88, 114, 111, 99, 107, 115}},
- [3] = {.b = {69, 78, 73, 67, 105, 115, 99, 111, 111, 108}},
- }
- };
- int err;
- u8 name[NAME_MAX];
+ int err, i;
+ uint8_t name[RTE_MEMZONE_NAMESIZE];
- snprintf((char *)name, NAME_MAX, "rss_key-%s", enic->bdf_name);
+ RTE_ASSERT(user_key != NULL);
+ snprintf((char *)name, sizeof(name), "rss_key-%s", enic->bdf_name);
rss_key_buf_va = enic_alloc_consistent(enic, sizeof(union vnic_rss_key),
&rss_key_buf_pa, name);
if (!rss_key_buf_va)
return -ENOMEM;
- rte_memcpy(rss_key_buf_va, &rss_key, sizeof(union vnic_rss_key));
+ for (i = 0; i < ENIC_RSS_HASH_KEY_SIZE; i++)
+ rss_key_buf_va->key[i / 10].b[i % 10] = user_key[i];
err = enic_set_rss_key(enic,
rss_key_buf_pa,
sizeof(union vnic_rss_key));
+ /* Save for later queries */
+ if (!err) {
+ rte_memcpy(&enic->rss_key, rss_key_buf_va,
+ sizeof(union vnic_rss_key));
+ }
enic_free_consistent(enic, sizeof(union vnic_rss_key),
rss_key_buf_va, rss_key_buf_pa);
return err;
}
-static int enic_set_rsscpu(struct enic *enic, u8 rss_hash_bits)
+int enic_set_rss_reta(struct enic *enic, union vnic_rss_cpu *rss_cpu)
{
dma_addr_t rss_cpu_buf_pa;
union vnic_rss_cpu *rss_cpu_buf_va = NULL;
- int i;
int err;
- u8 name[NAME_MAX];
+ uint8_t name[RTE_MEMZONE_NAMESIZE];
- snprintf((char *)name, NAME_MAX, "rss_cpu-%s", enic->bdf_name);
+ snprintf((char *)name, sizeof(name), "rss_cpu-%s", enic->bdf_name);
rss_cpu_buf_va = enic_alloc_consistent(enic, sizeof(union vnic_rss_cpu),
&rss_cpu_buf_pa, name);
if (!rss_cpu_buf_va)
return -ENOMEM;
- for (i = 0; i < (1 << rss_hash_bits); i++)
- (*rss_cpu_buf_va).cpu[i / 4].b[i % 4] =
- enic_rte_rq_idx_to_sop_idx(i % enic->rq_count);
+ rte_memcpy(rss_cpu_buf_va, rss_cpu, sizeof(union vnic_rss_cpu));
err = enic_set_rss_cpu(enic,
rss_cpu_buf_pa,
enic_free_consistent(enic, sizeof(union vnic_rss_cpu),
rss_cpu_buf_va, rss_cpu_buf_pa);
+ /* Save for later queries */
+ if (!err)
+ rte_memcpy(&enic->rss_cpu, rss_cpu, sizeof(union vnic_rss_cpu));
return err;
}
-static int enic_set_niccfg(struct enic *enic, u8 rss_default_cpu,
- u8 rss_hash_type, u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable)
+static int enic_set_niccfg(struct enic *enic, uint8_t rss_default_cpu,
+ uint8_t rss_hash_type, uint8_t rss_hash_bits, uint8_t rss_base_cpu,
+ uint8_t rss_enable)
{
- const u8 tso_ipid_split_en = 0;
+ const uint8_t tso_ipid_split_en = 0;
int err;
- /* Enable VLAN tag stripping */
-
err = enic_set_nic_cfg(enic,
rss_default_cpu, rss_hash_type,
rss_hash_bits, rss_base_cpu,
return err;
}
-int enic_set_rss_nic_cfg(struct enic *enic)
+/* Initialize RSS with defaults, called from dev_configure */
+int enic_init_rss_nic_cfg(struct enic *enic)
{
- const u8 rss_default_cpu = 0;
- const u8 rss_hash_type = NIC_CFG_RSS_HASH_TYPE_IPV4 |
- NIC_CFG_RSS_HASH_TYPE_TCP_IPV4 |
- NIC_CFG_RSS_HASH_TYPE_IPV6 |
- NIC_CFG_RSS_HASH_TYPE_TCP_IPV6;
- const u8 rss_hash_bits = 7;
- const u8 rss_base_cpu = 0;
- u8 rss_enable = ENIC_SETTING(enic, RSS) && (enic->rq_count > 1);
-
- if (rss_enable) {
- if (!enic_set_rsskey(enic)) {
- if (enic_set_rsscpu(enic, rss_hash_bits)) {
- rss_enable = 0;
- dev_warning(enic, "RSS disabled, "\
- "Failed to set RSS cpu indirection table.");
- }
- } else {
- rss_enable = 0;
- dev_warning(enic,
- "RSS disabled, Failed to set RSS key.\n");
+ static uint8_t default_rss_key[] = {
+ 85, 67, 83, 97, 119, 101, 115, 111, 109, 101,
+ 80, 65, 76, 79, 117, 110, 105, 113, 117, 101,
+ 76, 73, 78, 85, 88, 114, 111, 99, 107, 115,
+ 69, 78, 73, 67, 105, 115, 99, 111, 111, 108,
+ };
+ struct rte_eth_rss_conf rss_conf;
+ union vnic_rss_cpu rss_cpu;
+ int ret, i;
+
+ rss_conf = enic->rte_dev->data->dev_conf.rx_adv_conf.rss_conf;
+ /*
+ * If setting the key for the first time, and the user gives us none,
+ * then push the default key to the NIC.
+ */
+ if (rss_conf.rss_key == NULL) {
+ rss_conf.rss_key = default_rss_key;
+ rss_conf.rss_key_len = ENIC_RSS_HASH_KEY_SIZE;
+ }
+ ret = enic_set_rss_conf(enic, &rss_conf);
+ if (ret) {
+ dev_err(enic, "Failed to configure RSS\n");
+ return ret;
+ }
+ if (enic->rss_enable) {
+ /* If enabling RSS, use the default reta */
+ for (i = 0; i < ENIC_RSS_RETA_SIZE; i++) {
+ rss_cpu.cpu[i / 4].b[i % 4] =
+ enic_rte_rq_idx_to_sop_idx(i % enic->rq_count);
}
+ ret = enic_set_rss_reta(enic, &rss_cpu);
+ if (ret)
+ dev_err(enic, "Failed to set RSS indirection table\n");
}
-
- return enic_set_niccfg(enic, rss_default_cpu, rss_hash_type,
- rss_hash_bits, rss_base_cpu, rss_enable);
+ return ret;
}
int enic_setup_finish(struct enic *enic)
{
- int ret;
-
enic_init_soft_stats(enic);
- ret = enic_set_rss_nic_cfg(enic);
- if (ret) {
- dev_err(enic, "Failed to config nic, aborting.\n");
- return -1;
- }
-
/* Default conf */
vnic_dev_packet_filter(enic->vdev,
1 /* directed */,
return 0;
}
-void enic_add_packet_filter(struct enic *enic)
+static int enic_rss_conf_valid(struct enic *enic,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ /* RSS is disabled per VIC settings. Ignore rss_conf. */
+ if (enic->flow_type_rss_offloads == 0)
+ return 0;
+ if (rss_conf->rss_key != NULL &&
+ rss_conf->rss_key_len != ENIC_RSS_HASH_KEY_SIZE) {
+ dev_err(enic, "Given rss_key is %d bytes, it must be %d\n",
+ rss_conf->rss_key_len, ENIC_RSS_HASH_KEY_SIZE);
+ return -EINVAL;
+ }
+ if (rss_conf->rss_hf != 0 &&
+ (rss_conf->rss_hf & enic->flow_type_rss_offloads) == 0) {
+ dev_err(enic, "Given rss_hf contains none of the supported"
+ " types\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/* Set hash type and key according to rss_conf */
+int enic_set_rss_conf(struct enic *enic, struct rte_eth_rss_conf *rss_conf)
+{
+ struct rte_eth_dev *eth_dev;
+ uint64_t rss_hf;
+ uint8_t rss_hash_type;
+ uint8_t rss_enable;
+ int ret;
+
+ RTE_ASSERT(rss_conf != NULL);
+ ret = enic_rss_conf_valid(enic, rss_conf);
+ if (ret) {
+ dev_err(enic, "RSS configuration (rss_conf) is invalid\n");
+ return ret;
+ }
+
+ eth_dev = enic->rte_dev;
+ rss_hash_type = 0;
+ rss_hf = rss_conf->rss_hf & enic->flow_type_rss_offloads;
+ if (enic->rq_count > 1 &&
+ (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) &&
+ rss_hf != 0) {
+ rss_enable = 1;
+ if (rss_hf & (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
+ ETH_RSS_NONFRAG_IPV4_OTHER))
+ rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_IPV4;
+ if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+ rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV4;
+ if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) {
+ rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_UDP_IPV4;
+ if (enic->udp_rss_weak) {
+ /*
+ * 'TCP' is not a typo. The "weak" version of
+ * UDP RSS requires both the TCP and UDP bits
+ * to be set. It also enables TCP RSS.
+ */
+ rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV4;
+ }
+ }
+ if (rss_hf & (ETH_RSS_IPV6 | ETH_RSS_IPV6_EX |
+ ETH_RSS_FRAG_IPV6 | ETH_RSS_NONFRAG_IPV6_OTHER))
+ rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_IPV6;
+ if (rss_hf & (ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_IPV6_TCP_EX))
+ rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV6;
+ if (rss_hf & (ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_IPV6_UDP_EX)) {
+ rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_UDP_IPV6;
+ if (enic->udp_rss_weak)
+ rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV6;
+ }
+ } else {
+ rss_enable = 0;
+ rss_hf = 0;
+ }
+
+ /* Set the hash key if provided */
+ if (rss_enable && rss_conf->rss_key) {
+ ret = enic_set_rsskey(enic, rss_conf->rss_key);
+ if (ret) {
+ dev_err(enic, "Failed to set RSS key\n");
+ return ret;
+ }
+ }
+
+ ret = enic_set_niccfg(enic, ENIC_RSS_DEFAULT_CPU, rss_hash_type,
+ ENIC_RSS_HASH_BITS, ENIC_RSS_BASE_CPU,
+ rss_enable);
+ if (!ret) {
+ enic->rss_hf = rss_hf;
+ enic->rss_hash_type = rss_hash_type;
+ enic->rss_enable = rss_enable;
+ } else {
+ dev_err(enic, "Failed to update RSS configurations."
+ " hash=0x%x\n", rss_hash_type);
+ }
+ return ret;
+}
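+
+/*
+ * A hedged sketch of the intended caller: the ethdev rss_hash_update op
+ * would funnel into this function, e.g.
+ *
+ *   static int enicpmd_dev_rss_hash_update(struct rte_eth_dev *dev,
+ *                                          struct rte_eth_rss_conf *conf)
+ *   {
+ *           return enic_set_rss_conf(pmd_priv(dev), conf);
+ *   }
+ */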
+
+int enic_set_vlan_strip(struct enic *enic)
+{
+ /*
+ * Unfortunately, VLAN strip on/off and RSS on/off are configured
+ * together. So, re-do niccfg, preserving the current RSS settings.
+ */
+ return enic_set_niccfg(enic, ENIC_RSS_DEFAULT_CPU, enic->rss_hash_type,
+ ENIC_RSS_HASH_BITS, ENIC_RSS_BASE_CPU,
+ enic->rss_enable);
+}
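+
+/*
+ * Usage sketch (assumes an ig_vlan_strip_en flag consumed by
+ * enic_set_nic_cfg()): a vlan_offload_set handler would update the flag
+ * and re-apply niccfg via this helper, e.g.
+ *
+ *   enic->ig_vlan_strip_en =
+ *           !!(rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
+ *   enic_set_vlan_strip(enic);
+ */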
+
+int enic_add_packet_filter(struct enic *enic)
{
/* Args -> directed, multicast, broadcast, promisc, allmulti */
- vnic_dev_packet_filter(enic->vdev, 1, 1, 1,
+ return vnic_dev_packet_filter(enic->vdev, 1, 1, 1,
enic->promisc, enic->allmulti);
}
static void enic_dev_deinit(struct enic *enic)
{
- struct rte_eth_dev *eth_dev = enic->rte_dev;
-
/* stop link status checking */
vnic_dev_notify_unset(enic->vdev);
- rte_free(eth_dev->data->mac_addrs);
+ /* mac_addrs is freed by rte_eth_dev_release_port() */
+ rte_free(enic->cq);
+ rte_free(enic->intr);
+ rte_free(enic->rq);
+ rte_free(enic->wq);
}
{
struct rte_eth_dev *eth_dev = enic->rte_dev;
int rc = 0;
+ unsigned int required_rq, required_wq, required_cq, required_intr;
+
+ /* Always use two vNIC RQs per eth_dev RQ, regardless of Rx scatter. */
+ required_rq = eth_dev->data->nb_rx_queues * 2;
+ required_wq = eth_dev->data->nb_tx_queues;
+ required_cq = eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues;
+ required_intr = 1; /* 1 for LSC even if intr_conf.lsc is 0 */
+ if (eth_dev->data->dev_conf.intr_conf.rxq) {
+ required_intr += eth_dev->data->nb_rx_queues;
+ }
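+ /*
+ * Worked example (illustrative): 4 Rx queues and 2 Tx queues with
+ * intr_conf.rxq set need required_rq = 8 (SOP + data per RQ),
+ * required_wq = 2, required_cq = 6, required_intr = 5 (1 LSC + 4 Rx).
+ */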
- /* With Rx scatter support, two RQs are now used per RQ used by
- * the application.
- */
- if (enic->conf_rq_count < eth_dev->data->nb_rx_queues) {
+ if (enic->conf_rq_count < required_rq) {
dev_err(dev, "Not enough Receive queues. Requested:%u which uses %d RQs on VIC, Configured:%u\n",
eth_dev->data->nb_rx_queues,
- eth_dev->data->nb_rx_queues * 2, enic->conf_rq_count);
+ required_rq, enic->conf_rq_count);
rc = -EINVAL;
}
- if (enic->conf_wq_count < eth_dev->data->nb_tx_queues) {
+ if (enic->conf_wq_count < required_wq) {
dev_err(dev, "Not enough Transmit queues. Requested:%u, Configured:%u\n",
eth_dev->data->nb_tx_queues, enic->conf_wq_count);
rc = -EINVAL;
}
- if (enic->conf_cq_count < (eth_dev->data->nb_rx_queues +
- eth_dev->data->nb_tx_queues)) {
+ if (enic->conf_cq_count < required_cq) {
dev_err(dev, "Not enough Completion queues. Required:%u, Configured:%u\n",
- (eth_dev->data->nb_rx_queues +
- eth_dev->data->nb_tx_queues), enic->conf_cq_count);
+ required_cq, enic->conf_cq_count);
+ rc = -EINVAL;
+ }
+ if (enic->conf_intr_count < required_intr) {
+ dev_err(dev, "Not enough Interrupts to support Rx queue"
+ " interrupts. Required:%u, Configured:%u\n",
+ required_intr, enic->conf_intr_count);
rc = -EINVAL;
}
enic->rq_count = eth_dev->data->nb_rx_queues;
enic->wq_count = eth_dev->data->nb_tx_queues;
enic->cq_count = enic->rq_count + enic->wq_count;
+ enic->intr_count = required_intr;
}
return rc;
enic_reinit_rq(struct enic *enic, unsigned int rq_idx)
{
struct vnic_rq *sop_rq, *data_rq;
- unsigned int cq_idx = enic_cq_rq(enic, rq_idx);
+ unsigned int cq_idx;
int rc = 0;
sop_rq = &enic->rq[enic_rte_rq_idx_to_sop_idx(rq_idx)];
data_rq = &enic->rq[enic_rte_rq_idx_to_data_idx(rq_idx)];
+ cq_idx = rq_idx;
vnic_cq_clean(&enic->cq[cq_idx]);
vnic_cq_init(&enic->cq[cq_idx],
old_mtu = eth_dev->data->mtu;
config_mtu = enic->config.mtu;
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return -E_RTE_SECONDARY;
+
if (new_mtu > enic->max_mtu) {
dev_err(enic,
"MTU not updated: requested (%u) greater than max (%u)\n",
"MTU (%u) is greater than value configured in NIC (%u)\n",
new_mtu, config_mtu);
- /* The easy case is when scatter is disabled. However if the MTU
- * becomes greater than the mbuf data size, packet drops will ensue.
+ /* Update the MTU and maximum packet length */
+ eth_dev->data->mtu = new_mtu;
+ eth_dev->data->dev_conf.rxmode.max_rx_pkt_len =
+ enic_mtu_to_max_rx_pktlen(new_mtu);
+
+ /*
+ * If the device has not started (enic_enable), nothing to do.
+ * Later, enic_enable() will set up RQs reflecting the new maximum
+ * packet length.
*/
- if (!enic->rte_dev->data->dev_conf.rxmode.enable_scatter) {
- eth_dev->data->mtu = new_mtu;
+ if (!eth_dev->data->dev_started)
goto set_mtu_done;
- }
- /* Rx scatter is enabled so reconfigure RQ's on the fly. The point is to
- * change Rx scatter mode if necessary for better performance. I.e. if
- * MTU was greater than the mbuf size and now it's less, scatter Rx
- * doesn't have to be used and vice versa.
- */
+ /*
+ * The device has started, re-do RQs on the fly. In the process, we
+ * pick up the new maximum packet length.
+ *
+ * Some applications rely on the ability to change MTU without stopping
+ * the device. So keep this behavior for now.
+ */
rte_spinlock_lock(&enic->mtu_lock);
/* Stop traffic on all RQs */
}
}
- /* replace Rx funciton with a no-op to avoid getting stale pkts */
+ /* replace Rx function with a no-op to avoid getting stale pkts */
eth_dev->rx_pkt_burst = enic_dummy_recv_pkts;
rte_mb();
/* now it is safe to reconfigure the RQs */
- /* update the mtu */
- eth_dev->data->mtu = new_mtu;
/* free and reallocate RQs with the new MTU */
for (rq_idx = 0; rq_idx < enic->rq_count; rq_idx++) {
rq = &enic->rq[enic_rte_rq_idx_to_sop_idx(rq_idx)];
+ if (!rq->in_use)
+ continue;
enic_free_rq(rq);
rc = enic_alloc_rq(enic, rq_idx, rq->socket_id, rq->mp,
/* put back the real receive function */
rte_mb();
- eth_dev->rx_pkt_burst = enic_recv_pkts;
+ enic_pick_rx_handler(eth_dev);
rte_mb();
/* restart Rx traffic */
dev_err(enic, "See the ENIC PMD guide for more information.\n");
return -EINVAL;
}
+ /* Queue counts may be zero. rte_zmalloc() returns NULL in that case. */
+ enic->cq = rte_zmalloc("enic_vnic_cq", sizeof(struct vnic_cq) *
+ enic->conf_cq_count, 8);
+ enic->intr = rte_zmalloc("enic_vnic_intr", sizeof(struct vnic_intr) *
+ enic->conf_intr_count, 8);
+ enic->rq = rte_zmalloc("enic_vnic_rq", sizeof(struct vnic_rq) *
+ enic->conf_rq_count, 8);
+ enic->wq = rte_zmalloc("enic_vnic_wq", sizeof(struct vnic_wq) *
+ enic->conf_wq_count, 8);
+ if (enic->conf_cq_count > 0 && enic->cq == NULL) {
+ dev_err(enic, "failed to allocate vnic_cq, aborting.\n");
+ return -1;
+ }
+ if (enic->conf_intr_count > 0 && enic->intr == NULL) {
+ dev_err(enic, "failed to allocate vnic_intr, aborting.\n");
+ return -1;
+ }
+ if (enic->conf_rq_count > 0 && enic->rq == NULL) {
+ dev_err(enic, "failed to allocate vnic_rq, aborting.\n");
+ return -1;
+ }
+ if (enic->conf_wq_count > 0 && enic->wq == NULL) {
+ dev_err(enic, "failed to allocate vnic_wq, aborting.\n");
+ return -1;
+ }
/* Get the supported filters */
enic_fdir_info(enic);
- eth_dev->data->mac_addrs = rte_zmalloc("enic_mac_addr", ETH_ALEN, 0);
+ eth_dev->data->mac_addrs = rte_zmalloc("enic_mac_addr",
+ sizeof(struct rte_ether_addr) *
+ ENIC_UNICAST_PERFECT_FILTERS, 0);
if (!eth_dev->data->mac_addrs) {
dev_err(enic, "mac addr storage alloc failed, aborting.\n");
return -1;
}
- ether_addr_copy((struct ether_addr *) enic->mac_addr,
- &eth_dev->data->mac_addrs[0]);
+ rte_ether_addr_copy((struct rte_ether_addr *)enic->mac_addr,
+ eth_dev->data->mac_addrs);
vnic_dev_set_reset_flag(enic->vdev, 0);
+ LIST_INIT(&enic->flows);
+
/* set up link status checking */
vnic_dev_notify_set(enic->vdev, -1); /* No Intr for notify */
+ /*
+ * When Geneve with options offload is available, always disable it
+ * first as it can interfere with user flow rules.
+ */
+ if (enic->geneve_opt_avail) {
+ /*
+ * Disabling fails if the feature is provisioned but
+ * not enabled. So ignore the result and do not log an error.
+ */
+ vnic_dev_overlay_offload_ctrl(enic->vdev,
+ OVERLAY_FEATURE_GENEVE,
+ OVERLAY_OFFLOAD_DISABLE);
+ }
+ enic->overlay_offload = false;
+ if (enic->disable_overlay && enic->vxlan) {
+ /*
+ * Explicitly disable overlay offload as the setting is
+ * sticky, and resetting the vNIC does not disable it.
+ */
+ if (vnic_dev_overlay_offload_ctrl(enic->vdev,
+ OVERLAY_FEATURE_VXLAN,
+ OVERLAY_OFFLOAD_DISABLE)) {
+ dev_err(enic, "failed to disable overlay offload\n");
+ } else {
+ dev_info(enic, "Overlay offload is disabled\n");
+ }
+ }
+ if (!enic->disable_overlay && enic->vxlan &&
+ /* 'VXLAN feature' enables VXLAN, NVGRE, and GENEVE. */
+ vnic_dev_overlay_offload_ctrl(enic->vdev,
+ OVERLAY_FEATURE_VXLAN,
+ OVERLAY_OFFLOAD_ENABLE) == 0) {
+ enic->tx_offload_capa |=
+ DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
+ DEV_TX_OFFLOAD_VXLAN_TNL_TSO;
+ enic->tx_offload_mask |=
+ PKT_TX_OUTER_IPV6 |
+ PKT_TX_OUTER_IPV4 |
+ PKT_TX_OUTER_IP_CKSUM |
+ PKT_TX_TUNNEL_MASK;
+ enic->overlay_offload = true;
+ dev_info(enic, "Overlay offload is enabled\n");
+ }
+ /* Geneve with options offload requires overlay offload */
+ if (enic->overlay_offload && enic->geneve_opt_avail &&
+ enic->geneve_opt_request) {
+ if (vnic_dev_overlay_offload_ctrl(enic->vdev,
+ OVERLAY_FEATURE_GENEVE,
+ OVERLAY_OFFLOAD_ENABLE)) {
+ dev_err(enic, "failed to enable geneve+option\n");
+ } else {
+ enic->geneve_opt_enabled = 1;
+ dev_info(enic, "Geneve with options is enabled\n");
+ }
+ }
+ /*
+ * Reset the vxlan port if HW vxlan parsing is available. It
+ * is always enabled regardless of overlay offload
+ * enable/disable.
+ */
+ if (enic->vxlan) {
+ enic->vxlan_port = RTE_VXLAN_DEFAULT_PORT;
+ /*
+ * Reset the vxlan port to the default, as the NIC firmware
+ * does not reset it automatically and keeps the old setting.
+ */
+ if (vnic_dev_overlay_offload_cfg(enic->vdev,
+ OVERLAY_CFG_VXLAN_PORT_UPDATE,
+ RTE_VXLAN_DEFAULT_PORT)) {
+ dev_err(enic, "failed to update vxlan port\n");
+ return -EINVAL;
+ }
+ }
+
return 0;
}
struct rte_pci_device *pdev = enic->pdev;
int err = -1;
- dev_debug(enic, " Initializing ENIC PMD\n");
+ dev_debug(enic, "Initializing ENIC PMD\n");
+
+ /* if this is a secondary process the hardware is already initialized */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
enic->bar0.vaddr = (void *)pdev->mem_resource[0].addr;
enic->bar0.len = pdev->mem_resource[0].len;
enic_alloc_consistent,
enic_free_consistent);
+ /*
+ * Allocate the consistent memory for stats upfront so both primary and
+ * secondary processes can dump stats.
+ */
+ err = vnic_dev_alloc_stats_mem(enic->vdev);
+ if (err) {
+ dev_err(enic, "Failed to allocate cmd memory, aborting\n");
+ goto err_out_unregister;
+ }
/* Issue device open to get device in known state */
err = enic_dev_open(enic);
if (err) {
}
/* Set ingress vlan rewrite mode before vnic initialization */
+ dev_debug(enic, "Set ig_vlan_rewrite_mode=%u\n",
+ enic->ig_vlan_rewrite_mode);
err = vnic_dev_set_ig_vlan_rewrite_mode(enic->vdev,
- IG_VLAN_REWRITE_MODE_PASS_THRU);
+ enic->ig_vlan_rewrite_mode);
if (err) {
dev_err(enic,
"Failed to set ingress vlan rewrite mode, aborting.\n");