#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_string_fns.h>
-#include <rte_ethdev_driver.h>
+#include <ethdev_driver.h>
#include "enic_compat.h"
#include "enic.h"
return !is_mcast_addr(addr) && !is_zero_addr(addr);
}
-static void
+void
enic_rxmbuf_queue_release(__rte_unused struct enic *enic, struct vnic_rq *rq)
{
uint16_t i;
}
}
-static void enic_free_wq_buf(struct rte_mbuf **buf)
+void enic_free_wq_buf(struct rte_mbuf **buf)
{
struct rte_mbuf *mbuf = *buf;
static void enic_log_q_error(struct enic *enic)
{
unsigned int i;
- u32 error_status;
+ uint32_t error_status;
for (i = 0; i < enic->wq_count; i++) {
error_status = vnic_wq_error_status(&enic->wq[i]);
enic_clear_soft_stats(enic);
}
-void enic_dev_stats_clear(struct enic *enic)
+int enic_dev_stats_clear(struct enic *enic)
{
- if (vnic_dev_stats_clear(enic->vdev))
+ int ret;
+
+ ret = vnic_dev_stats_clear(enic->vdev);
+ if (ret != 0) {
dev_err(enic, "Error in clearing stats\n");
+ return ret;
+ }
enic_clear_soft_stats(enic);
+
+ return 0;
}
int enic_dev_stats_get(struct enic *enic, struct rte_eth_stats *r_stats)
return err;
}
-static void
-enic_free_rq_buf(struct rte_mbuf **mbuf)
+void enic_free_rq_buf(struct rte_mbuf **mbuf)
{
if (*mbuf == NULL)
return;
error_interrupt_enable,
error_interrupt_offset);
- data_rq = &enic->rq[enic_rte_rq_idx_to_data_idx(index)];
+ data_rq = &enic->rq[enic_rte_rq_idx_to_data_idx(index, enic)];
if (data_rq->in_use)
vnic_rq_init(data_rq,
cq_idx,
error_interrupt_enable,
error_interrupt_offset);
-
vnic_cq_init(&enic->cq[cq_idx],
0 /* flow_control_enable */,
1 /* color_enable */,
0 /* cq_entry_enable */,
1 /* cq_message_enable */,
0 /* interrupt offset */,
- (u64)enic->wq[index].cqmsg_rz->iova);
+ (uint64_t)enic->wq[index].cqmsg_rz->iova);
}
for (index = 0; index < enic->intr_count; index++) {
}
-static int
+int
enic_alloc_rx_queue_mbufs(struct enic *enic, struct vnic_rq *rq)
{
struct rte_mbuf *mb;
rq->need_initial_post = false;
}
-static void *
+void *
enic_alloc_consistent(void *priv, size_t size,
- dma_addr_t *dma_handle, u8 *name)
+ dma_addr_t *dma_handle, uint8_t *name)
{
void *vaddr;
const struct rte_memzone *rz;
struct enic_memzone_entry *mze;
rz = rte_memzone_reserve_aligned((const char *)name, size,
- SOCKET_ID_ANY, RTE_MEMZONE_IOVA_CONTIG, ENIC_ALIGN);
+ SOCKET_ID_ANY, RTE_MEMZONE_IOVA_CONTIG, ENIC_PAGE_SIZE);
if (!rz) {
pr_err("%s : Failed to allocate memory requested for %s\n",
__func__, name);
return vaddr;
}
-static void
+void
enic_free_consistent(void *priv,
__rte_unused size_t size,
void *vaddr,
rte_free(mze);
}
-int enic_link_update(struct enic *enic)
+int enic_link_update(struct rte_eth_dev *eth_dev)
{
- struct rte_eth_dev *eth_dev = enic->rte_dev;
+ struct enic *enic = pmd_priv(eth_dev);
struct rte_eth_link link;
memset(&link, 0, sizeof(link));
vnic_intr_return_all_credits(&enic->intr[ENICPMD_LSC_INTR_OFFSET]);
- enic_link_update(enic);
- _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
+ enic_link_update(dev);
+ rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
enic_log_q_error(enic);
+ /* Re-enable irq in case of INTx */
+ rte_intr_ack(&enic->pdev->intr_handle);
}
static int enic_rxq_intr_init(struct enic *enic)
* used when that file is not compiled.
*/
__rte_weak bool
-enic_use_vector_rx_handler(__rte_unused struct enic *enic)
+enic_use_vector_rx_handler(__rte_unused struct rte_eth_dev *eth_dev)
{
return false;
}
-static void pick_rx_handler(struct enic *enic)
+void enic_pick_rx_handler(struct rte_eth_dev *eth_dev)
{
- struct rte_eth_dev *eth_dev;
+ struct enic *enic = pmd_priv(eth_dev);
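+ /* With 64B CQ entries enabled, use the handler that parses 64-byte completions */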
+ if (enic->cq64) {
+ ENICPMD_LOG(DEBUG, " use the normal Rx handler for 64B CQ entry");
+ eth_dev->rx_pkt_burst = &enic_recv_pkts_64;
+ return;
+ }
/*
* Preference order:
* 1. The vectorized handler if possible and requested.
* 2. The non-scatter, simplified handler if scatter Rx is not used.
* 3. The default handler as a fallback.
*/
- eth_dev = enic->rte_dev;
- if (enic_use_vector_rx_handler(enic))
+ if (enic_use_vector_rx_handler(eth_dev))
return;
if (enic->rq_count > 0 && enic->rq[0].data_queue_enable == 0) {
- PMD_INIT_LOG(DEBUG, " use the non-scatter Rx handler");
+ ENICPMD_LOG(DEBUG, " use the non-scatter Rx handler");
eth_dev->rx_pkt_burst = &enic_noscatter_recv_pkts;
} else {
- PMD_INIT_LOG(DEBUG, " use the normal Rx handler");
+ ENICPMD_LOG(DEBUG, " use the normal Rx handler");
eth_dev->rx_pkt_burst = &enic_recv_pkts;
}
}
+/* Secondary process uses this to set the Tx handler */
+void enic_pick_tx_handler(struct rte_eth_dev *eth_dev)
+{
+ struct enic *enic = pmd_priv(eth_dev);
+
+ if (enic->use_simple_tx_handler) {
+ ENICPMD_LOG(DEBUG, " use the simple tx handler");
+ eth_dev->tx_pkt_burst = &enic_simple_xmit_pkts;
+ } else {
+ ENICPMD_LOG(DEBUG, " use the default tx handler");
+ eth_dev->tx_pkt_burst = &enic_xmit_pkts;
+ }
+}
+
int enic_enable(struct enic *enic)
{
unsigned int index;
err = enic_rxq_intr_init(enic);
if (err)
return err;
- if (enic_clsf_init(enic))
- dev_warning(enic, "Init of hash table for clsf failed."\
- "Flow director feature will not work\n");
+
+ /* Initialize flowman if not already initialized during probe */
+ if (enic->fm == NULL && enic_fm_init(enic))
+ dev_warning(enic, "Init of flowman failed.\n");
for (index = 0; index < enic->rq_count; index++) {
err = enic_alloc_rx_queue_mbufs(enic,
return err;
}
err = enic_alloc_rx_queue_mbufs(enic,
- &enic->rq[enic_rte_rq_idx_to_data_idx(index)]);
+ &enic->rq[enic_rte_rq_idx_to_data_idx(index, enic)]);
if (err) {
/* release the allocated mbufs for the sop rq*/
enic_rxmbuf_queue_release(enic,
DEV_TX_OFFLOAD_TCP_CKSUM);
if ((eth_dev->data->dev_conf.txmode.offloads &
~simple_tx_offloads) == 0) {
- PMD_INIT_LOG(DEBUG, " use the simple tx handler");
+ ENICPMD_LOG(DEBUG, " use the simple tx handler");
eth_dev->tx_pkt_burst = &enic_simple_xmit_pkts;
for (index = 0; index < enic->wq_count; index++)
enic_prep_wq_for_simple_tx(enic, index);
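+ /* Remember the choice so a secondary process can pick the same Tx handler */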
+ enic->use_simple_tx_handler = 1;
} else {
- PMD_INIT_LOG(DEBUG, " use the default tx handler");
+ ENICPMD_LOG(DEBUG, " use the default tx handler");
eth_dev->tx_pkt_burst = &enic_xmit_pkts;
}
- pick_rx_handler(enic);
+ enic_pick_rx_handler(eth_dev);
for (index = 0; index < enic->wq_count; index++)
enic_start_wq(enic, index);
void enic_start_wq(struct enic *enic, uint16_t queue_idx)
{
- struct rte_eth_dev *eth_dev = enic->rte_dev;
+ struct rte_eth_dev_data *data = enic->dev_data;
vnic_wq_enable(&enic->wq[queue_idx]);
- eth_dev->data->tx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STARTED;
+ data->tx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STARTED;
}
int enic_stop_wq(struct enic *enic, uint16_t queue_idx)
{
- struct rte_eth_dev *eth_dev = enic->rte_dev;
+ struct rte_eth_dev_data *data = enic->dev_data;
int ret;
ret = vnic_wq_disable(&enic->wq[queue_idx]);
if (ret)
return ret;
- eth_dev->data->tx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STOPPED;
+ data->tx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STOPPED;
return 0;
}
void enic_start_rq(struct enic *enic, uint16_t queue_idx)
{
+ struct rte_eth_dev_data *data = enic->dev_data;
struct vnic_rq *rq_sop;
struct vnic_rq *rq_data;
rq_sop = &enic->rq[enic_rte_rq_idx_to_sop_idx(queue_idx)];
rq_data = &enic->rq[rq_sop->data_queue_idx];
- struct rte_eth_dev *eth_dev = enic->rte_dev;
if (rq_data->in_use) {
vnic_rq_enable(rq_data);
rte_mb();
vnic_rq_enable(rq_sop);
enic_initial_post_rx(enic, rq_sop);
- eth_dev->data->rx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STARTED;
+ data->rx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STARTED;
}
int enic_stop_rq(struct enic *enic, uint16_t queue_idx)
{
+ struct rte_eth_dev_data *data = enic->dev_data;
int ret1 = 0, ret2 = 0;
- struct rte_eth_dev *eth_dev = enic->rte_dev;
struct vnic_rq *rq_sop;
struct vnic_rq *rq_data;
rq_sop = &enic->rq[enic_rte_rq_idx_to_sop_idx(queue_idx)];
else if (ret1)
return ret1;
- eth_dev->data->rx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STOPPED;
+ data->rx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STOPPED;
return 0;
}
unsigned int socket_id, struct rte_mempool *mp,
uint16_t nb_desc, uint16_t free_thresh)
{
+ struct enic_vf_representor *vf;
int rc;
- uint16_t sop_queue_idx = enic_rte_rq_idx_to_sop_idx(queue_idx);
- uint16_t data_queue_idx = enic_rte_rq_idx_to_data_idx(queue_idx);
- struct vnic_rq *rq_sop = &enic->rq[sop_queue_idx];
- struct vnic_rq *rq_data = &enic->rq[data_queue_idx];
+ uint16_t sop_queue_idx;
+ uint16_t data_queue_idx;
+ uint16_t cq_idx;
+ struct vnic_rq *rq_sop;
+ struct vnic_rq *rq_data;
unsigned int mbuf_size, mbufs_per_pkt;
unsigned int nb_sop_desc, nb_data_desc;
uint16_t min_sop, max_sop, min_data, max_data;
uint32_t max_rx_pkt_len;
+ /*
+ * Representor uses a reserved PF queue. Translate representor
+ * queue number to PF queue number.
+ */
+ if (enic_is_vf_rep(enic)) {
+ RTE_ASSERT(queue_idx == 0);
+ vf = VF_ENIC_TO_VF_REP(enic);
+ sop_queue_idx = vf->pf_rq_sop_idx;
+ data_queue_idx = vf->pf_rq_data_idx;
+ enic = vf->pf;
+ queue_idx = sop_queue_idx;
+ } else {
+ sop_queue_idx = enic_rte_rq_idx_to_sop_idx(queue_idx);
+ data_queue_idx = enic_rte_rq_idx_to_data_idx(queue_idx, enic);
+ }
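+ /* Derive the CQ index from the SOP RQ index, which may be a reserved PF queue */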
+ cq_idx = enic_cq_rq(enic, sop_queue_idx);
+ rq_sop = &enic->rq[sop_queue_idx];
+ rq_data = &enic->rq[data_queue_idx];
rq_sop->is_sop = 1;
rq_sop->data_queue_idx = data_queue_idx;
rq_data->is_sop = 0;
}
nb_data_desc = rq_data->ring.desc_count;
}
- rc = vnic_cq_alloc(enic->vdev, &enic->cq[queue_idx], queue_idx,
+ /* Enable 64B CQ entry if requested */
+ if (enic->cq64 && vnic_dev_set_cq_entry_size(enic->vdev,
+ sop_queue_idx, VNIC_RQ_CQ_ENTRY_SIZE_64)) {
+ dev_err(enic, "failed to enable 64B CQ entry on sop rq\n");
+ goto err_free_rq_data;
+ }
+ if (rq_data->in_use && enic->cq64 &&
+ vnic_dev_set_cq_entry_size(enic->vdev, data_queue_idx,
+ VNIC_RQ_CQ_ENTRY_SIZE_64)) {
+ dev_err(enic, "failed to enable 64B CQ entry on data rq\n");
+ goto err_free_rq_data;
+ }
+
+ rc = vnic_cq_alloc(enic->vdev, &enic->cq[cq_idx], cq_idx,
socket_id, nb_sop_desc + nb_data_desc,
+ enic->cq64 ? sizeof(struct cq_enet_rq_desc_64) :
sizeof(struct cq_enet_rq_desc));
if (rc) {
dev_err(enic, "error in allocation of cq for rq\n");
rte_free(rq_sop->mbuf_ring);
err_free_cq:
/* cleanup on error */
- vnic_cq_free(&enic->cq[queue_idx]);
+ vnic_cq_free(&enic->cq[cq_idx]);
err_free_rq_data:
if (rq_data->in_use)
vnic_rq_free(rq_data);
int enic_alloc_wq(struct enic *enic, uint16_t queue_idx,
unsigned int socket_id, uint16_t nb_desc)
{
+ struct enic_vf_representor *vf;
int err;
- struct vnic_wq *wq = &enic->wq[queue_idx];
- unsigned int cq_index = enic_cq_wq(enic, queue_idx);
- char name[NAME_MAX];
+ struct vnic_wq *wq;
+ unsigned int cq_index;
+ char name[RTE_MEMZONE_NAMESIZE];
static int instance;
+ /*
+ * Representor uses a reserved PF queue. Translate representor
+ * queue number to PF queue number.
+ */
+ if (enic_is_vf_rep(enic)) {
+ RTE_ASSERT(queue_idx == 0);
+ vf = VF_ENIC_TO_VF_REP(enic);
+ queue_idx = vf->pf_wq_idx;
+ cq_index = vf->pf_wq_cq_idx;
+ enic = vf->pf;
+ } else {
+ cq_index = enic_cq_wq(enic, queue_idx);
+ }
+ wq = &enic->wq[queue_idx];
wq->socket_id = socket_id;
/*
* rte_eth_tx_queue_setup() checks min, max, and alignment. So just
wq->cqmsg_rz = rte_memzone_reserve_aligned((const char *)name,
sizeof(uint32_t), SOCKET_ID_ANY,
- RTE_MEMZONE_IOVA_CONTIG, ENIC_ALIGN);
+ RTE_MEMZONE_IOVA_CONTIG, ENIC_PAGE_SIZE);
if (!wq->cqmsg_rz)
return -ENOMEM;
vnic_dev_disable(enic->vdev);
- enic_clsf_destroy(enic);
+ enic_fm_destroy(enic);
if (!enic_is_sriov_vf(enic))
vnic_dev_del_addr(enic->vdev, enic->mac_addr);
dma_addr_t rss_key_buf_pa;
union vnic_rss_key *rss_key_buf_va = NULL;
int err, i;
- u8 name[NAME_MAX];
+ uint8_t name[RTE_MEMZONE_NAMESIZE];
RTE_ASSERT(user_key != NULL);
- snprintf((char *)name, NAME_MAX, "rss_key-%s", enic->bdf_name);
+ snprintf((char *)name, sizeof(name), "rss_key-%s", enic->bdf_name);
rss_key_buf_va = enic_alloc_consistent(enic, sizeof(union vnic_rss_key),
&rss_key_buf_pa, name);
if (!rss_key_buf_va)
dma_addr_t rss_cpu_buf_pa;
union vnic_rss_cpu *rss_cpu_buf_va = NULL;
int err;
- u8 name[NAME_MAX];
+ uint8_t name[RTE_MEMZONE_NAMESIZE];
- snprintf((char *)name, NAME_MAX, "rss_cpu-%s", enic->bdf_name);
+ snprintf((char *)name, sizeof(name), "rss_cpu-%s", enic->bdf_name);
rss_cpu_buf_va = enic_alloc_consistent(enic, sizeof(union vnic_rss_cpu),
&rss_cpu_buf_pa, name);
if (!rss_cpu_buf_va)
return err;
}
-static int enic_set_niccfg(struct enic *enic, u8 rss_default_cpu,
- u8 rss_hash_type, u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable)
+static int enic_set_niccfg(struct enic *enic, uint8_t rss_default_cpu,
+ uint8_t rss_hash_type, uint8_t rss_hash_bits, uint8_t rss_base_cpu,
+ uint8_t rss_enable)
{
- const u8 tso_ipid_split_en = 0;
+ const uint8_t tso_ipid_split_en = 0;
int err;
err = enic_set_nic_cfg(enic,
{
enic_init_soft_stats(enic);
+ /* switchdev: enable promisc mode on PF */
+ if (enic->switchdev_mode) {
+ vnic_dev_packet_filter(enic->vdev,
+ 0 /* directed */,
+ 0 /* multicast */,
+ 0 /* broadcast */,
+ 1 /* promisc */,
+ 0 /* allmulti */);
+ enic->promisc = 1;
+ enic->allmulti = 0;
+ return 0;
+ }
/* Default conf */
vnic_dev_packet_filter(enic->vdev,
1 /* directed */,
{
struct rte_eth_dev *eth_dev;
uint64_t rss_hf;
- u8 rss_hash_type;
- u8 rss_enable;
+ uint8_t rss_hash_type;
+ uint8_t rss_enable;
int ret;
RTE_ASSERT(rss_conf != NULL);
enic->rss_enable);
}
-void enic_add_packet_filter(struct enic *enic)
+int enic_add_packet_filter(struct enic *enic)
{
+ /* switchdev ignores packet filters */
+ if (enic->switchdev_mode) {
+ ENICPMD_LOG(DEBUG, " switchdev: ignore packet filter");
+ return 0;
+ }
/* Args -> directed, multicast, broadcast, promisc, allmulti */
- vnic_dev_packet_filter(enic->vdev, 1, 1, 1,
+ return vnic_dev_packet_filter(enic->vdev, 1, 1, 1,
enic->promisc, enic->allmulti);
}
if (eth_dev->data->dev_conf.intr_conf.rxq) {
required_intr += eth_dev->data->nb_rx_queues;
}
+ ENICPMD_LOG(DEBUG, "Required queues for PF: rq %u wq %u cq %u",
+ required_rq, required_wq, required_cq);
+ if (enic->vf_required_rq) {
+ /* Queues needed for VF representors */
+ required_rq += enic->vf_required_rq;
+ required_wq += enic->vf_required_wq;
+ required_cq += enic->vf_required_cq;
+ ENICPMD_LOG(DEBUG, "Required queues for VF representors: rq %u wq %u cq %u",
+ enic->vf_required_rq, enic->vf_required_wq,
+ enic->vf_required_cq);
+ }
if (enic->conf_rq_count < required_rq) {
dev_err(dev, "Not enough Receive queues. Requested:%u which uses %d RQs on VIC, Configured:%u\n",
int rc = 0;
sop_rq = &enic->rq[enic_rte_rq_idx_to_sop_idx(rq_idx)];
- data_rq = &enic->rq[enic_rte_rq_idx_to_data_idx(rq_idx)];
- cq_idx = rq_idx;
+ data_rq = &enic->rq[enic_rte_rq_idx_to_data_idx(rq_idx, enic)];
+ cq_idx = enic_cq_rq(enic, rq_idx);
vnic_cq_clean(&enic->cq[cq_idx]);
vnic_cq_init(&enic->cq[cq_idx],
if (data_rq->in_use) {
vnic_rq_init_start(data_rq,
enic_cq_rq(enic,
- enic_rte_rq_idx_to_data_idx(rq_idx)), 0,
- data_rq->ring.desc_count - 1, 1, 0);
+ enic_rte_rq_idx_to_data_idx(rq_idx, enic)),
+ 0, data_rq->ring.desc_count - 1, 1, 0);
}
rc = enic_alloc_rx_queue_mbufs(enic, sop_rq);
/* put back the real receive function */
rte_mb();
- pick_rx_handler(enic);
+ enic_pick_rx_handler(eth_dev);
rte_mb();
/* restart Rx traffic */
return -1;
}
- /* Get the supported filters */
- enic_fdir_info(enic);
-
eth_dev->data->mac_addrs = rte_zmalloc("enic_mac_addr",
sizeof(struct rte_ether_addr) *
ENIC_UNICAST_PERFECT_FILTERS, 0);
vnic_dev_set_reset_flag(enic->vdev, 0);
LIST_INIT(&enic->flows);
- rte_spinlock_init(&enic->flows_lock);
/* set up link status checking */
vnic_dev_notify_set(enic->vdev, -1); /* No Intr for notify */
+ /*
+ * When Geneve with options offload is available, always disable it
+ * first as it can interfere with user flow rules.
+ */
+ if (enic->geneve_opt_avail) {
+ /*
+ * Disabling fails if the feature is provisioned but
+ * not enabled. So ignore result and do not log error.
+ */
+ vnic_dev_overlay_offload_ctrl(enic->vdev,
+ OVERLAY_FEATURE_GENEVE,
+ OVERLAY_OFFLOAD_DISABLE);
+ }
enic->overlay_offload = false;
if (enic->disable_overlay && enic->vxlan) {
/*
enic->overlay_offload = true;
dev_info(enic, "Overlay offload is enabled\n");
}
+ /* Geneve with options offload requires overlay offload */
+ if (enic->overlay_offload && enic->geneve_opt_avail &&
+ enic->geneve_opt_request) {
+ if (vnic_dev_overlay_offload_ctrl(enic->vdev,
+ OVERLAY_FEATURE_GENEVE,
+ OVERLAY_OFFLOAD_ENABLE)) {
+ dev_err(enic, "failed to enable geneve+option\n");
+ } else {
+ enic->geneve_opt_enabled = 1;
+ dev_info(enic, "Geneve with options is enabled\n");
+ }
+ }
/*
* Reset the vxlan port if HW vxlan parsing is available. It
* is always enabled regardless of overlay offload
* enable/disable.
*/
if (enic->vxlan) {
- enic->vxlan_port = ENIC_DEFAULT_VXLAN_PORT;
+ enic->vxlan_port = RTE_VXLAN_DEFAULT_PORT;
/*
* Reset the vxlan port to the default, as the NIC firmware
* does not reset it automatically and keeps the old setting.
*/
if (vnic_dev_overlay_offload_cfg(enic->vdev,
OVERLAY_CFG_VXLAN_PORT_UPDATE,
- ENIC_DEFAULT_VXLAN_PORT)) {
+ RTE_VXLAN_DEFAULT_PORT)) {
dev_err(enic, "failed to update vxlan port\n");
return -EINVAL;
}
}
+ if (enic_fm_init(enic))
+ dev_warning(enic, "Init of flowman failed.\n");
return 0;
}
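+/* devcmd lock callbacks, registered in switchdev mode to serialize PF and VF representor devcmds */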
+static void lock_devcmd(void *priv)
+{
+ struct enic *enic = priv;
+
+ rte_spinlock_lock(&enic->devcmd_lock);
+}
+
+static void unlock_devcmd(void *priv)
+{
+ struct enic *enic = priv;
+
+ rte_spinlock_unlock(&enic->devcmd_lock);
+}
+
int enic_probe(struct enic *enic)
{
struct rte_pci_device *pdev = enic->pdev;
int err = -1;
- dev_debug(enic, " Initializing ENIC PMD\n");
+ dev_debug(enic, "Initializing ENIC PMD\n");
/* if this is a secondary process the hardware is already initialized */
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
goto err_out_dev_close;
}
+ /* Use a PF spinlock to serialize devcmd from PF and VF representors */
+ if (enic->switchdev_mode) {
+ rte_spinlock_init(&enic->devcmd_lock);
+ vnic_register_lock(enic->vdev, lock_devcmd, unlock_devcmd);
+ }
return 0;
err_out_dev_close: