static void enic_log_q_error(struct enic *enic)
{
unsigned int i;
- u32 error_status;
+ uint32_t error_status;
for (i = 0; i < enic->wq_count; i++) {
error_status = vnic_wq_error_status(&enic->wq[i]);
if (error_status)
dev_err(enic, "WQ[%d] error_status %d\n", i,
error_status);
}
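+ /*
+ * Returning int lets the ethdev .stats_reset op report devcmd failures
+ * instead of silently ignoring them. A minimal sketch of the expected
+ * caller (op name assumed, not shown in this diff):
+ *
+ * static int enicpmd_dev_stats_reset(struct rte_eth_dev *eth_dev)
+ * {
+ * return enic_dev_stats_clear(pmd_priv(eth_dev));
+ * }
+ */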
-void enic_dev_stats_clear(struct enic *enic)
+int enic_dev_stats_clear(struct enic *enic)
{
- if (vnic_dev_stats_clear(enic->vdev))
+ int ret;
+
+ ret = vnic_dev_stats_clear(enic->vdev);
+ if (ret != 0) {
dev_err(enic, "Error in clearing stats\n");
+ return ret;
+ }
enic_clear_soft_stats(enic);
+
+ return 0;
}
int enic_dev_stats_get(struct enic *enic, struct rte_eth_stats *r_stats)
0 /* cq_entry_enable */,
1 /* cq_message_enable */,
0 /* interrupt offset */,
- (u64)enic->wq[index].cqmsg_rz->iova);
+ (uint64_t)enic->wq[index].cqmsg_rz->iova);
}
for (index = 0; index < enic->intr_count; index++) {
rq->need_initial_post = false;
}
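+ /*
+ * enic_alloc_consistent/enic_free_consistent drop their static so other
+ * enic source files can allocate DMA-able memory the same way; the new
+ * flowman code added below is the presumed user.
+ */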
-static void *
+void *
enic_alloc_consistent(void *priv, size_t size,
- dma_addr_t *dma_handle, u8 *name)
+ dma_addr_t *dma_handle, uint8_t *name)
{
void *vaddr;
const struct rte_memzone *rz;
struct enic_memzone_entry *mze;
rz = rte_memzone_reserve_aligned((const char *)name, size,
- SOCKET_ID_ANY, RTE_MEMZONE_IOVA_CONTIG, ENIC_ALIGN);
+ SOCKET_ID_ANY, RTE_MEMZONE_IOVA_CONTIG, ENIC_PAGE_SIZE);
if (!rz) {
pr_err("%s : Failed to allocate memory requested for %s\n",
__func__, name);
return NULL;
}
-static void
+void
enic_free_consistent(void *priv,
__rte_unused size_t size,
void *vaddr,
rte_free(mze);
}
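+ /*
+ * enic_link_update() now takes the ethdev directly: it no longer needs
+ * enic->rte_dev (a per-process pointer), and it presumably matches how
+ * the .link_update dev op invokes it.
+ */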
-int enic_link_update(struct enic *enic)
+int enic_link_update(struct rte_eth_dev *eth_dev)
{
- struct rte_eth_dev *eth_dev = enic->rte_dev;
+ struct enic *enic = pmd_priv(eth_dev);
struct rte_eth_link link;
memset(&link, 0, sizeof(link));
vnic_intr_return_all_credits(&enic->intr[ENICPMD_LSC_INTR_OFFSET]);
- enic_link_update(enic);
+ enic_link_update(dev);
_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
enic_log_q_error(enic);
+ /* Re-enable irq in case of INTx */
+ rte_intr_ack(&enic->pdev->intr_handle);
}
static int enic_rxq_intr_init(struct enic *enic)
* used when that file is not compiled.
*/
__rte_weak bool
-enic_use_vector_rx_handler(__rte_unused struct enic *enic)
+enic_use_vector_rx_handler(__rte_unused struct rte_eth_dev *eth_dev)
{
return false;
}
-static void pick_rx_handler(struct enic *enic)
+void enic_pick_rx_handler(struct rte_eth_dev *eth_dev)
{
- struct rte_eth_dev *eth_dev;
+ struct enic *enic = pmd_priv(eth_dev);
/*
* Preference order:
* 1. The vectorized handler if possible and requested.
* 2. The non-scatter, simplified handler if scatter Rx is not used.
* 3. The default handler as a fallback.
*/
- eth_dev = enic->rte_dev;
- if (enic_use_vector_rx_handler(enic))
+ if (enic_use_vector_rx_handler(eth_dev))
return;
if (enic->rq_count > 0 && enic->rq[0].data_queue_enable == 0) {
ENICPMD_LOG(DEBUG, " use the non-scatter Rx handler");
eth_dev->rx_pkt_burst = &enic_noscatter_recv_pkts;
}
}
+/* Secondary process uses this to set the Tx handler */
+void enic_pick_tx_handler(struct rte_eth_dev *eth_dev)
+{
+ struct enic *enic = pmd_priv(eth_dev);
+
+ if (enic->use_simple_tx_handler) {
+ ENICPMD_LOG(DEBUG, " use the simple tx handler");
+ eth_dev->tx_pkt_burst = &enic_simple_xmit_pkts;
+ } else {
+ ENICPMD_LOG(DEBUG, " use the default tx handler");
+ eth_dev->tx_pkt_burst = &enic_xmit_pkts;
+ }
+}
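+
+ /*
+ * The burst function pointers set here live in the per-process
+ * struct rte_eth_dev, not in shared memory, so a secondary process
+ * must redo this selection during its own probe. A hedged sketch of
+ * that caller (the exact probe code is not part of this diff):
+ *
+ * if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ * enic_pick_tx_handler(eth_dev);
+ * enic_pick_rx_handler(eth_dev);
+ * return 0;
+ * }
+ */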
+
int enic_enable(struct enic *enic)
{
unsigned int index;
dev_warning(enic, "Init of hash table for clsf failed."
" Flow director feature will not work\n");
+ if (enic_fm_init(enic))
+ dev_warning(enic, "Init of flowman failed.\n");
+
for (index = 0; index < enic->rq_count; index++) {
err = enic_alloc_rx_queue_mbufs(enic,
&enic->rq[enic_rte_rq_idx_to_sop_idx(index)]);
eth_dev->tx_pkt_burst = &enic_simple_xmit_pkts;
for (index = 0; index < enic->wq_count; index++)
enic_prep_wq_for_simple_tx(enic, index);
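+ /* Remember the choice so enic_pick_tx_handler() can repeat it in
+ * secondary processes.
+ */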
+ enic->use_simple_tx_handler = 1;
} else {
ENICPMD_LOG(DEBUG, " use the default tx handler");
eth_dev->tx_pkt_burst = &enic_xmit_pkts;
}
- pick_rx_handler(enic);
+ enic_pick_rx_handler(eth_dev);
for (index = 0; index < enic->wq_count; index++)
enic_start_wq(enic, index);
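+ /*
+ * Queue-state updates below switch from enic->rte_dev to
+ * enic->dev_data: struct rte_eth_dev is per-process, while
+ * rte_eth_dev_data lives in shared memory and so stays valid in
+ * secondary processes.
+ */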
void enic_start_wq(struct enic *enic, uint16_t queue_idx)
{
- struct rte_eth_dev *eth_dev = enic->rte_dev;
+ struct rte_eth_dev_data *data = enic->dev_data;
vnic_wq_enable(&enic->wq[queue_idx]);
- eth_dev->data->tx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STARTED;
+ data->tx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STARTED;
}
int enic_stop_wq(struct enic *enic, uint16_t queue_idx)
{
- struct rte_eth_dev *eth_dev = enic->rte_dev;
+ struct rte_eth_dev_data *data = enic->dev_data;
int ret;
ret = vnic_wq_disable(&enic->wq[queue_idx]);
if (ret)
return ret;
- eth_dev->data->tx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STOPPED;
+ data->tx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STOPPED;
return 0;
}
void enic_start_rq(struct enic *enic, uint16_t queue_idx)
{
+ struct rte_eth_dev_data *data = enic->dev_data;
struct vnic_rq *rq_sop;
struct vnic_rq *rq_data;
rq_sop = &enic->rq[enic_rte_rq_idx_to_sop_idx(queue_idx)];
rq_data = &enic->rq[rq_sop->data_queue_idx];
- struct rte_eth_dev *eth_dev = enic->rte_dev;
if (rq_data->in_use) {
vnic_rq_enable(rq_data);
rte_mb();
vnic_rq_enable(rq_sop);
enic_initial_post_rx(enic, rq_sop);
- eth_dev->data->rx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STARTED;
+ data->rx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STARTED;
}
int enic_stop_rq(struct enic *enic, uint16_t queue_idx)
{
+ struct rte_eth_dev_data *data = enic->dev_data;
int ret1 = 0, ret2 = 0;
- struct rte_eth_dev *eth_dev = enic->rte_dev;
struct vnic_rq *rq_sop;
struct vnic_rq *rq_data;
rq_sop = &enic->rq[enic_rte_rq_idx_to_sop_idx(queue_idx)];
else if (ret1)
return ret1;
- eth_dev->data->rx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STOPPED;
+ data->rx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STOPPED;
return 0;
}
int err;
struct vnic_wq *wq = &enic->wq[queue_idx];
unsigned int cq_index = enic_cq_wq(enic, queue_idx);
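+ /* Memzone names are limited to RTE_MEMZONE_NAMESIZE, so NAME_MAX
+ * overstated what this buffer can usefully hold.
+ */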
- char name[NAME_MAX];
+ char name[RTE_MEMZONE_NAMESIZE];
static int instance;
wq->socket_id = socket_id;
wq->cqmsg_rz = rte_memzone_reserve_aligned((const char *)name,
sizeof(uint32_t), SOCKET_ID_ANY,
- RTE_MEMZONE_IOVA_CONTIG, ENIC_ALIGN);
+ RTE_MEMZONE_IOVA_CONTIG, ENIC_PAGE_SIZE);
if (!wq->cqmsg_rz)
return -ENOMEM;
vnic_dev_disable(enic->vdev);
enic_clsf_destroy(enic);
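+ /* Tear down the flowman state set up by enic_fm_init() */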
+ enic_fm_destroy(enic);
if (!enic_is_sriov_vf(enic))
vnic_dev_del_addr(enic->vdev, enic->mac_addr);
dma_addr_t rss_key_buf_pa;
union vnic_rss_key *rss_key_buf_va = NULL;
int err, i;
- u8 name[NAME_MAX];
+ uint8_t name[RTE_MEMZONE_NAMESIZE];
RTE_ASSERT(user_key != NULL);
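+ /* name[] now matches the memzone name limit; using sizeof(name)
+ * keeps the snprintf bound in sync with the buffer size.
+ */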
- snprintf((char *)name, NAME_MAX, "rss_key-%s", enic->bdf_name);
+ snprintf((char *)name, sizeof(name), "rss_key-%s", enic->bdf_name);
rss_key_buf_va = enic_alloc_consistent(enic, sizeof(union vnic_rss_key),
&rss_key_buf_pa, name);
if (!rss_key_buf_va)
dma_addr_t rss_cpu_buf_pa;
union vnic_rss_cpu *rss_cpu_buf_va = NULL;
int err;
- u8 name[NAME_MAX];
+ uint8_t name[RTE_MEMZONE_NAMESIZE];
- snprintf((char *)name, NAME_MAX, "rss_cpu-%s", enic->bdf_name);
+ snprintf((char *)name, sizeof(name), "rss_cpu-%s", enic->bdf_name);
rss_cpu_buf_va = enic_alloc_consistent(enic, sizeof(union vnic_rss_cpu),
&rss_cpu_buf_pa, name);
if (!rss_cpu_buf_va)
return err;
}
-static int enic_set_niccfg(struct enic *enic, u8 rss_default_cpu,
- u8 rss_hash_type, u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable)
+static int enic_set_niccfg(struct enic *enic, uint8_t rss_default_cpu,
+ uint8_t rss_hash_type, uint8_t rss_hash_bits, uint8_t rss_base_cpu,
+ uint8_t rss_enable)
{
- const u8 tso_ipid_split_en = 0;
+ const uint8_t tso_ipid_split_en = 0;
int err;
err = enic_set_nic_cfg(enic,
{
struct rte_eth_dev *eth_dev;
uint64_t rss_hf;
- u8 rss_hash_type;
- u8 rss_enable;
+ uint8_t rss_hash_type;
+ uint8_t rss_enable;
int ret;
RTE_ASSERT(rss_conf != NULL);
/* put back the real receive function */
rte_mb();
- pick_rx_handler(enic);
+ enic_pick_rx_handler(eth_dev);
rte_mb();
/* restart Rx traffic */
/* set up link status checking */
vnic_dev_notify_set(enic->vdev, -1); /* No Intr for notify */
+ /*
+ * When Geneve with options offload is available, always disable it
+ * first as it can interfere with user flow rules.
+ */
+ if (enic->geneve_opt_avail) {
+ /*
+ * Disabling fails if the feature is provisioned but
+ * not enabled. So ignore result and do not log error.
+ */
+ vnic_dev_overlay_offload_ctrl(enic->vdev,
+ OVERLAY_FEATURE_GENEVE,
+ OVERLAY_OFFLOAD_DISABLE);
+ }
enic->overlay_offload = false;
if (enic->disable_overlay && enic->vxlan) {
/*
enic->overlay_offload = true;
dev_info(enic, "Overlay offload is enabled\n");
}
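+ /*
+ * geneve_opt_request presumably reflects a "geneve-opt" devarg;
+ * geneve_opt_enabled records the outcome for the flow code.
+ */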
+ /* Geneve with options offload requires overlay offload */
+ if (enic->overlay_offload && enic->geneve_opt_avail &&
+ enic->geneve_opt_request) {
+ if (vnic_dev_overlay_offload_ctrl(enic->vdev,
+ OVERLAY_FEATURE_GENEVE,
+ OVERLAY_OFFLOAD_ENABLE)) {
+ dev_err(enic, "failed to enable geneve+option\n");
+ } else {
+ enic->geneve_opt_enabled = 1;
+ dev_info(enic, "Geneve with options is enabled\n");
+ }
+ }
/*
* Reset the vxlan port if HW vxlan parsing is available. It
* is always enabled regardless of overlay offload
* enable/disable.
*/
if (enic->vxlan) {
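+ /* RTE_VXLAN_DEFAULT_PORT (4789, from rte_vxlan.h) replaces the
+ * driver-private ENIC_DEFAULT_VXLAN_PORT copy of the same IANA value.
+ */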
- enic->vxlan_port = ENIC_DEFAULT_VXLAN_PORT;
+ enic->vxlan_port = RTE_VXLAN_DEFAULT_PORT;
/*
* Reset the vxlan port to the default, as the NIC firmware
* does not reset it automatically and keeps the old setting.
*/
if (vnic_dev_overlay_offload_cfg(enic->vdev,
OVERLAY_CFG_VXLAN_PORT_UPDATE,
- ENIC_DEFAULT_VXLAN_PORT)) {
+ RTE_VXLAN_DEFAULT_PORT)) {
dev_err(enic, "failed to update vxlan port\n");
return -EINVAL;
}