#include <unistd.h>
#include <stdarg.h>
#include <inttypes.h>
+#include <assert.h>
#include <rte_string_fns.h>
#include <rte_pci.h>
/* Maximum number of VSIs */
#define I40E_MAX_NUM_VSIS (384UL)
-/* Default queue interrupt throttling time in microseconds */
-#define I40E_ITR_INDEX_DEFAULT 0
-#define I40E_QUEUE_ITR_INTERVAL_DEFAULT 32 /* 32 us */
-#define I40E_QUEUE_ITR_INTERVAL_MAX 8160 /* 8160 us */
-
#define I40E_PRE_TX_Q_CFG_WAIT_US 10 /* 10 us */
/* Flow control default timer */
static int i40e_dev_xstats_get(struct rte_eth_dev *dev,
struct rte_eth_xstats *xstats, unsigned n);
static void i40e_dev_stats_reset(struct rte_eth_dev *dev);
-static void i40e_dev_xstats_reset(struct rte_eth_dev *dev);
static int i40e_dev_queue_stats_mapping_set(struct rte_eth_dev *dev,
uint16_t queue_id,
uint8_t stat_idx,
bool offset_loaded,
uint64_t *offset,
uint64_t *stat);
-static void i40e_pf_config_irq0(struct i40e_hw *hw);
+static void i40e_pf_config_irq0(struct i40e_hw *hw, bool no_queue);
static void i40e_dev_interrupt_handler(
__rte_unused struct rte_intr_handle *handle, void *param);
static int i40e_res_pool_init(struct i40e_res_pool_info *pool,
static int i40e_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
struct timespec *timestamp);
static void i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw);
-
+static int i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
+ uint16_t queue_id);
+static int i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
+ uint16_t queue_id);
static const struct rte_pci_id pci_id_i40e_map[] = {
#define RTE_PCI_DEV_ID_DECL_I40E(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
.stats_get = i40e_dev_stats_get,
.xstats_get = i40e_dev_xstats_get,
.stats_reset = i40e_dev_stats_reset,
- .xstats_reset = i40e_dev_xstats_reset,
+ .xstats_reset = i40e_dev_stats_reset,
.queue_stats_mapping_set = i40e_dev_queue_stats_mapping_set,
.dev_infos_get = i40e_dev_info_get,
.vlan_filter_set = i40e_vlan_filter_set,
.tx_queue_start = i40e_dev_tx_queue_start,
.tx_queue_stop = i40e_dev_tx_queue_stop,
.rx_queue_setup = i40e_dev_rx_queue_setup,
+ .rx_queue_intr_enable = i40e_dev_rx_queue_intr_enable,
+ .rx_queue_intr_disable = i40e_dev_rx_queue_intr_disable,
.rx_queue_release = i40e_dev_rx_queue_release,
.rx_queue_count = i40e_dev_rx_queue_count,
.rx_descriptor_done = i40e_dev_rx_descriptor_done,
i40e_dev_interrupt_handler, (void *)dev);
/* configure and enable device interrupt */
- i40e_pf_config_irq0(hw);
+ i40e_pf_config_irq0(hw, TRUE);
i40e_pf_enable_irq0(hw);
/* enable uio intr after callback register */
void
i40e_vsi_queues_unbind_intr(struct i40e_vsi *vsi)
{
+ struct rte_eth_dev *dev = vsi->adapter->eth_dev;
+ struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
uint16_t msix_vect = vsi->msix_intr;
uint16_t i;
}
if (vsi->type != I40E_VSI_SRIOV) {
- I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1), 0);
- I40E_WRITE_REG(hw, I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
- msix_vect - 1), 0);
+ if (!rte_intr_allow_others(intr_handle)) {
+ I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
+ I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK);
+ I40E_WRITE_REG(hw,
+ I40E_PFINT_ITR0(I40E_ITR_INDEX_DEFAULT),
+ 0);
+ } else {
+ I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1),
+ I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK);
+ I40E_WRITE_REG(hw,
+ I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
+ msix_vect - 1), 0);
+ }
} else {
uint32_t reg;
reg = (hw->func_caps.num_msix_vectors_vf - 1) *
vsi->user_param + (msix_vect - 1);
- I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg), 0);
+ I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg),
+ I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
}
I40E_WRITE_FLUSH(hw);
}
-static inline uint16_t
-i40e_calc_itr_interval(int16_t interval)
-{
- if (interval < 0 || interval > I40E_QUEUE_ITR_INTERVAL_MAX)
- interval = I40E_QUEUE_ITR_INTERVAL_DEFAULT;
-
- /* Convert to hardware count, as writing each 1 represents 2 us */
- return (interval/2);
-}
-
-void
-i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi)
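+/* Bind nb_queue Rx queues, starting at base_queue, to one MSI-X vector
+ * and chain them in the hardware interrupt linked list.
+ */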
+static void
+__vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t msix_vect,
+ int base_queue, int nb_queue)
{
+ int i;
uint32_t val;
struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
- uint16_t msix_vect = vsi->msix_intr;
- int i;
-
- for (i = 0; i < vsi->nb_qps; i++)
- I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);
/* Bind all RX queues to allocated MSIX interrupt */
- for (i = 0; i < vsi->nb_qps; i++) {
+ for (i = 0; i < nb_queue; i++) {
val = (msix_vect << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
I40E_QINT_RQCTL_ITR_INDX_MASK |
- ((vsi->base_queue + i + 1) <<
- I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
+ ((base_queue + i + 1) <<
+ I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
(0 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
I40E_QINT_RQCTL_CAUSE_ENA_MASK;
- if (i == vsi->nb_qps - 1)
+ if (i == nb_queue - 1)
val |= I40E_QINT_RQCTL_NEXTQ_INDX_MASK;
- I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), val);
+ I40E_WRITE_REG(hw, I40E_QINT_RQCTL(base_queue + i), val);
}
/* Write first RX queue to Link list register as the head element */
uint16_t interval =
i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL);
- I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1),
- (vsi->base_queue <<
- I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
- (0x0 << I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
-
- I40E_WRITE_REG(hw, I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
- msix_vect - 1), interval);
-
-#ifndef I40E_GLINT_CTL
-#define I40E_GLINT_CTL 0x0003F800
-#define I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK 0x4
-#endif
- /* Disable auto-mask on enabling of all none-zero interrupt */
- I40E_WRITE_REG(hw, I40E_GLINT_CTL,
- I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK);
+ if (msix_vect == I40E_MISC_VEC_ID) {
+ I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
+ (base_queue <<
+ I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
+ (0x0 <<
+ I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
+ I40E_WRITE_REG(hw,
+ I40E_PFINT_ITR0(I40E_ITR_INDEX_DEFAULT),
+ interval);
+ } else {
+ I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1),
+ (base_queue <<
+ I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
+ (0x0 <<
+ I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
+ I40E_WRITE_REG(hw,
+ I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
+ msix_vect - 1),
+ interval);
+ }
} else {
uint32_t reg;
- /* num_msix_vectors_vf needs to minus irq0 */
- reg = (hw->func_caps.num_msix_vectors_vf - 1) *
- vsi->user_param + (msix_vect - 1);
+ if (msix_vect == I40E_MISC_VEC_ID) {
+ I40E_WRITE_REG(hw,
+ I40E_VPINT_LNKLST0(vsi->user_param),
+ (base_queue <<
+ I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
+ (0x0 <<
+ I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
+ } else {
+			/* num_msix_vectors_vf includes irq0, so exclude it */
+ reg = (hw->func_caps.num_msix_vectors_vf - 1) *
+ vsi->user_param + (msix_vect - 1);
- I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg), (vsi->base_queue <<
+ I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg),
+ (base_queue <<
I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
- (0x0 << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
+ (0x0 <<
+ I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
+ }
}
I40E_WRITE_FLUSH(hw);
}
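+/* Distribute the VSI's MSI-X vectors among its used queues: 1:1
+ * queue/vector mapping while vectors last, then map all remaining
+ * queues to the last vector.
+ */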
+void
+i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi)
+{
+ struct rte_eth_dev *dev = vsi->adapter->eth_dev;
+ struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+ uint16_t msix_vect = vsi->msix_intr;
+ uint16_t nb_msix = RTE_MIN(vsi->nb_msix, intr_handle->nb_efd);
+ uint16_t queue_idx = 0;
+ int record = 0;
+ uint32_t val;
+ int i;
+
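+	/* clear any stale queue/vector bindings before remapping */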
+ for (i = 0; i < vsi->nb_qps; i++) {
+ I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);
+ I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), 0);
+ }
+
+	/* disable auto-masking so INTENA is not cleared on interrupt */
+ val = I40E_READ_REG(hw, I40E_GLINT_CTL);
+ val |= I40E_GLINT_CTL_DIS_AUTOMASK_PF0_MASK |
+ I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK |
+ I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK;
+ I40E_WRITE_REG(hw, I40E_GLINT_CTL, val);
+
+ /* VF bind interrupt */
+ if (vsi->type == I40E_VSI_SRIOV) {
+ __vsi_queues_bind_intr(vsi, msix_vect,
+ vsi->base_queue, vsi->nb_qps);
+ return;
+ }
+
+ /* PF & VMDq bind interrupt */
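+	/* record != 0 means the queue/vector mapping must also be
+	 * saved in intr_handle->intr_vec, starting at queue_idx
+	 */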
+ if (rte_intr_dp_is_en(intr_handle)) {
+ if (vsi->type == I40E_VSI_MAIN) {
+ queue_idx = 0;
+ record = 1;
+ } else if (vsi->type == I40E_VSI_VMDQ2) {
+ struct i40e_vsi *main_vsi =
+ I40E_DEV_PRIVATE_TO_MAIN_VSI(vsi->adapter);
+ queue_idx = vsi->base_queue - main_vsi->nb_qps;
+ record = 1;
+ }
+ }
+
+ for (i = 0; i < vsi->nb_used_qps; i++) {
+ if (nb_msix <= 1) {
+ if (!rte_intr_allow_others(intr_handle))
+				/* allow sharing MISC_VEC_ID */
+ msix_vect = I40E_MISC_VEC_ID;
+
+			/* not enough MSI-X vectors, map all queues to one */
+ __vsi_queues_bind_intr(vsi, msix_vect,
+ vsi->base_queue + i,
+ vsi->nb_used_qps - i);
+ for (; !!record && i < vsi->nb_used_qps; i++)
+ intr_handle->intr_vec[queue_idx + i] =
+ msix_vect;
+ break;
+ }
+ /* 1:1 queue/msix_vect mapping */
+ __vsi_queues_bind_intr(vsi, msix_vect,
+ vsi->base_queue + i, 1);
+ if (!!record)
+ intr_handle->intr_vec[queue_idx + i] = msix_vect;
+
+ msix_vect++;
+ nb_msix--;
+ }
+}
+
static void
i40e_vsi_enable_queues_intr(struct i40e_vsi *vsi)
{
+ struct rte_eth_dev *dev = vsi->adapter->eth_dev;
+ struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
uint16_t interval = i40e_calc_itr_interval(\
- RTE_LIBRTE_I40E_ITR_INTERVAL);
-
- I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(vsi->msix_intr - 1),
- I40E_PFINT_DYN_CTLN_INTENA_MASK |
- I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
+ RTE_LIBRTE_I40E_ITR_INTERVAL);
+ uint16_t msix_intr, i;
+
+ if (rte_intr_allow_others(intr_handle))
+ for (i = 0; i < vsi->nb_msix; i++) {
+ msix_intr = vsi->msix_intr + i;
+ I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(msix_intr - 1),
+ I40E_PFINT_DYN_CTLN_INTENA_MASK |
+ I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
(0 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
- (interval << I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT));
+ (interval <<
+ I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT));
+ }
+ else
+ I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
+ I40E_PFINT_DYN_CTL0_INTENA_MASK |
+ I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
+ (0 << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT) |
+ (interval <<
+ I40E_PFINT_DYN_CTL0_INTERVAL_SHIFT));
+
+ I40E_WRITE_FLUSH(hw);
}
static void
i40e_vsi_disable_queues_intr(struct i40e_vsi *vsi)
{
+ struct rte_eth_dev *dev = vsi->adapter->eth_dev;
+ struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+ uint16_t msix_intr, i;
- I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(vsi->msix_intr - 1), 0);
+ if (rte_intr_allow_others(intr_handle))
+ for (i = 0; i < vsi->nb_msix; i++) {
+ msix_intr = vsi->msix_intr + i;
+ I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(msix_intr - 1),
+ 0);
+ }
+ else
+ I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, 0);
+
+ I40E_WRITE_FLUSH(hw);
}
static inline uint8_t
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct i40e_vsi *main_vsi = pf->main_vsi;
int ret, i;
+ struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ uint32_t intr_vector = 0;
hw->adapter_stopped = 0;
return -EINVAL;
}
+ rte_intr_disable(intr_handle);
+
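+	/* check and configure queue intr-vector mapping */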
+ if ((rte_intr_cap_multiple(intr_handle) ||
+ !RTE_ETH_DEV_SRIOV(dev).active) &&
+ dev->data->dev_conf.intr_conf.rxq != 0) {
+ intr_vector = dev->data->nb_rx_queues;
+ if (rte_intr_efd_enable(intr_handle, intr_vector))
+ return -1;
+ }
+
+ if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
+ intr_handle->intr_vec =
+ rte_zmalloc("intr_vec",
+ dev->data->nb_rx_queues * sizeof(int),
+ 0);
+ if (!intr_handle->intr_vec) {
+			PMD_INIT_LOG(ERR, "Failed to allocate intr_vec for"
+				    " %d Rx queues\n", dev->data->nb_rx_queues);
+ return -ENOMEM;
+ }
+ }
+
/* Initialize VSI */
ret = i40e_dev_rxtx_init(pf);
if (ret != I40E_SUCCESS) {
}
/* Map queues with MSIX interrupt */
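+	/* the main VSI uses the Rx queues that are not reserved for VMDq */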
+ main_vsi->nb_used_qps = dev->data->nb_rx_queues -
+ pf->nb_cfg_vmdq_vsi * RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
i40e_vsi_queues_bind_intr(main_vsi);
i40e_vsi_enable_queues_intr(main_vsi);
/* Map VMDQ VSI queues with MSIX interrupt */
for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
+ pf->vmdq[i].vsi->nb_used_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
i40e_vsi_queues_bind_intr(pf->vmdq[i].vsi);
i40e_vsi_enable_queues_intr(pf->vmdq[i].vsi);
}
goto err_up;
}
+ if (!rte_intr_allow_others(intr_handle)) {
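+		/* irq0 is shared with the Rx queue interrupts, so the
+		 * dedicated misc handler has to be unregistered
+		 */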
+ rte_intr_callback_unregister(intr_handle,
+ i40e_dev_interrupt_handler,
+ (void *)dev);
+ /* configure and enable device interrupt */
+ i40e_pf_config_irq0(hw, FALSE);
+ i40e_pf_enable_irq0(hw);
+
+ if (dev->data->dev_conf.intr_conf.lsc != 0)
+			PMD_INIT_LOG(INFO, "lsc won't be enabled because"
+				     " interrupt multiplexing is unavailable\n");
+ }
+
+ /* enable uio intr after callback register */
+ rte_intr_enable(intr_handle);
+
return I40E_SUCCESS;
err_up:
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_vsi *main_vsi = pf->main_vsi;
struct i40e_mirror_rule *p_mirror;
+ struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
int i;
/* Disable all queues */
}
if (pf->fdir.fdir_vsi) {
- i40e_vsi_queues_bind_intr(pf->fdir.fdir_vsi);
- i40e_vsi_enable_queues_intr(pf->fdir.fdir_vsi);
+ i40e_vsi_queues_unbind_intr(pf->fdir.fdir_vsi);
+ i40e_vsi_disable_queues_intr(pf->fdir.fdir_vsi);
}
/* Clear all queues and release memory */
i40e_dev_clear_queues(dev);
}
pf->nb_mirror_rule = 0;
+ if (!rte_intr_allow_others(intr_handle))
+		/* restore the default interrupt handler */
+ rte_intr_callback_register(intr_handle,
+ i40e_dev_interrupt_handler,
+ (void *)dev);
+
+ /* Clean datapath event and queue/vec mapping */
+ rte_intr_efd_disable(intr_handle);
+ if (intr_handle->intr_vec) {
+ rte_free(intr_handle->intr_vec);
+ intr_handle->intr_vec = NULL;
+ }
}
static void
/* call read registers - updates values, now write them to struct */
i40e_read_stats_registers(pf, hw);
- stats->ipackets = ns->eth.rx_unicast + ns->eth.rx_multicast +
- ns->eth.rx_broadcast;
- stats->opackets = ns->eth.tx_unicast + ns->eth.tx_multicast +
- ns->eth.tx_broadcast;
- stats->ibytes = ns->eth.rx_bytes;
- stats->obytes = ns->eth.tx_bytes;
- stats->oerrors = ns->eth.tx_errors;
- stats->imcasts = ns->eth.rx_multicast;
+ stats->ipackets = pf->main_vsi->eth_stats.rx_unicast +
+ pf->main_vsi->eth_stats.rx_multicast +
+ pf->main_vsi->eth_stats.rx_broadcast -
+ pf->main_vsi->eth_stats.rx_discards;
+ stats->opackets = pf->main_vsi->eth_stats.tx_unicast +
+ pf->main_vsi->eth_stats.tx_multicast +
+ pf->main_vsi->eth_stats.tx_broadcast;
+ stats->ibytes = pf->main_vsi->eth_stats.rx_bytes;
+ stats->obytes = pf->main_vsi->eth_stats.tx_bytes;
+ stats->oerrors = ns->eth.tx_errors +
+ pf->main_vsi->eth_stats.tx_errors;
+ stats->imcasts = pf->main_vsi->eth_stats.rx_multicast;
stats->fdirmatch = ns->fd_sb_match;
/* Rx Errors */
stats->ibadcrc = ns->crc_errors;
stats->ibadlen = ns->rx_length_errors + ns->rx_undersize +
ns->rx_oversize + ns->rx_fragments + ns->rx_jabber;
- stats->imissed = ns->eth.rx_discards;
+ stats->imissed = ns->eth.rx_discards +
+ pf->main_vsi->eth_stats.rx_discards;
stats->ierrors = stats->ibadcrc + stats->ibadlen + stats->imissed;
PMD_DRV_LOG(DEBUG, "***************** PF stats start *******************");
PMD_DRV_LOG(DEBUG, "***************** PF stats end ********************");
}
+/* Reset the statistics */
static void
-i40e_dev_xstats_reset(struct rte_eth_dev *dev)
+i40e_dev_stats_reset(struct rte_eth_dev *dev)
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct i40e_hw_port_stats *hw_stats = &pf->stats;
- /* The hw registers are cleared on read */
+ /* Mark PF and VSI stats to update the offset, aka "reset" */
pf->offset_loaded = false;
- i40e_read_stats_registers(pf, hw);
+ if (pf->main_vsi)
+ pf->main_vsi->offset_loaded = false;
- /* reset software counters */
- memset(hw_stats, 0, sizeof(*hw_stats));
+ /* read the stats, reading current register values into offset */
+ i40e_read_stats_registers(pf, hw);
}
static int
return I40E_NB_XSTATS;
}
-/* Reset the statistics */
-static void
-i40e_dev_stats_reset(struct rte_eth_dev *dev)
-{
- struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
-
- /* It results in reloading the start point of each counter */
- pf->offset_loaded = false;
-}
-
static int
i40e_dev_queue_stats_mapping_set(__rte_unused struct rte_eth_dev *dev,
__rte_unused uint16_t queue_id,
u64 size,
u32 alignment)
{
- static uint64_t id = 0;
const struct rte_memzone *mz = NULL;
char z_name[RTE_MEMZONE_NAMESIZE];
if (!mem)
return I40E_ERR_PARAM;
- id++;
- snprintf(z_name, sizeof(z_name), "i40e_dma_%"PRIu64, id);
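+	/* name the memzone with a random number to avoid name collisions */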
+ snprintf(z_name, sizeof(z_name), "i40e_dma_%"PRIu64, rte_rand());
#ifdef RTE_LIBRTE_XEN_DOM0
mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY, 0,
alignment, RTE_PGSIZE_2M);
if (!mz)
return I40E_ERR_NO_MEMORY;
- mem->id = id;
mem->size = size;
mem->va = mz->addr;
#ifdef RTE_LIBRTE_XEN_DOM0
#else
mem->pa = mz->phys_addr;
#endif
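+	/* keep the memzone pointer so the memory can be freed later */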
+ mem->zone = (const void *)mz;
+ PMD_DRV_LOG(DEBUG, "memzone %s allocated with physical address: "
+ "%"PRIu64, mz->name, mem->pa);
return I40E_SUCCESS;
}
i40e_free_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
struct i40e_dma_mem *mem)
{
- if (!mem || !mem->va)
+ if (!mem)
return I40E_ERR_PARAM;
+ PMD_DRV_LOG(DEBUG, "memzone %s to be freed with physical address: "
+ "%"PRIu64, ((const struct rte_memzone *)mem->zone)->name,
+ mem->pa);
+ rte_memzone_free((const struct rte_memzone *)mem->zone);
+ mem->zone = NULL;
mem->va = NULL;
mem->pa = (u64)0;
/* VMDq queue/VSI allocation */
pf->vmdq_qp_offset = pf->vf_qp_offset + pf->vf_nb_qps * pf->vf_num;
+ pf->vmdq_nb_qps = 0;
+ pf->max_nb_vmdq_vsi = 0;
if (hw->func_caps.vmdq) {
- pf->flags |= I40E_FLAG_VMDQ;
- pf->vmdq_nb_qps = pf->vmdq_nb_qp_max;
- pf->max_nb_vmdq_vsi = 1;
- PMD_DRV_LOG(DEBUG, "%u VMDQ VSIs, %u queues per VMDQ VSI, "
- "in total %u queues", pf->max_nb_vmdq_vsi,
- pf->vmdq_nb_qps,
- pf->vmdq_nb_qps * pf->max_nb_vmdq_vsi);
- } else {
- pf->vmdq_nb_qps = 0;
- pf->max_nb_vmdq_vsi = 0;
+ if (qp_count < hw->func_caps.num_tx_qp) {
+ pf->max_nb_vmdq_vsi = (hw->func_caps.num_tx_qp -
+ qp_count) / pf->vmdq_nb_qp_max;
+
+ /* Limit the maximum number of VMDq vsi to the maximum
+ * ethdev can support
+ */
+ pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi,
+ ETH_64_POOLS);
+ if (pf->max_nb_vmdq_vsi) {
+ pf->flags |= I40E_FLAG_VMDQ;
+ pf->vmdq_nb_qps = pf->vmdq_nb_qp_max;
+ PMD_DRV_LOG(DEBUG, "%u VMDQ VSIs, %u queues "
+ "per VMDQ VSI, in total %u queues",
+ pf->max_nb_vmdq_vsi,
+ pf->vmdq_nb_qps, pf->vmdq_nb_qps *
+ pf->max_nb_vmdq_vsi);
+ } else {
+				PMD_DRV_LOG(INFO, "Not enough queues left for "
+ "VMDq");
+ }
+ } else {
+			PMD_DRV_LOG(INFO, "No queues left for VMDq");
+ }
}
qp_count += pf->vmdq_nb_qps * pf->max_nb_vmdq_vsi;
vsi_count += pf->max_nb_vmdq_vsi;
return !((src1 ^ src2) & src2);
}
-static int
+static enum i40e_status_code
validate_tcmap_parameter(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
{
struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
/* If DCB is not supported, only default TC is supported */
if (!hw->func_caps.dcb && enabled_tcmap != I40E_DEFAULT_TCMAP) {
PMD_DRV_LOG(ERR, "DCB is not enabled, only TC0 is supported");
- return -EINVAL;
+ return I40E_NOT_SUPPORTED;
}
if (!bitmap_is_subset(hw->func_caps.enabled_tcmap, enabled_tcmap)) {
PMD_DRV_LOG(ERR, "Enabled TC map 0x%x not applicable to "
"HW support 0x%x", hw->func_caps.enabled_tcmap,
enabled_tcmap);
- return -EINVAL;
+ return I40E_NOT_SUPPORTED;
}
return I40E_SUCCESS;
}
return I40E_SUCCESS;
}
-static int
+static enum i40e_status_code
i40e_vsi_config_tc_queue_mapping(struct i40e_vsi *vsi,
struct i40e_aqc_vsi_properties_data *info,
uint8_t enabled_tcmap)
{
- int ret, i, total_tc = 0;
+ enum i40e_status_code ret;
+ int i, total_tc = 0;
uint16_t qpnum_per_tc, bsf, qp_idx;
ret = validate_tcmap_parameter(vsi, enabled_tcmap);
vsi->base_queue = I40E_FDIR_QUEUE_ID;
/* VF has MSIX interrupt in VF range, don't allocate here */
- if (type != I40E_VSI_SRIOV) {
+ if (type == I40E_VSI_MAIN) {
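+		/* reserve one MSI-X vector per queue pair, capped at
+		 * RTE_MAX_RXTX_INTR_VEC_ID
+		 */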
+ ret = i40e_res_pool_alloc(&pf->msix_pool,
+ RTE_MIN(vsi->nb_qps,
+ RTE_MAX_RXTX_INTR_VEC_ID));
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "VSI MAIN %d get heap failed %d",
+ vsi->seid, ret);
+ goto fail_queue_alloc;
+ }
+ vsi->msix_intr = ret;
+ vsi->nb_msix = RTE_MIN(vsi->nb_qps, RTE_MAX_RXTX_INTR_VEC_ID);
+ } else if (type != I40E_VSI_SRIOV) {
ret = i40e_res_pool_alloc(&pf->msix_pool, 1);
if (ret < 0) {
PMD_DRV_LOG(ERR, "VSI %d get heap failed %d", vsi->seid, ret);
goto fail_queue_alloc;
}
vsi->msix_intr = ret;
- } else
+ vsi->nb_msix = 1;
+ } else {
vsi->msix_intr = 0;
+ vsi->nb_msix = 0;
+ }
+
/* Add VSI */
if (type == I40E_VSI_MAIN) {
/* For main VSI, no need to add since it's default one */
}
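+/* no_queue: when set, link no queues to irq0, so irq0 only serves
+ * admin queue and other misc events
+ */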
static void
-i40e_pf_config_irq0(struct i40e_hw *hw)
+i40e_pf_config_irq0(struct i40e_hw *hw, bool no_queue)
{
/* read pending request and disable first */
i40e_pf_disable_irq0(hw);
I40E_WRITE_REG(hw, I40E_PFINT_STAT_CTL0,
I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK);
- /* Link no queues with irq0 */
- I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
- I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK);
+ if (no_queue)
+ /* Link no queues with irq0 */
+ I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
+ I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK);
}
static void
static int
i40e_tunnel_filter_param_check(struct i40e_pf *pf,
- struct rte_eth_tunnel_filter_conf *filter)
+ struct rte_eth_tunnel_filter_conf *filter)
{
if (pf == NULL || filter == NULL) {
PMD_DRV_LOG(ERR, "Invalid parameter");
return 0;
}
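+/* GL_PRS_FVBM is not defined in the shared register file, define it here */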
+#define I40E_GL_PRS_FVBM_MSK_ENA 0x80000000
+#define I40E_GL_PRS_FVBM(_i) (0x00269760 + ((_i) * 4))
+static int
+i40e_dev_set_gre_key_len(struct i40e_hw *hw, uint8_t len)
+{
+ uint32_t val, reg;
+ int ret = -EINVAL;
+
+ val = I40E_READ_REG(hw, I40E_GL_PRS_FVBM(2));
+	PMD_DRV_LOG(DEBUG, "Read original GL_PRS_FVBM: 0x%08x\n", val);
+
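+	/* Setting the mask bit selects a 3-byte GRE key,
+	 * clearing it restores the default 4-byte key.
+	 */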
+ if (len == 3) {
+ reg = val | I40E_GL_PRS_FVBM_MSK_ENA;
+ } else if (len == 4) {
+ reg = val & ~I40E_GL_PRS_FVBM_MSK_ENA;
+ } else {
+ PMD_DRV_LOG(ERR, "Unsupported GRE key length of %u", len);
+ return ret;
+ }
+
+ if (reg != val) {
+ ret = i40e_aq_debug_write_register(hw, I40E_GL_PRS_FVBM(2),
+ reg, NULL);
+ if (ret != 0)
+ return ret;
+ } else {
+ ret = 0;
+ }
+	PMD_DRV_LOG(DEBUG, "Read modified GL_PRS_FVBM: 0x%08x\n",
+ I40E_READ_REG(hw, I40E_GL_PRS_FVBM(2)));
+
+ return ret;
+}
+
+static int
+i40e_dev_global_config_set(struct i40e_hw *hw, struct rte_eth_global_cfg *cfg)
+{
+ int ret = -EINVAL;
+
+ if (!hw || !cfg)
+ return -EINVAL;
+
+ switch (cfg->cfg_type) {
+ case RTE_ETH_GLOBAL_CFG_TYPE_GRE_KEY_LEN:
+ ret = i40e_dev_set_gre_key_len(hw, cfg->cfg.gre_key_len);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Unknown config type %u", cfg->cfg_type);
+ break;
+ }
+
+ return ret;
+}
+
+static int
+i40e_filter_ctrl_global_config(struct rte_eth_dev *dev,
+ enum rte_filter_op filter_op,
+ void *arg)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int ret = I40E_ERR_PARAM;
+
+ switch (filter_op) {
+ case RTE_ETH_FILTER_SET:
+ ret = i40e_dev_global_config_set(hw,
+ (struct rte_eth_global_cfg *)arg);
+ break;
+ default:
+		PMD_DRV_LOG(ERR, "Unknown operation %u", filter_op);
+ break;
+ }
+
+ return ret;
+}
+
static int
-i40e_tunnel_filter_handle(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
- void *arg)
+i40e_tunnel_filter_handle(struct rte_eth_dev *dev,
+ enum rte_filter_op filter_op,
+ void *arg)
{
struct rte_eth_tunnel_filter_conf *filter;
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
case RTE_ETH_FILTER_NOP:
if (!(pf->flags & I40E_FLAG_VXLAN))
ret = I40E_NOT_SUPPORTED;
+ break;
case RTE_ETH_FILTER_ADD:
ret = i40e_dev_tunnel_filter_set(pf, filter, 1);
break;
return -EINVAL;
switch (filter_type) {
+ case RTE_ETH_FILTER_NONE:
+ /* For global configuration */
+ ret = i40e_filter_ctrl_global_config(dev, filter_op, arg);
+ break;
case RTE_ETH_FILTER_HASH:
ret = i40e_hash_filter_ctrl(dev, filter_op, arg);
break;
*
* Returns 0 on success, negative value on failure
*/
-static int
+static enum i40e_status_code
i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
{
struct i40e_aqc_query_vsi_ets_sla_config_resp bw_ets_config = {0};
struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
- int i, ret;
+ enum i40e_status_code ret;
+ int i;
uint32_t tc_bw_max;
/* Get the VSI level BW configuration */
"couldn't get PF vsi bw config, err %s aq_err %s\n",
i40e_stat_str(hw, ret),
i40e_aq_str(hw, hw->aq.asq_last_status));
- return -EINVAL;
+ return ret;
}
/* Get the VSI level BW configuration per TC */
"couldn't get PF vsi ets bw config, err %s aq_err %s\n",
i40e_stat_str(hw, ret),
i40e_aq_str(hw, hw->aq.asq_last_status));
- return -EINVAL;
+ return ret;
}
if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) {
__func__, vsi->seid, i, bw_config.qs_handles[i]);
}
- return 0;
+ return ret;
}
-static int
+static enum i40e_status_code
i40e_vsi_update_queue_mapping(struct i40e_vsi *vsi,
struct i40e_aqc_vsi_properties_data *info,
uint8_t enabled_tcmap)
{
- int ret, i, total_tc = 0;
+ enum i40e_status_code ret;
+ int i, total_tc = 0;
uint16_t qpnum_per_tc, bsf, qp_idx;
struct rte_eth_dev_data *dev_data = I40E_VSI_TO_DEV_DATA(vsi);
*
* Returns 0 on success, negative value on failure
*/
-static int
+static enum i40e_status_code
i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 tc_map)
{
struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
struct i40e_vsi_context ctxt;
struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
- int ret = 0;
+ enum i40e_status_code ret = I40E_SUCCESS;
int i;
/* Check if enabled_tc is same as existing or new TCs */
struct i40e_dcbx_config *old_cfg = &hw->local_dcbx_config;
struct i40e_vsi *main_vsi = pf->main_vsi;
struct i40e_vsi_list *vsi_list;
- int i, ret;
+ enum i40e_status_code ret;
+ int i;
uint32_t val;
/* Use the FW API if FW > v4.4*/
PMD_INIT_LOG(ERR, "dcb sw configure fails");
return -ENOSYS;
}
+
return 0;
}
dcb_info->tc_queue.tc_rxq[0][i].nb_queue;
}
}
+
+ return 0;
+}
+
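+/* Re-enable (re-arm) the MSI-X vector that the given Rx queue is mapped to */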
+static int
+i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint16_t interval =
+ i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL);
+ uint16_t msix_intr;
+
+ msix_intr = intr_handle->intr_vec[queue_id];
+ if (msix_intr == I40E_MISC_VEC_ID)
+ I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
+ I40E_PFINT_DYN_CTLN_INTENA_MASK |
+ I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
+ (0 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
+ (interval <<
+ I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT));
+ else
+ I40E_WRITE_REG(hw,
+ I40E_PFINT_DYN_CTLN(msix_intr -
+ I40E_RX_VEC_START),
+ I40E_PFINT_DYN_CTLN_INTENA_MASK |
+ I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
+ (0 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
+ (interval <<
+ I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT));
+
+ I40E_WRITE_FLUSH(hw);
+ rte_intr_enable(&dev->pci_dev->intr_handle);
+
+ return 0;
+}
+
+static int
+i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint16_t msix_intr;
+
+ msix_intr = intr_handle->intr_vec[queue_id];
+ if (msix_intr == I40E_MISC_VEC_ID)
+ I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, 0);
+ else
+ I40E_WRITE_REG(hw,
+ I40E_PFINT_DYN_CTLN(msix_intr -
+ I40E_RX_VEC_START),
+ 0);
+ I40E_WRITE_FLUSH(hw);
+
return 0;
}