static int i40e_get_cap(struct i40e_hw *hw);
static int i40e_pf_parameter_init(struct rte_eth_dev *dev);
static int i40e_pf_setup(struct i40e_pf *pf);
-static int i40e_vsi_init(struct i40e_vsi *vsi);
+static int i40e_dev_rxtx_init(struct i40e_pf *pf);
static int i40e_vmdq_setup(struct rte_eth_dev *dev);
static void i40e_stat_update_32(struct i40e_hw *hw, uint32_t reg,
bool offset_loaded, uint64_t *offset, uint64_t *stat);
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct i40e_vsi *vsi = pf->main_vsi;
- int ret;
+ struct i40e_vsi *main_vsi = pf->main_vsi;
+ int ret, i;
if ((dev->data->dev_conf.link_duplex != ETH_LINK_AUTONEG_DUPLEX) &&
(dev->data->dev_conf.link_duplex != ETH_LINK_FULL_DUPLEX)) {
}
/* Initialize VSI */
- ret = i40e_vsi_init(vsi);
+ ret = i40e_dev_rxtx_init(pf);
if (ret != I40E_SUCCESS) {
- PMD_DRV_LOG(ERR, "Failed to init VSI");
+ PMD_DRV_LOG(ERR, "Failed to init rx/tx queues");
goto err_up;
}
/* Map queues with MSIX interrupt */
- i40e_vsi_queues_bind_intr(vsi);
- i40e_vsi_enable_queues_intr(vsi);
+ i40e_vsi_queues_bind_intr(main_vsi);
+ i40e_vsi_enable_queues_intr(main_vsi);
+
+ /* Map VMDQ VSI queues with MSIX interrupt */
+ for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
+ i40e_vsi_queues_bind_intr(pf->vmdq[i].vsi);
+ i40e_vsi_enable_queues_intr(pf->vmdq[i].vsi);
+ }
/* Enable all queues which have been configured */
- ret = i40e_vsi_switch_queues(vsi, TRUE);
+ ret = i40e_dev_switch_queues(pf, TRUE);
if (ret != I40E_SUCCESS) {
PMD_DRV_LOG(ERR, "Failed to enable VSI");
goto err_up;
}
/* Enable receiving broadcast packets */
- if ((vsi->type == I40E_VSI_MAIN) || (vsi->type == I40E_VSI_VMDQ2)) {
- ret = i40e_aq_set_vsi_broadcast(hw, vsi->seid, true, NULL);
+ ret = i40e_aq_set_vsi_broadcast(hw, main_vsi->seid, true, NULL);
+ if (ret != I40E_SUCCESS)
+ PMD_DRV_LOG(INFO, "fail to set vsi broadcast");
+
+ for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
+ ret = i40e_aq_set_vsi_broadcast(hw, pf->vmdq[i].vsi->seid,
+ true, NULL);
if (ret != I40E_SUCCESS)
PMD_DRV_LOG(INFO, "fail to set vsi broadcast");
}
return I40E_SUCCESS;
err_up:
- i40e_vsi_switch_queues(vsi, FALSE);
+ i40e_dev_switch_queues(pf, FALSE);
+ i40e_dev_clear_queues(dev);
return ret;
}
i40e_dev_stop(struct rte_eth_dev *dev)
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
- struct i40e_vsi *vsi = pf->main_vsi;
+ struct i40e_vsi *main_vsi = pf->main_vsi;
+ int i;
/* Disable all queues */
- i40e_vsi_switch_queues(vsi, FALSE);
+ i40e_dev_switch_queues(pf, FALSE);
+
+ /* un-map queues with interrupt registers */
+ i40e_vsi_disable_queues_intr(main_vsi);
+ i40e_vsi_queues_unbind_intr(main_vsi);
+
+ for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
+ i40e_vsi_disable_queues_intr(pf->vmdq[i].vsi);
+ i40e_vsi_queues_unbind_intr(pf->vmdq[i].vsi);
+ }
+
+ /* Clear all queues and release memory */
+ i40e_dev_clear_queues(dev);
/* Set link down */
i40e_dev_set_link_down(dev);
-
- /* un-map queues with interrupt registers */
- i40e_vsi_disable_queues_intr(vsi);
- i40e_vsi_queues_unbind_intr(vsi);
}
static void
static void
i40e_macaddr_add(struct rte_eth_dev *dev,
struct ether_addr *mac_addr,
- __attribute__((unused)) uint32_t index,
- __attribute__((unused)) uint32_t pool)
+ __rte_unused uint32_t index,
+ uint32_t pool)
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
- struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct i40e_mac_filter_info mac_filter;
- struct i40e_vsi *vsi = pf->main_vsi;
- struct ether_addr old_mac;
+ struct i40e_vsi *vsi;
int ret;
- if (!is_valid_assigned_ether_addr(mac_addr)) {
- PMD_DRV_LOG(ERR, "Invalid ethernet address");
+ /* If VMDQ not enabled or configured, return */
+ if (pool != 0 && (!(pf->flags & I40E_FLAG_VMDQ) || !pf->nb_cfg_vmdq_vsi)) {
+ PMD_DRV_LOG(ERR, "VMDQ not %s, can't set mac to pool %u",
+ pf->flags & I40E_FLAG_VMDQ ? "configured" : "enabled",
+ pool);
return;
}
- if (is_same_ether_addr(mac_addr, &(pf->dev_addr))) {
- PMD_DRV_LOG(INFO, "Ignore adding permanent mac address");
+ if (pool > pf->nb_cfg_vmdq_vsi) {
+ PMD_DRV_LOG(ERR, "Pool number %u invalid. Max pool is %u",
+ pool, pf->nb_cfg_vmdq_vsi);
return;
}
- /* Write mac address */
- ret = i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_ONLY,
- mac_addr->addr_bytes, NULL);
- if (ret != I40E_SUCCESS) {
- PMD_DRV_LOG(ERR, "Failed to write mac address");
- return;
- }
-
- (void)rte_memcpy(&old_mac, hw->mac.addr, ETHER_ADDR_LEN);
- (void)rte_memcpy(hw->mac.addr, mac_addr->addr_bytes,
- ETHER_ADDR_LEN);
(void)rte_memcpy(&mac_filter.mac_addr, mac_addr, ETHER_ADDR_LEN);
mac_filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
+ if (pool == 0)
+ vsi = pf->main_vsi;
+ else
+ vsi = pf->vmdq[pool - 1].vsi;
+
ret = i40e_vsi_add_mac(vsi, &mac_filter);
if (ret != I40E_SUCCESS) {
PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter");
return;
}
-
- ether_addr_copy(mac_addr, &pf->dev_addr);
- i40e_vsi_delete_mac(vsi, &old_mac);
}
/* Remove a MAC address, and update filters */
i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
- struct i40e_vsi *vsi = pf->main_vsi;
- struct rte_eth_dev_data *data = I40E_VSI_TO_DEV_DATA(vsi);
+ struct i40e_vsi *vsi;
+ struct rte_eth_dev_data *data = dev->data;
struct ether_addr *macaddr;
int ret;
- struct i40e_hw *hw =
- I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
- if (index >= vsi->max_macaddrs)
- return;
+ uint32_t i;
+ uint64_t pool_sel;
macaddr = &(data->mac_addrs[index]);
- if (!is_valid_assigned_ether_addr(macaddr))
- return;
-
- ret = i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_ONLY,
- hw->mac.perm_addr, NULL);
- if (ret != I40E_SUCCESS) {
- PMD_DRV_LOG(ERR, "Failed to write mac address");
- return;
- }
- (void)rte_memcpy(hw->mac.addr, hw->mac.perm_addr, ETHER_ADDR_LEN);
-
- ret = i40e_vsi_delete_mac(vsi, macaddr);
- if (ret != I40E_SUCCESS)
- return;
+ pool_sel = dev->data->mac_pool_sel[index];
+
+ for (i = 0; i < sizeof(pool_sel) * CHAR_BIT; i++) {
+ if (pool_sel & (1ULL << i)) {
+ if (i == 0)
+ vsi = pf->main_vsi;
+ else {
+ /* No VMDQ pool enabled or configured */
+ if (!(pf->flags & I40E_FLAG_VMDQ) ||
+ (i > pf->nb_cfg_vmdq_vsi)) {
+ PMD_DRV_LOG(ERR, "No VMDQ pool enabled"
+ "/configured");
+ return;
+ }
+ vsi = pf->vmdq[i - 1].vsi;
+ }
+ ret = i40e_vsi_delete_mac(vsi, macaddr);
- /* Clear device address as it has been removed */
- if (is_same_ether_addr(&(pf->dev_addr), macaddr))
- memset(&pf->dev_addr, 0, sizeof(struct ether_addr));
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failed to remove MACVLAN filter");
+ return;
+ }
+ }
+ }
}
/* Set perfect match or hash match of MAC and VLAN for a VF */
filter = (struct rte_eth_mac_filter *)(arg);
switch (filter_op) {
- case RTE_ETH_FILTER_NONE:
+ case RTE_ETH_FILTER_NOP:
ret = I40E_SUCCESS;
break;
case RTE_ETH_FILTER_ADD:
/* Switch on or off the tx queues */
static int
-i40e_vsi_switch_tx_queues(struct i40e_vsi *vsi, bool on)
+i40e_dev_switch_tx_queues(struct i40e_pf *pf, bool on)
{
- struct rte_eth_dev_data *dev_data = I40E_VSI_TO_DEV_DATA(vsi);
+ struct rte_eth_dev_data *dev_data = pf->dev_data;
struct i40e_tx_queue *txq;
- struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(vsi);
+ struct rte_eth_dev *dev = pf->adapter->eth_dev;
uint16_t i;
int ret;
txq = dev_data->tx_queues[i];
/* Don't operate the queue if not configured or
* if starting only per queue */
- if (!txq->q_set || (on && txq->tx_deferred_start))
+ if (!txq || !txq->q_set || (on && txq->tx_deferred_start))
continue;
if (on)
ret = i40e_dev_tx_queue_start(dev, i);
}
/* Switch on or off the rx queues */
static int
-i40e_vsi_switch_rx_queues(struct i40e_vsi *vsi, bool on)
+i40e_dev_switch_rx_queues(struct i40e_pf *pf, bool on)
{
- struct rte_eth_dev_data *dev_data = I40E_VSI_TO_DEV_DATA(vsi);
+ struct rte_eth_dev_data *dev_data = pf->dev_data;
struct i40e_rx_queue *rxq;
- struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(vsi);
+ struct rte_eth_dev *dev = pf->adapter->eth_dev;
uint16_t i;
int ret;
rxq = dev_data->rx_queues[i];
/* Don't operate the queue if not configured or
* if starting only per queue */
- if (!rxq->q_set || (on && rxq->rx_deferred_start))
+ if (!rxq || !rxq->q_set || (on && rxq->rx_deferred_start))
continue;
if (on)
ret = i40e_dev_rx_queue_start(dev, i);
/* Switch on or off all the rx/tx queues */
int
-i40e_vsi_switch_queues(struct i40e_vsi *vsi, bool on)
+i40e_dev_switch_queues(struct i40e_pf *pf, bool on)
{
int ret;
if (on) {
/* enable rx queues before enabling tx queues */
- ret = i40e_vsi_switch_rx_queues(vsi, on);
+ ret = i40e_dev_switch_rx_queues(pf, on);
if (ret) {
PMD_DRV_LOG(ERR, "Failed to switch rx queues");
return ret;
}
- ret = i40e_vsi_switch_tx_queues(vsi, on);
+ ret = i40e_dev_switch_tx_queues(pf, on);
} else {
/* Stop tx queues before stopping rx queues */
- ret = i40e_vsi_switch_tx_queues(vsi, on);
+ ret = i40e_dev_switch_tx_queues(pf, on);
if (ret) {
PMD_DRV_LOG(ERR, "Failed to switch tx queues");
return ret;
}
- ret = i40e_vsi_switch_rx_queues(vsi, on);
+ ret = i40e_dev_switch_rx_queues(pf, on);
}
return ret;
/* Initialize VSI for TX */
static int
-i40e_vsi_tx_init(struct i40e_vsi *vsi)
+i40e_dev_tx_init(struct i40e_pf *pf)
{
- struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
struct rte_eth_dev_data *data = pf->dev_data;
uint16_t i;
uint32_t ret = I40E_SUCCESS;
+ struct i40e_tx_queue *txq;
for (i = 0; i < data->nb_tx_queues; i++) {
- ret = i40e_tx_queue_init(data->tx_queues[i]);
+ txq = data->tx_queues[i];
+ if (!txq || !txq->q_set)
+ continue;
+ ret = i40e_tx_queue_init(txq);
if (ret != I40E_SUCCESS)
break;
}
/* Initialize VSI for RX */
static int
-i40e_vsi_rx_init(struct i40e_vsi *vsi)
+i40e_dev_rx_init(struct i40e_pf *pf)
{
- struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
struct rte_eth_dev_data *data = pf->dev_data;
int ret = I40E_SUCCESS;
uint16_t i;
+ struct i40e_rx_queue *rxq;
i40e_pf_config_mq_rx(pf);
for (i = 0; i < data->nb_rx_queues; i++) {
- ret = i40e_rx_queue_init(data->rx_queues[i]);
+ rxq = data->rx_queues[i];
+ if (!rxq || !rxq->q_set)
+ continue;
+
+ ret = i40e_rx_queue_init(rxq);
if (ret != I40E_SUCCESS) {
PMD_DRV_LOG(ERR, "Failed to do RX queue "
"initialization");
return ret;
}
-/* Initialize VSI */
static int
-i40e_vsi_init(struct i40e_vsi *vsi)
+i40e_dev_rxtx_init(struct i40e_pf *pf)
{
int err;
- err = i40e_vsi_tx_init(vsi);
+ err = i40e_dev_tx_init(pf);
if (err) {
- PMD_DRV_LOG(ERR, "Failed to do vsi TX initialization");
+ PMD_DRV_LOG(ERR, "Failed to do TX initialization");
return err;
}
- err = i40e_vsi_rx_init(vsi);
+ err = i40e_dev_rx_init(pf);
if (err) {
- PMD_DRV_LOG(ERR, "Failed to do vsi RX initialization");
+ PMD_DRV_LOG(ERR, "Failed to do RX initialization");
return err;
}
return -1;
}
- PMD_DRV_LOG(INFO, "Added %s port %d with AQ command with index %d",
- port, filter_index);
+ PMD_DRV_LOG(INFO, "Added port %d with AQ command with index %d",
+ port, filter_idx);
/* New port: add it and mark its index in the bitmap */
pf->vxlan_ports[idx] = port;
return ret;
}
+/* Calculate the maximum number of contiguous PF queues that are configured */
+static int
+i40e_pf_calc_configured_queues_num(struct i40e_pf *pf)
+{
+ struct rte_eth_dev_data *data = pf->dev_data;
+ int i, num;
+ struct i40e_rx_queue *rxq;
+
+ num = 0;
+ for (i = 0; i < pf->lan_nb_qps; i++) {
+ rxq = data->rx_queues[i];
+ if (rxq && rxq->q_set)
+ num++;
+ else
+ break;
+ }
+
+ return num;
+}
+
/* Configure RSS */
static int
i40e_pf_config_rss(struct i40e_pf *pf)
struct i40e_hw *hw = I40E_PF_TO_HW(pf);
struct rte_eth_rss_conf rss_conf;
uint32_t i, lut = 0;
- uint16_t j, num = i40e_align_floor(pf->dev_data->nb_rx_queues);
+ uint16_t j, num;
+
+ /*
+ * If both VMDQ and RSS enabled, not all of PF queues are configured.
+ * It's necessary to calculate the actual PF queues that are configured.
+ */
+ if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) {
+ num = i40e_pf_calc_configured_queues_num(pf);
+ num = i40e_align_floor(num);
+ } else
+ num = i40e_align_floor(pf->dev_data->nb_rx_queues);
+
+ PMD_INIT_LOG(INFO, "Max of contiguous %u PF queues are configured",
+ num);
+
+ if (num == 0) {
+ PMD_INIT_LOG(ERR, "No PF queues are configured to enable RSS");
+ return -ENOTSUP;
+ }
for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
if (j == num)
static int
i40e_pf_config_mq_rx(struct i40e_pf *pf)
{
- if (!pf->dev_data->sriov.active) {
- switch (pf->dev_data->dev_conf.rxmode.mq_mode) {
- case ETH_MQ_RX_RSS:
- i40e_pf_config_rss(pf);
- break;
- default:
- i40e_pf_disable_rss(pf);
- break;
- }
+ int ret = 0;
+ enum rte_eth_rx_mq_mode mq_mode = pf->dev_data->dev_conf.rxmode.mq_mode;
+
+ if (mq_mode & ETH_MQ_RX_DCB_FLAG) {
+ PMD_INIT_LOG(ERR, "i40e doesn't support DCB yet");
+ return -ENOTSUP;
}
- return 0;
+ /* RSS setup */
+ if (mq_mode & ETH_MQ_RX_RSS_FLAG)
+ ret = i40e_pf_config_rss(pf);
+ else
+ i40e_pf_disable_rss(pf);
+
+ return ret;
}
static int