#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_ether.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_malloc.h>
#include <rte_dev.h>
#define I40EVF_BUSY_WAIT_COUNT 50
#define MAX_RESET_WAIT_CNT 20
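+/* Poll period of the alarm-based admin queue handler */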
+#define I40EVF_ALARM_INTERVAL 50000 /* us */
+
struct i40evf_arq_msg_info {
enum virtchnl_ops ops;
enum i40e_status_code result;
static int i40evf_dev_tx_queue_stop(struct rte_eth_dev *dev,
uint16_t tx_queue_id);
static int i40evf_add_mac_addr(struct rte_eth_dev *dev,
- struct ether_addr *addr,
+ struct rte_ether_addr *addr,
uint32_t index,
uint32_t pool);
static void i40evf_del_mac_addr(struct rte_eth_dev *dev, uint32_t index);
static int i40evf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
struct rte_eth_rss_conf *rss_conf);
static int i40evf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
-static void i40evf_set_default_mac_addr(struct rte_eth_dev *dev,
- struct ether_addr *mac_addr);
+static int i40evf_set_default_mac_addr(struct rte_eth_dev *dev,
+ struct rte_ether_addr *mac_addr);
static int
i40evf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id);
static int
uint8_t *msg,
uint16_t msglen);
+static int
+i40evf_add_del_mc_addr_list(struct rte_eth_dev *dev,
+ struct rte_ether_addr *mc_addr_set,
+ uint32_t nb_mc_addr, bool add);
+static int
+i40evf_set_mc_addr_list(struct rte_eth_dev *dev,
+ struct rte_ether_addr *mc_addr_set,
+ uint32_t nb_mc_addr);
+static void
+i40evf_dev_alarm_handler(void *param);
+
/* Default hash key buffer for RSS */
static uint32_t rss_key_default[I40E_VFQF_HKEY_MAX_INDEX + 1];
.txq_info_get = i40e_txq_info_get,
.mac_addr_add = i40evf_add_mac_addr,
.mac_addr_remove = i40evf_del_mac_addr,
+ .set_mc_addr_list = i40evf_set_mc_addr_list,
.reta_update = i40evf_dev_rss_reta_update,
.reta_query = i40evf_dev_rss_reta_query,
.rss_hash_update = i40evf_dev_rss_hash_update,
} while (i++ < MAX_TRY_TIMES);
_clear_cmd(vf);
break;
+ case VIRTCHNL_OP_REQUEST_QUEUES:
+		/*
+		 * Ignore the asynchronous reply and wait only for the
+		 * system message: vf_reset is set to true on receipt of
+		 * VIRTCHNL_EVENT_RESET_IMPENDING; otherwise the queue
+		 * request has failed.
+		 */
+ err = -1;
+ do {
+ ret = i40evf_read_pfmsg(dev, &info);
+ vf->cmd_retval = info.result;
+ if (ret == I40EVF_MSG_SYS && vf->vf_reset) {
+ err = 0;
+ break;
+ } else if (ret == I40EVF_MSG_ERR ||
+ ret == I40EVF_MSG_CMD) {
+ break;
+ }
+ rte_delay_ms(ASQ_DELAY_MS);
+			/* Keep polling on no message or non-reset sys event */
+ } while (i++ < MAX_TRY_TIMES);
+ _clear_cmd(vf);
+ break;
default:
/* for other adminq in running time, waiting the cmd done flag */
{
txq_info->vsi_id = vsi_id;
txq_info->queue_id = queue_id;
- if (queue_id < nb_txq) {
+ if (queue_id < nb_txq && txq) {
txq_info->ring_len = txq->nb_tx_desc;
txq_info->dma_ring_addr = txq->tx_ring_phys_addr;
}
rxq_info->vsi_id = vsi_id;
rxq_info->queue_id = queue_id;
rxq_info->max_pkt_size = max_pkt_size;
- if (queue_id < nb_rxq) {
+ if (queue_id < nb_rxq && rxq) {
rxq_info->ring_len = rxq->nb_rx_desc;
rxq_info->dma_ring_addr = rxq->rx_ring_phys_addr;
rxq_info->databuffer_size =
for (i = 0, vc_qpi = vc_vqci->qpair; i < nb_qp; i++, vc_qpi++) {
i40evf_fill_virtchnl_vsi_txq_info(&vc_qpi->txq,
- vc_vqci->vsi_id, i, dev->data->nb_tx_queues, txq[i]);
+ vc_vqci->vsi_id, i, dev->data->nb_tx_queues,
+ txq ? txq[i] : NULL);
i40evf_fill_virtchnl_vsi_rxq_info(&vc_qpi->rxq,
vc_vqci->vsi_id, i, dev->data->nb_rx_queues,
- vf->max_pkt_len, rxq[i]);
+ vf->max_pkt_len, rxq ? rxq[i] : NULL);
}
memset(&args, 0, sizeof(args));
args.ops = VIRTCHNL_OP_CONFIG_VSI_QUEUES;
static int
i40evf_add_mac_addr(struct rte_eth_dev *dev,
- struct ether_addr *addr,
+ struct rte_ether_addr *addr,
__rte_unused uint32_t index,
__rte_unused uint32_t pool)
{
int err;
struct vf_cmd_info args;
- if (is_zero_ether_addr(addr)) {
+ if (rte_is_zero_ether_addr(addr)) {
PMD_DRV_LOG(ERR, "Invalid mac:%x:%x:%x:%x:%x:%x",
addr->addr_bytes[0], addr->addr_bytes[1],
addr->addr_bytes[2], addr->addr_bytes[3],
static void
i40evf_del_mac_addr_by_addr(struct rte_eth_dev *dev,
- struct ether_addr *addr)
+ struct rte_ether_addr *addr)
{
struct virtchnl_ether_addr_list *list;
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
i40evf_del_mac_addr(struct rte_eth_dev *dev, uint32_t index)
{
struct rte_eth_dev_data *data = dev->data;
- struct ether_addr *addr;
+ struct rte_ether_addr *addr;
addr = &data->mac_addrs[index];
static void
i40evf_dev_xstats_reset(struct rte_eth_dev *dev)
{
+ int ret;
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
struct i40e_eth_stats *pstats = NULL;
/* read stat values to clear hardware registers */
- i40evf_query_stats(dev, &pstats);
+ ret = i40evf_query_stats(dev, &pstats);
/* set stats offset base on current values */
- vf->vsi.eth_stats_offset = *pstats;
+ if (ret == 0)
+ vf->vsi.eth_stats_offset = *pstats;
}
static int i40evf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
return err;
}
+static int
+i40evf_request_queues(struct rte_eth_dev *dev, uint16_t num)
+{
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ struct virtchnl_vf_res_request vfres;
+ struct vf_cmd_info args;
+ int err;
+
+ vfres.num_queue_pairs = num;
+
+ args.ops = VIRTCHNL_OP_REQUEST_QUEUES;
+ args.in_args = (u8 *)&vfres;
+ args.in_args_size = sizeof(vfres);
+ args.out_buffer = vf->aq_resp;
+ args.out_size = I40E_AQ_BUF_SZ;
+
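+	/*
+	 * Pause the periodic admin queue alarm so its handler does not
+	 * consume the reply to this command while we poll for it.
+	 */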
+ rte_eal_alarm_cancel(i40evf_dev_alarm_handler, dev);
+ err = i40evf_execute_vf_cmd(dev, &args);
+ if (err)
+ PMD_DRV_LOG(ERR, "fail to execute command OP_REQUEST_QUEUES");
+
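+	/* Resume periodic admin queue polling */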
+ rte_eal_alarm_set(I40EVF_ALARM_INTERVAL,
+ i40evf_dev_alarm_handler, dev);
+ return err;
+}
+
static int
i40evf_del_vlan(struct rte_eth_dev *dev, uint16_t vlanid)
{
{ .vendor_id = 0, /* sentinel */ },
};
-static inline int
-i40evf_dev_atomic_write_link_status(struct rte_eth_dev *dev,
- struct rte_eth_link *link)
-{
- struct rte_eth_link *dst = &(dev->data->dev_link);
- struct rte_eth_link *src = link;
-
- if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
- *(uint64_t *)src) == 0)
- return -1;
-
- return 0;
-}
-
/* Disable IRQ0 */
static inline void
i40evf_disable_irq0(struct i40e_hw *hw)
}
static int
-i40evf_check_vf_reset_done(struct i40e_hw *hw)
+i40evf_check_vf_reset_done(struct rte_eth_dev *dev)
{
int i, reset;
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
for (i = 0; i < MAX_RESET_WAIT_CNT; i++) {
reset = I40E_READ_REG(hw, I40E_VFGEN_RSTAT) &
if (i >= MAX_RESET_WAIT_CNT)
return -1;
+ vf->vf_reset = false;
+ vf->pend_msg &= ~PFMSG_RESET_IMPENDING;
+
return 0;
}
static int
-i40evf_reset_vf(struct i40e_hw *hw)
+i40evf_reset_vf(struct rte_eth_dev *dev)
{
int ret;
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
if (i40e_vf_reset(hw) != I40E_SUCCESS) {
PMD_INIT_LOG(ERR, "Reset VF NIC failed");
*/
rte_delay_ms(200);
- ret = i40evf_check_vf_reset_done(hw);
+ ret = i40evf_check_vf_reset_done(dev);
if (ret) {
PMD_INIT_LOG(ERR, "VF is still resetting");
return ret;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
uint16_t interval =
- i40e_calc_itr_interval(I40E_QUEUE_ITR_INTERVAL_MAX);
+ i40e_calc_itr_interval(0, 0);
vf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
vf->dev_data = dev->data;
goto err;
}
- err = i40evf_check_vf_reset_done(hw);
+ err = i40evf_check_vf_reset_done(dev);
if (err)
goto err;
}
/* Reset VF and wait until it's complete */
- if (i40evf_reset_vf(hw)) {
+ if (i40evf_reset_vf(dev)) {
PMD_INIT_LOG(ERR, "reset NIC failed");
goto err_aq;
}
vf->vsi.adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
/* Store the MAC address configured by host, or generate random one */
- if (is_valid_assigned_ether_addr((struct ether_addr *)hw->mac.addr))
+ if (rte_is_valid_assigned_ether_addr(
+ (struct rte_ether_addr *)hw->mac.addr))
vf->flags |= I40E_FLAG_VF_MAC_BY_PF;
else
- eth_random_addr(hw->mac.addr); /* Generate a random one */
+ rte_eth_random_addr(hw->mac.addr); /* Generate a random one */
I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTL01,
(I40E_ITR_INDEX_DEFAULT <<
PMD_INIT_FUNC_TRACE();
- if (hw->adapter_stopped == 0)
+ if (hw->adapter_closed == 0)
i40evf_dev_close(dev);
rte_free(vf->vf_res);
vf->vf_res = NULL;
case VIRTCHNL_EVENT_RESET_IMPENDING:
PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_RESET_IMPENDING event");
_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET,
- NULL, NULL);
+ NULL);
break;
case VIRTCHNL_EVENT_LINK_CHANGE:
PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_LINK_CHANGE event");
* void
*/
static void
-i40evf_dev_interrupt_handler(void *param)
+i40evf_dev_alarm_handler(void *param)
{
struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
icr0 = I40E_READ_REG(hw, I40E_VFINT_ICR01);
/* No interrupt event indicated */
- if (!(icr0 & I40E_VFINT_ICR01_INTEVENT_MASK)) {
- PMD_DRV_LOG(DEBUG, "No interrupt event, nothing to do");
+ if (!(icr0 & I40E_VFINT_ICR01_INTEVENT_MASK))
goto done;
- }
if (icr0 & I40E_VFINT_ICR01_ADMINQ_MASK) {
PMD_DRV_LOG(DEBUG, "ICR01_ADMINQ is reported");
done:
i40evf_enable_irq0(hw);
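+	/* Schedule the next admin queue poll */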
+ rte_eal_alarm_set(I40EVF_ALARM_INTERVAL,
+ i40evf_dev_alarm_handler, dev);
}
static int
hw->bus.func = pci_dev->addr.function;
hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
hw->adapter_stopped = 0;
+ hw->adapter_closed = 0;
if(i40evf_init_vf(eth_dev) != 0) {
PMD_INIT_LOG(ERR, "Init vf failed");
return -1;
}
- /* register callback func to eal lib */
- rte_intr_callback_register(&pci_dev->intr_handle,
- i40evf_dev_interrupt_handler, (void *)eth_dev);
-
- /* enable uio intr after callback register */
- rte_intr_enable(&pci_dev->intr_handle);
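+	/* Handle admin queue events with a periodic alarm; the device
+	 * interrupt stays free for Rx queue interrupt mapping.
+	 */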
+ rte_eal_alarm_set(I40EVF_ALARM_INTERVAL,
+ i40evf_dev_alarm_handler, eth_dev);
/* configure and enable device interrupt */
i40evf_enable_irq0(hw);
/* copy mac addr */
eth_dev->data->mac_addrs = rte_zmalloc("i40evf_mac",
- ETHER_ADDR_LEN * I40E_NUM_MACADDR_MAX,
- 0);
+ RTE_ETHER_ADDR_LEN * I40E_NUM_MACADDR_MAX,
+ 0);
if (eth_dev->data->mac_addrs == NULL) {
PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to"
" store MAC addresses",
- ETHER_ADDR_LEN * I40E_NUM_MACADDR_MAX);
+ RTE_ETHER_ADDR_LEN * I40E_NUM_MACADDR_MAX);
return -ENOMEM;
}
- ether_addr_copy((struct ether_addr *)hw->mac.addr,
+ rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr,
ð_dev->data->mac_addrs[0]);
return 0;
return -1;
}
- rte_free(eth_dev->data->mac_addrs);
- eth_dev->data->mac_addrs = NULL;
-
return 0;
}
*/
static struct rte_pci_driver rte_i40evf_pmd = {
.id_table = pci_id_i40evf_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_IOVA_AS_VA,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
.probe = eth_i40evf_pci_probe,
.remove = eth_i40evf_pci_remove,
};
static int
i40evf_dev_configure(struct rte_eth_dev *dev)
{
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
struct i40e_adapter *ad =
I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
- struct rte_eth_conf *conf = &dev->data->dev_conf;
- struct i40e_vf *vf;
+ uint16_t num_queue_pairs = RTE_MAX(dev->data->nb_rx_queues,
+ dev->data->nb_tx_queues);
/* Initialize to TRUE. If any of Rx queues doesn't meet the bulk
* allocation or vector Rx preconditions we will reset it.
ad->tx_simple_allowed = true;
ad->tx_vec_allowed = true;
- /* For non-DPDK PF drivers, VF has no ability to disable HW
- * CRC strip, and is implicitly enabled by the PF.
- */
- if (!conf->rxmode.hw_strip_crc) {
- vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
- if ((vf->version_major == VIRTCHNL_VERSION_MAJOR) &&
- (vf->version_minor <= VIRTCHNL_VERSION_MINOR)) {
- /* Peer is running non-DPDK PF driver. */
- PMD_INIT_LOG(ERR, "VF can't disable HW CRC Strip");
- return -EINVAL;
- }
+ if (num_queue_pairs > vf->vsi_res->num_queue_pairs) {
+ int ret = 0;
+
+ PMD_DRV_LOG(INFO, "change queue pairs from %u to %u",
+ vf->vsi_res->num_queue_pairs, num_queue_pairs);
+ ret = i40evf_request_queues(dev, num_queue_pairs);
+ if (ret != 0)
+ return ret;
+
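+		/*
+		 * The PF signals the grant with a VF reset event, so
+		 * reset the VF to pick up the new queue resources.
+		 */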
+ ret = i40evf_dev_reset(dev);
+ if (ret != 0)
+ return ret;
}
return i40evf_init_vlan(dev);
i40evf_init_vlan(struct rte_eth_dev *dev)
{
/* Apply vlan offload setting */
- return i40evf_vlan_offload_set(dev, ETH_VLAN_STRIP_MASK);
+ i40evf_vlan_offload_set(dev, ETH_VLAN_STRIP_MASK);
+
+ return 0;
}
static int
i40evf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+
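+	/* VLAN offload requires the PF to advertise the VLAN capability */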
+ if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN))
+ return -ENOTSUP;
/* Vlan stripping setting */
if (mask & ETH_VLAN_STRIP_MASK) {
/* Enable or disable VLAN stripping */
- if (dev_conf->rxmode.hw_vlan_strip)
+ if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
i40evf_enable_vlan_strip(dev);
else
i40evf_disable_vlan_strip(dev);
i40evf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
struct i40e_rx_queue *rxq;
- int err = 0;
+ int err;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
PMD_INIT_FUNC_TRACE();
- if (rx_queue_id < dev->data->nb_rx_queues) {
- rxq = dev->data->rx_queues[rx_queue_id];
+ rxq = dev->data->rx_queues[rx_queue_id];
- err = i40e_alloc_rx_queue_mbufs(rxq);
- if (err) {
- PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
- return err;
- }
-
- rte_wmb();
+ err = i40e_alloc_rx_queue_mbufs(rxq);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
+ return err;
+ }
- /* Init the RX tail register. */
- I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
- I40EVF_WRITE_FLUSH(hw);
+ rte_wmb();
- /* Ready to switch the queue on */
- err = i40evf_switch_queue(dev, TRUE, rx_queue_id, TRUE);
+ /* Init the RX tail register. */
+ I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
+ I40EVF_WRITE_FLUSH(hw);
- if (err)
- PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
- rx_queue_id);
- else
- dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+ /* Ready to switch the queue on */
+ err = i40evf_switch_queue(dev, TRUE, rx_queue_id, TRUE);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
+ rx_queue_id);
+ return err;
}
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
- return err;
+ return 0;
}
static int
struct i40e_rx_queue *rxq;
int err;
- if (rx_queue_id < dev->data->nb_rx_queues) {
- rxq = dev->data->rx_queues[rx_queue_id];
-
- err = i40evf_switch_queue(dev, TRUE, rx_queue_id, FALSE);
-
- if (err) {
- PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
- rx_queue_id);
- return err;
- }
+ rxq = dev->data->rx_queues[rx_queue_id];
- i40e_rx_queue_release_mbufs(rxq);
- i40e_reset_rx_queue(rxq);
- dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+ err = i40evf_switch_queue(dev, TRUE, rx_queue_id, FALSE);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
+ rx_queue_id);
+ return err;
}
+ i40e_rx_queue_release_mbufs(rxq);
+ i40e_reset_rx_queue(rxq);
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
return 0;
}
static int
i40evf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
- int err = 0;
+ int err;
PMD_INIT_FUNC_TRACE();
- if (tx_queue_id < dev->data->nb_tx_queues) {
-
- /* Ready to switch the queue on */
- err = i40evf_switch_queue(dev, FALSE, tx_queue_id, TRUE);
-
- if (err)
- PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
- tx_queue_id);
- else
- dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+ /* Ready to switch the queue on */
+ err = i40evf_switch_queue(dev, FALSE, tx_queue_id, TRUE);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
+ tx_queue_id);
+ return err;
}
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
- return err;
+ return 0;
}
static int
struct i40e_tx_queue *txq;
int err;
- if (tx_queue_id < dev->data->nb_tx_queues) {
- txq = dev->data->tx_queues[tx_queue_id];
+ txq = dev->data->tx_queues[tx_queue_id];
- err = i40evf_switch_queue(dev, FALSE, tx_queue_id, FALSE);
-
- if (err) {
- PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
- tx_queue_id);
- return err;
- }
-
- i40e_tx_queue_release_mbufs(txq);
- i40e_reset_tx_queue(txq);
- dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+ err = i40evf_switch_queue(dev, FALSE, tx_queue_id, FALSE);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
+ tx_queue_id);
+ return err;
}
+ i40e_tx_queue_release_mbufs(txq);
+ i40e_reset_tx_queue(txq);
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
return 0;
}
/**
* Check if the jumbo frame and maximum packet length are set correctly
*/
- if (dev_data->dev_conf.rxmode.jumbo_frame == 1) {
- if (rxq->max_pkt_len <= ETHER_MAX_LEN ||
+ if (dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
+ if (rxq->max_pkt_len <= RTE_ETHER_MAX_LEN ||
rxq->max_pkt_len > I40E_FRAME_SIZE_MAX) {
PMD_DRV_LOG(ERR, "maximum packet length must be "
"larger than %u and smaller than %u, as jumbo "
- "frame is enabled", (uint32_t)ETHER_MAX_LEN,
+ "frame is enabled", (uint32_t)RTE_ETHER_MAX_LEN,
(uint32_t)I40E_FRAME_SIZE_MAX);
return I40E_ERR_CONFIG;
}
} else {
- if (rxq->max_pkt_len < ETHER_MIN_LEN ||
- rxq->max_pkt_len > ETHER_MAX_LEN) {
+ if (rxq->max_pkt_len < RTE_ETHER_MIN_LEN ||
+ rxq->max_pkt_len > RTE_ETHER_MAX_LEN) {
PMD_DRV_LOG(ERR, "maximum packet length must be "
"larger than %u and smaller than %u, as jumbo "
- "frame is disabled", (uint32_t)ETHER_MIN_LEN,
- (uint32_t)ETHER_MAX_LEN);
+ "frame is disabled",
+ (uint32_t)RTE_ETHER_MIN_LEN,
+ (uint32_t)RTE_ETHER_MAX_LEN);
return I40E_ERR_CONFIG;
}
}
- if (dev_data->dev_conf.rxmode.enable_scatter ||
- (rxq->max_pkt_len + 2 * I40E_VLAN_TAG_SIZE) > buf_size) {
+ if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
+ rxq->max_pkt_len > buf_size)
dev_data->scattered_rx = 1;
- }
return 0;
}
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint16_t interval =
- i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL);
+ i40e_calc_itr_interval(0, 0);
uint16_t msix_intr;
msix_intr = intr_handle->intr_vec[queue_id];
I40EVF_WRITE_FLUSH(hw);
- rte_intr_enable(&pci_dev->intr_handle);
-
return 0;
}
int next_begin = 0;
int begin = 0;
uint32_t len;
- struct ether_addr *addr;
+ struct rte_ether_addr *addr;
struct vf_cmd_info args;
do {
j = 0;
len = sizeof(struct virtchnl_ether_addr_list);
for (i = begin; i < I40E_NUM_MACADDR_MAX; i++, next_begin++) {
- if (is_zero_ether_addr(&dev->data->mac_addrs[i]))
+ if (rte_is_zero_ether_addr(&dev->data->mac_addrs[i]))
continue;
len += sizeof(struct virtchnl_ether_addr);
if (len >= I40E_AQ_BUF_SZ) {
for (i = begin; i < next_begin; i++) {
addr = &dev->data->mac_addrs[i];
- if (is_zero_ether_addr(addr))
+ if (rte_is_zero_ether_addr(addr))
continue;
rte_memcpy(list->list[j].addr, addr->addr_bytes,
sizeof(addr->addr_bytes));
dev->data->nb_tx_queues);
/* check and configure queue intr-vector mapping */
- if (dev->data->dev_conf.intr_conf.rxq != 0) {
+ if (rte_intr_cap_multiple(intr_handle) &&
+ dev->data->dev_conf.intr_conf.rxq) {
intr_vector = dev->data->nb_rx_queues;
if (rte_intr_efd_enable(intr_handle, intr_vector))
return -1;
/* Set all mac addrs */
i40evf_add_del_all_mac_addr(dev, TRUE);
+ /* Set all multicast addresses */
+ i40evf_add_del_mc_addr_list(dev, vf->mc_addrs, vf->mc_addrs_num,
+ TRUE);
if (i40evf_start_queues(dev) != 0) {
PMD_DRV_LOG(ERR, "enable queues failed");
goto err_mac;
}
- /* When a VF port is bound to VFIO-PCI, only miscellaneous interrupt
- * is mapped to VFIO vector 0 in i40evf_dev_init( ).
- * If previous VFIO interrupt mapping set in i40evf_dev_init( ) is
- * not cleared, it will fail when rte_intr_enable( ) tries to map Rx
- * queue interrupt to other VFIO vectors.
- * So clear uio/vfio intr/evevnfd first to avoid failure.
- */
- if (dev->data->dev_conf.intr_conf.rxq != 0) {
- rte_intr_disable(intr_handle);
+ /* only enable interrupt in rx interrupt mode */
+ if (dev->data->dev_conf.intr_conf.rxq != 0)
rte_intr_enable(intr_handle);
- }
i40evf_enable_queues_intr(dev);
err_mac:
i40evf_add_del_all_mac_addr(dev, FALSE);
+ i40evf_add_del_mc_addr_list(dev, vf->mc_addrs, vf->mc_addrs_num,
+ FALSE);
err_queue:
return -1;
}
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
PMD_INIT_FUNC_TRACE();
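+	/* Disable Rx queue interrupts before stopping the queues */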
+ if (dev->data->dev_conf.intr_conf.rxq != 0)
+ rte_intr_disable(intr_handle);
+
if (hw->adapter_stopped == 1)
return;
i40evf_stop_queues(dev);
}
/* remove all mac addrs */
i40evf_add_del_all_mac_addr(dev, FALSE);
+ /* remove all multicast addresses */
+ i40evf_add_del_mc_addr_list(dev, vf->mc_addrs, vf->mc_addrs_num,
+ FALSE);
hw->adapter_stopped = 1;
}
* while Linux driver does not
*/
+ memset(&new_link, 0, sizeof(new_link));
/* Linux driver PF host */
switch (vf->link_speed) {
case I40E_LINK_SPEED_100MB:
new_link.link_status = vf->link_up ? ETH_LINK_UP :
ETH_LINK_DOWN;
new_link.link_autoneg =
- dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED;
+ !(dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED);
- i40evf_dev_atomic_write_link_status(dev, &new_link);
-
- return 0;
+ return rte_eth_linkstatus_set(dev, &new_link);
}
static void
{
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
- memset(dev_info, 0, sizeof(*dev_info));
- dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
- dev_info->max_rx_queues = vf->vsi_res->num_queue_pairs;
- dev_info->max_tx_queues = vf->vsi_res->num_queue_pairs;
+ dev_info->max_rx_queues = I40E_MAX_QP_NUM_PER_VF;
+ dev_info->max_tx_queues = I40E_MAX_QP_NUM_PER_VF;
dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
+ dev_info->max_mtu = dev_info->max_rx_pktlen - I40E_ETH_OVERHEAD;
+ dev_info->min_mtu = RTE_ETHER_MIN_MTU;
dev_info->hash_key_size = (I40E_VFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
dev_info->reta_size = ETH_RSS_RETA_SIZE_64;
dev_info->flow_type_rss_offloads = vf->adapter->flow_types_mask;
dev_info->max_mac_addrs = I40E_NUM_MACADDR_MAX;
+ dev_info->rx_queue_offload_capa = 0;
dev_info->rx_offload_capa =
DEV_RX_OFFLOAD_VLAN_STRIP |
DEV_RX_OFFLOAD_QINQ_STRIP |
DEV_RX_OFFLOAD_IPV4_CKSUM |
DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM;
+ DEV_RX_OFFLOAD_TCP_CKSUM |
+ DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_SCATTER |
+ DEV_RX_OFFLOAD_JUMBO_FRAME |
+ DEV_RX_OFFLOAD_VLAN_FILTER;
+
+ dev_info->tx_queue_offload_capa = 0;
dev_info->tx_offload_capa =
DEV_TX_OFFLOAD_VLAN_INSERT |
DEV_TX_OFFLOAD_QINQ_INSERT |
DEV_TX_OFFLOAD_IPV4_CKSUM |
DEV_TX_OFFLOAD_UDP_CKSUM |
DEV_TX_OFFLOAD_TCP_CKSUM |
- DEV_TX_OFFLOAD_SCTP_CKSUM;
+ DEV_TX_OFFLOAD_SCTP_CKSUM |
+ DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_TCP_TSO |
+ DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
+ DEV_TX_OFFLOAD_GRE_TNL_TSO |
+ DEV_TX_OFFLOAD_IPIP_TNL_TSO |
+ DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
+ DEV_TX_OFFLOAD_MULTI_SEGS;
dev_info->default_rxconf = (struct rte_eth_rxconf) {
.rx_thresh = {
},
.rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH,
.rx_drop_en = 0,
+ .offloads = 0,
};
dev_info->default_txconf = (struct rte_eth_txconf) {
},
.tx_free_thresh = I40E_DEFAULT_TX_FREE_THRESH,
.tx_rs_thresh = I40E_DEFAULT_TX_RSBIT_THRESH,
- .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
- ETH_TXQ_FLAGS_NOOFFLOADS,
+ .offloads = 0,
};
dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
i40evf_dev_close(struct rte_eth_dev *dev)
{
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
- struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
i40evf_dev_stop(dev);
i40e_dev_free_queues(dev);
- i40evf_reset_vf(hw);
- i40e_shutdown_adminq(hw);
- /* disable uio intr before callback unregister */
- rte_intr_disable(intr_handle);
+	/*
+	 * Disable promiscuous mode before resetting the VF. This is a
+	 * workaround for interoperability with the kernel PF driver,
+	 * not part of the normal teardown path.
+	 */
+ i40evf_dev_promiscuous_disable(dev);
+ i40evf_dev_allmulticast_disable(dev);
+ rte_eal_alarm_cancel(i40evf_dev_alarm_handler, dev);
- /* unregister callback func from eal lib */
- rte_intr_callback_unregister(intr_handle,
- i40evf_dev_interrupt_handler, dev);
+ i40evf_reset_vf(dev);
+ i40e_shutdown_adminq(hw);
i40evf_disable_irq0(hw);
+ hw->adapter_closed = 1;
}
/*
int ret = 0;
/* check if mtu is within the allowed range */
- if ((mtu < ETHER_MIN_MTU) || (frame_size > I40E_FRAME_SIZE_MAX))
+ if (mtu < RTE_ETHER_MIN_MTU || frame_size > I40E_FRAME_SIZE_MAX)
return -EINVAL;
/* mtu setting is forbidden if port is start */
return -EBUSY;
}
- if (frame_size > ETHER_MAX_LEN)
- dev_data->dev_conf.rxmode.jumbo_frame = 1;
+ if (frame_size > RTE_ETHER_MAX_LEN)
+ dev_data->dev_conf.rxmode.offloads |=
+ DEV_RX_OFFLOAD_JUMBO_FRAME;
else
- dev_data->dev_conf.rxmode.jumbo_frame = 0;
-
+ dev_data->dev_conf.rxmode.offloads &=
+ ~DEV_RX_OFFLOAD_JUMBO_FRAME;
dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
return ret;
}
-static void
+static int
i40evf_set_default_mac_addr(struct rte_eth_dev *dev,
- struct ether_addr *mac_addr)
+ struct rte_ether_addr *mac_addr)
{
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- if (!is_valid_assigned_ether_addr(mac_addr)) {
+ if (!rte_is_valid_assigned_ether_addr(mac_addr)) {
PMD_DRV_LOG(ERR, "Tried to set invalid MAC address.");
- return;
+ return -EINVAL;
}
- if (is_same_ether_addr(mac_addr, dev->data->mac_addrs))
- return;
-
if (vf->flags & I40E_FLAG_VF_MAC_BY_PF)
- return;
+ return -EPERM;
+
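+	/* Replace the default address: remove the old one from the PF first */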
+ i40evf_del_mac_addr_by_addr(dev, (struct rte_ether_addr *)hw->mac.addr);
+
+ if (i40evf_add_mac_addr(dev, mac_addr, 0, 0) != 0)
+ return -EIO;
+
+ rte_ether_addr_copy(mac_addr, (struct rte_ether_addr *)hw->mac.addr);
+ return 0;
+}
- i40evf_del_mac_addr_by_addr(dev, dev->data->mac_addrs);
+static int
+i40evf_add_del_mc_addr_list(struct rte_eth_dev *dev,
+ struct rte_ether_addr *mc_addrs,
+ uint32_t mc_addrs_num, bool add)
+{
+ struct virtchnl_ether_addr_list *list;
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
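+	/* Buffer holds the list header plus up to I40E_NUM_MACADDR_MAX entries */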
+ uint8_t cmd_buffer[sizeof(struct virtchnl_ether_addr_list) +
+ (I40E_NUM_MACADDR_MAX * sizeof(struct virtchnl_ether_addr))];
+ uint32_t i;
+ int err;
+ struct vf_cmd_info args;
- i40evf_add_mac_addr(dev, mac_addr, 0, 0);
+ if (mc_addrs == NULL || mc_addrs_num == 0)
+ return 0;
+
+ if (mc_addrs_num > I40E_NUM_MACADDR_MAX)
+ return -EINVAL;
+
+ list = (struct virtchnl_ether_addr_list *)cmd_buffer;
+ list->vsi_id = vf->vsi_res->vsi_id;
+ list->num_elements = mc_addrs_num;
+
+ for (i = 0; i < mc_addrs_num; i++) {
+ if (!I40E_IS_MULTICAST(mc_addrs[i].addr_bytes)) {
+ PMD_DRV_LOG(ERR, "Invalid mac:%x:%x:%x:%x:%x:%x",
+ mc_addrs[i].addr_bytes[0],
+ mc_addrs[i].addr_bytes[1],
+ mc_addrs[i].addr_bytes[2],
+ mc_addrs[i].addr_bytes[3],
+ mc_addrs[i].addr_bytes[4],
+ mc_addrs[i].addr_bytes[5]);
+ return -EINVAL;
+ }
+
+ memcpy(list->list[i].addr, mc_addrs[i].addr_bytes,
+ sizeof(list->list[i].addr));
+ }
+
+ args.ops = add ? VIRTCHNL_OP_ADD_ETH_ADDR : VIRTCHNL_OP_DEL_ETH_ADDR;
+ args.in_args = cmd_buffer;
+ args.in_args_size = sizeof(struct virtchnl_ether_addr_list) +
+ i * sizeof(struct virtchnl_ether_addr);
+ args.out_buffer = vf->aq_resp;
+ args.out_size = I40E_AQ_BUF_SZ;
+ err = i40evf_execute_vf_cmd(dev, &args);
+ if (err) {
+ PMD_DRV_LOG(ERR, "fail to execute command %s",
+ add ? "OP_ADD_ETH_ADDR" : "OP_DEL_ETH_ADDR");
+ return err;
+ }
+
+ return 0;
+}
+
+static int
+i40evf_set_mc_addr_list(struct rte_eth_dev *dev,
+ struct rte_ether_addr *mc_addrs,
+ uint32_t mc_addrs_num)
+{
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ int err;
+
+ /* flush previous addresses */
+ err = i40evf_add_del_mc_addr_list(dev, vf->mc_addrs, vf->mc_addrs_num,
+ FALSE);
+ if (err)
+ return err;
+
+ vf->mc_addrs_num = 0;
+
+ /* add new ones */
+ err = i40evf_add_del_mc_addr_list(dev, mc_addrs, mc_addrs_num,
+ TRUE);
+ if (err)
+ return err;
+
+ vf->mc_addrs_num = mc_addrs_num;
+ memcpy(vf->mc_addrs, mc_addrs, mc_addrs_num * sizeof(*mc_addrs));
+
+ return 0;
+}
+
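+/* Return whether the ethdev is bound to this VF PMD */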
+bool
+is_i40evf_supported(struct rte_eth_dev *dev)
+{
+ return is_device_supported(dev, &rte_i40evf_pmd);
}