#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_ether.h>
-#include <rte_ethdev_driver.h>
-#include <rte_ethdev_pci.h>
+#include <ethdev_driver.h>
+#include <ethdev_pci.h>
#include <rte_malloc.h>
#include <rte_dev.h>
static int i40evf_dev_configure(struct rte_eth_dev *dev);
static int i40evf_dev_start(struct rte_eth_dev *dev);
-static void i40evf_dev_stop(struct rte_eth_dev *dev);
+static int i40evf_dev_stop(struct rte_eth_dev *dev);
static int i40evf_dev_info_get(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info);
static int i40evf_dev_link_update(struct rte_eth_dev *dev,
static int i40evf_vlan_filter_set(struct rte_eth_dev *dev,
uint16_t vlan_id, int on);
static int i40evf_vlan_offload_set(struct rte_eth_dev *dev, int mask);
-static void i40evf_dev_close(struct rte_eth_dev *dev);
+static int i40evf_dev_close(struct rte_eth_dev *dev);
static int i40evf_dev_reset(struct rte_eth_dev *dev);
static int i40evf_check_vf_reset_done(struct rte_eth_dev *dev);
static int i40evf_dev_promiscuous_enable(struct rte_eth_dev *dev);
.rx_queue_release = i40e_dev_rx_queue_release,
.rx_queue_intr_enable = i40evf_dev_rx_queue_intr_enable,
.rx_queue_intr_disable = i40evf_dev_rx_queue_intr_disable,
- .rx_descriptor_done = i40e_dev_rx_descriptor_done,
- .rx_descriptor_status = i40e_dev_rx_descriptor_status,
- .tx_descriptor_status = i40e_dev_tx_descriptor_status,
.tx_queue_setup = i40e_dev_tx_queue_setup,
.tx_queue_release = i40e_dev_tx_queue_release,
- .rx_queue_count = i40e_dev_rx_queue_count,
.rxq_info_get = i40e_rxq_info_get,
.txq_info_get = i40e_txq_info_get,
.mac_addr_add = i40evf_add_mac_addr,
case VIRTCHNL_EVENT_RESET_IMPENDING:
vf->vf_reset = true;
vf->pend_msg |= PFMSG_RESET_IMPENDING;
- PMD_DRV_LOG(INFO, "vf is reseting");
+ PMD_DRV_LOG(INFO, "VF is resetting");
break;
case VIRTCHNL_EVENT_PF_DRIVER_CLOSE:
vf->dev_closed = true;
#define ASQ_DELAY_MS 10
static int
-i40evf_execute_vf_cmd(struct rte_eth_dev *dev, struct vf_cmd_info *args)
+_i40evf_execute_vf_cmd(struct rte_eth_dev *dev, struct vf_cmd_info *args)
{
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
return err | vf->cmd_retval;
}
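+/*
+ * Locked wrapper around _i40evf_execute_vf_cmd(): admin queue command
+ * submission is serialized with cmd_send_lock, sleeping 50 us between
+ * trylock attempts while another command is still in flight.
+ */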
+static int
+i40evf_execute_vf_cmd(struct rte_eth_dev *dev, struct vf_cmd_info *args)
+{
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ int err;
+
+ while (!rte_spinlock_trylock(&vf->cmd_send_lock))
+ rte_delay_us_sleep(50);
+ err = _i40evf_execute_vf_cmd(dev, args);
+ rte_spinlock_unlock(&vf->cmd_send_lock);
+ return err;
+}
+
/*
* Check API version with sync wait until version read or fail from admin queue
*/
VIRTCHNL_VF_OFFLOAD_RSS_AQ |
VIRTCHNL_VF_OFFLOAD_RSS_REG |
VIRTCHNL_VF_OFFLOAD_VLAN |
- VIRTCHNL_VF_OFFLOAD_RX_POLLING;
+ VIRTCHNL_VF_OFFLOAD_RX_POLLING |
+ VIRTCHNL_VF_CAP_ADV_LINK_SPEED;
args.in_args = (uint8_t *)&caps;
args.in_args_size = sizeof(caps);
} else {
args.out_buffer = vf->aq_resp;
args.out_size = I40E_AQ_BUF_SZ;
err = i40evf_execute_vf_cmd(dev, &args);
- if (err)
+ if (err) {
PMD_DRV_LOG(ERR, "fail to execute command OP_ADD_VLAN");
+ return err;
+ }
+ /**
+ * The Linux kernel PF driver enables VLAN_STRIP by default when it
+ * handles ADD_VLAN, so reconfigure the vlan_offload setting the
+ * application applied earlier.
+ */
+ err = i40evf_vlan_offload_set(dev, ETH_VLAN_STRIP_MASK);
+ if (err)
+ PMD_DRV_LOG(ERR, "fail to set vlan_strip");
return err;
}
vf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
vf->dev_data = dev->data;
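+ /* Serializes admin queue command submission, see i40evf_execute_vf_cmd(). */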
+ rte_spinlock_init(&vf->cmd_send_lock);
err = i40e_set_mac_type(hw);
if (err) {
PMD_INIT_LOG(ERR, "set_mac_type failed: %d", err);
switch (pf_msg->event) {
case VIRTCHNL_EVENT_RESET_IMPENDING:
PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_RESET_IMPENDING event");
- _rte_eth_dev_callback_process(dev,
+ rte_eth_dev_callback_process(dev,
RTE_ETH_EVENT_INTR_RESET, NULL);
break;
case VIRTCHNL_EVENT_LINK_CHANGE:
PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_LINK_CHANGE event");
- vf->link_up = pf_msg->event_data.link_event.link_status;
- vf->link_speed = pf_msg->event_data.link_event.link_speed;
+
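+ /* With VIRTCHNL_VF_CAP_ADV_LINK_SPEED the PF reports the link speed
+ * in Mbps (ETH_SPEED_NUM_*); map it to the legacy VIRTCHNL_LINK_SPEED_*
+ * values kept in vf->link_speed.
+ */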
+ if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_CAP_ADV_LINK_SPEED) {
+ vf->link_up =
+ pf_msg->event_data.link_event_adv.link_status;
+
+ switch (pf_msg->event_data.link_event_adv.link_speed) {
+ case ETH_SPEED_NUM_100M:
+ vf->link_speed = VIRTCHNL_LINK_SPEED_100MB;
+ break;
+ case ETH_SPEED_NUM_1G:
+ vf->link_speed = VIRTCHNL_LINK_SPEED_1GB;
+ break;
+ case ETH_SPEED_NUM_2_5G:
+ vf->link_speed = VIRTCHNL_LINK_SPEED_2_5GB;
+ break;
+ case ETH_SPEED_NUM_5G:
+ vf->link_speed = VIRTCHNL_LINK_SPEED_5GB;
+ break;
+ case ETH_SPEED_NUM_10G:
+ vf->link_speed = VIRTCHNL_LINK_SPEED_10GB;
+ break;
+ case ETH_SPEED_NUM_20G:
+ vf->link_speed = VIRTCHNL_LINK_SPEED_20GB;
+ break;
+ case ETH_SPEED_NUM_25G:
+ vf->link_speed = VIRTCHNL_LINK_SPEED_25GB;
+ break;
+ case ETH_SPEED_NUM_40G:
+ vf->link_speed = VIRTCHNL_LINK_SPEED_40GB;
+ break;
+ default:
+ vf->link_speed = VIRTCHNL_LINK_SPEED_UNKNOWN;
+ break;
+ }
+ } else {
+ vf->link_up =
+ pf_msg->event_data.link_event.link_status;
+ vf->link_speed =
+ pf_msg->event_data.link_event.link_speed;
+ }
+
i40evf_dev_link_update(dev, 0);
- _rte_eth_dev_callback_process(dev,
+ rte_eth_dev_callback_process(dev,
RTE_ETH_EVENT_INTR_LSC, NULL);
break;
case VIRTCHNL_EVENT_PF_DRIVER_CLOSE:
info.msg_len);
else {
/* read message and it's expected one */
- if (msg_opc == vf->pend_cmd) {
+ if ((volatile uint32_t)msg_opc ==
+ vf->pend_cmd) {
vf->cmd_retval = msg_ret;
/* prevent compiler reordering */
rte_compiler_barrier();
/* assign ops func pointer */
eth_dev->dev_ops = &i40evf_eth_dev_ops;
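+ /* These callbacks moved out of eth_dev_ops and are now set directly
+ * on the ethdev structure.
+ */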
+ eth_dev->rx_queue_count = i40e_dev_rx_queue_count;
+ eth_dev->rx_descriptor_done = i40e_dev_rx_descriptor_done;
+ eth_dev->rx_descriptor_status = i40e_dev_rx_descriptor_status;
+ eth_dev->tx_descriptor_status = i40e_dev_tx_descriptor_status;
eth_dev->rx_pkt_burst = &i40e_recv_pkts;
eth_dev->tx_pkt_burst = &i40e_xmit_pkts;
}
i40e_set_default_ptype_table(eth_dev);
rte_eth_copy_pci_info(eth_dev, pci_dev);
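+ /* Let the ethdev layer expose the per-queue basic stats as xstats. */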
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
hw->vendor_id = pci_dev->id.vendor_id;
hw->device_id = pci_dev->id.device_id;
hw->adapter_stopped = 1;
hw->adapter_closed = 0;
- /* Pass the information to the rte_eth_dev_close() that it should also
- * release the private port resources.
- */
- eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
-
if(i40evf_init_vf(eth_dev) != 0) {
PMD_INIT_LOG(ERR, "Init vf failed");
return -1;
* Check if the jumbo frame and maximum packet length are set correctly
*/
if (dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
- if (rxq->max_pkt_len <= RTE_ETHER_MAX_LEN ||
+ if (rxq->max_pkt_len <= I40E_ETH_MAX_LEN ||
rxq->max_pkt_len > I40E_FRAME_SIZE_MAX) {
PMD_DRV_LOG(ERR, "maximum packet length must be "
"larger than %u and smaller than %u, as jumbo "
- "frame is enabled", (uint32_t)RTE_ETHER_MAX_LEN,
+ "frame is enabled", (uint32_t)I40E_ETH_MAX_LEN,
(uint32_t)I40E_FRAME_SIZE_MAX);
return I40E_ERR_CONFIG;
}
} else {
if (rxq->max_pkt_len < RTE_ETHER_MIN_LEN ||
- rxq->max_pkt_len > RTE_ETHER_MAX_LEN) {
+ rxq->max_pkt_len > I40E_ETH_MAX_LEN) {
PMD_DRV_LOG(ERR, "maximum packet length must be "
"larger than %u and smaller than %u, as jumbo "
"frame is disabled",
(uint32_t)RTE_ETHER_MIN_LEN,
- (uint32_t)RTE_ETHER_MAX_LEN);
+ (uint32_t)I40E_ETH_MAX_LEN);
return I40E_ERR_CONFIG;
}
}
return -1;
}
-static void
+static int
i40evf_dev_stop(struct rte_eth_dev *dev)
{
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
rte_intr_disable(intr_handle);
if (hw->adapter_stopped == 1)
- return;
+ return 0;
i40evf_stop_queues(dev);
i40evf_disable_queues_intr(dev);
i40e_dev_clear_queues(dev);
i40evf_add_del_mc_addr_list(dev, vf->mc_addrs, vf->mc_addrs_num,
FALSE);
hw->adapter_stopped = 1;
+ dev->data->dev_started = 0;
+ return 0;
}
static int
stats->imissed = pstats->rx_discards;
stats->oerrors = pstats->tx_errors + pstats->tx_discards;
stats->ibytes = pstats->rx_bytes;
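+ /* Exclude the Ethernet CRC (4 bytes per packet) from the Rx byte count. */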
+ stats->ibytes -= stats->ipackets * RTE_ETHER_CRC_LEN;
stats->obytes = pstats->tx_bytes;
} else {
PMD_DRV_LOG(ERR, "Get statistics failed");
return ret;
}
-static void
+static int
i40evf_dev_close(struct rte_eth_dev *dev)
{
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ int ret;
+
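+ /* Secondary processes must not release resources shared with the primary. */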
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
- i40evf_dev_stop(dev);
+ ret = i40evf_dev_stop(dev);
i40e_dev_free_queues(dev);
/*
* disable promiscuous mode before reset vf
i40e_shutdown_adminq(hw);
i40evf_disable_irq0(hw);
- dev->dev_ops = NULL;
- dev->rx_pkt_burst = NULL;
- dev->tx_pkt_burst = NULL;
-
rte_free(vf->vf_res);
vf->vf_res = NULL;
rte_free(vf->aq_resp);
vf->aq_resp = NULL;
hw->adapter_closed = 1;
+ return ret;
}
/*
}
for (i = 0; i < rss_lut_size; i++)
- lut_info[i] = i % vf->num_queue_pairs;
+ lut_info[i] = i % num;
ret = i40evf_set_rss_lut(&vf->vsi, lut_info,
rss_lut_size);
return -EBUSY;
}
- if (frame_size > RTE_ETHER_MAX_LEN)
+ if (frame_size > I40E_ETH_MAX_LEN)
dev_data->dev_conf.rxmode.offloads |=
DEV_RX_OFFLOAD_JUMBO_FRAME;
else