#define I40EVF_BUSY_WAIT_COUNT 50
#define MAX_RESET_WAIT_CNT 20
+#define I40EVF_ALARM_INTERVAL 50000 /* us */
+
struct i40evf_arq_msg_info {
enum virtchnl_ops ops;
enum i40e_status_code result;
static int i40evf_dev_configure(struct rte_eth_dev *dev);
static int i40evf_dev_start(struct rte_eth_dev *dev);
-static void i40evf_dev_stop(struct rte_eth_dev *dev);
-static void i40evf_dev_info_get(struct rte_eth_dev *dev,
- struct rte_eth_dev_info *dev_info);
+static int i40evf_dev_stop(struct rte_eth_dev *dev);
+static int i40evf_dev_info_get(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info);
static int i40evf_dev_link_update(struct rte_eth_dev *dev,
int wait_to_complete);
static int i40evf_dev_stats_get(struct rte_eth_dev *dev,
static int i40evf_dev_xstats_get_names(struct rte_eth_dev *dev,
struct rte_eth_xstat_name *xstats_names,
unsigned limit);
-static void i40evf_dev_xstats_reset(struct rte_eth_dev *dev);
+static int i40evf_dev_xstats_reset(struct rte_eth_dev *dev);
static int i40evf_vlan_filter_set(struct rte_eth_dev *dev,
uint16_t vlan_id, int on);
static int i40evf_vlan_offload_set(struct rte_eth_dev *dev, int mask);
-static void i40evf_dev_close(struct rte_eth_dev *dev);
-static int i40evf_dev_reset(struct rte_eth_dev *dev);
-static void i40evf_dev_promiscuous_enable(struct rte_eth_dev *dev);
-static void i40evf_dev_promiscuous_disable(struct rte_eth_dev *dev);
-static void i40evf_dev_allmulticast_enable(struct rte_eth_dev *dev);
-static void i40evf_dev_allmulticast_disable(struct rte_eth_dev *dev);
+static int i40evf_dev_close(struct rte_eth_dev *dev);
+static int i40evf_dev_reset(struct rte_eth_dev *dev);
+static int i40evf_check_vf_reset_done(struct rte_eth_dev *dev);
+static int i40evf_dev_promiscuous_enable(struct rte_eth_dev *dev);
+static int i40evf_dev_promiscuous_disable(struct rte_eth_dev *dev);
+static int i40evf_dev_allmulticast_enable(struct rte_eth_dev *dev);
+static int i40evf_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int i40evf_init_vlan(struct rte_eth_dev *dev);
static int i40evf_dev_rx_queue_start(struct rte_eth_dev *dev,
uint16_t rx_queue_id);
static int i40evf_dev_tx_queue_stop(struct rte_eth_dev *dev,
uint16_t tx_queue_id);
static int i40evf_add_mac_addr(struct rte_eth_dev *dev,
- struct ether_addr *addr,
+ struct rte_ether_addr *addr,
uint32_t index,
uint32_t pool);
static void i40evf_del_mac_addr(struct rte_eth_dev *dev, uint32_t index);
struct rte_eth_rss_conf *rss_conf);
static int i40evf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
static int i40evf_set_default_mac_addr(struct rte_eth_dev *dev,
- struct ether_addr *mac_addr);
+ struct rte_ether_addr *mac_addr);
static int
i40evf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id);
static int
static int
i40evf_add_del_mc_addr_list(struct rte_eth_dev *dev,
- struct ether_addr *mc_addr_set,
+ struct rte_ether_addr *mc_addr_set,
uint32_t nb_mc_addr, bool add);
static int
-i40evf_set_mc_addr_list(struct rte_eth_dev *dev, struct ether_addr *mc_addr_set,
+i40evf_set_mc_addr_list(struct rte_eth_dev *dev,
+ struct rte_ether_addr *mc_addr_set,
uint32_t nb_mc_addr);
+static void
+i40evf_dev_alarm_handler(void *param);
/* Default hash key buffer for RSS */
static uint32_t rss_key_default[I40E_VFQF_HKEY_MAX_INDEX + 1];
.rx_queue_release = i40e_dev_rx_queue_release,
.rx_queue_intr_enable = i40evf_dev_rx_queue_intr_enable,
.rx_queue_intr_disable = i40evf_dev_rx_queue_intr_disable,
- .rx_descriptor_done = i40e_dev_rx_descriptor_done,
- .rx_descriptor_status = i40e_dev_rx_descriptor_status,
- .tx_descriptor_status = i40e_dev_tx_descriptor_status,
.tx_queue_setup = i40e_dev_tx_queue_setup,
.tx_queue_release = i40e_dev_tx_queue_release,
- .rx_queue_count = i40e_dev_rx_queue_count,
.rxq_info_get = i40e_rxq_info_get,
.txq_info_get = i40e_txq_info_get,
.mac_addr_add = i40evf_add_mac_addr,
.rss_hash_conf_get = i40evf_dev_rss_hash_conf_get,
.mtu_set = i40evf_dev_mtu_set,
.mac_addr_set = i40evf_set_default_mac_addr,
+ .tx_done_cleanup = i40e_tx_done_cleanup,
};
/*
} while (i++ < MAX_TRY_TIMES);
_clear_cmd(vf);
break;
+ case VIRTCHNL_OP_REQUEST_QUEUES:
+	/**
+	 * Ignore the async reply and wait only for the system message.
+	 * vf_reset is set true when VIRTCHNL_EVENT_RESET_IMPENDING is
+	 * received; if it never arrives, the queue request failed.
+	 */
+ err = -1;
+ do {
+ ret = i40evf_read_pfmsg(dev, &info);
+ vf->cmd_retval = info.result;
+ if (ret == I40EVF_MSG_SYS && vf->vf_reset) {
+ err = 0;
+ break;
+ } else if (ret == I40EVF_MSG_ERR ||
+ ret == I40EVF_MSG_CMD) {
+ break;
+ }
+ rte_delay_ms(ASQ_DELAY_MS);
+		/* Keep polling if no msg was read or a non-reset sys event arrived */
+ } while (i++ < MAX_TRY_TIMES);
+ _clear_cmd(vf);
+ break;
default:
/* for other adminq in running time, waiting the cmd done flag */
VIRTCHNL_VF_OFFLOAD_RSS_AQ |
VIRTCHNL_VF_OFFLOAD_RSS_REG |
VIRTCHNL_VF_OFFLOAD_VLAN |
- VIRTCHNL_VF_OFFLOAD_RX_POLLING;
+ VIRTCHNL_VF_OFFLOAD_RX_POLLING |
+ VIRTCHNL_VF_CAP_ADV_LINK_SPEED;
args.in_args = (uint8_t *)&caps;
args.in_args_size = sizeof(caps);
} else {
err = i40evf_execute_vf_cmd(dev, &args);
- if (err)
+ if (err) {
PMD_DRV_LOG(ERR, "fail to execute command "
"CONFIG_PROMISCUOUS_MODE");
- return err;
+
+ if (err == I40E_NOT_SUPPORTED)
+ return -ENOTSUP;
+
+ return -EAGAIN;
+ }
+
+ vf->promisc_unicast_enabled = enable_unicast;
+ vf->promisc_multicast_enabled = enable_multicast;
+ return 0;
}
static int
{
txq_info->vsi_id = vsi_id;
txq_info->queue_id = queue_id;
- if (queue_id < nb_txq) {
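+	/* Skip the ring info for queues that are out of range or were
+	 * never set up.
+	 */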
+ if (queue_id < nb_txq && txq) {
txq_info->ring_len = txq->nb_tx_desc;
txq_info->dma_ring_addr = txq->tx_ring_phys_addr;
}
rxq_info->vsi_id = vsi_id;
rxq_info->queue_id = queue_id;
rxq_info->max_pkt_size = max_pkt_size;
- if (queue_id < nb_rxq) {
+ if (queue_id < nb_rxq && rxq) {
rxq_info->ring_len = rxq->nb_rx_desc;
rxq_info->dma_ring_addr = rxq->rx_ring_phys_addr;
rxq_info->databuffer_size =
for (i = 0, vc_qpi = vc_vqci->qpair; i < nb_qp; i++, vc_qpi++) {
i40evf_fill_virtchnl_vsi_txq_info(&vc_qpi->txq,
- vc_vqci->vsi_id, i, dev->data->nb_tx_queues, txq[i]);
+ vc_vqci->vsi_id, i, dev->data->nb_tx_queues,
+ txq ? txq[i] : NULL);
i40evf_fill_virtchnl_vsi_rxq_info(&vc_qpi->rxq,
vc_vqci->vsi_id, i, dev->data->nb_rx_queues,
- vf->max_pkt_len, rxq[i]);
+ vf->max_pkt_len, rxq ? rxq[i] : NULL);
}
memset(&args, 0, sizeof(args));
args.ops = VIRTCHNL_OP_CONFIG_VSI_QUEUES;
{
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
struct vf_cmd_info args;
- uint8_t cmd_buffer[sizeof(struct virtchnl_irq_map_info) + \
- sizeof(struct virtchnl_vector_map)];
+ uint8_t *cmd_buffer = NULL;
struct virtchnl_irq_map_info *map_info;
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
- uint32_t vector_id;
- int i, err;
+ uint32_t vec, cmd_buffer_size, max_vectors, nb_msix, msix_base, i;
+ uint16_t rxq_map[vf->vf_res->max_vectors];
+ int err;
+ memset(rxq_map, 0, sizeof(rxq_map));
if (dev->data->dev_conf.intr_conf.rxq != 0 &&
- rte_intr_allow_others(intr_handle))
- vector_id = I40E_RX_VEC_START;
- else
- vector_id = I40E_MISC_VEC_ID;
+ rte_intr_allow_others(intr_handle)) {
+ msix_base = I40E_RX_VEC_START;
+		/* In Rx interrupt mode, usable vector ids start from 1. */
+ max_vectors = vf->vf_res->max_vectors - 1;
+ nb_msix = RTE_MIN(max_vectors, intr_handle->nb_efd);
+
+ vec = msix_base;
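+		/* Spread the Rx queues round-robin across the available
+		 * MSI-X vectors, wrapping back to msix_base after the
+		 * last one.
+		 */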
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq_map[vec] |= 1 << i;
+ intr_handle->intr_vec[i] = vec++;
+ if (vec >= vf->vf_res->max_vectors)
+ vec = msix_base;
+ }
+ } else {
+ msix_base = I40E_MISC_VEC_ID;
+ nb_msix = 1;
+
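+		/* Without per-queue interrupts, every Rx queue shares the
+		 * single miscellaneous vector.
+		 */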
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq_map[msix_base] |= 1 << i;
+ if (rte_intr_dp_is_en(intr_handle))
+ intr_handle->intr_vec[i] = msix_base;
+ }
+ }
+
+ cmd_buffer_size = sizeof(struct virtchnl_irq_map_info) +
+ sizeof(struct virtchnl_vector_map) * nb_msix;
+ cmd_buffer = rte_zmalloc("i40e", cmd_buffer_size, 0);
+ if (!cmd_buffer) {
+ PMD_DRV_LOG(ERR, "Failed to allocate memory");
+ return I40E_ERR_NO_MEMORY;
+ }
map_info = (struct virtchnl_irq_map_info *)cmd_buffer;
- map_info->num_vectors = 1;
- map_info->vecmap[0].rxitr_idx = I40E_ITR_INDEX_DEFAULT;
- map_info->vecmap[0].vsi_id = vf->vsi_res->vsi_id;
- /* Alway use default dynamic MSIX interrupt */
- map_info->vecmap[0].vector_id = vector_id;
- /* Don't map any tx queue */
- map_info->vecmap[0].txq_map = 0;
- map_info->vecmap[0].rxq_map = 0;
- for (i = 0; i < dev->data->nb_rx_queues; i++) {
- map_info->vecmap[0].rxq_map |= 1 << i;
- if (rte_intr_dp_is_en(intr_handle))
- intr_handle->intr_vec[i] = vector_id;
+ map_info->num_vectors = nb_msix;
+ for (i = 0; i < nb_msix; i++) {
+ map_info->vecmap[i].rxitr_idx = I40E_ITR_INDEX_DEFAULT;
+ map_info->vecmap[i].vsi_id = vf->vsi_res->vsi_id;
+ map_info->vecmap[i].vector_id = msix_base + i;
+ map_info->vecmap[i].txq_map = 0;
+ map_info->vecmap[i].rxq_map = rxq_map[msix_base + i];
}
args.ops = VIRTCHNL_OP_CONFIG_IRQ_MAP;
args.in_args = (u8 *)cmd_buffer;
- args.in_args_size = sizeof(cmd_buffer);
+ args.in_args_size = cmd_buffer_size;
args.out_buffer = vf->aq_resp;
args.out_size = I40E_AQ_BUF_SZ;
err = i40evf_execute_vf_cmd(dev, &args);
if (err)
PMD_DRV_LOG(ERR, "fail to execute command OP_ENABLE_QUEUES");
+ rte_free(cmd_buffer);
+
return err;
}
for (i = 0; i < dev->data->nb_tx_queues; i++) {
if (i40evf_dev_tx_queue_stop(dev, i) != 0) {
PMD_DRV_LOG(ERR, "Fail to stop queue %u", i);
- return -1;
}
}
for (i = 0; i < dev->data->nb_rx_queues; i++) {
if (i40evf_dev_rx_queue_stop(dev, i) != 0) {
PMD_DRV_LOG(ERR, "Fail to stop queue %u", i);
- return -1;
}
}
static int
i40evf_add_mac_addr(struct rte_eth_dev *dev,
- struct ether_addr *addr,
+ struct rte_ether_addr *addr,
__rte_unused uint32_t index,
__rte_unused uint32_t pool)
{
int err;
struct vf_cmd_info args;
- if (is_zero_ether_addr(addr)) {
+ if (rte_is_zero_ether_addr(addr)) {
PMD_DRV_LOG(ERR, "Invalid mac:%x:%x:%x:%x:%x:%x",
addr->addr_bytes[0], addr->addr_bytes[1],
addr->addr_bytes[2], addr->addr_bytes[3],
static void
i40evf_del_mac_addr_by_addr(struct rte_eth_dev *dev,
- struct ether_addr *addr)
+ struct rte_ether_addr *addr)
{
struct virtchnl_ether_addr_list *list;
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
i40evf_del_mac_addr(struct rte_eth_dev *dev, uint32_t index)
{
struct rte_eth_dev_data *data = dev->data;
- struct ether_addr *addr;
+ struct rte_ether_addr *addr;
addr = &data->mac_addrs[index];
i40evf_stat_update_32(&oes->tx_discards, &nes->tx_discards);
}
-static void
+static int
i40evf_dev_xstats_reset(struct rte_eth_dev *dev)
{
int ret;
/* set stats offset base on current values */
if (ret == 0)
vf->vsi.eth_stats_offset = *pstats;
+
+ return ret;
}
static int i40evf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
return err;
}
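+/* Ask the PF for a new number of queue pairs. On success the PF resets
+ * the VF, so the reset must be waited on before the device is used again.
+ */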
+static int
+i40evf_request_queues(struct rte_eth_dev *dev, uint16_t num)
+{
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ struct virtchnl_vf_res_request vfres;
+ struct vf_cmd_info args;
+ int err;
+
+ vfres.num_queue_pairs = num;
+
+ args.ops = VIRTCHNL_OP_REQUEST_QUEUES;
+ args.in_args = (u8 *)&vfres;
+ args.in_args_size = sizeof(vfres);
+ args.out_buffer = vf->aq_resp;
+ args.out_size = I40E_AQ_BUF_SZ;
+
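+	/* Pause the periodic admin queue poll so it cannot race with the
+	 * synchronous command below for PF replies.
+	 */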
+ rte_eal_alarm_cancel(i40evf_dev_alarm_handler, dev);
+
+ err = i40evf_execute_vf_cmd(dev, &args);
+
+ rte_eal_alarm_set(I40EVF_ALARM_INTERVAL, i40evf_dev_alarm_handler, dev);
+
+ if (err != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "fail to execute command OP_REQUEST_QUEUES");
+ return err;
+ }
+
+	/* The PF issues a reset to the VF when the number of queues is
+	 * changed. It sets I40E_VFGEN_RSTAT to COMPLETE first, waits 10ms,
+	 * and then sets it to ACTIVE, so the VF may miss the moment
+	 * COMPLETE is set. Wait long enough for the VF to catch it.
+	 */
+ rte_delay_ms(100);
+
+ err = i40evf_check_vf_reset_done(dev);
+ if (err)
+ PMD_DRV_LOG(ERR, "VF is still resetting");
+
+ return err;
+}
+
static int
i40evf_del_vlan(struct rte_eth_dev *dev, uint16_t vlanid)
{
}
static int
-i40evf_check_vf_reset_done(struct i40e_hw *hw)
+i40evf_check_vf_reset_done(struct rte_eth_dev *dev)
{
int i, reset;
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
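+	/* Poll VFGEN_RSTAT until the PF reports the reset as finished or
+	 * the retry budget runs out.
+	 */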
for (i = 0; i < MAX_RESET_WAIT_CNT; i++) {
reset = I40E_READ_REG(hw, I40E_VFGEN_RSTAT) &
if (i >= MAX_RESET_WAIT_CNT)
return -1;
+ vf->vf_reset = false;
+ vf->pend_msg &= ~PFMSG_RESET_IMPENDING;
+
return 0;
}
static int
-i40evf_reset_vf(struct i40e_hw *hw)
+i40evf_reset_vf(struct rte_eth_dev *dev)
{
int ret;
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
if (i40e_vf_reset(hw) != I40E_SUCCESS) {
PMD_INIT_LOG(ERR, "Reset VF NIC failed");
*/
rte_delay_ms(200);
- ret = i40evf_check_vf_reset_done(hw);
+ ret = i40evf_check_vf_reset_done(dev);
if (ret) {
PMD_INIT_LOG(ERR, "VF is still resetting");
return ret;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
uint16_t interval =
- i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL, 0, 0);
+ i40e_calc_itr_interval(0, 0);
vf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
vf->dev_data = dev->data;
goto err;
}
- err = i40evf_check_vf_reset_done(hw);
+ err = i40evf_check_vf_reset_done(dev);
if (err)
goto err;
}
/* Reset VF and wait until it's complete */
- if (i40evf_reset_vf(hw)) {
+ if (i40evf_reset_vf(dev)) {
PMD_INIT_LOG(ERR, "reset NIC failed");
goto err_aq;
}
vf->vsi.adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
/* Store the MAC address configured by host, or generate random one */
- if (is_valid_assigned_ether_addr((struct ether_addr *)hw->mac.addr))
- vf->flags |= I40E_FLAG_VF_MAC_BY_PF;
- else
- eth_random_addr(hw->mac.addr); /* Generate a random one */
+ if (!rte_is_valid_assigned_ether_addr(
+ (struct rte_ether_addr *)hw->mac.addr))
+ rte_eth_random_addr(hw->mac.addr); /* Generate a random one */
I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTL01,
(I40E_ITR_INDEX_DEFAULT <<
static int
i40evf_uninit_vf(struct rte_eth_dev *dev)
{
- struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
PMD_INIT_FUNC_TRACE();
- if (hw->adapter_stopped == 0)
+ if (hw->adapter_closed == 0)
i40evf_dev_close(dev);
- rte_free(vf->vf_res);
- vf->vf_res = NULL;
- rte_free(vf->aq_resp);
- vf->aq_resp = NULL;
return 0;
}
switch (pf_msg->event) {
case VIRTCHNL_EVENT_RESET_IMPENDING:
PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_RESET_IMPENDING event");
- _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET,
- NULL);
+ rte_eth_dev_callback_process(dev,
+ RTE_ETH_EVENT_INTR_RESET, NULL);
break;
case VIRTCHNL_EVENT_LINK_CHANGE:
PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_LINK_CHANGE event");
- vf->link_up = pf_msg->event_data.link_event.link_status;
- vf->link_speed = pf_msg->event_data.link_event.link_speed;
+
+ if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_CAP_ADV_LINK_SPEED) {
+ vf->link_up =
+ pf_msg->event_data.link_event_adv.link_status;
+
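+			/* With the advanced link-speed capability the PF
+			 * reports the speed in Mbps; map it back onto the
+			 * legacy virtchnl speed enum used elsewhere in
+			 * the VF.
+			 */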
+ switch (pf_msg->event_data.link_event_adv.link_speed) {
+ case ETH_SPEED_NUM_100M:
+ vf->link_speed = VIRTCHNL_LINK_SPEED_100MB;
+ break;
+ case ETH_SPEED_NUM_1G:
+ vf->link_speed = VIRTCHNL_LINK_SPEED_1GB;
+ break;
+ case ETH_SPEED_NUM_2_5G:
+ vf->link_speed = VIRTCHNL_LINK_SPEED_2_5GB;
+ break;
+ case ETH_SPEED_NUM_5G:
+ vf->link_speed = VIRTCHNL_LINK_SPEED_5GB;
+ break;
+ case ETH_SPEED_NUM_10G:
+ vf->link_speed = VIRTCHNL_LINK_SPEED_10GB;
+ break;
+ case ETH_SPEED_NUM_20G:
+ vf->link_speed = VIRTCHNL_LINK_SPEED_20GB;
+ break;
+ case ETH_SPEED_NUM_25G:
+ vf->link_speed = VIRTCHNL_LINK_SPEED_25GB;
+ break;
+ case ETH_SPEED_NUM_40G:
+ vf->link_speed = VIRTCHNL_LINK_SPEED_40GB;
+ break;
+ default:
+ vf->link_speed = VIRTCHNL_LINK_SPEED_UNKNOWN;
+ break;
+ }
+ } else {
+ vf->link_up =
+ pf_msg->event_data.link_event.link_status;
+ vf->link_speed =
+ pf_msg->event_data.link_event.link_speed;
+ }
+
+ i40evf_dev_link_update(dev, 0);
+ rte_eth_dev_callback_process(dev,
+ RTE_ETH_EVENT_INTR_LSC, NULL);
break;
case VIRTCHNL_EVENT_PF_DRIVER_CLOSE:
PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_PF_DRIVER_CLOSE event");
}
break;
default:
- PMD_DRV_LOG(ERR, "Request %u is not supported yet",
+ PMD_DRV_LOG(DEBUG, "Request %u is not supported yet",
aq_opc);
break;
}
* void
*/
static void
-i40evf_dev_interrupt_handler(void *param)
+i40evf_dev_alarm_handler(void *param)
{
struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
icr0 = I40E_READ_REG(hw, I40E_VFINT_ICR01);
/* No interrupt event indicated */
- if (!(icr0 & I40E_VFINT_ICR01_INTEVENT_MASK)) {
- PMD_DRV_LOG(DEBUG, "No interrupt event, nothing to do");
+ if (!(icr0 & I40E_VFINT_ICR01_INTEVENT_MASK))
goto done;
- }
if (icr0 & I40E_VFINT_ICR01_ADMINQ_MASK) {
PMD_DRV_LOG(DEBUG, "ICR01_ADMINQ is reported");
done:
i40evf_enable_irq0(hw);
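+	/* Re-arm the alarm so the admin queue keeps being polled every
+	 * I40EVF_ALARM_INTERVAL microseconds.
+	 */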
+ rte_eal_alarm_set(I40EVF_ALARM_INTERVAL,
+ i40evf_dev_alarm_handler, dev);
}
static int
/* assign ops func pointer */
eth_dev->dev_ops = &i40evf_eth_dev_ops;
+ eth_dev->rx_queue_count = i40e_dev_rx_queue_count;
+ eth_dev->rx_descriptor_done = i40e_dev_rx_descriptor_done;
+ eth_dev->rx_descriptor_status = i40e_dev_rx_descriptor_status;
+ eth_dev->tx_descriptor_status = i40e_dev_tx_descriptor_status;
eth_dev->rx_pkt_burst = &i40e_recv_pkts;
eth_dev->tx_pkt_burst = &i40e_xmit_pkts;
return 0;
}
i40e_set_default_ptype_table(eth_dev);
- i40e_set_default_pctype_table(eth_dev);
rte_eth_copy_pci_info(eth_dev, pci_dev);
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
hw->vendor_id = pci_dev->id.vendor_id;
hw->device_id = pci_dev->id.device_id;
hw->bus.device = pci_dev->addr.devid;
hw->bus.func = pci_dev->addr.function;
hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
- hw->adapter_stopped = 0;
+ hw->adapter_stopped = 1;
+ hw->adapter_closed = 0;
if(i40evf_init_vf(eth_dev) != 0) {
PMD_INIT_LOG(ERR, "Init vf failed");
return -1;
}
- /* register callback func to eal lib */
- rte_intr_callback_register(&pci_dev->intr_handle,
- i40evf_dev_interrupt_handler, (void *)eth_dev);
-
- /* enable uio intr after callback register */
- rte_intr_enable(&pci_dev->intr_handle);
+ i40e_set_default_pctype_table(eth_dev);
+ rte_eal_alarm_set(I40EVF_ALARM_INTERVAL,
+ i40evf_dev_alarm_handler, eth_dev);
/* configure and enable device interrupt */
i40evf_enable_irq0(hw);
/* copy mac addr */
eth_dev->data->mac_addrs = rte_zmalloc("i40evf_mac",
- ETHER_ADDR_LEN * I40E_NUM_MACADDR_MAX,
- 0);
+ RTE_ETHER_ADDR_LEN * I40E_NUM_MACADDR_MAX,
+ 0);
if (eth_dev->data->mac_addrs == NULL) {
PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to"
" store MAC addresses",
- ETHER_ADDR_LEN * I40E_NUM_MACADDR_MAX);
+ RTE_ETHER_ADDR_LEN * I40E_NUM_MACADDR_MAX);
return -ENOMEM;
}
- ether_addr_copy((struct ether_addr *)hw->mac.addr,
+ rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr,
ð_dev->data->mac_addrs[0]);
return 0;
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return -EPERM;
- eth_dev->dev_ops = NULL;
- eth_dev->rx_pkt_burst = NULL;
- eth_dev->tx_pkt_burst = NULL;
-
if (i40evf_uninit_vf(eth_dev) != 0) {
PMD_INIT_LOG(ERR, "i40evf_uninit_vf failed");
return -1;
}
- rte_free(eth_dev->data->mac_addrs);
- eth_dev->data->mac_addrs = NULL;
-
return 0;
}
*/
static struct rte_pci_driver rte_i40evf_pmd = {
.id_table = pci_id_i40evf_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_IOVA_AS_VA,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
.probe = eth_i40evf_pci_probe,
.remove = eth_i40evf_pci_remove,
};
static int
i40evf_dev_configure(struct rte_eth_dev *dev)
{
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
struct i40e_adapter *ad =
I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
- struct rte_eth_conf *conf = &dev->data->dev_conf;
- struct i40e_vf *vf;
+ uint16_t num_queue_pairs = RTE_MAX(dev->data->nb_rx_queues,
+ dev->data->nb_tx_queues);
/* Initialize to TRUE. If any Rx queue fails to meet the bulk
* allocation or vector Rx preconditions we will reset it.
ad->tx_simple_allowed = true;
ad->tx_vec_allowed = true;
- /* For non-DPDK PF drivers, VF has no ability to disable HW
- * CRC strip, and is implicitly enabled by the PF.
- */
- if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_CRC_STRIP)) {
- vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
- if ((vf->version_major == VIRTCHNL_VERSION_MAJOR) &&
- (vf->version_minor <= VIRTCHNL_VERSION_MINOR)) {
- /* Peer is running non-DPDK PF driver. */
- PMD_INIT_LOG(ERR, "VF can't disable HW CRC Strip");
- return -EINVAL;
+ dev->data->dev_conf.intr_conf.lsc =
+ !!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC);
+
+ if (num_queue_pairs > vf->vsi_res->num_queue_pairs) {
+ struct i40e_hw *hw;
+ int ret;
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ PMD_DRV_LOG(ERR,
+ "For secondary processes, change queue pairs is not supported!");
+ return -ENOTSUP;
+ }
+
+ hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ if (!hw->adapter_stopped) {
+ PMD_DRV_LOG(ERR, "Device must be stopped first!");
+ return -EBUSY;
}
+
+ PMD_DRV_LOG(INFO, "change queue pairs from %u to %u",
+ vf->vsi_res->num_queue_pairs, num_queue_pairs);
+ ret = i40evf_request_queues(dev, num_queue_pairs);
+ if (ret != 0)
+ return ret;
+
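+		/* The PF resets the VF as part of the queue-number change;
+		 * reset the device here so it comes back up with the new
+		 * resources.
+		 */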
+ ret = i40evf_dev_reset(dev);
+ if (ret != 0)
+ return ret;
}
return i40evf_init_vlan(dev);
i40evf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
struct i40e_rx_queue *rxq;
- int err = 0;
+ int err;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
PMD_INIT_FUNC_TRACE();
- if (rx_queue_id < dev->data->nb_rx_queues) {
- rxq = dev->data->rx_queues[rx_queue_id];
-
- err = i40e_alloc_rx_queue_mbufs(rxq);
- if (err) {
- PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
- return err;
- }
+ rxq = dev->data->rx_queues[rx_queue_id];
- rte_wmb();
+ err = i40e_alloc_rx_queue_mbufs(rxq);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
+ return err;
+ }
- /* Init the RX tail register. */
- I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
- I40EVF_WRITE_FLUSH(hw);
+ rte_wmb();
- /* Ready to switch the queue on */
- err = i40evf_switch_queue(dev, TRUE, rx_queue_id, TRUE);
+ /* Init the RX tail register. */
+ I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
+ I40EVF_WRITE_FLUSH(hw);
- if (err)
- PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
- rx_queue_id);
- else
- dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+ /* Ready to switch the queue on */
+ err = i40evf_switch_queue(dev, TRUE, rx_queue_id, TRUE);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
+ rx_queue_id);
+ return err;
}
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
- return err;
+ return 0;
}
static int
struct i40e_rx_queue *rxq;
int err;
- if (rx_queue_id < dev->data->nb_rx_queues) {
- rxq = dev->data->rx_queues[rx_queue_id];
-
- err = i40evf_switch_queue(dev, TRUE, rx_queue_id, FALSE);
+ rxq = dev->data->rx_queues[rx_queue_id];
- if (err) {
- PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
- rx_queue_id);
- return err;
- }
-
- i40e_rx_queue_release_mbufs(rxq);
- i40e_reset_rx_queue(rxq);
- dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+ err = i40evf_switch_queue(dev, TRUE, rx_queue_id, FALSE);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
+ rx_queue_id);
+ return err;
}
+ i40e_rx_queue_release_mbufs(rxq);
+ i40e_reset_rx_queue(rxq);
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
return 0;
}
static int
i40evf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
- int err = 0;
+ int err;
PMD_INIT_FUNC_TRACE();
- if (tx_queue_id < dev->data->nb_tx_queues) {
-
- /* Ready to switch the queue on */
- err = i40evf_switch_queue(dev, FALSE, tx_queue_id, TRUE);
-
- if (err)
- PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
- tx_queue_id);
- else
- dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+ /* Ready to switch the queue on */
+ err = i40evf_switch_queue(dev, FALSE, tx_queue_id, TRUE);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
+ tx_queue_id);
+ return err;
}
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
- return err;
+ return 0;
}
static int
struct i40e_tx_queue *txq;
int err;
- if (tx_queue_id < dev->data->nb_tx_queues) {
- txq = dev->data->tx_queues[tx_queue_id];
+ txq = dev->data->tx_queues[tx_queue_id];
- err = i40evf_switch_queue(dev, FALSE, tx_queue_id, FALSE);
-
- if (err) {
- PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
- tx_queue_id);
- return err;
- }
-
- i40e_tx_queue_release_mbufs(txq);
- i40e_reset_tx_queue(txq);
- dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+ err = i40evf_switch_queue(dev, FALSE, tx_queue_id, FALSE);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
+ tx_queue_id);
+ return err;
}
+ i40e_tx_queue_release_mbufs(txq);
+ i40e_reset_tx_queue(txq);
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
return 0;
}
* Check if the jumbo frame and maximum packet length are set correctly
*/
if (dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
- if (rxq->max_pkt_len <= ETHER_MAX_LEN ||
+ if (rxq->max_pkt_len <= RTE_ETHER_MAX_LEN ||
rxq->max_pkt_len > I40E_FRAME_SIZE_MAX) {
PMD_DRV_LOG(ERR, "maximum packet length must be "
"larger than %u and smaller than %u, as jumbo "
- "frame is enabled", (uint32_t)ETHER_MAX_LEN,
+ "frame is enabled", (uint32_t)RTE_ETHER_MAX_LEN,
(uint32_t)I40E_FRAME_SIZE_MAX);
return I40E_ERR_CONFIG;
}
} else {
- if (rxq->max_pkt_len < ETHER_MIN_LEN ||
- rxq->max_pkt_len > ETHER_MAX_LEN) {
+ if (rxq->max_pkt_len < RTE_ETHER_MIN_LEN ||
+ rxq->max_pkt_len > RTE_ETHER_MAX_LEN) {
PMD_DRV_LOG(ERR, "maximum packet length must be "
"larger than %u and smaller than %u, as jumbo "
- "frame is disabled", (uint32_t)ETHER_MIN_LEN,
- (uint32_t)ETHER_MAX_LEN);
+ "frame is disabled",
+ (uint32_t)RTE_ETHER_MIN_LEN,
+ (uint32_t)RTE_ETHER_MAX_LEN);
return I40E_ERR_CONFIG;
}
}
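+	/* Enable scattered Rx when one mbuf data buffer cannot hold the
+	 * maximum frame.
+	 */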
if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
- (rxq->max_pkt_len + 2 * I40E_VLAN_TAG_SIZE) > buf_size) {
+ rxq->max_pkt_len > buf_size)
dev_data->scattered_rx = 1;
- }
return 0;
}
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint16_t interval =
- i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL, 0, 0);
+ i40e_calc_itr_interval(0, 0);
uint16_t msix_intr;
msix_intr = intr_handle->intr_vec[queue_id];
I40EVF_WRITE_FLUSH(hw);
- rte_intr_enable(&pci_dev->intr_handle);
-
return 0;
}
int next_begin = 0;
int begin = 0;
uint32_t len;
- struct ether_addr *addr;
+ struct rte_ether_addr *addr;
struct vf_cmd_info args;
do {
j = 0;
len = sizeof(struct virtchnl_ether_addr_list);
for (i = begin; i < I40E_NUM_MACADDR_MAX; i++, next_begin++) {
- if (is_zero_ether_addr(&dev->data->mac_addrs[i]))
+ if (rte_is_zero_ether_addr(&dev->data->mac_addrs[i]))
continue;
len += sizeof(struct virtchnl_ether_addr);
if (len >= I40E_AQ_BUF_SZ) {
for (i = begin; i < next_begin; i++) {
addr = &dev->data->mac_addrs[i];
- if (is_zero_ether_addr(addr))
+ if (rte_is_zero_ether_addr(addr))
continue;
rte_memcpy(list->list[j].addr, addr->addr_bytes,
sizeof(addr->addr_bytes));
goto err_mac;
}
- /* When a VF port is bound to VFIO-PCI, only miscellaneous interrupt
- * is mapped to VFIO vector 0 in i40evf_dev_init( ).
- * If previous VFIO interrupt mapping set in i40evf_dev_init( ) is
- * not cleared, it will fail when rte_intr_enable( ) tries to map Rx
- * queue interrupt to other VFIO vectors.
- * So clear uio/vfio intr/evevnfd first to avoid failure.
- */
- if (dev->data->dev_conf.intr_conf.rxq != 0) {
- rte_intr_disable(intr_handle);
+	/* Only enable the interrupt in Rx interrupt mode */
+ if (dev->data->dev_conf.intr_conf.rxq != 0)
rte_intr_enable(intr_handle);
- }
i40evf_enable_queues_intr(dev);
return -1;
}
-static void
+static int
i40evf_dev_stop(struct rte_eth_dev *dev)
{
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
PMD_INIT_FUNC_TRACE();
+ if (dev->data->dev_conf.intr_conf.rxq != 0)
+ rte_intr_disable(intr_handle);
+
if (hw->adapter_stopped == 1)
- return;
+ return 0;
i40evf_stop_queues(dev);
i40evf_disable_queues_intr(dev);
i40e_dev_clear_queues(dev);
i40evf_add_del_mc_addr_list(dev, vf->mc_addrs, vf->mc_addrs_num,
FALSE);
hw->adapter_stopped = 1;
+ dev->data->dev_started = 0;
+ return 0;
}
static int
new_link.link_speed = ETH_SPEED_NUM_40G;
break;
default:
- new_link.link_speed = ETH_SPEED_NUM_100M;
+ if (vf->link_up)
+ new_link.link_speed = ETH_SPEED_NUM_UNKNOWN;
+ else
+ new_link.link_speed = ETH_SPEED_NUM_NONE;
break;
}
/* full duplex only */
new_link.link_duplex = ETH_LINK_FULL_DUPLEX;
- new_link.link_status = vf->link_up ? ETH_LINK_UP :
- ETH_LINK_DOWN;
+ new_link.link_status = vf->link_up ? ETH_LINK_UP : ETH_LINK_DOWN;
new_link.link_autoneg =
!(dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED);
return rte_eth_linkstatus_set(dev, &new_link);
}
-static void
+static int
i40evf_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
- int ret;
-
- /* If enabled, just return */
- if (vf->promisc_unicast_enabled)
- return;
- ret = i40evf_config_promisc(dev, 1, vf->promisc_multicast_enabled);
- if (ret == 0)
- vf->promisc_unicast_enabled = TRUE;
+ return i40evf_config_promisc(dev, true, vf->promisc_multicast_enabled);
}
-static void
+static int
i40evf_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
- int ret;
-
- /* If disabled, just return */
- if (!vf->promisc_unicast_enabled)
- return;
- ret = i40evf_config_promisc(dev, 0, vf->promisc_multicast_enabled);
- if (ret == 0)
- vf->promisc_unicast_enabled = FALSE;
+ return i40evf_config_promisc(dev, false, vf->promisc_multicast_enabled);
}
-static void
+static int
i40evf_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
- int ret;
- /* If enabled, just return */
- if (vf->promisc_multicast_enabled)
- return;
-
- ret = i40evf_config_promisc(dev, vf->promisc_unicast_enabled, 1);
- if (ret == 0)
- vf->promisc_multicast_enabled = TRUE;
+ return i40evf_config_promisc(dev, vf->promisc_unicast_enabled, true);
}
-static void
+static int
i40evf_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
- int ret;
-
- /* If enabled, just return */
- if (!vf->promisc_multicast_enabled)
- return;
- ret = i40evf_config_promisc(dev, vf->promisc_unicast_enabled, 0);
- if (ret == 0)
- vf->promisc_multicast_enabled = FALSE;
+ return i40evf_config_promisc(dev, vf->promisc_unicast_enabled, false);
}
-static void
+static int
i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
- memset(dev_info, 0, sizeof(*dev_info));
- dev_info->max_rx_queues = vf->vsi_res->num_queue_pairs;
- dev_info->max_tx_queues = vf->vsi_res->num_queue_pairs;
+ dev_info->max_rx_queues = I40E_MAX_QP_NUM_PER_VF;
+ dev_info->max_tx_queues = I40E_MAX_QP_NUM_PER_VF;
dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
+ dev_info->max_mtu = dev_info->max_rx_pktlen - I40E_ETH_OVERHEAD;
+ dev_info->min_mtu = RTE_ETHER_MIN_MTU;
dev_info->hash_key_size = (I40E_VFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
dev_info->reta_size = ETH_RSS_RETA_SIZE_64;
dev_info->flow_type_rss_offloads = vf->adapter->flow_types_mask;
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM |
DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_RX_OFFLOAD_CRC_STRIP |
- DEV_RX_OFFLOAD_SCATTER;
+ DEV_RX_OFFLOAD_SCATTER |
+ DEV_RX_OFFLOAD_JUMBO_FRAME |
+ DEV_RX_OFFLOAD_VLAN_FILTER;
dev_info->tx_queue_offload_capa = 0;
dev_info->tx_offload_capa =
DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
DEV_TX_OFFLOAD_GRE_TNL_TSO |
DEV_TX_OFFLOAD_IPIP_TNL_TSO |
- DEV_TX_OFFLOAD_GENEVE_TNL_TSO;
+ DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
+ DEV_TX_OFFLOAD_MULTI_SEGS;
dev_info->default_rxconf = (struct rte_eth_rxconf) {
.rx_thresh = {
},
.tx_free_thresh = I40E_DEFAULT_TX_FREE_THRESH,
.tx_rs_thresh = I40E_DEFAULT_TX_RSBIT_THRESH,
- .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
- ETH_TXQ_FLAGS_NOOFFLOADS,
.offloads = 0,
};
.nb_min = I40E_MIN_RING_DESC,
.nb_align = I40E_ALIGN_RING_DESC,
};
+
+ return 0;
}
static int
return ret;
}
-static void
+static int
i40evf_dev_close(struct rte_eth_dev *dev)
{
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
- struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ int ret;
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ ret = i40evf_dev_stop(dev);
- i40evf_dev_stop(dev);
i40e_dev_free_queues(dev);
- i40evf_reset_vf(hw);
- i40e_shutdown_adminq(hw);
- /* disable uio intr before callback unregister */
- rte_intr_disable(intr_handle);
+	/*
+	 * Disable promiscuous mode before resetting the VF. This is a
+	 * workaround for operation with the kernel PF driver, not the
+	 * normal flow.
+	 */
+ if (vf->promisc_unicast_enabled || vf->promisc_multicast_enabled)
+ i40evf_config_promisc(dev, false, false);
- /* unregister callback func from eal lib */
- rte_intr_callback_unregister(intr_handle,
- i40evf_dev_interrupt_handler, dev);
+ rte_eal_alarm_cancel(i40evf_dev_alarm_handler, dev);
+
+ i40evf_reset_vf(dev);
+ i40e_shutdown_adminq(hw);
i40evf_disable_irq0(hw);
+
+ rte_free(vf->vf_res);
+ vf->vf_res = NULL;
+ rte_free(vf->aq_resp);
+ vf->aq_resp = NULL;
+
+ hw->adapter_closed = 1;
+ return ret;
}
/*
struct i40e_hw *hw = I40E_VF_TO_HW(vf);
struct rte_eth_rss_conf rss_conf;
uint32_t i, j, lut = 0, nb_q = (I40E_VFQF_HLUT_MAX_INDEX + 1) * 4;
+ uint32_t rss_lut_size = (I40E_VFQF_HLUT1_MAX_INDEX + 1) * 4;
uint16_t num;
+ uint8_t *lut_info;
+ int ret;
if (vf->dev_data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) {
i40evf_disable_rss(vf);
num = RTE_MIN(vf->dev_data->nb_rx_queues, I40E_MAX_QP_NUM_PER_VF);
/* Fill out the look up table */
- for (i = 0, j = 0; i < nb_q; i++, j++) {
- if (j >= num)
- j = 0;
- lut = (lut << 8) | j;
- if ((i & 3) == 3)
- I40E_WRITE_REG(hw, I40E_VFQF_HLUT(i >> 2), lut);
+ if (!(vf->flags & I40E_FLAG_RSS_AQ_CAPABLE)) {
+ for (i = 0, j = 0; i < nb_q; i++, j++) {
+ if (j >= num)
+ j = 0;
+ lut = (lut << 8) | j;
+ if ((i & 3) == 3)
+ I40E_WRITE_REG(hw, I40E_VFQF_HLUT(i >> 2), lut);
+ }
+ } else {
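+		/* An RSS-AQ capable VF must program the lookup table through
+		 * an admin queue message instead of the VFQF_HLUT registers.
+		 */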
+ lut_info = rte_zmalloc("i40e_rss_lut", rss_lut_size, 0);
+ if (!lut_info) {
+ PMD_DRV_LOG(ERR, "No memory can be allocated");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < rss_lut_size; i++)
+ lut_info[i] = i % vf->num_queue_pairs;
+
+ ret = i40evf_set_rss_lut(&vf->vsi, lut_info,
+ rss_lut_size);
+ rte_free(lut_info);
+ if (ret)
+ return ret;
}
rss_conf = vf->dev_data->dev_conf.rx_adv_conf.rss_conf;
int ret = 0;
/* check if mtu is within the allowed range */
- if ((mtu < ETHER_MIN_MTU) || (frame_size > I40E_FRAME_SIZE_MAX))
+ if (mtu < RTE_ETHER_MIN_MTU || frame_size > I40E_FRAME_SIZE_MAX)
return -EINVAL;
/* mtu setting is forbidden if port is start */
return -EBUSY;
}
- if (frame_size > ETHER_MAX_LEN)
+ if (frame_size > RTE_ETHER_MAX_LEN)
dev_data->dev_conf.rxmode.offloads |=
DEV_RX_OFFLOAD_JUMBO_FRAME;
else
static int
i40evf_set_default_mac_addr(struct rte_eth_dev *dev,
- struct ether_addr *mac_addr)
+ struct rte_ether_addr *mac_addr)
{
- struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- if (!is_valid_assigned_ether_addr(mac_addr)) {
+ if (!rte_is_valid_assigned_ether_addr(mac_addr)) {
PMD_DRV_LOG(ERR, "Tried to set invalid MAC address.");
return -EINVAL;
}
- if (vf->flags & I40E_FLAG_VF_MAC_BY_PF)
- return -EPERM;
-
- i40evf_del_mac_addr_by_addr(dev, (struct ether_addr *)hw->mac.addr);
+ i40evf_del_mac_addr_by_addr(dev, (struct rte_ether_addr *)hw->mac.addr);
if (i40evf_add_mac_addr(dev, mac_addr, 0, 0) != 0)
return -EIO;
- ether_addr_copy(mac_addr, (struct ether_addr *)hw->mac.addr);
+ rte_ether_addr_copy(mac_addr, (struct rte_ether_addr *)hw->mac.addr);
return 0;
}
static int
i40evf_add_del_mc_addr_list(struct rte_eth_dev *dev,
- struct ether_addr *mc_addrs,
+ struct rte_ether_addr *mc_addrs,
uint32_t mc_addrs_num, bool add)
{
struct virtchnl_ether_addr_list *list;
}
static int
-i40evf_set_mc_addr_list(struct rte_eth_dev *dev, struct ether_addr *mc_addrs,
+i40evf_set_mc_addr_list(struct rte_eth_dev *dev,
+ struct rte_ether_addr *mc_addrs,
uint32_t mc_addrs_num)
{
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
return 0;
}
+
+bool
+is_i40evf_supported(struct rte_eth_dev *dev)
+{
+ return is_device_supported(dev, &rte_i40evf_pmd);
+}