X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fi40e%2Fi40e_ethdev_vf.c;h=1149978055161157f99ea2695f6fd60609b5ddf6;hb=2551ed46ff379679dbf0a733a65f4c650d29c120;hp=91df13bd308479b03745e6023b07cddc0e9e75f6;hpb=09419f235e099ecb265a590778fe64a685a2a241;p=dpdk.git

diff --git a/drivers/net/i40e/i40e_ethdev_vf.c b/drivers/net/i40e/i40e_ethdev_vf.c
index 91df13bd30..1149978055 100644
--- a/drivers/net/i40e/i40e_ethdev_vf.c
+++ b/drivers/net/i40e/i40e_ethdev_vf.c
@@ -1,7 +1,7 @@
 /*-
  *   BSD LICENSE
  *
- *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
  *   All rights reserved.
  *
  *   Redistribution and use in source and binary forms, with or without
@@ -111,7 +111,10 @@ static int i40evf_dev_link_update(struct rte_eth_dev *dev,
 static void i40evf_dev_stats_get(struct rte_eth_dev *dev,
 				struct rte_eth_stats *stats);
 static int i40evf_dev_xstats_get(struct rte_eth_dev *dev,
-				 struct rte_eth_xstats *xstats, unsigned n);
+				 struct rte_eth_xstat *xstats, unsigned n);
+static int i40evf_dev_xstats_get_names(struct rte_eth_dev *dev,
+				       struct rte_eth_xstat_name *xstats_names,
+				       unsigned limit);
 static void i40evf_dev_xstats_reset(struct rte_eth_dev *dev);
 static int i40evf_vlan_filter_set(struct rte_eth_dev *dev,
 				  uint16_t vlan_id, int on);
@@ -123,8 +126,6 @@ static void i40evf_dev_promiscuous_enable(struct rte_eth_dev *dev);
 static void i40evf_dev_promiscuous_disable(struct rte_eth_dev *dev);
 static void i40evf_dev_allmulticast_enable(struct rte_eth_dev *dev);
 static void i40evf_dev_allmulticast_disable(struct rte_eth_dev *dev);
-static int i40evf_get_link_status(struct rte_eth_dev *dev,
-				  struct rte_eth_link *link);
 static int i40evf_init_vlan(struct rte_eth_dev *dev);
 static int i40evf_dev_rx_queue_start(struct rte_eth_dev *dev,
 				     uint16_t rx_queue_id);
@@ -175,11 +176,11 @@ static const struct rte_i40evf_xstats_name_off rte_i40evf_stats_strings[] = {
 	{"rx_unknown_protocol_packets", offsetof(struct i40e_eth_stats,
 		rx_unknown_protocol)},
 	{"tx_bytes", offsetof(struct i40e_eth_stats, tx_bytes)},
-	{"tx_unicast_packets", offsetof(struct i40e_eth_stats, tx_bytes)},
-	{"tx_multicast_packets", offsetof(struct i40e_eth_stats, tx_bytes)},
-	{"tx_broadcast_packets", offsetof(struct i40e_eth_stats, tx_bytes)},
-	{"tx_dropped_packets", offsetof(struct i40e_eth_stats, tx_bytes)},
-	{"tx_error_packets", offsetof(struct i40e_eth_stats, tx_bytes)},
+	{"tx_unicast_packets", offsetof(struct i40e_eth_stats, tx_unicast)},
+	{"tx_multicast_packets", offsetof(struct i40e_eth_stats, tx_multicast)},
+	{"tx_broadcast_packets", offsetof(struct i40e_eth_stats, tx_broadcast)},
+	{"tx_dropped_packets", offsetof(struct i40e_eth_stats, tx_discards)},
+	{"tx_error_packets", offsetof(struct i40e_eth_stats, tx_errors)},
 };
 
 #define I40EVF_NB_XSTATS (sizeof(rte_i40evf_stats_strings) / \
@@ -196,6 +197,7 @@ static const struct eth_dev_ops i40evf_eth_dev_ops = {
 	.link_update = i40evf_dev_link_update,
 	.stats_get = i40evf_dev_stats_get,
 	.xstats_get = i40evf_dev_xstats_get,
+	.xstats_get_names = i40evf_dev_xstats_get_names,
 	.xstats_reset = i40evf_dev_xstats_reset,
 	.dev_close = i40evf_dev_close,
 	.dev_infos_get = i40evf_dev_info_get,
@@ -214,6 +216,9 @@ static const struct eth_dev_ops i40evf_eth_dev_ops = {
 	.rx_descriptor_done = i40e_dev_rx_descriptor_done,
 	.tx_queue_setup = i40e_dev_tx_queue_setup,
 	.tx_queue_release = i40e_dev_tx_queue_release,
+	.rx_queue_count = i40e_dev_rx_queue_count,
+	.rxq_info_get = i40e_rxq_info_get,
+	.txq_info_get = i40e_txq_info_get,
 	.mac_addr_add = i40evf_add_mac_addr,
 	.mac_addr_remove = i40evf_del_mac_addr,
 	.reta_update = i40evf_dev_rss_reta_update,
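The three callbacks added to the ops table above are reached through the generic ethdev queue-introspection API. For reference only (not part of the patch), a minimal sketch of how an application could exercise them, assuming the rte_ethdev API of this DPDK generation where port ids are still 8-bit; dump_queue_info() is a hypothetical helper:

#include <stdio.h>
#include <rte_ethdev.h>

/* Hypothetical helper: inspect queue 0 of a port through the new callbacks. */
static void dump_queue_info(uint8_t port_id)
{
	struct rte_eth_rxq_info rxq;
	struct rte_eth_txq_info txq;
	int used;

	if (rte_eth_rx_queue_info_get(port_id, 0, &rxq) == 0)
		printf("rxq0: %u descriptors, scattered rx: %u\n",
		       rxq.nb_desc, rxq.scattered_rx);
	if (rte_eth_tx_queue_info_get(port_id, 0, &txq) == 0)
		printf("txq0: %u descriptors\n", txq.nb_desc);

	/* Descriptors currently holding received packets. */
	used = rte_eth_rx_queue_count(port_id, 0);
	if (used >= 0)
		printf("rxq0: %d descriptors in use\n", used);
}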
@@ -258,6 +263,8 @@ i40evf_read_pfmsg(struct rte_eth_dev *dev, struct i40evf_arq_msg_info *data)
 		case I40E_VIRTCHNL_EVENT_LINK_CHANGE:
 			vf->link_up =
 				vpe->event_data.link_event.link_status;
+			vf->link_speed =
+				vpe->event_data.link_event.link_speed;
 			vf->pend_msg |= PFMSG_LINK_CHANGE;
 			PMD_DRV_LOG(INFO, "Link status update:%s",
 					vf->link_up ? "up" : "down");
@@ -325,8 +332,7 @@ i40evf_execute_vf_cmd(struct rte_eth_dev *dev, struct vf_cmd_info *args)
 	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
 	struct i40evf_arq_msg_info info;
 	enum i40evf_aq_result ret;
-	int err = -1;
-	int i = 0;
+	int err, i = 0;
 
 	if (_atomic_set_cmd(vf, args->ops))
 		return -1;
@@ -347,19 +353,19 @@ i40evf_execute_vf_cmd(struct rte_eth_dev *dev, struct vf_cmd_info *args)
 	switch (args->ops) {
 	case I40E_VIRTCHNL_OP_RESET_VF:
 		/*no need to process in this function */
+		err = 0;
 		break;
 	case I40E_VIRTCHNL_OP_VERSION:
 	case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
 		/* for init adminq commands, need to poll the response */
+		err = -1;
 		do {
 			ret = i40evf_read_pfmsg(dev, &info);
 			if (ret == I40EVF_MSG_CMD) {
 				err = 0;
 				break;
-			} else if (ret == I40EVF_MSG_ERR) {
-				err = -1;
+			} else if (ret == I40EVF_MSG_ERR)
 				break;
-			}
 			rte_delay_ms(ASQ_DELAY_MS);
 			/* If don't read msg or read sys event, continue */
 		} while (i++ < MAX_TRY_TIMES);
@@ -368,6 +374,7 @@
 	default:
 		/* for other adminq in running time, waiting the cmd done flag */
+		err = -1;
 		do {
 			if (vf->pend_cmd == I40E_VIRTCHNL_OP_UNKNOWN) {
 				err = 0;
 				break;
@@ -536,7 +543,7 @@ i40evf_config_vlan_pvid(struct rte_eth_dev *dev,
 	struct vf_cmd_info args;
 	struct i40e_virtchnl_pvid_info tpid_info;
 
-	if (dev == NULL || info == NULL) {
+	if (info == NULL) {
 		PMD_DRV_LOG(ERR, "invalid parameters");
 		return I40E_ERR_PARAM;
 	}
@@ -711,7 +718,8 @@ i40evf_config_irq_map(struct rte_eth_dev *dev)
 	uint8_t cmd_buffer[sizeof(struct i40e_virtchnl_irq_map_info) + \
 		sizeof(struct i40e_virtchnl_vector_map)];
 	struct i40e_virtchnl_irq_map_info *map_info;
-	struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+	struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
 	uint32_t vector_id;
 	int i, err;
 
@@ -945,7 +953,7 @@ i40evf_update_stats(struct rte_eth_dev *dev, struct i40e_eth_stats **pstats)
 }
 
 static int
-i40evf_get_statics(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+i40evf_get_statistics(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 {
 	int ret;
 	struct i40e_eth_stats *pstats = NULL;
@@ -979,8 +987,23 @@ i40evf_dev_xstats_reset(struct rte_eth_dev *dev)
 	vf->vsi.eth_stats_offset = vf->vsi.eth_stats;
 }
 
+static int i40evf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
+				       struct rte_eth_xstat_name *xstats_names,
+				       __rte_unused unsigned limit)
+{
+	unsigned i;
+
+	if (xstats_names != NULL)
+		for (i = 0; i < I40EVF_NB_XSTATS; i++) {
+			snprintf(xstats_names[i].name,
+				sizeof(xstats_names[i].name),
+				"%s", rte_i40evf_stats_strings[i].name);
+		}
+	return I40EVF_NB_XSTATS;
+}
+
 static int i40evf_dev_xstats_get(struct rte_eth_dev *dev,
-				 struct rte_eth_xstats *xstats, unsigned n)
+				 struct rte_eth_xstat *xstats, unsigned n)
 {
 	int ret;
 	unsigned i;
@@ -998,8 +1021,7 @@ static int i40evf_dev_xstats_get(struct rte_eth_dev *dev,
 
 	/* loop over xstats array and values from pstats */
 	for (i = 0; i < I40EVF_NB_XSTATS; i++) {
-		snprintf(xstats[i].name, sizeof(xstats[i].name),
-			 "%s", rte_i40evf_stats_strings[i].name);
+		xstats[i].id = i;
 		xstats[i].value = *(uint64_t *)(((char *)pstats) +
 			rte_i40evf_stats_strings[i].offset);
 	}
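With this change rte_eth_xstat carries only an id/value pair, while the names come from the separate xstats_get_names callback, so an application pairs the two arrays itself. A minimal sketch, not part of the patch, assuming the generic rte_eth_xstats_* API of this release (dump_xstats() is a hypothetical helper):

#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <rte_ethdev.h>

/* Hypothetical helper: print every extended statistic of a port by name. */
static void dump_xstats(uint8_t port_id)
{
	struct rte_eth_xstat_name *names;
	struct rte_eth_xstat *values;
	int i, nb;

	nb = rte_eth_xstats_get_names(port_id, NULL, 0);
	if (nb <= 0)
		return;

	names = calloc(nb, sizeof(*names));
	values = calloc(nb, sizeof(*values));
	if (names != NULL && values != NULL &&
	    rte_eth_xstats_get_names(port_id, names, nb) == nb &&
	    rte_eth_xstats_get(port_id, values, nb) == nb) {
		for (i = 0; i < nb; i++)
			/* values[i].id indexes into the name table. */
			printf("%s: %" PRIu64 "\n",
			       names[values[i].id].name, values[i].value);
	}
	free(names);
	free(values);
}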
@@ -1061,35 +1083,13 @@ i40evf_del_vlan(struct rte_eth_dev *dev, uint16_t vlanid)
 	return err;
 }
 
-static int
-i40evf_get_link_status(struct rte_eth_dev *dev, struct rte_eth_link *link)
-{
-	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
-	int err;
-	struct vf_cmd_info args;
-	struct rte_eth_link *new_link;
-
-	args.ops = (enum i40e_virtchnl_ops)I40E_VIRTCHNL_OP_GET_LINK_STAT;
-	args.in_args = NULL;
-	args.in_args_size = 0;
-	args.out_buffer = vf->aq_resp;
-	args.out_size = I40E_AQ_BUF_SZ;
-	err = i40evf_execute_vf_cmd(dev, &args);
-	if (err) {
-		PMD_DRV_LOG(ERR, "fail to execute command OP_GET_LINK_STAT");
-		return err;
-	}
-
-	new_link = (struct rte_eth_link *)args.out_buffer;
-	(void)rte_memcpy(link, new_link, sizeof(*link));
-
-	return 0;
-}
-
 static const struct rte_pci_id pci_id_i40evf_map[] = {
-#define RTE_PCI_DEV_ID_DECL_I40EVF(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
-#include "rte_pci_dev_ids.h"
-{ .vendor_id = 0, /* sentinel */ },
+	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF) },
+	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF_HV) },
+	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_A0_VF) },
+	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_VF) },
+	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_VF_HV) },
+	{ .vendor_id = 0, /* sentinel */ },
 };
 
 static inline int
@@ -1310,14 +1310,17 @@ i40evf_handle_pf_event(__rte_unused struct rte_eth_dev *dev,
 {
 	struct i40e_virtchnl_pf_event *pf_msg =
 			(struct i40e_virtchnl_pf_event *)msg;
+	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
 
 	switch (pf_msg->event) {
 	case I40E_VIRTCHNL_EVENT_RESET_IMPENDING:
 		PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_RESET_IMPENDING event\n");
-		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET);
+		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET, NULL);
 		break;
 	case I40E_VIRTCHNL_EVENT_LINK_CHANGE:
 		PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_LINK_CHANGE event\n");
+		vf->link_up = pf_msg->event_data.link_event.link_status;
+		vf->link_speed = pf_msg->event_data.link_event.link_speed;
 		break;
 	case I40E_VIRTCHNL_EVENT_PF_DRIVER_CLOSE:
 		PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_PF_DRIVER_CLOSE event\n");
@@ -1334,8 +1337,9 @@ i40evf_handle_aq_msg(struct rte_eth_dev *dev)
 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
 	struct i40e_arq_event_info info;
-	struct i40e_virtchnl_msg *v_msg;
-	uint16_t pending, opcode;
+	uint16_t pending, aq_opc;
+	enum i40e_virtchnl_ops msg_opc;
+	enum i40e_status_code msg_ret;
 	int ret;
 
 	info.buf_len = I40E_AQ_BUF_SZ;
@@ -1344,7 +1348,6 @@
 		return;
 	}
 	info.msg_buf = vf->aq_resp;
-	v_msg = (struct i40e_virtchnl_msg *)&info.desc;
 
 	pending = 1;
 	while (pending) {
@@ -1355,32 +1358,39 @@
 				"ret: %d", ret);
 			break;
 		}
-		opcode = rte_le_to_cpu_16(info.desc.opcode);
-
-		switch (opcode) {
+		aq_opc = rte_le_to_cpu_16(info.desc.opcode);
+		/* For the message sent from pf to vf, opcode is stored in
+		 * cookie_high of struct i40e_aq_desc, while return error code
+		 * are stored in cookie_low, Which is done by
+		 * i40e_aq_send_msg_to_vf in PF driver.*/
+		msg_opc = (enum i40e_virtchnl_ops)rte_le_to_cpu_32(
+						info.desc.cookie_high);
+		msg_ret = (enum i40e_status_code)rte_le_to_cpu_32(
+						info.desc.cookie_low);
+		switch (aq_opc) {
 		case i40e_aqc_opc_send_msg_to_vf:
-			if (v_msg->v_opcode == I40E_VIRTCHNL_OP_EVENT)
+			if (msg_opc == I40E_VIRTCHNL_OP_EVENT)
 				/* process event*/
 				i40evf_handle_pf_event(dev, info.msg_buf,
 						       info.msg_len);
 			else {
 				/* read message and it's expected one */
-				if (v_msg->v_opcode == vf->pend_cmd) {
-					vf->cmd_retval = v_msg->v_retval;
+				if (msg_opc == vf->pend_cmd) {
+					vf->cmd_retval = msg_ret;
 					/* prevent compiler reordering */
 					rte_compiler_barrier();
 					_clear_cmd(vf);
 				} else
 					PMD_DRV_LOG(ERR, "command mismatch,"
 						"expect %u, get %u",
-						vf->pend_cmd, v_msg->v_opcode);
+						vf->pend_cmd, msg_opc);
 				PMD_DRV_LOG(DEBUG, "adminq response is received,"
-					" opcode = %d\n", v_msg->v_opcode);
+					" opcode = %d\n", msg_opc);
 			}
 			break;
 		default:
 			PMD_DRV_LOG(ERR, "Request %u is not supported yet",
-				    opcode);
+				    aq_opc);
 			break;
 		}
 	}
 }
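The hunk above spells out where the PF hides the virtchnl opcode and its return code: cookie_high and cookie_low of the admin-queue descriptor. A standalone restatement of that decode step for reference (illustrative only, as it would sit inside this driver file where the i40e and rte_byteorder headers are already available; i40evf_decode_pf_msg is a hypothetical name, while the descriptor fields and byte-order helper are the same ones used in the patch):

/* Hypothetical helper mirroring the decode added to i40evf_handle_aq_msg(). */
static void
i40evf_decode_pf_msg(const struct i40e_aq_desc *desc,
		     enum i40e_virtchnl_ops *opcode,
		     enum i40e_status_code *retval)
{
	/* The PF places the virtchnl opcode in cookie_high and its
	 * return code in cookie_low when it sends a message to the VF. */
	*opcode = (enum i40e_virtchnl_ops)rte_le_to_cpu_32(desc->cookie_high);
	*retval = (enum i40e_status_code)rte_le_to_cpu_32(desc->cookie_low);
}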
@@ -1399,7 +1409,7 @@
  * void
  */
 static void
-i40evf_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
+i40evf_dev_interrupt_handler(struct rte_intr_handle *intr_handle,
 			     void *param)
 {
 	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
@@ -1429,15 +1439,15 @@ i40evf_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
 
 done:
 	i40evf_enable_irq0(hw);
-	rte_intr_enable(&dev->pci_dev->intr_handle);
+	rte_intr_enable(intr_handle);
 }
 
 static int
 i40evf_dev_init(struct rte_eth_dev *eth_dev)
 {
-	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(\
-			eth_dev->data->dev_private);
-	struct rte_pci_device *pci_dev = eth_dev->pci_dev;
+	struct i40e_hw *hw
+		= I40E_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+	struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(eth_dev);
 
 	PMD_INIT_FUNC_TRACE();
 
@@ -1456,15 +1466,16 @@ i40evf_dev_init(struct rte_eth_dev *eth_dev)
 		return 0;
 	}
 
-	rte_eth_copy_pci_info(eth_dev, eth_dev->pci_dev);
+	rte_eth_copy_pci_info(eth_dev, pci_dev);
+	eth_dev->data->dev_flags = RTE_ETH_DEV_DETACHABLE;
 
-	hw->vendor_id = eth_dev->pci_dev->id.vendor_id;
-	hw->device_id = eth_dev->pci_dev->id.device_id;
-	hw->subsystem_vendor_id = eth_dev->pci_dev->id.subsystem_vendor_id;
-	hw->subsystem_device_id = eth_dev->pci_dev->id.subsystem_device_id;
-	hw->bus.device = eth_dev->pci_dev->addr.devid;
-	hw->bus.func = eth_dev->pci_dev->addr.function;
-	hw->hw_addr = (void *)eth_dev->pci_dev->mem_resource[0].addr;
+	hw->vendor_id = pci_dev->id.vendor_id;
+	hw->device_id = pci_dev->id.device_id;
+	hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
+	hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
+	hw->bus.device = pci_dev->addr.devid;
+	hw->bus.func = pci_dev->addr.function;
+	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
 	hw->adapter_stopped = 0;
 
 	if(i40evf_init_vf(eth_dev) != 0) {
@@ -1525,43 +1536,27 @@ i40evf_dev_uninit(struct rte_eth_dev *eth_dev)
  */
 static struct eth_driver rte_i40evf_pmd = {
 	.pci_drv = {
-		.name = "rte_i40evf_pmd",
 		.id_table = pci_id_i40evf_map,
-		.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_DETACHABLE,
+		.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+		.probe = rte_eth_dev_pci_probe,
+		.remove = rte_eth_dev_pci_remove,
 	},
 	.eth_dev_init = i40evf_dev_init,
 	.eth_dev_uninit = i40evf_dev_uninit,
 	.dev_private_size = sizeof(struct i40e_adapter),
 };
 
-/*
- * VF Driver initialization routine.
- * Invoked one at EAL init time.
- * Register itself as the [Virtual Poll Mode] Driver of PCI Fortville devices.
- */
-static int
-rte_i40evf_pmd_init(const char *name __rte_unused,
-		    const char *params __rte_unused)
-{
-	PMD_INIT_FUNC_TRACE();
-
-	rte_eth_driver_register(&rte_i40evf_pmd);
-
-	return 0;
-}
-
-static struct rte_driver rte_i40evf_driver = {
-	.type = PMD_PDEV,
-	.init = rte_i40evf_pmd_init,
-};
-
-PMD_REGISTER_DRIVER(rte_i40evf_driver);
+RTE_PMD_REGISTER_PCI(net_i40e_vf, rte_i40evf_pmd.pci_drv);
+RTE_PMD_REGISTER_PCI_TABLE(net_i40e_vf, pci_id_i40evf_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_i40e_vf, "* igb_uio | vfio");
 
 static int
 i40evf_dev_configure(struct rte_eth_dev *dev)
 {
 	struct i40e_adapter *ad =
 		I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	struct rte_eth_conf *conf = &dev->data->dev_conf;
+	struct i40e_vf *vf;
 
 	/* Initialize to TRUE. If any of Rx queues doesn't meet the bulk
 	 * allocation or vector Rx preconditions we will reset it.
@@ -1571,6 +1566,19 @@ i40evf_dev_configure(struct rte_eth_dev *dev)
 	ad->tx_simple_allowed = true;
 	ad->tx_vec_allowed = true;
 
+	/* For non-DPDK PF drivers, VF has no ability to disable HW
+	 * CRC strip, and is implicitly enabled by the PF.
+	 */
+	if (!conf->rxmode.hw_strip_crc) {
+		vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+		if ((vf->version_major == I40E_VIRTCHNL_VERSION_MAJOR) &&
+		    (vf->version_minor <= I40E_VIRTCHNL_VERSION_MINOR)) {
+			/* Peer is running non-DPDK PF driver. */
+			PMD_INIT_LOG(ERR, "VF can't disable HW CRC Strip");
+			return -EINVAL;
+		}
+	}
+
 	return i40evf_init_vlan(dev);
 }
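Because of the check added to i40evf_dev_configure() above, an application running this VF on top of a non-DPDK (kernel) PF has to leave hardware CRC stripping enabled when configuring the port. A minimal sketch, not part of the patch and assuming the rte_eth_conf layout of this release (configure_vf_port() is a hypothetical helper):

#include <rte_ethdev.h>

/* Hypothetical setup: keep hw_strip_crc on so i40evf_dev_configure()
 * does not reject the configuration with -EINVAL on a kernel PF. */
static int configure_vf_port(uint8_t port_id)
{
	struct rte_eth_conf conf = {
		.rxmode = {
			.hw_strip_crc = 1, /* required by the i40e VF here */
		},
	};

	/* One Rx queue and one Tx queue are enough for the example. */
	return rte_eth_dev_configure(port_id, 1, 1, &conf);
}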
@@ -1855,7 +1863,8 @@ i40evf_enable_queues_intr(struct rte_eth_dev *dev)
 {
 	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+	struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
 
 	if (!rte_intr_allow_others(intr_handle)) {
 		I40E_WRITE_REG(hw,
@@ -1887,7 +1896,8 @@ i40evf_disable_queues_intr(struct rte_eth_dev *dev)
 {
 	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+	struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
 
 	if (!rte_intr_allow_others(intr_handle)) {
 		I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTL01,
@@ -1913,7 +1923,8 @@
 static int
 i40evf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
 {
-	struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+	struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	uint16_t interval =
 		i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL);
@@ -1939,7 +1950,7 @@ i40evf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
 
 	I40EVF_WRITE_FLUSH(hw);
 
-	rte_intr_enable(&dev->pci_dev->intr_handle);
+	rte_intr_enable(&pci_dev->intr_handle);
 
 	return 0;
 }
@@ -1947,7 +1958,8 @@
 static int
 i40evf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
 {
-	struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+	struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	uint16_t msix_intr;
 
@@ -2027,7 +2039,8 @@ i40evf_dev_start(struct rte_eth_dev *dev)
 {
 	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+	struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
 	uint32_t intr_vector = 0;
 
 	PMD_INIT_FUNC_TRACE();
@@ -2092,7 +2105,8 @@ err_queue:
 static void
 i40evf_dev_stop(struct rte_eth_dev *dev)
 {
-	struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+	struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
 
 	PMD_INIT_FUNC_TRACE();
 
@@ -2121,14 +2135,33 @@ i40evf_dev_link_update(struct rte_eth_dev *dev,
 	 * DPDK pf host provide interfacet to acquire link status
 	 * while Linux driver does not
 	 */
-	if (vf->version_major == I40E_DPDK_VERSION_MAJOR)
-		i40evf_get_link_status(dev, &new_link);
-	else {
-		/* Always assume it's up, for Linux driver PF host */
-		new_link.link_duplex = ETH_LINK_AUTONEG_DUPLEX;
-		new_link.link_speed = ETH_LINK_SPEED_10000;
-		new_link.link_status = ETH_LINK_UP;
+
+	/* Linux driver PF host */
+	switch (vf->link_speed) {
+	case I40E_LINK_SPEED_100MB:
+		new_link.link_speed = ETH_SPEED_NUM_100M;
+		break;
+	case I40E_LINK_SPEED_1GB:
+		new_link.link_speed = ETH_SPEED_NUM_1G;
+		break;
+	case I40E_LINK_SPEED_10GB:
+		new_link.link_speed = ETH_SPEED_NUM_10G;
+		break;
+	case I40E_LINK_SPEED_20GB:
+		new_link.link_speed = ETH_SPEED_NUM_20G;
+		break;
+	case I40E_LINK_SPEED_40GB:
+		new_link.link_speed = ETH_SPEED_NUM_40G;
+		break;
+	default:
+		new_link.link_speed = ETH_SPEED_NUM_100M;
+		break;
 	}
+	/* full duplex only */
+	new_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	new_link.link_status = vf->link_up ? ETH_LINK_UP :
+					     ETH_LINK_DOWN;
+
 	i40evf_dev_atomic_write_link_status(dev, &new_link);
 
 	return 0;
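On the application side, the speed and status filled in by this switch are read back through the standard link query. A short sketch for reference, not part of the patch, assuming the rte_eth_link API of this release (print_vf_link() is a hypothetical helper):

#include <stdio.h>
#include <rte_ethdev.h>

/* Hypothetical helper: report the link state produced by link_update. */
static void print_vf_link(uint8_t port_id)
{
	struct rte_eth_link link;

	rte_eth_link_get_nowait(port_id, &link);
	if (link.link_status == ETH_LINK_UP)
		printf("port %u: up, %u Mbps, full duplex\n",
		       port_id, link.link_speed);
	else
		printf("port %u: down\n", port_id);
}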
@@ -2200,6 +2233,7 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
 
 	memset(dev_info, 0, sizeof(*dev_info));
+	dev_info->pci_dev = RTE_DEV_TO_PCI(dev->device);
 	dev_info->max_rx_queues = vf->vsi_res->num_queue_pairs;
 	dev_info->max_tx_queues = vf->vsi_res->num_queue_pairs;
 	dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
@@ -2260,15 +2294,16 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 static void
 i40evf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 {
-	if (i40evf_get_statics(dev, stats))
-		PMD_DRV_LOG(ERR, "Get statics failed");
+	if (i40evf_get_statistics(dev, stats))
+		PMD_DRV_LOG(ERR, "Get statistics failed");
 }
 
 static void
 i40evf_dev_close(struct rte_eth_dev *dev)
 {
 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	struct rte_pci_device *pci_dev = dev->pci_dev;
+	struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
 
 	i40evf_dev_stop(dev);
 	hw->adapter_stopped = 1;
@@ -2276,11 +2311,11 @@ i40evf_dev_close(struct rte_eth_dev *dev)
 	i40evf_reset_vf(hw);
 	i40e_shutdown_adminq(hw);
 	/* disable uio intr before callback unregister */
-	rte_intr_disable(&pci_dev->intr_handle);
+	rte_intr_disable(intr_handle);
 
 	/* unregister callback func from eal lib */
-	rte_intr_callback_unregister(&pci_dev->intr_handle,
-		i40evf_dev_interrupt_handler, (void *)dev);
+	rte_intr_callback_unregister(intr_handle,
+				     i40evf_dev_interrupt_handler, dev);
 	i40evf_disable_irq0(hw);
 }
 
@@ -2315,13 +2350,16 @@ i40evf_get_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
 static int
 i40evf_set_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
 {
-	struct i40e_vf *vf = I40E_VSI_TO_VF(vsi);
-	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+	struct i40e_vf *vf;
+	struct i40e_hw *hw;
 	int ret;
 
 	if (!vsi || !lut)
 		return -EINVAL;
 
+	vf = I40E_VSI_TO_VF(vsi);
+	hw = I40E_VSI_TO_HW(vsi);
+
 	if (vf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
 		ret = i40e_aq_set_rss_lut(hw, vsi->vsi_id, FALSE,
 					  lut, lut_size);
@@ -2499,8 +2537,11 @@ i40evf_hw_rss_hash_set(struct i40e_vf *vf, struct rte_eth_rss_conf *rss_conf)
 	rss_hf = rss_conf->rss_hf;
 	hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_VFQF_HENA(0));
 	hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_VFQF_HENA(1))) << 32;
-	hena &= ~I40E_RSS_HENA_ALL;
-	hena |= i40e_config_hena(rss_hf);
+	if (hw->mac.type == I40E_MAC_X722)
+		hena &= ~I40E_RSS_HENA_ALL_X722;
+	else
+		hena &= ~I40E_RSS_HENA_ALL;
+	hena |= i40e_config_hena(rss_hf, hw->mac.type);
 	i40e_write_rx_ctl(hw, I40E_VFQF_HENA(0), (uint32_t)hena);
 	i40e_write_rx_ctl(hw, I40E_VFQF_HENA(1), (uint32_t)(hena >> 32));
 	I40EVF_WRITE_FLUSH(hw);
@@ -2516,7 +2557,10 @@ i40evf_disable_rss(struct i40e_vf *vf)
 
 	hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_VFQF_HENA(0));
 	hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_VFQF_HENA(1))) << 32;
-	hena &= ~I40E_RSS_HENA_ALL;
+	if (hw->mac.type == I40E_MAC_X722)
+		hena &= ~I40E_RSS_HENA_ALL_X722;
+	else
+		hena &= ~I40E_RSS_HENA_ALL;
 	i40e_write_rx_ctl(hw, I40E_VFQF_HENA(0), (uint32_t)hena);
 	i40e_write_rx_ctl(hw, I40E_VFQF_HENA(1), (uint32_t)(hena >> 32));
 	I40EVF_WRITE_FLUSH(hw);
@@ -2577,7 +2621,9 @@ i40evf_dev_rss_hash_update(struct rte_eth_dev *dev,
 
 	hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_VFQF_HENA(0));
 	hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_VFQF_HENA(1))) << 32;
-	if (!(hena & I40E_RSS_HENA_ALL)) { /* RSS disabled */
+	if (!(hena & ((hw->mac.type == I40E_MAC_X722)
+		? I40E_RSS_HENA_ALL_X722
+		: I40E_RSS_HENA_ALL))) { /* RSS disabled */
 		if (rss_hf != 0) /* Enable RSS */
 			return -EINVAL;
 		return 0;
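The hena manipulation in these last hunks is what services the generic RSS configuration call. A closing sketch for reference, not part of the patch, of how an application would request a hash profile that this VF path then translates into VFQF_HENA bits (enable_vf_rss() is a hypothetical helper and the selected flags are only an example):

#include <rte_ethdev.h>

/* Hypothetical helper: enable IP/TCP/UDP hashing on the VF port. */
static int enable_vf_rss(uint8_t port_id)
{
	struct rte_eth_rss_conf rss_conf = {
		.rss_key = NULL, /* keep whatever key is already programmed */
		.rss_hf = ETH_RSS_IP | ETH_RSS_TCP | ETH_RSS_UDP,
	};

	return rte_eth_dev_rss_hash_update(port_id, &rss_conf);
}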