#include "vnic_enet.h"
#include "enic.h"
-int enic_pmd_logtype;
-
/*
* The set of PCI devices this driver supports
*/
#define ENIC_DEVARG_GENEVE_OPT "geneve-opt"
#define ENIC_DEVARG_IG_VLAN_REWRITE "ig-vlan-rewrite"
-RTE_INIT(enicpmd_init_log)
-{
- enic_pmd_logtype = rte_log_register("pmd.net.enic");
- if (enic_pmd_logtype >= 0)
- rte_log_set_level(enic_pmd_logtype, RTE_LOG_INFO);
-}
+RTE_LOG_REGISTER(enic_pmd_logtype, pmd.net.enic, INFO);
static int
enicpmd_fdir_ctrl_func(struct rte_eth_dev *eth_dev,
enic->ig_vlan_strip_en = 0;
}
- if ((mask & ETH_VLAN_FILTER_MASK) &&
- (offloads & DEV_RX_OFFLOAD_VLAN_FILTER)) {
- dev_warning(enic,
- "Configuration of VLAN filter is not supported\n");
- }
-
- if ((mask & ETH_VLAN_EXTEND_MASK) &&
- (offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)) {
- dev_warning(enic,
- "Configuration of extended VLAN is not supported\n");
- }
-
return enic_set_vlan_strip(enic);
}
return ret;
}
+ if (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
+ eth_dev->data->dev_conf.rxmode.offloads |=
+ DEV_RX_OFFLOAD_RSS_HASH;
+
enic->mc_count = 0;
enic->hw_ip_checksum = !!(eth_dev->data->dev_conf.rxmode.offloads &
DEV_RX_OFFLOAD_CHECKSUM);
/* 1300 and later models are at least 40G */
if (id >= 0x0100)
return ETH_LINK_SPEED_40G;
+ /* VFs have subsystem id 0, check device id */
+ if (id == 0) {
+ /* Newer VF implies at least 40G model */
+ if (pdev->id.device_id == PCI_DEVICE_ID_CISCO_VIC_ENET_SN)
+ return ETH_LINK_SPEED_40G;
+ }
return ETH_LINK_SPEED_10G;
}
ENICPMD_FUNC_TRACE();
sop_queue_idx = enic_rte_rq_idx_to_sop_idx(rx_queue_id);
- data_queue_idx = enic_rte_rq_idx_to_data_idx(rx_queue_id);
+ data_queue_idx = enic_rte_rq_idx_to_data_idx(rx_queue_id, enic);
rq_sop = &enic->rq[sop_queue_idx];
rq_data = &enic->rq[data_queue_idx]; /* valid if data_queue_enable */
qinfo->mp = rq_sop->mp;
/* tx_thresh, and all the other fields are not applicable for enic */
}
+/*
+ * Report the active Rx burst function as a human-readable string for
+ * rte_eth_rx_burst_mode_get().  queue_id is unused: all Rx queues of a
+ * port share the same burst handler.
+ *
+ * NOTE(review): the vector handler is detected via the
+ * use_noscatter_vec_rx_handler flag instead of comparing rx_pkt_burst --
+ * presumably because the AVX2 symbol is only compiled in conditionally;
+ * confirm against the vectorized Rx implementation.
+ *
+ * Returns 0 with mode->info filled in, or -EINVAL if the current
+ * rx_pkt_burst pointer matches none of the known implementations.
+ */
+static int enicpmd_dev_rx_burst_mode_get(struct rte_eth_dev *dev,
+ __rte_unused uint16_t queue_id,
+ struct rte_eth_burst_mode *mode)
+{
+ eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
+ struct enic *enic = pmd_priv(dev);
+ const char *info_str = NULL;
+ int ret = -EINVAL;
+
+ ENICPMD_FUNC_TRACE();
+ if (enic->use_noscatter_vec_rx_handler)
+ info_str = "Vector AVX2 No Scatter";
+ else if (pkt_burst == enic_noscatter_recv_pkts)
+ info_str = "Scalar No Scatter";
+ else if (pkt_burst == enic_recv_pkts)
+ info_str = "Scalar";
+ if (info_str) {
+ /* strlcpy bounds the copy and guarantees NUL termination */
+ strlcpy(mode->info, info_str, sizeof(mode->info));
+ ret = 0;
+ }
+ return ret;
+}
+
+/*
+ * Report the active Tx burst function as a human-readable string for
+ * rte_eth_tx_burst_mode_get().  queue_id is unused: all Tx queues of a
+ * port share the same burst handler.
+ *
+ * Returns 0 with mode->info filled in, or -EINVAL if the current
+ * tx_pkt_burst pointer matches none of the known implementations.
+ */
+static int enicpmd_dev_tx_burst_mode_get(struct rte_eth_dev *dev,
+ __rte_unused uint16_t queue_id,
+ struct rte_eth_burst_mode *mode)
+{
+ eth_tx_burst_t pkt_burst = dev->tx_pkt_burst;
+ const char *info_str = NULL;
+ int ret = -EINVAL;
+
+ ENICPMD_FUNC_TRACE();
+ if (pkt_burst == enic_simple_xmit_pkts)
+ info_str = "Scalar Simplified";
+ else if (pkt_burst == enic_xmit_pkts)
+ info_str = "Scalar";
+ if (info_str) {
+ /* strlcpy bounds the copy and guarantees NUL termination */
+ strlcpy(mode->info, info_str, sizeof(mode->info));
+ ret = 0;
+ }
+ return ret;
+}
+
static int enicpmd_dev_rx_queue_intr_enable(struct rte_eth_dev *eth_dev,
uint16_t rx_queue_id)
{
tnl->udp_port);
return -EINVAL;
}
- return update_vxlan_port(enic, ENIC_DEFAULT_VXLAN_PORT);
+ return update_vxlan_port(enic, RTE_VXLAN_DEFAULT_PORT);
}
static int enicpmd_dev_fw_version_get(struct rte_eth_dev *eth_dev,
.rx_queue_intr_disable = enicpmd_dev_rx_queue_intr_disable,
.rxq_info_get = enicpmd_dev_rxq_info_get,
.txq_info_get = enicpmd_dev_txq_info_get,
+ .rx_burst_mode_get = enicpmd_dev_rx_burst_mode_get,
+ .tx_burst_mode_get = enicpmd_dev_tx_burst_mode_get,
.dev_led_on = NULL,
.dev_led_off = NULL,
.flow_ctrl_get = NULL,