static char ver_str[QEDE_PMD_DRV_VER_STR_SIZE];
size_t size;
- if (fw_ver == NULL)
- return 0;
-
if (IS_PF(edev))
snprintf(ver_str, QEDE_PMD_DRV_VER_STR_SIZE, "%s",
QEDE_PMD_FW_VERSION);
static void qede_interrupt_action(struct ecore_hwfn *p_hwfn)
{
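+ /* Serialize the slow-path DPC with other holders of spq_lock. */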
+ OSAL_SPIN_LOCK(&p_hwfn->spq_lock);
ecore_int_sp_dpc((osal_int_ptr_t)(p_hwfn));
+ OSAL_SPIN_UNLOCK(&p_hwfn->spq_lock);
}
static void
}
use_tx_offload = !!(tx_offloads &
- (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | /* tunnel */
- DEV_TX_OFFLOAD_TCP_TSO | /* tso */
- DEV_TX_OFFLOAD_VLAN_INSERT)); /* vlan insert */
+ (RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | /* tunnel */
+ RTE_ETH_TX_OFFLOAD_TCP_TSO | /* tso */
+ RTE_ETH_TX_OFFLOAD_VLAN_INSERT)); /* vlan insert */
if (use_tx_offload) {
DP_INFO(edev, "Assigning qede_xmit_pkts\n");
struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
- if (mask & ETH_VLAN_STRIP_MASK) {
- if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+ if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+ if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
(void)qede_vlan_stripping(eth_dev, 1);
else
(void)qede_vlan_stripping(eth_dev, 0);
}
- if (mask & ETH_VLAN_FILTER_MASK) {
+ if (mask & RTE_ETH_VLAN_FILTER_MASK) {
/* VLAN filtering kicks in when a VLAN is added */
- if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
+ if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) {
qede_vlan_filter_set(eth_dev, 0, 1);
} else {
if (qdev->configured_vlans > 1) { /* Excluding VLAN0 */
/* Signal app that VLAN filtering is still
* enabled
*/
eth_dev->data->dev_conf.rxmode.offloads |=
- DEV_RX_OFFLOAD_VLAN_FILTER;
+ RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
} else {
qede_vlan_filter_set(eth_dev, 0, 0);
}
}
}
- if (mask & ETH_VLAN_EXTEND_MASK)
- DP_ERR(edev, "Extend VLAN not supported\n");
-
qdev->vlan_offload_mask = mask;
DP_INFO(edev, "VLAN offload mask %d\n", mask);
/* Configure default RETA */
memset(reta_conf, 0, sizeof(reta_conf));
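/* Every 64-entry RETA group is fully enabled via its mask, and the
 * indirection entries are assigned to Rx queues round-robin.
 */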
for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++)
- reta_conf[i / RTE_RETA_GROUP_SIZE].mask = UINT64_MAX;
+ reta_conf[i / RTE_ETH_RETA_GROUP_SIZE].mask = UINT64_MAX;
for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
- id = i / RTE_RETA_GROUP_SIZE;
- pos = i % RTE_RETA_GROUP_SIZE;
+ id = i / RTE_ETH_RETA_GROUP_SIZE;
+ pos = i % RTE_ETH_RETA_GROUP_SIZE;
q = i % QEDE_RSS_COUNT(eth_dev);
reta_conf[id].reta[pos] = q;
}
}
/* Configure TPA parameters */
- if (rxmode->offloads & DEV_RX_OFFLOAD_TCP_LRO) {
+ if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
if (qede_enable_tpa(eth_dev, true))
return -EINVAL;
/* Enable scatter mode for LRO */
if (!eth_dev->data->scattered_rx)
- rxmode->offloads |= DEV_RX_OFFLOAD_SCATTER;
+ rxmode->offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
}
/* Start queues */
* Also, we would like to retain similar behavior in PF case, so we
* don't do PF/VF specific check here.
*/
- if (eth_dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
+ if (eth_dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_RSS)
if (qede_config_rss(eth_dev))
goto err;
return -1; /* common error code is < 0 */
}
-static void qede_dev_stop(struct rte_eth_dev *eth_dev)
+static int qede_dev_stop(struct rte_eth_dev *eth_dev)
{
struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
PMD_INIT_FUNC_TRACE(edev);
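+ /* Mark the port as stopped before tearing down the datapath. */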
+ eth_dev->data->dev_started = 0;
/* Bring the link down */
qede_dev_set_link_state(eth_dev, false);
/* Disable vport */
if (qede_activate_vport(eth_dev, false))
- return;
+ return 0;
if (qdev->enable_lro)
qede_enable_tpa(eth_dev, false);
ecore_hw_stop_fastpath(edev); /* TBD - loop */
DP_INFO(edev, "Device is stopped\n");
+
+ return 0;
}
static const char * const valid_args[] = {
PMD_INIT_FUNC_TRACE(edev);
- if (rxmode->mq_mode & ETH_MQ_RX_RSS_FLAG)
- rxmode->offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+ if (rxmode->mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+ rxmode->offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
/* We need to have min 1 RX queue. There is no min check in
* rte_eth_dev_configure(), so we are checking it here.
DP_NOTICE(edev, false,
"Invalid devargs supplied, requested change will not take effect\n");
- if (!(rxmode->mq_mode == ETH_MQ_RX_NONE ||
- rxmode->mq_mode == ETH_MQ_RX_RSS)) {
+ if (!(rxmode->mq_mode == RTE_ETH_MQ_RX_NONE ||
+ rxmode->mq_mode == RTE_ETH_MQ_RX_RSS)) {
DP_ERR(edev, "Unsupported multi-queue mode\n");
return -ENOTSUP;
}
return -ENOMEM;
}
- /* If jumbo enabled adjust MTU */
- if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
- eth_dev->data->mtu =
- eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
- RTE_ETHER_HDR_LEN - QEDE_ETH_OVERHEAD;
-
- if (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER)
+ if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
eth_dev->data->scattered_rx = 1;
if (qede_start_vport(qdev, eth_dev->data->mtu))
qdev->mtu = eth_dev->data->mtu;
/* Enable VLAN offloads by default */
- ret = qede_vlan_offload_set(eth_dev, ETH_VLAN_STRIP_MASK |
- ETH_VLAN_FILTER_MASK);
+ ret = qede_vlan_offload_set(eth_dev, RTE_ETH_VLAN_STRIP_MASK |
+ RTE_ETH_VLAN_FILTER_MASK);
if (ret)
return ret;
dev_info->reta_size = ECORE_RSS_IND_TABLE_SIZE;
dev_info->hash_key_size = ECORE_RSS_KEY_SIZE * sizeof(uint32_t);
dev_info->flow_type_rss_offloads = (uint64_t)QEDE_RSS_OFFLOAD_ALL;
- dev_info->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_RX_OFFLOAD_TCP_LRO |
- DEV_RX_OFFLOAD_KEEP_CRC |
- DEV_RX_OFFLOAD_SCATTER |
- DEV_RX_OFFLOAD_JUMBO_FRAME |
- DEV_RX_OFFLOAD_VLAN_FILTER |
- DEV_RX_OFFLOAD_VLAN_STRIP |
- DEV_RX_OFFLOAD_RSS_HASH);
+ dev_info->rx_offload_capa = (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+ RTE_ETH_RX_OFFLOAD_TCP_LRO |
+ RTE_ETH_RX_OFFLOAD_KEEP_CRC |
+ RTE_ETH_RX_OFFLOAD_SCATTER |
+ RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+ RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+ RTE_ETH_RX_OFFLOAD_RSS_HASH);
dev_info->rx_queue_offload_capa = 0;
/* TX offloads are on a per-packet basis, so they are applicable
* at both port and queue levels.
*/
- dev_info->tx_offload_capa = (DEV_TX_OFFLOAD_VLAN_INSERT |
- DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM |
- DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_TX_OFFLOAD_MULTI_SEGS |
- DEV_TX_OFFLOAD_TCP_TSO |
- DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
- DEV_TX_OFFLOAD_GENEVE_TNL_TSO);
+ dev_info->tx_offload_capa = (RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+ RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+ RTE_ETH_TX_OFFLOAD_TCP_TSO |
+ RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO);
dev_info->tx_queue_offload_capa = dev_info->tx_offload_capa;
dev_info->default_txconf = (struct rte_eth_txconf) {
- .offloads = DEV_TX_OFFLOAD_MULTI_SEGS,
+ .offloads = RTE_ETH_TX_OFFLOAD_MULTI_SEGS,
};
dev_info->default_rxconf = (struct rte_eth_rxconf) {
memset(&link, 0, sizeof(struct qed_link_output));
qdev->ops->common->get_link(edev, &link);
if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
- speed_cap |= ETH_LINK_SPEED_1G;
+ speed_cap |= RTE_ETH_LINK_SPEED_1G;
if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
- speed_cap |= ETH_LINK_SPEED_10G;
+ speed_cap |= RTE_ETH_LINK_SPEED_10G;
if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
- speed_cap |= ETH_LINK_SPEED_25G;
+ speed_cap |= RTE_ETH_LINK_SPEED_25G;
if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
- speed_cap |= ETH_LINK_SPEED_40G;
+ speed_cap |= RTE_ETH_LINK_SPEED_40G;
if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
- speed_cap |= ETH_LINK_SPEED_50G;
+ speed_cap |= RTE_ETH_LINK_SPEED_50G;
if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
- speed_cap |= ETH_LINK_SPEED_100G;
+ speed_cap |= RTE_ETH_LINK_SPEED_100G;
dev_info->speed_capa = speed_cap;
return 0;
/* Link Mode */
switch (q_link.duplex) {
case QEDE_DUPLEX_HALF:
- link_duplex = ETH_LINK_HALF_DUPLEX;
+ link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
break;
case QEDE_DUPLEX_FULL:
- link_duplex = ETH_LINK_FULL_DUPLEX;
+ link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
break;
case QEDE_DUPLEX_UNKNOWN:
default:
link.link_duplex = link_duplex;
/* Link Status */
- link.link_status = q_link.link_up ? ETH_LINK_UP : ETH_LINK_DOWN;
+ link.link_status = q_link.link_up ? RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
/* AN */
link.link_autoneg = (q_link.supported_caps & QEDE_SUPPORTED_AUTONEG) ?
- ETH_LINK_AUTONEG : ETH_LINK_FIXED;
+ RTE_ETH_LINK_AUTONEG : RTE_ETH_LINK_FIXED;
DP_INFO(edev, "Link - Speed %u Mode %u AN %u Status %u\n",
link.link_speed, link.link_duplex,
link.link_autoneg, link.link_status);
}
}
-static void qede_dev_close(struct rte_eth_dev *eth_dev)
+static int qede_dev_close(struct rte_eth_dev *eth_dev)
{
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ int ret = 0;
PMD_INIT_FUNC_TRACE(edev);
+ /* only close in case of the primary process */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
/* dev_stop() shall cleanup fp resources in hw but without releasing
* dma memories and sw structures so that dev_start() can be called
* by the app without reconfiguration. However, in dev_close() we
* can release all the resources and the device can be brought up anew
*/
if (eth_dev->data->dev_started)
- qede_dev_stop(eth_dev);
+ ret = qede_dev_stop(eth_dev);
if (qdev->vport_started)
qede_stop_vport(edev);
if (ECORE_IS_CMT(edev))
rte_eal_alarm_cancel(qede_poll_sp_sb_cb, (void *)eth_dev);
+
+ return ret;
}
static int
QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;
enum _ecore_status_t ecore_status;
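+ /* If unicast promiscuous mode is already on, keep full promisc
+ * instead of downgrading to multicast promisc.
+ */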
+ if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
+ type = QED_FILTER_RX_MODE_TYPE_PROMISC;
ecore_status = qed_configure_filter_rx_mode(eth_dev, type);
return ecore_status >= ECORE_SUCCESS ? 0 : -EAGAIN;
}
/* Pause is assumed to be supported (SUPPORTED_Pause) */
- if (fc_conf->mode == RTE_FC_FULL)
+ if (fc_conf->mode == RTE_ETH_FC_FULL)
params.pause_config |= (QED_LINK_PAUSE_TX_ENABLE |
QED_LINK_PAUSE_RX_ENABLE);
- if (fc_conf->mode == RTE_FC_TX_PAUSE)
+ if (fc_conf->mode == RTE_ETH_FC_TX_PAUSE)
params.pause_config |= QED_LINK_PAUSE_TX_ENABLE;
- if (fc_conf->mode == RTE_FC_RX_PAUSE)
+ if (fc_conf->mode == RTE_ETH_FC_RX_PAUSE)
params.pause_config |= QED_LINK_PAUSE_RX_ENABLE;
params.link_up = true;
if (current_link.pause_config & (QED_LINK_PAUSE_RX_ENABLE |
QED_LINK_PAUSE_TX_ENABLE))
- fc_conf->mode = RTE_FC_FULL;
+ fc_conf->mode = RTE_ETH_FC_FULL;
else if (current_link.pause_config & QED_LINK_PAUSE_RX_ENABLE)
- fc_conf->mode = RTE_FC_RX_PAUSE;
+ fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
else if (current_link.pause_config & QED_LINK_PAUSE_TX_ENABLE)
- fc_conf->mode = RTE_FC_TX_PAUSE;
+ fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
else
- fc_conf->mode = RTE_FC_NONE;
+ fc_conf->mode = RTE_ETH_FC_NONE;
return 0;
}
static void qede_init_rss_caps(uint8_t *rss_caps, uint64_t hf)
{
*rss_caps = 0;
- *rss_caps |= (hf & ETH_RSS_IPV4) ? ECORE_RSS_IPV4 : 0;
- *rss_caps |= (hf & ETH_RSS_IPV6) ? ECORE_RSS_IPV6 : 0;
- *rss_caps |= (hf & ETH_RSS_IPV6_EX) ? ECORE_RSS_IPV6 : 0;
- *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_TCP) ? ECORE_RSS_IPV4_TCP : 0;
- *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_TCP) ? ECORE_RSS_IPV6_TCP : 0;
- *rss_caps |= (hf & ETH_RSS_IPV6_TCP_EX) ? ECORE_RSS_IPV6_TCP : 0;
- *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_UDP) ? ECORE_RSS_IPV4_UDP : 0;
- *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_UDP) ? ECORE_RSS_IPV6_UDP : 0;
+ *rss_caps |= (hf & RTE_ETH_RSS_IPV4) ? ECORE_RSS_IPV4 : 0;
+ *rss_caps |= (hf & RTE_ETH_RSS_IPV6) ? ECORE_RSS_IPV6 : 0;
+ *rss_caps |= (hf & RTE_ETH_RSS_IPV6_EX) ? ECORE_RSS_IPV6 : 0;
+ *rss_caps |= (hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP) ? ECORE_RSS_IPV4_TCP : 0;
+ *rss_caps |= (hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP) ? ECORE_RSS_IPV6_TCP : 0;
+ *rss_caps |= (hf & RTE_ETH_RSS_IPV6_TCP_EX) ? ECORE_RSS_IPV6_TCP : 0;
+ *rss_caps |= (hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP) ? ECORE_RSS_IPV4_UDP : 0;
+ *rss_caps |= (hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP) ? ECORE_RSS_IPV6_UDP : 0;
}
int qede_rss_hash_update(struct rte_eth_dev *eth_dev,
/* RSS hash key */
if (key) {
if (len > (ECORE_RSS_KEY_SIZE * sizeof(uint32_t))) {
- DP_ERR(edev, "RSS key length exceeds limit\n");
- return -EINVAL;
+ len = ECORE_RSS_KEY_SIZE * sizeof(uint32_t);
+ DP_NOTICE(edev, false,
+ "RSS key length too big, trimmed to %d\n",
+ len);
}
DP_INFO(edev, "Applying user supplied hash key\n");
rss_params.update_rss_key = 1;
uint8_t entry;
int rc = 0;
- if (reta_size > ETH_RSS_RETA_SIZE_128) {
+ if (reta_size > RTE_ETH_RSS_RETA_SIZE_128) {
DP_ERR(edev, "reta_size %d is not supported by hardware\n",
reta_size);
return -EINVAL;
for_each_hwfn(edev, i) {
for (j = 0; j < reta_size; j++) {
- idx = j / RTE_RETA_GROUP_SIZE;
- shift = j % RTE_RETA_GROUP_SIZE;
+ idx = j / RTE_ETH_RETA_GROUP_SIZE;
+ shift = j % RTE_ETH_RETA_GROUP_SIZE;
if (reta_conf[idx].mask & (1ULL << shift)) {
entry = reta_conf[idx].reta[shift];
fid = entry * edev->num_hwfns + i;
uint16_t i, idx, shift;
uint8_t entry;
- if (reta_size > ETH_RSS_RETA_SIZE_128) {
+ if (reta_size > RTE_ETH_RSS_RETA_SIZE_128) {
DP_ERR(edev, "reta_size %d is not supported\n",
reta_size);
return -EINVAL;
}
for (i = 0; i < reta_size; i++) {
- idx = i / RTE_RETA_GROUP_SIZE;
- shift = i % RTE_RETA_GROUP_SIZE;
+ idx = i / RTE_ETH_RETA_GROUP_SIZE;
+ shift = i % RTE_ETH_RETA_GROUP_SIZE;
if (reta_conf[idx].mask & (1ULL << shift)) {
entry = qdev->rss_ind_table[i];
reta_conf[idx].reta[shift] = entry;
{
struct qede_dev *qdev = QEDE_INIT_QDEV(dev);
struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
- struct rte_eth_dev_info dev_info = {0};
struct qede_fastpath *fp;
- uint32_t max_rx_pkt_len;
uint32_t frame_size;
uint16_t bufsz;
bool restart = false;
int i, rc;
PMD_INIT_FUNC_TRACE(edev);
- rc = qede_dev_info_get(dev, &dev_info);
- if (rc != 0) {
- DP_ERR(edev, "Error during getting ethernet device info\n");
- return rc;
- }
- max_rx_pkt_len = mtu + QEDE_MAX_ETHER_HDR_LEN;
- frame_size = max_rx_pkt_len;
- if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen) {
- DP_ERR(edev, "MTU %u out of range, %u is maximum allowable\n",
- mtu, dev_info.max_rx_pktlen - RTE_ETHER_HDR_LEN -
- QEDE_ETH_OVERHEAD);
- return -EINVAL;
- }
+
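+ /* MTU bounds are validated by the ethdev layer before this callback
+ * is invoked, so no local range check is needed here.
+ */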
+ frame_size = mtu + QEDE_MAX_ETHER_HDR_LEN;
if (!dev->data->scattered_rx &&
frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) {
DP_INFO(edev, "MTU greater than minimum RX buffer size of %u\n",
}
if (dev->data->dev_started) {
dev->data->dev_started = 0;
- qede_dev_stop(dev);
+ rc = qede_dev_stop(dev);
+ if (rc != 0)
+ return rc;
restart = true;
}
rte_delay_ms(1000);
fp->rxq->rx_buf_size = rc;
}
}
- if (max_rx_pkt_len > RTE_ETHER_MAX_LEN)
- dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
- else
- dev->data->dev_conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
if (!dev->data->dev_started && restart) {
qede_dev_start(dev);
dev->data->dev_started = 1;
}
- /* update max frame size */
- dev->data->dev_conf.rxmode.max_rx_pkt_len = max_rx_pkt_len;
-
return 0;
}
return qede_eth_dev_init(dev);
}
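+/* The rte_eth_dev_ops queue-release callbacks now take a port and queue id
+ * rather than a queue pointer; these wrappers adapt the existing helpers.
+ */
+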
+static void
+qede_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
+{
+ qede_rx_queue_release(dev->data->rx_queues[qid]);
+}
+
+static void
+qede_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
+{
+ qede_tx_queue_release(dev->data->tx_queues[qid]);
+}
+
static const struct eth_dev_ops qede_eth_dev_ops = {
.dev_configure = qede_dev_configure,
.dev_infos_get = qede_dev_info_get,
.rx_queue_setup = qede_rx_queue_setup,
- .rx_queue_release = qede_rx_queue_release,
- .rx_descriptor_status = qede_rx_descriptor_status,
+ .rx_queue_release = qede_dev_rx_queue_release,
.tx_queue_setup = qede_tx_queue_setup,
- .tx_queue_release = qede_tx_queue_release,
+ .tx_queue_release = qede_dev_tx_queue_release,
.dev_start = qede_dev_start,
.dev_reset = qede_dev_reset,
.dev_set_link_up = qede_dev_set_link_up,
.reta_update = qede_rss_reta_update,
.reta_query = qede_rss_reta_query,
.mtu_set = qede_set_mtu,
- .filter_ctrl = qede_dev_filter_ctrl,
+ .flow_ops_get = qede_dev_flow_ops_get,
.udp_tunnel_port_add = qede_udp_dst_port_add,
.udp_tunnel_port_del = qede_udp_dst_port_del,
.fw_version_get = qede_fw_version_get,
.dev_configure = qede_dev_configure,
.dev_infos_get = qede_dev_info_get,
.rx_queue_setup = qede_rx_queue_setup,
- .rx_queue_release = qede_rx_queue_release,
- .rx_descriptor_status = qede_rx_descriptor_status,
+ .rx_queue_release = qede_dev_rx_queue_release,
.tx_queue_setup = qede_tx_queue_setup,
- .tx_queue_release = qede_tx_queue_release,
+ .tx_queue_release = qede_dev_tx_queue_release,
.dev_start = qede_dev_start,
.dev_reset = qede_dev_reset,
.dev_set_link_up = qede_dev_set_link_up,
qed_ops->common->update_pf_params(edev, &pf_params);
}
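+/* Compose a locally administered unicast MAC address: a fixed 00:09:C0 OUI
+ * with the local-admin bit forced on, followed by three random bytes.
+ */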
+static void qede_generate_random_mac_addr(struct rte_ether_addr *mac_addr)
+{
+ uint64_t random;
+
+ /* Set Organizationally Unique Identifier (OUI) prefix. */
+ mac_addr->addr_bytes[0] = 0x00;
+ mac_addr->addr_bytes[1] = 0x09;
+ mac_addr->addr_bytes[2] = 0xC0;
+
+ /* Force indication of locally assigned MAC address. */
+ mac_addr->addr_bytes[0] |= RTE_ETHER_LOCAL_ADMIN_ADDR;
+
+ /* Generate the last 3 bytes of the MAC address with a random number. */
+ random = rte_rand();
+
+ memcpy(&mac_addr->addr_bytes[3], &random, 3);
+}
+
static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
{
struct rte_pci_device *pci_dev;
uint8_t bulletin_change;
uint8_t vf_mac[RTE_ETHER_ADDR_LEN];
uint8_t is_mac_forced;
- bool is_mac_exist;
+ bool is_mac_exist = false;
/* Fix up ecore debug level */
uint32_t dp_module = ~0 & ~ECORE_MSG_HW;
uint8_t dp_level = ECORE_LEVEL_VERBOSE;
}
rte_eth_copy_pci_info(eth_dev, pci_dev);
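+ /* Let the ethdev layer fill per-queue xstats from the basic queue stats. */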
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
/* @DPDK */
edev->vendor_id = pci_dev->id.vendor_id;
DP_ERR(edev, "No VF macaddr assigned\n");
}
}
+
+ /* If MAC doesn't exist from PF, generate random one */
+ if (!is_mac_exist) {
+ struct rte_ether_addr *mac_addr;
+
+ mac_addr = (struct rte_ether_addr *)&vf_mac;
+ qede_generate_random_mac_addr(mac_addr);
+
+ rte_ether_addr_copy(mac_addr,
+ &eth_dev->data->mac_addrs[0]);
+
+ rte_ether_addr_copy(&eth_dev->data->mac_addrs[0],
+ &adapter->primary_mac);
+ }
}
eth_dev->dev_ops = (is_vf) ? &qede_eth_vf_dev_ops : &qede_eth_dev_ops;
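+ /* Descriptor status queries now go through the fast-path pointer on
+ * rte_eth_dev rather than the dev_ops table.
+ */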
+ eth_dev->rx_descriptor_status = qede_rx_descriptor_status;
adapter->num_tx_queues = 0;
adapter->num_rx_queues = 0;
adapter->ipgre.num_filters = 0;
if (is_vf) {
adapter->vxlan.enable = true;
- adapter->vxlan.filter_type = ETH_TUNNEL_FILTER_IMAC |
- ETH_TUNNEL_FILTER_IVLAN;
+ adapter->vxlan.filter_type = RTE_ETH_TUNNEL_FILTER_IMAC |
+ RTE_ETH_TUNNEL_FILTER_IVLAN;
adapter->vxlan.udp_port = QEDE_VXLAN_DEF_PORT;
adapter->geneve.enable = true;
- adapter->geneve.filter_type = ETH_TUNNEL_FILTER_IMAC |
- ETH_TUNNEL_FILTER_IVLAN;
+ adapter->geneve.filter_type = RTE_ETH_TUNNEL_FILTER_IMAC |
+ RTE_ETH_TUNNEL_FILTER_IVLAN;
adapter->geneve.udp_port = QEDE_GENEVE_DEF_PORT;
adapter->ipgre.enable = true;
- adapter->ipgre.filter_type = ETH_TUNNEL_FILTER_IMAC |
- ETH_TUNNEL_FILTER_IVLAN;
+ adapter->ipgre.filter_type = RTE_ETH_TUNNEL_FILTER_IMAC |
+ RTE_ETH_TUNNEL_FILTER_IVLAN;
} else {
adapter->vxlan.enable = false;
adapter->geneve.enable = false;
adapter->ipgre.enable = false;
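+ /* PF case: configure SR-IOV for the VFs attached to this device. */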
+ qed_ops->sriov_configure(edev, pci_dev->max_vfs);
}
- DP_INFO(edev, "MAC address : %02x:%02x:%02x:%02x:%02x:%02x\n",
- adapter->primary_mac.addr_bytes[0],
- adapter->primary_mac.addr_bytes[1],
- adapter->primary_mac.addr_bytes[2],
- adapter->primary_mac.addr_bytes[3],
- adapter->primary_mac.addr_bytes[4],
- adapter->primary_mac.addr_bytes[5]);
+ DP_INFO(edev, "MAC address : " RTE_ETHER_ADDR_PRT_FMT "\n",
+ RTE_ETHER_ADDR_BYTES(&adapter->primary_mac));
DP_INFO(edev, "Device initialized\n");
{
struct qede_dev *qdev = eth_dev->data->dev_private;
struct ecore_dev *edev = &qdev->edev;
-
PMD_INIT_FUNC_TRACE(edev);
-
- /* only uninitialize in the primary process */
- if (rte_eal_process_type() != RTE_PROC_PRIMARY)
- return 0;
-
- /* safe to close dev here */
qede_dev_close(eth_dev);
-
- eth_dev->dev_ops = NULL;
- eth_dev->rx_pkt_burst = NULL;
- eth_dev->tx_pkt_burst = NULL;
-
return 0;
}
RTE_PMD_REGISTER_PCI(net_qede_vf, rte_qedevf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_qede_vf, pci_id_qedevf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_qede_vf, "* igb_uio | vfio-pci");
-RTE_LOG_REGISTER(qede_logtype_init, pmd.net.qede.init, NOTICE);
-RTE_LOG_REGISTER(qede_logtype_driver, pmd.net.qede.driver, NOTICE);
+RTE_LOG_REGISTER_SUFFIX(qede_logtype_init, init, NOTICE);
+RTE_LOG_REGISTER_SUFFIX(qede_logtype_driver, driver, NOTICE);