/*-
* BSD LICENSE
- *
+ *
* Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
* All rights reserved.
- *
+ *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
- *
+ *
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
- *
+ *
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
return (0);
}
+int
+rte_eth_dev_rx_queue_start(uint8_t port_id, uint16_t rx_queue_id)
+{
+ struct rte_eth_dev *dev;
+
+ /* This function is only safe when called from the primary process
+ * in a multi-process setup. */
+ PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return -EINVAL;
+ }
+
+ dev = &rte_eth_devices[port_id];
+ if (rx_queue_id >= dev->data->nb_rx_queues) {
+ PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
+ return -EINVAL;
+ }
+
+ FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);
+
+ return (*dev->dev_ops->rx_queue_start)(dev, rx_queue_id);
+}
+
+int
+rte_eth_dev_rx_queue_stop(uint8_t port_id, uint16_t rx_queue_id)
+{
+ struct rte_eth_dev *dev;
+
+ /* This function is only safe when called from the primary process
+ * in a multi-process setup. */
+ PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return -EINVAL;
+ }
+
+ dev = &rte_eth_devices[port_id];
+ if (rx_queue_id >= dev->data->nb_rx_queues) {
+ PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
+ return -EINVAL;
+ }
+
+ FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);
+
+ return (*dev->dev_ops->rx_queue_stop)(dev, rx_queue_id);
+}
+
+int
+rte_eth_dev_tx_queue_start(uint8_t port_id, uint16_t tx_queue_id)
+{
+ struct rte_eth_dev *dev;
+
+ /* This function is only safe when called from the primary process
+ * in a multi-process setup. */
+ PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return -EINVAL;
+ }
+
+ dev = &rte_eth_devices[port_id];
+ if (tx_queue_id >= dev->data->nb_tx_queues) {
+ PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
+ return -EINVAL;
+ }
+
+ FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);
+
+ return (*dev->dev_ops->tx_queue_start)(dev, tx_queue_id);
+}
+
+int
+rte_eth_dev_tx_queue_stop(uint8_t port_id, uint16_t tx_queue_id)
+{
+ struct rte_eth_dev *dev;
+
+ /* This function is only safe when called from the primary process
+ * in a multi-process setup. */
+ PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return -EINVAL;
+ }
+
+ dev = &rte_eth_devices[port_id];
+ if (tx_queue_id >= dev->data->nb_tx_queues) {
+ PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
+ return -EINVAL;
+ }
+
+ FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);
+
+ return (*dev->dev_ops->tx_queue_stop)(dev, tx_queue_id);
+}
+
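The four functions above give applications per-queue start/stop control once a port has been configured. A minimal usage sketch, assuming the usual EAL/port/queue init sequence has already run in the primary process; the helper name and the drain-and-restart scenario are illustrative only, not part of this patch:

/*
 * Illustrative only: stop and restart one RX queue on a running port.
 * Error codes come straight from the checks above (-EINVAL, -ENOTSUP,
 * -E_RTE_SECONDARY) or from the PMD callback.
 */
static int
example_restart_rx_queue(uint8_t port_id, uint16_t queue_id)
{
	int ret;

	ret = rte_eth_dev_rx_queue_stop(port_id, queue_id);
	if (ret != 0)
		return ret;

	/* ... flush or reconfigure the queue here if needed ... */

	return rte_eth_dev_rx_queue_start(port_id, queue_id);
}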
static int
rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
/* check multi-queue mode */
- if ((dev_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) ||
+ if ((dev_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) ||
(dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) ||
(dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB_RSS) ||
(dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB)) {
/* SRIOV only works in VMDq enable mode */
- PMD_DEBUG_TRACE("ethdev port_id=%hhu SRIOV active, "
- "wrong VMDQ mq_mode rx %u tx %u\n",
+ PMD_DEBUG_TRACE("ethdev port_id=%" PRIu8
+ " SRIOV active, "
+ "wrong VMDQ mq_mode rx %u tx %u\n",
port_id,
dev_conf->rxmode.mq_mode,
dev_conf->txmode.mq_mode);
case ETH_MQ_RX_VMDQ_DCB:
case ETH_MQ_RX_VMDQ_DCB_RSS:
/* DCB/RSS VMDQ in SRIOV mode, not implemented yet */
- PMD_DEBUG_TRACE("ethdev port_id=%hhu SRIOV active, "
- "unsupported VMDQ mq_mode rx %u\n",
+ PMD_DEBUG_TRACE("ethdev port_id=%" PRIu8
+ " SRIOV active, "
+ "unsupported VMDQ mq_mode rx %u\n",
port_id, dev_conf->rxmode.mq_mode);
return (-EINVAL);
default: /* ETH_MQ_RX_VMDQ_ONLY or ETH_MQ_RX_NONE */
switch (dev_conf->txmode.mq_mode) {
case ETH_MQ_TX_VMDQ_DCB:
/* DCB VMDQ in SRIOV mode, not implemented yet */
- PMD_DEBUG_TRACE("ethdev port_id=%hhu SRIOV active, "
- "unsupported VMDQ mq_mode tx %u\n",
+ PMD_DEBUG_TRACE("ethdev port_id=%" PRIu8
+ " SRIOV active, "
+ "unsupported VMDQ mq_mode tx %u\n",
port_id, dev_conf->txmode.mq_mode);
return (-EINVAL);
default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) ||
(nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) {
PMD_DEBUG_TRACE("ethdev port_id=%d SRIOV active, "
- "queue number must less equal to %d\n",
+ "queue number must less equal to %d\n",
port_id, RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool);
return (-EINVAL);
}
/* For VMDQ+DCB mode, check our configuration before we go further */
if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
const struct rte_eth_vmdq_dcb_conf *conf;
-
+
if (nb_rx_q != ETH_VMDQ_DCB_NUM_QUEUES) {
PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB, nb_rx_q "
"!= %d\n",
}
if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
const struct rte_eth_vmdq_dcb_tx_conf *conf;
-
+
if (nb_tx_q != ETH_VMDQ_DCB_NUM_QUEUES) {
PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB, nb_tx_q "
"!= %d\n",
return (-EINVAL);
}
}
-
+
/* For DCB mode check our configuration before we go further */
if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
const struct rte_eth_dcb_rx_conf *conf;
-
+
if (nb_rx_q != ETH_DCB_NUM_QUEUES) {
PMD_DEBUG_TRACE("ethdev port_id=%d DCB, nb_rx_q "
"!= %d\n",
return (-EINVAL);
}
}
-
+
if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
const struct rte_eth_dcb_tx_conf *conf;
-
+
if (nb_tx_q != ETH_DCB_NUM_QUEUES) {
PMD_DEBUG_TRACE("ethdev port_id=%d DCB, nb_tx_q "
"!= %d\n",
PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
if (port_id >= nb_ports) {
- PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ PMD_DEBUG_TRACE("Invalid port_id=%" PRIu8 "\n", port_id);
return (-EINVAL);
}
dev = &rte_eth_devices[port_id];
FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
+
+ if (dev->data->dev_started != 0) {
+ PMD_DEBUG_TRACE("Device with port_id=%" PRIu8
+ " already started\n",
+ port_id);
+ return (0);
+ }
+
diag = (*dev->dev_ops->dev_start)(dev);
if (diag == 0)
dev->data->dev_started = 1;
PROC_PRIMARY_OR_RET();
if (port_id >= nb_ports) {
- PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ PMD_DEBUG_TRACE("Invalid port_id=%" PRIu8 "\n", port_id);
return;
}
dev = &rte_eth_devices[port_id];
FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
+
+ if (dev->data->dev_started == 0) {
+ PMD_DEBUG_TRACE("Device with port_id=%" PRIu8
+ " already stopped\n",
+ port_id);
+ return;
+ }
+
dev->data->dev_started = 0;
(*dev->dev_ops->dev_stop)(dev);
}
/* Default device offload capabilities to zero */
dev_info->rx_offload_capa = 0;
dev_info->tx_offload_capa = 0;
+ dev_info->if_index = 0;
FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
(*dev->dev_ops->dev_infos_get)(dev, dev_info);
dev_info->pci_dev = dev->pci_dev;
int ret = 0;
int mask = 0;
int cur, org = 0;
-
+
if (port_id >= nb_ports) {
PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
return (-ENODEV);
dev->data->dev_conf.rxmode.hw_vlan_strip = (uint8_t)cur;
mask |= ETH_VLAN_STRIP_MASK;
}
-
+
cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
org = !!(dev->data->dev_conf.rxmode.hw_vlan_filter);
if (cur != org){
/*no change*/
if(mask == 0)
return ret;
-
+
FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
(*dev->dev_ops->vlan_offload_set)(dev, mask);
rte_eth_dev_rss_reta_update(uint8_t port_id, struct rte_eth_rss_reta *reta_conf)
{
struct rte_eth_dev *dev;
+ uint16_t max_rxq;
uint8_t i,j;
if (port_id >= nb_ports) {
return (-EINVAL);
}
+ dev = &rte_eth_devices[port_id];
+ max_rxq = (dev->data->nb_rx_queues <= ETH_RSS_RETA_MAX_QUEUE) ?
+ dev->data->nb_rx_queues : ETH_RSS_RETA_MAX_QUEUE;
if (reta_conf->mask_lo != 0) {
for (i = 0; i < ETH_RSS_RETA_NUM_ENTRIES/2; i++) {
if ((reta_conf->mask_lo & (1ULL << i)) &&
- (reta_conf->reta[i] >= ETH_RSS_RETA_MAX_QUEUE)) {
+ (reta_conf->reta[i] >= max_rxq)) {
PMD_DEBUG_TRACE("RETA hash index output"
"configration for port=%d,invalid"
"queue=%d\n",port_id,reta_conf->reta[i]);
return (-EINVAL);
- }
+ }
}
}
if (reta_conf->mask_hi != 0) {
- for (i = 0; i< ETH_RSS_RETA_NUM_ENTRIES/2; i++) {
+ for (i = 0; i< ETH_RSS_RETA_NUM_ENTRIES/2; i++) {
j = (uint8_t)(i + ETH_RSS_RETA_NUM_ENTRIES/2);
/* Check that the queue index is within the RX queue range */
- if ((reta_conf->mask_hi & (1ULL << i)) &&
- (reta_conf->reta[j] >= ETH_RSS_RETA_MAX_QUEUE)) {
+ if ((reta_conf->mask_hi & (1ULL << i)) &&
+ (reta_conf->reta[j] >= max_rxq)) {
PMD_DEBUG_TRACE("RETA hash index output"
"configration for port=%d,invalid"
"queue=%d\n",port_id,reta_conf->reta[j]);
}
}
- dev = &rte_eth_devices[port_id];
-
FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
return (*dev->dev_ops->reta_update)(dev, reta_conf);
}
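For reference, callers drive the update above through the mask_lo/mask_hi bitmaps, which select which of the ETH_RSS_RETA_NUM_ENTRIES (128) redirection entries are written; with this change each selected reta[] value must be below the port's RX queue count rather than the fixed ETH_RSS_RETA_MAX_QUEUE limit. A sketch of a caller that spreads the whole table round-robin over nb_queues queues (the helper name and the round-robin policy are only an example):

/*
 * Illustrative only: program a full round-robin redirection table.
 * Uses the mask_lo/mask_hi + reta[] fields of struct rte_eth_rss_reta
 * referenced by the code above.
 */
static int
example_fill_reta(uint8_t port_id, uint16_t nb_queues)
{
	struct rte_eth_rss_reta reta_conf = {
		.mask_lo = ~0ULL,	/* update entries 0..63 */
		.mask_hi = ~0ULL,	/* update entries 64..127 */
	};
	unsigned i;

	if (nb_queues == 0)
		return -EINVAL;

	for (i = 0; i < ETH_RSS_RETA_NUM_ENTRIES; i++)
		reta_conf.reta[i] = (uint8_t)(i % nb_queues);

	return rte_eth_dev_rss_reta_update(port_id, &reta_conf);
}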
-int
+int
rte_eth_dev_rss_reta_query(uint8_t port_id, struct rte_eth_rss_reta *reta_conf)
{
struct rte_eth_dev *dev;
-
+
if (port_id >= nb_ports) {
PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
return (-ENODEV);
return (*dev->dev_ops->reta_query)(dev, reta_conf);
}
+int
+rte_eth_dev_rss_hash_update(uint8_t port_id, struct rte_eth_rss_conf *rss_conf)
+{
+ struct rte_eth_dev *dev;
+ uint16_t rss_hash_protos;
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return (-ENODEV);
+ }
+ rss_hash_protos = rss_conf->rss_hf;
+ if ((rss_hash_protos != 0) &&
+ ((rss_hash_protos & ETH_RSS_PROTO_MASK) == 0)) {
+ PMD_DEBUG_TRACE("Invalid rss_hash_protos=0x%x\n",
+ rss_hash_protos);
+ return (-EINVAL);
+ }
+ dev = &rte_eth_devices[port_id];
+ FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
+ return (*dev->dev_ops->rss_hash_update)(dev, rss_conf);
+}
+
+int
+rte_eth_dev_rss_hash_conf_get(uint8_t port_id,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct rte_eth_dev *dev;
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return (-ENODEV);
+ }
+ dev = &rte_eth_devices[port_id];
+ FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
+ return (*dev->dev_ops->rss_hash_conf_get)(dev, rss_conf);
+}
+
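The two new RSS hash functions let an application change or query the hashed protocols (and, where the PMD supports it, the hash key) at runtime. A minimal sketch, assuming the existing rss_key/rss_hf fields of struct rte_eth_rss_conf and the ETH_RSS_IPV4/ETH_RSS_IPV6 flags from rte_ethdev.h; the protocol selection and the NULL key (leave the current key in place) are illustrative only:

/*
 * Illustrative only: hash on IPv4/IPv6 headers, keep the current key,
 * then read back what the PMD actually programmed.
 */
static int
example_update_rss_hash(uint8_t port_id)
{
	struct rte_eth_rss_conf rss_conf = {
		.rss_key = NULL,	/* do not change the hash key */
		.rss_hf = ETH_RSS_IPV4 | ETH_RSS_IPV6,
	};
	int ret;

	ret = rte_eth_dev_rss_hash_update(port_id, &rss_conf);
	if (ret != 0)
		return ret;

	return rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf);
}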
int
rte_eth_led_on(uint8_t port_id)
{
FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);
if (is_zero_ether_addr(addr)) {
- PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
+ PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
port_id);
return (-EINVAL);
}
PMD_DEBUG_TRACE("pool id must be 0-%d\n",ETH_64_POOLS - 1);
return (-EINVAL);
}
-
+
index = get_mac_addr_index(port_id, addr);
if (index < 0) {
index = get_mac_addr_index(port_id, &null_mac_addr);
}
} else {
pool_mask = dev->data->mac_pool_sel[index];
-
+
/* Check if both MAC address and pool are already there, and do nothing */
if (pool_mask & (1ULL << pool))
return 0;
/* Update address in NIC data structure */
ether_addr_copy(addr, &dev->data->mac_addrs[index]);
-
+
/* Update pool bitmap in NIC data structure */
dev->data->mac_pool_sel[index] |= (1ULL << pool);
return 0;
}
-int
+int
rte_eth_dev_set_vf_rxmode(uint8_t port_id, uint16_t vf,
uint16_t rx_mode, uint8_t on)
{
PMD_DEBUG_TRACE("set VF RX mode:Invalid port_id=%d\n",
port_id);
return (-ENODEV);
- }
-
+ }
+
dev = &rte_eth_devices[port_id];
rte_eth_dev_info_get(port_id, &dev_info);
if (rx_mode == 0)
{
PMD_DEBUG_TRACE("set VF RX mode:mode mask ca not be zero\n");
- return (-EINVAL);
+ return (-EINVAL);
}
FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rx_mode, -ENOTSUP);
return (*dev->dev_ops->set_vf_rx_mode)(dev, vf, rx_mode, on);
int index;
int ret;
struct rte_eth_dev *dev;
-
+
if (port_id >= nb_ports) {
PMD_DEBUG_TRACE("unicast hash setting:Invalid port_id=%d\n",
port_id);
return (-ENODEV);
}
-
+
dev = &rte_eth_devices[port_id];
if (is_zero_ether_addr(addr)) {
- PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
+ PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
port_id);
return (-EINVAL);
}
/* Check if it's already there, and do nothing */
if ((index >= 0) && (on))
return 0;
-
+
if (index < 0) {
if (!on) {
- PMD_DEBUG_TRACE("port %d: the MAC address was not"
+ PMD_DEBUG_TRACE("port %d: the MAC address was not"
"set in UTA\n", port_id);
return (-EINVAL);
}
-
+
index = get_hash_mac_addr_index(port_id, &null_mac_addr);
if (index < 0) {
PMD_DEBUG_TRACE("port %d: MAC address array full\n",
port_id);
return (-ENOSPC);
}
- }
-
+ }
+
FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP);
ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
if (ret == 0) {
if (on)
ether_addr_copy(addr,
&dev->data->hash_mac_addrs[index]);
- else
+ else
ether_addr_copy(&null_mac_addr,
&dev->data->hash_mac_addrs[index]);
}
-
+
return ret;
}
rte_eth_dev_uc_all_hash_table_set(uint8_t port_id, uint8_t on)
{
struct rte_eth_dev *dev;
-
+
if (port_id >= nb_ports) {
PMD_DEBUG_TRACE("unicast hash setting:Invalid port_id=%d\n",
port_id);
return (-ENODEV);
}
-
+
dev = &rte_eth_devices[port_id];
FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
return (*dev->dev_ops->uc_all_hash_table_set)(dev, on);
}
-int
+int
rte_eth_dev_set_vf_rx(uint8_t port_id,uint16_t vf, uint8_t on)
{
uint16_t num_vfs;
PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
return (-ENODEV);
}
-
+
dev = &rte_eth_devices[port_id];
rte_eth_dev_info_get(port_id, &dev_info);
-
+
num_vfs = dev_info.max_vfs;
- if (vf > num_vfs)
+ if (vf >= num_vfs)
{
PMD_DEBUG_TRACE("port %d: invalid vf id\n", port_id);
return (-EINVAL);
- }
-
+ }
+
FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rx, -ENOTSUP);
return (*dev->dev_ops->set_vf_rx)(dev, vf,on);
}
-int
+int
rte_eth_dev_set_vf_tx(uint8_t port_id,uint16_t vf, uint8_t on)
{
uint16_t num_vfs;
PMD_DEBUG_TRACE("set pool tx:Invalid port_id=%d\n", port_id);
return (-ENODEV);
}
-
+
dev = &rte_eth_devices[port_id];
rte_eth_dev_info_get(port_id, &dev_info);
num_vfs = dev_info.max_vfs;
- if (vf > num_vfs)
+ if (vf >= num_vfs)
{
PMD_DEBUG_TRACE("set pool tx:invalid pool id=%d\n", vf);
return (-EINVAL);
}
-
+
FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_tx, -ENOTSUP);
return (*dev->dev_ops->set_vf_tx)(dev, vf,on);
}
int
-rte_eth_dev_set_vf_vlan_filter(uint8_t port_id, uint16_t vlan_id,
+rte_eth_dev_set_vf_vlan_filter(uint8_t port_id, uint16_t vlan_id,
uint64_t vf_mask,uint8_t vlan_on)
{
struct rte_eth_dev *dev;
PMD_DEBUG_TRACE("VF VLAN filter:pool_mask can not be 0\n");
return (-EINVAL);
}
-
+
FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_vlan_filter, -ENOTSUP);
return (*dev->dev_ops->set_vf_vlan_filter)(dev, vlan_id,
vf_mask,vlan_on);
}
int
-rte_eth_mirror_rule_set(uint8_t port_id,
+rte_eth_mirror_rule_set(uint8_t port_id,
struct rte_eth_vmdq_mirror_conf *mirror_conf,
uint8_t rule_id, uint8_t on)
{
PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
return (-ENODEV);
}
-
+
if (mirror_conf->rule_type_mask == 0) {
PMD_DEBUG_TRACE("mirror rule type can not be 0.\n");
return (-EINVAL);
}
-
+
if (mirror_conf->dst_pool >= ETH_64_POOLS) {
PMD_DEBUG_TRACE("Invalid dst pool, pool id must"
"be 0-%d\n",ETH_64_POOLS - 1);
return (-EINVAL);
}
-
- if ((mirror_conf->rule_type_mask & ETH_VMDQ_POOL_MIRROR) &&
+
+ if ((mirror_conf->rule_type_mask & ETH_VMDQ_POOL_MIRROR) &&
(mirror_conf->pool_mask == 0)) {
PMD_DEBUG_TRACE("Invalid mirror pool, pool mask can not"
- "be 0.\n");
+ "be 0.\n");
return (-EINVAL);
}
-
+
if(rule_id >= ETH_VMDQ_NUM_MIRROR_RULE)
{
PMD_DEBUG_TRACE("Invalid rule_id, rule_id must be 0-%d\n",
}
dev = &rte_eth_devices[port_id];
FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_count, -ENOTSUP);
- return (*dev->dev_ops->rx_queue_count)(dev, queue_id);
+ return (*dev->dev_ops->rx_queue_count)(dev, queue_id);
}
int