* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
- * version: DPDK.L.1.2.3-3
*/
#include <sys/types.h>
#define PMD_DEBUG_TRACE(fmt, args...)
#endif
-/* define two macros for quick checking for restricting functions to primary
- * instance only. First macro is for functions returning an int - and therefore
- * an error code, second macro is for functions returning null.
- */
-#define PROC_PRIMARY_OR_ERR() do { \
+/* Macros for checking for restricting functions to primary instance only */
+#define PROC_PRIMARY_OR_ERR_RET(retval) do { \
if (rte_eal_process_type() != RTE_PROC_PRIMARY) { \
- PMD_DEBUG_TRACE("Cannot run %s in secondary processes\n", \
- __func__); \
- return (-E_RTE_SECONDARY); \
- } \
+ PMD_DEBUG_TRACE("Cannot run in secondary processes\n"); \
+ return (retval); \
+ } \
} while(0)
-
#define PROC_PRIMARY_OR_RET() do { \
if (rte_eal_process_type() != RTE_PROC_PRIMARY) { \
- PMD_DEBUG_TRACE("Cannot run %s in secondary processes\n", \
- __func__); \
+ PMD_DEBUG_TRACE("Cannot run in secondary processes\n"); \
return; \
} \
} while(0)
enum rte_eth_event_type event; /**< Interrupt event type */
};
+/* Direction selectors for set_queue_stats_mapping(): whether the
+ * queue being mapped to a stats counter is a TX or an RX queue. */
+enum {
+	STAT_QMAP_TX = 0,
+	STAT_QMAP_RX
+};
+
static inline void
rte_eth_dev_data_alloc(void)
{
{
struct rte_eth_dev *eth_dev;
- if (nb_ports == RTE_MAX_ETHPORTS)
+ if (nb_ports == RTE_MAX_ETHPORTS) {
+ PMD_DEBUG_TRACE("Reached maximum number of ethernet ports\n");
return NULL;
+ }
if (rte_eth_dev_data == NULL)
rte_eth_dev_data_alloc();
if (eth_dev == NULL)
return -ENOMEM;
-
if (rte_eal_process_type() == RTE_PROC_PRIMARY){
eth_dev->data->dev_private = rte_zmalloc("ethdev private structure",
eth_drv->dev_private_size,
CACHE_LINE_SIZE);
if (eth_dev->data->dev_private == NULL)
- return -ENOMEM;
+ rte_panic("Cannot allocate memzone for private port data\n");
}
eth_dev->pci_dev = pci_dev;
eth_dev->driver = eth_drv;
return (nb_ports);
}
+/*
+ * (Re)size dev->data->rx_queues to hold exactly nb_queues queue pointers.
+ * First call allocates a zeroed array; subsequent calls release any queues
+ * beyond the new count via the driver's rx_queue_release op, realloc the
+ * array, and zero the newly added slots when growing.
+ *
+ * Returns 0 on success, -ENOTSUP if the driver provides no
+ * rx_queue_release op, -ENOMEM on allocation failure.
+ *
+ * NOTE(review): if rte_realloc() fails after queues were already released,
+ * the stale queue pointers remain in dev->data->rx_queues and
+ * nb_rx_queues is left at its old value -- confirm callers treat the
+ * port as unusable on that error path.
+ */
+static int
+rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
+{
+	uint16_t old_nb_queues = dev->data->nb_rx_queues;
+	void **rxq;
+	unsigned i;
+
+	/* resizing requires the driver to be able to free per-queue state */
+	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);
+
+	if (dev->data->rx_queues == NULL) {
+		/* first-time setup: zeroed array of queue pointers */
+		dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
+				sizeof(dev->data->rx_queues[0]) * nb_queues,
+				CACHE_LINE_SIZE);
+		if (dev->data->rx_queues == NULL) {
+			dev->data->nb_rx_queues = 0;
+			return -(ENOMEM);
+		}
+	} else {
+		rxq = dev->data->rx_queues;
+
+		/* release queues being dropped before shrinking the array */
+		for (i = nb_queues; i < old_nb_queues; i++)
+			(*dev->dev_ops->rx_queue_release)(rxq[i]);
+		rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
+				CACHE_LINE_SIZE);
+		if (rxq == NULL)
+			return -(ENOMEM);
+
+		/* zero only the slots added when growing */
+		if (nb_queues > old_nb_queues)
+			memset(rxq + old_nb_queues, 0,
+				sizeof(rxq[0]) * (nb_queues - old_nb_queues));
+
+		dev->data->rx_queues = rxq;
+
+	}
+	dev->data->nb_rx_queues = nb_queues;
+	return (0);
+}
+
+/*
+ * (Re)size dev->data->tx_queues to hold exactly nb_queues queue pointers.
+ * Mirror image of rte_eth_dev_rx_queue_config(): first call allocates a
+ * zeroed array; later calls release dropped queues via the driver's
+ * tx_queue_release op, realloc, and zero newly added slots.
+ *
+ * Returns 0 on success, -ENOTSUP if the driver provides no
+ * tx_queue_release op, -ENOMEM on allocation failure.
+ *
+ * NOTE(review): same caveat as the RX variant -- on rte_realloc()
+ * failure the already-released queue pointers stay in
+ * dev->data->tx_queues; confirm callers abandon the port then.
+ */
+static int
+rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
+{
+	uint16_t old_nb_queues = dev->data->nb_tx_queues;
+	void **txq;
+	unsigned i;
+
+	/* resizing requires the driver to be able to free per-queue state */
+	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);
+
+	if (dev->data->tx_queues == NULL) {
+		/* first-time setup: zeroed array of queue pointers */
+		dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
+				sizeof(dev->data->tx_queues[0]) * nb_queues,
+				CACHE_LINE_SIZE);
+		if (dev->data->tx_queues == NULL) {
+			dev->data->nb_tx_queues = 0;
+			return -(ENOMEM);
+		}
+	} else {
+		txq = dev->data->tx_queues;
+
+		/* release queues being dropped before shrinking the array */
+		for (i = nb_queues; i < old_nb_queues; i++)
+			(*dev->dev_ops->tx_queue_release)(txq[i]);
+		txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
+				CACHE_LINE_SIZE);
+		if (txq == NULL)
+			return -(ENOMEM);
+
+		/* zero only the slots added when growing */
+		if (nb_queues > old_nb_queues)
+			memset(txq + old_nb_queues, 0,
+				sizeof(txq[0]) * (nb_queues - old_nb_queues));
+
+		dev->data->tx_queues = txq;
+
+	}
+	dev->data->nb_tx_queues = nb_queues;
+	return (0);
+}
+
int
rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
const struct rte_eth_conf *dev_conf)
/* This function is only safe when called from the primary process
* in a multi-process setup*/
- PROC_PRIMARY_OR_ERR();
+ PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
if (port_id >= nb_ports || port_id >= RTE_MAX_ETHPORTS) {
PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
if (dev->data->dev_started) {
PMD_DEBUG_TRACE(
- "port %d must be stopped to allow configuration", port_id);
- return -EBUSY;
+ "port %d must be stopped to allow configuration\n", port_id);
+ return (-EBUSY);
}
/*
*/
(*dev->dev_ops->dev_infos_get)(dev, &dev_info);
if (nb_rx_q > dev_info.max_rx_queues) {
- PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_queues=%d > %d",
+ PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_queues=%d > %d\n",
port_id, nb_rx_q, dev_info.max_rx_queues);
return (-EINVAL);
}
if (nb_rx_q == 0) {
- PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_q == 0", port_id);
+ PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_q == 0\n", port_id);
return (-EINVAL);
}
if (nb_tx_q > dev_info.max_tx_queues) {
- PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_queues=%d > %d",
+ PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_queues=%d > %d\n",
port_id, nb_tx_q, dev_info.max_tx_queues);
return (-EINVAL);
}
if (nb_tx_q == 0) {
- PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_q == 0", port_id);
+ PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_q == 0\n", port_id);
return (-EINVAL);
}
if (dev_conf->rxmode.max_rx_pkt_len >
dev_info.max_rx_pktlen) {
PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
- " > max valid value %u",
+ " > max valid value %u\n",
port_id,
(unsigned)dev_conf->rxmode.max_rx_pkt_len,
(unsigned)dev_info.max_rx_pktlen);
return (-EINVAL);
}
+ else if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN) {
+ PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
+ " < min valid value %u\n",
+ port_id,
+ (unsigned)dev_conf->rxmode.max_rx_pkt_len,
+ (unsigned)ETHER_MIN_LEN);
+ return (-EINVAL);
+ }
} else
/* Use default value */
dev->data->dev_conf.rxmode.max_rx_pkt_len = ETHER_MAX_LEN;
if (nb_rx_q != ETH_VMDQ_DCB_NUM_QUEUES) {
PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB, nb_rx_q "
- "!= %d",
+ "!= %d\n",
port_id, ETH_VMDQ_DCB_NUM_QUEUES);
return (-EINVAL);
}
if (! (conf->nb_queue_pools == ETH_16_POOLS ||
conf->nb_queue_pools == ETH_32_POOLS)) {
PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB selected, "
- "nb_queue_pools != %d or nb_queue_pools "
- "!= %d",
+ "nb_queue_pools must be %d or %d\n",
port_id, ETH_16_POOLS, ETH_32_POOLS);
return (-EINVAL);
}
}
+ if (dev_conf->txmode.mq_mode == ETH_VMDQ_DCB_TX) {
+ const struct rte_eth_vmdq_dcb_tx_conf *conf;
- diag = (*dev->dev_ops->dev_configure)(dev, nb_rx_q, nb_tx_q);
+ if (nb_tx_q != ETH_VMDQ_DCB_NUM_QUEUES) {
+ PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB, nb_tx_q "
+ "!= %d\n",
+ port_id, ETH_VMDQ_DCB_NUM_QUEUES);
+ return (-EINVAL);
+ }
+ conf = &(dev_conf->tx_adv_conf.vmdq_dcb_tx_conf);
+ if (! (conf->nb_queue_pools == ETH_16_POOLS ||
+ conf->nb_queue_pools == ETH_32_POOLS)) {
+ PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB selected, "
+ "nb_queue_pools != %d or nb_queue_pools "
+ "!= %d\n",
+ port_id, ETH_16_POOLS, ETH_32_POOLS);
+ return (-EINVAL);
+ }
+ }
+
+ /* For DCB mode check our configuration before we go further */
+ if (dev_conf->rxmode.mq_mode == ETH_DCB_RX) {
+ const struct rte_eth_dcb_rx_conf *conf;
+
+ if (nb_rx_q != ETH_DCB_NUM_QUEUES) {
+ PMD_DEBUG_TRACE("ethdev port_id=%d DCB, nb_rx_q "
+ "!= %d\n",
+ port_id, ETH_DCB_NUM_QUEUES);
+ return (-EINVAL);
+ }
+ conf = &(dev_conf->rx_adv_conf.dcb_rx_conf);
+ if (! (conf->nb_tcs == ETH_4_TCS ||
+ conf->nb_tcs == ETH_8_TCS)) {
+ PMD_DEBUG_TRACE("ethdev port_id=%d DCB selected, "
+ "nb_tcs != %d or nb_tcs "
+ "!= %d\n",
+ port_id, ETH_4_TCS, ETH_8_TCS);
+ return (-EINVAL);
+ }
+ }
+
+ if (dev_conf->txmode.mq_mode == ETH_DCB_TX) {
+ const struct rte_eth_dcb_tx_conf *conf;
+
+ if (nb_tx_q != ETH_DCB_NUM_QUEUES) {
+ PMD_DEBUG_TRACE("ethdev port_id=%d DCB, nb_tx_q "
+ "!= %d\n",
+ port_id, ETH_DCB_NUM_QUEUES);
+ return (-EINVAL);
+ }
+ conf = &(dev_conf->tx_adv_conf.dcb_tx_conf);
+ if (! (conf->nb_tcs == ETH_4_TCS ||
+ conf->nb_tcs == ETH_8_TCS)) {
+ PMD_DEBUG_TRACE("ethdev port_id=%d DCB selected, "
+ "nb_tcs != %d or nb_tcs "
+ "!= %d\n",
+ port_id, ETH_4_TCS, ETH_8_TCS);
+ return (-EINVAL);
+ }
+ }
+
+ /*
+ * Setup new number of RX/TX queues and reconfigure device.
+ */
+ diag = rte_eth_dev_rx_queue_config(dev, nb_rx_q);
+ if (diag != 0) {
+ PMD_DEBUG_TRACE("port%d rte_eth_dev_rx_queue_config = %d\n",
+ port_id, diag);
+ return diag;
+ }
+
+ diag = rte_eth_dev_tx_queue_config(dev, nb_tx_q);
if (diag != 0) {
- rte_free(dev->data->rx_queues);
- rte_free(dev->data->tx_queues);
+ PMD_DEBUG_TRACE("port%d rte_eth_dev_tx_queue_config = %d\n",
+ port_id, diag);
+ rte_eth_dev_rx_queue_config(dev, 0);
+ return diag;
}
- return diag;
+
+ diag = (*dev->dev_ops->dev_configure)(dev);
+ if (diag != 0) {
+ PMD_DEBUG_TRACE("port%d dev_configure = %d\n",
+ port_id, diag);
+ rte_eth_dev_rx_queue_config(dev, 0);
+ rte_eth_dev_tx_queue_config(dev, 0);
+ return diag;
+ }
+
+ return 0;
}
static void
/* This function is only safe when called from the primary process
* in a multi-process setup*/
- PROC_PRIMARY_OR_ERR();
+ PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
if (port_id >= nb_ports) {
PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
}
dev = &rte_eth_devices[port_id];
+
FUNC_PTR_OR_RET(*dev->dev_ops->dev_close);
dev->data->dev_started = 0;
(*dev->dev_ops->dev_close)(dev);
/* This function is only safe when called from the primary process
* in a multi-process setup*/
- PROC_PRIMARY_OR_ERR();
+ PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
if (port_id >= nb_ports) {
PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
if (dev->data->dev_started) {
PMD_DEBUG_TRACE(
- "port %d must be stopped to allow configuration", port_id);
+ "port %d must be stopped to allow configuration\n", port_id);
return -EBUSY;
}
/* This function is only safe when called from the primary process
* in a multi-process setup*/
- PROC_PRIMARY_OR_ERR();
+ PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
- if (port_id >= nb_ports) {
+ if (port_id >= RTE_MAX_ETHPORTS || port_id >= nb_ports) {
PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
return (-EINVAL);
}
if (dev->data->dev_started) {
PMD_DEBUG_TRACE(
- "port %d must be stopped to allow configuration", port_id);
+ "port %d must be stopped to allow configuration\n", port_id);
return -EBUSY;
}
(*dev->dev_ops->stats_reset)(dev);
}
+
+/*
+ * Common helper for rte_eth_dev_set_{rx,tx}_queue_stats_mapping():
+ * ask the driver to map queue queue_id of port port_id onto hardware
+ * statistics counter stat_idx. is_rx selects the direction
+ * (STAT_QMAP_RX / STAT_QMAP_TX).
+ *
+ * Returns -ENODEV for a bad port, -ENOTSUP if the driver does not
+ * implement queue_stats_mapping_set, otherwise the driver's return value.
+ *
+ * NOTE(review): queue_id and stat_idx are not range-checked here --
+ * presumably the driver validates them; confirm.
+ */
+static int
+set_queue_stats_mapping(uint8_t port_id, uint16_t queue_id, uint8_t stat_idx,
+		uint8_t is_rx)
+{
+	struct rte_eth_dev *dev;
+
+	if (port_id >= nb_ports) {
+		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+		return -ENODEV;
+	}
+	dev = &rte_eth_devices[port_id];
+
+	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
+	return (*dev->dev_ops->queue_stats_mapping_set)
+			(dev, queue_id, stat_idx, is_rx);
+}
+
+
+/*
+ * Map TX queue tx_queue_id of port port_id to statistics counter
+ * stat_idx. Thin wrapper around set_queue_stats_mapping() with the
+ * STAT_QMAP_TX direction selector; see that helper for return codes.
+ */
+int
+rte_eth_dev_set_tx_queue_stats_mapping(uint8_t port_id, uint16_t tx_queue_id,
+		uint8_t stat_idx)
+{
+	return set_queue_stats_mapping(port_id, tx_queue_id, stat_idx,
+			STAT_QMAP_TX);
+}
+
+
+/*
+ * Map RX queue rx_queue_id of port port_id to statistics counter
+ * stat_idx. Thin wrapper around set_queue_stats_mapping() with the
+ * STAT_QMAP_RX direction selector; see that helper for return codes.
+ */
+int
+rte_eth_dev_set_rx_queue_stats_mapping(uint8_t port_id, uint16_t rx_queue_id,
+		uint8_t stat_idx)
+{
+	return set_queue_stats_mapping(port_id, rx_queue_id, stat_idx,
+			STAT_QMAP_RX);
+}
+
+
void
rte_eth_dev_info_get(uint8_t port_id, struct rte_eth_dev_info *dev_info)
{
PMD_DEBUG_TRACE("port %d: vlan-filtering disabled\n", port_id);
return (-ENOSYS);
}
+
if (vlan_id > 4095) {
PMD_DEBUG_TRACE("(port_id=%d) invalid vlan_id=%u > 4095\n",
port_id, (unsigned) vlan_id);
return (-EINVAL);
}
-
FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);
(*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
return (0);
}
+/*
+ * Enable or disable VLAN stripping on one RX queue of a port.
+ *
+ * @param port_id      port identifier (must be < nb_ports)
+ * @param rx_queue_id  RX queue index (must be < dev's nb_rx_queues)
+ * @param on           non-zero to enable stripping, zero to disable
+ * @return 0 on success, -ENODEV for a bad port, -EINVAL for a bad
+ *         queue id, -ENOTSUP if the driver lacks vlan_strip_queue_set.
+ */
+int
+rte_eth_dev_set_vlan_strip_on_queue(uint8_t port_id, uint16_t rx_queue_id, int on)
+{
+	struct rte_eth_dev *dev;
+
+	if (port_id >= nb_ports) {
+		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+		return (-ENODEV);
+	}
+
+	dev = &rte_eth_devices[port_id];
+	if (rx_queue_id >= dev->data->nb_rx_queues) {
+		/* report the offending queue id, not the port id */
+		PMD_DEBUG_TRACE("Invalid rx_queue_id=%d\n", rx_queue_id);
+		return (-EINVAL);
+	}
+
+	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
+	(*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);
+
+	return (0);
+}
+
+/*
+ * Set the VLAN TPID (Tag Protocol ID / EtherType) the port uses when
+ * matching VLAN tags, by delegating to the driver's vlan_tpid_set op.
+ *
+ * @return 0 on success, -ENODEV for a bad port, -ENOTSUP if the driver
+ *         does not implement vlan_tpid_set.
+ */
+int
+rte_eth_dev_set_vlan_ether_type(uint8_t port_id, uint16_t tpid)
+{
+	struct rte_eth_dev *dev;
+
+	if (port_id >= nb_ports) {
+		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+		return (-ENODEV);
+	}
+
+	dev = &rte_eth_devices[port_id];
+	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);
+	(*dev->dev_ops->vlan_tpid_set)(dev, tpid);
+
+	return (0);
+}
+
+/*
+ * Apply a new VLAN offload configuration (strip / filter / extend bits
+ * of offload_mask) to a port. The current configuration in
+ * dev_conf.rxmode is diffed against the request and only the changed
+ * options are pushed to the driver via vlan_offload_set.
+ *
+ * @return 0 on success (including when nothing changed), -ENODEV for a
+ *         bad port, -ENOTSUP if the driver lacks vlan_offload_set.
+ */
+int
+rte_eth_dev_set_vlan_offload(uint8_t port_id, int offload_mask)
+{
+	struct rte_eth_dev *dev;
+	int ret = 0;
+	int mask = 0;
+	int cur, org = 0;
+
+	if (port_id >= nb_ports) {
+		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+		return (-ENODEV);
+	}
+
+	dev = &rte_eth_devices[port_id];
+
+	/* check which option changed by application; !! normalizes each
+	 * requested/current setting to 0 or 1 before comparing */
+	cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
+	org = !!(dev->data->dev_conf.rxmode.hw_vlan_strip);
+	if (cur != org){
+		dev->data->dev_conf.rxmode.hw_vlan_strip = (uint8_t)cur;
+		mask |= ETH_VLAN_STRIP_MASK;
+	}
+
+	cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
+	org = !!(dev->data->dev_conf.rxmode.hw_vlan_filter);
+	if (cur != org){
+		dev->data->dev_conf.rxmode.hw_vlan_filter = (uint8_t)cur;
+		mask |= ETH_VLAN_FILTER_MASK;
+	}
+
+	cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
+	org = !!(dev->data->dev_conf.rxmode.hw_vlan_extend);
+	if (cur != org){
+		dev->data->dev_conf.rxmode.hw_vlan_extend = (uint8_t)cur;
+		mask |= ETH_VLAN_EXTEND_MASK;
+	}
+
+	/* no option changed: nothing to push to the driver */
+	if(mask == 0)
+		return ret;
+
+	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
+	(*dev->dev_ops->vlan_offload_set)(dev, mask);
+
+	return ret;
+}
+
+/*
+ * Read back the port's current VLAN offload settings from
+ * dev_conf.rxmode (as maintained by rte_eth_dev_set_vlan_offload()).
+ *
+ * @return -ENODEV for a bad port; otherwise a bitwise OR of the
+ *         ETH_VLAN_{STRIP,FILTER,EXTEND}_OFFLOAD flags currently enabled.
+ */
+int
+rte_eth_dev_get_vlan_offload(uint8_t port_id)
+{
+	struct rte_eth_dev *dev;
+	int ret = 0;
+
+	if (port_id >= nb_ports) {
+		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+		return (-ENODEV);
+	}
+
+	dev = &rte_eth_devices[port_id];
+
+	if (dev->data->dev_conf.rxmode.hw_vlan_strip)
+		ret |= ETH_VLAN_STRIP_OFFLOAD ;
+
+	if (dev->data->dev_conf.rxmode.hw_vlan_filter)
+		ret |= ETH_VLAN_FILTER_OFFLOAD ;
+
+	if (dev->data->dev_conf.rxmode.hw_vlan_extend)
+		ret |= ETH_VLAN_EXTEND_OFFLOAD ;
+
+	return ret;
+}
+
+
int
rte_eth_dev_fdir_add_signature_filter(uint8_t port_id,
struct rte_fdir_filter *fdir_filter,
|| fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
&& (fdir_filter->port_src || fdir_filter->port_dst)) {
PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
- "None l4type source & destinations ports " \
- "should be null!");
+ "None l4type, source & destinations ports " \
+ "should be null!\n");
return (-EINVAL);
}
FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_add_signature_filter, -ENOTSUP);
- if (*dev->dev_ops->fdir_add_signature_filter)
- return (*dev->dev_ops->fdir_add_signature_filter)(dev,
- fdir_filter,
- queue);
-
- PMD_DEBUG_TRACE("port %d: FDIR feature not supported\n", port_id);
- return (-ENOTSUP);
+ return (*dev->dev_ops->fdir_add_signature_filter)(dev, fdir_filter,
+ queue);
}
int
|| fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
&& (fdir_filter->port_src || fdir_filter->port_dst)) {
PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
- "None l4type source & destinations ports " \
- "should be null!");
+ "None l4type, source & destinations ports " \
+ "should be null!\n");
return (-EINVAL);
}
FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_update_signature_filter, -ENOTSUP);
- if (*dev->dev_ops->fdir_update_signature_filter)
- return (*dev->dev_ops->fdir_update_signature_filter)(dev,
- fdir_filter,
- queue);
-
+ return (*dev->dev_ops->fdir_update_signature_filter)(dev, fdir_filter,
+ queue);
- PMD_DEBUG_TRACE("port %d: FDIR feature not supported\n", port_id);
- return (-ENOTSUP);
}
int
&& (fdir_filter->port_src || fdir_filter->port_dst)) {
PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
"None l4type source & destinations ports " \
- "should be null!");
+ "should be null!\n");
return (-EINVAL);
}
FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_remove_signature_filter, -ENOTSUP);
- if (*dev->dev_ops->fdir_remove_signature_filter)
- return (*dev->dev_ops->fdir_remove_signature_filter)(dev,
- fdir_filter);
-
- PMD_DEBUG_TRACE("port %d: FDIR feature not supported\n", port_id);
- return (-ENOTSUP);
+ return (*dev->dev_ops->fdir_remove_signature_filter)(dev, fdir_filter);
}
int
}
FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_infos_get, -ENOTSUP);
- if (*dev->dev_ops->fdir_infos_get) {
- (*dev->dev_ops->fdir_infos_get)(dev, fdir);
- return (0);
- }
- PMD_DEBUG_TRACE("port %d: FDIR feature not supported\n", port_id);
- return (-ENOTSUP);
+ (*dev->dev_ops->fdir_infos_get)(dev, fdir);
+ return (0);
}
int
|| fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
&& (fdir_filter->port_src || fdir_filter->port_dst)) {
PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
- "None l4type source & destinations ports " \
- "should be null!");
+ "None l4type, source & destinations ports " \
+ "should be null!\n");
return (-EINVAL);
}
return (-ENOTSUP);
FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_add_perfect_filter, -ENOTSUP);
- if (*dev->dev_ops->fdir_add_perfect_filter)
- return (*dev->dev_ops->fdir_add_perfect_filter)(dev, fdir_filter,
+ return (*dev->dev_ops->fdir_add_perfect_filter)(dev, fdir_filter,
soft_id, queue,
drop);
-
- PMD_DEBUG_TRACE("port %d: FDIR feature not supported\n", port_id);
- return (-ENOTSUP);
}
int
|| fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
&& (fdir_filter->port_src || fdir_filter->port_dst)) {
PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
- "None l4type source & destinations ports " \
- "should be null!");
+ "None l4type, source & destinations ports " \
+ "should be null!\n");
return (-EINVAL);
}
return (-ENOTSUP);
FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_update_perfect_filter, -ENOTSUP);
- if (*dev->dev_ops->fdir_update_perfect_filter)
- return (*dev->dev_ops->fdir_update_perfect_filter)(dev,
- fdir_filter,
- soft_id,
- queue,
- drop);
-
- PMD_DEBUG_TRACE("port %d: FDIR feature not supported\n", port_id);
- return (-ENOTSUP);
+ return (*dev->dev_ops->fdir_update_perfect_filter)(dev, fdir_filter,
+ soft_id, queue, drop);
}
int
|| fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
&& (fdir_filter->port_src || fdir_filter->port_dst)) {
PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
- "None l4type source & destinations ports " \
- "should be null!");
+ "None l4type, source & destinations ports " \
+ "should be null!\n");
return (-EINVAL);
}
return (-ENOTSUP);
FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_remove_perfect_filter, -ENOTSUP);
- if (*dev->dev_ops->fdir_remove_perfect_filter)
- return (*dev->dev_ops->fdir_remove_perfect_filter)(dev,
- fdir_filter,
- soft_id);
-
- PMD_DEBUG_TRACE("port %d: FDIR feature not supported\n", port_id);
- return -ENOTSUP;
+ return (*dev->dev_ops->fdir_remove_perfect_filter)(dev, fdir_filter,
+ soft_id);
}
int
return (-ENOSYS);
}
- /* IPv6 mask are not supported */
- if (fdir_mask->src_ipv6_mask)
- return (-ENOTSUP);
-
FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_set_masks, -ENOTSUP);
- if (*dev->dev_ops->fdir_set_masks)
- return (*dev->dev_ops->fdir_set_masks)(dev, fdir_mask);
-
- PMD_DEBUG_TRACE("port %d: FDIR feature not supported\n",
- port_id);
- return -ENOTSUP;
+ return (*dev->dev_ops->fdir_set_masks)(dev, fdir_mask);
}
int
}
dev = &rte_eth_devices[port_id];
-
FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
return ((*dev->dev_ops->dev_led_on)(dev));
}
}
dev = &rte_eth_devices[port_id];
-
FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
return ((*dev->dev_ops->dev_led_off)(dev));
}
return (-ENODEV);
}
dev = &rte_eth_devices[port_id];
-
FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);
+
if (is_zero_ether_addr(addr)) {
PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n", port_id);
return (-EINVAL);
return (-ENODEV);
}
dev = &rte_eth_devices[port_id];
-
FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);
+
index = get_mac_addr_index(port_id, addr);
if (index == 0) {
PMD_DEBUG_TRACE("port %d: Cannot remove default MAC address\n", port_id);
return 0;
}
dev = &rte_eth_devices[port_id];
-
FUNC_PTR_OR_ERR_RET(*dev->rx_pkt_burst, -ENOTSUP);
if (queue_id >= dev->data->nb_rx_queues) {
PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", queue_id);