diff --git a/lib/librte_ether/rte_ethdev.c b/lib/librte_ether/rte_ethdev.c
index 4bb27ceebe..25bbd9c329 100644
--- a/lib/librte_ether/rte_ethdev.c
+++ b/lib/librte_ether/rte_ethdev.c
@@ -1,13 +1,13 @@
 /*-
  *   BSD LICENSE
- * 
+ *
  *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
  *   All rights reserved.
- * 
+ *
  *   Redistribution and use in source and binary forms, with or without
  *   modification, are permitted provided that the following conditions
  *   are met:
- * 
+ *
  *     * Redistributions of source code must retain the above copyright
  *       notice, this list of conditions and the following disclaimer.
  *     * Redistributions in binary form must reproduce the above copyright
@@ -17,7 +17,7 @@
  *     * Neither the name of Intel Corporation nor the names of its
  *       contributors may be used to endorse or promote products derived
  *       from this software without specific prior written permission.
- * 
+ *
  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
@@ -261,9 +261,7 @@ rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
 	void **rxq;
 	unsigned i;
 
-	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);
-
-	if (dev->data->rx_queues == NULL) {
+	if (dev->data->rx_queues == NULL) { /* first time configuration */
 		dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
 				sizeof(dev->data->rx_queues[0]) * nb_queues,
 				CACHE_LINE_SIZE);
@@ -271,7 +269,9 @@ rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
 			dev->data->nb_rx_queues = 0;
 			return -(ENOMEM);
 		}
-	} else {
+	} else { /* re-configure */
+		FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);
+
 		rxq = dev->data->rx_queues;
 
 		for (i = nb_queues; i < old_nb_queues; i++)
@@ -292,6 +292,110 @@ rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
 	return (0);
 }
 
+int
+rte_eth_dev_rx_queue_start(uint8_t port_id, uint16_t rx_queue_id)
+{
+	struct rte_eth_dev *dev;
+
+	/* This function is only safe when called from the primary process
+	 * in a multi-process setup*/
+	PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
+
+	if (port_id >= nb_ports) {
+		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+		return -EINVAL;
+	}
+
+	dev = &rte_eth_devices[port_id];
+	if (rx_queue_id >= dev->data->nb_rx_queues) {
+		PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
+		return -EINVAL;
+	}
+
+	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);
+
+	return dev->dev_ops->rx_queue_start(dev, rx_queue_id);
+
+}
+
+int
+rte_eth_dev_rx_queue_stop(uint8_t port_id, uint16_t rx_queue_id)
+{
+	struct rte_eth_dev *dev;
+
+	/* This function is only safe when called from the primary process
+	 * in a multi-process setup*/
+	PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
+
+	if (port_id >= nb_ports) {
+		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+		return -EINVAL;
+	}
+
+	dev = &rte_eth_devices[port_id];
+	if (rx_queue_id >= dev->data->nb_rx_queues) {
+		PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
+		return -EINVAL;
+	}
+
+	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);
+
+	return dev->dev_ops->rx_queue_stop(dev, rx_queue_id);
+
+}
+
+int 
+rte_eth_dev_tx_queue_start(uint8_t port_id, uint16_t tx_queue_id)
+{
+	struct rte_eth_dev *dev;
+
+	/* This function is only safe when called from the primary process
+	 * in a multi-process setup*/
+	PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
+
+	if (port_id >= nb_ports) {
+		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+		return -EINVAL;
+	}
+
+	dev = &rte_eth_devices[port_id];
+	if (tx_queue_id >= dev->data->nb_tx_queues) {
+		PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
+		return -EINVAL;
+	}
+
+	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);
+
+	return dev->dev_ops->tx_queue_start(dev, tx_queue_id);
+
+}
+
+int
+rte_eth_dev_tx_queue_stop(uint8_t port_id, uint16_t tx_queue_id)
+{
+	struct rte_eth_dev *dev;
+
+	/* This function is only safe when called from the primary process
+	 * in a multi-process setup*/
+	PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
+
+	if (port_id >= nb_ports) {
+		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+		return -EINVAL;
+	}
+
+	dev = &rte_eth_devices[port_id];
+	if (tx_queue_id >= dev->data->nb_tx_queues) {
+		PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
+		return -EINVAL;
+	}
+
+	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);
+
+	return dev->dev_ops->tx_queue_stop(dev, tx_queue_id);
+
+}
+
 static int
 rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
 {
@@ -299,9 +403,7 @@ rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
 	void **txq;
 	unsigned i;
 
-	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);
-
-	if (dev->data->tx_queues == NULL) {
+	if (dev->data->tx_queues == NULL) { /* first time configuration */
 		dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
 				sizeof(dev->data->tx_queues[0]) * nb_queues,
 				CACHE_LINE_SIZE);
@@ -309,7 +411,9 @@ rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
 			dev->data->nb_tx_queues = 0;
 			return -(ENOMEM);
 		}
-	} else {
+	} else { /* re-configure */
+		FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);
+
 		txq = dev->data->tx_queues;
 
 		for (i = nb_queues; i < old_nb_queues; i++)
@@ -338,13 +442,14 @@ rte_eth_dev_check_mq_mode(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
 
 	if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
 		/* check multi-queue mode */
-		if ((dev_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) || 
+		if ((dev_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) ||
 		    (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) ||
 		    (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB_RSS) ||
 		    (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB)) {
 			/* SRIOV only works in VMDq enable mode */
-			PMD_DEBUG_TRACE("ethdev port_id=%hhu SRIOV active, "
-					"wrong VMDQ mq_mode rx %u tx %u\n",
+			PMD_DEBUG_TRACE("ethdev port_id=%" PRIu8
+					" SRIOV active, "
+					"wrong VMDQ mq_mode rx %u tx %u\n",
 					port_id,
 					dev_conf->rxmode.mq_mode,
 					dev_conf->txmode.mq_mode);
@@ -356,8 +461,9 @@ rte_eth_dev_check_mq_mode(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
 		case ETH_MQ_RX_VMDQ_DCB:
 		case ETH_MQ_RX_VMDQ_DCB_RSS:
 			/* DCB/RSS VMDQ in SRIOV mode, not implement yet */
-			PMD_DEBUG_TRACE("ethdev port_id=%hhu SRIOV active, "
-					"unsupported VMDQ mq_mode rx %u\n",
+			PMD_DEBUG_TRACE("ethdev port_id=%" PRIu8
+					" SRIOV active, "
+					"unsupported VMDQ mq_mode rx %u\n",
 					port_id, dev_conf->rxmode.mq_mode);
 			return (-EINVAL);
 		default: /* ETH_MQ_RX_VMDQ_ONLY or ETH_MQ_RX_NONE */
@@ -371,8 +477,9 @@ rte_eth_dev_check_mq_mode(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
 		switch (dev_conf->txmode.mq_mode) {
 		case ETH_MQ_TX_VMDQ_DCB:
 			/* DCB VMDQ in SRIOV mode, not implement yet */
-			PMD_DEBUG_TRACE("ethdev port_id=%hhu SRIOV active, "
-					"unsupported VMDQ mq_mode tx %u\n",
+			PMD_DEBUG_TRACE("ethdev port_id=%" PRIu8
+					" SRIOV active, "
+					"unsupported VMDQ mq_mode tx %u\n",
 					port_id, dev_conf->txmode.mq_mode);
 			return (-EINVAL);
 		default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
@@ -387,7 +494,7 @@ rte_eth_dev_check_mq_mode(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
 		if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) ||
 		    (nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) {
 			PMD_DEBUG_TRACE("ethdev port_id=%d SRIOV active, "
-				"queue number must less equal to %d\n", 
+				"queue number must less equal to %d\n",
 				port_id, RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool);
 			return (-EINVAL);
 		}
@@ -395,7 +502,7 @@ rte_eth_dev_check_mq_mode(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
 	/* For vmdb+dcb mode check our configuration before we go further */
 	if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
 		const struct rte_eth_vmdq_dcb_conf *conf;
-		
+
 		if (nb_rx_q != ETH_VMDQ_DCB_NUM_QUEUES) {
 			PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB, nb_rx_q "
 					"!= %d\n",
@@ -413,7 +520,7 @@ rte_eth_dev_check_mq_mode(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
 	}
 	if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
 		const struct rte_eth_vmdq_dcb_tx_conf *conf;
-		
+
 		if (nb_tx_q != ETH_VMDQ_DCB_NUM_QUEUES) {
 			PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB, nb_tx_q "
 					"!= %d\n",
@@ -430,11 +537,11 @@ rte_eth_dev_check_mq_mode(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
 			return (-EINVAL);
 		}
 	}
-	
+
 	/* For DCB mode check our configuration before we go further */
 	if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
 		const struct rte_eth_dcb_rx_conf *conf;
-		
+
 		if (nb_rx_q != ETH_DCB_NUM_QUEUES) {
 			PMD_DEBUG_TRACE("ethdev port_id=%d DCB, nb_rx_q "
 					"!= %d\n",
@@ -451,10 +558,10 @@ rte_eth_dev_check_mq_mode(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
 			return (-EINVAL);
 		}
 	}
-	
+
 	if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
 		const struct rte_eth_dcb_tx_conf *conf;
-		
+
 		if (nb_tx_q != ETH_DCB_NUM_QUEUES) {
 			PMD_DEBUG_TRACE("ethdev port_id=%d DCB, nb_tx_q "
 					"!= %d\n",
@@ -654,12 +761,20 @@ rte_eth_dev_start(uint8_t port_id)
 	PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
 
 	if (port_id >= nb_ports) {
-		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+		PMD_DEBUG_TRACE("Invalid port_id=%" PRIu8 "\n", port_id);
 		return (-EINVAL);
 	}
 
 	dev = &rte_eth_devices[port_id];
 	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
+
+	if (dev->data->dev_started != 0) {
+		PMD_DEBUG_TRACE("Device with port_id=%" PRIu8
+			" already started\n",
+			port_id);
+		return (0);
+	}
+
 	diag = (*dev->dev_ops->dev_start)(dev);
 	if (diag == 0)
 		dev->data->dev_started = 1;
@@ -681,12 +796,20 @@ rte_eth_dev_stop(uint8_t port_id)
 	PROC_PRIMARY_OR_RET();
 
 	if (port_id >= nb_ports) {
-		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+		PMD_DEBUG_TRACE("Invalid port_id=%" PRIu8 "\n", port_id);
 		return;
 	}
 
 	dev = &rte_eth_devices[port_id];
 	FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
+
+	if (dev->data->dev_started == 0) {
+		PMD_DEBUG_TRACE("Device with port_id=%" PRIu8
+			" already stopped\n",
+			port_id);
+		return;
+	}
+
 	dev->data->dev_started = 0;
 	(*dev->dev_ops->dev_stop)(dev);
 }
@@ -1037,6 +1160,7 @@ rte_eth_dev_info_get(uint8_t port_id, struct rte_eth_dev_info *dev_info)
 	/* Default device offload capabilities to zero */
 	dev_info->rx_offload_capa = 0;
 	dev_info->tx_offload_capa = 0;
+	dev_info->if_index = 0;
 	FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
 	(*dev->dev_ops->dev_infos_get)(dev, dev_info);
 	dev_info->pci_dev = dev->pci_dev;
@@ -1128,7 +1252,7 @@ rte_eth_dev_set_vlan_offload(uint8_t port_id, int offload_mask)
 	int ret = 0;
 	int mask = 0;
 	int cur, org = 0;
-	
+
 	if (port_id >= nb_ports) {
 		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
 		return (-ENODEV);
@@ -1143,7 +1267,7 @@ rte_eth_dev_set_vlan_offload(uint8_t port_id, int offload_mask)
 		dev->data->dev_conf.rxmode.hw_vlan_strip = (uint8_t)cur;
 		mask |= ETH_VLAN_STRIP_MASK;
 	}
-	
+
 	cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
 	org = !!(dev->data->dev_conf.rxmode.hw_vlan_filter);
 	if (cur != org){
@@ -1161,7 +1285,7 @@ rte_eth_dev_set_vlan_offload(uint8_t port_id, int offload_mask)
 	/*no change*/
 	if(mask == 0)
 		return ret;
-	
+
 	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
 	(*dev->dev_ops->vlan_offload_set)(dev, mask);
 
@@ -1500,6 +1624,7 @@ int
 rte_eth_dev_rss_reta_update(uint8_t port_id, struct rte_eth_rss_reta *reta_conf)
 {
 	struct rte_eth_dev *dev;
+	uint16_t max_rxq;
 	uint8_t i,j;
 
 	if (port_id >= nb_ports) {
@@ -1513,26 +1638,29 @@ rte_eth_dev_rss_reta_update(uint8_t port_id, struct rte_eth_rss_reta *reta_conf)
 		return (-EINVAL);
 	}
 
+	dev = &rte_eth_devices[port_id];
+	max_rxq = (dev->data->nb_rx_queues <= ETH_RSS_RETA_MAX_QUEUE) ?
+		dev->data->nb_rx_queues : ETH_RSS_RETA_MAX_QUEUE;
 
 	if (reta_conf->mask_lo != 0) {
 		for (i = 0; i < ETH_RSS_RETA_NUM_ENTRIES/2; i++) {
 			if ((reta_conf->mask_lo & (1ULL << i)) &&
-				(reta_conf->reta[i] >= ETH_RSS_RETA_MAX_QUEUE)) {
+				(reta_conf->reta[i] >= max_rxq)) {
 				PMD_DEBUG_TRACE("RETA hash index output"
 					"configration for port=%d,invalid"
 					"queue=%d\n",port_id,reta_conf->reta[i]);
 				return (-EINVAL);
-			} 
+			}
 		}
 	}
 
 	if (reta_conf->mask_hi != 0) {
-		for (i = 0; i< ETH_RSS_RETA_NUM_ENTRIES/2; i++) { 
+		for (i = 0; i< ETH_RSS_RETA_NUM_ENTRIES/2; i++) {
 			j = (uint8_t)(i + ETH_RSS_RETA_NUM_ENTRIES/2);
 
 			/* Check if the max entry >= 128 */
-			if ((reta_conf->mask_hi & (1ULL << i)) && 
-				(reta_conf->reta[j] >= ETH_RSS_RETA_MAX_QUEUE)) {
+			if ((reta_conf->mask_hi & (1ULL << i)) &&
+				(reta_conf->reta[j] >= max_rxq)) {
 				PMD_DEBUG_TRACE("RETA hash index output"
 					"configration for port=%d,invalid"
 					"queue=%d\n",port_id,reta_conf->reta[j]);
@@ -1542,17 +1670,15 @@ rte_eth_dev_rss_reta_update(uint8_t port_id, struct rte_eth_rss_reta *reta_conf)
 		}
 	}
 
-	dev = &rte_eth_devices[port_id];
-
 	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
 	return (*dev->dev_ops->reta_update)(dev, reta_conf);
 }
 
-int 
+int
 rte_eth_dev_rss_reta_query(uint8_t port_id, struct rte_eth_rss_reta *reta_conf)
 {
 	struct rte_eth_dev *dev;
-	
+
 	if (port_id >= nb_ports) {
 		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
 		return (-ENODEV);
@@ -1568,6 +1694,43 @@ rte_eth_dev_rss_reta_query(uint8_t port_id, struct rte_eth_rss_reta *reta_conf)
 	return (*dev->dev_ops->reta_query)(dev, reta_conf);
 }
 
+int
+rte_eth_dev_rss_hash_update(uint8_t port_id, struct rte_eth_rss_conf *rss_conf)
+{
+	struct rte_eth_dev *dev;
+	uint16_t rss_hash_protos;
+
+	if (port_id >= nb_ports) {
+		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+		return (-ENODEV);
+	}
+	rss_hash_protos = rss_conf->rss_hf;
+	if ((rss_hash_protos != 0) &&
+			((rss_hash_protos & ETH_RSS_PROTO_MASK) == 0)) {
+		PMD_DEBUG_TRACE("Invalid rss_hash_protos=0x%x\n",
+				rss_hash_protos);
+		return (-EINVAL);
+	}
+	dev = &rte_eth_devices[port_id];
+	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
+	return (*dev->dev_ops->rss_hash_update)(dev, rss_conf);
+}
+
+int
+rte_eth_dev_rss_hash_conf_get(uint8_t port_id,
+			      struct rte_eth_rss_conf *rss_conf)
+{
+	struct rte_eth_dev *dev;
+
+	if (port_id >= nb_ports) {
+		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+		return (-ENODEV);
+	}
+	dev = &rte_eth_devices[port_id];
+	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
+	return (*dev->dev_ops->rss_hash_conf_get)(dev, rss_conf);
+}
+
 int
 rte_eth_led_on(uint8_t port_id)
 {
@@ -1636,7 +1799,7 @@ rte_eth_dev_mac_addr_add(uint8_t port_id, struct ether_addr *addr,
 	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);
 
 	if (is_zero_ether_addr(addr)) {
-		PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n", 
+		PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
 			port_id);
 		return (-EINVAL);
 	}
@@ -1644,7 +1807,7 @@ rte_eth_dev_mac_addr_add(uint8_t port_id, struct ether_addr *addr,
 		PMD_DEBUG_TRACE("pool id must be 0-%d\n",ETH_64_POOLS - 1);
 		return (-EINVAL);
 	}
-	
+
 	index = get_mac_addr_index(port_id, addr);
 	if (index < 0) {
 		index = get_mac_addr_index(port_id, &null_mac_addr);
@@ -1655,7 +1818,7 @@ rte_eth_dev_mac_addr_add(uint8_t port_id, struct ether_addr *addr,
 		}
 	} else {
 		pool_mask = dev->data->mac_pool_sel[index];
-		
+
 		/* Check if both MAC address and pool is alread there, and do nothing */
 		if (pool_mask & (1ULL << pool))
 			return 0;
@@ -1666,7 +1829,7 @@ rte_eth_dev_mac_addr_add(uint8_t port_id, struct ether_addr *addr,
 
 	/* Update address in NIC data structure */
 	ether_addr_copy(addr, &dev->data->mac_addrs[index]);
-	
+
 	/* Update pool bitmap in NIC data structure */
 	dev->data->mac_pool_sel[index] |= (1ULL << pool);
 
@@ -1702,7 +1865,7 @@ rte_eth_dev_mac_addr_remove(uint8_t port_id, struct ether_addr *addr)
 	return 0;
 }
 
-int 
+int
 rte_eth_dev_set_vf_rxmode(uint8_t port_id, uint16_t vf,
 				uint16_t rx_mode, uint8_t on)
 {
@@ -1714,8 +1877,8 @@ rte_eth_dev_set_vf_rxmode(uint8_t port_id, uint16_t vf,
 		PMD_DEBUG_TRACE("set VF RX mode:Invalid port_id=%d\n",
 			port_id);
 		return (-ENODEV);
-	} 
-	
+	}
+
 	dev = &rte_eth_devices[port_id];
 	rte_eth_dev_info_get(port_id, &dev_info);
 
@@ -1728,7 +1891,7 @@ rte_eth_dev_set_vf_rxmode(uint8_t port_id, uint16_t vf,
 
 	if (rx_mode == 0) {
 		PMD_DEBUG_TRACE("set VF RX mode:mode mask ca not be zero\n");
-		return (-EINVAL); 
+		return (-EINVAL);
 	}
 	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rx_mode, -ENOTSUP);
 	return (*dev->dev_ops->set_vf_rx_mode)(dev, vf, rx_mode, on);
@@ -1764,16 +1927,16 @@ rte_eth_dev_uc_hash_table_set(uint8_t port_id, struct ether_addr *addr,
 	int index;
 	int ret;
 	struct rte_eth_dev *dev;
-	
+
 	if (port_id >= nb_ports) {
 		PMD_DEBUG_TRACE("unicast hash setting:Invalid port_id=%d\n",
 			port_id);
 		return (-ENODEV);
 	}
-	
+
 	dev = &rte_eth_devices[port_id];
 	if (is_zero_ether_addr(addr)) {
-		PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n", 
+		PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
 			port_id);
 		return (-EINVAL);
 	}
@@ -1782,22 +1945,22 @@ rte_eth_dev_uc_hash_table_set(uint8_t port_id, struct ether_addr *addr,
 	/* Check if it's already there, and do nothing */
 	if ((index >= 0) && (on))
 		return 0;
-	
+
 	if (index < 0) {
 		if (!on) {
-			PMD_DEBUG_TRACE("port %d: the MAC address was not" 
+			PMD_DEBUG_TRACE("port %d: the MAC address was not"
 				"set in UTA\n", port_id);
 			return (-EINVAL);
 		}
-	
+
 		index = get_hash_mac_addr_index(port_id, &null_mac_addr);
 		if (index < 0) {
 			PMD_DEBUG_TRACE("port %d: MAC address array full\n",
 				port_id);
 			return (-ENOSPC);
 		}
-	} 
-	
+	}
+
 	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP);
 	ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
 	if (ret == 0) {
@@ -1805,11 +1968,11 @@ rte_eth_dev_uc_hash_table_set(uint8_t port_id, struct ether_addr *addr,
 		/* Update address in NIC data structure */
 		if (on)
 			ether_addr_copy(addr,
 					&dev->data->hash_mac_addrs[index]);
-		else 
+		else
 			ether_addr_copy(&null_mac_addr,
 					&dev->data->hash_mac_addrs[index]);
 	}
-	
+
 	return ret;
 }
 
@@ -1817,20 +1980,20 @@ int
 rte_eth_dev_uc_all_hash_table_set(uint8_t port_id, uint8_t on)
 {
 	struct rte_eth_dev *dev;
-	
+
 	if (port_id >= nb_ports) {
 		PMD_DEBUG_TRACE("unicast hash setting:Invalid port_id=%d\n",
 			port_id);
 		return (-ENODEV);
 	}
-	
+
 	dev = &rte_eth_devices[port_id];
 
 	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
 	return (*dev->dev_ops->uc_all_hash_table_set)(dev, on);
 }
 
-int 
+int
 rte_eth_dev_set_vf_rx(uint8_t port_id,uint16_t vf, uint8_t on)
 {
 	uint16_t num_vfs;
@@ -1841,22 +2004,22 @@ rte_eth_dev_set_vf_rx(uint8_t port_id,uint16_t vf, uint8_t on)
 		PMD_DEBUG_TRACE("Invalid port_id=%d\n",
 			port_id);
 		return (-ENODEV);
 	}
-	
+
 	dev = &rte_eth_devices[port_id];
 	rte_eth_dev_info_get(port_id, &dev_info);
-	
+
 	num_vfs = dev_info.max_vfs;
-	if (vf > num_vfs)
+	if (vf > num_vfs) {
 		PMD_DEBUG_TRACE("port %d: invalid vf id\n", port_id);
 		return (-EINVAL);
-	} 
-	
+	}
+
 	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rx, -ENOTSUP);
 	return (*dev->dev_ops->set_vf_rx)(dev, vf,on);
 }
 
-int 
+int
 rte_eth_dev_set_vf_tx(uint8_t port_id,uint16_t vf, uint8_t on)
 {
 	uint16_t num_vfs;
@@ -1867,23 +2030,23 @@ rte_eth_dev_set_vf_tx(uint8_t port_id,uint16_t vf, uint8_t on)
 		PMD_DEBUG_TRACE("set pool tx:Invalid port_id=%d\n",
 			port_id);
 		return (-ENODEV);
 	}
-	
+
 	dev = &rte_eth_devices[port_id];
 	rte_eth_dev_info_get(port_id, &dev_info);
 
 	num_vfs = dev_info.max_vfs;
-	if (vf > num_vfs)
+	if (vf > num_vfs) {
 		PMD_DEBUG_TRACE("set pool tx:invalid pool id=%d\n", vf);
 		return (-EINVAL);
 	}
-	
+
 	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_tx, -ENOTSUP);
 	return (*dev->dev_ops->set_vf_tx)(dev, vf,on);
 }
 
 int
-rte_eth_dev_set_vf_vlan_filter(uint8_t port_id, uint16_t vlan_id, 
+rte_eth_dev_set_vf_vlan_filter(uint8_t port_id, uint16_t vlan_id,
 			       uint64_t vf_mask,uint8_t vlan_on)
 {
 	struct rte_eth_dev *dev;
@@ -1906,14 +2069,14 @@ rte_eth_dev_set_vf_vlan_filter(uint8_t port_id, uint16_t vlan_id,
 		PMD_DEBUG_TRACE("VF VLAN filter:pool_mask can not be 0\n");
 		return (-EINVAL);
 	}
-	
+
 	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_vlan_filter, -ENOTSUP);
 	return (*dev->dev_ops->set_vf_vlan_filter)(dev, vlan_id, vf_mask,
 						vlan_on);
 }
 
 int
-rte_eth_mirror_rule_set(uint8_t port_id, 
+rte_eth_mirror_rule_set(uint8_t port_id,
 			struct rte_eth_vmdq_mirror_conf *mirror_conf,
 			uint8_t rule_id, uint8_t on)
 {
@@ -1923,25 +2086,25 @@ rte_eth_mirror_rule_set(uint8_t port_id,
 		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
 		return (-ENODEV);
 	}
-	
+
 	if (mirror_conf->rule_type_mask == 0) {
 		PMD_DEBUG_TRACE("mirror rule type can not be 0.\n");
 		return (-EINVAL);
 	}
-	
+
 	if (mirror_conf->dst_pool >= ETH_64_POOLS) {
 		PMD_DEBUG_TRACE("Invalid dst pool, pool id must"
 			"be 0-%d\n",ETH_64_POOLS - 1);
 		return (-EINVAL);
 	}
-	
-	if ((mirror_conf->rule_type_mask & ETH_VMDQ_POOL_MIRROR) && 
+
+	if ((mirror_conf->rule_type_mask & ETH_VMDQ_POOL_MIRROR) &&
 	    (mirror_conf->pool_mask == 0)) {
 		PMD_DEBUG_TRACE("Invalid mirror pool, pool mask can not"
-				"be 0.\n"); 
+				"be 0.\n");
 		return (-EINVAL);
 	}
-	
+
 	if(rule_id >= ETH_VMDQ_NUM_MIRROR_RULE)
 	{
 		PMD_DEBUG_TRACE("Invalid rule_id, rule_id must be 0-%d\n",
@@ -2031,7 +2194,7 @@ rte_eth_rx_queue_count(uint8_t port_id, uint16_t queue_id)
 	}
 	dev = &rte_eth_devices[port_id];
 	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_count, -ENOTSUP);
-	return (*dev->dev_ops->rx_queue_count)(dev, queue_id); 
+	return (*dev->dev_ops->rx_queue_count)(dev, queue_id);
 }
 
 int
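
The hunks above introduce a per-queue start/stop API (rte_eth_dev_rx_queue_start/_stop and the TX equivalents). A minimal usage sketch follows; the helper name and the surrounding setup are hypothetical, the port is assumed to have been configured and started elsewhere, and the call must run in the primary process (the new functions return -E_RTE_SECONDARY otherwise). PMDs that do not fill in the corresponding dev_ops entry make the calls return -ENOTSUP.

/* Hypothetical helper: quiesce one RX queue, adjust it, and restart it
 * while the port's other queues keep receiving. */
#include <errno.h>
#include <stdio.h>
#include <rte_ethdev.h>

static int
recycle_rx_queue(uint8_t port_id, uint16_t queue_id)
{
	int ret;

	ret = rte_eth_dev_rx_queue_stop(port_id, queue_id);
	if (ret == -ENOTSUP) {
		printf("port %d: PMD lacks per-queue stop\n", port_id);
		return ret;
	}
	if (ret != 0)
		return ret;

	/* ... queue is stopped here: swap mempools, rewrite filters, ... */

	return rte_eth_dev_rx_queue_start(port_id, queue_id);
}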
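The new rte_eth_dev_rss_hash_update/rte_eth_dev_rss_hash_conf_get pair lets RSS be reprogrammed at runtime. The sketch below is an assumption-laden illustration: the rte_eth_rss_conf field names (rss_key, rss_hf) and the ETH_RSS_IPV4/ETH_RSS_IPV4_TCP flags follow this generation of rte_ethdev.h, and a NULL rss_key is taken to mean "keep the currently programmed hash key".

/* Hypothetical helper: restrict the RSS hash to IPv4 traffic only. */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <rte_ethdev.h>

static int
rss_limit_to_ipv4(uint8_t port_id)
{
	struct rte_eth_rss_conf rss_conf;
	int ret;

	memset(&rss_conf, 0, sizeof(rss_conf));	/* rss_key == NULL */
	ret = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf);
	if (ret != 0)
		return ret;	/* -ENODEV or -ENOTSUP */

	rss_conf.rss_key = NULL;	/* keep the current key */
	rss_conf.rss_hf = ETH_RSS_IPV4 | ETH_RSS_IPV4_TCP;
	ret = rte_eth_dev_rss_hash_update(port_id, &rss_conf);
	if (ret == -ENOTSUP)
		printf("port %d: PMD does not implement RSS hash update\n",
			port_id);
	return ret;
}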