diff --git a/lib/librte_ether/rte_ethdev.c b/lib/librte_ether/rte_ethdev.c
index fbe2286895..f008dfa204 100644
--- a/lib/librte_ether/rte_ethdev.c
+++ b/lib/librte_ether/rte_ethdev.c
@@ -1,7 +1,7 @@
 /*-
  *   BSD LICENSE
  *
- *   Copyright(c) 2010-2013 Intel Corporation. All rights reserved.
+ *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
  *   All rights reserved.
  *
  *   Redistribution and use in source and binary forms, with or without
@@ -152,7 +152,7 @@ rte_eth_dev_data_alloc(void)
 		RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data));
 }
 
-static inline struct rte_eth_dev *
+struct rte_eth_dev *
 rte_eth_dev_allocate(void)
 {
 	struct rte_eth_dev *eth_dev;
@@ -1037,7 +1037,8 @@ rte_eth_dev_info_get(uint8_t port_id, struct rte_eth_dev_info *dev_info)
 	FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
 	(*dev->dev_ops->dev_infos_get)(dev, dev_info);
 	dev_info->pci_dev = dev->pci_dev;
-	dev_info->driver_name = dev->driver->pci_drv.name;
+	if (dev->driver)
+		dev_info->driver_name = dev->driver->pci_drv.name;
 }
 
 void
@@ -1618,10 +1619,11 @@ static struct ether_addr null_mac_addr = {{0, 0, 0, 0, 0, 0}};
 
 int
 rte_eth_dev_mac_addr_add(uint8_t port_id, struct ether_addr *addr,
-		uint32_t pool)
+			uint32_t pool)
 {
 	struct rte_eth_dev *dev;
 	int index;
+	uint64_t pool_mask;
 
 	if (port_id >= nb_ports) {
 		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
@@ -1631,19 +1633,29 @@ rte_eth_dev_mac_addr_add(uint8_t port_id, struct ether_addr *addr,
 	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);
 
 	if (is_zero_ether_addr(addr)) {
-		PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n", port_id);
+		PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
+			port_id);
 		return (-EINVAL);
 	}
-
-	/* Check if it's already there, and do nothing */
+	if (pool >= ETH_64_POOLS) {
+		PMD_DEBUG_TRACE("pool id must be 0-%d\n", ETH_64_POOLS - 1);
+		return (-EINVAL);
+	}
+
 	index = get_mac_addr_index(port_id, addr);
-	if (index >= 0)
-		return 0;
-
-	index = get_mac_addr_index(port_id, &null_mac_addr);
 	if (index < 0) {
-		PMD_DEBUG_TRACE("port %d: MAC address array full\n", port_id);
-		return (-ENOSPC);
+		index = get_mac_addr_index(port_id, &null_mac_addr);
+		if (index < 0) {
+			PMD_DEBUG_TRACE("port %d: MAC address array full\n",
+				port_id);
+			return (-ENOSPC);
+		}
+	} else {
+		pool_mask = dev->data->mac_pool_sel[index];
+
+		/* Check if both MAC address and pool are already there, and do nothing */
+		if (pool_mask & (1ULL << pool))
+			return 0;
 	}
 
 	/* Update NIC */
@@ -1651,6 +1663,9 @@ rte_eth_dev_mac_addr_add(uint8_t port_id, struct ether_addr *addr,
 
 	/* Update address in NIC data structure */
 	ether_addr_copy(addr, &dev->data->mac_addrs[index]);
+
+	/* Update pool bitmap in NIC data structure */
+	dev->data->mac_pool_sel[index] |= (1ULL << pool);
 
 	return 0;
 }
@@ -1684,6 +1699,282 @@ rte_eth_dev_mac_addr_remove(uint8_t port_id, struct ether_addr *addr)
 	return 0;
 }
 
+int
+rte_eth_dev_set_vf_rxmode(uint8_t port_id, uint16_t vf,
+			uint16_t rx_mode, uint8_t on)
+{
+	uint16_t num_vfs;
+	struct rte_eth_dev *dev;
+	struct rte_eth_dev_info dev_info;
+
+	if (port_id >= nb_ports) {
+		PMD_DEBUG_TRACE("set VF RX mode:Invalid port_id=%d\n",
+				port_id);
+		return (-ENODEV);
+	}
+
+	dev = &rte_eth_devices[port_id];
+	rte_eth_dev_info_get(port_id, &dev_info);
+
+	num_vfs = dev_info.max_vfs;
+	if (vf > num_vfs)
+	{
+		PMD_DEBUG_TRACE("set VF RX mode:invalid VF id %d\n", vf);
+		return (-EINVAL);
+	}
+	if (rx_mode == 0)
+	{
+		PMD_DEBUG_TRACE("set VF RX mode:mode mask cannot be zero\n");
+		return (-EINVAL);
+	}
+	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rx_mode, -ENOTSUP);
+	return (*dev->dev_ops->set_vf_rx_mode)(dev, vf, rx_mode, on);
+}
+
+/*
+ * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
+ * an empty spot.
+ */
+static inline int
+get_hash_mac_addr_index(uint8_t port_id, struct ether_addr *addr)
+{
+	struct rte_eth_dev_info dev_info;
+	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+	unsigned i;
+
+	rte_eth_dev_info_get(port_id, &dev_info);
+	if (!dev->data->hash_mac_addrs)
+		return -1;
+
+	for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
+		if (memcmp(addr, &dev->data->hash_mac_addrs[i],
+			ETHER_ADDR_LEN) == 0)
+			return i;
+
+	return -1;
+}
+
+int
+rte_eth_dev_uc_hash_table_set(uint8_t port_id, struct ether_addr *addr,
+				uint8_t on)
+{
+	int index;
+	int ret;
+	struct rte_eth_dev *dev;
+
+	if (port_id >= nb_ports) {
+		PMD_DEBUG_TRACE("unicast hash setting:Invalid port_id=%d\n",
+			port_id);
+		return (-ENODEV);
+	}
+
+	dev = &rte_eth_devices[port_id];
+	if (is_zero_ether_addr(addr)) {
+		PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
+			port_id);
+		return (-EINVAL);
+	}
+
+	index = get_hash_mac_addr_index(port_id, addr);
+	/* Check if it's already there, and do nothing */
+	if ((index >= 0) && (on))
+		return 0;
+
+	if (index < 0) {
+		if (!on) {
+			PMD_DEBUG_TRACE("port %d: the MAC address was not "
+				"set in UTA\n", port_id);
+			return (-EINVAL);
+		}
+
+		index = get_hash_mac_addr_index(port_id, &null_mac_addr);
+		if (index < 0) {
+			PMD_DEBUG_TRACE("port %d: MAC address array full\n",
+					port_id);
+			return (-ENOSPC);
+		}
+	}
+
+	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP);
+	ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
+	if (ret == 0) {
+		/* Update address in NIC data structure */
+		if (on)
+			ether_addr_copy(addr,
+				&dev->data->hash_mac_addrs[index]);
+		else
+			ether_addr_copy(&null_mac_addr,
+				&dev->data->hash_mac_addrs[index]);
+	}
+
+	return ret;
+}
+
+int
+rte_eth_dev_uc_all_hash_table_set(uint8_t port_id, uint8_t on)
+{
+	struct rte_eth_dev *dev;
+
+	if (port_id >= nb_ports) {
+		PMD_DEBUG_TRACE("unicast hash setting:Invalid port_id=%d\n",
+			port_id);
+		return (-ENODEV);
+	}
+
+	dev = &rte_eth_devices[port_id];
+
+	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
+	return (*dev->dev_ops->uc_all_hash_table_set)(dev, on);
+}
+
+int
+rte_eth_dev_set_vf_rx(uint8_t port_id, uint16_t vf, uint8_t on)
+{
+	uint16_t num_vfs;
+	struct rte_eth_dev *dev;
+	struct rte_eth_dev_info dev_info;
+
+	if (port_id >= nb_ports) {
+		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+		return (-ENODEV);
+	}
+
+	dev = &rte_eth_devices[port_id];
+	rte_eth_dev_info_get(port_id, &dev_info);
+
+	num_vfs = dev_info.max_vfs;
+	if (vf > num_vfs)
+	{
+		PMD_DEBUG_TRACE("port %d: invalid vf id\n", port_id);
+		return (-EINVAL);
+	}
+
+	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rx, -ENOTSUP);
+	return (*dev->dev_ops->set_vf_rx)(dev, vf, on);
+}
+
+int
+rte_eth_dev_set_vf_tx(uint8_t port_id, uint16_t vf, uint8_t on)
+{
+	uint16_t num_vfs;
+	struct rte_eth_dev *dev;
+	struct rte_eth_dev_info dev_info;
+
+	if (port_id >= nb_ports) {
+		PMD_DEBUG_TRACE("set pool tx:Invalid port_id=%d\n", port_id);
+		return (-ENODEV);
+	}
+
+	dev = &rte_eth_devices[port_id];
+	rte_eth_dev_info_get(port_id, &dev_info);
+
+	num_vfs = dev_info.max_vfs;
+	if (vf > num_vfs)
+	{
+		PMD_DEBUG_TRACE("set pool tx:invalid pool id=%d\n", vf);
+		return (-EINVAL);
+	}
+
+	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_tx, -ENOTSUP);
+	return (*dev->dev_ops->set_vf_tx)(dev, vf, on);
+}
+
+int
+rte_eth_dev_set_vf_vlan_filter(uint8_t port_id, uint16_t vlan_id,
+				uint64_t vf_mask, uint8_t vlan_on)
+{
+	struct rte_eth_dev *dev;
+
+	if (port_id >= nb_ports) {
+		PMD_DEBUG_TRACE("VF VLAN filter:invalid port id=%d\n",
+				port_id);
+		return (-ENODEV);
+	}
+	dev = &rte_eth_devices[port_id];
+
+	if (vlan_id > ETHER_MAX_VLAN_ID)
+	{
+		PMD_DEBUG_TRACE("VF VLAN filter:invalid VLAN id=%d\n",
+			vlan_id);
+		return (-EINVAL);
+	}
+	if (vf_mask == 0)
+	{
+		PMD_DEBUG_TRACE("VF VLAN filter:pool_mask cannot be 0\n");
+		return (-EINVAL);
+	}
+
+	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_vlan_filter, -ENOTSUP);
+	return (*dev->dev_ops->set_vf_vlan_filter)(dev, vlan_id,
+						vf_mask, vlan_on);
+}
+
+int
+rte_eth_mirror_rule_set(uint8_t port_id,
+			struct rte_eth_vmdq_mirror_conf *mirror_conf,
+			uint8_t rule_id, uint8_t on)
+{
+	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+
+	if (port_id >= nb_ports) {
+		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+		return (-ENODEV);
+	}
+
+	if (mirror_conf->rule_type_mask == 0) {
+		PMD_DEBUG_TRACE("mirror rule type cannot be 0.\n");
+		return (-EINVAL);
+	}
+
+	if (mirror_conf->dst_pool >= ETH_64_POOLS) {
+		PMD_DEBUG_TRACE("Invalid dst pool, pool id must "
+			"be 0-%d\n", ETH_64_POOLS - 1);
+		return (-EINVAL);
+	}
+
+	if ((mirror_conf->rule_type_mask & ETH_VMDQ_POOL_MIRROR) &&
+		(mirror_conf->pool_mask == 0)) {
+		PMD_DEBUG_TRACE("Invalid mirror pool, pool mask cannot "
+				"be 0.\n");
+		return (-EINVAL);
+	}
+
+	if (rule_id >= ETH_VMDQ_NUM_MIRROR_RULE)
+	{
+		PMD_DEBUG_TRACE("Invalid rule_id, rule_id must be 0-%d\n",
+			ETH_VMDQ_NUM_MIRROR_RULE - 1);
+		return (-EINVAL);
+	}
+
+	dev = &rte_eth_devices[port_id];
+	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_set, -ENOTSUP);
+
+	return (*dev->dev_ops->mirror_rule_set)(dev, mirror_conf, rule_id, on);
+}
+
+int
+rte_eth_mirror_rule_reset(uint8_t port_id, uint8_t rule_id)
+{
+	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+
+	if (port_id >= nb_ports) {
+		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+		return (-ENODEV);
+	}
+
+	if (rule_id >= ETH_VMDQ_NUM_MIRROR_RULE)
+	{
+		PMD_DEBUG_TRACE("Invalid rule_id, rule_id must be 0-%d\n",
+			ETH_VMDQ_NUM_MIRROR_RULE - 1);
+		return (-EINVAL);
+	}
+
+	dev = &rte_eth_devices[port_id];
+	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_reset, -ENOTSUP);
+
+	return (*dev->dev_ops->mirror_rule_reset)(dev, rule_id);
+}
+
 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
 uint16_t
 rte_eth_rx_burst(uint8_t port_id, uint16_t queue_id,
@@ -1861,3 +2152,182 @@ _rte_eth_dev_callback_process(struct rte_eth_dev *dev,
 	}
 	rte_spinlock_unlock(&rte_eth_dev_cb_lock);
 }
+#ifdef RTE_NIC_BYPASS
+int rte_eth_dev_bypass_init(uint8_t port_id)
+{
+	struct rte_eth_dev *dev;
+
+	if (port_id >= nb_ports) {
+		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+		return (-ENODEV);
+	}
+
+	if ((dev = &rte_eth_devices[port_id]) == NULL) {
+		PMD_DEBUG_TRACE("Invalid port device\n");
+		return (-ENODEV);
+	}
+
+	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_init, -ENOTSUP);
+	(*dev->dev_ops->bypass_init)(dev);
+	return 0;
+}
+
+int
+rte_eth_dev_bypass_state_show(uint8_t port_id, uint32_t *state)
+{
+	struct rte_eth_dev *dev;
+
+	if (port_id >= nb_ports) {
+		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+		return (-ENODEV);
+	}
+
+	if ((dev = &rte_eth_devices[port_id]) == NULL) {
PMD_DEBUG_TRACE("Invalid port device\n"); + return (-ENODEV); + } + FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_show, -ENOTSUP); + (*dev->dev_ops->bypass_state_show)(dev, state); + return 0; +} + +int +rte_eth_dev_bypass_state_set(uint8_t port_id, uint32_t *new_state) +{ + struct rte_eth_dev *dev; + + if (port_id >= nb_ports) { + PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); + return (-ENODEV); + } + + if ((dev= &rte_eth_devices[port_id]) == NULL) { + PMD_DEBUG_TRACE("Invalid port device\n"); + return (-ENODEV); + } + + FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_set, -ENOTSUP); + (*dev->dev_ops->bypass_state_set)(dev, new_state); + return 0; +} + +int +rte_eth_dev_bypass_event_show(uint8_t port_id, uint32_t event, uint32_t *state) +{ + struct rte_eth_dev *dev; + + if (port_id >= nb_ports) { + PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); + return (-ENODEV); + } + + if ((dev= &rte_eth_devices[port_id]) == NULL) { + PMD_DEBUG_TRACE("Invalid port device\n"); + return (-ENODEV); + } + + FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_show, -ENOTSUP); + (*dev->dev_ops->bypass_event_show)(dev, event, state); + return 0; +} + +int +rte_eth_dev_bypass_event_store(uint8_t port_id, uint32_t event, uint32_t state) +{ + struct rte_eth_dev *dev; + + if (port_id >= nb_ports) { + PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); + return (-ENODEV); + } + + if ((dev= &rte_eth_devices[port_id]) == NULL) { + PMD_DEBUG_TRACE("Invalid port device\n"); + return (-ENODEV); + } + + FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_event_set, -ENOTSUP); + (*dev->dev_ops->bypass_event_set)(dev, event, state); + return 0; +} + +int +rte_eth_dev_wd_timeout_store(uint8_t port_id, uint32_t timeout) +{ + struct rte_eth_dev *dev; + + if (port_id >= nb_ports) { + PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); + return (-ENODEV); + } + + if ((dev= &rte_eth_devices[port_id]) == NULL) { + PMD_DEBUG_TRACE("Invalid port device\n"); + return (-ENODEV); + } + + FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_timeout_set, -ENOTSUP); + (*dev->dev_ops->bypass_wd_timeout_set)(dev, timeout); + return 0; +} + +int +rte_eth_dev_bypass_ver_show(uint8_t port_id, uint32_t *ver) +{ + struct rte_eth_dev *dev; + + if (port_id >= nb_ports) { + PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); + return (-ENODEV); + } + + if ((dev= &rte_eth_devices[port_id]) == NULL) { + PMD_DEBUG_TRACE("Invalid port device\n"); + return (-ENODEV); + } + + FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_ver_show, -ENOTSUP); + (*dev->dev_ops->bypass_ver_show)(dev, ver); + return 0; +} + +int +rte_eth_dev_bypass_wd_timeout_show(uint8_t port_id, uint32_t *wd_timeout) +{ + struct rte_eth_dev *dev; + + if (port_id >= nb_ports) { + PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); + return (-ENODEV); + } + + if ((dev= &rte_eth_devices[port_id]) == NULL) { + PMD_DEBUG_TRACE("Invalid port device\n"); + return (-ENODEV); + } + + FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_timeout_show, -ENOTSUP); + (*dev->dev_ops->bypass_wd_timeout_show)(dev, wd_timeout); + return 0; +} + +int +rte_eth_dev_bypass_wd_reset(uint8_t port_id) +{ + struct rte_eth_dev *dev; + + if (port_id >= nb_ports) { + PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); + return (-ENODEV); + } + + if ((dev= &rte_eth_devices[port_id]) == NULL) { + PMD_DEBUG_TRACE("Invalid port device\n"); + return (-ENODEV); + } + + FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_reset, -ENOTSUP); + (*dev->dev_ops->bypass_wd_reset)(dev); + return 0; +} +#endif