diff --git a/drivers/net/fm10k/fm10k_ethdev.c b/drivers/net/fm10k/fm10k_ethdev.c
index 7f5c852c97..421266ba32 100644
--- a/drivers/net/fm10k/fm10k_ethdev.c
+++ b/drivers/net/fm10k/fm10k_ethdev.c
@@ -55,9 +55,6 @@
 #define CHARS_PER_UINT32 (sizeof(uint32_t))
 #define BIT_MASK_PER_UINT32 ((1 << CHARS_PER_UINT32) - 1)
 
-#define FM10K_SIMPLE_TX_FLAG ((uint32_t)ETH_TXQ_FLAGS_NOMULTSEGS | \
-				ETH_TXQ_FLAGS_NOOFFLOADS)
-
 static void fm10k_close_mbx_service(struct fm10k_hw *hw);
 static void fm10k_dev_promiscuous_enable(struct rte_eth_dev *dev);
 static void fm10k_dev_promiscuous_disable(struct rte_eth_dev *dev);
@@ -132,6 +129,64 @@ fm10k_mbx_unlock(struct fm10k_hw *hw)
 	rte_spinlock_unlock(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back));
 }
 
+/* Stubs needed for linkage when vPMD is disabled */
+int __attribute__((weak))
+fm10k_rx_vec_condition_check(__rte_unused struct rte_eth_dev *dev)
+{
+	return -1;
+}
+
+uint16_t __attribute__((weak))
+fm10k_recv_pkts_vec(
+	__rte_unused void *rx_queue,
+	__rte_unused struct rte_mbuf **rx_pkts,
+	__rte_unused uint16_t nb_pkts)
+{
+	return 0;
+}
+
+uint16_t __attribute__((weak))
+fm10k_recv_scattered_pkts_vec(
+	__rte_unused void *rx_queue,
+	__rte_unused struct rte_mbuf **rx_pkts,
+	__rte_unused uint16_t nb_pkts)
+{
+	return 0;
+}
+
+int __attribute__((weak))
+fm10k_rxq_vec_setup(__rte_unused struct fm10k_rx_queue *rxq)
+{
+	return -1;
+}
+
+void __attribute__((weak))
+fm10k_rx_queue_release_mbufs_vec(
+	__rte_unused struct fm10k_rx_queue *rxq)
+{
+	return;
+}
+
+void __attribute__((weak))
+fm10k_txq_vec_setup(__rte_unused struct fm10k_tx_queue *txq)
+{
+	return;
+}
+
+int __attribute__((weak))
+fm10k_tx_vec_condition_check(__rte_unused struct fm10k_tx_queue *txq)
+{
+	return -1;
+}
+
+uint16_t __attribute__((weak))
+fm10k_xmit_pkts_vec(__rte_unused void *tx_queue,
+	__rte_unused struct rte_mbuf **tx_pkts,
+	__rte_unused uint16_t nb_pkts)
+{
+	return 0;
+}
+
 /*
  * reset queue to initial state, allocate software buffers used when starting
  * device.
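The stubs above rely on weak linkage: every vector entry point always has a definition, so a scalar-only build links cleanly, and when the vPMD object file is compiled in, its strong definitions override the stubs at link time. Below is a minimal standalone sketch of the same pattern, assuming a GCC/clang toolchain; the names vec_condition_check and recv_pkts_vec are illustrative stand-ins, not the fm10k symbols.

/* weak_stub_demo.c: weak fallbacks that are always compiled in.
 * Build scalar-only: gcc weak_stub_demo.c
 * A separate object file with strong definitions of these symbols
 * would silently replace them at link time. */
#include <stdint.h>
#include <stdio.h>

/* Weak default: report that no vector path is available. */
int __attribute__((weak))
vec_condition_check(void)
{
	return -1;
}

/* Weak default burst function: receives no packets. */
uint16_t __attribute__((weak))
recv_pkts_vec(void **rx_pkts, uint16_t nb_pkts)
{
	(void)rx_pkts;
	(void)nb_pkts;
	return 0;
}

int
main(void)
{
	/* Same dispatch idea as fm10k_set_rx_function(): take the
	 * vector path only when the condition check reports success. */
	if (vec_condition_check() == 0)
		printf("using vector Rx burst\n");
	else
		printf("using scalar Rx burst\n");
	return 0;
}

With only the stubs linked, the condition check returns -1 and the selection logic falls back to the scalar burst functions, so the callers need no #ifdef on the vPMD build option.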
@@ -1294,12 +1350,12 @@ fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) if (macvlan->nb_queue_pools > 0) { /* VMDQ mode */ PMD_INIT_LOG(ERR, "Cannot change VLAN filter in VMDQ mode"); - return (-EINVAL); + return -EINVAL; } if (vlan_id > ETH_VLAN_ID_MAX) { PMD_INIT_LOG(ERR, "Invalid vlan_id: must be < 4096"); - return (-EINVAL); + return -EINVAL; } vid_idx = FM10K_VFTA_IDX(vlan_id); @@ -1311,7 +1367,7 @@ fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) if (!on && !(macvlan->vfta[vid_idx] & vid_bit)) { PMD_INIT_LOG(ERR, "Invalid vlan_id: not existing " "in the VLAN filter table"); - return (-EINVAL); + return -EINVAL; } fm10k_mbx_lock(hw); @@ -1319,7 +1375,7 @@ fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) fm10k_mbx_unlock(hw); if (result != FM10K_SUCCESS) { PMD_INIT_LOG(ERR, "VLAN update failed: %d", result); - return (-EIO); + return -EIO; } for (mac_index = 0; (mac_index < FM10K_MAX_MACADDR_NUM) && @@ -1340,7 +1396,7 @@ fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) } if (result != FM10K_SUCCESS) { PMD_INIT_LOG(ERR, "MAC address update failed: %d", result); - return (-EIO); + return -EIO; } if (on) { @@ -1523,7 +1579,7 @@ handle_rxconf(struct fm10k_rx_queue *q, const struct rte_eth_rxconf *conf) rx_free_thresh, FM10K_RX_FREE_THRESH_MAX(q), FM10K_RX_FREE_THRESH_MIN(q), FM10K_RX_FREE_THRESH_DIV(q)); - return (-EINVAL); + return -EINVAL; } q->alloc_thresh = rx_free_thresh; @@ -1579,7 +1635,7 @@ fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id, /* make sure the mempool element size can account for alignment. */ if (!mempool_element_size_valid(mp)) { PMD_INIT_LOG(ERR, "Error : Mempool element size is too small"); - return (-EINVAL); + return -EINVAL; } /* make sure a valid number of descriptors have been requested */ @@ -1591,7 +1647,7 @@ fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id, "and a multiple of %u", nb_desc, (uint32_t)FM10K_MAX_RX_DESC, FM10K_MIN_RX_DESC, FM10K_MULT_RX_DESC); - return (-EINVAL); + return -EINVAL; } /* @@ -1609,7 +1665,7 @@ fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id, socket_id); if (q == NULL) { PMD_INIT_LOG(ERR, "Cannot allocate queue structure"); - return (-ENOMEM); + return -ENOMEM; } /* setup queue */ @@ -1621,7 +1677,7 @@ fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id, q->tail_ptr = (volatile uint32_t *) &((uint32_t *)hw->hw_addr)[FM10K_RDT(queue_id)]; if (handle_rxconf(q, conf)) - return (-EINVAL); + return -EINVAL; /* allocate memory for the software ring */ q->sw_ring = rte_zmalloc_socket("fm10k sw ring", @@ -1630,7 +1686,7 @@ fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id, if (q->sw_ring == NULL) { PMD_INIT_LOG(ERR, "Cannot allocate software ring"); rte_free(q); - return (-ENOMEM); + return -ENOMEM; } /* @@ -1645,7 +1701,7 @@ fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id, PMD_INIT_LOG(ERR, "Cannot allocate hardware ring"); rte_free(q->sw_ring); rte_free(q); - return (-ENOMEM); + return -ENOMEM; } q->hw_ring = mz->addr; q->hw_ring_phys_addr = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr); @@ -1697,7 +1753,7 @@ handle_txconf(struct fm10k_tx_queue *q, const struct rte_eth_txconf *conf) tx_free_thresh, FM10K_TX_FREE_THRESH_MAX(q), FM10K_TX_FREE_THRESH_MIN(q), FM10K_TX_FREE_THRESH_DIV(q)); - return (-EINVAL); + return -EINVAL; } q->free_thresh = tx_free_thresh; @@ -1721,7 +1777,7 @@ handle_txconf(struct fm10k_tx_queue *q, const struct rte_eth_txconf *conf) 
tx_rs_thresh, FM10K_TX_RS_THRESH_MAX(q), FM10K_TX_RS_THRESH_MIN(q), FM10K_TX_RS_THRESH_DIV(q)); - return (-EINVAL); + return -EINVAL; } q->rs_thresh = tx_rs_thresh; @@ -1749,7 +1805,7 @@ fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id, "and a multiple of %u", nb_desc, (uint32_t)FM10K_MAX_TX_DESC, FM10K_MIN_TX_DESC, FM10K_MULT_TX_DESC); - return (-EINVAL); + return -EINVAL; } /* @@ -1769,7 +1825,7 @@ fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id, socket_id); if (q == NULL) { PMD_INIT_LOG(ERR, "Cannot allocate queue structure"); - return (-ENOMEM); + return -ENOMEM; } /* setup queue */ @@ -1781,7 +1837,7 @@ fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id, q->tail_ptr = (volatile uint32_t *) &((uint32_t *)hw->hw_addr)[FM10K_TDT(queue_id)]; if (handle_txconf(q, conf)) - return (-EINVAL); + return -EINVAL; /* allocate memory for the software ring */ q->sw_ring = rte_zmalloc_socket("fm10k sw ring", @@ -1790,7 +1846,7 @@ fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id, if (q->sw_ring == NULL) { PMD_INIT_LOG(ERR, "Cannot allocate software ring"); rte_free(q); - return (-ENOMEM); + return -ENOMEM; } /* @@ -1805,7 +1861,7 @@ fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id, PMD_INIT_LOG(ERR, "Cannot allocate hardware ring"); rte_free(q->sw_ring); rte_free(q); - return (-ENOMEM); + return -ENOMEM; } q->hw_ring = mz->addr; q->hw_ring_phys_addr = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr); @@ -1822,7 +1878,7 @@ fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id, PMD_INIT_LOG(ERR, "Cannot allocate RS bit tracker"); rte_free(q->sw_ring); rte_free(q); - return (-ENOMEM); + return -ENOMEM; } dev->data->tx_queues[queue_id] = q; @@ -2394,21 +2450,24 @@ fm10k_set_tx_function(struct rte_eth_dev *dev) for (i = 0; i < dev->data->nb_tx_queues; i++) { txq = dev->data->tx_queues[i]; - if ((txq->txq_flags & FM10K_SIMPLE_TX_FLAG) != - FM10K_SIMPLE_TX_FLAG) { + /* Check if Vector Tx is satisfied */ + if (fm10k_tx_vec_condition_check(txq)) { use_sse = 0; break; } } if (use_sse) { + PMD_INIT_LOG(DEBUG, "Use vector Tx func"); for (i = 0; i < dev->data->nb_tx_queues; i++) { txq = dev->data->tx_queues[i]; fm10k_txq_vec_setup(txq); } dev->tx_pkt_burst = fm10k_xmit_pkts_vec; - } else + } else { dev->tx_pkt_burst = fm10k_xmit_pkts; + PMD_INIT_LOG(DEBUG, "Use regular Tx func"); + } } static void __attribute__((cold)) @@ -2427,11 +2486,18 @@ fm10k_set_rx_function(struct rte_eth_dev *dev) dev->rx_pkt_burst = fm10k_recv_pkts_vec; } else if (dev->data->scattered_rx) dev->rx_pkt_burst = fm10k_recv_scattered_pkts; + else + dev->rx_pkt_burst = fm10k_recv_pkts; rx_using_sse = (dev->rx_pkt_burst == fm10k_recv_scattered_pkts_vec || dev->rx_pkt_burst == fm10k_recv_pkts_vec); + if (rx_using_sse) + PMD_INIT_LOG(DEBUG, "Use vector Rx func"); + else + PMD_INIT_LOG(DEBUG, "Use regular Rx func"); + for (i = 0; i < dev->data->nb_rx_queues; i++) { struct fm10k_rx_queue *rxq = dev->data->rx_queues[i];
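The fm10k_set_tx_function() hunk above swaps the old FM10K_SIMPLE_TX_FLAG test for a per-queue fm10k_tx_vec_condition_check(), and the choice is all-or-nothing: one ineligible queue forces the scalar burst for the whole port, since dev->tx_pkt_burst is a single device-wide pointer. A standalone sketch of that selection logic follows; struct txq, tx_vec_condition_check and the two burst functions here are illustrative stand-ins, not the driver's types.

#include <stdio.h>

#define NB_TX_QUEUES 4

struct txq {
	int offloads;	/* nonzero when the queue needs features vPMD lacks */
};

/* Per-queue eligibility check in the spirit of
 * fm10k_tx_vec_condition_check(): 0 when the queue can use the
 * vector path, negative otherwise. */
static int
tx_vec_condition_check(const struct txq *q)
{
	return q->offloads ? -1 : 0;
}

static void xmit_pkts(void)     { puts("scalar Tx burst"); }
static void xmit_pkts_vec(void) { puts("vector Tx burst"); }

int
main(void)
{
	struct txq queues[NB_TX_QUEUES] = { {0}, {0}, {1}, {0} };
	void (*tx_burst)(void) = xmit_pkts_vec;
	int i;

	/* All-or-nothing: a single ineligible queue disables the
	 * vector burst for every queue on the port. */
	for (i = 0; i < NB_TX_QUEUES; i++) {
		if (tx_vec_condition_check(&queues[i]) != 0) {
			tx_burst = xmit_pkts;
			break;
		}
	}

	tx_burst();	/* queue 2 has offloads set, so the scalar path prints */
	return 0;
}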