X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fi40e%2Fi40e_rxtx.c;h=b07417024b6623dd97c9db07ef839f1badf3ec66;hb=09d9ae1ac9820c216991edc0d3c853fe28d37a66;hp=e1152ff0ea58f22ac64093182efddc9badf43d0d;hpb=054d1be48cc114c5d3bf87c7ebdf46703876e8d5;p=dpdk.git

diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index e1152ff0ea..b07417024b 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -69,7 +69,7 @@
 		I40E_TX_IEEE1588_TMST)
 
 #define I40E_TX_OFFLOAD_NOTSUP_MASK \
-		~(PKT_TX_OFFLOAD_MASK & I40E_TX_OFFLOAD_MASK)
+		(PKT_TX_OFFLOAD_MASK ^ I40E_TX_OFFLOAD_MASK)
 
 static inline void
 i40e_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union i40e_rx_desc *rxdp)
@@ -307,7 +307,7 @@ i40e_txd_enable_checksum(uint64_t ol_flags,
 		break;
 	case PKT_TX_SCTP_CKSUM:
 		*td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
-		*td_offset |= (sizeof(struct sctp_hdr) >> 2) <<
+		*td_offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
 				I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
 		break;
 	case PKT_TX_UDP_CKSUM:
@@ -889,17 +889,17 @@ i40e_recv_scattered_pkts(void *rx_queue,
 		 */
 		rxm->next = NULL;
 		if (unlikely(rxq->crc_len > 0)) {
-			first_seg->pkt_len -= ETHER_CRC_LEN;
-			if (rx_packet_len <= ETHER_CRC_LEN) {
+			first_seg->pkt_len -= RTE_ETHER_CRC_LEN;
+			if (rx_packet_len <= RTE_ETHER_CRC_LEN) {
 				rte_pktmbuf_free_seg(rxm);
 				first_seg->nb_segs--;
 				last_seg->data_len =
 					(uint16_t)(last_seg->data_len -
-					(ETHER_CRC_LEN - rx_packet_len));
+					(RTE_ETHER_CRC_LEN - rx_packet_len));
 				last_seg->next = NULL;
 			} else
 				rxm->data_len = (uint16_t)(rx_packet_len -
-							ETHER_CRC_LEN);
+							RTE_ETHER_CRC_LEN);
 		}
 
 		first_seg->port = rxq->port_id;
@@ -1718,7 +1718,7 @@ i40e_dev_rx_queue_setup_runtime(struct rte_eth_dev *dev,
 		(uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
 			   RTE_PKTMBUF_HEADROOM);
 	int use_scattered_rx =
-		((rxq->max_pkt_len + 2 * I40E_VLAN_TAG_SIZE) > buf_size);
+		(rxq->max_pkt_len > buf_size);
 
 	if (i40e_rx_queue_init(rxq) != I40E_SUCCESS) {
 		PMD_DRV_LOG(ERR,
@@ -1839,7 +1839,7 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	rxq->reg_idx = reg_idx;
 	rxq->port_id = dev->data->port_id;
 	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
-		rxq->crc_len = ETHER_CRC_LEN;
+		rxq->crc_len = RTE_ETHER_CRC_LEN;
 	else
 		rxq->crc_len = 0;
 	rxq->drop_en = rx_conf->rx_drop_en;
@@ -2169,15 +2169,30 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	 *  - tx_rs_thresh must be a divisor of the ring size.
 	 *  - tx_free_thresh must be greater than 0.
 	 *  - tx_free_thresh must be less than the size of the ring minus 3.
+	 *  - tx_free_thresh + tx_rs_thresh must not exceed nb_desc.
 	 *
 	 * One descriptor in the TX ring is used as a sentinel to avoid a H/W
 	 * race condition, hence the maximum threshold constraints. When set
 	 * to zero use default values.
 	 */
-	tx_rs_thresh = (uint16_t)((tx_conf->tx_rs_thresh) ?
-		tx_conf->tx_rs_thresh : DEFAULT_TX_RS_THRESH);
 	tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
 		tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH);
+	/* force tx_rs_thresh to adapt an aggresive tx_free_thresh */
+	tx_rs_thresh = (DEFAULT_TX_RS_THRESH + tx_free_thresh > nb_desc) ?
+		nb_desc - tx_free_thresh : DEFAULT_TX_RS_THRESH;
+	if (tx_conf->tx_rs_thresh > 0)
+		tx_rs_thresh = tx_conf->tx_rs_thresh;
+	if (tx_rs_thresh + tx_free_thresh > nb_desc) {
+		PMD_INIT_LOG(ERR, "tx_rs_thresh + tx_free_thresh must not "
+			     "exceed nb_desc. (tx_rs_thresh=%u "
+			     "tx_free_thresh=%u nb_desc=%u port=%d queue=%d)",
+			     (unsigned int)tx_rs_thresh,
+			     (unsigned int)tx_free_thresh,
+			     (unsigned int)nb_desc,
+			     (int)dev->data->port_id,
+			     (int)queue_idx);
+		return I40E_ERR_PARAM;
+	}
 	if (tx_rs_thresh >= (nb_desc - 2)) {
 		PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the "
 			     "number of TX descriptors minus 2. "
@@ -2423,13 +2438,13 @@ i40e_tx_queue_release_mbufs(struct i40e_tx_queue *txq)
 	struct rte_eth_dev *dev;
 	uint16_t i;
 
-	dev = &rte_eth_devices[txq->port_id];
-
 	if (!txq || !txq->sw_ring) {
-		PMD_DRV_LOG(DEBUG, "Pointer to rxq or sw_ring is NULL");
+		PMD_DRV_LOG(DEBUG, "Pointer to txq or sw_ring is NULL");
 		return;
 	}
 
+	dev = &rte_eth_devices[txq->port_id];
+
 	/**
 	 *  vPMD tx will not set sw_ring's mbuf to NULL after free,
 	 *  so need to free remains more carefully.
@@ -2619,23 +2634,23 @@ i40e_rx_queue_config(struct i40e_rx_queue *rxq)
 	len = hw->func_caps.rx_buf_chain_len * rxq->rx_buf_len;
 	rxq->max_pkt_len = RTE_MIN(len, data->dev_conf.rxmode.max_rx_pkt_len);
 	if (data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
-		if (rxq->max_pkt_len <= ETHER_MAX_LEN ||
+		if (rxq->max_pkt_len <= RTE_ETHER_MAX_LEN ||
 			rxq->max_pkt_len > I40E_FRAME_SIZE_MAX) {
 			PMD_DRV_LOG(ERR, "maximum packet length must "
 				    "be larger than %u and smaller than %u,"
 				    "as jumbo frame is enabled",
-				    (uint32_t)ETHER_MAX_LEN,
+				    (uint32_t)RTE_ETHER_MAX_LEN,
 				    (uint32_t)I40E_FRAME_SIZE_MAX);
 			return I40E_ERR_CONFIG;
 		}
 	} else {
-		if (rxq->max_pkt_len < ETHER_MIN_LEN ||
-			rxq->max_pkt_len > ETHER_MAX_LEN) {
+		if (rxq->max_pkt_len < RTE_ETHER_MIN_LEN ||
+			rxq->max_pkt_len > RTE_ETHER_MAX_LEN) {
 			PMD_DRV_LOG(ERR, "maximum packet length must be "
 				    "larger than %u and smaller than %u, "
 				    "as jumbo frame is disabled",
-				    (uint32_t)ETHER_MIN_LEN,
-				    (uint32_t)ETHER_MAX_LEN);
+				    (uint32_t)RTE_ETHER_MIN_LEN,
+				    (uint32_t)RTE_ETHER_MAX_LEN);
 			return I40E_ERR_CONFIG;
 		}
 	}
@@ -2708,9 +2723,8 @@ i40e_rx_queue_init(struct i40e_rx_queue *rxq)
 			    RTE_PKTMBUF_HEADROOM);
 
 	/* Check if scattered RX needs to be used. */
-	if ((rxq->max_pkt_len + 2 * I40E_VLAN_TAG_SIZE) > buf_size) {
+	if (rxq->max_pkt_len > buf_size)
 		dev_data->scattered_rx = 1;
-	}
 
 	/* Init the RX tail regieter. */
 	I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
@@ -2753,7 +2767,6 @@ i40e_dev_free_queues(struct rte_eth_dev *dev)
 		i40e_dev_rx_queue_release(dev->data->rx_queues[i]);
 		dev->data->rx_queues[i] = NULL;
 	}
-	dev->data->nb_rx_queues = 0;
 
 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
 		if (!dev->data->tx_queues[i])
@@ -2761,7 +2774,6 @@ i40e_dev_free_queues(struct rte_eth_dev *dev)
 		i40e_dev_tx_queue_release(dev->data->tx_queues[i]);
 		dev->data->tx_queues[i] = NULL;
 	}
-	dev->data->nb_tx_queues = 0;
 }
 
 #define I40E_FDIR_NUM_TX_DESC  I40E_MIN_RING_DESC
@@ -3184,7 +3196,7 @@ i40e_set_default_pctype_table(struct rte_eth_dev *dev)
 	}
 }
 
-/* Stubs needed for linkage when CONFIG_RTE_I40E_INC_VECTOR is set to 'n' */
+/* Stubs needed for linkage when CONFIG_RTE_LIBRTE_I40E_INC_VECTOR is set to 'n' */
 __rte_weak int
 i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev __rte_unused *dev)
 {
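
The Tx threshold handling added in the i40e_dev_tx_queue_setup() hunk above reduces to a small rule: resolve tx_free_thresh first (requested value or default), shrink the default tx_rs_thresh so the pair still fits in the ring, accept an explicit tx_rs_thresh request as-is, and reject the configuration when tx_rs_thresh + tx_free_thresh exceeds nb_desc. The standalone sketch below only restates that rule outside the driver; the helper name example_check_tx_thresh and the default value of 32 are assumptions for illustration, not part of the DPDK API.

#include <stdint.h>
#include <stdio.h>

/* Assumed defaults standing in for the driver's DEFAULT_TX_RS_THRESH /
 * DEFAULT_TX_FREE_THRESH macros. */
#define EX_DEFAULT_TX_RS_THRESH   32
#define EX_DEFAULT_TX_FREE_THRESH 32

/* Hypothetical standalone check mirroring the rule in the patch: returns 0
 * if the resolved thresholds fit in a ring of nb_desc descriptors, -1 if not. */
static int
example_check_tx_thresh(uint16_t nb_desc, uint16_t req_rs, uint16_t req_free)
{
	uint16_t tx_free_thresh = req_free ? req_free : EX_DEFAULT_TX_FREE_THRESH;
	/* Shrink the default RS threshold when an aggressive free threshold
	 * would otherwise leave too little room in the ring. */
	uint16_t tx_rs_thresh =
		(EX_DEFAULT_TX_RS_THRESH + tx_free_thresh > nb_desc) ?
		(uint16_t)(nb_desc - tx_free_thresh) : EX_DEFAULT_TX_RS_THRESH;

	if (req_rs > 0)
		tx_rs_thresh = req_rs;	/* an explicit request is taken as-is */

	if (tx_rs_thresh + tx_free_thresh > nb_desc) {
		fprintf(stderr,
			"tx_rs_thresh (%u) + tx_free_thresh (%u) exceeds nb_desc (%u)\n",
			tx_rs_thresh, tx_free_thresh, nb_desc);
		return -1;
	}
	return 0;
}

For example, with nb_desc = 1024 and tx_free_thresh = 1000 the default tx_rs_thresh is reduced to 24 and the setup succeeds, while explicitly requesting tx_rs_thresh = 64 with the same free threshold fails the sum check, which is the case the new PMD_INIT_LOG error above reports.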