X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fe1000%2Figb_rxtx.c;h=dd520cd82cc8508aaf52133572649a589be5b4f2;hb=185fe122f4899f48569d0086c9dcacc431ef0967;hp=a3776a0d7b2d2b9e197ab6a9a351c620a99e6656;hpb=18aee2861a1f8c5f7511fed32ecc59295c26f79a;p=dpdk.git diff --git a/drivers/net/e1000/igb_rxtx.c b/drivers/net/e1000/igb_rxtx.c index a3776a0d7b..dd520cd82c 100644 --- a/drivers/net/e1000/igb_rxtx.c +++ b/drivers/net/e1000/igb_rxtx.c @@ -50,6 +50,10 @@ #endif /* Bit Mask to indicate what bits required for building TX context */ #define IGB_TX_OFFLOAD_MASK ( \ + PKT_TX_OUTER_IPV6 | \ + PKT_TX_OUTER_IPV4 | \ + PKT_TX_IPV6 | \ + PKT_TX_IPV4 | \ PKT_TX_VLAN_PKT | \ PKT_TX_IP_CKSUM | \ PKT_TX_L4_MASK | \ @@ -285,17 +289,20 @@ igbe_set_xmit_ctx(struct igb_tx_queue* txq, case PKT_TX_UDP_CKSUM: type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_UDP | E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT; - mss_l4len_idx |= sizeof(struct udp_hdr) << E1000_ADVTXD_L4LEN_SHIFT; + mss_l4len_idx |= sizeof(struct rte_udp_hdr) + << E1000_ADVTXD_L4LEN_SHIFT; break; case PKT_TX_TCP_CKSUM: type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP | E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT; - mss_l4len_idx |= sizeof(struct tcp_hdr) << E1000_ADVTXD_L4LEN_SHIFT; + mss_l4len_idx |= sizeof(struct rte_tcp_hdr) + << E1000_ADVTXD_L4LEN_SHIFT; break; case PKT_TX_SCTP_CKSUM: type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_SCTP | E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT; - mss_l4len_idx |= sizeof(struct sctp_hdr) << E1000_ADVTXD_L4LEN_SHIFT; + mss_l4len_idx |= sizeof(struct rte_sctp_hdr) + << E1000_ADVTXD_L4LEN_SHIFT; break; default: type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_RSV | @@ -313,7 +320,7 @@ igbe_set_xmit_ctx(struct igb_tx_queue* txq, vlan_macip_lens = (uint32_t)tx_offload.data; ctx_txd->vlan_macip_lens = rte_cpu_to_le_32(vlan_macip_lens); ctx_txd->mss_l4len_idx = rte_cpu_to_le_32(mss_l4len_idx); - ctx_txd->seqnum_seed = 0; + ctx_txd->u.seqnum_seed = 0; } /* @@ -625,25 +632,25 @@ eth_igb_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts, if ((m->tso_segsz > IGB_TSO_MAX_MSS) || (m->l2_len + m->l3_len + m->l4_len > IGB_TSO_MAX_HDRLEN)) { - rte_errno = -EINVAL; + rte_errno = EINVAL; return i; } if (m->ol_flags & IGB_TX_OFFLOAD_NOTSUP_MASK) { - rte_errno = -ENOTSUP; + rte_errno = ENOTSUP; return i; } #ifdef RTE_LIBRTE_ETHDEV_DEBUG ret = rte_validate_tx_offload(m); if (ret != 0) { - rte_errno = ret; + rte_errno = -ret; return i; } #endif ret = rte_net_intel_cksum_prepare(m); if (ret != 0) { - rte_errno = ret; + rte_errno = -ret; return i; } } @@ -1143,17 +1150,17 @@ eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, */ rxm->next = NULL; if (unlikely(rxq->crc_len > 0)) { - first_seg->pkt_len -= ETHER_CRC_LEN; - if (data_len <= ETHER_CRC_LEN) { + first_seg->pkt_len -= RTE_ETHER_CRC_LEN; + if (data_len <= RTE_ETHER_CRC_LEN) { rte_pktmbuf_free_seg(rxm); first_seg->nb_segs--; last_seg->data_len = (uint16_t) (last_seg->data_len - - (ETHER_CRC_LEN - data_len)); + (RTE_ETHER_CRC_LEN - data_len)); last_seg->next = NULL; } else - rxm->data_len = - (uint16_t) (data_len - ETHER_CRC_LEN); + rxm->data_len = (uint16_t) + (data_len - RTE_ETHER_CRC_LEN); } /* @@ -1288,113 +1295,107 @@ igb_tx_done_cleanup(struct igb_tx_queue *txq, uint32_t free_cnt) uint16_t tx_id; /* Current segment being processed. */ uint16_t tx_last; /* Last segment in the current packet. */ uint16_t tx_next; /* First segment of the next packet. 
*/ - int count; + int count = 0; - if (txq != NULL) { - count = 0; - sw_ring = txq->sw_ring; - txr = txq->tx_ring; + if (!txq) + return -ENODEV; - /* - * tx_tail is the last sent packet on the sw_ring. Goto the end - * of that packet (the last segment in the packet chain) and - * then the next segment will be the start of the oldest segment - * in the sw_ring. This is the first packet that will be - * attempted to be freed. - */ + sw_ring = txq->sw_ring; + txr = txq->tx_ring; + + /* tx_tail is the last sent packet on the sw_ring. Goto the end + * of that packet (the last segment in the packet chain) and + * then the next segment will be the start of the oldest segment + * in the sw_ring. This is the first packet that will be + * attempted to be freed. + */ - /* Get last segment in most recently added packet. */ - tx_first = sw_ring[txq->tx_tail].last_id; + /* Get last segment in most recently added packet. */ + tx_first = sw_ring[txq->tx_tail].last_id; - /* Get the next segment, which is the oldest segment in ring. */ - tx_first = sw_ring[tx_first].next_id; + /* Get the next segment, which is the oldest segment in ring. */ + tx_first = sw_ring[tx_first].next_id; - /* Set the current index to the first. */ - tx_id = tx_first; + /* Set the current index to the first. */ + tx_id = tx_first; - /* - * Loop through each packet. For each packet, verify that an - * mbuf exists and that the last segment is free. If so, free - * it and move on. - */ - while (1) { - tx_last = sw_ring[tx_id].last_id; - - if (sw_ring[tx_last].mbuf) { - if (txr[tx_last].wb.status & - E1000_TXD_STAT_DD) { - /* - * Increment the number of packets - * freed. - */ - count++; - - /* Get the start of the next packet. */ - tx_next = sw_ring[tx_last].next_id; - - /* - * Loop through all segments in a - * packet. - */ - do { - rte_pktmbuf_free_seg(sw_ring[tx_id].mbuf); + /* Loop through each packet. For each packet, verify that an + * mbuf exists and that the last segment is free. If so, free + * it and move on. + */ + while (1) { + tx_last = sw_ring[tx_id].last_id; + + if (sw_ring[tx_last].mbuf) { + if (txr[tx_last].wb.status & + E1000_TXD_STAT_DD) { + /* Increment the number of packets + * freed. + */ + count++; + + /* Get the start of the next packet. */ + tx_next = sw_ring[tx_last].next_id; + + /* Loop through all segments in a + * packet. + */ + do { + if (sw_ring[tx_id].mbuf) { + rte_pktmbuf_free_seg( + sw_ring[tx_id].mbuf); sw_ring[tx_id].mbuf = NULL; sw_ring[tx_id].last_id = tx_id; + } - /* Move to next segemnt. */ - tx_id = sw_ring[tx_id].next_id; + /* Move to next segemnt. */ + tx_id = sw_ring[tx_id].next_id; - } while (tx_id != tx_next); + } while (tx_id != tx_next); - if (unlikely(count == (int)free_cnt)) - break; - } else - /* - * mbuf still in use, nothing left to - * free. - */ + if (unlikely(count == (int)free_cnt)) break; } else { - /* - * There are multiple reasons to be here: - * 1) All the packets on the ring have been - * freed - tx_id is equal to tx_first - * and some packets have been freed. - * - Done, exit - * 2) Interfaces has not sent a rings worth of - * packets yet, so the segment after tail is - * still empty. Or a previous call to this - * function freed some of the segments but - * not all so there is a hole in the list. - * Hopefully this is a rare case. - * - Walk the list and find the next mbuf. If - * there isn't one, then done. + /* mbuf still in use, nothing left to + * free. 
*/ - if (likely((tx_id == tx_first) && (count != 0))) - break; + break; + } + } else { + /* There are multiple reasons to be here: + * 1) All the packets on the ring have been + * freed - tx_id is equal to tx_first + * and some packets have been freed. + * - Done, exit + * 2) Interfaces has not sent a rings worth of + * packets yet, so the segment after tail is + * still empty. Or a previous call to this + * function freed some of the segments but + * not all so there is a hole in the list. + * Hopefully this is a rare case. + * - Walk the list and find the next mbuf. If + * there isn't one, then done. + */ + if (likely(tx_id == tx_first && count != 0)) + break; - /* - * Walk the list and find the next mbuf, if any. - */ - do { - /* Move to next segemnt. */ - tx_id = sw_ring[tx_id].next_id; + /* Walk the list and find the next mbuf, if any. */ + do { + /* Move to next segemnt. */ + tx_id = sw_ring[tx_id].next_id; - if (sw_ring[tx_id].mbuf) - break; + if (sw_ring[tx_id].mbuf) + break; - } while (tx_id != tx_first); + } while (tx_id != tx_first); - /* - * Determine why previous loop bailed. If there - * is not an mbuf, done. - */ - if (sw_ring[tx_id].mbuf == NULL) - break; - } + /* Determine why previous loop bailed. If there + * is not an mbuf, done. + */ + if (!sw_ring[tx_id].mbuf) + break; } - } else - count = -ENODEV; + } return count; } @@ -1452,43 +1453,28 @@ igb_reset_tx_queue(struct igb_tx_queue *txq, struct rte_eth_dev *dev) uint64_t igb_get_tx_port_offloads_capa(struct rte_eth_dev *dev) { - uint64_t rx_offload_capa; + uint64_t tx_offload_capa; RTE_SET_USED(dev); - rx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT | + tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT | DEV_TX_OFFLOAD_IPV4_CKSUM | DEV_TX_OFFLOAD_UDP_CKSUM | DEV_TX_OFFLOAD_TCP_CKSUM | DEV_TX_OFFLOAD_SCTP_CKSUM | - DEV_TX_OFFLOAD_TCP_TSO; + DEV_TX_OFFLOAD_TCP_TSO | + DEV_TX_OFFLOAD_MULTI_SEGS; - return rx_offload_capa; + return tx_offload_capa; } uint64_t igb_get_tx_queue_offloads_capa(struct rte_eth_dev *dev) { - uint64_t rx_queue_offload_capa; + uint64_t tx_queue_offload_capa; - rx_queue_offload_capa = igb_get_tx_port_offloads_capa(dev); + tx_queue_offload_capa = igb_get_tx_port_offloads_capa(dev); - return rx_queue_offload_capa; -} - -static int -igb_check_tx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested) -{ - uint64_t port_offloads = dev->data->dev_conf.txmode.offloads; - uint64_t queue_supported = igb_get_tx_queue_offloads_capa(dev); - uint64_t port_supported = igb_get_tx_port_offloads_capa(dev); - - if ((requested & (queue_supported | port_supported)) != requested) - return 0; - - if ((port_offloads ^ requested) & port_supported) - return 0; - - return 1; + return tx_queue_offload_capa; } int @@ -1502,19 +1488,9 @@ eth_igb_tx_queue_setup(struct rte_eth_dev *dev, struct igb_tx_queue *txq; struct e1000_hw *hw; uint32_t size; + uint64_t offloads; - if (!igb_check_tx_queue_offloads(dev, tx_conf->offloads)) { - PMD_INIT_LOG(ERR, "%p: Tx queue offloads 0x%" PRIx64 - " don't match port offloads 0x%" PRIx64 - " or supported port offloads 0x%" PRIx64 - " or supported queue offloads 0x%" PRIx64, - (void *)dev, - tx_conf->offloads, - dev->data->dev_conf.txmode.offloads, - igb_get_tx_port_offloads_capa(dev), - igb_get_tx_queue_offloads_capa(dev)); - return -ENOTSUP; - } + offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads; hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); @@ -1599,7 +1575,7 @@ eth_igb_tx_queue_setup(struct rte_eth_dev *dev, dev->tx_pkt_burst = eth_igb_xmit_pkts; dev->tx_pkt_prepare = 
&eth_igb_prep_pkts;
 	dev->data->tx_queues[queue_idx] = txq;
-	txq->offloads = tx_conf->offloads;
+	txq->offloads = offloads;
 
 	return 0;
 }
@@ -1655,16 +1631,24 @@ uint64_t
 igb_get_rx_port_offloads_capa(struct rte_eth_dev *dev)
 {
 	uint64_t rx_offload_capa;
+	struct e1000_hw *hw;
+
+	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
-	RTE_SET_USED(dev);
 	rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP  |
 			  DEV_RX_OFFLOAD_VLAN_FILTER |
 			  DEV_RX_OFFLOAD_IPV4_CKSUM  |
 			  DEV_RX_OFFLOAD_UDP_CKSUM   |
 			  DEV_RX_OFFLOAD_TCP_CKSUM   |
 			  DEV_RX_OFFLOAD_JUMBO_FRAME |
-			  DEV_RX_OFFLOAD_CRC_STRIP   |
-			  DEV_RX_OFFLOAD_SCATTER;
+			  DEV_RX_OFFLOAD_KEEP_CRC    |
+			  DEV_RX_OFFLOAD_SCATTER     |
+			  DEV_RX_OFFLOAD_RSS_HASH;
+
+	if (hw->mac.type == e1000_i350 ||
+	    hw->mac.type == e1000_i210 ||
+	    hw->mac.type == e1000_i211)
+		rx_offload_capa |= DEV_RX_OFFLOAD_VLAN_EXTEND;
 
 	return rx_offload_capa;
 }
@@ -1690,22 +1674,6 @@ igb_get_rx_queue_offloads_capa(struct rte_eth_dev *dev)
 	return rx_queue_offload_capa;
 }
 
-static int
-igb_check_rx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
-{
-	uint64_t port_offloads = dev->data->dev_conf.rxmode.offloads;
-	uint64_t queue_supported = igb_get_rx_queue_offloads_capa(dev);
-	uint64_t port_supported = igb_get_rx_port_offloads_capa(dev);
-
-	if ((requested & (queue_supported | port_supported)) != requested)
-		return 0;
-
-	if ((port_offloads ^ requested) & port_supported)
-		return 0;
-
-	return 1;
-}
-
 int
 eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
 			 uint16_t queue_idx,
@@ -1718,19 +1686,9 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
 	struct igb_rx_queue *rxq;
 	struct e1000_hw     *hw;
 	unsigned int size;
+	uint64_t offloads;
 
-	if (!igb_check_rx_queue_offloads(dev, rx_conf->offloads)) {
-		PMD_INIT_LOG(ERR, "%p: Rx queue offloads 0x%" PRIx64
-			" don't match port offloads 0x%" PRIx64
-			" or supported port offloads 0x%" PRIx64
-			" or supported queue offloads 0x%" PRIx64,
-			(void *)dev,
-			rx_conf->offloads,
-			dev->data->dev_conf.rxmode.offloads,
-			igb_get_rx_port_offloads_capa(dev),
-			igb_get_rx_queue_offloads_capa(dev));
-		return -ENOTSUP;
-	}
+	offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
 
 	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
@@ -1756,7 +1714,7 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
 			  RTE_CACHE_LINE_SIZE);
 	if (rxq == NULL)
 		return -ENOMEM;
-	rxq->offloads = rx_conf->offloads;
+	rxq->offloads = offloads;
 	rxq->mb_pool = mp;
 	rxq->nb_rx_desc = nb_desc;
 	rxq->pthresh = rx_conf->rx_thresh.pthresh;
@@ -1771,8 +1729,10 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
 	rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
 		queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
 	rxq->port_id = dev->data->port_id;
-	rxq->crc_len = (uint8_t)((dev->data->dev_conf.rxmode.offloads &
-			DEV_RX_OFFLOAD_CRC_STRIP) ? 0 : ETHER_CRC_LEN);
+	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+		rxq->crc_len = RTE_ETHER_CRC_LEN;
+	else
+		rxq->crc_len = 0;
 
 	/*
	 * Allocate RX ring hardware descriptors. 
A memzone large enough to @@ -1925,12 +1885,14 @@ igb_dev_free_queues(struct rte_eth_dev *dev) for (i = 0; i < dev->data->nb_rx_queues; i++) { eth_igb_rx_queue_release(dev->data->rx_queues[i]); dev->data->rx_queues[i] = NULL; + rte_eth_dma_zone_free(dev, "rx_ring", i); } dev->data->nb_rx_queues = 0; for (i = 0; i < dev->data->nb_tx_queues; i++) { eth_igb_tx_queue_release(dev->data->tx_queues[i]); dev->data->tx_queues[i] = NULL; + rte_eth_dma_zone_free(dev, "tx_ring", i); } dev->data->nb_tx_queues = 0; } @@ -2422,8 +2384,10 @@ eth_igb_rx_init(struct rte_eth_dev *dev) * Reset crc_len in case it was changed after queue setup by a * call to configure */ - rxq->crc_len = (uint8_t)(dev->data->dev_conf.rxmode.offloads & - DEV_RX_OFFLOAD_CRC_STRIP ? 0 : ETHER_CRC_LEN); + if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC) + rxq->crc_len = RTE_ETHER_CRC_LEN; + else + rxq->crc_len = 0; bus_addr = rxq->rx_ring_phys_addr; E1000_WRITE_REG(hw, E1000_RDLEN(rxq->reg_idx), @@ -2552,10 +2516,10 @@ eth_igb_rx_init(struct rte_eth_dev *dev) E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum); /* Setup the Receive Control Register. */ - if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_CRC_STRIP) { - rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */ + if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC) { + rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */ - /* set STRCRC bit in all queues */ + /* clear STRCRC bit in all queues */ if (hw->mac.type == e1000_i350 || hw->mac.type == e1000_i210 || hw->mac.type == e1000_i211 || @@ -2564,14 +2528,14 @@ eth_igb_rx_init(struct rte_eth_dev *dev) rxq = dev->data->rx_queues[i]; uint32_t dvmolr = E1000_READ_REG(hw, E1000_DVMOLR(rxq->reg_idx)); - dvmolr |= E1000_DVMOLR_STRCRC; + dvmolr &= ~E1000_DVMOLR_STRCRC; E1000_WRITE_REG(hw, E1000_DVMOLR(rxq->reg_idx), dvmolr); } } } else { - rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */ + rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */ - /* clear STRCRC bit in all queues */ + /* set STRCRC bit in all queues */ if (hw->mac.type == e1000_i350 || hw->mac.type == e1000_i210 || hw->mac.type == e1000_i211 || @@ -2580,7 +2544,7 @@ eth_igb_rx_init(struct rte_eth_dev *dev) rxq = dev->data->rx_queues[i]; uint32_t dvmolr = E1000_READ_REG(hw, E1000_DVMOLR(rxq->reg_idx)); - dvmolr &= ~E1000_DVMOLR_STRCRC; + dvmolr |= E1000_DVMOLR_STRCRC; E1000_WRITE_REG(hw, E1000_DVMOLR(rxq->reg_idx), dvmolr); } } @@ -2898,11 +2862,17 @@ igb_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, } int -igb_rss_conf_init(struct igb_rte_flow_rss_conf *out, +igb_rss_conf_init(struct rte_eth_dev *dev, + struct igb_rte_flow_rss_conf *out, const struct rte_flow_action_rss *in) { + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + if (in->key_len > RTE_DIM(out->key) || - in->queue_num > RTE_DIM(out->queue)) + ((hw->mac.type == e1000_82576) && + (in->queue_num > IGB_MAX_RX_QUEUE_NUM_82576)) || + ((hw->mac.type != e1000_82576) && + (in->queue_num > IGB_MAX_RX_QUEUE_NUM))) return -EINVAL; out->conf = (struct rte_flow_action_rss){ .func = in->func, @@ -2991,7 +2961,7 @@ igb_config_rss_filter(struct rte_eth_dev *dev, rss_conf.rss_key = rss_intel_key; /* Default hash key */ igb_hw_rss_hash_set(hw, &rss_conf); - if (igb_rss_conf_init(&filter_info->rss_info, &conf->conf)) + if (igb_rss_conf_init(dev, &filter_info->rss_info, &conf->conf)) return -EINVAL; return 0;