diff --git a/drivers/net/e1000/igb_rxtx.c b/drivers/net/e1000/igb_rxtx.c
index 009f0ea798..871b5f3b92 100644
--- a/drivers/net/e1000/igb_rxtx.c
+++ b/drivers/net/e1000/igb_rxtx.c
@@ -50,6 +50,10 @@
 #endif
 /* Bit Mask to indicate what bits required for building TX context */
 #define IGB_TX_OFFLOAD_MASK ( \
+		PKT_TX_OUTER_IPV6 |	 \
+		PKT_TX_OUTER_IPV4 |	 \
+		PKT_TX_IPV6 |		 \
+		PKT_TX_IPV4 |		 \
 		PKT_TX_VLAN_PKT |	 \
 		PKT_TX_IP_CKSUM |	 \
 		PKT_TX_L4_MASK |	 \
@@ -107,6 +111,7 @@ struct igb_rx_queue {
 	uint8_t             crc_len;    /**< 0 if CRC stripped, 4 otherwise. */
 	uint8_t             drop_en;  /**< If not 0, set SRRCTL.Drop_En. */
 	uint32_t            flags;      /**< RX flags. */
+	uint64_t            offloads;   /**< offloads of DEV_RX_OFFLOAD_* */
 };
 
 /**
@@ -180,6 +185,7 @@ struct igb_tx_queue {
 	/**< Start context position for transmit queue. */
 	struct igb_advctx_info ctx_cache[IGB_CTX_NUM];
 	/**< Hardware context history.*/
+	uint64_t	       offloads; /**< offloads of DEV_TX_OFFLOAD_* */
 };
 
 #if 1
@@ -283,17 +289,20 @@ igbe_set_xmit_ctx(struct igb_tx_queue* txq,
 	case PKT_TX_UDP_CKSUM:
 		type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_UDP |
 			E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
-		mss_l4len_idx |= sizeof(struct udp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
+		mss_l4len_idx |= sizeof(struct rte_udp_hdr)
+			<< E1000_ADVTXD_L4LEN_SHIFT;
 		break;
 	case PKT_TX_TCP_CKSUM:
 		type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP |
 			E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
-		mss_l4len_idx |= sizeof(struct tcp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
+		mss_l4len_idx |= sizeof(struct rte_tcp_hdr)
+			<< E1000_ADVTXD_L4LEN_SHIFT;
 		break;
 	case PKT_TX_SCTP_CKSUM:
 		type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_SCTP |
 			E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
-		mss_l4len_idx |= sizeof(struct sctp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
+		mss_l4len_idx |= sizeof(struct rte_sctp_hdr)
+			<< E1000_ADVTXD_L4LEN_SHIFT;
 		break;
 	default:
 		type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_RSV |
@@ -1141,17 +1150,17 @@ eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		 */
 		rxm->next = NULL;
 		if (unlikely(rxq->crc_len > 0)) {
-			first_seg->pkt_len -= ETHER_CRC_LEN;
-			if (data_len <= ETHER_CRC_LEN) {
+			first_seg->pkt_len -= RTE_ETHER_CRC_LEN;
+			if (data_len <= RTE_ETHER_CRC_LEN) {
 				rte_pktmbuf_free_seg(rxm);
 				first_seg->nb_segs--;
 				last_seg->data_len = (uint16_t)
 					(last_seg->data_len -
-					 (ETHER_CRC_LEN - data_len));
+					 (RTE_ETHER_CRC_LEN - data_len));
 				last_seg->next = NULL;
 			} else
-				rxm->data_len =
-					(uint16_t) (data_len - ETHER_CRC_LEN);
+				rxm->data_len = (uint16_t)
+					(data_len - RTE_ETHER_CRC_LEN);
 		}
 
 		/*
@@ -1447,6 +1456,33 @@ igb_reset_tx_queue(struct igb_tx_queue *txq, struct rte_eth_dev *dev)
 	igb_reset_tx_queue_stat(txq);
 }
 
+uint64_t
+igb_get_tx_port_offloads_capa(struct rte_eth_dev *dev)
+{
+	uint64_t tx_offload_capa;
+
+	RTE_SET_USED(dev);
+	tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
+			  DEV_TX_OFFLOAD_IPV4_CKSUM  |
+			  DEV_TX_OFFLOAD_UDP_CKSUM   |
+			  DEV_TX_OFFLOAD_TCP_CKSUM   |
+			  DEV_TX_OFFLOAD_SCTP_CKSUM  |
+			  DEV_TX_OFFLOAD_TCP_TSO     |
+			  DEV_TX_OFFLOAD_MULTI_SEGS;
+
+	return tx_offload_capa;
+}
+
+uint64_t
+igb_get_tx_queue_offloads_capa(struct rte_eth_dev *dev)
+{
+	uint64_t tx_queue_offload_capa;
+
+	tx_queue_offload_capa = igb_get_tx_port_offloads_capa(dev);
+
+	return tx_queue_offload_capa;
+}
+
 int
 eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
 			 uint16_t queue_idx,
@@ -1458,6 +1494,9 @@ eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
 	struct igb_tx_queue *txq;
 	struct e1000_hw     *hw;
 	uint32_t size;
+	uint64_t offloads;
+
+	offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
 
 	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
@@ -1542,6 +1581,7 @@ eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
 	dev->tx_pkt_burst = eth_igb_xmit_pkts;
 	dev->tx_pkt_prepare = &eth_igb_prep_pkts;
 	dev->data->tx_queues[queue_idx] = txq;
+	txq->offloads = offloads;
 
 	return 0;
 }
@@ -1593,6 +1633,45 @@ igb_reset_rx_queue(struct igb_rx_queue *rxq)
 	rxq->pkt_last_seg = NULL;
 }
 
+uint64_t
+igb_get_rx_port_offloads_capa(struct rte_eth_dev *dev)
+{
+	uint64_t rx_offload_capa;
+
+	RTE_SET_USED(dev);
+	rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP  |
+			  DEV_RX_OFFLOAD_VLAN_FILTER |
+			  DEV_RX_OFFLOAD_IPV4_CKSUM  |
+			  DEV_RX_OFFLOAD_UDP_CKSUM   |
+			  DEV_RX_OFFLOAD_TCP_CKSUM   |
+			  DEV_RX_OFFLOAD_JUMBO_FRAME |
+			  DEV_RX_OFFLOAD_KEEP_CRC    |
+			  DEV_RX_OFFLOAD_SCATTER;
+
+	return rx_offload_capa;
+}
+
+uint64_t
+igb_get_rx_queue_offloads_capa(struct rte_eth_dev *dev)
+{
+	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	uint64_t rx_queue_offload_capa;
+
+	switch (hw->mac.type) {
+	case e1000_vfadapt_i350:
+		/*
+		 * As only one Rx queue can be used, let per queue offloading
+		 * capability be same to per port queue offloading capability
+		 * for better convenience.
+		 */
+		rx_queue_offload_capa = igb_get_rx_port_offloads_capa(dev);
+		break;
+	default:
+		rx_queue_offload_capa = 0;
+	}
+	return rx_queue_offload_capa;
+}
+
 int
 eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
 			 uint16_t queue_idx,
@@ -1605,6 +1684,9 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
 	struct igb_rx_queue *rxq;
 	struct e1000_hw     *hw;
 	unsigned int size;
+	uint64_t offloads;
+
+	offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
 
 	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
@@ -1630,6 +1712,7 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
 			  RTE_CACHE_LINE_SIZE);
 	if (rxq == NULL)
 		return -ENOMEM;
+	rxq->offloads = offloads;
 	rxq->mb_pool = mp;
 	rxq->nb_rx_desc = nb_desc;
 	rxq->pthresh = rx_conf->rx_thresh.pthresh;
@@ -1644,8 +1727,10 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
 	rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
 		queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
 	rxq->port_id = dev->data->port_id;
-	rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ? 0 :
-				  ETHER_CRC_LEN);
+	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+		rxq->crc_len = RTE_ETHER_CRC_LEN;
+	else
+		rxq->crc_len = 0;
 
 	/*
 	 * Allocate RX ring hardware descriptors. A memzone large enough to
@@ -2227,6 +2312,7 @@ igb_dev_mq_rx_configure(struct rte_eth_dev *dev)
 int
 eth_igb_rx_init(struct rte_eth_dev *dev)
 {
+	struct rte_eth_rxmode *rxmode;
 	struct e1000_hw     *hw;
 	struct igb_rx_queue *rxq;
 	uint32_t rctl;
@@ -2247,10 +2333,12 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
 	rctl = E1000_READ_REG(hw, E1000_RCTL);
 	E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
 
+	rxmode = &dev->data->dev_conf.rxmode;
+
 	/*
 	 * Configure support of jumbo frames, if any.
 	 */
-	if (dev->data->dev_conf.rxmode.jumbo_frame == 1) {
+	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
 		rctl |= E1000_RCTL_LPE;
 
 		/*
@@ -2292,9 +2380,10 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
 		 * Reset crc_len in case it was changed after queue setup by a
 		 * call to configure
 		 */
-		rxq->crc_len =
-			(uint8_t)(dev->data->dev_conf.rxmode.hw_strip_crc ?
-							0 : ETHER_CRC_LEN);
+		if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+			rxq->crc_len = RTE_ETHER_CRC_LEN;
+		else
+			rxq->crc_len = 0;
 
 		bus_addr = rxq->rx_ring_phys_addr;
 		E1000_WRITE_REG(hw, E1000_RDLEN(rxq->reg_idx),
@@ -2362,7 +2451,7 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
 		E1000_WRITE_REG(hw, E1000_RXDCTL(rxq->reg_idx), rxdctl);
 	}
 
-	if (dev->data->dev_conf.rxmode.enable_scatter) {
+	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) {
 		if (!dev->data->scattered_rx)
 			PMD_INIT_LOG(DEBUG, "forcing scatter mode");
 		dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
@@ -2406,19 +2495,27 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
 		rxcsum |= E1000_RXCSUM_PCSD;
 
 	/* Enable both L3/L4 rx checksum offload */
-	if (dev->data->dev_conf.rxmode.hw_ip_checksum)
-		rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL |
-				E1000_RXCSUM_CRCOFL);
+	if (rxmode->offloads & DEV_RX_OFFLOAD_IPV4_CKSUM)
+		rxcsum |= E1000_RXCSUM_IPOFL;
+	else
+		rxcsum &= ~E1000_RXCSUM_IPOFL;
+	if (rxmode->offloads &
+		(DEV_RX_OFFLOAD_TCP_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM))
+		rxcsum |= E1000_RXCSUM_TUOFL;
+	else
+		rxcsum &= ~E1000_RXCSUM_TUOFL;
+	if (rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM)
+		rxcsum |= E1000_RXCSUM_CRCOFL;
 	else
-		rxcsum &= ~(E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL |
-				E1000_RXCSUM_CRCOFL);
+		rxcsum &= ~E1000_RXCSUM_CRCOFL;
+
 	E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
 
 	/* Setup the Receive Control Register. */
-	if (dev->data->dev_conf.rxmode.hw_strip_crc) {
-		rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */
+	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
+		rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */
 
-		/* set STRCRC bit in all queues */
+		/* clear STRCRC bit in all queues */
 		if (hw->mac.type == e1000_i350 ||
 		    hw->mac.type == e1000_i210 ||
 		    hw->mac.type == e1000_i211 ||
@@ -2427,14 +2524,14 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
 				rxq = dev->data->rx_queues[i];
 				uint32_t dvmolr = E1000_READ_REG(hw,
 					E1000_DVMOLR(rxq->reg_idx));
-				dvmolr |= E1000_DVMOLR_STRCRC;
+				dvmolr &= ~E1000_DVMOLR_STRCRC;
 				E1000_WRITE_REG(hw, E1000_DVMOLR(rxq->reg_idx), dvmolr);
 			}
 		}
 	} else {
-		rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */
+		rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */
 
-		/* clear STRCRC bit in all queues */
+		/* set STRCRC bit in all queues */
 		if (hw->mac.type == e1000_i350 ||
 		    hw->mac.type == e1000_i210 ||
 		    hw->mac.type == e1000_i211 ||
@@ -2443,7 +2540,7 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
 				rxq = dev->data->rx_queues[i];
 				uint32_t dvmolr = E1000_READ_REG(hw,
 					E1000_DVMOLR(rxq->reg_idx));
-				dvmolr &= ~E1000_DVMOLR_STRCRC;
+				dvmolr |= E1000_DVMOLR_STRCRC;
 				E1000_WRITE_REG(hw, E1000_DVMOLR(rxq->reg_idx), dvmolr);
 			}
 		}
@@ -2654,7 +2751,7 @@ eth_igbvf_rx_init(struct rte_eth_dev *dev)
 		E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
 	}
 
-	if (dev->data->dev_conf.rxmode.enable_scatter) {
+	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) {
 		if (!dev->data->scattered_rx)
 			PMD_INIT_LOG(DEBUG, "forcing scatter mode");
 		dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
@@ -2741,6 +2838,7 @@ igb_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
 
 	qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
 	qinfo->conf.rx_drop_en = rxq->drop_en;
+	qinfo->conf.offloads = rxq->offloads;
 }
 
 void
@@ -2756,6 +2854,47 @@ igb_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
 	qinfo->conf.tx_thresh.pthresh = txq->pthresh;
 	qinfo->conf.tx_thresh.hthresh = txq->hthresh;
 	qinfo->conf.tx_thresh.wthresh = txq->wthresh;
+	qinfo->conf.offloads = txq->offloads;
+}
+
+int
+igb_rss_conf_init(struct rte_eth_dev *dev,
+		  struct igb_rte_flow_rss_conf *out,
+		  const struct rte_flow_action_rss *in)
+{
+	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	if (in->key_len > RTE_DIM(out->key) ||
+	    ((hw->mac.type == e1000_82576) &&
+	     (in->queue_num > IGB_MAX_RX_QUEUE_NUM_82576)) ||
+	    ((hw->mac.type != e1000_82576) &&
+	     (in->queue_num > IGB_MAX_RX_QUEUE_NUM)))
+		return -EINVAL;
+	out->conf = (struct rte_flow_action_rss){
+		.func = in->func,
+		.level = in->level,
+		.types = in->types,
+		.key_len = in->key_len,
+		.queue_num = in->queue_num,
+		.key = memcpy(out->key, in->key, in->key_len),
+		.queue = memcpy(out->queue, in->queue,
+				sizeof(*in->queue) * in->queue_num),
+	};
+	return 0;
+}
+
+int
+igb_action_rss_same(const struct rte_flow_action_rss *comp,
+		    const struct rte_flow_action_rss *with)
+{
+	return (comp->func == with->func &&
+		comp->level == with->level &&
+		comp->types == with->types &&
+		comp->key_len == with->key_len &&
+		comp->queue_num == with->queue_num &&
+		!memcmp(comp->key, with->key, with->key_len) &&
+		!memcmp(comp->queue, with->queue,
+			sizeof(*with->queue) * with->queue_num));
 }
 
 int
@@ -2764,7 +2903,12 @@ igb_config_rss_filter(struct rte_eth_dev *dev,
 {
	uint32_t shift;
 	uint16_t i, j;
-	struct rte_eth_rss_conf rss_conf = conf->rss_conf;
+	struct rte_eth_rss_conf rss_conf = {
+		.rss_key = conf->conf.key_len ?
+			(void *)(uintptr_t)conf->conf.key : NULL,
+		.rss_key_len = conf->conf.key_len,
+		.rss_hf = conf->conf.types,
+	};
 	struct e1000_filter_info *filter_info =
 		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
 	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -2772,8 +2916,8 @@ igb_config_rss_filter(struct rte_eth_dev *dev,
 	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
 	if (!add) {
-		if (memcmp(conf, &filter_info->rss_info,
-			   sizeof(struct igb_rte_flow_rss_conf)) == 0) {
+		if (igb_action_rss_same(&filter_info->rss_info.conf,
+					&conf->conf)) {
 			igb_rss_disable(dev);
 			memset(&filter_info->rss_info, 0,
 				sizeof(struct igb_rte_flow_rss_conf));
@@ -2782,7 +2926,7 @@ igb_config_rss_filter(struct rte_eth_dev *dev,
 		return -EINVAL;
 	}
 
-	if (filter_info->rss_info.num)
+	if (filter_info->rss_info.conf.queue_num)
 		return -EINVAL;
 
 	/* Fill in redirection table. */
@@ -2794,9 +2938,9 @@ igb_config_rss_filter(struct rte_eth_dev *dev,
 		} reta;
 		uint8_t q_idx;
 
-		if (j == conf->num)
+		if (j == conf->conf.queue_num)
 			j = 0;
-		q_idx = conf->queue[j];
+		q_idx = conf->conf.queue[j];
 		reta.bytes[i & 3] = (uint8_t)(q_idx << shift);
 		if ((i & 3) == 3)
 			E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta.dword);
@@ -2813,8 +2957,8 @@ igb_config_rss_filter(struct rte_eth_dev *dev,
 		rss_conf.rss_key = rss_intel_key; /* Default hash key */
 	igb_hw_rss_hash_set(hw, &rss_conf);
 
-	rte_memcpy(&filter_info->rss_info,
-		conf, sizeof(struct igb_rte_flow_rss_conf));
+	if (igb_rss_conf_init(dev, &filter_info->rss_info, &conf->conf))
+		return -EINVAL;
 
 	return 0;
 }
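
Usage notes (sketches against the patched API, not part of the patch itself):

The IGB_TX_OFFLOAD_MASK hunk adds PKT_TX_IPV4/PKT_TX_IPV6 and the outer-header variants so that eth_igb_prep_pkts(), installed above as dev->tx_pkt_prepare, accepts mbufs carrying those flags instead of failing them. A minimal Tx-side sketch; the function name send_with_cksum_offload and the single-packet burst are illustrative assumptions:

#include <rte_ethdev.h>
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_mbuf.h>

/* Sketch: request IPv4 header + TCP checksum offload on one mbuf, then
 * run it through tx_prepare, which checks ol_flags against the PMD's
 * supported mask (IGB_TX_OFFLOAD_MASK in this driver). */
static int
send_with_cksum_offload(uint16_t port_id, struct rte_mbuf *m)
{
	m->l2_len = sizeof(struct rte_ether_hdr);
	m->l3_len = sizeof(struct rte_ipv4_hdr);
	m->ol_flags |= PKT_TX_IPV4 | PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM;

	if (rte_eth_tx_prepare(port_id, 0, &m, 1) != 1)
		return -1;	/* offload combination rejected */
	return rte_eth_tx_burst(port_id, 0, &m, 1) == 1 ? 0 : -1;
}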
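The igb_get_{rx,tx}_{port,queue}_offloads_capa() helpers are what the driver reports through rte_eth_dev_info_get(), and both queue-setup paths merge the two request levels (offloads = *_conf->offloads | dev->data->dev_conf.*mode.offloads). A sketch of the application side, assuming a valid port_id and an mbuf pool created elsewhere; the queue count, ring size, and function name configure_port are arbitrary:

#include <rte_ethdev.h>
#include <rte_lcore.h>
#include <rte_mempool.h>

/* Sketch: enable Rx checksum offloads port-wide and KEEP_CRC per queue,
 * masking each request by the capability the PMD reports. With this
 * patch, igb reports a nonzero per-queue Rx capability only for the
 * i350 VF, so the KEEP_CRC request is masked out everywhere else. */
static int
configure_port(uint16_t port_id, struct rte_mempool *mb_pool)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf port_conf = { .rxmode = { .offloads = 0 } };
	struct rte_eth_rxconf rxq_conf;

	rte_eth_dev_info_get(port_id, &dev_info);

	/* Port-level requests must stay within rx_offload_capa. */
	port_conf.rxmode.offloads = DEV_RX_OFFLOAD_CHECKSUM &
				    dev_info.rx_offload_capa;

	if (rte_eth_dev_configure(port_id, 1, 1, &port_conf) < 0)
		return -1;

	/* Queue-level additions must stay within rx_queue_offload_capa. */
	rxq_conf = dev_info.default_rxconf;
	rxq_conf.offloads = DEV_RX_OFFLOAD_KEEP_CRC &
			    dev_info.rx_queue_offload_capa;

	return rte_eth_rx_queue_setup(port_id, 0, 512, rte_socket_id(),
				      &rxq_conf, mb_pool);
}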
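On the rte_flow RSS rework at the end: struct rte_flow_action_rss carries pointers (key, queue) into caller memory, which is why igb_rss_conf_init() deep-copies both arrays into the driver-owned igb_rte_flow_rss_conf before the flow call returns, and why igb_action_rss_same() compares field by field instead of memcmp()ing a struct that now contains pointers. A sketch of the application-side action that reaches this code as conf->conf; the queue list and hash types are arbitrary:

#include <rte_common.h>
#include <rte_ethdev.h>
#include <rte_flow.h>

/* Sketch: an RSS action spreading matched IP traffic over queues 0-3.
 * .key and .queue point at caller-owned storage, hence the deep copy
 * on the PMD side. */
static const uint16_t rss_queues[] = { 0, 1, 2, 3 };

static const struct rte_flow_action_rss rss_action = {
	.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
	.level = 0,			/* 0: default encapsulation level */
	.types = ETH_RSS_IP,
	.key_len = 0,			/* 0: keep the PMD's default key */
	.key = NULL,
	.queue_num = RTE_DIM(rss_queues),
	.queue = rss_queues,
};

With key_len == 0, the rss_conf built at the top of igb_config_rss_filter() gets a NULL rss_key, so the rss_intel_key default visible in the last hunk takes over.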