X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fe1000%2Figb_rxtx.c;h=278d5d2712af18707771c515a9259852e7134e36;hb=4a5a1e6b624e46f24b13d095e35a9b8699bd59b9;hp=2f37167249952acfd335b79f007a16d38a099d39;hpb=ffc905f3b856b96c6d8d864dba4052104fae4064;p=dpdk.git diff --git a/drivers/net/e1000/igb_rxtx.c b/drivers/net/e1000/igb_rxtx.c index 2f37167249..278d5d2712 100644 --- a/drivers/net/e1000/igb_rxtx.c +++ b/drivers/net/e1000/igb_rxtx.c @@ -31,7 +31,7 @@ #include #include #include -#include +#include #include #include #include @@ -50,6 +50,10 @@ #endif /* Bit Mask to indicate what bits required for building TX context */ #define IGB_TX_OFFLOAD_MASK ( \ + PKT_TX_OUTER_IPV6 | \ + PKT_TX_OUTER_IPV4 | \ + PKT_TX_IPV6 | \ + PKT_TX_IPV4 | \ PKT_TX_VLAN_PKT | \ PKT_TX_IP_CKSUM | \ PKT_TX_L4_MASK | \ @@ -107,6 +111,7 @@ struct igb_rx_queue { uint8_t crc_len; /**< 0 if CRC stripped, 4 otherwise. */ uint8_t drop_en; /**< If not 0, set SRRCTL.Drop_En. */ uint32_t flags; /**< RX flags. */ + uint64_t offloads; /**< offloads of DEV_RX_OFFLOAD_* */ }; /** @@ -180,6 +185,7 @@ struct igb_tx_queue { /**< Start context position for transmit queue. */ struct igb_advctx_info ctx_cache[IGB_CTX_NUM]; /**< Hardware context history.*/ + uint64_t offloads; /**< offloads of DEV_TX_OFFLOAD_* */ }; #if 1 @@ -283,17 +289,20 @@ igbe_set_xmit_ctx(struct igb_tx_queue* txq, case PKT_TX_UDP_CKSUM: type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_UDP | E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT; - mss_l4len_idx |= sizeof(struct udp_hdr) << E1000_ADVTXD_L4LEN_SHIFT; + mss_l4len_idx |= sizeof(struct rte_udp_hdr) + << E1000_ADVTXD_L4LEN_SHIFT; break; case PKT_TX_TCP_CKSUM: type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP | E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT; - mss_l4len_idx |= sizeof(struct tcp_hdr) << E1000_ADVTXD_L4LEN_SHIFT; + mss_l4len_idx |= sizeof(struct rte_tcp_hdr) + << E1000_ADVTXD_L4LEN_SHIFT; break; case PKT_TX_SCTP_CKSUM: type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_SCTP | E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT; - mss_l4len_idx |= sizeof(struct sctp_hdr) << E1000_ADVTXD_L4LEN_SHIFT; + mss_l4len_idx |= sizeof(struct rte_sctp_hdr) + << E1000_ADVTXD_L4LEN_SHIFT; break; default: type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_RSV | @@ -311,7 +320,7 @@ igbe_set_xmit_ctx(struct igb_tx_queue* txq, vlan_macip_lens = (uint32_t)tx_offload.data; ctx_txd->vlan_macip_lens = rte_cpu_to_le_32(vlan_macip_lens); ctx_txd->mss_l4len_idx = rte_cpu_to_le_32(mss_l4len_idx); - ctx_txd->seqnum_seed = 0; + ctx_txd->u.seqnum_seed = 0; } /* @@ -623,25 +632,25 @@ eth_igb_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts, if ((m->tso_segsz > IGB_TSO_MAX_MSS) || (m->l2_len + m->l3_len + m->l4_len > IGB_TSO_MAX_HDRLEN)) { - rte_errno = -EINVAL; + rte_errno = EINVAL; return i; } if (m->ol_flags & IGB_TX_OFFLOAD_NOTSUP_MASK) { - rte_errno = -ENOTSUP; + rte_errno = ENOTSUP; return i; } -#ifdef RTE_LIBRTE_ETHDEV_DEBUG +#ifdef RTE_ETHDEV_DEBUG_TX ret = rte_validate_tx_offload(m); if (ret != 0) { - rte_errno = ret; + rte_errno = -ret; return i; } #endif ret = rte_net_intel_cksum_prepare(m); if (ret != 0) { - rte_errno = ret; + rte_errno = -ret; return i; } } @@ -1141,17 +1150,17 @@ eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, */ rxm->next = NULL; if (unlikely(rxq->crc_len > 0)) { - first_seg->pkt_len -= ETHER_CRC_LEN; - if (data_len <= ETHER_CRC_LEN) { + first_seg->pkt_len -= RTE_ETHER_CRC_LEN; + if (data_len <= RTE_ETHER_CRC_LEN) { rte_pktmbuf_free_seg(rxm); first_seg->nb_segs--; 
last_seg->data_len = (uint16_t) (last_seg->data_len - - (ETHER_CRC_LEN - data_len)); + (RTE_ETHER_CRC_LEN - data_len)); last_seg->next = NULL; } else - rxm->data_len = - (uint16_t) (data_len - ETHER_CRC_LEN); + rxm->data_len = (uint16_t) + (data_len - RTE_ETHER_CRC_LEN); } /* @@ -1286,113 +1295,107 @@ igb_tx_done_cleanup(struct igb_tx_queue *txq, uint32_t free_cnt) uint16_t tx_id; /* Current segment being processed. */ uint16_t tx_last; /* Last segment in the current packet. */ uint16_t tx_next; /* First segment of the next packet. */ - int count; + int count = 0; - if (txq != NULL) { - count = 0; - sw_ring = txq->sw_ring; - txr = txq->tx_ring; + if (!txq) + return -ENODEV; - /* - * tx_tail is the last sent packet on the sw_ring. Goto the end - * of that packet (the last segment in the packet chain) and - * then the next segment will be the start of the oldest segment - * in the sw_ring. This is the first packet that will be - * attempted to be freed. - */ + sw_ring = txq->sw_ring; + txr = txq->tx_ring; - /* Get last segment in most recently added packet. */ - tx_first = sw_ring[txq->tx_tail].last_id; + /* tx_tail is the last sent packet on the sw_ring. Goto the end + * of that packet (the last segment in the packet chain) and + * then the next segment will be the start of the oldest segment + * in the sw_ring. This is the first packet that will be + * attempted to be freed. + */ - /* Get the next segment, which is the oldest segment in ring. */ - tx_first = sw_ring[tx_first].next_id; + /* Get last segment in most recently added packet. */ + tx_first = sw_ring[txq->tx_tail].last_id; - /* Set the current index to the first. */ - tx_id = tx_first; + /* Get the next segment, which is the oldest segment in ring. */ + tx_first = sw_ring[tx_first].next_id; - /* - * Loop through each packet. For each packet, verify that an - * mbuf exists and that the last segment is free. If so, free - * it and move on. - */ - while (1) { - tx_last = sw_ring[tx_id].last_id; - - if (sw_ring[tx_last].mbuf) { - if (txr[tx_last].wb.status & - E1000_TXD_STAT_DD) { - /* - * Increment the number of packets - * freed. - */ - count++; - - /* Get the start of the next packet. */ - tx_next = sw_ring[tx_last].next_id; - - /* - * Loop through all segments in a - * packet. - */ - do { - rte_pktmbuf_free_seg(sw_ring[tx_id].mbuf); + /* Set the current index to the first. */ + tx_id = tx_first; + + /* Loop through each packet. For each packet, verify that an + * mbuf exists and that the last segment is free. If so, free + * it and move on. + */ + while (1) { + tx_last = sw_ring[tx_id].last_id; + + if (sw_ring[tx_last].mbuf) { + if (txr[tx_last].wb.status & + E1000_TXD_STAT_DD) { + /* Increment the number of packets + * freed. + */ + count++; + + /* Get the start of the next packet. */ + tx_next = sw_ring[tx_last].next_id; + + /* Loop through all segments in a + * packet. + */ + do { + if (sw_ring[tx_id].mbuf) { + rte_pktmbuf_free_seg( + sw_ring[tx_id].mbuf); sw_ring[tx_id].mbuf = NULL; sw_ring[tx_id].last_id = tx_id; + } - /* Move to next segemnt. */ - tx_id = sw_ring[tx_id].next_id; + /* Move to next segemnt. */ + tx_id = sw_ring[tx_id].next_id; - } while (tx_id != tx_next); + } while (tx_id != tx_next); - if (unlikely(count == (int)free_cnt)) - break; - } else - /* - * mbuf still in use, nothing left to - * free. 
- */ + if (unlikely(count == (int)free_cnt)) break; } else { - /* - * There are multiple reasons to be here: - * 1) All the packets on the ring have been - * freed - tx_id is equal to tx_first - * and some packets have been freed. - * - Done, exit - * 2) Interfaces has not sent a rings worth of - * packets yet, so the segment after tail is - * still empty. Or a previous call to this - * function freed some of the segments but - * not all so there is a hole in the list. - * Hopefully this is a rare case. - * - Walk the list and find the next mbuf. If - * there isn't one, then done. + /* mbuf still in use, nothing left to + * free. */ - if (likely((tx_id == tx_first) && (count != 0))) - break; + break; + } + } else { + /* There are multiple reasons to be here: + * 1) All the packets on the ring have been + * freed - tx_id is equal to tx_first + * and some packets have been freed. + * - Done, exit + * 2) Interfaces has not sent a rings worth of + * packets yet, so the segment after tail is + * still empty. Or a previous call to this + * function freed some of the segments but + * not all so there is a hole in the list. + * Hopefully this is a rare case. + * - Walk the list and find the next mbuf. If + * there isn't one, then done. + */ + if (likely(tx_id == tx_first && count != 0)) + break; - /* - * Walk the list and find the next mbuf, if any. - */ - do { - /* Move to next segemnt. */ - tx_id = sw_ring[tx_id].next_id; + /* Walk the list and find the next mbuf, if any. */ + do { + /* Move to next segemnt. */ + tx_id = sw_ring[tx_id].next_id; - if (sw_ring[tx_id].mbuf) - break; + if (sw_ring[tx_id].mbuf) + break; - } while (tx_id != tx_first); + } while (tx_id != tx_first); - /* - * Determine why previous loop bailed. If there - * is not an mbuf, done. - */ - if (sw_ring[tx_id].mbuf == NULL) - break; - } + /* Determine why previous loop bailed. If there + * is not an mbuf, done. 
+ */ + if (!sw_ring[tx_id].mbuf) + break; } - } else - count = -ENODEV; + } return count; } @@ -1447,6 +1450,33 @@ igb_reset_tx_queue(struct igb_tx_queue *txq, struct rte_eth_dev *dev) igb_reset_tx_queue_stat(txq); } +uint64_t +igb_get_tx_port_offloads_capa(struct rte_eth_dev *dev) +{ + uint64_t tx_offload_capa; + + RTE_SET_USED(dev); + tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT | + DEV_TX_OFFLOAD_IPV4_CKSUM | + DEV_TX_OFFLOAD_UDP_CKSUM | + DEV_TX_OFFLOAD_TCP_CKSUM | + DEV_TX_OFFLOAD_SCTP_CKSUM | + DEV_TX_OFFLOAD_TCP_TSO | + DEV_TX_OFFLOAD_MULTI_SEGS; + + return tx_offload_capa; +} + +uint64_t +igb_get_tx_queue_offloads_capa(struct rte_eth_dev *dev) +{ + uint64_t tx_queue_offload_capa; + + tx_queue_offload_capa = igb_get_tx_port_offloads_capa(dev); + + return tx_queue_offload_capa; +} + int eth_igb_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, @@ -1458,6 +1488,9 @@ eth_igb_tx_queue_setup(struct rte_eth_dev *dev, struct igb_tx_queue *txq; struct e1000_hw *hw; uint32_t size; + uint64_t offloads; + + offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads; hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); @@ -1542,6 +1575,7 @@ eth_igb_tx_queue_setup(struct rte_eth_dev *dev, dev->tx_pkt_burst = eth_igb_xmit_pkts; dev->tx_pkt_prepare = ð_igb_prep_pkts; dev->data->tx_queues[queue_idx] = txq; + txq->offloads = offloads; return 0; } @@ -1593,6 +1627,53 @@ igb_reset_rx_queue(struct igb_rx_queue *rxq) rxq->pkt_last_seg = NULL; } +uint64_t +igb_get_rx_port_offloads_capa(struct rte_eth_dev *dev) +{ + uint64_t rx_offload_capa; + struct e1000_hw *hw; + + hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP | + DEV_RX_OFFLOAD_VLAN_FILTER | + DEV_RX_OFFLOAD_IPV4_CKSUM | + DEV_RX_OFFLOAD_UDP_CKSUM | + DEV_RX_OFFLOAD_TCP_CKSUM | + DEV_RX_OFFLOAD_JUMBO_FRAME | + DEV_RX_OFFLOAD_KEEP_CRC | + DEV_RX_OFFLOAD_SCATTER | + DEV_RX_OFFLOAD_RSS_HASH; + + if (hw->mac.type == e1000_i350 || + hw->mac.type == e1000_i210 || + hw->mac.type == e1000_i211) + rx_offload_capa |= DEV_RX_OFFLOAD_VLAN_EXTEND; + + return rx_offload_capa; +} + +uint64_t +igb_get_rx_queue_offloads_capa(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint64_t rx_queue_offload_capa; + + switch (hw->mac.type) { + case e1000_vfadapt_i350: + /* + * As only one Rx queue can be used, let per queue offloading + * capability be same to per port queue offloading capability + * for better convenience. + */ + rx_queue_offload_capa = igb_get_rx_port_offloads_capa(dev); + break; + default: + rx_queue_offload_capa = 0; + } + return rx_queue_offload_capa; +} + int eth_igb_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, @@ -1605,6 +1686,9 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev, struct igb_rx_queue *rxq; struct e1000_hw *hw; unsigned int size; + uint64_t offloads; + + offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads; hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); @@ -1630,6 +1714,7 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev, RTE_CACHE_LINE_SIZE); if (rxq == NULL) return -ENOMEM; + rxq->offloads = offloads; rxq->mb_pool = mp; rxq->nb_rx_desc = nb_desc; rxq->pthresh = rx_conf->rx_thresh.pthresh; @@ -1644,8 +1729,10 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev, rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ? 
queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx); rxq->port_id = dev->data->port_id; - rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ? 0 : - ETHER_CRC_LEN); + if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC) + rxq->crc_len = RTE_ETHER_CRC_LEN; + else + rxq->crc_len = 0; /* * Allocate RX ring hardware descriptors. A memzone large enough to @@ -1798,12 +1885,14 @@ igb_dev_free_queues(struct rte_eth_dev *dev) for (i = 0; i < dev->data->nb_rx_queues; i++) { eth_igb_rx_queue_release(dev->data->rx_queues[i]); dev->data->rx_queues[i] = NULL; + rte_eth_dma_zone_free(dev, "rx_ring", i); } dev->data->nb_rx_queues = 0; for (i = 0; i < dev->data->nb_tx_queues; i++) { eth_igb_tx_queue_release(dev->data->tx_queues[i]); dev->data->tx_queues[i] = NULL; + rte_eth_dma_zone_free(dev, "tx_ring", i); } dev->data->nb_tx_queues = 0; } @@ -2227,6 +2316,7 @@ igb_dev_mq_rx_configure(struct rte_eth_dev *dev) int eth_igb_rx_init(struct rte_eth_dev *dev) { + struct rte_eth_rxmode *rxmode; struct e1000_hw *hw; struct igb_rx_queue *rxq; uint32_t rctl; @@ -2247,19 +2337,24 @@ eth_igb_rx_init(struct rte_eth_dev *dev) rctl = E1000_READ_REG(hw, E1000_RCTL); E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN); + rxmode = &dev->data->dev_conf.rxmode; + /* * Configure support of jumbo frames, if any. */ - if (dev->data->dev_conf.rxmode.jumbo_frame == 1) { + if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) { + uint32_t max_len = dev->data->dev_conf.rxmode.max_rx_pkt_len; + rctl |= E1000_RCTL_LPE; /* * Set maximum packet length by default, and might be updated * together with enabling/disabling dual VLAN. */ - E1000_WRITE_REG(hw, E1000_RLPML, - dev->data->dev_conf.rxmode.max_rx_pkt_len + - VLAN_TAG_SIZE); + if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) + max_len += VLAN_TAG_SIZE; + + E1000_WRITE_REG(hw, E1000_RLPML, max_len); } else rctl &= ~E1000_RCTL_LPE; @@ -2292,9 +2387,10 @@ eth_igb_rx_init(struct rte_eth_dev *dev) * Reset crc_len in case it was changed after queue setup by a * call to configure */ - rxq->crc_len = - (uint8_t)(dev->data->dev_conf.rxmode.hw_strip_crc ? 
- 0 : ETHER_CRC_LEN); + if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC) + rxq->crc_len = RTE_ETHER_CRC_LEN; + else + rxq->crc_len = 0; bus_addr = rxq->rx_ring_phys_addr; E1000_WRITE_REG(hw, E1000_RDLEN(rxq->reg_idx), @@ -2362,7 +2458,7 @@ eth_igb_rx_init(struct rte_eth_dev *dev) E1000_WRITE_REG(hw, E1000_RXDCTL(rxq->reg_idx), rxdctl); } - if (dev->data->dev_conf.rxmode.enable_scatter) { + if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) { if (!dev->data->scattered_rx) PMD_INIT_LOG(DEBUG, "forcing scatter mode"); dev->rx_pkt_burst = eth_igb_recv_scattered_pkts; @@ -2406,19 +2502,27 @@ eth_igb_rx_init(struct rte_eth_dev *dev) rxcsum |= E1000_RXCSUM_PCSD; /* Enable both L3/L4 rx checksum offload */ - if (dev->data->dev_conf.rxmode.hw_ip_checksum) - rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL | - E1000_RXCSUM_CRCOFL); + if (rxmode->offloads & DEV_RX_OFFLOAD_IPV4_CKSUM) + rxcsum |= E1000_RXCSUM_IPOFL; else - rxcsum &= ~(E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL | - E1000_RXCSUM_CRCOFL); + rxcsum &= ~E1000_RXCSUM_IPOFL; + if (rxmode->offloads & + (DEV_RX_OFFLOAD_TCP_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM)) + rxcsum |= E1000_RXCSUM_TUOFL; + else + rxcsum &= ~E1000_RXCSUM_TUOFL; + if (rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM) + rxcsum |= E1000_RXCSUM_CRCOFL; + else + rxcsum &= ~E1000_RXCSUM_CRCOFL; + E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum); /* Setup the Receive Control Register. */ - if (dev->data->dev_conf.rxmode.hw_strip_crc) { - rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */ + if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC) { + rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */ - /* set STRCRC bit in all queues */ + /* clear STRCRC bit in all queues */ if (hw->mac.type == e1000_i350 || hw->mac.type == e1000_i210 || hw->mac.type == e1000_i211 || @@ -2427,14 +2531,14 @@ eth_igb_rx_init(struct rte_eth_dev *dev) rxq = dev->data->rx_queues[i]; uint32_t dvmolr = E1000_READ_REG(hw, E1000_DVMOLR(rxq->reg_idx)); - dvmolr |= E1000_DVMOLR_STRCRC; + dvmolr &= ~E1000_DVMOLR_STRCRC; E1000_WRITE_REG(hw, E1000_DVMOLR(rxq->reg_idx), dvmolr); } } } else { - rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */ + rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. 
*/ - /* clear STRCRC bit in all queues */ + /* set STRCRC bit in all queues */ if (hw->mac.type == e1000_i350 || hw->mac.type == e1000_i210 || hw->mac.type == e1000_i211 || @@ -2443,7 +2547,7 @@ eth_igb_rx_init(struct rte_eth_dev *dev) rxq = dev->data->rx_queues[i]; uint32_t dvmolr = E1000_READ_REG(hw, E1000_DVMOLR(rxq->reg_idx)); - dvmolr &= ~E1000_DVMOLR_STRCRC; + dvmolr |= E1000_DVMOLR_STRCRC; E1000_WRITE_REG(hw, E1000_DVMOLR(rxq->reg_idx), dvmolr); } } @@ -2654,7 +2758,7 @@ eth_igbvf_rx_init(struct rte_eth_dev *dev) E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl); } - if (dev->data->dev_conf.rxmode.enable_scatter) { + if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) { if (!dev->data->scattered_rx) PMD_INIT_LOG(DEBUG, "forcing scatter mode"); dev->rx_pkt_burst = eth_igb_recv_scattered_pkts; @@ -2741,6 +2845,7 @@ igb_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, qinfo->conf.rx_free_thresh = rxq->rx_free_thresh; qinfo->conf.rx_drop_en = rxq->drop_en; + qinfo->conf.offloads = rxq->offloads; } void @@ -2756,6 +2861,47 @@ igb_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, qinfo->conf.tx_thresh.pthresh = txq->pthresh; qinfo->conf.tx_thresh.hthresh = txq->hthresh; qinfo->conf.tx_thresh.wthresh = txq->wthresh; + qinfo->conf.offloads = txq->offloads; +} + +int +igb_rss_conf_init(struct rte_eth_dev *dev, + struct igb_rte_flow_rss_conf *out, + const struct rte_flow_action_rss *in) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + if (in->key_len > RTE_DIM(out->key) || + ((hw->mac.type == e1000_82576) && + (in->queue_num > IGB_MAX_RX_QUEUE_NUM_82576)) || + ((hw->mac.type != e1000_82576) && + (in->queue_num > IGB_MAX_RX_QUEUE_NUM))) + return -EINVAL; + out->conf = (struct rte_flow_action_rss){ + .func = in->func, + .level = in->level, + .types = in->types, + .key_len = in->key_len, + .queue_num = in->queue_num, + .key = memcpy(out->key, in->key, in->key_len), + .queue = memcpy(out->queue, in->queue, + sizeof(*in->queue) * in->queue_num), + }; + return 0; +} + +int +igb_action_rss_same(const struct rte_flow_action_rss *comp, + const struct rte_flow_action_rss *with) +{ + return (comp->func == with->func && + comp->level == with->level && + comp->types == with->types && + comp->key_len == with->key_len && + comp->queue_num == with->queue_num && + !memcmp(comp->key, with->key, with->key_len) && + !memcmp(comp->queue, with->queue, + sizeof(*with->queue) * with->queue_num)); } int @@ -2764,7 +2910,12 @@ igb_config_rss_filter(struct rte_eth_dev *dev, { uint32_t shift; uint16_t i, j; - struct rte_eth_rss_conf rss_conf = conf->rss_conf; + struct rte_eth_rss_conf rss_conf = { + .rss_key = conf->conf.key_len ? 
+ (void *)(uintptr_t)conf->conf.key : NULL, + .rss_key_len = conf->conf.key_len, + .rss_hf = conf->conf.types, + }; struct e1000_filter_info *filter_info = E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); @@ -2772,8 +2923,8 @@ igb_config_rss_filter(struct rte_eth_dev *dev, hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); if (!add) { - if (memcmp(conf, &filter_info->rss_info, - sizeof(struct igb_rte_flow_rss_conf)) == 0) { + if (igb_action_rss_same(&filter_info->rss_info.conf, + &conf->conf)) { igb_rss_disable(dev); memset(&filter_info->rss_info, 0, sizeof(struct igb_rte_flow_rss_conf)); @@ -2782,7 +2933,7 @@ igb_config_rss_filter(struct rte_eth_dev *dev, return -EINVAL; } - if (filter_info->rss_info.num) + if (filter_info->rss_info.conf.queue_num) return -EINVAL; /* Fill in redirection table. */ @@ -2794,9 +2945,9 @@ igb_config_rss_filter(struct rte_eth_dev *dev, } reta; uint8_t q_idx; - q_idx = conf->queue[j]; - if (j == conf->num) + if (j == conf->conf.queue_num) j = 0; + q_idx = conf->conf.queue[j]; reta.bytes[i & 3] = (uint8_t)(q_idx << shift); if ((i & 3) == 3) E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta.dword); @@ -2813,8 +2964,8 @@ igb_config_rss_filter(struct rte_eth_dev *dev, rss_conf.rss_key = rss_intel_key; /* Default hash key */ igb_hw_rss_hash_set(hw, &rss_conf); - rte_memcpy(&filter_info->rss_info, - conf, sizeof(struct igb_rte_flow_rss_conf)); + if (igb_rss_conf_init(dev, &filter_info->rss_info, &conf->conf)) + return -EINVAL; return 0; }
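
Illustrative note (not part of the patch): the hunks above move the igb PMD from the old rte_eth_rxmode bit-fields (hw_strip_crc, hw_ip_checksum, enable_scatter, jumbo_frame) to the per-port/per-queue DEV_RX_OFFLOAD_*/DEV_TX_OFFLOAD_* flags, and report the supported sets through igb_get_rx_port_offloads_capa(), igb_get_rx_queue_offloads_capa(), igb_get_tx_port_offloads_capa() and igb_get_tx_queue_offloads_capa(). The sketch below shows how an application would consume those capabilities via the generic ethdev calls. It is an assumption-laden example, not code from this patch: port id 0, the 512-descriptor ring size, the helper name and the mempool argument are invented for the sketch, and it assumes a DPDK release (roughly 19.11 to 21.08) where rte_eth_dev_info_get() returns int and the DEV_*_OFFLOAD_* names are still current.

#include <string.h>
#include <stdint.h>

#include <rte_ethdev.h>
#include <rte_mempool.h>

/* Hypothetical helper: bring up one Rx and one Tx queue on port 0,
 * enabling only the offloads the PMD actually advertises.
 */
static int
setup_port0_offloads(struct rte_mempool *mb_pool)
{
	uint16_t port_id = 0;	/* assumption for the sketch */
	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf port_conf;
	struct rte_eth_rxconf rxq_conf;
	int ret;

	memset(&port_conf, 0, sizeof(port_conf));

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	/* Port-wide offloads, taken from rx/tx_offload_capa, which the
	 * PMD fills from igb_get_rx/tx_port_offloads_capa().
	 */
	if ((dev_info.rx_offload_capa & DEV_RX_OFFLOAD_CHECKSUM) ==
			DEV_RX_OFFLOAD_CHECKSUM)
		port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_CHECKSUM;
	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_KEEP_CRC)
		port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPV4_CKSUM)
		port_conf.txmode.offloads |= DEV_TX_OFFLOAD_IPV4_CKSUM;

	ret = rte_eth_dev_configure(port_id, 1, 1, &port_conf);
	if (ret != 0)
		return ret;

	/* Per-queue offloads must be a subset of rx_queue_offload_capa;
	 * leaving them empty means the queue simply inherits the
	 * port-wide set (the PMD ORs the two in eth_igb_rx_queue_setup()).
	 */
	rxq_conf = dev_info.default_rxconf;
	rxq_conf.offloads = 0;

	ret = rte_eth_rx_queue_setup(port_id, 0, 512,
			rte_eth_dev_socket_id(port_id), &rxq_conf, mb_pool);
	if (ret != 0)
		return ret;

	/* NULL tx_conf: fall back to the PMD default Tx config plus the
	 * port-wide txmode.offloads stored in dev_conf.
	 */
	return rte_eth_tx_queue_setup(port_id, 0, 512,
			rte_eth_dev_socket_id(port_id), NULL);
}

Because eth_igb_rx_queue_setup() ORs rx_conf->offloads with dev_conf.rxmode.offloads, every queue inherits the port-wide offloads; queue-specific additions must come from rx_queue_offload_capa, which this patch reports as non-zero only for the e1000_vfadapt_i350 case.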