X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Ftxgbe%2Ftxgbe_rxtx.c;h=ac09e75a3e800280eb4ab497e5fa60a24f05d3e6;hb=7f2ae5ddb1b3567655555a34fb014fdef9caabc2;hp=5fadbb1438a974f834844f45ede6a040cdc287f1;hpb=8bdc7882f376590bd33f6ecfd3d5c0d19d4cad44;p=dpdk.git diff --git a/drivers/net/txgbe/txgbe_rxtx.c b/drivers/net/txgbe/txgbe_rxtx.c index 5fadbb1438..ac09e75a3e 100644 --- a/drivers/net/txgbe/txgbe_rxtx.c +++ b/drivers/net/txgbe/txgbe_rxtx.c @@ -19,7 +19,8 @@ #include #include #include -#include +#include +#include #include #include #include @@ -40,6 +41,12 @@ #include "txgbe_ethdev.h" #include "txgbe_rxtx.h" +#ifdef RTE_LIBRTE_IEEE1588 +#define TXGBE_TX_IEEE1588_TMST PKT_TX_IEEE1588_TMST +#else +#define TXGBE_TX_IEEE1588_TMST 0 +#endif + /* Bit Mask to indicate what bits required for building TX context */ static const u64 TXGBE_TX_OFFLOAD_MASK = (PKT_TX_IP_CKSUM | PKT_TX_OUTER_IPV6 | @@ -50,7 +57,11 @@ static const u64 TXGBE_TX_OFFLOAD_MASK = (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK | PKT_TX_TCP_SEG | PKT_TX_TUNNEL_MASK | - PKT_TX_OUTER_IP_CKSUM); + PKT_TX_OUTER_IP_CKSUM | +#ifdef RTE_LIB_SECURITY + PKT_TX_SEC_OFFLOAD | +#endif + TXGBE_TX_IEEE1588_TMST); #define TXGBE_TX_OFFLOAD_NOTSUP_MASK \ (PKT_TX_OFFLOAD_MASK ^ TXGBE_TX_OFFLOAD_MASK) @@ -304,7 +315,8 @@ txgbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts, static inline void txgbe_set_xmit_ctx(struct txgbe_tx_queue *txq, volatile struct txgbe_tx_ctx_desc *ctx_txd, - uint64_t ol_flags, union txgbe_tx_offload tx_offload) + uint64_t ol_flags, union txgbe_tx_offload tx_offload, + __rte_unused uint64_t *mdata) { union txgbe_tx_offload tx_offload_mask; uint32_t type_tucmd_mlhl; @@ -398,6 +410,19 @@ txgbe_set_xmit_ctx(struct txgbe_tx_queue *txq, vlan_macip_lens |= TXGBE_TXD_VLAN(tx_offload.vlan_tci); } +#ifdef RTE_LIB_SECURITY + if (ol_flags & PKT_TX_SEC_OFFLOAD) { + union txgbe_crypto_tx_desc_md *md = + (union txgbe_crypto_tx_desc_md *)mdata; + tunnel_seed |= TXGBE_TXD_IPSEC_SAIDX(md->sa_idx); + type_tucmd_mlhl |= md->enc ? + (TXGBE_TXD_IPSEC_ESP | TXGBE_TXD_IPSEC_ESPENC) : 0; + type_tucmd_mlhl |= TXGBE_TXD_IPSEC_ESPLEN(md->pad_len); + tx_offload_mask.sa_idx |= ~0; + tx_offload_mask.sec_pad_len |= ~0; + } +#endif + txq->ctx_cache[ctx_idx].flags = ol_flags; txq->ctx_cache[ctx_idx].tx_offload.data[0] = tx_offload_mask.data[0] & tx_offload.data[0]; @@ -694,6 +719,9 @@ txgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint32_t ctx = 0; uint32_t new_ctx; union txgbe_tx_offload tx_offload; +#ifdef RTE_LIB_SECURITY + uint8_t use_ipsec; +#endif tx_offload.data[0] = 0; tx_offload.data[1] = 0; @@ -720,6 +748,9 @@ txgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, * are needed for offload functionality. 
*/ ol_flags = tx_pkt->ol_flags; +#ifdef RTE_LIB_SECURITY + use_ipsec = txq->using_ipsec && (ol_flags & PKT_TX_SEC_OFFLOAD); +#endif /* If hardware offload required */ tx_ol_req = ol_flags & TXGBE_TX_OFFLOAD_MASK; @@ -735,6 +766,16 @@ txgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, tx_offload.outer_l3_len = tx_pkt->outer_l3_len; tx_offload.outer_tun_len = txgbe_get_tun_len(tx_pkt); +#ifdef RTE_LIB_SECURITY + if (use_ipsec) { + union txgbe_crypto_tx_desc_md *ipsec_mdata = + (union txgbe_crypto_tx_desc_md *) + rte_security_dynfield(tx_pkt); + tx_offload.sa_idx = ipsec_mdata->sa_idx; + tx_offload.sec_pad_len = ipsec_mdata->pad_len; + } +#endif + /* If new context need be built or reuse the exist ctx*/ ctx = what_ctx_update(txq, tx_ol_req, tx_offload); /* Only allocate context descriptor if required */ @@ -851,6 +892,11 @@ txgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, */ cmd_type_len = TXGBE_TXD_FCS; +#ifdef RTE_LIBRTE_IEEE1588 + if (ol_flags & PKT_TX_IEEE1588_TMST) + cmd_type_len |= TXGBE_TXD_1588; +#endif + olinfo_status = 0; if (tx_ol_req) { if (ol_flags & PKT_TX_TCP_SEG) { @@ -883,7 +929,8 @@ txgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, } txgbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req, - tx_offload); + tx_offload, + rte_security_dynfield(tx_pkt)); txe->last_id = tx_last; tx_id = txe->next_id; @@ -902,6 +949,10 @@ txgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, } olinfo_status |= TXGBE_TXD_PAYLEN(pkt_len); +#ifdef RTE_LIB_SECURITY + if (use_ipsec) + olinfo_status |= TXGBE_TXD_IPSEC; +#endif m_seg = tx_pkt; do { @@ -1028,8 +1079,20 @@ txgbe_rxd_pkt_info_to_pkt_flags(uint32_t pkt_info) PKT_RX_RSS_HASH, 0, 0, 0, 0, 0, 0, PKT_RX_FDIR, }; - +#ifdef RTE_LIBRTE_IEEE1588 + static uint64_t ip_pkt_etqf_map[8] = { + 0, 0, 0, PKT_RX_IEEE1588_PTP, + 0, 0, 0, 0, + }; + int etfid = txgbe_etflt_id(TXGBE_RXD_PTID(pkt_info)); + if (likely(-1 != etfid)) + return ip_pkt_etqf_map[etfid] | + ip_rss_types_map[TXGBE_RXD_RSSTYPE(pkt_info)]; + else + return ip_rss_types_map[TXGBE_RXD_RSSTYPE(pkt_info)]; +#else return ip_rss_types_map[TXGBE_RXD_RSSTYPE(pkt_info)]; +#endif } static inline uint64_t @@ -1046,6 +1109,10 @@ rx_desc_status_to_pkt_flags(uint32_t rx_status, uint64_t vlan_flags) vlan_flags & PKT_RX_VLAN_STRIPPED) ? 
vlan_flags : 0; +#ifdef RTE_LIBRTE_IEEE1588 + if (rx_status & TXGBE_RXD_STAT_1588) + pkt_flags = pkt_flags | PKT_RX_IEEE1588_TMST; +#endif return pkt_flags; } @@ -1070,6 +1137,14 @@ rx_desc_error_to_pkt_flags(uint32_t rx_status) pkt_flags |= PKT_RX_EIP_CKSUM_BAD; } +#ifdef RTE_LIB_SECURITY + if (rx_status & TXGBE_RXD_STAT_SECP) { + pkt_flags |= PKT_RX_SEC_OFFLOAD; + if (rx_status & TXGBE_RXD_ERR_SECERR) + pkt_flags |= PKT_RX_SEC_OFFLOAD_FAILED; + } +#endif + return pkt_flags; } @@ -1117,7 +1192,7 @@ txgbe_rx_scan_hw_ring(struct txgbe_rx_queue *rxq) for (j = 0; j < LOOK_AHEAD; j++) s[j] = rte_le_to_cpu_32(rxdp[j].qw1.lo.status); - rte_smp_rmb(); + rte_atomic_thread_fence(__ATOMIC_ACQUIRE); /* Compute how many status bits were set */ for (nb_dd = 0; nb_dd < LOOK_AHEAD && @@ -1898,6 +1973,11 @@ txgbe_get_rx_port_offloads(struct rte_eth_dev *dev) offloads |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM; +#ifdef RTE_LIB_SECURITY + if (dev->security_ctx) + offloads |= DEV_RX_OFFLOAD_SECURITY; +#endif + return offloads; } @@ -1916,6 +1996,98 @@ txgbe_tx_queue_release_mbufs(struct txgbe_tx_queue *txq) } } +static int +txgbe_tx_done_cleanup_full(struct txgbe_tx_queue *txq, uint32_t free_cnt) +{ + struct txgbe_tx_entry *swr_ring = txq->sw_ring; + uint16_t i, tx_last, tx_id; + uint16_t nb_tx_free_last; + uint16_t nb_tx_to_clean; + uint32_t pkt_cnt; + + /* Start free mbuf from the next of tx_tail */ + tx_last = txq->tx_tail; + tx_id = swr_ring[tx_last].next_id; + + if (txq->nb_tx_free == 0 && txgbe_xmit_cleanup(txq)) + return 0; + + nb_tx_to_clean = txq->nb_tx_free; + nb_tx_free_last = txq->nb_tx_free; + if (!free_cnt) + free_cnt = txq->nb_tx_desc; + + /* Loop through swr_ring to count the amount of + * freeable mubfs and packets. + */ + for (pkt_cnt = 0; pkt_cnt < free_cnt; ) { + for (i = 0; i < nb_tx_to_clean && + pkt_cnt < free_cnt && + tx_id != tx_last; i++) { + if (swr_ring[tx_id].mbuf != NULL) { + rte_pktmbuf_free_seg(swr_ring[tx_id].mbuf); + swr_ring[tx_id].mbuf = NULL; + + /* + * last segment in the packet, + * increment packet count + */ + pkt_cnt += (swr_ring[tx_id].last_id == tx_id); + } + + tx_id = swr_ring[tx_id].next_id; + } + + if (pkt_cnt < free_cnt) { + if (txgbe_xmit_cleanup(txq)) + break; + + nb_tx_to_clean = txq->nb_tx_free - nb_tx_free_last; + nb_tx_free_last = txq->nb_tx_free; + } + } + + return (int)pkt_cnt; +} + +static int +txgbe_tx_done_cleanup_simple(struct txgbe_tx_queue *txq, + uint32_t free_cnt) +{ + int i, n, cnt; + + if (free_cnt == 0 || free_cnt > txq->nb_tx_desc) + free_cnt = txq->nb_tx_desc; + + cnt = free_cnt - free_cnt % txq->tx_free_thresh; + + for (i = 0; i < cnt; i += n) { + if (txq->nb_tx_desc - txq->nb_tx_free < txq->tx_free_thresh) + break; + + n = txgbe_tx_free_bufs(txq); + + if (n == 0) + break; + } + + return i; +} + +int +txgbe_dev_tx_done_cleanup(void *tx_queue, uint32_t free_cnt) +{ + struct txgbe_tx_queue *txq = (struct txgbe_tx_queue *)tx_queue; + if (txq->offloads == 0 && +#ifdef RTE_LIB_SECURITY + !(txq->using_ipsec) && +#endif + txq->tx_free_thresh >= RTE_PMD_TXGBE_TX_MAX_BURST) + return txgbe_tx_done_cleanup_simple(txq, free_cnt); + + return txgbe_tx_done_cleanup_full(txq, free_cnt); +} + static void __rte_cold txgbe_tx_free_swring(struct txgbe_tx_queue *txq) { @@ -1993,6 +2165,9 @@ txgbe_set_tx_function(struct rte_eth_dev *dev, struct txgbe_tx_queue *txq) { /* Use a simple Tx queue (no offloads, no multi segs) if possible */ if (txq->offloads == 0 && +#ifdef RTE_LIB_SECURITY + !(txq->using_ipsec) && +#endif txq->tx_free_thresh >= RTE_PMD_TXGBE_TX_MAX_BURST) { 
PMD_INIT_LOG(DEBUG, "Using simple tx code path"); dev->tx_pkt_burst = txgbe_xmit_pkts_simple; @@ -2047,6 +2222,10 @@ txgbe_get_tx_port_offloads(struct rte_eth_dev *dev) tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM; +#ifdef RTE_LIB_SECURITY + if (dev->security_ctx) + tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY; +#endif return tx_offload_capa; } @@ -2145,6 +2324,10 @@ txgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, txq->offloads = offloads; txq->ops = &def_txq_ops; txq->tx_deferred_start = tx_conf->tx_deferred_start; +#ifdef RTE_LIB_SECURITY + txq->using_ipsec = !!(dev->data->dev_conf.txmode.offloads & + DEV_TX_OFFLOAD_SECURITY); +#endif /* Modification to set tail pointer for virtual function * if vf is detected. @@ -2501,6 +2684,79 @@ txgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, return 0; } +uint32_t +txgbe_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id) +{ +#define TXGBE_RXQ_SCAN_INTERVAL 4 + volatile struct txgbe_rx_desc *rxdp; + struct txgbe_rx_queue *rxq; + uint32_t desc = 0; + + rxq = dev->data->rx_queues[rx_queue_id]; + rxdp = &rxq->rx_ring[rxq->rx_tail]; + + while ((desc < rxq->nb_rx_desc) && + (rxdp->qw1.lo.status & + rte_cpu_to_le_32(TXGBE_RXD_STAT_DD))) { + desc += TXGBE_RXQ_SCAN_INTERVAL; + rxdp += TXGBE_RXQ_SCAN_INTERVAL; + if (rxq->rx_tail + desc >= rxq->nb_rx_desc) + rxdp = &(rxq->rx_ring[rxq->rx_tail + + desc - rxq->nb_rx_desc]); + } + + return desc; +} + +int +txgbe_dev_rx_descriptor_status(void *rx_queue, uint16_t offset) +{ + struct txgbe_rx_queue *rxq = rx_queue; + volatile uint32_t *status; + uint32_t nb_hold, desc; + + if (unlikely(offset >= rxq->nb_rx_desc)) + return -EINVAL; + + nb_hold = rxq->nb_rx_hold; + if (offset >= rxq->nb_rx_desc - nb_hold) + return RTE_ETH_RX_DESC_UNAVAIL; + + desc = rxq->rx_tail + offset; + if (desc >= rxq->nb_rx_desc) + desc -= rxq->nb_rx_desc; + + status = &rxq->rx_ring[desc].qw1.lo.status; + if (*status & rte_cpu_to_le_32(TXGBE_RXD_STAT_DD)) + return RTE_ETH_RX_DESC_DONE; + + return RTE_ETH_RX_DESC_AVAIL; +} + +int +txgbe_dev_tx_descriptor_status(void *tx_queue, uint16_t offset) +{ + struct txgbe_tx_queue *txq = tx_queue; + volatile uint32_t *status; + uint32_t desc; + + if (unlikely(offset >= txq->nb_tx_desc)) + return -EINVAL; + + desc = txq->tx_tail + offset; + if (desc >= txq->nb_tx_desc) { + desc -= txq->nb_tx_desc; + if (desc >= txq->nb_tx_desc) + desc -= txq->nb_tx_desc; + } + + status = &txq->tx_ring[desc].dw3; + if (*status & rte_cpu_to_le_32(TXGBE_TXD_DD)) + return RTE_ETH_TX_DESC_DONE; + + return RTE_ETH_TX_DESC_FULL; +} + void __rte_cold txgbe_dev_clear_queues(struct rte_eth_dev *dev) { @@ -3125,7 +3381,7 @@ txgbe_dcb_hw_configure(struct rte_eth_dev *dev, struct txgbe_dcb_config *dcb_config) { int ret = 0; - uint8_t i, nb_tcs; + uint8_t i, pfc_en, nb_tcs; uint16_t pbsize, rx_buffer_size; uint8_t config_dcb_rx = 0; uint8_t config_dcb_tx = 0; @@ -3299,6 +3555,26 @@ txgbe_dcb_hw_configure(struct rte_eth_dev *dev, /* Configure queue statistics registers */ txgbe_dcb_config_tc_stats_raptor(hw, dcb_config); + /* Check if the PFC is supported */ + if (dev->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) { + pbsize = (uint16_t)(rx_buffer_size / nb_tcs); + for (i = 0; i < nb_tcs; i++) { + /* If the TC count is 8, + * and the default high_water is 48, + * the low_water is 16 as default. 
+ */ + hw->fc.high_water[i] = (pbsize * 3) / 4; + hw->fc.low_water[i] = pbsize / 4; + /* Enable pfc for this TC */ + tc = &dcb_config->tc_config[i]; + tc->pfc = txgbe_dcb_pfc_enabled; + } + txgbe_dcb_unpack_pfc_cee(dcb_config, map, &pfc_en); + if (dcb_config->num_tcs.pfc_tcs == ETH_4_TCS) + pfc_en &= 0x0F; + ret = txgbe_dcb_config_pfc(hw, pfc_en, map); + } + return ret; } @@ -3852,6 +4128,7 @@ txgbe_set_rsc(struct rte_eth_dev *dev) void __rte_cold txgbe_set_rx_function(struct rte_eth_dev *dev) { + uint16_t i; struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev); /* @@ -3912,6 +4189,15 @@ txgbe_set_rx_function(struct rte_eth_dev *dev) dev->rx_pkt_burst = txgbe_recv_pkts; } + +#ifdef RTE_LIB_SECURITY + for (i = 0; i < dev->data->nb_rx_queues; i++) { + struct txgbe_rx_queue *rxq = dev->data->rx_queues[i]; + + rxq->using_ipsec = !!(dev->data->dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_SECURITY); + } +#endif } /* @@ -4182,6 +4468,19 @@ txgbe_dev_rxtx_start(struct rte_eth_dev *dev) dev->data->dev_conf.lpbk_mode) txgbe_setup_loopback_link_raptor(hw); +#ifdef RTE_LIB_SECURITY + if ((dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SECURITY) || + (dev->data->dev_conf.txmode.offloads & DEV_TX_OFFLOAD_SECURITY)) { + ret = txgbe_crypto_enable_ipsec(dev); + if (ret != 0) { + PMD_DRV_LOG(ERR, + "txgbe_crypto_enable_ipsec fails with %d.", + ret); + return ret; + } + } +#endif + return 0; } @@ -4423,3 +4722,99 @@ txgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, qinfo->conf.tx_deferred_start = txq->tx_deferred_start; } +int +txgbe_rss_conf_init(struct txgbe_rte_flow_rss_conf *out, + const struct rte_flow_action_rss *in) +{ + if (in->key_len > RTE_DIM(out->key) || + in->queue_num > RTE_DIM(out->queue)) + return -EINVAL; + out->conf = (struct rte_flow_action_rss){ + .func = in->func, + .level = in->level, + .types = in->types, + .key_len = in->key_len, + .queue_num = in->queue_num, + .key = memcpy(out->key, in->key, in->key_len), + .queue = memcpy(out->queue, in->queue, + sizeof(*in->queue) * in->queue_num), + }; + return 0; +} + +int +txgbe_action_rss_same(const struct rte_flow_action_rss *comp, + const struct rte_flow_action_rss *with) +{ + return (comp->func == with->func && + comp->level == with->level && + comp->types == with->types && + comp->key_len == with->key_len && + comp->queue_num == with->queue_num && + !memcmp(comp->key, with->key, with->key_len) && + !memcmp(comp->queue, with->queue, + sizeof(*with->queue) * with->queue_num)); +} + +int +txgbe_config_rss_filter(struct rte_eth_dev *dev, + struct txgbe_rte_flow_rss_conf *conf, bool add) +{ + struct txgbe_hw *hw; + uint32_t reta; + uint16_t i; + uint16_t j; + struct rte_eth_rss_conf rss_conf = { + .rss_key = conf->conf.key_len ? + (void *)(uintptr_t)conf->conf.key : NULL, + .rss_key_len = conf->conf.key_len, + .rss_hf = conf->conf.types, + }; + struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev); + + PMD_INIT_FUNC_TRACE(); + hw = TXGBE_DEV_HW(dev); + + if (!add) { + if (txgbe_action_rss_same(&filter_info->rss_info.conf, + &conf->conf)) { + txgbe_rss_disable(dev); + memset(&filter_info->rss_info, 0, + sizeof(struct txgbe_rte_flow_rss_conf)); + return 0; + } + return -EINVAL; + } + + if (filter_info->rss_info.conf.queue_num) + return -EINVAL; + /* Fill in redirection table + * The byte-swap is needed because NIC registers are in + * little-endian order. 
+ */ + reta = 0; + for (i = 0, j = 0; i < ETH_RSS_RETA_SIZE_128; i++, j++) { + if (j == conf->conf.queue_num) + j = 0; + reta = (reta >> 8) | LS32(conf->conf.queue[j], 24, 0xFF); + if ((i & 3) == 3) + wr32a(hw, TXGBE_REG_RSSTBL, i >> 2, reta); + } + + /* Configure the RSS key and the RSS protocols used to compute + * the RSS hash of input packets. + */ + if ((rss_conf.rss_hf & TXGBE_RSS_OFFLOAD_ALL) == 0) { + txgbe_rss_disable(dev); + return 0; + } + if (rss_conf.rss_key == NULL) + rss_conf.rss_key = rss_intel_key; /* Default hash key */ + txgbe_dev_rss_hash_update(dev, &rss_conf); + + if (txgbe_rss_conf_init(&filter_info->rss_info, &conf->conf)) + return -EINVAL; + + return 0; +} +
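
The four handlers added above (txgbe_dev_rx_queue_count, txgbe_dev_rx_descriptor_status, txgbe_dev_tx_descriptor_status and txgbe_dev_tx_done_cleanup) back the generic ethdev descriptor-status and done-cleanup calls once they are hooked into the driver's eth_dev_ops, which happens outside this diff. A minimal application-side sketch, assuming that wiring is in place; the helper name poll_queue_state and the port/queue/offset/count values below are illustrative only, not anything mandated by the driver:

#include <stdio.h>
#include <rte_ethdev.h>

/* Hypothetical helper: report the state of one Rx/Tx queue pair. */
static void
poll_queue_state(uint16_t port_id, uint16_t queue_id)
{
	int used, status;

	/* Number of Rx descriptors the NIC has completed but the
	 * application has not yet retrieved (txgbe_dev_rx_queue_count).
	 */
	used = rte_eth_rx_queue_count(port_id, queue_id);
	if (used > 0)
		printf("rxq %u: %d descriptors done\n",
		       (unsigned int)queue_id, used);

	/* State of the single descriptor 32 slots past the Rx tail
	 * (txgbe_dev_rx_descriptor_status).
	 */
	status = rte_eth_rx_descriptor_status(port_id, queue_id, 32);
	if (status == RTE_ETH_RX_DESC_DONE)
		printf("rxq %u: a burst of 32+ packets is ready\n",
		       (unsigned int)queue_id);

	/* State of the descriptor 32 slots past the Tx tail
	 * (txgbe_dev_tx_descriptor_status).
	 */
	status = rte_eth_tx_descriptor_status(port_id, queue_id, 32);
	if (status == RTE_ETH_TX_DESC_DONE)
		printf("txq %u: descriptor at offset 32 already completed\n",
		       (unsigned int)queue_id);

	/* Ask the PMD to reclaim up to 64 already-transmitted mbufs
	 * (txgbe_dev_tx_done_cleanup).
	 */
	rte_eth_tx_done_cleanup(port_id, queue_id, 64);
}

Note that rte_eth_tx_done_cleanup() only takes the fast path added in txgbe_tx_done_cleanup_simple() when the queue runs without offloads (and without inline IPsec) and tx_free_thresh is at least RTE_PMD_TXGBE_TX_MAX_BURST, mirroring the condition txgbe_set_tx_function() uses to pick txgbe_xmit_pkts_simple; otherwise it falls back to txgbe_tx_done_cleanup_full().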