#include <rte_log.h>
#include <rte_debug.h>
#include <rte_ethdev.h>
-#include <rte_ethdev_driver.h>
+#include <ethdev_driver.h>
+#include <rte_security_driver.h>
#include <rte_memzone.h>
#include <rte_atomic.h>
#include <rte_mempool.h>
PKT_TX_TCP_SEG |
PKT_TX_TUNNEL_MASK |
PKT_TX_OUTER_IP_CKSUM |
+#ifdef RTE_LIB_SECURITY
+ PKT_TX_SEC_OFFLOAD |
+#endif
TXGBE_TX_IEEE1588_TMST);
#define TXGBE_TX_OFFLOAD_NOTSUP_MASK \
static inline void
txgbe_set_xmit_ctx(struct txgbe_tx_queue *txq,
volatile struct txgbe_tx_ctx_desc *ctx_txd,
- uint64_t ol_flags, union txgbe_tx_offload tx_offload)
+ uint64_t ol_flags, union txgbe_tx_offload tx_offload,
+ __rte_unused uint64_t *mdata)
{
union txgbe_tx_offload tx_offload_mask;
uint32_t type_tucmd_mlhl;
vlan_macip_lens |= TXGBE_TXD_VLAN(tx_offload.vlan_tci);
}
+#ifdef RTE_LIB_SECURITY
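+ /* Carry the IPsec SA index, ESP/encryption mode and trailer pad
+ * length from the crypto Tx metadata into the context descriptor.
+ */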
+ if (ol_flags & PKT_TX_SEC_OFFLOAD) {
+ union txgbe_crypto_tx_desc_md *md =
+ (union txgbe_crypto_tx_desc_md *)mdata;
+ tunnel_seed |= TXGBE_TXD_IPSEC_SAIDX(md->sa_idx);
+ type_tucmd_mlhl |= md->enc ?
+ (TXGBE_TXD_IPSEC_ESP | TXGBE_TXD_IPSEC_ESPENC) : 0;
+ type_tucmd_mlhl |= TXGBE_TXD_IPSEC_ESPLEN(md->pad_len);
+ tx_offload_mask.sa_idx |= ~0;
+ tx_offload_mask.sec_pad_len |= ~0;
+ }
+#endif
+
txq->ctx_cache[ctx_idx].flags = ol_flags;
txq->ctx_cache[ctx_idx].tx_offload.data[0] =
tx_offload_mask.data[0] & tx_offload.data[0];
uint32_t ctx = 0;
uint32_t new_ctx;
union txgbe_tx_offload tx_offload;
+#ifdef RTE_LIB_SECURITY
+ uint8_t use_ipsec;
+#endif
tx_offload.data[0] = 0;
tx_offload.data[1] = 0;
* are needed for offload functionality.
*/
ol_flags = tx_pkt->ol_flags;
+#ifdef RTE_LIB_SECURITY
+ use_ipsec = txq->using_ipsec && (ol_flags & PKT_TX_SEC_OFFLOAD);
+#endif
/* If hardware offload required */
tx_ol_req = ol_flags & TXGBE_TX_OFFLOAD_MASK;
tx_offload.outer_l3_len = tx_pkt->outer_l3_len;
tx_offload.outer_tun_len = txgbe_get_tun_len(tx_pkt);
+#ifdef RTE_LIB_SECURITY
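+ /* Read the SA index and ESP trailer pad length from the mbuf's
+ * security dynamic field.
+ */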
+ if (use_ipsec) {
+ union txgbe_crypto_tx_desc_md *ipsec_mdata =
+ (union txgbe_crypto_tx_desc_md *)
+ rte_security_dynfield(tx_pkt);
+ tx_offload.sa_idx = ipsec_mdata->sa_idx;
+ tx_offload.sec_pad_len = ipsec_mdata->pad_len;
+ }
+#endif
+
/* If a new context needs to be built or an existing one can be reused */
ctx = what_ctx_update(txq, tx_ol_req, tx_offload);
/* Only allocate context descriptor if required */
}
txgbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
- tx_offload);
+ tx_offload,
+ rte_security_dynfield(tx_pkt));
txe->last_id = tx_last;
tx_id = txe->next_id;
}
olinfo_status |= TXGBE_TXD_PAYLEN(pkt_len);
+#ifdef RTE_LIB_SECURITY
+ if (use_ipsec)
+ olinfo_status |= TXGBE_TXD_IPSEC;
+#endif
m_seg = tx_pkt;
do {
pkt_flags |= PKT_RX_EIP_CKSUM_BAD;
}
+#ifdef RTE_LIB_SECURITY
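+ /* Translate the hardware "security processed" and "security error"
+ * status bits into the corresponding Rx mbuf offload flags.
+ */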
+ if (rx_status & TXGBE_RXD_STAT_SECP) {
+ pkt_flags |= PKT_RX_SEC_OFFLOAD;
+ if (rx_status & TXGBE_RXD_ERR_SECERR)
+ pkt_flags |= PKT_RX_SEC_OFFLOAD_FAILED;
+ }
+#endif
+
return pkt_flags;
}
for (j = 0; j < LOOK_AHEAD; j++)
s[j] = rte_le_to_cpu_32(rxdp[j].qw1.lo.status);
- rte_smp_rmb();
+ rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
/* Compute how many status bits were set */
for (nb_dd = 0; nb_dd < LOOK_AHEAD &&
offloads |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
+#ifdef RTE_LIB_SECURITY
+ if (dev->security_ctx)
+ offloads |= DEV_RX_OFFLOAD_SECURITY;
+#endif
+
return offloads;
}
}
}
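+/*
+ * Full Tx cleanup: walk sw_ring starting at the entry after tx_tail,
+ * freeing mbuf segments and counting a packet each time an entry's
+ * last_id matches its index, and call txgbe_xmit_cleanup() whenever
+ * more descriptors must be reclaimed from hardware.
+ */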
+static int
+txgbe_tx_done_cleanup_full(struct txgbe_tx_queue *txq, uint32_t free_cnt)
+{
+ struct txgbe_tx_entry *swr_ring = txq->sw_ring;
+ uint16_t i, tx_last, tx_id;
+ uint16_t nb_tx_free_last;
+ uint16_t nb_tx_to_clean;
+ uint32_t pkt_cnt;
+
+ /* Start freeing mbufs from the entry after tx_tail */
+ tx_last = txq->tx_tail;
+ tx_id = swr_ring[tx_last].next_id;
+
+ if (txq->nb_tx_free == 0 && txgbe_xmit_cleanup(txq))
+ return 0;
+
+ nb_tx_to_clean = txq->nb_tx_free;
+ nb_tx_free_last = txq->nb_tx_free;
+ if (!free_cnt)
+ free_cnt = txq->nb_tx_desc;
+
+ /* Loop through swr_ring to count the number of
+ * freeable mbufs and packets.
+ */
+ for (pkt_cnt = 0; pkt_cnt < free_cnt; ) {
+ for (i = 0; i < nb_tx_to_clean &&
+ pkt_cnt < free_cnt &&
+ tx_id != tx_last; i++) {
+ if (swr_ring[tx_id].mbuf != NULL) {
+ rte_pktmbuf_free_seg(swr_ring[tx_id].mbuf);
+ swr_ring[tx_id].mbuf = NULL;
+
+ /*
+ * last segment in the packet,
+ * increment packet count
+ */
+ pkt_cnt += (swr_ring[tx_id].last_id == tx_id);
+ }
+
+ tx_id = swr_ring[tx_id].next_id;
+ }
+
+ if (pkt_cnt < free_cnt) {
+ if (txgbe_xmit_cleanup(txq))
+ break;
+
+ nb_tx_to_clean = txq->nb_tx_free - nb_tx_free_last;
+ nb_tx_free_last = txq->nb_tx_free;
+ }
+ }
+
+ return (int)pkt_cnt;
+}
+
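+/*
+ * Simple Tx cleanup: release mbufs in tx_free_thresh-sized batches via
+ * txgbe_tx_free_bufs(); the count returned is rounded down to a
+ * multiple of tx_free_thresh.
+ */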
+static int
+txgbe_tx_done_cleanup_simple(struct txgbe_tx_queue *txq,
+ uint32_t free_cnt)
+{
+ int i, n, cnt;
+
+ if (free_cnt == 0 || free_cnt > txq->nb_tx_desc)
+ free_cnt = txq->nb_tx_desc;
+
+ cnt = free_cnt - free_cnt % txq->tx_free_thresh;
+
+ for (i = 0; i < cnt; i += n) {
+ if (txq->nb_tx_desc - txq->nb_tx_free < txq->tx_free_thresh)
+ break;
+
+ n = txgbe_tx_free_bufs(txq);
+
+ if (n == 0)
+ break;
+ }
+
+ return i;
+}
+
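+/* rte_eth_tx_done_cleanup() handler: pick the cleanup routine that
+ * matches the Tx path (simple vs. full) chosen for this queue.
+ */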
+int
+txgbe_dev_tx_done_cleanup(void *tx_queue, uint32_t free_cnt)
+{
+ struct txgbe_tx_queue *txq = (struct txgbe_tx_queue *)tx_queue;
+
+ if (txq->offloads == 0 &&
+#ifdef RTE_LIB_SECURITY
+ !(txq->using_ipsec) &&
+#endif
+ txq->tx_free_thresh >= RTE_PMD_TXGBE_TX_MAX_BURST)
+ return txgbe_tx_done_cleanup_simple(txq, free_cnt);
+
+ return txgbe_tx_done_cleanup_full(txq, free_cnt);
+}
+
static void __rte_cold
txgbe_tx_free_swring(struct txgbe_tx_queue *txq)
{
{
/* Use a simple Tx queue (no offloads, no multi segs) if possible */
if (txq->offloads == 0 &&
+#ifdef RTE_LIB_SECURITY
+ !(txq->using_ipsec) &&
+#endif
txq->tx_free_thresh >= RTE_PMD_TXGBE_TX_MAX_BURST) {
PMD_INIT_LOG(DEBUG, "Using simple tx code path");
dev->tx_pkt_burst = txgbe_xmit_pkts_simple;
tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+#ifdef RTE_LIB_SECURITY
+ if (dev->security_ctx)
+ tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
+#endif
return tx_offload_capa;
}
txq->offloads = offloads;
txq->ops = &def_txq_ops;
txq->tx_deferred_start = tx_conf->tx_deferred_start;
+#ifdef RTE_LIB_SECURITY
+ txq->using_ipsec = !!(dev->data->dev_conf.txmode.offloads &
+ DEV_TX_OFFLOAD_SECURITY);
+#endif
/* Modification to set tail pointer for virtual function
* if vf is detected.
return 0;
}
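+/*
+ * Return the number of used Rx descriptors, scanning the ring from
+ * rx_tail in steps of TXGBE_RXQ_SCAN_INTERVAL until a descriptor
+ * without the DD bit is found.
+ */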
+uint32_t
+txgbe_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+#define TXGBE_RXQ_SCAN_INTERVAL 4
+ volatile struct txgbe_rx_desc *rxdp;
+ struct txgbe_rx_queue *rxq;
+ uint32_t desc = 0;
+
+ rxq = dev->data->rx_queues[rx_queue_id];
+ rxdp = &rxq->rx_ring[rxq->rx_tail];
+
+ while ((desc < rxq->nb_rx_desc) &&
+ (rxdp->qw1.lo.status &
+ rte_cpu_to_le_32(TXGBE_RXD_STAT_DD))) {
+ desc += TXGBE_RXQ_SCAN_INTERVAL;
+ rxdp += TXGBE_RXQ_SCAN_INTERVAL;
+ if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
+ rxdp = &(rxq->rx_ring[rxq->rx_tail +
+ desc - rxq->nb_rx_desc]);
+ }
+
+ return desc;
+}
+
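+/*
+ * Report the state of the Rx descriptor 'offset' entries beyond rx_tail:
+ * UNAVAIL if the entry has not been handed back to hardware yet, DONE
+ * once hardware has set the DD bit, AVAIL otherwise.
+ */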
+int
+txgbe_dev_rx_descriptor_status(void *rx_queue, uint16_t offset)
+{
+ struct txgbe_rx_queue *rxq = rx_queue;
+ volatile uint32_t *status;
+ uint32_t nb_hold, desc;
+
+ if (unlikely(offset >= rxq->nb_rx_desc))
+ return -EINVAL;
+
+ nb_hold = rxq->nb_rx_hold;
+ if (offset >= rxq->nb_rx_desc - nb_hold)
+ return RTE_ETH_RX_DESC_UNAVAIL;
+
+ desc = rxq->rx_tail + offset;
+ if (desc >= rxq->nb_rx_desc)
+ desc -= rxq->nb_rx_desc;
+
+ status = &rxq->rx_ring[desc].qw1.lo.status;
+ if (*status & rte_cpu_to_le_32(TXGBE_RXD_STAT_DD))
+ return RTE_ETH_RX_DESC_DONE;
+
+ return RTE_ETH_RX_DESC_AVAIL;
+}
+
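+/*
+ * Report the state of the Tx descriptor 'offset' entries beyond tx_tail:
+ * DONE once hardware has written back the DD bit, FULL otherwise.
+ */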
+int
+txgbe_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
+{
+ struct txgbe_tx_queue *txq = tx_queue;
+ volatile uint32_t *status;
+ uint32_t desc;
+
+ if (unlikely(offset >= txq->nb_tx_desc))
+ return -EINVAL;
+
+ desc = txq->tx_tail + offset;
+ if (desc >= txq->nb_tx_desc) {
+ desc -= txq->nb_tx_desc;
+ if (desc >= txq->nb_tx_desc)
+ desc -= txq->nb_tx_desc;
+ }
+
+ status = &txq->tx_ring[desc].dw3;
+ if (*status & rte_cpu_to_le_32(TXGBE_TXD_DD))
+ return RTE_ETH_TX_DESC_DONE;
+
+ return RTE_ETH_TX_DESC_FULL;
+}
+
void __rte_cold
txgbe_dev_clear_queues(struct rte_eth_dev *dev)
{
void __rte_cold
txgbe_set_rx_function(struct rte_eth_dev *dev)
{
+ uint16_t i;
struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
/*
dev->rx_pkt_burst = txgbe_recv_pkts;
}
+
+#ifdef RTE_LIB_SECURITY
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ struct txgbe_rx_queue *rxq = dev->data->rx_queues[i];
+
+ rxq->using_ipsec = !!(dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_SECURITY);
+ }
+#endif
}
/*
dev->data->dev_conf.lpbk_mode)
txgbe_setup_loopback_link_raptor(hw);
+#ifdef RTE_LIB_SECURITY
+ if ((dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SECURITY) ||
+ (dev->data->dev_conf.txmode.offloads & DEV_TX_OFFLOAD_SECURITY)) {
+ ret = txgbe_crypto_enable_ipsec(dev);
+ if (ret != 0) {
+ PMD_DRV_LOG(ERR,
+ "txgbe_crypto_enable_ipsec fails with %d.",
+ ret);
+ return ret;
+ }
+ }
+#endif
+
return 0;
}
qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
}
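+/* Copy an rte_flow RSS action into the driver's private RSS config,
+ * duplicating the key and queue arrays so they remain valid after the
+ * flow request returns.
+ */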
+int
+txgbe_rss_conf_init(struct txgbe_rte_flow_rss_conf *out,
+ const struct rte_flow_action_rss *in)
+{
+ if (in->key_len > RTE_DIM(out->key) ||
+ in->queue_num > RTE_DIM(out->queue))
+ return -EINVAL;
+ out->conf = (struct rte_flow_action_rss){
+ .func = in->func,
+ .level = in->level,
+ .types = in->types,
+ .key_len = in->key_len,
+ .queue_num = in->queue_num,
+ .key = memcpy(out->key, in->key, in->key_len),
+ .queue = memcpy(out->queue, in->queue,
+ sizeof(*in->queue) * in->queue_num),
+ };
+ return 0;
+}
+
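+/* Return non-zero when two RSS action configurations match exactly. */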
+int
+txgbe_action_rss_same(const struct rte_flow_action_rss *comp,
+ const struct rte_flow_action_rss *with)
+{
+ return (comp->func == with->func &&
+ comp->level == with->level &&
+ comp->types == with->types &&
+ comp->key_len == with->key_len &&
+ comp->queue_num == with->queue_num &&
+ !memcmp(comp->key, with->key, with->key_len) &&
+ !memcmp(comp->queue, with->queue,
+ sizeof(*with->queue) * with->queue_num));
+}
+
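+/*
+ * Add or (with add == false) remove an rte_flow RSS filter: program the
+ * 128-entry redirection table from the requested queues, then apply the
+ * key and hash-type configuration through txgbe_dev_rss_hash_update().
+ */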
+int
+txgbe_config_rss_filter(struct rte_eth_dev *dev,
+ struct txgbe_rte_flow_rss_conf *conf, bool add)
+{
+ struct txgbe_hw *hw;
+ uint32_t reta;
+ uint16_t i;
+ uint16_t j;
+ struct rte_eth_rss_conf rss_conf = {
+ .rss_key = conf->conf.key_len ?
+ (void *)(uintptr_t)conf->conf.key : NULL,
+ .rss_key_len = conf->conf.key_len,
+ .rss_hf = conf->conf.types,
+ };
+ struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
+
+ PMD_INIT_FUNC_TRACE();
+ hw = TXGBE_DEV_HW(dev);
+
+ if (!add) {
+ if (txgbe_action_rss_same(&filter_info->rss_info.conf,
+ &conf->conf)) {
+ txgbe_rss_disable(dev);
+ memset(&filter_info->rss_info, 0,
+ sizeof(struct txgbe_rte_flow_rss_conf));
+ return 0;
+ }
+ return -EINVAL;
+ }
+
+ if (filter_info->rss_info.conf.queue_num)
+ return -EINVAL;
+ /* Fill in redirection table
+ * The byte-swap is needed because NIC registers are in
+ * little-endian order.
+ */
+ reta = 0;
+ for (i = 0, j = 0; i < ETH_RSS_RETA_SIZE_128; i++, j++) {
+ if (j == conf->conf.queue_num)
+ j = 0;
+ reta = (reta >> 8) | LS32(conf->conf.queue[j], 24, 0xFF);
+ if ((i & 3) == 3)
+ wr32a(hw, TXGBE_REG_RSSTBL, i >> 2, reta);
+ }
+
+ /* Configure the RSS key and the RSS protocols used to compute
+ * the RSS hash of input packets.
+ */
+ if ((rss_conf.rss_hf & TXGBE_RSS_OFFLOAD_ALL) == 0) {
+ txgbe_rss_disable(dev);
+ return 0;
+ }
+ if (rss_conf.rss_key == NULL)
+ rss_conf.rss_key = rss_intel_key; /* Default hash key */
+ txgbe_dev_rss_hash_update(dev, &rss_conf);
+
+ if (txgbe_rss_conf_init(&filter_info->rss_info, &conf->conf))
+ return -EINVAL;
+
+ return 0;
+}
+