*/
#include <rte_malloc.h>
-#include <rte_ethdev_driver.h>
+#include <ethdev_driver.h>
#include <rte_net.h>
#include "atl_ethdev.h"
#include "hw_atl/hw_atl_b0_internal.h"
#define ATL_TX_CKSUM_OFFLOAD_MASK ( \
- PKT_TX_IP_CKSUM | \
- PKT_TX_L4_MASK | \
- PKT_TX_TCP_SEG)
+ RTE_MBUF_F_TX_IP_CKSUM | \
+ RTE_MBUF_F_TX_L4_MASK | \
+ RTE_MBUF_F_TX_TCP_SEG)
#define ATL_TX_OFFLOAD_MASK ( \
- PKT_TX_VLAN | \
- PKT_TX_IP_CKSUM | \
- PKT_TX_L4_MASK | \
- PKT_TX_TCP_SEG)
+ RTE_MBUF_F_TX_VLAN | \
+ RTE_MBUF_F_TX_IPV6 | \
+ RTE_MBUF_F_TX_IPV4 | \
+ RTE_MBUF_F_TX_IP_CKSUM | \
+ RTE_MBUF_F_TX_L4_MASK | \
+ RTE_MBUF_F_TX_TCP_SEG)
#define ATL_TX_OFFLOAD_NOTSUP_MASK \
- (PKT_TX_OFFLOAD_MASK ^ ATL_TX_OFFLOAD_MASK)
+ (RTE_MBUF_F_TX_OFFLOAD_MASK ^ ATL_TX_OFFLOAD_MASK)
/**
* Structure associated with each descriptor of the RX ring of a RX queue.
* different socket than was previously used.
*/
if (dev->data->rx_queues[rx_queue_id] != NULL) {
- atl_rx_queue_release(dev->data->rx_queues[rx_queue_id]);
+ atl_rx_queue_release(dev, rx_queue_id);
dev->data->rx_queues[rx_queue_id] = NULL;
}
rxq->rx_free_thresh = rx_conf->rx_free_thresh;
rxq->l3_csum_enabled = dev->data->dev_conf.rxmode.offloads &
- DEV_RX_OFFLOAD_IPV4_CKSUM;
+ RTE_ETH_RX_OFFLOAD_IPV4_CKSUM;
rxq->l4_csum_enabled = dev->data->dev_conf.rxmode.offloads &
- (DEV_RX_OFFLOAD_UDP_CKSUM | DEV_RX_OFFLOAD_TCP_CKSUM);
- if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+ (RTE_ETH_RX_OFFLOAD_UDP_CKSUM | RTE_ETH_RX_OFFLOAD_TCP_CKSUM);
+ if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
PMD_DRV_LOG(ERR, "PMD does not support KEEP_CRC offload");
/* allocate memory for the software ring */
* different socket than was previously used.
*/
if (dev->data->tx_queues[tx_queue_id] != NULL) {
- atl_tx_queue_release(dev->data->tx_queues[tx_queue_id]);
+ atl_tx_queue_release(dev, tx_queue_id);
dev->data->tx_queues[tx_queue_id] = NULL;
}
atl_rx_init(struct rte_eth_dev *eth_dev)
{
struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ struct aq_rss_parameters *rss_params = &hw->aq_nic_cfg->aq_rss;
struct atl_rx_queue *rxq;
uint64_t base_addr = 0;
int i = 0;
}
}
+ for (i = rss_params->indirection_table_size; i--;)
+ rss_params->indirection_table[i] = i &
+ (eth_dev->data->nb_rx_queues - 1);
+ hw_atl_b0_hw_rss_set(hw, rss_params);
return err;
}
}
void
-atl_rx_queue_release(void *rx_queue)
+atl_rx_queue_release(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
- PMD_INIT_FUNC_TRACE();
+ struct atl_rx_queue *rxq = dev->data->rx_queues[rx_queue_id];
- if (rx_queue != NULL) {
- struct atl_rx_queue *rxq = (struct atl_rx_queue *)rx_queue;
+ PMD_INIT_FUNC_TRACE();
+ if (rxq != NULL) {
atl_rx_queue_release_mbufs(rxq);
rte_free(rxq->sw_ring);
rte_free(rxq);
}
void
-atl_tx_queue_release(void *tx_queue)
+atl_tx_queue_release(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
- PMD_INIT_FUNC_TRACE();
+ struct atl_tx_queue *txq = dev->data->tx_queues[tx_queue_id];
- if (tx_queue != NULL) {
- struct atl_tx_queue *txq = (struct atl_tx_queue *)tx_queue;
+ PMD_INIT_FUNC_TRACE();
+ if (txq != NULL) {
atl_tx_queue_release_mbufs(txq);
rte_free(txq->sw_ring);
rte_free(txq);
PMD_INIT_FUNC_TRACE();
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- atl_rx_queue_release(dev->data->rx_queues[i]);
+ atl_rx_queue_release(dev, i);
dev->data->rx_queues[i] = 0;
}
dev->data->nb_rx_queues = 0;
for (i = 0; i < dev->data->nb_tx_queues; i++) {
- atl_tx_queue_release(dev->data->tx_queues[i]);
+ atl_tx_queue_release(dev, i);
dev->data->tx_queues[i] = 0;
}
dev->data->nb_tx_queues = 0;
return 0;
}
+/* Report Rx queue parameters (mempool, scattered-Rx flag, descriptor
+ * ring size) for the rx_queue_info_get ethdev op. rxq is not
+ * NULL-checked; presumably the ethdev layer validates queue_id before
+ * calling — NOTE(review): confirm against rte_eth_rx_queue_info_get().
+ */
+void
+atl_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_rxq_info *qinfo)
+{
+ struct atl_rx_queue *rxq;
+
+ PMD_INIT_FUNC_TRACE();
+
+ rxq = dev->data->rx_queues[queue_id];
+
+ /* Only these three fields are filled; the rest of qinfo is left as
+ * initialized by the caller.
+ */
+ qinfo->mp = rxq->mb_pool;
+ qinfo->scattered_rx = dev->data->scattered_rx;
+ qinfo->nb_desc = rxq->nb_rx_desc;
+}
+
+/* Report Tx queue parameters for the tx_queue_info_get ethdev op.
+ * Only the descriptor ring size is filled. txq is not NULL-checked;
+ * presumably the ethdev layer validates queue_id — NOTE(review): confirm.
+ */
+void
+atl_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_txq_info *qinfo)
+{
+ struct atl_tx_queue *txq;
+
+ PMD_INIT_FUNC_TRACE();
+
+ txq = dev->data->tx_queues[queue_id];
+
+ qinfo->nb_desc = txq->nb_tx_desc;
+}
+
+/* Return Rx queue avail count */
+
+/* rx_queue_count ethdev op: number of ring slots not currently held
+ * back by the driver (nb_rx_hold tracks processed-but-not-yet-returned
+ * descriptors — see the Rx burst path). Returns 0 for a NULL queue.
+ */
+uint32_t
+atl_rx_queue_count(void *rx_queue)
+{
+ struct atl_rx_queue *rxq;
+
+ PMD_INIT_FUNC_TRACE();
+
+ rxq = rx_queue;
+
+ if (rxq == NULL)
+ return 0;
+
+ return rxq->nb_rx_desc - rxq->nb_rx_hold;
+}
+
+/* rx_descriptor_status ethdev op.
+ *
+ * @param rx_queue  driver Rx queue (struct atl_rx_queue *)
+ * @param offset    descriptor offset from the current tail
+ * @return -EINVAL if offset is outside the ring;
+ *         RTE_ETH_RX_DESC_UNAVAIL if the slot is held by the driver;
+ *         RTE_ETH_RX_DESC_DONE if HW has written it back (DD bit set);
+ *         RTE_ETH_RX_DESC_AVAIL otherwise.
+ */
+int
+atl_dev_rx_descriptor_status(void *rx_queue, uint16_t offset)
+{
+ struct atl_rx_queue *rxq = rx_queue;
+ struct hw_atl_rxd_wb_s *rxd;
+ uint32_t idx;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (unlikely(offset >= rxq->nb_rx_desc))
+ return -EINVAL;
+
+ if (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold)
+ return RTE_ETH_RX_DESC_UNAVAIL;
+
+ /* Wrap around the ring when tail + offset passes the end. */
+ idx = rxq->rx_tail + offset;
+
+ if (idx >= rxq->nb_rx_desc)
+ idx -= rxq->nb_rx_desc;
+
+ /* Reinterpret the descriptor as its write-back layout to read DD. */
+ rxd = (struct hw_atl_rxd_wb_s *)&rxq->hw_ring[idx];
+
+ if (rxd->dd)
+ return RTE_ETH_RX_DESC_DONE;
+
+ return RTE_ETH_RX_DESC_AVAIL;
+}
+
+/* tx_descriptor_status ethdev op.
+ *
+ * @param tx_queue  driver Tx queue (struct atl_tx_queue *)
+ * @param offset    descriptor offset from the current tail
+ * @return -EINVAL if offset is outside the ring;
+ *         RTE_ETH_TX_DESC_DONE if HW completed it (DD bit set);
+ *         RTE_ETH_TX_DESC_FULL otherwise.
+ */
+int
+atl_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
+{
+ struct atl_tx_queue *txq = tx_queue;
+ struct hw_atl_txd_s *txd;
+ uint32_t idx;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (unlikely(offset >= txq->nb_tx_desc))
+ return -EINVAL;
+
+ /* Wrap around the ring when tail + offset passes the end. */
+ idx = txq->tx_tail + offset;
+
+ if (idx >= txq->nb_tx_desc)
+ idx -= txq->nb_tx_desc;
+
+ txd = &txq->hw_ring[idx];
+
+ if (txd->dd)
+ return RTE_ETH_TX_DESC_DONE;
+
+ return RTE_ETH_TX_DESC_FULL;
+}
+
+/* Common helper for the rx_queue_intr_enable/disable ops: (un)maps the
+ * Rx queue's interrupt in hardware via hw_atl_itr_irq_map_en_rx_set().
+ *
+ * @param enable  true to map (enable), false to unmap (disable)
+ * @return 0 on success (including a NULL, i.e. released, queue);
+ *         -EINVAL for an out-of-range queue_id.
+ */
+static int
+atl_rx_enable_intr(struct rte_eth_dev *dev, uint16_t queue_id, bool enable)
+{
+ struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct atl_rx_queue *rxq;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (queue_id >= dev->data->nb_rx_queues) {
+ PMD_DRV_LOG(ERR, "Invalid RX queue id=%d", queue_id);
+ return -EINVAL;
+ }
+
+ rxq = dev->data->rx_queues[queue_id];
+
+ /* Queue not set up: nothing to map, treated as success. */
+ if (rxq == NULL)
+ return 0;
+
+ /* Mapping interrupt vector */
+ hw_atl_itr_irq_map_en_rx_set(hw, enable, queue_id);
+
+ return 0;
+}
+
+/* rx_queue_intr_enable ethdev op: thin wrapper over atl_rx_enable_intr(). */
+int
+atl_dev_rx_queue_intr_enable(struct rte_eth_dev *eth_dev, uint16_t queue_id)
+{
+ return atl_rx_enable_intr(eth_dev, queue_id, true);
+}
+
+/* rx_queue_intr_disable ethdev op: thin wrapper over atl_rx_enable_intr(). */
+int
+atl_dev_rx_queue_intr_disable(struct rte_eth_dev *eth_dev, uint16_t queue_id)
+{
+ return atl_rx_enable_intr(eth_dev, queue_id, false);
+}
+
uint16_t
atl_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
ol_flags = m->ol_flags;
if (m->nb_segs > AQ_HW_MAX_SEGS_SIZE) {
- rte_errno = -EINVAL;
+ rte_errno = EINVAL;
return i;
}
if (ol_flags & ATL_TX_OFFLOAD_NOTSUP_MASK) {
- rte_errno = -ENOTSUP;
+ rte_errno = ENOTSUP;
return i;
}
#ifdef RTE_LIBRTE_ETHDEV_DEBUG
ret = rte_validate_tx_offload(m);
if (ret != 0) {
- rte_errno = ret;
+ rte_errno = -ret;
return i;
}
#endif
ret = rte_net_intel_cksum_prepare(m);
if (ret != 0) {
- rte_errno = ret;
+ rte_errno = -ret;
return i;
}
}
if (rxq->l3_csum_enabled && ((rxd_wb->pkt_type & 0x3) == 0)) {
/* IPv4 csum error ? */
if (rxd_wb->rx_stat & BIT(1))
- mbuf_flags |= PKT_RX_IP_CKSUM_BAD;
+ mbuf_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
else
- mbuf_flags |= PKT_RX_IP_CKSUM_GOOD;
+ mbuf_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
} else {
- mbuf_flags |= PKT_RX_IP_CKSUM_UNKNOWN;
+ mbuf_flags |= RTE_MBUF_F_RX_IP_CKSUM_UNKNOWN;
}
/* CSUM calculated ? */
if (rxq->l4_csum_enabled && (rxd_wb->rx_stat & BIT(3))) {
if (rxd_wb->rx_stat & BIT(2))
- mbuf_flags |= PKT_RX_L4_CKSUM_BAD;
+ mbuf_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
else
- mbuf_flags |= PKT_RX_L4_CKSUM_GOOD;
+ mbuf_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
} else {
- mbuf_flags |= PKT_RX_L4_CKSUM_UNKNOWN;
+ mbuf_flags |= RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN;
}
return mbuf_flags;
struct atl_adapter *adapter =
ATL_DEV_TO_ADAPTER(&rte_eth_devices[rxq->port_id]);
struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(adapter);
+ struct aq_hw_cfg_s *cfg =
+ ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
struct atl_rx_entry *sw_ring = rxq->sw_ring;
struct rte_mbuf *new_mbuf;
break;
}
- PMD_RX_LOG(ERR, "port_id=%u queue_id=%u tail=%u "
+ PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u tail=%u "
"eop=0x%x pkt_len=%u hash=0x%x hash_type=0x%x",
(unsigned int)rxq->port_id,
(unsigned int)rxq->queue_id,
while (true) {
new_mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
if (new_mbuf == NULL) {
- PMD_RX_LOG(ERR,
+ PMD_RX_LOG(DEBUG,
"RX mbuf alloc failed port_id=%u "
"queue_id=%u", (unsigned int)rxq->port_id,
(unsigned int)rxq->queue_id);
dev->data->rx_mbuf_alloc_failed++;
- goto err_stop;
+ adapter->sw_stats.rx_nombuf++;
+ goto err_stop;
}
nb_hold++;
rx_mbuf->ol_flags =
atl_desc_to_offload_flags(rxq, &rxd_wb);
+
rx_mbuf->packet_type = atl_desc_to_pkt_type(&rxd_wb);
+ if (rx_mbuf->packet_type & RTE_PTYPE_L2_ETHER_VLAN) {
+ rx_mbuf->ol_flags |= RTE_MBUF_F_RX_VLAN;
+ rx_mbuf->vlan_tci = rxd_wb.vlan;
+
+ if (cfg->vlan_strip)
+ rx_mbuf->ol_flags |=
+ RTE_MBUF_F_RX_VLAN_STRIPPED;
+ }
+
if (!rx_mbuf_first)
rx_mbuf_first = rx_mbuf;
rx_mbuf_first->nb_segs++;
* of returned packets.
*/
rx_pkts[nb_rx++] = rx_mbuf_first;
+ adapter->sw_stats.q_ipackets[rxq->queue_id]++;
+ adapter->sw_stats.q_ibytes[rxq->queue_id] +=
+ rx_mbuf_first->pkt_len;
- PMD_RX_LOG(ERR, "add mbuf segs=%d pkt_len=%d",
+ PMD_RX_LOG(DEBUG, "add mbuf segs=%d pkt_len=%d",
rx_mbuf_first->nb_segs,
rx_mbuf_first->pkt_len);
}
* register.
* Update the RDT with the value of the last processed RX descriptor
* minus 1, to guarantee that the RDT register is never equal to the
- * RDH register, which creates a "full" ring situtation from the
+ * RDH register, which creates a "full" ring situation from the
* hardware point of view...
*/
nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
if (nb_hold > rxq->rx_free_thresh) {
- PMD_RX_LOG(ERR, "port_id=%u queue_id=%u rx_tail=%u "
+ PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
"nb_hold=%u nb_rx=%u",
(unsigned int)rxq->port_id, (unsigned int)rxq->queue_id,
(unsigned int)tail, (unsigned int)nb_hold,
struct hw_atl_txd_s *txd;
int to_clean = 0;
- PMD_INIT_FUNC_TRACE();
-
if (txq != NULL) {
sw_ring = txq->sw_ring;
int head = txq->tx_head;
uint32_t tx_cmd = 0;
uint64_t ol_flags = tx_pkt->ol_flags;
- PMD_INIT_FUNC_TRACE();
-
- if (ol_flags & PKT_TX_TCP_SEG) {
- PMD_DRV_LOG(DEBUG, "xmit TSO pkt");
-
+ if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
tx_cmd |= tx_desc_cmd_lso | tx_desc_cmd_l4cs;
txc->cmd = 0x4;
- if (ol_flags & PKT_TX_IPV6)
+ if (ol_flags & RTE_MBUF_F_TX_IPV6)
txc->cmd |= 0x2;
txc->l2_len = tx_pkt->l2_len;
txc->mss_len = tx_pkt->tso_segsz;
}
- if (ol_flags & PKT_TX_VLAN) {
+ if (ol_flags & RTE_MBUF_F_TX_VLAN) {
tx_cmd |= tx_desc_cmd_vlan;
txc->vlan_tag = tx_pkt->vlan_tci;
}
uint32_t tx_cmd)
{
txd->cmd |= tx_desc_cmd_fcs;
- txd->cmd |= (mbuf->ol_flags & PKT_TX_IP_CKSUM) ? tx_desc_cmd_ipv4 : 0;
+ txd->cmd |= (mbuf->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) ? tx_desc_cmd_ipv4 : 0;
/* L4 csum requested */
- txd->cmd |= (mbuf->ol_flags & PKT_TX_L4_MASK) ? tx_desc_cmd_l4cs : 0;
+ txd->cmd |= (mbuf->ol_flags & RTE_MBUF_F_TX_L4_MASK) ? tx_desc_cmd_l4cs : 0;
txd->cmd |= tx_cmd;
}
atl_xmit_pkt(struct aq_hw_s *hw, struct atl_tx_queue *txq,
struct rte_mbuf *tx_pkt)
{
+ struct atl_adapter *adapter =
+ ATL_DEV_TO_ADAPTER(&rte_eth_devices[txq->port_id]);
uint32_t pay_len = 0;
int tail = 0;
struct atl_tx_entry *tx_entry;
u32 tx_cmd = 0U;
int desc_count = 0;
- PMD_INIT_FUNC_TRACE();
-
tail = txq->tx_tail;
txc = (union hw_atl_txc_s *)&txq->hw_ring[tail];
txq->tx_tail = tail;
txq->tx_free -= desc_count;
+
+ adapter->sw_stats.q_opackets[txq->queue_id]++;
+ adapter->sw_stats.q_obytes[txq->queue_id] += pay_len;
}
uint16_t
return nb_tx;
}
-