devinfo->speed_capa = dev->speed_capa;
devinfo->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
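+ /* Flow rules are not kept across device restart, so clear the capability */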
+ devinfo->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
return 0;
}
memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
cq = &dev->cqs[qid];
- fc_cfg.cq_cfg_valid = true;
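+ /* Select CQ level flow control config via the type field */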
+ fc_cfg.type = ROC_NIX_FC_CQ_CFG;
fc_cfg.cq_cfg.enable = enable;
fc_cfg.cq_cfg.rq = qid;
fc_cfg.cq_cfg.cq_drop = cq->drop_thresh;
uint8_t rx_pause, tx_pause;
int rc, i;
- if (roc_nix_is_vf_or_sdp(nix)) {
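+ /* LBK ports support flow control; reject only other VF and SDP ports */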
+ if (roc_nix_is_vf_or_sdp(nix) && !roc_nix_is_lbk(nix)) {
plt_err("Flow control configuration is not allowed on VFs");
return -ENOTSUP;
}
}
}
+ /* Reconfigure TM flow control only when the Rx pause state changes */
+ if (fc->rx_pause ^ rx_pause) {
+ struct roc_nix_fc_cfg fc_cfg;
+
+ memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
+ fc_cfg.type = ROC_NIX_FC_TM_CFG;
+ fc_cfg.tm_cfg.enable = !!rx_pause;
+ rc = roc_nix_fc_config_set(nix, &fc_cfg);
+ if (rc)
+ return rc;
+ }
+
rc = roc_nix_fc_mode_set(nix, mode_map[fc_conf->mode]);
if (rc)
return rc;
{
struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
- return roc_nix_npc_mcast_config(&dev->nix, true, false);
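+ /* Preserve the current promiscuous state while enabling all-multicast */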
+ return roc_nix_npc_mcast_config(&dev->nix, true,
+ eth_dev->data->promiscuous);
}
int
memcpy(&qinfo->conf, &txq_sp->qconf.conf.tx, sizeof(qinfo->conf));
}
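+/* Return the number of CQ entries filled by HW but not yet processed by SW */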
+uint32_t
+cnxk_nix_rx_queue_count(void *rxq)
+{
+ struct cnxk_eth_rxq_sp *rxq_sp = cnxk_eth_rxq_to_sp(rxq);
+ struct roc_nix *nix = &rxq_sp->dev->nix;
+ uint32_t head, tail;
+
+ roc_nix_cq_head_tail_get(nix, rxq_sp->qid, &head, &tail);
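+ /* Entries between head and tail are CQEs filled by HW */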
+ return (tail - head) % (rxq_sp->qconf.nb_desc);
+}
+
+static inline int
+nix_offset_has_packet(uint32_t head, uint32_t tail, uint16_t offset, bool is_rx)
+{
+ /* Check whether the descriptor at the given offset (queue index) has
+ * a packet filled (Rx) or transmitted (Tx) by HW.
+ * Also handles the head/tail wrap-around case.
+ */
+ return ((tail > head && offset <= tail && offset >= head) ||
+ (head > tail && (offset >= head || offset <= tail))) ?
+ is_rx :
+ !is_rx;
+}
+
+int
+cnxk_nix_rx_descriptor_status(void *rxq, uint16_t offset)
+{
+ struct cnxk_eth_rxq_sp *rxq_sp = cnxk_eth_rxq_to_sp(rxq);
+ struct roc_nix *nix = &rxq_sp->dev->nix;
+ uint32_t head, tail;
+
+ if (rxq_sp->qconf.nb_desc <= offset)
+ return -EINVAL;
+
+ roc_nix_cq_head_tail_get(nix, rxq_sp->qid, &head, &tail);
+
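+ /* A filled CQE means the packet is ready; otherwise the slot awaits HW */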
+ if (nix_offset_has_packet(head, tail, offset, 1))
+ return RTE_ETH_RX_DESC_DONE;
+ else
+ return RTE_ETH_RX_DESC_AVAIL;
+}
+
+int
+cnxk_nix_tx_descriptor_status(void *txq, uint16_t offset)
+{
+ struct cnxk_eth_txq_sp *txq_sp = cnxk_eth_txq_to_sp(txq);
+ struct roc_nix *nix = &txq_sp->dev->nix;
+ uint32_t head = 0, tail = 0;
+
+ if (txq_sp->qconf.nb_desc <= offset)
+ return -EINVAL;
+
+ roc_nix_sq_head_tail_get(nix, txq_sp->qid, &head, &tail);
+
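+ /* Slots between head and tail are still pending transmission by HW */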
+ if (nix_offset_has_packet(head, tail, offset, 0))
+ return RTE_ETH_TX_DESC_DONE;
+ else
+ return RTE_ETH_TX_DESC_FULL;
+}
+
/* It is a NOP for cnxk as HW frees the buffer on xmit */
int
cnxk_nix_tx_done_cleanup(void *txq, uint32_t free_cnt)