* Added queue-based priority flow control support for CN9K & CN10K.
* Added support for IP reassembly for inline inbound IPsec packets.
+ * Added support for packet marking in traffic manager (see the usage sketch below).
* **Updated Mellanox mlx5 driver.**
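For context (not part of this patch), here is a minimal sketch of how an application could request the new marking through the generic rte_tm API; the helper name and the color values are illustrative assumptions:

#include <stdio.h>
#include <rte_tm.h>

/* Hypothetical helper (illustrative only): enable IP DSCP and VLAN DEI
 * marking for yellow and red packets on a stopped port. Green marking
 * is rejected by this PMD, so mark_green stays 0.
 */
static int
enable_tm_marking(uint16_t port_id)
{
	struct rte_tm_error error = {0};
	int rc;

	rc = rte_tm_mark_ip_dscp(port_id, 0, 1, 1, &error);
	if (rc == 0)
		rc = rte_tm_mark_vlan_dei(port_id, 0, 1, 1, &error);
	if (rc)
		printf("TM marking failed: %s\n",
		       error.message ? error.message : "unknown");
	return rc;
}

With marking enabled, the PMDs set dev->tx_mark and rebuild the Tx burst function so the scalar, marking-aware path is selected, as the hunks below show.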
if (flags & NIX_TX_OFFLOAD_TSO_F)
cn10k_nix_xmit_prepare_tso(m, flags);
- cn10k_nix_xmit_prepare(m, cmd, flags, txq->lso_tun_fmt, &sec);
+ cn10k_nix_xmit_prepare(m, cmd, flags, txq->lso_tun_fmt, &sec,
+ txq->mark_flag, txq->mark_fmt);
laddr = lmt_addr;
/* Prepare CPT instruction and get nixtx addr if
rte_io_wmb();
txq = cn9k_sso_hws_xtract_meta(m, txq_data);
cn9k_nix_tx_skeleton(txq, cmd, flags, 0);
- cn9k_nix_xmit_prepare(m, cmd, flags, txq->lso_tun_fmt);
+ cn9k_nix_xmit_prepare(m, cmd, flags, txq->lso_tun_fmt, txq->mark_flag,
+ txq->mark_fmt);
if (flags & NIX_TX_OFFLOAD_SECURITY_F) {
uint64_t ol_flags = m->ol_flags;
if (conf & RTE_ETH_TX_OFFLOAD_SECURITY)
flags |= NIX_TX_OFFLOAD_SECURITY_F;
+ if (dev->tx_mark)
+ flags |= NIX_TX_OFFLOAD_VLAN_QINQ_F;
+
return flags;
}
{
struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
struct roc_nix *nix = &dev->nix;
+ uint64_t mark_fmt, mark_flag;
struct roc_cpt_lf *inl_lf;
struct cn10k_eth_txq *txq;
struct roc_nix_sq *sq;
PLT_STATIC_ASSERT(ROC_NIX_INL_SA_BASE_ALIGN == BIT_ULL(16));
}
+	/* Restore mark flag and format from roc layer */
+ mark_fmt = roc_nix_tm_mark_format_get(nix, &mark_flag);
+ txq->mark_flag = mark_flag & CNXK_TM_MARK_MASK;
+ txq->mark_fmt = mark_fmt & CNXK_TX_MARK_FMT_MASK;
+
nix_form_default_desc(dev, txq, qid);
txq->lso_tun_fmt = dev->lso_tun_fmt;
return 0;
return rc;
}
+static int
+cn10k_nix_tm_mark_vlan_dei(struct rte_eth_dev *eth_dev, int mark_green,
+ int mark_yellow, int mark_red,
+ struct rte_tm_error *error)
+{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ struct roc_nix *roc_nix = &dev->nix;
+ uint64_t mark_fmt, mark_flag;
+ int rc, i;
+
+ rc = cnxk_nix_tm_mark_vlan_dei(eth_dev, mark_green, mark_yellow,
+ mark_red, error);
+ if (rc)
+ goto exit;
+
+ mark_fmt = roc_nix_tm_mark_format_get(roc_nix, &mark_flag);
+ if (mark_flag) {
+ dev->tx_offload_flags |= NIX_TX_OFFLOAD_VLAN_QINQ_F;
+ dev->tx_mark = true;
+ } else {
+ dev->tx_mark = false;
+ if (!(dev->tx_offloads & RTE_ETH_TX_OFFLOAD_VLAN_INSERT ||
+ dev->tx_offloads & RTE_ETH_TX_OFFLOAD_QINQ_INSERT))
+ dev->tx_offload_flags &= ~NIX_TX_OFFLOAD_VLAN_QINQ_F;
+ }
+
+ for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
+ struct cn10k_eth_txq *txq = eth_dev->data->tx_queues[i];
+
+ txq->mark_flag = mark_flag & CNXK_TM_MARK_MASK;
+ txq->mark_fmt = mark_fmt & CNXK_TX_MARK_FMT_MASK;
+ }
+ cn10k_eth_set_tx_function(eth_dev);
+exit:
+ return rc;
+}
+
+static int
+cn10k_nix_tm_mark_ip_ecn(struct rte_eth_dev *eth_dev, int mark_green,
+ int mark_yellow, int mark_red,
+ struct rte_tm_error *error)
+{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ struct roc_nix *roc_nix = &dev->nix;
+ uint64_t mark_fmt, mark_flag;
+ int rc, i;
+
+ rc = cnxk_nix_tm_mark_ip_ecn(eth_dev, mark_green, mark_yellow, mark_red,
+ error);
+ if (rc)
+ goto exit;
+
+ mark_fmt = roc_nix_tm_mark_format_get(roc_nix, &mark_flag);
+ if (mark_flag) {
+ dev->tx_offload_flags |= NIX_TX_OFFLOAD_VLAN_QINQ_F;
+ dev->tx_mark = true;
+ } else {
+ dev->tx_mark = false;
+ if (!(dev->tx_offloads & RTE_ETH_TX_OFFLOAD_VLAN_INSERT ||
+ dev->tx_offloads & RTE_ETH_TX_OFFLOAD_QINQ_INSERT))
+ dev->tx_offload_flags &= ~NIX_TX_OFFLOAD_VLAN_QINQ_F;
+ }
+
+ for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
+ struct cn10k_eth_txq *txq = eth_dev->data->tx_queues[i];
+
+ txq->mark_flag = mark_flag & CNXK_TM_MARK_MASK;
+ txq->mark_fmt = mark_fmt & CNXK_TX_MARK_FMT_MASK;
+ }
+ cn10k_eth_set_tx_function(eth_dev);
+exit:
+ return rc;
+}
+
+static int
+cn10k_nix_tm_mark_ip_dscp(struct rte_eth_dev *eth_dev, int mark_green,
+ int mark_yellow, int mark_red,
+ struct rte_tm_error *error)
+{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ struct roc_nix *roc_nix = &dev->nix;
+ uint64_t mark_fmt, mark_flag;
+ int rc, i;
+
+ rc = cnxk_nix_tm_mark_ip_dscp(eth_dev, mark_green, mark_yellow,
+ mark_red, error);
+ if (rc)
+ goto exit;
+
+ mark_fmt = roc_nix_tm_mark_format_get(roc_nix, &mark_flag);
+ if (mark_flag) {
+ dev->tx_offload_flags |= NIX_TX_OFFLOAD_VLAN_QINQ_F;
+ dev->tx_mark = true;
+ } else {
+ dev->tx_mark = false;
+ if (!(dev->tx_offloads & RTE_ETH_TX_OFFLOAD_VLAN_INSERT ||
+ dev->tx_offloads & RTE_ETH_TX_OFFLOAD_QINQ_INSERT))
+ dev->tx_offload_flags &= ~NIX_TX_OFFLOAD_VLAN_QINQ_F;
+ }
+
+ for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
+ struct cn10k_eth_txq *txq = eth_dev->data->tx_queues[i];
+
+ txq->mark_flag = mark_flag & CNXK_TM_MARK_MASK;
+ txq->mark_fmt = mark_fmt & CNXK_TX_MARK_FMT_MASK;
+ }
+ cn10k_eth_set_tx_function(eth_dev);
+exit:
+ return rc;
+}
+
/* Update platform specific eth dev ops */
static void
nix_eth_dev_ops_override(void)
cnxk_eth_dev_ops.ip_reassembly_conf_set = cn10k_nix_reassembly_conf_set;
}
+/* Update platform specific tm ops */
+static void
+nix_tm_ops_override(void)
+{
+ static int init_once;
+
+ if (init_once)
+ return;
+ init_once = 1;
+
+ /* Update platform specific ops */
+ cnxk_tm_ops.mark_vlan_dei = cn10k_nix_tm_mark_vlan_dei;
+ cnxk_tm_ops.mark_ip_ecn = cn10k_nix_tm_mark_ip_ecn;
+ cnxk_tm_ops.mark_ip_dscp = cn10k_nix_tm_mark_ip_dscp;
+}
+
static void
npc_flow_ops_override(void)
{
}
nix_eth_dev_ops_override();
+ nix_tm_ops_override();
npc_flow_ops_override();
cn10k_eth_sec_ops_override();
uint16_t cpt_desc;
uint64_t lso_tun_fmt;
uint64_t ts_mem;
+	uint64_t mark_flag : 8;	 /* CNXK_TM_MARK_* flags */
+	uint64_t mark_fmt : 48;	 /* one 16-bit mark format word per flag */
} __plt_cache_aligned;
struct cn10k_eth_rxq {
static __rte_always_inline void
cn10k_nix_xmit_prepare(struct rte_mbuf *m, uint64_t *cmd, const uint16_t flags,
- const uint64_t lso_tun_fmt, bool *sec)
+ const uint64_t lso_tun_fmt, bool *sec, uint8_t mark_flag,
+ uint64_t mark_fmt)
{
+ uint8_t mark_off = 0, mark_vlan = 0, markptr = 0;
struct nix_send_ext_s *send_hdr_ext;
struct nix_send_hdr_s *send_hdr;
uint64_t ol_flags = 0, mask;
union nix_send_hdr_w1_u w1;
union nix_send_sg_s *sg;
+ uint16_t mark_form = 0;
send_hdr = (struct nix_send_hdr_s *)cmd;
if (flags & NIX_TX_NEED_EXT_HDR) {
sg = (union nix_send_sg_s *)(cmd + 4);
/* Clear previous markings */
send_hdr_ext->w0.lso = 0;
+ send_hdr_ext->w0.mark_en = 0;
send_hdr_ext->w1.u = 0;
+ ol_flags = m->ol_flags;
} else {
sg = (union nix_send_sg_s *)(cmd + 2);
}
}
if (flags & NIX_TX_NEED_EXT_HDR && flags & NIX_TX_OFFLOAD_VLAN_QINQ_F) {
+ const uint8_t ipv6 = !!(ol_flags & RTE_MBUF_F_TX_IPV6);
+ const uint8_t ip = !!(ol_flags & (RTE_MBUF_F_TX_IPV4 |
+ RTE_MBUF_F_TX_IPV6));
+
send_hdr_ext->w1.vlan1_ins_ena = !!(ol_flags & RTE_MBUF_F_TX_VLAN);
/* HW will update ptr after vlan0 update */
send_hdr_ext->w1.vlan1_ins_ptr = 12;
/* 2B before end of l2 header */
send_hdr_ext->w1.vlan0_ins_ptr = 12;
send_hdr_ext->w1.vlan0_ins_tci = m->vlan_tci_outer;
+		/* Request VLAN DEI marking only when VLAN insertion is enabled */
+ mark_vlan = ((mark_flag & CNXK_TM_MARK_VLAN_DEI) &
+ (send_hdr_ext->w1.vlan1_ins_ena ||
+ send_hdr_ext->w1.vlan0_ins_ena));
+
+ /* Mask requested flags with packet data information */
+ mark_off = mark_flag & ((ip << 2) | (ip << 1) | mark_vlan);
+ mark_off = ffs(mark_off & CNXK_TM_MARK_MASK);
+
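+		/* mark_fmt holds one 16-bit format word per mark type
+		 * (VLAN DEI, IP DSCP, IP ECN): low byte for VLAN/IPv4,
+		 * high byte for IPv6. mark_off from ffs() is 1-based,
+		 * 0 meaning no marking applies. Bit 7 of the selected
+		 * byte is a one-byte markptr adjust; bits 6:0 are the
+		 * HW mark format. markptr starts at the end of L2
+		 * (m->l2_len) and is pulled back 4B for VLAN DEI marking.
+		 */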
+ mark_form = (mark_fmt >> ((mark_off - !!mark_off) << 4));
+ mark_form = (mark_form >> (ipv6 << 3)) & 0xFF;
+ markptr = m->l2_len + (mark_form >> 7) - (mark_vlan << 2);
+
+ send_hdr_ext->w0.mark_en = !!mark_off;
+ send_hdr_ext->w0.markform = mark_form & 0x7F;
+ send_hdr_ext->w0.markptr = markptr;
}
if (flags & NIX_TX_OFFLOAD_TSO_F && (ol_flags & RTE_MBUF_F_TX_TCP_SEG)) {
uintptr_t pa, lbase = txq->lmt_base;
uint16_t lmt_id, burst, left, i;
uintptr_t c_lbase = lbase;
+ uint64_t mark_fmt = 0;
+ uint8_t mark_flag = 0;
rte_iova_t c_io_addr;
uint64_t lso_tun_fmt;
uint16_t c_lmt_id;
if (flags & NIX_TX_OFFLOAD_TSO_F)
lso_tun_fmt = txq->lso_tun_fmt;
+ if (flags & NIX_TX_OFFLOAD_VLAN_QINQ_F) {
+ mark_fmt = txq->mark_fmt;
+ mark_flag = txq->mark_flag;
+ }
+
/* Get LMT base address and LMT ID as lcore id */
ROC_LMT_BASE_ID_GET(lbase, lmt_id);
if (flags & NIX_TX_OFFLOAD_SECURITY_F) {
cn10k_nix_xmit_prepare_tso(tx_pkts[i], flags);
cn10k_nix_xmit_prepare(tx_pkts[i], cmd, flags, lso_tun_fmt,
- &sec);
+ &sec, mark_flag, mark_fmt);
laddr = (uintptr_t)LMT_OFF(lbase, lnum, 0);
uint16_t segdw, lmt_id, burst, left, i;
uint8_t lnum, c_lnum, c_loff;
uintptr_t c_lbase = lbase;
+ uint64_t mark_fmt = 0;
+ uint8_t mark_flag = 0;
uint64_t data0, data1;
rte_iova_t c_io_addr;
uint64_t lso_tun_fmt;
if (flags & NIX_TX_OFFLOAD_TSO_F)
lso_tun_fmt = txq->lso_tun_fmt;
+ if (flags & NIX_TX_OFFLOAD_VLAN_QINQ_F) {
+ mark_fmt = txq->mark_fmt;
+ mark_flag = txq->mark_flag;
+ }
+
/* Get LMT base address and LMT ID as lcore id */
ROC_LMT_BASE_ID_GET(lbase, lmt_id);
if (flags & NIX_TX_OFFLOAD_SECURITY_F) {
cn10k_nix_xmit_prepare_tso(tx_pkts[i], flags);
cn10k_nix_xmit_prepare(tx_pkts[i], cmd, flags, lso_tun_fmt,
- &sec);
+ &sec, mark_flag, mark_fmt);
laddr = (uintptr_t)LMT_OFF(lbase, lnum, 0);
#undef T
};
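+	/* Tx marking is handled only by the scalar Tx path, so fall back
+	 * to the scalar functions whenever marking is enabled.
+	 */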
- if (dev->scalar_ena) {
+ if (dev->scalar_ena || dev->tx_mark) {
pick_tx_func(eth_dev, nix_eth_tx_burst);
if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
pick_tx_func(eth_dev, nix_eth_tx_burst_mseg);
if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY)
flags |= NIX_TX_OFFLOAD_SECURITY_F;
+ if (dev->tx_mark)
+ flags |= NIX_TX_OFFLOAD_VLAN_QINQ_F;
+
return flags;
}
const struct rte_eth_txconf *tx_conf)
{
struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ uint64_t mark_fmt, mark_flag;
struct roc_cpt_lf *inl_lf;
struct cn9k_eth_txq *txq;
struct roc_nix_sq *sq;
PLT_STATIC_ASSERT(BIT_ULL(16) == ROC_NIX_INL_SA_BASE_ALIGN);
}
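+	/* Restore mark flag and format from roc layer */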
+ mark_fmt = roc_nix_tm_mark_format_get(&dev->nix, &mark_flag);
+ txq->mark_flag = mark_flag & CNXK_TM_MARK_MASK;
+ txq->mark_fmt = mark_fmt & CNXK_TX_MARK_FMT_MASK;
+
nix_form_default_desc(dev, txq, qid);
txq->lso_tun_fmt = dev->lso_tun_fmt;
return 0;
return 0;
}
+static int
+cn9k_nix_tm_mark_vlan_dei(struct rte_eth_dev *eth_dev, int mark_green,
+ int mark_yellow, int mark_red,
+ struct rte_tm_error *error)
+{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ struct roc_nix *roc_nix = &dev->nix;
+ uint64_t mark_fmt, mark_flag;
+ int rc, i;
+
+ rc = cnxk_nix_tm_mark_vlan_dei(eth_dev, mark_green, mark_yellow,
+ mark_red, error);
+ if (rc)
+ goto exit;
+
+ mark_fmt = roc_nix_tm_mark_format_get(roc_nix, &mark_flag);
+ if (mark_flag) {
+ dev->tx_offload_flags |= NIX_TX_OFFLOAD_VLAN_QINQ_F;
+ dev->tx_mark = true;
+ } else {
+ dev->tx_mark = false;
+ if (!(dev->tx_offloads & RTE_ETH_TX_OFFLOAD_VLAN_INSERT ||
+ dev->tx_offloads & RTE_ETH_TX_OFFLOAD_QINQ_INSERT))
+ dev->tx_offload_flags &= ~NIX_TX_OFFLOAD_VLAN_QINQ_F;
+ }
+
+ for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
+ struct cn9k_eth_txq *txq = eth_dev->data->tx_queues[i];
+
+ txq->mark_flag = mark_flag & CNXK_TM_MARK_MASK;
+ txq->mark_fmt = mark_fmt & CNXK_TX_MARK_FMT_MASK;
+ }
+ cn9k_eth_set_tx_function(eth_dev);
+exit:
+ return rc;
+}
+
+static int
+cn9k_nix_tm_mark_ip_ecn(struct rte_eth_dev *eth_dev, int mark_green,
+ int mark_yellow, int mark_red,
+ struct rte_tm_error *error)
+{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ struct roc_nix *roc_nix = &dev->nix;
+ uint64_t mark_fmt, mark_flag;
+ int rc, i;
+
+ rc = cnxk_nix_tm_mark_ip_ecn(eth_dev, mark_green, mark_yellow, mark_red,
+ error);
+ if (rc)
+ goto exit;
+
+ mark_fmt = roc_nix_tm_mark_format_get(roc_nix, &mark_flag);
+ if (mark_flag) {
+ dev->tx_offload_flags |= NIX_TX_OFFLOAD_VLAN_QINQ_F;
+ dev->tx_mark = true;
+ } else {
+ dev->tx_mark = false;
+ if (!(dev->tx_offloads & RTE_ETH_TX_OFFLOAD_VLAN_INSERT ||
+ dev->tx_offloads & RTE_ETH_TX_OFFLOAD_QINQ_INSERT))
+ dev->tx_offload_flags &= ~NIX_TX_OFFLOAD_VLAN_QINQ_F;
+ }
+
+ for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
+ struct cn9k_eth_txq *txq = eth_dev->data->tx_queues[i];
+
+ txq->mark_flag = mark_flag & CNXK_TM_MARK_MASK;
+ txq->mark_fmt = mark_fmt & CNXK_TX_MARK_FMT_MASK;
+ }
+ cn9k_eth_set_tx_function(eth_dev);
+exit:
+ return rc;
+}
+
+static int
+cn9k_nix_tm_mark_ip_dscp(struct rte_eth_dev *eth_dev, int mark_green,
+ int mark_yellow, int mark_red,
+ struct rte_tm_error *error)
+{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ struct roc_nix *roc_nix = &dev->nix;
+ uint64_t mark_fmt, mark_flag;
+ int rc, i;
+
+ rc = cnxk_nix_tm_mark_ip_dscp(eth_dev, mark_green, mark_yellow,
+ mark_red, error);
+ if (rc)
+ goto exit;
+
+ mark_fmt = roc_nix_tm_mark_format_get(roc_nix, &mark_flag);
+ if (mark_flag) {
+ dev->tx_offload_flags |= NIX_TX_OFFLOAD_VLAN_QINQ_F;
+ dev->tx_mark = true;
+ } else {
+ dev->tx_mark = false;
+ if (!(dev->tx_offloads & RTE_ETH_TX_OFFLOAD_VLAN_INSERT ||
+ dev->tx_offloads & RTE_ETH_TX_OFFLOAD_QINQ_INSERT))
+ dev->tx_offload_flags &= ~NIX_TX_OFFLOAD_VLAN_QINQ_F;
+ }
+
+ for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
+ struct cn9k_eth_txq *txq = eth_dev->data->tx_queues[i];
+
+ txq->mark_flag = mark_flag & CNXK_TM_MARK_MASK;
+ txq->mark_fmt = mark_fmt & CNXK_TX_MARK_FMT_MASK;
+ }
+ cn9k_eth_set_tx_function(eth_dev);
+exit:
+ return rc;
+}
+
/* Update platform specific eth dev ops */
static void
nix_eth_dev_ops_override(void)
cn9k_nix_timesync_read_tx_timestamp;
}
+/* Update platform specific tm ops */
+static void
+nix_tm_ops_override(void)
+{
+ static int init_once;
+
+ if (init_once)
+ return;
+ init_once = 1;
+
+ /* Update platform specific ops */
+ cnxk_tm_ops.mark_vlan_dei = cn9k_nix_tm_mark_vlan_dei;
+ cnxk_tm_ops.mark_ip_ecn = cn9k_nix_tm_mark_ip_ecn;
+ cnxk_tm_ops.mark_ip_dscp = cn9k_nix_tm_mark_ip_dscp;
+}
+
static void
npc_flow_ops_override(void)
{
}
nix_eth_dev_ops_override();
+ nix_tm_ops_override();
npc_flow_ops_override();
cn9k_eth_sec_ops_override();
uint64_t sa_base;
uint64_t *cpt_fc;
uint16_t cpt_desc;
+	uint64_t mark_flag : 8;	 /* CNXK_TM_MARK_* flags */
+	uint64_t mark_fmt : 48;	 /* one 16-bit mark format word per flag */
} __plt_cache_aligned;
struct cn9k_eth_rxq {
static __rte_always_inline void
cn9k_nix_xmit_prepare(struct rte_mbuf *m, uint64_t *cmd, const uint16_t flags,
- const uint64_t lso_tun_fmt)
+ const uint64_t lso_tun_fmt, uint8_t mark_flag,
+ uint64_t mark_fmt)
{
+ uint8_t mark_off = 0, mark_vlan = 0, markptr = 0;
struct nix_send_ext_s *send_hdr_ext;
struct nix_send_hdr_s *send_hdr;
uint64_t ol_flags = 0, mask;
union nix_send_hdr_w1_u w1;
union nix_send_sg_s *sg;
+ uint16_t mark_form = 0;
send_hdr = (struct nix_send_hdr_s *)cmd;
if (flags & NIX_TX_NEED_EXT_HDR) {
sg = (union nix_send_sg_s *)(cmd + 4);
/* Clear previous markings */
send_hdr_ext->w0.lso = 0;
+ send_hdr_ext->w0.mark_en = 0;
send_hdr_ext->w1.u = 0;
+ ol_flags = m->ol_flags;
} else {
sg = (union nix_send_sg_s *)(cmd + 2);
}
}
if (flags & NIX_TX_NEED_EXT_HDR && flags & NIX_TX_OFFLOAD_VLAN_QINQ_F) {
+ const uint8_t ipv6 = !!(ol_flags & RTE_MBUF_F_TX_IPV6);
+ const uint8_t ip = !!(ol_flags & (RTE_MBUF_F_TX_IPV4 |
+ RTE_MBUF_F_TX_IPV6));
+
send_hdr_ext->w1.vlan1_ins_ena = !!(ol_flags & RTE_MBUF_F_TX_VLAN);
/* HW will update ptr after vlan0 update */
send_hdr_ext->w1.vlan1_ins_ptr = 12;
/* 2B before end of l2 header */
send_hdr_ext->w1.vlan0_ins_ptr = 12;
send_hdr_ext->w1.vlan0_ins_tci = m->vlan_tci_outer;
+		/* Request VLAN DEI marking only when VLAN insertion is enabled */
+ mark_vlan = ((mark_flag & CNXK_TM_MARK_VLAN_DEI) &
+ (send_hdr_ext->w1.vlan1_ins_ena ||
+ send_hdr_ext->w1.vlan0_ins_ena));
+ /* Mask requested flags with packet data information */
+ mark_off = mark_flag & ((ip << 2) | (ip << 1) | mark_vlan);
+ mark_off = ffs(mark_off & CNXK_TM_MARK_MASK);
+
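+		/* Same decode as cn10k: one 16-bit format word per mark
+		 * type in mark_fmt, IPv6 form in the high byte; bit 7
+		 * adjusts markptr, bits 6:0 are the HW mark form.
+		 */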
+ mark_form = (mark_fmt >> ((mark_off - !!mark_off) << 4));
+ mark_form = (mark_form >> (ipv6 << 3)) & 0xFF;
+ markptr = m->l2_len + (mark_form >> 7) - (mark_vlan << 2);
+
+ send_hdr_ext->w0.mark_en = !!mark_off;
+ send_hdr_ext->w0.markform = mark_form & 0x7F;
+ send_hdr_ext->w0.markptr = markptr;
}
if (flags & NIX_TX_OFFLOAD_TSO_F && (ol_flags & RTE_MBUF_F_TX_TCP_SEG)) {
{
struct cn9k_eth_txq *txq = tx_queue;
const rte_iova_t io_addr = txq->io_addr;
+ uint64_t lso_tun_fmt, mark_fmt = 0;
void *lmt_addr = txq->lmt_addr;
- uint64_t lso_tun_fmt;
+ uint8_t mark_flag = 0;
uint16_t i;
NIX_XMIT_FC_OR_RETURN(txq, pkts);
cn9k_nix_xmit_prepare_tso(tx_pkts[i], flags);
}
+ if (flags & NIX_TX_OFFLOAD_VLAN_QINQ_F) {
+ mark_fmt = txq->mark_fmt;
+ mark_flag = txq->mark_flag;
+ }
+
	/* Let's commit any changes in the packet here, as no further
	 * changes to the packet will be made unless fast free is disabled.
	 */
rte_io_wmb();
for (i = 0; i < pkts; i++) {
- cn9k_nix_xmit_prepare(tx_pkts[i], cmd, flags, lso_tun_fmt);
+ cn9k_nix_xmit_prepare(tx_pkts[i], cmd, flags, lso_tun_fmt,
+ mark_flag, mark_fmt);
cn9k_nix_xmit_prepare_tstamp(txq, cmd, tx_pkts[i]->ol_flags, 4,
flags);
cn9k_nix_xmit_one(cmd, lmt_addr, io_addr, flags);
{
struct cn9k_eth_txq *txq = tx_queue;
const rte_iova_t io_addr = txq->io_addr;
+ uint64_t lso_tun_fmt, mark_fmt = 0;
void *lmt_addr = txq->lmt_addr;
- uint64_t lso_tun_fmt;
+ uint8_t mark_flag = 0;
uint16_t segdw;
uint64_t i;
cn9k_nix_xmit_prepare_tso(tx_pkts[i], flags);
}
+ if (flags & NIX_TX_OFFLOAD_VLAN_QINQ_F) {
+ mark_fmt = txq->mark_fmt;
+ mark_flag = txq->mark_flag;
+ }
+
	/* Let's commit any changes in the packet here, as no further
	 * changes to the packet will be made unless fast free is disabled.
	 */
rte_io_wmb();
for (i = 0; i < pkts; i++) {
- cn9k_nix_xmit_prepare(tx_pkts[i], cmd, flags, lso_tun_fmt);
+ cn9k_nix_xmit_prepare(tx_pkts[i], cmd, flags, lso_tun_fmt,
+ mark_flag, mark_fmt);
segdw = cn9k_nix_prepare_mseg(tx_pkts[i], cmd, flags);
cn9k_nix_xmit_prepare_tstamp(txq, cmd, tx_pkts[i]->ol_flags,
segdw, flags);
#undef T
};
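+	/* Tx marking is handled only by the scalar Tx path, so fall back
+	 * to the scalar functions whenever marking is enabled.
+	 */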
- if (dev->scalar_ena) {
+ if (dev->scalar_ena || dev->tx_mark) {
pick_tx_func(eth_dev, nix_eth_tx_burst);
if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
pick_tx_func(eth_dev, nix_eth_tx_burst_mseg);
#include <rte_security_driver.h>
#include <rte_tailq.h>
#include <rte_time.h>
+#include <rte_tm_driver.h>
#include "roc_api.h"
#define CNXK_NIX_PFC_CHAN_COUNT 16
+#define CNXK_TM_MARK_VLAN_DEI BIT_ULL(0)
+#define CNXK_TM_MARK_IP_DSCP BIT_ULL(1)
+#define CNXK_TM_MARK_IP_ECN BIT_ULL(2)
+
+#define CNXK_TM_MARK_MASK \
+ (CNXK_TM_MARK_VLAN_DEI | CNXK_TM_MARK_IP_DSCP | CNXK_TM_MARK_IP_ECN)
+
+#define CNXK_TX_MARK_FMT_MASK (0xFFFFFFFFFFFFull) /* 48-bit mark_fmt */
+
struct cnxk_fc_cfg {
enum rte_eth_fc_mode mode;
uint8_t rx_pause;
uint16_t flags;
uint8_t ptype_disable;
bool scalar_ena;
+ bool tx_mark;
bool ptp_en;
bool rx_mark_update; /* Enable/Disable mark update to mbuf */
/* Common security ops */
extern struct rte_security_ops cnxk_eth_sec_ops;
+/* Common tm ops */
+extern struct rte_tm_ops cnxk_tm_ops;
+
/* Ops */
int cnxk_nix_probe(struct rte_pci_driver *pci_drv,
struct rte_pci_device *pci_dev);
int cnxk_nix_tm_ops_get(struct rte_eth_dev *eth_dev, void *ops);
int cnxk_nix_tm_set_queue_rate_limit(struct rte_eth_dev *eth_dev,
uint16_t queue_idx, uint16_t tx_rate);
+int cnxk_nix_tm_mark_vlan_dei(struct rte_eth_dev *eth_dev, int mark_green,
+ int mark_yellow, int mark_red,
+ struct rte_tm_error *error);
+int cnxk_nix_tm_mark_ip_ecn(struct rte_eth_dev *eth_dev, int mark_green,
+ int mark_yellow, int mark_red,
+ struct rte_tm_error *error);
+int cnxk_nix_tm_mark_ip_dscp(struct rte_eth_dev *eth_dev, int mark_green,
+ int mark_yellow, int mark_red,
+ struct rte_tm_error *error);
/* MTR */
int cnxk_nix_mtr_ops_get(struct rte_eth_dev *dev, void *ops);
RTE_TM_STATS_N_PKTS_RED_DROPPED |
RTE_TM_STATS_N_BYTES_RED_DROPPED;
- for (i = 0; i < RTE_COLORS; i++) {
- cap->mark_vlan_dei_supported[i] = false;
- cap->mark_ip_ecn_tcp_supported[i] = false;
- cap->mark_ip_dscp_supported[i] = false;
+ cap->mark_vlan_dei_supported[RTE_COLOR_GREEN] = false;
+ cap->mark_ip_ecn_tcp_supported[RTE_COLOR_GREEN] = false;
+ cap->mark_ip_ecn_sctp_supported[RTE_COLOR_GREEN] = false;
+ cap->mark_ip_dscp_supported[RTE_COLOR_GREEN] = false;
+
+ for (i = RTE_COLOR_YELLOW; i < RTE_COLORS; i++) {
+ cap->mark_vlan_dei_supported[i] = true;
+ cap->mark_ip_ecn_tcp_supported[i] = true;
+ cap->mark_ip_ecn_sctp_supported[i] = true;
+ cap->mark_ip_dscp_supported[i] = true;
}
return 0;
return rc;
}
-const struct rte_tm_ops cnxk_tm_ops = {
+int
+cnxk_nix_tm_mark_vlan_dei(struct rte_eth_dev *eth_dev, int mark_green,
+ int mark_yellow, int mark_red,
+ struct rte_tm_error *error)
+{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ struct roc_nix *roc_nix = &dev->nix;
+ int rc;
+
+ if (mark_green) {
+ error->type = RTE_TM_ERROR_TYPE_CAPABILITIES;
+ error->message = "Green VLAN marking not supported";
+ return -EINVAL;
+ }
+
+ if (eth_dev->data->dev_started) {
+ error->type = RTE_TM_ERROR_TYPE_CAPABILITIES;
+		error->message = "VLAN DEI marking not supported on "
+				 "running ports";
+ return -EBUSY;
+ }
+
+ rc = roc_nix_tm_mark_config(roc_nix, ROC_NIX_TM_MARK_VLAN_DEI,
+ mark_yellow, mark_red);
+ if (rc) {
+ error->type = roc_nix_tm_err_to_rte_err(rc);
+ error->message = roc_error_msg_get(rc);
+ }
+ return rc;
+}
+
+int
+cnxk_nix_tm_mark_ip_ecn(struct rte_eth_dev *eth_dev, int mark_green,
+ int mark_yellow, int mark_red,
+ struct rte_tm_error *error)
+{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ struct roc_nix *roc_nix = &dev->nix;
+ int rc;
+
+ if (mark_green) {
+ error->type = RTE_TM_ERROR_TYPE_CAPABILITIES;
+ error->message = "Green IP ECN marking not supported";
+ return -EINVAL;
+ }
+
+ if (eth_dev->data->dev_started) {
+ error->type = RTE_TM_ERROR_TYPE_CAPABILITIES;
+		error->message = "IP ECN marking not supported on "
+				 "running ports";
+ return -EBUSY;
+ }
+
+ rc = roc_nix_tm_mark_config(roc_nix, ROC_NIX_TM_MARK_IPV4_ECN,
+ mark_yellow, mark_red);
+ if (rc < 0)
+ goto exit;
+
+ rc = roc_nix_tm_mark_config(roc_nix, ROC_NIX_TM_MARK_IPV6_ECN,
+ mark_yellow, mark_red);
+exit:
+ if (rc < 0) {
+ error->type = roc_nix_tm_err_to_rte_err(rc);
+ error->message = roc_error_msg_get(rc);
+ }
+ return rc;
+}
+
+int
+cnxk_nix_tm_mark_ip_dscp(struct rte_eth_dev *eth_dev, int mark_green,
+ int mark_yellow, int mark_red,
+ struct rte_tm_error *error)
+{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ struct roc_nix *roc_nix = &dev->nix;
+ int rc;
+
+ if (mark_green) {
+ error->type = RTE_TM_ERROR_TYPE_CAPABILITIES;
+ error->message = "Green IP DSCP marking not supported";
+ return -EINVAL;
+ }
+
+ if (eth_dev->data->dev_started) {
+ error->type = RTE_TM_ERROR_TYPE_CAPABILITIES;
+		error->message = "IP DSCP marking not supported on "
+				 "running ports";
+ return -EBUSY;
+ }
+
+ rc = roc_nix_tm_mark_config(roc_nix, ROC_NIX_TM_MARK_IPV4_DSCP,
+ mark_yellow, mark_red);
+ if (rc < 0)
+ goto exit;
+
+ rc = roc_nix_tm_mark_config(roc_nix, ROC_NIX_TM_MARK_IPV6_DSCP,
+ mark_yellow, mark_red);
+exit:
+ if (rc < 0) {
+ error->type = roc_nix_tm_err_to_rte_err(rc);
+ error->message = roc_error_msg_get(rc);
+ }
+ return rc;
+}
+
+struct rte_tm_ops cnxk_tm_ops = {
.node_type_get = cnxk_nix_tm_node_type_get,
.capabilities_get = cnxk_nix_tm_capa_get,
.level_capabilities_get = cnxk_nix_tm_level_capa_get,
.node_shaper_update = cnxk_nix_tm_node_shaper_update,
.node_parent_update = cnxk_nix_tm_node_parent_update,
.node_stats_read = cnxk_nix_tm_node_stats_read,
+
+ .mark_vlan_dei = cnxk_nix_tm_mark_vlan_dei,
+ .mark_ip_ecn = cnxk_nix_tm_mark_ip_ecn,
+ .mark_ip_dscp = cnxk_nix_tm_mark_ip_dscp,
};
int