/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Chelsio Communications.
 * All rights reserved.
 */
7 #include "base/common.h"
8 #include "base/t4_tcb.h"
9 #include "base/t4_regs.h"
10 #include "cxgbe_filter.h"
/**
 * Initialize Hash Filters.
 */
int cxgbe_init_hash_filter(struct adapter *adap)
{
	unsigned int n_user_filters;
	unsigned int user_filter_perc;
	int ret;
	u32 params[7], val[7];
#define FW_PARAM_DEV(param) \
	(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
	 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))

#define FW_PARAM_PFVF(param) \
	(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
	 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param) | \
	 V_FW_PARAMS_PARAM_Y(0) | \
	 V_FW_PARAMS_PARAM_Z(0))
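	/*
	 * Editorial note: the two helpers above compose the 32-bit parameter
	 * IDs that t4_query_params() hands to the firmware. For example,
	 * FW_PARAM_DEV(NTID) below combines the device mnemonic with
	 * FW_PARAMS_PARAM_DEV_NTID to ask how many hardware TIDs exist.
	 */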
	params[0] = FW_PARAM_DEV(NTID);
	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
			      params, val);
	if (ret < 0)
		return ret;

	adap->tids.ntids = val[0];
	adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
	user_filter_perc = 100;
	n_user_filters = mult_frac(adap->tids.nftids,
				   user_filter_perc,
				   100);

	adap->tids.nftids = n_user_filters;
	adap->params.hash_filter = 1;

	return 0;
}
/**
 * Validate whether the requested filter specification can be set by
 * checking that the requested features have been enabled.
 */
int cxgbe_validate_filter(struct adapter *adapter,
			  struct ch_filter_specification *fs)
{
	u32 fconf, iconf;
	/*
	 * Check for unconfigured fields being used.
	 */
	fconf = fs->cap ? adapter->params.tp.filter_mask :
			  adapter->params.tp.vlan_pri_map;

	iconf = adapter->params.tp.ingress_config;
	/* S(_field): the spec uses _field; U(_mask, _field): _field is used
	 * but the corresponding match is not enabled in hardware.
	 */
#define S(_field) \
	(fs->val._field || fs->mask._field)
#define U(_mask, _field) \
	(!(fconf & (_mask)) && S(_field))
	if (U(F_PORT, iport) || U(F_ETHERTYPE, ethtype) ||
	    U(F_PROTOCOL, proto) || U(F_MACMATCH, macidx) ||
	    U(F_VLAN, ivlan_vld) || U(F_VNIC_ID, ovlan_vld) ||
	    U(F_TOS, tos) || U(F_VNIC_ID, pfvf_vld))
		return -EOPNOTSUPP;
	/* Either OVLAN or PFVF match is enabled in hardware, but not both */
	if ((S(pfvf_vld) && !(iconf & F_VNIC)) ||
	    (S(ovlan_vld) && (iconf & F_VNIC)))
		return -EOPNOTSUPP;
	/* To use OVLAN or PFVF, L4 encapsulation match must not be enabled */
	if ((S(ovlan_vld) && (iconf & F_USE_ENC_IDX)) ||
	    (S(pfvf_vld) && (iconf & F_USE_ENC_IDX)))
		return -EOPNOTSUPP;

#undef S
#undef U
	/*
	 * If the user is requesting that the filter action loop
	 * matching packets back out one of our ports, make sure that
	 * the egress port is in range.
	 */
	if (fs->action == FILTER_SWITCH &&
	    fs->eport >= adapter->params.nports)
		return -ERANGE;
	/*
	 * Don't allow various trivially obvious bogus out-of-range
	 * values ...
	 */
	if (fs->val.iport >= adapter->params.nports)
		return -ERANGE;
	if (!fs->cap && fs->nat_mode && !adapter->params.filter2_wr_support)
		return -EOPNOTSUPP;

	if (!fs->cap && fs->swapmac && !adapter->params.filter2_wr_support)
		return -EOPNOTSUPP;

	return 0;
}
/**
 * Get the queue to which the traffic must be steered.
 */
static unsigned int get_filter_steerq(struct rte_eth_dev *dev,
				      struct ch_filter_specification *fs)
{
	struct port_info *pi = ethdev2pinfo(dev);
	struct adapter *adapter = pi->adapter;
	unsigned int iq;
	/*
	 * If the user has requested steering matching Ingress Packets
	 * to a specific Queue Set, we need to make sure it's in range
	 * for the port and map that into the Absolute Queue ID of the
	 * Queue Set's Response Queue.
	 */
	if (!fs->dirsteer) {
		iq = 0;
	} else {
		/*
		 * If the iq id is greater than the number of qsets,
		 * then assume it is an absolute qid.
		 */
		if (fs->iq < pi->n_rx_qsets)
			iq = adapter->sge.ethrxq[pi->first_qset +
						 fs->iq].rspq.abs_id;
		else
			iq = fs->iq;
	}

	return iq;
}
/* Return an error number if the indicated filter isn't writable ... */
static int writable_filter(struct filter_entry *f)
{
	if (f->locked)
		return -EPERM;
	if (f->pending)
		return -EBUSY;

	return 0;
}
/**
 * Send a CPL_SET_TCB_FIELD message.
 */
static void set_tcb_field(struct adapter *adapter, unsigned int ftid,
			  u16 word, u64 mask, u64 val, int no_reply)
{
	struct rte_mbuf *mbuf;
	struct cpl_set_tcb_field *req;
	struct sge_ctrl_txq *ctrlq;

	ctrlq = &adapter->sge.ctrlq[0];
	mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
	if (!mbuf)
		return;

	mbuf->data_len = sizeof(*req);
	mbuf->pkt_len = mbuf->data_len;
	req = rte_pktmbuf_mtod(mbuf, struct cpl_set_tcb_field *);
	memset(req, 0, sizeof(*req));
	INIT_TP_WR_MIT_CPL(req, CPL_SET_TCB_FIELD, ftid);
	req->reply_ctrl = cpu_to_be16(V_REPLY_CHAN(0) |
				      V_QUEUENO(adapter->sge.fw_evtq.abs_id) |
				      V_NO_REPLY(no_reply));
	req->word_cookie = cpu_to_be16(V_WORD(word) | V_COOKIE(ftid));
	req->mask = cpu_to_be64(mask);
	req->val = cpu_to_be64(val);

	t4_mgmt_tx(ctrlq, mbuf);
}
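/*
 * Editorial sketch (not part of the original source): a typical use of
 * set_tcb_field() is clearing the 32-bit hit counter kept in the TCB
 * {timestamp} field of filter tid 'ftid':
 *
 *	set_tcb_field(adap, ftid, W_TCB_TIMESTAMP,
 *		      V_TCB_TIMESTAMP(M_TCB_TIMESTAMP),
 *		      V_TCB_TIMESTAMP(0ULL), 1);
 *
 * cxgbe_clear_filter_count() at the end of this file does exactly this.
 */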
/**
 * Set one of the t_flags bits in the TCB.
 */
static void set_tcb_tflag(struct adapter *adap, unsigned int ftid,
			  unsigned int bit_pos, unsigned int val, int no_reply)
{
	set_tcb_field(adap, ftid, W_TCB_T_FLAGS, 1ULL << bit_pos,
		      (unsigned long long)val << bit_pos, no_reply);
}
/**
 * Build a CPL_SET_TCB_FIELD message as payload of a ULP_TX_PKT command.
 */
static inline void mk_set_tcb_field_ulp(struct filter_entry *f,
					struct cpl_set_tcb_field *req,
					unsigned int word,
					u64 mask, u64 val, u8 cookie,
					int no_reply)
{
	struct ulp_txpkt *txpkt = (struct ulp_txpkt *)req;
	struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1);

	txpkt->cmd_dest = cpu_to_be32(V_ULPTX_CMD(ULP_TX_PKT) |
				      V_ULP_TXPKT_DEST(0));
	txpkt->len = cpu_to_be32(DIV_ROUND_UP(sizeof(*req), 16));
	sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_IMM));
	sc->len = cpu_to_be32(sizeof(*req) - sizeof(struct work_request_hdr));
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_SET_TCB_FIELD, f->tid));
	req->reply_ctrl = cpu_to_be16(V_NO_REPLY(no_reply) | V_REPLY_CHAN(0) |
				      V_QUEUENO(0));
	req->word_cookie = cpu_to_be16(V_WORD(word) | V_COOKIE(cookie));
	req->mask = cpu_to_be64(mask);
	req->val = cpu_to_be64(val);
	sc = (struct ulptx_idata *)(req + 1);
	sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
	sc->len = cpu_to_be32(0);
}
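/*
 * Editorial note: every ULP_TX_PKT sub-request above is sized in 16-byte
 * units (hence DIV_ROUND_UP(..., 16)), and the trailing ULP_TX_SC_NOOP
 * pads the immediate data out to that alignment so that several CPLs can
 * be chained inside a single work request.
 */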
/**
 * IPv6 requires 2 slots on T6 and 4 slots for cards below T6.
 * IPv4 requires only 1 slot on all cards.
 */
u8 cxgbe_filter_slots(struct adapter *adap, u8 family)
{
	if (family == FILTER_TYPE_IPV6) {
		if (CHELSIO_CHIP_VERSION(adap->params.chip) < CHELSIO_T6)
			return 4;

		return 2;
	}

	return 1;
}
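/*
 * Editorial example: on a T5 adapter, cxgbe_filter_slots(adap,
 * FILTER_TYPE_IPV6) returns 4, so an IPv6 LE-TCAM filter must start on a
 * 4-slot-aligned index and occupies indices [fidx, fidx + 3]; on T6 the
 * same filter needs only 2 aligned slots.
 */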
/**
 * Check if entries are already filled.
 */
bool cxgbe_is_filter_set(struct tid_info *t, u32 fidx, u8 nentries)
{
	bool result = false;
	u32 i;

	/* Check whether any of the requested slots is already in use. */
	t4_os_lock(&t->ftid_lock);
	for (i = fidx; i < fidx + nentries; i++) {
		if (rte_bitmap_get(t->ftid_bmap, i)) {
			result = true;
			break;
		}
	}
	t4_os_unlock(&t->ftid_lock);

	return result;
}
/**
 * Allocate available free entries.
 */
int cxgbe_alloc_ftid(struct adapter *adap, u8 nentries)
{
	struct tid_info *t = &adap->tids;
	int pos;
	int size = t->nftids;

	t4_os_lock(&t->ftid_lock);
	if (nentries > 1)
		pos = cxgbe_bitmap_find_free_region(t->ftid_bmap, size,
						    nentries);
	else
		pos = cxgbe_find_first_zero_bit(t->ftid_bmap, size);
	t4_os_unlock(&t->ftid_lock);

	return pos < size ? pos : -1;
}
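/*
 * Editorial note: cxgbe_alloc_ftid() only locates a free run of slots;
 * nothing is marked busy here. Callers reserve the returned indices via
 * cxgbe_set_ftid() (later in this file), which sets the bitmap bits under
 * the same ftid_lock.
 */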
/**
 * Clear a filter and release any of its resources that we own. This also
 * clears the filter's "pending" status.
 */
static void clear_filter(struct filter_entry *f)
{
	struct port_info *pi = ethdev2pinfo(f->dev);

	if (f->clipt)
		cxgbe_clip_release(f->dev, f->clipt);

	if (f->l2t)
		cxgbe_l2t_release(f->l2t);

	if (f->fs.mask.macidx)
		cxgbe_mpstcam_remove(pi, f->fs.val.macidx);

	if (f->smt)
		cxgbe_smt_release(f->smt);

	/* The zeroing of the filter rule below clears the filter valid,
	 * pending, locked flags etc. so it's all we need for
	 * this operation.
	 */
	memset(f, 0, sizeof(*f));
}
/**
 * Construct hash filter ntuple.
 */
static u64 hash_filter_ntuple(const struct filter_entry *f)
{
	struct adapter *adap = ethdev2adap(f->dev);
	struct tp_params *tp = &adap->params.tp;
	u64 ntuple = 0;
	u16 tcp_proto = IPPROTO_TCP; /* TCP Protocol Number */

	if (tp->port_shift >= 0 && f->fs.mask.iport)
		ntuple |= (u64)f->fs.val.iport << tp->port_shift;

	if (tp->protocol_shift >= 0) {
		if (!f->fs.val.proto)
			ntuple |= (u64)tcp_proto << tp->protocol_shift;
		else
			ntuple |= (u64)f->fs.val.proto << tp->protocol_shift;
	}

	if (tp->ethertype_shift >= 0 && f->fs.mask.ethtype)
		ntuple |= (u64)(f->fs.val.ethtype) << tp->ethertype_shift;
	if (tp->macmatch_shift >= 0 && f->fs.mask.macidx)
		ntuple |= (u64)(f->fs.val.macidx) << tp->macmatch_shift;
	if (tp->vlan_shift >= 0 && f->fs.mask.ivlan)
		ntuple |= (u64)(F_FT_VLAN_VLD | f->fs.val.ivlan) <<
			  tp->vlan_shift;
	if (tp->vnic_shift >= 0) {
		if ((adap->params.tp.ingress_config & F_VNIC) &&
		    f->fs.mask.pfvf_vld)
			ntuple |= (u64)(f->fs.val.pfvf_vld << 16 |
					f->fs.val.pf << 13 | f->fs.val.vf) <<
				  tp->vnic_shift;
		else if (!(adap->params.tp.ingress_config & F_VNIC) &&
			 f->fs.mask.ovlan_vld)
			ntuple |= (u64)(f->fs.val.ovlan_vld << 16 |
					f->fs.val.ovlan) << tp->vnic_shift;
	}

	if (tp->tos_shift >= 0 && f->fs.mask.tos)
		ntuple |= (u64)f->fs.val.tos << tp->tos_shift;

	return ntuple;
}
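/*
 * Editorial sketch: the VNIC sub-field packed above is
 * (pfvf_vld << 16 | pf << 13 | vf), i.e. layout { pfvf_vld:1, pf:3,
 * vf:13 }, mirroring the (ovlan_vld << 16 | ovlan) packing used when
 * F_VNIC is disabled in the ingress configuration.
 */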
/**
 * Build a CPL_ABORT_REQ message as payload of a ULP_TX_PKT command.
 */
static void mk_abort_req_ulp(struct cpl_abort_req *abort_req,
			     unsigned int tid)
{
	struct ulp_txpkt *txpkt = (struct ulp_txpkt *)abort_req;
	struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1);

	txpkt->cmd_dest = cpu_to_be32(V_ULPTX_CMD(ULP_TX_PKT) |
				      V_ULP_TXPKT_DEST(0));
	txpkt->len = cpu_to_be32(DIV_ROUND_UP(sizeof(*abort_req), 16));
	sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_IMM));
	sc->len = cpu_to_be32(sizeof(*abort_req) -
			      sizeof(struct work_request_hdr));
	OPCODE_TID(abort_req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, tid));
	abort_req->rsvd0 = cpu_to_be32(0);
	abort_req->rsvd1 = 0;
	abort_req->cmd = CPL_ABORT_NO_RST;
	sc = (struct ulptx_idata *)(abort_req + 1);
	sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
	sc->len = cpu_to_be32(0);
}
/**
 * Build a CPL_ABORT_RPL message as payload of a ULP_TX_PKT command.
 */
static void mk_abort_rpl_ulp(struct cpl_abort_rpl *abort_rpl,
			     unsigned int tid)
{
	struct ulp_txpkt *txpkt = (struct ulp_txpkt *)abort_rpl;
	struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1);

	txpkt->cmd_dest = cpu_to_be32(V_ULPTX_CMD(ULP_TX_PKT) |
				      V_ULP_TXPKT_DEST(0));
	txpkt->len = cpu_to_be32(DIV_ROUND_UP(sizeof(*abort_rpl), 16));
	sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_IMM));
	sc->len = cpu_to_be32(sizeof(*abort_rpl) -
			      sizeof(struct work_request_hdr));
	OPCODE_TID(abort_rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, tid));
	abort_rpl->rsvd0 = cpu_to_be32(0);
	abort_rpl->rsvd1 = 0;
	abort_rpl->cmd = CPL_ABORT_NO_RST;
	sc = (struct ulptx_idata *)(abort_rpl + 1);
	sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
	sc->len = cpu_to_be32(0);
}
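/*
 * Editorial note: the hash-filter delete path below chains three
 * sub-requests into a single ULP_TX work request: a CPL_SET_TCB_FIELD
 * that re-points the TCB's RSS_INFO at the firmware event queue, followed
 * by the CPL_ABORT_REQ/CPL_ABORT_RPL pair built by the two helpers above.
 */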
/**
 * Delete the specified hash filter.
 */
static int cxgbe_del_hash_filter(struct rte_eth_dev *dev,
				 unsigned int filter_id,
				 struct filter_ctx *ctx)
{
	struct adapter *adapter = ethdev2adap(dev);
	struct tid_info *t = &adapter->tids;
	struct filter_entry *f;
	struct sge_ctrl_txq *ctrlq;
	unsigned int port_id = ethdev2pinfo(dev)->port_id;
	int ret;

	if (filter_id > adapter->tids.ntids)
		return -E2BIG;

	f = lookup_tid(t, filter_id);
	if (!f) {
		dev_err(adapter, "%s: no filter entry for filter_id = %u\n",
			__func__, filter_id);
		return -EINVAL;
	}

	ret = writable_filter(f);
	if (ret)
		return ret;
	if (f->valid) {
		struct rte_mbuf *mbuf;
		struct work_request_hdr *wr;
		struct ulptx_idata *aligner;
		struct cpl_set_tcb_field *req;
		struct cpl_abort_req *abort_req;
		struct cpl_abort_rpl *abort_rpl;
		unsigned int wrlen;

		f->ctx = ctx;
		f->pending = 1;

		wrlen = cxgbe_roundup(sizeof(*wr) +
				      (sizeof(*req) + sizeof(*aligner)) +
				      sizeof(*abort_req) + sizeof(*abort_rpl),
				      16);

		ctrlq = &adapter->sge.ctrlq[port_id];
		mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
		if (!mbuf) {
			dev_err(adapter, "%s: could not allocate mbuf\n",
				__func__);
			return -ENOMEM;
		}

		mbuf->data_len = wrlen;
		mbuf->pkt_len = mbuf->data_len;

		req = rte_pktmbuf_mtod(mbuf, struct cpl_set_tcb_field *);
		INIT_ULPTX_WR(req, wrlen, 0, 0);
		wr = (struct work_request_hdr *)req;
		wr++;
		req = (struct cpl_set_tcb_field *)wr;
		mk_set_tcb_field_ulp(f, req, W_TCB_RSS_INFO,
				     V_TCB_RSS_INFO(M_TCB_RSS_INFO),
				     V_TCB_RSS_INFO(adapter->sge.fw_evtq.abs_id),
				     0, 1);
		aligner = (struct ulptx_idata *)(req + 1);
		abort_req = (struct cpl_abort_req *)(aligner + 1);
		mk_abort_req_ulp(abort_req, f->tid);
		abort_rpl = (struct cpl_abort_rpl *)(abort_req + 1);
		mk_abort_rpl_ulp(abort_rpl, f->tid);
		t4_mgmt_tx(ctrlq, mbuf);
	}

	return 0;
}
/**
 * Build an ACT_OPEN_REQ6 message for setting an IPv6 hash filter.
 */
static void mk_act_open_req6(struct filter_entry *f, struct rte_mbuf *mbuf,
			     unsigned int qid_filterid, struct adapter *adap)
{
	struct cpl_t6_act_open_req6 *req = NULL;
	u64 local_lo, local_hi, peer_lo, peer_hi;
	u32 *lip = (u32 *)f->fs.val.lip;
	u32 *fip = (u32 *)f->fs.val.fip;

	switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
	case CHELSIO_T6:
		req = rte_pktmbuf_mtod(mbuf, struct cpl_t6_act_open_req6 *);
		INIT_TP_WR(req, 0);
		break;
	default:
		dev_err(adap, "%s: unsupported chip type!\n", __func__);
		return;
	}
	local_hi = ((u64)lip[1]) << 32 | lip[0];
	local_lo = ((u64)lip[3]) << 32 | lip[2];
	peer_hi = ((u64)fip[1]) << 32 | fip[0];
	peer_lo = ((u64)fip[3]) << 32 | fip[2];

	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
						    qid_filterid));
	req->local_port = cpu_to_be16(f->fs.val.lport);
	req->peer_port = cpu_to_be16(f->fs.val.fport);
	req->local_ip_hi = local_hi;
	req->local_ip_lo = local_lo;
	req->peer_ip_hi = peer_hi;
	req->peer_ip_lo = peer_lo;
	req->opt0 = cpu_to_be64(V_NAGLE(f->fs.newvlan == VLAN_REMOVE ||
					f->fs.newvlan == VLAN_REWRITE) |
				V_DELACK(f->fs.hitcnts) |
				V_L2T_IDX(f->l2t ? f->l2t->idx : 0) |
				V_SMAC_SEL((cxgbe_port_viid(f->dev) & 0x7F)
					   << 1) |
				V_TX_CHAN(f->fs.eport) |
				V_ULP_MODE(ULP_MODE_NONE) |
				F_TCAM_BYPASS | F_NON_OFFLOAD);
	req->params = cpu_to_be64(V_FILTER_TUPLE(hash_filter_ntuple(f)));
	req->opt2 = cpu_to_be32(F_RSS_QUEUE_VALID |
				V_RSS_QUEUE(f->fs.iq) |
				F_T5_OPT_2_VALID |
				F_RX_CHANNEL |
				V_SACK_EN(f->fs.swapmac) |
				V_CONG_CNTRL((f->fs.action == FILTER_DROP) |
					     (f->fs.dirsteer << 1)) |
				V_CCTRL_ECN(f->fs.action == FILTER_SWITCH));
}
/**
 * Build an ACT_OPEN_REQ message for setting an IPv4 hash filter.
 */
static void mk_act_open_req(struct filter_entry *f, struct rte_mbuf *mbuf,
			    unsigned int qid_filterid, struct adapter *adap)
{
	struct cpl_t6_act_open_req *req = NULL;

	switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
	case CHELSIO_T6:
		req = rte_pktmbuf_mtod(mbuf, struct cpl_t6_act_open_req *);
		INIT_TP_WR(req, 0);
		break;
	default:
		dev_err(adap, "%s: unsupported chip type!\n", __func__);
		return;
	}
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
						    qid_filterid));
	req->local_port = cpu_to_be16(f->fs.val.lport);
	req->peer_port = cpu_to_be16(f->fs.val.fport);
	req->local_ip = f->fs.val.lip[0] | f->fs.val.lip[1] << 8 |
			f->fs.val.lip[2] << 16 | f->fs.val.lip[3] << 24;
	req->peer_ip = f->fs.val.fip[0] | f->fs.val.fip[1] << 8 |
		       f->fs.val.fip[2] << 16 | f->fs.val.fip[3] << 24;
	req->opt0 = cpu_to_be64(V_NAGLE(f->fs.newvlan == VLAN_REMOVE ||
					f->fs.newvlan == VLAN_REWRITE) |
				V_DELACK(f->fs.hitcnts) |
				V_L2T_IDX(f->l2t ? f->l2t->idx : 0) |
				V_SMAC_SEL((cxgbe_port_viid(f->dev) & 0x7F)
					   << 1) |
				V_TX_CHAN(f->fs.eport) |
				V_ULP_MODE(ULP_MODE_NONE) |
				F_TCAM_BYPASS | F_NON_OFFLOAD);
	req->params = cpu_to_be64(V_FILTER_TUPLE(hash_filter_ntuple(f)));
	req->opt2 = cpu_to_be32(F_RSS_QUEUE_VALID |
				V_RSS_QUEUE(f->fs.iq) |
				F_T5_OPT_2_VALID |
				F_RX_CHANNEL |
				V_SACK_EN(f->fs.swapmac) |
				V_CONG_CNTRL((f->fs.action == FILTER_DROP) |
					     (f->fs.dirsteer << 1)) |
				V_CCTRL_ECN(f->fs.action == FILTER_SWITCH));
}
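/*
 * Editorial note: in both ACT_OPEN builders above, 'qid_filterid' packs
 * the reply queue in the upper bits and the ATID in the low 14 bits; the
 * caller composes it as ((adapter->sge.fw_evtq.abs_id << 14) | atid), as
 * seen in cxgbe_set_hash_filter() below.
 */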
/**
 * Set the specified hash filter.
 */
static int cxgbe_set_hash_filter(struct rte_eth_dev *dev,
				 struct ch_filter_specification *fs,
				 struct filter_ctx *ctx)
{
	struct port_info *pi = ethdev2pinfo(dev);
	struct adapter *adapter = pi->adapter;
	struct tid_info *t = &adapter->tids;
	struct filter_entry *f;
	struct rte_mbuf *mbuf;
	struct sge_ctrl_txq *ctrlq;
	unsigned int iq;
	int atid, size;
	int ret = 0;

	ret = cxgbe_validate_filter(adapter, fs);
	if (ret)
		return ret;

	iq = get_filter_steerq(dev, fs);
	ctrlq = &adapter->sge.ctrlq[pi->port_id];

	f = t4_os_alloc(sizeof(*f));
	if (!f)
		return -ENOMEM;

	f->fs = *fs;
	f->ctx = ctx;
	f->dev = dev;
	f->fs.iq = iq;
	/* Allocate MPS TCAM entry to match Destination MAC. */
	if (f->fs.mask.macidx) {
		int idx;

		idx = cxgbe_mpstcam_alloc(pi, f->fs.val.dmac, f->fs.mask.dmac);
		if (idx <= 0) {
			ret = -ENOMEM;
			goto out_err;
		}

		f->fs.val.macidx = idx;
	}
	/*
	 * If the new filter requires loopback Destination MAC and/or VLAN
	 * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
	 * the filter.
	 */
	if (f->fs.newdmac || f->fs.newvlan == VLAN_INSERT ||
	    f->fs.newvlan == VLAN_REWRITE) {
		/* allocate L2T entry for new filter */
		f->l2t = cxgbe_l2t_alloc_switching(dev, f->fs.vlan,
						   f->fs.eport, f->fs.dmac);
		if (!f->l2t) {
			ret = -ENOMEM;
			goto out_err;
		}
	}

	/* If the new filter requires Source MAC rewriting then we need to
	 * allocate an SMT entry for the filter.
	 */
	if (f->fs.newsmac) {
		f->smt = cxgbe_smt_alloc_switching(f->dev, f->fs.smac);
		if (!f->smt) {
			ret = -ENOMEM;
			goto out_err;
		}
	}

	atid = cxgbe_alloc_atid(t, f);
	if (atid < 0) {
		ret = atid;
		goto out_err;
	}
	if (f->fs.type == FILTER_TYPE_IPV6) {
		/* IPv6 hash filter */
		f->clipt = cxgbe_clip_alloc(f->dev, (u32 *)&f->fs.val.lip);
		if (!f->clipt) {
			ret = -ENOMEM;
			goto free_atid;
		}

		size = sizeof(struct cpl_t6_act_open_req6);
		mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
		if (!mbuf) {
			ret = -ENOMEM;
			goto free_atid;
		}

		mbuf->data_len = size;
		mbuf->pkt_len = mbuf->data_len;

		mk_act_open_req6(f, mbuf,
				 ((adapter->sge.fw_evtq.abs_id << 14) | atid),
				 adapter);
	} else {
		/* IPv4 hash filter */
		size = sizeof(struct cpl_t6_act_open_req);
		mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
		if (!mbuf) {
			ret = -ENOMEM;
			goto free_atid;
		}

		mbuf->data_len = size;
		mbuf->pkt_len = mbuf->data_len;

		mk_act_open_req(f, mbuf,
				((adapter->sge.fw_evtq.abs_id << 14) | atid),
				adapter);
	}

	f->pending = 1;
	t4_mgmt_tx(ctrlq, mbuf);
	return 0;

free_atid:
	cxgbe_free_atid(t, atid);

out_err:
	t4_os_free(f);
	return ret;
}
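/*
 * Editorial note: a hash filter is provisional until the firmware answers
 * the ACT_OPEN request; cxgbe_hash_filter_rpl() further below either
 * promotes the entry from its ATID to the hardware-assigned TID or tears
 * it down.
 */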
/**
 * t4_mk_filtdelwr - create a delete filter WR
 * @adap: adapter context
 * @ftid: the filter ID
 * @wr: the filter work request to populate
 * @qid: ingress queue to receive the delete notification
 *
 * Creates a filter work request to delete the supplied filter. If @qid is
 * negative, the delete notification is suppressed.
 */
static void t4_mk_filtdelwr(struct adapter *adap, unsigned int ftid,
			    struct fw_filter2_wr *wr, int qid)
{
	memset(wr, 0, sizeof(*wr));
	if (adap->params.filter2_wr_support)
		wr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER2_WR));
	else
		wr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER_WR));
	wr->len16_pkd = cpu_to_be32(V_FW_WR_LEN16(sizeof(*wr) / 16));
	wr->tid_to_iq = cpu_to_be32(V_FW_FILTER_WR_TID(ftid) |
				    V_FW_FILTER_WR_NOREPLY(qid < 0));
	wr->del_filter_to_l2tix = cpu_to_be32(F_FW_FILTER_WR_DEL_FILTER);
	if (qid >= 0)
		wr->rx_chan_rx_rpl_iq =
				cpu_to_be16(V_FW_FILTER_WR_RX_RPL_IQ(qid));
}
/**
 * Create FW work request to delete the filter at a specified index.
 */
static int del_filter_wr(struct rte_eth_dev *dev, unsigned int fidx)
{
	struct adapter *adapter = ethdev2adap(dev);
	struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
	struct rte_mbuf *mbuf;
	struct fw_filter2_wr *fwr;
	struct sge_ctrl_txq *ctrlq;
	unsigned int port_id = ethdev2pinfo(dev)->port_id;

	ctrlq = &adapter->sge.ctrlq[port_id];
	mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
	if (!mbuf)
		return -ENOMEM;

	mbuf->data_len = sizeof(*fwr);
	mbuf->pkt_len = mbuf->data_len;

	fwr = rte_pktmbuf_mtod(mbuf, struct fw_filter2_wr *);
	t4_mk_filtdelwr(adapter, f->tid, fwr, adapter->sge.fw_evtq.abs_id);

	/*
	 * Mark the filter as "pending" and ship off the Filter Work Request.
	 * When we get the Work Request Reply we'll clear the pending status.
	 */
	f->pending = 1;
	t4_mgmt_tx(ctrlq, mbuf);
	return 0;
}
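/*
 * Editorial note: both del_filter_wr() and set_filter_wr() leave the
 * entry marked "pending"; cxgbe_filter_rpl() clears that state when the
 * firmware's work-request reply arrives.
 */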
static int set_filter_wr(struct rte_eth_dev *dev, unsigned int fidx)
{
	struct adapter *adapter = ethdev2adap(dev);
	struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
	struct rte_mbuf *mbuf;
	struct fw_filter2_wr *fwr;
	struct sge_ctrl_txq *ctrlq;
	unsigned int port_id = ethdev2pinfo(dev)->port_id;
	int ret;

	ctrlq = &adapter->sge.ctrlq[port_id];
	mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
	if (!mbuf) {
		ret = -ENOMEM;
		goto out;
	}

	mbuf->data_len = sizeof(*fwr);
	mbuf->pkt_len = mbuf->data_len;

	fwr = rte_pktmbuf_mtod(mbuf, struct fw_filter2_wr *);
	memset(fwr, 0, sizeof(*fwr));
	/*
	 * Construct the work request to set the filter.
	 */
	if (adapter->params.filter2_wr_support)
		fwr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER2_WR));
	else
		fwr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER_WR));
	fwr->len16_pkd = cpu_to_be32(V_FW_WR_LEN16(sizeof(*fwr) / 16));
	fwr->tid_to_iq =
		cpu_to_be32(V_FW_FILTER_WR_TID(f->tid) |
			    V_FW_FILTER_WR_RQTYPE(f->fs.type) |
			    V_FW_FILTER_WR_NOREPLY(0) |
			    V_FW_FILTER_WR_IQ(f->fs.iq));
	fwr->del_filter_to_l2tix =
		cpu_to_be32(V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
			    V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
			    V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
			    V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
			    V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
			    V_FW_FILTER_WR_INSVLAN
				(f->fs.newvlan == VLAN_INSERT ||
				 f->fs.newvlan == VLAN_REWRITE) |
			    V_FW_FILTER_WR_RMVLAN
				(f->fs.newvlan == VLAN_REMOVE ||
				 f->fs.newvlan == VLAN_REWRITE) |
			    V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
			    V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
			    V_FW_FILTER_WR_PRIO(f->fs.prio) |
			    V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0));
	fwr->ethtype = cpu_to_be16(f->fs.val.ethtype);
	fwr->ethtypem = cpu_to_be16(f->fs.mask.ethtype);
	fwr->frag_to_ovlan_vldm =
		(V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.ivlan_vld) |
		 V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.ivlan_vld) |
		 V_FW_FILTER_WR_OVLAN_VLD(f->fs.val.ovlan_vld) |
		 V_FW_FILTER_WR_OVLAN_VLDM(f->fs.mask.ovlan_vld));
	fwr->smac_sel = f->smt ? f->smt->hw_idx : 0;
	fwr->rx_chan_rx_rpl_iq =
		cpu_to_be16(V_FW_FILTER_WR_RX_CHAN(0) |
			    V_FW_FILTER_WR_RX_RPL_IQ(adapter->sge.fw_evtq.abs_id));
	fwr->maci_to_matchtypem =
		cpu_to_be32(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
			    V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
			    V_FW_FILTER_WR_PORT(f->fs.val.iport) |
			    V_FW_FILTER_WR_PORTM(f->fs.mask.iport));
	fwr->ptcl = f->fs.val.proto;
	fwr->ptclm = f->fs.mask.proto;
	fwr->ttyp = f->fs.val.tos;
	fwr->ttypm = f->fs.mask.tos;
	fwr->ivlan = cpu_to_be16(f->fs.val.ivlan);
	fwr->ivlanm = cpu_to_be16(f->fs.mask.ivlan);
	fwr->ovlan = cpu_to_be16(f->fs.val.ovlan);
	fwr->ovlanm = cpu_to_be16(f->fs.mask.ovlan);
	rte_memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
	rte_memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
	rte_memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
	rte_memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
	fwr->lp = cpu_to_be16(f->fs.val.lport);
	fwr->lpm = cpu_to_be16(f->fs.mask.lport);
	fwr->fp = cpu_to_be16(f->fs.val.fport);
	fwr->fpm = cpu_to_be16(f->fs.mask.fport);
	if (adapter->params.filter2_wr_support) {
		fwr->filter_type_swapmac =
			V_FW_FILTER2_WR_SWAPMAC(f->fs.swapmac);
		fwr->natmode_to_ulp_type =
			V_FW_FILTER2_WR_ULP_TYPE(f->fs.nat_mode ?
						 ULP_MODE_TCPDDP :
						 ULP_MODE_NONE) |
			V_FW_FILTER2_WR_NATMODE(f->fs.nat_mode);
		memcpy(fwr->newlip, f->fs.nat_lip, sizeof(fwr->newlip));
		memcpy(fwr->newfip, f->fs.nat_fip, sizeof(fwr->newfip));
		fwr->newlport = cpu_to_be16(f->fs.nat_lport);
		fwr->newfport = cpu_to_be16(f->fs.nat_fport);
	}
	/*
	 * Mark the filter as "pending" and ship off the Filter Work Request.
	 * When we get the Work Request Reply we'll clear the pending status.
	 */
	f->pending = 1;
	t4_mgmt_tx(ctrlq, mbuf);
	return 0;

out:
	return ret;
}
/**
 * Set the corresponding entries in the bitmap.
 */
static int cxgbe_set_ftid(struct tid_info *t, u32 fidx, u8 nentries)
{
	u32 i;

	t4_os_lock(&t->ftid_lock);
	if (rte_bitmap_get(t->ftid_bmap, fidx)) {
		t4_os_unlock(&t->ftid_lock);
		return -EBUSY;
	}

	for (i = fidx; i < fidx + nentries; i++)
		rte_bitmap_set(t->ftid_bmap, i);
	t4_os_unlock(&t->ftid_lock);
	return 0;
}
/**
 * Clear the corresponding entries in the bitmap.
 */
static void cxgbe_clear_ftid(struct tid_info *t, u32 fidx, u8 nentries)
{
	u32 i;

	t4_os_lock(&t->ftid_lock);
	for (i = fidx; i < fidx + nentries; i++)
		rte_bitmap_clear(t->ftid_bmap, i);
	t4_os_unlock(&t->ftid_lock);
}
/**
 * Check a delete filter request for validity and send it to the hardware.
 * Return 0 on success, an error number otherwise. We attach any provided
 * filter operation context to the internal filter specification in order to
 * facilitate signaling completion of the operation.
 */
int cxgbe_del_filter(struct rte_eth_dev *dev, unsigned int filter_id,
		     struct ch_filter_specification *fs,
		     struct filter_ctx *ctx)
{
	struct port_info *pi = dev->data->dev_private;
	struct adapter *adapter = pi->adapter;
	struct filter_entry *f;
	unsigned int chip_ver;
	u8 nentries;
	int ret;

	if (is_hashfilter(adapter) && fs->cap)
		return cxgbe_del_hash_filter(dev, filter_id, ctx);

	if (filter_id >= adapter->tids.nftids)
		return -ERANGE;

	chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
	/*
	 * Ensure IPv6 filter id is aligned on the 2 slot boundary for T6,
	 * and 4 slot boundary for cards below T6.
	 */
	if (fs->type == FILTER_TYPE_IPV6) {
		if (chip_ver < CHELSIO_T6)
			filter_id &= ~0x3;
		else
			filter_id &= ~0x1;
	}

	nentries = cxgbe_filter_slots(adapter, fs->type);
	ret = cxgbe_is_filter_set(&adapter->tids, filter_id, nentries);
	if (!ret) {
		dev_warn(adapter, "%s: could not find filter entry: %u\n",
			 __func__, filter_id);
		return -EINVAL;
	}
	f = &adapter->tids.ftid_tab[filter_id];
	ret = writable_filter(f);
	if (ret)
		return ret;

	if (f->valid) {
		f->ctx = ctx;
		cxgbe_clear_ftid(&adapter->tids,
				 f->tid - adapter->tids.ftid_base,
				 nentries);
		return del_filter_wr(dev, filter_id);
	}

	/*
	 * If the caller has passed in a Completion Context then we need to
	 * mark it as a successful completion so they don't stall waiting
	 * for it.
	 */
	if (ctx) {
		ctx->result = 0;
		t4_complete(&ctx->completion);
	}

	return 0;
}
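/*
 * Editorial note: for LE-TCAM filters the delete path above frees the
 * ftid bitmap slots immediately, then ships a work request with
 * F_FW_FILTER_WR_DEL_FILTER set; the entry itself is cleared only once
 * cxgbe_filter_rpl() sees FW_FILTER_WR_FLT_DELETED.
 */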
/**
 * Check a Chelsio Filter Request for validity, convert it into our internal
 * format and send it to the hardware. Return 0 on success, an error number
 * otherwise. We attach any provided filter operation context to the internal
 * filter specification in order to facilitate signaling completion of the
 * operation.
 */
int cxgbe_set_filter(struct rte_eth_dev *dev, unsigned int filter_id,
		     struct ch_filter_specification *fs,
		     struct filter_ctx *ctx)
{
	struct port_info *pi = ethdev2pinfo(dev);
	struct adapter *adapter = pi->adapter;
	u8 nentries, bitoff[16] = {0};
	struct filter_entry *f;
	unsigned int chip_ver;
	unsigned int fidx, iq;
	u32 iconf;
	int ret;

	if (is_hashfilter(adapter) && fs->cap)
		return cxgbe_set_hash_filter(dev, fs, ctx);

	if (filter_id >= adapter->tids.nftids)
		return -ERANGE;

	chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
	ret = cxgbe_validate_filter(adapter, fs);
	if (ret)
		return ret;

	/*
	 * IPv6 filters occupy four slots and must be aligned on four-slot
	 * boundaries for T5. On T6, IPv6 filters occupy two slots and
	 * must be aligned on two-slot boundaries.
	 *
	 * IPv4 filters only occupy a single slot and have no alignment
	 * requirements.
	 */
	if (fs->type == FILTER_TYPE_IPV6) {
		if (chip_ver < CHELSIO_T6)
			fidx = filter_id & ~0x3;
		else
			fidx = filter_id & ~0x1;

		if (fidx != filter_id)
			return -EINVAL;
	}

	nentries = cxgbe_filter_slots(adapter, fs->type);
	ret = cxgbe_is_filter_set(&adapter->tids, filter_id, nentries);
	if (ret)
		return -EBUSY;
	iq = get_filter_steerq(dev, fs);

	/*
	 * Check to make sure that provided filter index is not
	 * already in use by someone else.
	 */
	f = &adapter->tids.ftid_tab[filter_id];
	if (f->valid)
		return -EBUSY;

	fidx = adapter->tids.ftid_base + filter_id;
	ret = cxgbe_set_ftid(&adapter->tids, filter_id, nentries);
	if (ret)
		return ret;

	/*
	 * Check to make sure the filter requested is writable ...
	 */
	ret = writable_filter(f);
	if (ret) {
		/* Clear the bits we have set above */
		cxgbe_clear_ftid(&adapter->tids, filter_id, nentries);
		return ret;
	}

	/*
	 * Convert the filter specification into our internal format.
	 * We copy the PF/VF specification into the Outer VLAN field
	 * here so the rest of the code -- including the interface to
	 * the firmware -- doesn't have to constantly do these checks.
	 */
	f->fs = *fs;
	f->fs.iq = iq;
	f->dev = dev;
	/* Allocate MPS TCAM entry to match Destination MAC. */
	if (f->fs.mask.macidx) {
		int idx;

		idx = cxgbe_mpstcam_alloc(pi, f->fs.val.dmac, f->fs.mask.dmac);
		if (idx <= 0) {
			ret = -ENOMEM;
			goto free_tid;
		}

		f->fs.val.macidx = idx;
	}
	/* Allocate a clip table entry only if we have a non-zero IPv6 address. */
	if (chip_ver > CHELSIO_T5 && f->fs.type &&
	    memcmp(f->fs.val.lip, bitoff, sizeof(bitoff))) {
		f->clipt = cxgbe_clip_alloc(dev, (u32 *)&f->fs.val.lip);
		if (!f->clipt) {
			ret = -ENOMEM;
			goto free_tid;
		}
	}
	/* If the new filter requires loopback Destination MAC and/or VLAN
	 * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
	 * the filter.
	 */
	if (f->fs.newvlan || f->fs.newdmac) {
		f->l2t = cxgbe_l2t_alloc_switching(f->dev, f->fs.vlan,
						   f->fs.eport, f->fs.dmac);
		if (!f->l2t) {
			ret = -ENOMEM;
			goto free_tid;
		}
	}

	/* If the new filter requires Source MAC rewriting then we need to
	 * allocate an SMT entry for the filter.
	 */
	if (f->fs.newsmac) {
		f->smt = cxgbe_smt_alloc_switching(f->dev, f->fs.smac);
		if (!f->smt) {
			ret = -ENOMEM;
			goto free_tid;
		}
	}
	iconf = adapter->params.tp.ingress_config;

	/* Either PFVF or OVLAN can be active, but not both.
	 * So, if PFVF is enabled, then overwrite the OVLAN
	 * fields with PFVF fields before writing the spec
	 * to hardware.
	 */
	if (iconf & F_VNIC) {
		f->fs.val.ovlan = fs->val.pf << 13 | fs->val.vf;
		f->fs.mask.ovlan = fs->mask.pf << 13 | fs->mask.vf;
		f->fs.val.ovlan_vld = fs->val.pfvf_vld;
		f->fs.mask.ovlan_vld = fs->mask.pfvf_vld;
	}
	/*
	 * Attempt to set the filter. If we don't succeed, we clear
	 * it and return the failure.
	 */
	f->ctx = ctx;
	f->tid = fidx; /* Save the actual tid */
	ret = set_filter_wr(dev, filter_id);
	if (ret)
		goto free_tid;

	return ret;

free_tid:
	cxgbe_clear_ftid(&adapter->tids, filter_id, nentries);
	clear_filter(f);
	return ret;
}
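/*
 * Editorial example: with F_VNIC enabled, a spec carrying pf = 2, vf = 5
 * lands in the outer-VLAN field as (2 << 13) | 5 = 0x4005, matching the
 * { pf:3, vf:13 } packing that hash_filter_ntuple() uses for the VNIC
 * sub-field.
 */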
/**
 * Handle a Hash filter write reply.
 */
void cxgbe_hash_filter_rpl(struct adapter *adap,
			   const struct cpl_act_open_rpl *rpl)
{
	struct tid_info *t = &adap->tids;
	struct filter_entry *f;
	struct filter_ctx *ctx = NULL;
	unsigned int tid = GET_TID(rpl);
	unsigned int ftid = G_TID_TID(G_AOPEN_ATID
				      (be32_to_cpu(rpl->atid_status)));
	unsigned int status = G_AOPEN_STATUS(be32_to_cpu(rpl->atid_status));

	f = lookup_atid(t, ftid);
	if (!f) {
		dev_warn(adap, "%s: could not find filter entry: %u\n",
			 __func__, ftid);
		return;
	}

	ctx = f->ctx;
	f->ctx = NULL;

	switch (status) {
	case CPL_ERR_NONE: {
		f->tid = tid;
		f->pending = 0; /* asynchronous setup completed */
		f->valid = 1;

		cxgbe_insert_tid(t, f, f->tid, 0);
		cxgbe_free_atid(t, ftid);
		if (ctx) {
			ctx->tid = f->tid;
			ctx->result = 0;
		}
		if (f->fs.hitcnts)
			set_tcb_field(adap, tid,
				      W_TCB_TIMESTAMP,
				      V_TCB_TIMESTAMP(M_TCB_TIMESTAMP) |
				      V_TCB_T_RTT_TS_RECENT_AGE
					      (M_TCB_T_RTT_TS_RECENT_AGE),
				      V_TCB_TIMESTAMP(0ULL) |
				      V_TCB_T_RTT_TS_RECENT_AGE(0ULL),
				      1);
		if (f->fs.newvlan == VLAN_REMOVE ||
		    f->fs.newvlan == VLAN_REWRITE)
			set_tcb_tflag(adap, tid, S_TF_CCTRL_ECE, 1, 1);
		if (f->fs.newvlan == VLAN_INSERT ||
		    f->fs.newvlan == VLAN_REWRITE)
			set_tcb_tflag(adap, tid, S_TF_CCTRL_RFR, 1, 1);
		if (f->fs.newsmac) {
			set_tcb_tflag(adap, tid, S_TF_CCTRL_CWR, 1, 1);
			set_tcb_field(adap, tid, W_TCB_SMAC_SEL,
				      V_TCB_SMAC_SEL(M_TCB_SMAC_SEL),
				      V_TCB_SMAC_SEL(f->smt->hw_idx), 1);
		}
		break;
	}
1216 dev_warn(adap, "%s: filter creation failed with status = %u\n",
1220 if (status == CPL_ERR_TCAM_FULL)
1221 ctx->result = -EAGAIN;
1223 ctx->result = -EINVAL;
1226 cxgbe_free_atid(t, ftid);
1232 t4_complete(&ctx->completion);
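/*
 * Editorial note: on success the reply handler above migrates the entry
 * from its provisional ATID to the hardware-assigned TID
 * (cxgbe_insert_tid() then cxgbe_free_atid()); on failure the ATID and
 * the filter entry are simply released.
 */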
/**
 * Handle a LE-TCAM filter write/deletion reply.
 */
void cxgbe_filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
{
	struct filter_entry *f = NULL;
	unsigned int tid = GET_TID(rpl);
	int idx, max_fidx = adap->tids.nftids;

	/* Get the corresponding filter entry for this tid */
	if (adap->tids.ftid_tab) {
		/* Check this in normal filter region */
		idx = tid - adap->tids.ftid_base;
		if (idx < 0 || idx >= max_fidx)
			return;

		f = &adap->tids.ftid_tab[idx];
		if (f->tid != tid)
			return;
	}
	/* We found the filter entry for this tid */
	if (f) {
		unsigned int ret = G_COOKIE(rpl->cookie);
		struct filter_ctx *ctx;

		/*
		 * Pull off any filter operation context attached to the
		 * filter.
		 */
		ctx = f->ctx;
		f->ctx = NULL;

		if (ret == FW_FILTER_WR_FLT_ADDED) {
			f->pending = 0; /* asynchronous setup completed */
			f->valid = 1;
			if (ctx) {
				ctx->tid = f->tid;
				ctx->result = 0;
			}
		} else if (ret == FW_FILTER_WR_FLT_DELETED) {
			/*
			 * Clear the filter when we get confirmation from the
			 * hardware that the filter has been deleted.
			 */
			clear_filter(f);
			if (ctx)
				ctx->result = 0;
		} else {
			/*
			 * Something went wrong. Issue a warning about the
			 * problem and clear everything out.
			 */
			dev_warn(adap, "filter %u setup failed with error %u\n",
				 idx, ret);
			clear_filter(f);
			if (ctx)
				ctx->result = -EINVAL;
		}

		if (ctx)
			t4_complete(&ctx->completion);
	}
}
/**
 * Retrieve the packet count for the specified filter.
 */
int cxgbe_get_filter_count(struct adapter *adapter, unsigned int fidx,
			   u64 *c, int hash, bool get_byte)
{
	struct filter_entry *f;
	unsigned int tcb_base, tcbaddr;
	int ret;

	tcb_base = t4_read_reg(adapter, A_TP_CMM_TCB_BASE);
	if (is_hashfilter(adapter) && hash) {
		if (fidx < adapter->tids.ntids) {
			f = adapter->tids.tid_tab[fidx];
			if (!f)
				return -EINVAL;

			/* No hit counts are kept for T5 hash filters. */
			if (is_t5(adapter->params.chip)) {
				*c = 0;
				return 0;
			}
			tcbaddr = tcb_base + (fidx * TCB_SIZE);
			goto get_count;
		} else {
			return -ERANGE;
		}
	} else {
		if (fidx >= adapter->tids.nftids)
			return -ERANGE;

		f = &adapter->tids.ftid_tab[fidx];
		if (!f->valid)
			return -EINVAL;

		tcbaddr = tcb_base + f->tid * TCB_SIZE;
	}

	f = &adapter->tids.ftid_tab[fidx];
	if (!f->valid)
		return -EINVAL;

get_count:
	if (is_t5(adapter->params.chip) || is_t6(adapter->params.chip)) {
		/*
		 * For T5, the Filter Packet Hit Count is maintained as a
		 * 32-bit Big Endian value in the TCB field {timestamp}.
		 * Similar to the craziness above, instead of the filter hit
		 * count showing up at offset 20 ((W_TCB_TIMESTAMP == 5) *
		 * sizeof(u32)), it actually shows up at offset 24. Whacky.
		 */
		if (get_byte) {
			unsigned int word_offset = 4;
			__be64 be64_byte_count;

			t4_os_lock(&adapter->win0_lock);
			ret = t4_memory_rw(adapter, MEMWIN_NIC, MEM_EDC0,
					   tcbaddr +
					   (word_offset * sizeof(__be32)),
					   sizeof(be64_byte_count),
					   &be64_byte_count,
					   T4_MEMORY_READ);
			t4_os_unlock(&adapter->win0_lock);
			if (ret < 0)
				return ret;
			*c = be64_to_cpu(be64_byte_count);
		} else {
			unsigned int word_offset = 6;
			__be32 be32_count;

			t4_os_lock(&adapter->win0_lock);
			ret = t4_memory_rw(adapter, MEMWIN_NIC, MEM_EDC0,
					   tcbaddr +
					   (word_offset * sizeof(__be32)),
					   sizeof(be32_count), &be32_count,
					   T4_MEMORY_READ);
			t4_os_unlock(&adapter->win0_lock);
			if (ret < 0)
				return ret;
			*c = (u64)be32_to_cpu(be32_count);
		}
	}

	return 0;
}
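/*
 * Editorial example: with W_TCB_TIMESTAMP == 5, the packet count is read
 * at word_offset 6, i.e. tcbaddr + 6 * sizeof(__be32) = tcbaddr + 24,
 * the "offset 24" quirk the comment above describes; the 64-bit byte
 * count starts at word_offset 4, i.e. offset 16.
 */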
/**
 * Clear the packet count for the specified filter.
 */
int cxgbe_clear_filter_count(struct adapter *adapter, unsigned int fidx,
			     int hash, bool clear_byte)
{
	u64 tcb_mask = 0, tcb_val = 0;
	struct filter_entry *f = NULL;
	u16 tcb_word;

	if (is_hashfilter(adapter) && hash) {
		if (fidx >= adapter->tids.ntids)
			return -ERANGE;

		/* No hitcounts supported for T5 hashfilters */
		if (is_t5(adapter->params.chip))
			return 0;

		f = adapter->tids.tid_tab[fidx];
	} else {
		if (fidx >= adapter->tids.nftids)
			return -ERANGE;

		f = &adapter->tids.ftid_tab[fidx];
	}

	if (!f || !f->valid)
		return -EINVAL;

	tcb_word = W_TCB_TIMESTAMP;
	tcb_mask = V_TCB_TIMESTAMP(M_TCB_TIMESTAMP);
	tcb_val = V_TCB_TIMESTAMP(0ULL);

	set_tcb_field(adapter, f->tid, tcb_word, tcb_mask, tcb_val, 1);

	if (clear_byte) {
		tcb_word = W_TCB_T_RTT_TS_RECENT_AGE;
		tcb_mask =
			V_TCB_T_RTT_TS_RECENT_AGE(M_TCB_T_RTT_TS_RECENT_AGE) |
			V_TCB_T_RTSEQ_RECENT(M_TCB_T_RTSEQ_RECENT);
		tcb_val = V_TCB_T_RTT_TS_RECENT_AGE(0ULL) |
			  V_TCB_T_RTSEQ_RECENT(0ULL);

		set_tcb_field(adapter, f->tid, tcb_word, tcb_mask, tcb_val, 1);
	}

	return 0;
}
/**
 * Handle a Hash filter delete reply.
 */
void cxgbe_hash_del_filter_rpl(struct adapter *adap,
			       const struct cpl_abort_rpl_rss *rpl)
{
	struct tid_info *t = &adap->tids;
	struct filter_entry *f;
	struct filter_ctx *ctx = NULL;
	unsigned int tid = GET_TID(rpl);

	f = lookup_tid(t, tid);
	if (!f) {
		dev_warn(adap, "%s: could not find filter entry: %u\n",
			 __func__, tid);
		return;
	}

	ctx = f->ctx;
	f->ctx = NULL;

	cxgbe_remove_tid(t, 0, tid, 0);
	t4_os_free(f);

	if (ctx) {
		ctx->result = 0;
		t4_complete(&ctx->completion);
	}
}